repo_name
string
path
string
copies
string
size
string
content
string
license
string
MathewWi/supertux-wii
src/sprite_manager.cpp
8
2606
// $Id: sprite_manager.cpp 752 2004-04-26 15:03:24Z grumbel $ // // SuperTux // Copyright (C) 2004 Ingo Ruhnke <grumbel@gmx.de> // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #include <iostream> #include "lispreader.h" #include "sprite_manager.h" SpriteManager::SpriteManager(const std::string& filename) { load_resfile(filename); } SpriteManager::~SpriteManager() { for(std::map<std::string, Sprite*>::iterator i = sprites.begin(); i != sprites.end(); ++i) { delete i->second; } } void SpriteManager::load_resfile(const std::string& filename) { lisp_object_t* root_obj = lisp_read_from_file(filename); if (!root_obj) { std::cout << "SpriteManager: Couldn't load: " << filename << std::endl; return; } lisp_object_t* cur = root_obj; if (strcmp(lisp_symbol(lisp_car(cur)), "supertux-resources") != 0) return; cur = lisp_cdr(cur); while(cur) { lisp_object_t* el = lisp_car(cur); if (strcmp(lisp_symbol(lisp_car(el)), "sprite") == 0) { Sprite* sprite = new Sprite(lisp_cdr(el)); Sprites::iterator i = sprites.find(sprite->get_name()); if (i == sprites.end()) { sprites[sprite->get_name()] = sprite; } else { delete i->second; i->second = sprite; std::cout << "Warning: dulpicate entry: '" << sprite->get_name() << "'" << std::endl; } } else { std::cout << "SpriteManager: Unknown tag" << std::endl; } cur = lisp_cdr(cur); } 
lisp_free(root_obj); } Sprite* SpriteManager::load(const std::string& name) { Sprites::iterator i = sprites.find(name); if (i != sprites.end()) { return i->second; } else { std::cout << "SpriteManager: Sprite '" << name << "' not found" << std::endl; return 0; } } /* EOF */
gpl-2.0
toastcfh/android_kernel_lge_d851
fs/ubifs/ioctl.c
8
5338
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 * Copyright (C) 2006, 2007 University of Szeged, Hungary
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Zoltan Sogor
 *          Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/* ioctl() handling for UBIFS inodes (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS). */

#include <linux/compat.h>
#include <linux/mount.h>
#include "ubifs.h"

/*
 * Propagate the UBIFS per-inode flags into the generic VFS i_flags bits.
 * Only the four bits managed below are touched; all other i_flags bits
 * are preserved.
 */
void ubifs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = ubifs_inode(inode)->flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_DIRSYNC);
	if (flags & UBIFS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & UBIFS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & UBIFS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & UBIFS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
}

/* Translate FS_*_FL ioctl flag bits into the UBIFS_*_FL representation. */
static int ioctl2ubifs(int ioctl_flags)
{
	int ubifs_flags = 0;

	if (ioctl_flags & FS_COMPR_FL)
		ubifs_flags |= UBIFS_COMPR_FL;
	if (ioctl_flags & FS_SYNC_FL)
		ubifs_flags |= UBIFS_SYNC_FL;
	if (ioctl_flags & FS_APPEND_FL)
		ubifs_flags |= UBIFS_APPEND_FL;
	if (ioctl_flags & FS_IMMUTABLE_FL)
		ubifs_flags |= UBIFS_IMMUTABLE_FL;
	if (ioctl_flags & FS_DIRSYNC_FL)
		ubifs_flags |= UBIFS_DIRSYNC_FL;

	return ubifs_flags;
}

/* Translate UBIFS_*_FL flag bits into the FS_*_FL ioctl representation. */
static int ubifs2ioctl(int ubifs_flags)
{
	int ioctl_flags = 0;

	if (ubifs_flags & UBIFS_COMPR_FL)
		ioctl_flags |= FS_COMPR_FL;
	if (ubifs_flags & UBIFS_SYNC_FL)
		ioctl_flags |= FS_SYNC_FL;
	if (ubifs_flags & UBIFS_APPEND_FL)
		ioctl_flags |= FS_APPEND_FL;
	if (ubifs_flags & UBIFS_IMMUTABLE_FL)
		ioctl_flags |= FS_IMMUTABLE_FL;
	if (ubifs_flags & UBIFS_DIRSYNC_FL)
		ioctl_flags |= FS_DIRSYNC_FL;

	return ioctl_flags;
}

/*
 * Apply a new FS_*_FL flag set to @inode.
 *
 * Budget for the inode-dirtying write first, then mutate the inode under
 * ui_mutex.  Changing APPEND/IMMUTABLE requires CAP_LINUX_IMMUTABLE.
 * If the inode was already dirty (@release), the budget taken here is
 * redundant and is released again after marking the inode dirty.
 * Returns 0 or a negative errno.
 */
static int setflags(struct inode *inode, int flags)
{
	int oldflags, err, release;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_budget_req req = { .dirtied_ino = 1,
					.dirtied_ino_d = ui->data_len };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	/* Permission check and flag update must be atomic w.r.t. ui_mutex. */
	mutex_lock(&ui->ui_mutex);
	oldflags = ubifs2ioctl(ui->flags);
	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			err = -EPERM;
			goto out_unlock;
		}
	}

	ui->flags = ioctl2ubifs(flags);
	ubifs_set_inode_flags(inode);
	inode->i_ctime = ubifs_current_time(inode);
	release = ui->dirty;
	mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	if (release)
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = write_inode_now(inode, 1);
	return err;

out_unlock:
	ubifs_err("can't modify inode %lu attributes", inode->i_ino);
	mutex_unlock(&ui->ui_mutex);
	ubifs_release_budget(c, &req);
	return err;
}

/*
 * UBIFS ioctl entry point.  Supports getting and setting the standard
 * FS_IOC_GETFLAGS/FS_IOC_SETFLAGS attribute flags; everything else is
 * rejected with -ENOTTY.
 */
long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int flags, err;
	struct inode *inode = file->f_path.dentry->d_inode;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		flags = ubifs2ioctl(ubifs_inode(inode)->flags);

		dbg_gen("get flags: %#x, i_flags %#x", flags, inode->i_flags);
		return put_user(flags, (int __user *) arg);

	case FS_IOC_SETFLAGS: {
		if (IS_RDONLY(inode))
			return -EROFS;

		if (!inode_owner_or_capable(inode))
			return -EACCES;

		if (get_user(flags, (int __user *) arg))
			return -EFAULT;

		/* DIRSYNC is meaningful only on directories. */
		if (!S_ISDIR(inode->i_mode))
			flags &= ~FS_DIRSYNC_FL;

		/* Hold a write reference on the mount while changing flags. */
		err = mnt_want_write_file(file);
		if (err)
			return err;
		dbg_gen("set flags: %#x, i_flags %#x", flags, inode->i_flags);
		err = setflags(inode, flags);
		mnt_drop_write_file(file);
		return err;
	}

	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
/* 32-bit compat wrapper: map the 32-bit ioctl numbers to the native ones. */
long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return ubifs_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
gpl-2.0
Frimost/IceLands
src/server/scripts/Northrend/AzjolNerub/ahnkahet/instance_ahnkahet.cpp
8
11443
/*
 * Copyright (C) 2008-2011 TrinityCore <http://www.trinitycore.org/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "ScriptPCH.h"
#include "ahnkahet.h"

/* Ahn'kahet encounters:
0 - Elder Nadox
1 - Prince Taldaram
2 - Jedoga Shadowseeker
3 - Herald Volazj
4 - Amanitar (Heroic only)
*/

#define MAX_ENCOUNTER 5

enum Achievements
{
    ACHIEV_VOLUNTEER_WORK = 2056
};

// Instance script for Ahn'kahet: The Old Kingdom (map 619).
// Tracks boss states, the Prince Taldaram sphere/platform objects and the
// Twilight Initiand adds used by the Jedoga Shadowseeker encounter.
class instance_ahnkahet : public InstanceMapScript
{
public:
    instance_ahnkahet() : InstanceMapScript("instance_ahnkahet", 619) { }

    struct instance_ahnkahet_InstanceScript : public InstanceScript
    {
        instance_ahnkahet_InstanceScript(Map* pMap) : InstanceScript(pMap) {}

        // Boss GUIDs (filled in OnCreatureCreate).
        uint64 Elder_Nadox;
        uint64 Prince_Taldaram;
        uint64 Jedoga_Shadowseeker;
        uint64 Herald_Volazj;
        uint64 Amanitar;

        // GameObject GUIDs for the Prince Taldaram pre-event.
        uint64 Prince_TaldaramSpheres[2];
        uint64 Prince_TaldaramPlatform;
        uint64 Prince_TaldaramGate;

        // Twilight Initiand adds for the Jedoga encounter.
        std::set<uint64> InitiandGUIDs;
        uint64 JedogaSacrifices;
        uint64 JedogaTarget;

        uint32 m_auiEncounter[MAX_ENCOUNTER];   // boss states, indices per header comment
        uint32 spheres[2];                      // sphere activation state
        uint8 InitiandCnt;
        uint8 switchtrigger;
        std::string str_data;                   // save-data buffer for GetSaveData()

        void Initialize()
        {
            memset(&m_auiEncounter, 0, sizeof(m_auiEncounter));
            InitiandGUIDs.clear();

            Elder_Nadox = 0;
            Prince_Taldaram = 0;
            Jedoga_Shadowseeker = 0;
            Herald_Volazj = 0;
            Amanitar = 0;
            // Fix: these were previously left uninitialized, so GetData64()
            // could return garbage before the objects were created.
            Prince_TaldaramSpheres[0] = 0;
            Prince_TaldaramSpheres[1] = 0;
            Prince_TaldaramPlatform = 0;
            Prince_TaldaramGate = 0;

            spheres[0] = NOT_STARTED;
            spheres[1] = NOT_STARTED;
            InitiandCnt = 0;
            switchtrigger = 0;
            JedogaSacrifices = 0;
            JedogaTarget = 0;
        }

        bool IsEncounterInProgress() const
        {
            for (uint8 i = 0; i < MAX_ENCOUNTER; ++i)
                if (m_auiEncounter[i] == IN_PROGRESS)
                    return true;
            return false;
        }

        // Record boss / Initiand GUIDs as the creatures spawn.
        void OnCreatureCreate(Creature* creature)
        {
            switch(creature->GetEntry())
            {
                case 29309: Elder_Nadox = creature->GetGUID();          break;
                case 29308: Prince_Taldaram = creature->GetGUID();      break;
                case 29310: Jedoga_Shadowseeker = creature->GetGUID();  break;
                case 29311: Herald_Volazj = creature->GetGUID();        break;
                case 30258: Amanitar = creature->GetGUID();             break;
                case 30114: InitiandGUIDs.insert(creature->GetGUID());  break;
            }
        }

        // Record GO GUIDs and restore their visual state after a reload.
        void OnGameObjectCreate(GameObject* go)
        {
            switch(go->GetEntry())
            {
                case 193564:
                    Prince_TaldaramPlatform = go->GetGUID();
                    if (m_auiEncounter[1] == DONE)
                        HandleGameObject(0, true, go);
                    break;
                case 193093:
                    Prince_TaldaramSpheres[0] = go->GetGUID();
                    if (spheres[0] == IN_PROGRESS)
                    {
                        go->SetGoState(GO_STATE_ACTIVE);
                        go->SetFlag(GAMEOBJECT_FLAGS, GO_FLAG_UNK1);
                    }
                    else
                        go->RemoveFlag(GAMEOBJECT_FLAGS, GO_FLAG_UNK1);
                    break;
                case 193094:
                    Prince_TaldaramSpheres[1] = go->GetGUID();
                    if (spheres[1] == IN_PROGRESS)
                    {
                        go->SetGoState(GO_STATE_ACTIVE);
                        go->SetFlag(GAMEOBJECT_FLAGS, GO_FLAG_UNK1);
                    }
                    else
                        go->RemoveFlag(GAMEOBJECT_FLAGS, GO_FLAG_UNK1);
                    break;
                case 192236:
                    Prince_TaldaramGate = go->GetGUID(); // Web gate past Prince Taldaram
                    if (m_auiEncounter[1] == DONE)
                        HandleGameObject(0, true, go);
                    break;
            }
        }

        void SetData64(uint32 idx, uint64 guid)
        {
            switch(idx)
            {
                case DATA_ADD_JEDOGA_OPFER: JedogaSacrifices = guid; break;
                case DATA_PL_JEDOGA_TARGET: JedogaTarget = guid;     break;
            }
        }

        uint64 GetData64(uint32 identifier)
        {
            switch(identifier)
            {
                case DATA_ELDER_NADOX:               return Elder_Nadox;
                case DATA_PRINCE_TALDARAM:           return Prince_Taldaram;
                case DATA_JEDOGA_SHADOWSEEKER:       return Jedoga_Shadowseeker;
                case DATA_HERALD_VOLAZJ:             return Herald_Volazj;
                case DATA_AMANITAR:                  return Amanitar;
                case DATA_SPHERE1:                   return Prince_TaldaramSpheres[0];
                case DATA_SPHERE2:                   return Prince_TaldaramSpheres[1];
                case DATA_PRINCE_TALDARAM_PLATFORM:  return Prince_TaldaramPlatform;
                case DATA_ADD_JEDOGA_INITIAND:
                {
                    // Pick a random still-alive Initiand.
                    std::vector<uint64> vInitiands;
                    vInitiands.clear();
                    for (std::set<uint64>::const_iterator itr = InitiandGUIDs.begin(); itr != InitiandGUIDs.end(); ++itr)
                    {
                        Creature* cr = instance->GetCreature(*itr);
                        if (cr && cr->isAlive())
                            vInitiands.push_back(*itr);
                    }
                    if (vInitiands.empty())
                        return 0;
                    uint8 j = urand(0, vInitiands.size() - 1);
                    return vInitiands[j];
                }
                case DATA_ADD_JEDOGA_OPFER: return JedogaSacrifices;
                case DATA_PL_JEDOGA_TARGET: return JedogaTarget;
            }
            return 0;
        }

        void SetData(uint32 type, uint32 data)
        {
            switch(type)
            {
                case DATA_ELDER_NADOX_EVENT:
                    m_auiEncounter[0] = data;
                    break;
                case DATA_PRINCE_TALDARAM_EVENT:
                    if (data == DONE)
                        HandleGameObject(Prince_TaldaramGate, true);
                    m_auiEncounter[1] = data;
                    break;
                case DATA_JEDOGA_SHADOWSEEKER_EVENT:
                    m_auiEncounter[2] = data;
                    if (data == DONE)
                    {
                        // Despawn the remaining Initiands after the kill.
                        for (std::set<uint64>::const_iterator itr = InitiandGUIDs.begin(); itr != InitiandGUIDs.end(); ++itr)
                        {
                            Creature* cr = instance->GetCreature(*itr);
                            if (cr && cr->isAlive())
                            {
                                cr->SetVisible(false);
                                cr->setDeathState(JUST_DIED);
                                cr->RemoveCorpse();
                            }
                        }
                    }
                    break;
                case DATA_HERALD_VOLAZJ_EVENT:
                    m_auiEncounter[3] = data;
                    break;
                case DATA_AMANITAR_EVENT:
                    m_auiEncounter[4] = data;
                    break;
                case DATA_SPHERE1_EVENT:
                    spheres[0] = data;
                    break;
                case DATA_SPHERE2_EVENT:
                    spheres[1] = data;
                    break;
                case DATA_JEDOGA_TRIGGER_SWITCH:
                    switchtrigger = data;
                    break;
                case DATA_JEDOGA_RESET_INITIANDS:
                    for (std::set<uint64>::const_iterator itr = InitiandGUIDs.begin(); itr != InitiandGUIDs.end(); ++itr)
                    {
                        Creature* cr = instance->GetCreature(*itr);
                        if (cr)
                        {
                            cr->Respawn();
                            if (!cr->IsInEvadeMode())
                                cr->AI()->EnterEvadeMode();
                        }
                    }
                    break;
            }

            if (data == DONE)
                SaveToDB();
        }

        uint32 GetData(uint32 type)
        {
            switch(type)
            {
                case DATA_ELDER_NADOX_EVENT:         return m_auiEncounter[0];
                case DATA_PRINCE_TALDARAM_EVENT:     return m_auiEncounter[1];
                case DATA_JEDOGA_SHADOWSEEKER_EVENT: return m_auiEncounter[2];
                // Fix: was DATA_HERALD_VOLAZJ (the GUID identifier used in
                // GetData64) — now consistent with SetData above.
                case DATA_HERALD_VOLAZJ_EVENT:       return m_auiEncounter[3];
                case DATA_AMANITAR_EVENT:            return m_auiEncounter[4];
                case DATA_SPHERE1_EVENT:             return spheres[0];
                case DATA_SPHERE2_EVENT:             return spheres[1];
                case DATA_ALL_INITIAND_DEAD:
                    for (std::set<uint64>::const_iterator itr = InitiandGUIDs.begin(); itr != InitiandGUIDs.end(); ++itr)
                    {
                        Creature* cr = instance->GetCreature(*itr);
                        if (!cr || (cr && cr->isAlive()))
                            return 0;
                    }
                    return 1;
                case DATA_JEDOGA_TRIGGER_SWITCH:     return switchtrigger;
            }
            return 0;
        }

        // Serialize the persistent state: "A K" header, five encounter
        // states, two sphere states.
        std::string GetSaveData()
        {
            OUT_SAVE_INST_DATA;

            std::ostringstream saveStream;
            saveStream << "A K " << m_auiEncounter[0] << " " << m_auiEncounter[1] << " "
                << m_auiEncounter[2] << " " << m_auiEncounter[3] << " " << m_auiEncounter[4]
                << " " << spheres[0] << " " << spheres[1];

            str_data = saveStream.str();

            OUT_SAVE_INST_DATA_COMPLETE;
            return str_data;
        }

        // Restore state saved by GetSaveData(); in-progress encounters are
        // reset to NOT_STARTED.
        void Load(const char* in)
        {
            if (!in)
            {
                OUT_LOAD_INST_DATA_FAIL;
                return;
            }

            OUT_LOAD_INST_DATA(in);

            char dataHead1, dataHead2;
            uint16 data0, data1, data2, data3, data4, data5, data6;

            std::istringstream loadStream(in);
            loadStream >> dataHead1 >> dataHead2 >> data0 >> data1 >> data2 >> data3 >> data4 >> data5 >> data6;

            if (dataHead1 == 'A' && dataHead2 == 'K')
            {
                m_auiEncounter[0] = data0;
                m_auiEncounter[1] = data1;
                m_auiEncounter[2] = data2;
                m_auiEncounter[3] = data3;
                m_auiEncounter[4] = data4;

                for (uint8 i = 0; i < MAX_ENCOUNTER; ++i)
                    if (m_auiEncounter[i] == IN_PROGRESS)
                        m_auiEncounter[i] = NOT_STARTED;

                spheres[0] = data5;
                spheres[1] = data6;
            }
            else
                OUT_LOAD_INST_DATA_FAIL;

            OUT_LOAD_INST_DATA_COMPLETE;
        }
    };

    InstanceScript* GetInstanceScript(InstanceMap *map) const
    {
        return new instance_ahnkahet_InstanceScript(map);
    }
};

void AddSC_instance_ahnkahet()
{
    new instance_ahnkahet;
}
gpl-2.0
OpenELEC/linux
drivers/mtd/ftl.c
264
32056
/* This version ported to the Linux-MTD system by dwmw2@infradead.org
 *
 * Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 * - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
 *
 * Based on:
 */
/*======================================================================

    A Flash Translation Layer memory card driver

    This driver implements a disk-like block device driver with an
    apparent block size of 512 bytes for flash memory cards.

    ftl_cs.c 1.62 2000/02/01 00:59:04

    The contents of this file are subject to the Mozilla Public
    License Version 1.1 (the "License"); you may not use this file
    except in compliance with the License. You may obtain a copy of
    the License at http://www.mozilla.org/MPL/

    Software distributed under the License is distributed on an "AS
    IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
    implied. See the License for the specific language governing
    rights and limitations under the License.

    The initial developer of the original code is David A. Hinds
    <dahinds@users.sourceforge.net>.  Portions created by David A. Hinds
    are Copyright © 1999 David A. Hinds.  All Rights Reserved.

    Alternatively, the contents of this file may be used under the
    terms of the GNU General Public License version 2 (the "GPL"), in
    which case the provisions of the GPL are applicable instead of the
    above.  If you wish to allow the use of your version of this file
    only under the terms of the GPL and not to allow others to use
    your version of this file under the MPL, indicate your decision
    by deleting the provisions above and replace them with the notice
    and other provisions required by the GPL.  If you do not delete
    the provisions above, a recipient may use your version of this
    file under either the MPL or the GPL.

    LEGAL NOTE: The FTL format is patented by M-Systems.  They have
    granted a license for its use with PCMCIA devices:

     "M-Systems grants a royalty-free, non-exclusive license under
      any presently existing M-Systems intellectual property rights
      necessary for the design and development of FTL-compatible
      drivers, file systems and utilities using the data formats with
      PCMCIA PC Cards as described in the PCMCIA Flash Translation
      Layer (FTL) Specification."

    Use of the FTL format for non-PCMCIA applications may be an
    infringement of these patents.  For additional information,
    contact M-Systems directly. M-Systems since acquired by Sandisk.

======================================================================*/
#include <linux/mtd/blktrans.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
/*#define PSYCHO_DEBUG */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/vmalloc.h>
#include <linux/blkpg.h>
#include <asm/uaccess.h>

#include <linux/mtd/ftl.h>

/*====================================================================*/

/* Parameters that can be set with 'insmod' */

/* wear-leveling knob: 1-in-shuffle_freq reclaims recycle the freshest
   (least erased) unit instead of the one with the most deleted blocks */
static int shuffle_freq = 50;
module_param(shuffle_freq, int, 0);

/*====================================================================*/

/* Major device # for FTL device */
#ifndef FTL_MAJOR
#define FTL_MAJOR	44
#endif

/*====================================================================*/

/* Maximum number of separate memory devices we'll allow */
#define MAX_DEV		4

/* Maximum number of regions per device */
#define MAX_REGION	4

/* Maximum number of partitions in an FTL region */
#define PART_BITS	4

/* Maximum number of outstanding erase requests per socket */
#define MAX_ERASE	8

/* Sector size -- shouldn't need to change */
#define SECTOR_SIZE	512

/* Each memory region corresponds to a minor device */
typedef struct partition_t {
    struct mtd_blktrans_dev mbd;
    uint32_t		state;		/* FTL_FORMATTED once maps are built */
    uint32_t		*VirtualBlockMap; /* logical block -> flash address, 0xffffffff if unmapped */
    uint32_t		FreeTotal;	/* total free blocks across all data units */
    struct eun_info_t {			/* per data erase unit */
	uint32_t		Offset;
	uint32_t		EraseCount;
	uint32_t		Free;
	uint32_t		Deleted;
    } *EUNInfo;
    struct xfer_info_t {		/* per transfer (spare) erase unit */
	uint32_t		Offset;
	uint32_t		EraseCount;
	uint16_t		state;	/* XFER_* below */
    } *XferInfo;
    uint16_t		bam_index;	/* unit whose BAM is in bam_cache, 0xffff = invalid */
    uint32_t		*bam_cache;	/* cached Block Allocation Map of one unit */
    uint16_t		DataUnits;
    uint32_t		BlocksPerUnit;
    erase_unit_header_t	header;
} partition_t;

/* Partition state flags */
#define FTL_FORMATTED	0x01

/* Transfer unit states */
#define XFER_UNKNOWN	0x00
#define XFER_ERASING	0x01
#define XFER_ERASED	0x02
#define XFER_PREPARED	0x03
#define XFER_FAILED	0x04

/*====================================================================*/

static void ftl_erase_callback(struct erase_info *done);

/*======================================================================

    Scan_header() checks to see if a memory region contains an FTL
    partition.  build_maps() reads all the erase unit headers, builds
    the erase unit map, and then builds the virtual page map.

======================================================================*/

/* Locate and validate the FTL header in the first megabyte of the MTD.
   Returns 0 and fills part->header on success, negative on failure. */
static int scan_header(partition_t *part)
{
    erase_unit_header_t header;
    loff_t offset, max_offset;
    size_t ret;
    int err;
    part->header.FormattedSize = 0;
    max_offset = (0x100000<part->mbd.mtd->size)?0x100000:part->mbd.mtd->size;
    /* Search first megabyte for a valid FTL header */
    for (offset = 0;
	 (offset + sizeof(header)) < max_offset;
	 /* GNU ?: — step by erasesize, or 8K if the MTD reports none */
	 offset += part->mbd.mtd->erasesize ? : 0x2000) {

	err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret,
		       (unsigned char *)&header);

	if (err)
	    return err;

	if (strcmp(header.DataOrgTuple+3, "FTL100") == 0) break;
    }

    if (offset == max_offset) {
	printk(KERN_NOTICE "ftl_cs: FTL header not found.\n");
	return -ENOENT;
    }
    /* Sanity-check the header: 512-byte blocks, 1K..2G erase units,
       fewer transfer units than total units. */
    if (header.BlockSize != 9 ||
	(header.EraseUnitSize < 10) || (header.EraseUnitSize > 31) ||
	(header.NumTransferUnits >= le16_to_cpu(header.NumEraseUnits))) {
	printk(KERN_NOTICE "ftl_cs: FTL header corrupt!\n");
	return -1;
    }
    if ((1 << header.EraseUnitSize) != part->mbd.mtd->erasesize) {
	printk(KERN_NOTICE "ftl: FTL EraseUnitSize %x != MTD erasesize %x\n",
	       1 << header.EraseUnitSize,part->mbd.mtd->erasesize);
	return -1;
    }
    part->header = header;
    return 0;
}

/* Read every erase unit header, classify units into data vs transfer
   units, then build the logical-block -> flash-address map from the
   per-unit BAMs.  On failure all partially built maps are freed via
   the out_* unwind labels. */
static int build_maps(partition_t *part)
{
    erase_unit_header_t header;
    uint16_t xvalid, xtrans, i;
    unsigned blocks, j;
    int hdr_ok, ret = -1;
    ssize_t retval;
    loff_t offset;

    /* Set up erase unit maps */
    part->DataUnits = le16_to_cpu(part->header.NumEraseUnits) -
	part->header.NumTransferUnits;
    part->EUNInfo = kmalloc(part->DataUnits * sizeof(struct eun_info_t),
			    GFP_KERNEL);
    if (!part->EUNInfo)
	goto out;
    for (i = 0; i < part->DataUnits; i++)
	part->EUNInfo[i].Offset = 0xffffffff;	/* "not yet seen" marker */
    part->XferInfo =
	kmalloc(part->header.NumTransferUnits * sizeof(struct xfer_info_t),
		GFP_KERNEL);
    if (!part->XferInfo)
	goto out_EUNInfo;

    xvalid = xtrans = 0;
    for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
	offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
		  << part->header.EraseUnitSize);
	ret = mtd_read(part->mbd.mtd, offset, sizeof(header), &retval,
		       (unsigned char *)&header);

	if (ret)
	    goto out_XferInfo;

	ret = -1;
	/* Is this a transfer partition? */
	hdr_ok = (strcmp(header.DataOrgTuple+3, "FTL100") == 0);
	if (hdr_ok && (le16_to_cpu(header.LogicalEUN) < part->DataUnits) &&
	    (part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset == 0xffffffff)) {
	    /* Valid data unit with an unclaimed logical number */
	    part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset = offset;
	    part->EUNInfo[le16_to_cpu(header.LogicalEUN)].EraseCount =
		le32_to_cpu(header.EraseCount);
	    xvalid++;
	} else {
	    if (xtrans == part->header.NumTransferUnits) {
		printk(KERN_NOTICE "ftl_cs: format error: too many "
		       "transfer units!\n");
		goto out_XferInfo;
	    }
	    if (hdr_ok && (le16_to_cpu(header.LogicalEUN) == 0xffff)) {
		part->XferInfo[xtrans].state = XFER_PREPARED;
		part->XferInfo[xtrans].EraseCount = le32_to_cpu(header.EraseCount);
	    } else {
		part->XferInfo[xtrans].state = XFER_UNKNOWN;
		/* Pick anything reasonable for the erase count */
		part->XferInfo[xtrans].EraseCount =
		    le32_to_cpu(part->header.EraseCount);
	    }
	    part->XferInfo[xtrans].Offset = offset;
	    xtrans++;
	}
    }
    /* Check for format trouble */
    header = part->header;
    if ((xtrans != header.NumTransferUnits) ||
	(xvalid+xtrans != le16_to_cpu(header.NumEraseUnits))) {
	printk(KERN_NOTICE "ftl_cs: format error: erase units "
	       "don't add up!\n");
	goto out_XferInfo;
    }

    /* Set up virtual page map */
    blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
    part->VirtualBlockMap = vmalloc(blocks * sizeof(uint32_t));
    if (!part->VirtualBlockMap)
	goto out_XferInfo;
    memset(part->VirtualBlockMap, 0xff, blocks * sizeof(uint32_t));
    part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize;

    part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(uint32_t),
			      GFP_KERNEL);
    if (!part->bam_cache)
	goto out_VirtualBlockMap;
    part->bam_index = 0xffff;
    part->FreeTotal = 0;

    /* Walk each data unit's BAM to count free/deleted blocks and fill
       the virtual block map. */
    for (i = 0; i < part->DataUnits; i++) {
	part->EUNInfo[i].Free = 0;
	part->EUNInfo[i].Deleted = 0;
	offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);

	ret = mtd_read(part->mbd.mtd, offset,
		       part->BlocksPerUnit * sizeof(uint32_t), &retval,
		       (unsigned char *)part->bam_cache);

	if (ret)
	    goto out_bam_cache;

	for (j = 0; j < part->BlocksPerUnit; j++) {
	    if (BLOCK_FREE(le32_to_cpu(part->bam_cache[j]))) {
		part->EUNInfo[i].Free++;
		part->FreeTotal++;
	    } else if ((BLOCK_TYPE(le32_to_cpu(part->bam_cache[j])) == BLOCK_DATA) &&
		       (BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j])) < blocks))
		part->VirtualBlockMap[BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j]))] =
		    (i << header.EraseUnitSize) + (j << header.BlockSize);
	    else if (BLOCK_DELETED(le32_to_cpu(part->bam_cache[j])))
		part->EUNInfo[i].Deleted++;
	}
    }

    ret = 0;
    goto out;

out_bam_cache:
    kfree(part->bam_cache);
out_VirtualBlockMap:
    vfree(part->VirtualBlockMap);
out_XferInfo:
    kfree(part->XferInfo);
out_EUNInfo:
    kfree(part->EUNInfo);
out:
    return ret;
} /* build_maps */

/*======================================================================

    Erase_xfer() schedules an asynchronous erase operation for a
    transfer unit.  Completion is reported through ftl_erase_callback().

======================================================================*/

static int erase_xfer(partition_t *part,
		      uint16_t xfernum)
{
    int ret;
    struct xfer_info_t *xfer;
    struct erase_info *erase;

    xfer = &part->XferInfo[xfernum];
    pr_debug("ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
    xfer->state = XFER_ERASING;

    /* Is there a free erase slot? Always in MTD. */

    erase=kmalloc(sizeof(struct erase_info), GFP_KERNEL);
    if (!erase)
	return -ENOMEM;
    erase->mtd = part->mbd.mtd;
    erase->callback = ftl_erase_callback;	/* frees 'erase' on completion */
    erase->addr = xfer->Offset;
    erase->len = 1 << part->header.EraseUnitSize;
    erase->priv = (u_long)part;

    ret = mtd_erase(part->mbd.mtd, erase);

    if (!ret)
	xfer->EraseCount++;
    else
	kfree(erase);	/* erase never queued; callback won't run */

    return ret;
} /* erase_xfer */

/*======================================================================

    Prepare_xfer() takes a freshly erased transfer unit and gives
    it an appropriate header.
======================================================================*/

/* MTD erase-completion callback: find the transfer unit matching the
   erased region, mark it ERASED (or FAILED), and free the request. */
static void ftl_erase_callback(struct erase_info *erase)
{
    partition_t *part;
    struct xfer_info_t *xfer;
    int i;

    /* Look up the transfer unit */
    part = (partition_t *)(erase->priv);

    for (i = 0; i < part->header.NumTransferUnits; i++)
	if (part->XferInfo[i].Offset == erase->addr) break;

    if (i == part->header.NumTransferUnits) {
	printk(KERN_NOTICE "ftl_cs: internal error: "
	       "erase lookup failed!\n");
	return;
    }

    xfer = &part->XferInfo[i];
    if (erase->state == MTD_ERASE_DONE)
	xfer->state = XFER_ERASED;
    else {
	xfer->state = XFER_FAILED;
	printk(KERN_NOTICE "ftl_cs: erase failed: state = %d\n",
	       erase->state);
    }
    kfree(erase);
} /* ftl_erase_callback */

/* Write a unit header (LogicalEUN = 0xffff, i.e. "spare") and a BAM
   full of BLOCK_CONTROL stubs to an erased transfer unit, then mark it
   XFER_PREPARED.  Starts from XFER_FAILED so an error leaves the unit
   unusable rather than half-prepared. */
static int prepare_xfer(partition_t *part, int i)
{
    erase_unit_header_t header;
    struct xfer_info_t *xfer;
    int nbam, ret;
    uint32_t ctl;
    ssize_t retlen;
    loff_t offset;

    xfer = &part->XferInfo[i];
    xfer->state = XFER_FAILED;

    pr_debug("ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);

    /* Write the transfer unit header */
    header = part->header;
    header.LogicalEUN = cpu_to_le16(0xffff);
    header.EraseCount = cpu_to_le32(xfer->EraseCount);

    ret = mtd_write(part->mbd.mtd, xfer->Offset, sizeof(header), &retlen,
		    (u_char *)&header);

    if (ret) {
	return ret;
    }

    /* Write the BAM stub */
    nbam = (part->BlocksPerUnit * sizeof(uint32_t) +
	    le32_to_cpu(part->header.BAMOffset) + SECTOR_SIZE - 1) / SECTOR_SIZE;

    offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset);
    ctl = cpu_to_le32(BLOCK_CONTROL);

    /* NOTE: reuses loop variable 'i' (the parameter) — the unit index is
       no longer needed past this point. */
    for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {

	ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
			(u_char *)&ctl);

	if (ret)
	    return ret;
    }
    xfer->state = XFER_PREPARED;
    return 0;

} /* prepare_xfer */

/*======================================================================

    Copy_erase_unit() takes a full erase block and a transfer unit,
    copies everything to the transfer unit, then swaps the block
    pointers.

    All data blocks are copied to the corresponding blocks in the
    target unit, so the virtual block map does not need to be
    updated.

======================================================================*/

static int copy_erase_unit(partition_t *part, uint16_t srcunit,
			   uint16_t xferunit)
{
    u_char buf[SECTOR_SIZE];
    struct eun_info_t *eun;
    struct xfer_info_t *xfer;
    uint32_t src, dest, free, i;
    uint16_t unit;
    int ret;
    ssize_t retlen;
    loff_t offset;
    uint16_t srcunitswap = cpu_to_le16(srcunit);

    eun = &part->EUNInfo[srcunit];
    xfer = &part->XferInfo[xferunit];
    pr_debug("ftl_cs: copying block 0x%x to 0x%x\n",
	     eun->Offset, xfer->Offset);

    /* Read current BAM of the source unit, unless already cached */
    if (part->bam_index != srcunit) {

	offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);

	ret = mtd_read(part->mbd.mtd, offset,
		       part->BlocksPerUnit * sizeof(uint32_t), &retlen,
		       (u_char *)(part->bam_cache));

	/* mark the cache bad, in case we get an error later */
	part->bam_index = 0xffff;

	if (ret) {
	    printk( KERN_WARNING "ftl: Failed to read BAM cache in copy_erase_unit()!\n");
	    return ret;
	}
    }

    /* Write the LogicalEUN for the transfer unit: temporarily 0x7fff
       ("in transit") so a crash mid-copy is detectable. */
    xfer->state = XFER_UNKNOWN;
    offset = xfer->Offset + 20; /* Bad! hard-coded header field offset */
    unit = cpu_to_le16(0x7fff);

    ret = mtd_write(part->mbd.mtd, offset, sizeof(uint16_t), &retlen,
		    (u_char *)&unit);

    if (ret) {
	printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
	return ret;
    }

    /* Copy all data blocks from source unit to transfer unit */
    src = eun->Offset; dest = xfer->Offset;

    free = 0;
    ret = 0;
    for (i = 0; i < part->BlocksPerUnit; i++) {
	switch (BLOCK_TYPE(le32_to_cpu(part->bam_cache[i]))) {
	case BLOCK_CONTROL:
	    /* This gets updated later */
	    break;
	case BLOCK_DATA:
	case BLOCK_REPLACEMENT:
	    ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen,
			   (u_char *)buf);
	    if (ret) {
		printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
		return ret;
	    }

	    ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen,
			    (u_char *)buf);
	    if (ret)  {
		printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
		return ret;
	    }

	    break;
	default:
	    /* All other blocks must be free */
	    part->bam_cache[i] = cpu_to_le32(0xffffffff);
	    free++;
	    break;
	}
	src += SECTOR_SIZE;
	dest += SECTOR_SIZE;
    }

    /* Write the BAM to the transfer unit */
    ret = mtd_write(part->mbd.mtd,
		    xfer->Offset + le32_to_cpu(part->header.BAMOffset),
		    part->BlocksPerUnit * sizeof(int32_t), &retlen,
		    (u_char *)part->bam_cache);
    if (ret) {
	printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
	return ret;
    }

    /* All clear? Then update the LogicalEUN again */
    ret = mtd_write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
		    &retlen, (u_char *)&srcunitswap);

    if (ret) {
	printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
	return ret;
    }

    /* Update the maps and usage stats: the old data unit becomes the
       new transfer unit and vice versa. */
    swap(xfer->EraseCount, eun->EraseCount);
    swap(xfer->Offset, eun->Offset);
    part->FreeTotal -= eun->Free;
    part->FreeTotal += free;
    eun->Free = free;
    eun->Deleted = 0;

    /* Now, the cache should be valid for the new block */
    part->bam_index = srcunit;

    return 0;
} /* copy_erase_unit */

/*======================================================================

    reclaim_block() picks a full erase unit and a transfer unit and
    then calls copy_erase_unit() to copy one to the other.  Then, it
    schedules an erase on the expired block.

    What's a good way to decide which transfer unit and which erase
    unit to use?  Beats me.  My way is to always pick the transfer
    unit with the fewest erases, and usually pick the data unit with
    the most deleted blocks.  But with a small probability, pick the
    oldest data unit instead.  This means that we generally postpone
    the next reclamation as long as possible, but shuffle static
    stuff around a bit for wear leveling.
======================================================================*/

/* Free up space by copying a full data unit into a prepared transfer
   unit (see banner above for the selection policy), then erase the
   retired unit.  Returns 0 or -EIO when no unit can be reclaimed. */
static int reclaim_block(partition_t *part)
{
    uint16_t i, eun, xfer;
    uint32_t best;
    int queued, ret;

    pr_debug("ftl_cs: reclaiming space...\n");
    pr_debug("NumTransferUnits == %x\n", part->header.NumTransferUnits);
    /* Pick the least erased transfer unit; drive pending units through
       their erase/prepare state machine until one is PREPARED. */
    best = 0xffffffff; xfer = 0xffff;
    do {
	queued = 0;
	for (i = 0; i < part->header.NumTransferUnits; i++) {
	    int n=0;	/* set when the state was recognized, for the debug fallthrough */
	    if (part->XferInfo[i].state == XFER_UNKNOWN) {
		pr_debug("XferInfo[%d].state == XFER_UNKNOWN\n",i);
		n=1;
		erase_xfer(part, i);
	    }
	    if (part->XferInfo[i].state == XFER_ERASING) {
		pr_debug("XferInfo[%d].state == XFER_ERASING\n",i);
		n=1;
		queued = 1;
	    }
	    else if (part->XferInfo[i].state == XFER_ERASED) {
		pr_debug("XferInfo[%d].state == XFER_ERASED\n",i);
		n=1;
		prepare_xfer(part, i);
	    }
	    if (part->XferInfo[i].state == XFER_PREPARED) {
		pr_debug("XferInfo[%d].state == XFER_PREPARED\n",i);
		n=1;
		if (part->XferInfo[i].EraseCount <= best) {
		    best = part->XferInfo[i].EraseCount;
		    xfer = i;
		}
	    }
	    if (!n)
		pr_debug("XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);

	}
	if (xfer == 0xffff) {
	    if (queued) {
		/* Erases still in flight: wait for them to complete. */
		pr_debug("ftl_cs: waiting for transfer "
			 "unit to be prepared...\n");
		mtd_sync(part->mbd.mtd);
	    } else {
		static int ne = 0;	/* rate-limit this complaint */
		if (++ne < 5)
		    printk(KERN_NOTICE "ftl_cs: reclaim failed: no "
			   "suitable transfer units!\n");
		else
		    pr_debug("ftl_cs: reclaim failed: no "
			     "suitable transfer units!\n");

		return -EIO;
	    }
	}
    } while (xfer == 0xffff);

    eun = 0;
    if ((jiffies % shuffle_freq) == 0) {
	/* Occasionally recycle the least-erased unit for wear leveling. */
	pr_debug("ftl_cs: recycling freshest block...\n");
	best = 0xffffffff;
	for (i = 0; i < part->DataUnits; i++)
	    if (part->EUNInfo[i].EraseCount <= best) {
		best = part->EUNInfo[i].EraseCount;
		eun = i;
	    }
    } else {
	/* Normal case: pick the unit with the most deleted blocks. */
	best = 0;
	for (i = 0; i < part->DataUnits; i++)
	    if (part->EUNInfo[i].Deleted >= best) {
		best = part->EUNInfo[i].Deleted;
		eun = i;
	    }
	if (best == 0) {
	    static int ne = 0;	/* rate-limit this complaint */
	    if (++ne < 5)
		printk(KERN_NOTICE "ftl_cs: reclaim failed: "
		       "no free blocks!\n");
	    else
		pr_debug("ftl_cs: reclaim failed: "
		       "no free blocks!\n");

	    return -EIO;
	}
    }
    ret = copy_erase_unit(part, eun, xfer);
    if (!ret)
	erase_xfer(part, xfer);
    else
	printk(KERN_NOTICE "ftl_cs: copy_erase_unit failed!\n");
    return ret;
} /* reclaim_block */

/*======================================================================

    Find_free() searches for a free block.  If necessary, it updates
    the BAM cache for the erase unit containing the free block.  It
    returns the block index -- the erase unit is just the currently
    cached unit.  If there are no free blocks, it returns 0 -- this
    is never a valid data block because it contains the header.

======================================================================*/

#ifdef PSYCHO_DEBUG
static void dump_lists(partition_t *part)
{
    int i;
    printk(KERN_DEBUG "ftl_cs: Free total = %d\n", part->FreeTotal);
    for (i = 0; i < part->DataUnits; i++)
	printk(KERN_DEBUG "ftl_cs: unit %d: %d phys, %d free, "
	       "%d deleted\n", i,
	       part->EUNInfo[i].Offset >> part->header.EraseUnitSize,
	       part->EUNInfo[i].Free, part->EUNInfo[i].Deleted);
}
#endif

static uint32_t find_free(partition_t *part)
{
    uint16_t stop, eun;
    uint32_t blk;
    size_t retlen;
    int ret;

    /* Find an erase unit with some free space, preferring the one
       whose BAM is already cached. */
    stop = (part->bam_index == 0xffff) ? 0 : part->bam_index;
    eun = stop;
    do {
	if (part->EUNInfo[eun].Free != 0) break;
	/* Wrap around at end of table */
	if (++eun == part->DataUnits) eun = 0;
    } while (eun != stop);

    if (part->EUNInfo[eun].Free == 0)
	return 0;

    /* Is this unit's BAM cached? */
    if (eun != part->bam_index) {
	/* Invalidate cache */
	part->bam_index = 0xffff;

	ret = mtd_read(part->mbd.mtd,
		       part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
		       part->BlocksPerUnit * sizeof(uint32_t),
		       &retlen, (u_char *)(part->bam_cache));

	if (ret) {
	    printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
	    return 0;
	}
	part->bam_index = eun;
    }

    /* Find a free block */
    for (blk = 0; blk < part->BlocksPerUnit; blk++)
	if (BLOCK_FREE(le32_to_cpu(part->bam_cache[blk]))) break;
    if (blk == part->BlocksPerUnit) {
	/* The unit's Free count and its BAM disagree — corruption. */
#ifdef PSYCHO_DEBUG
	static int ne = 0;
	if (++ne == 1)
	    dump_lists(part);
#endif
	printk(KERN_NOTICE "ftl_cs: bad free list!\n");
	return 0;
    }
    pr_debug("ftl_cs: found free block at %d in %d\n", blk, eun);
    return blk;
} /* find_free */


/*======================================================================

    Read a series of sectors from an FTL partition.

======================================================================*/

/* Read nblocks 512-byte sectors starting at logical 'sector' into
   'buffer'.  Unmapped sectors read back as zeros.  Returns 0 or a
   negative errno. */
static int ftl_read(partition_t *part, caddr_t buffer,
		    u_long sector, u_long nblocks)
{
    uint32_t log_addr, bsize;
    u_long i;
    int ret;
    size_t offset, retlen;

    pr_debug("ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
	     part, sector, nblocks);
    if (!(part->state & FTL_FORMATTED)) {
	printk(KERN_NOTICE "ftl_cs: bad partition\n");
	return -EIO;
    }
    bsize = 1 << part->header.EraseUnitSize;

    for (i = 0; i < nblocks; i++) {
	if (((sector+i) * SECTOR_SIZE) >= le32_to_cpu(part->header.FormattedSize)) {
	    printk(KERN_NOTICE "ftl_cs: bad read offset\n");
	    return -EIO;
	}
	log_addr = part->VirtualBlockMap[sector+i];
	if (log_addr == 0xffffffff)
	    /* Never-written sector: read as zeros. */
	    memset(buffer, 0, SECTOR_SIZE);
	else {
	    offset = (part->EUNInfo[log_addr / bsize].Offset
		      + (log_addr % bsize));
	    ret = mtd_read(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
			   (u_char *)buffer);

	    if (ret) {
		printk(KERN_WARNING "Error reading MTD device in ftl_read()\n");
		return ret;
	    }
	}
	buffer += SECTOR_SIZE;
    }
    return 0;
} /* ftl_read */

/*======================================================================

    Write a series of
sectors to an FTL partition ======================================================================*/ static int set_bam_entry(partition_t *part, uint32_t log_addr, uint32_t virt_addr) { uint32_t bsize, blk, le_virt_addr; #ifdef PSYCHO_DEBUG uint32_t old_addr; #endif uint16_t eun; int ret; size_t retlen, offset; pr_debug("ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n", part, log_addr, virt_addr); bsize = 1 << part->header.EraseUnitSize; eun = log_addr / bsize; blk = (log_addr % bsize) / SECTOR_SIZE; offset = (part->EUNInfo[eun].Offset + blk * sizeof(uint32_t) + le32_to_cpu(part->header.BAMOffset)); #ifdef PSYCHO_DEBUG ret = mtd_read(part->mbd.mtd, offset, sizeof(uint32_t), &retlen, (u_char *)&old_addr); if (ret) { printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret); return ret; } old_addr = le32_to_cpu(old_addr); if (((virt_addr == 0xfffffffe) && !BLOCK_FREE(old_addr)) || ((virt_addr == 0) && (BLOCK_TYPE(old_addr) != BLOCK_DATA)) || (!BLOCK_DELETED(virt_addr) && (old_addr != 0xfffffffe))) { static int ne = 0; if (++ne < 5) { printk(KERN_NOTICE "ftl_cs: set_bam_entry() inconsistency!\n"); printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, old = 0x%x" ", new = 0x%x\n", log_addr, old_addr, virt_addr); } return -EIO; } #endif le_virt_addr = cpu_to_le32(virt_addr); if (part->bam_index == eun) { #ifdef PSYCHO_DEBUG if (le32_to_cpu(part->bam_cache[blk]) != old_addr) { static int ne = 0; if (++ne < 5) { printk(KERN_NOTICE "ftl_cs: set_bam_entry() " "inconsistency!\n"); printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, cache" " = 0x%x\n", le32_to_cpu(part->bam_cache[blk]), old_addr); } return -EIO; } #endif part->bam_cache[blk] = le_virt_addr; } ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen, (u_char *)&le_virt_addr); if (ret) { printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n"); printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, new = 0x%x\n", log_addr, virt_addr); } return ret; } /* set_bam_entry */ static int ftl_write(partition_t *part, caddr_t 
buffer, u_long sector, u_long nblocks) { uint32_t bsize, log_addr, virt_addr, old_addr, blk; u_long i; int ret; size_t retlen, offset; pr_debug("ftl_cs: ftl_write(0x%p, %ld, %ld)\n", part, sector, nblocks); if (!(part->state & FTL_FORMATTED)) { printk(KERN_NOTICE "ftl_cs: bad partition\n"); return -EIO; } /* See if we need to reclaim space, before we start */ while (part->FreeTotal < nblocks) { ret = reclaim_block(part); if (ret) return ret; } bsize = 1 << part->header.EraseUnitSize; virt_addr = sector * SECTOR_SIZE | BLOCK_DATA; for (i = 0; i < nblocks; i++) { if (virt_addr >= le32_to_cpu(part->header.FormattedSize)) { printk(KERN_NOTICE "ftl_cs: bad write offset\n"); return -EIO; } /* Grab a free block */ blk = find_free(part); if (blk == 0) { static int ne = 0; if (++ne < 5) printk(KERN_NOTICE "ftl_cs: internal error: " "no free blocks!\n"); return -ENOSPC; } /* Tag the BAM entry, and write the new block */ log_addr = part->bam_index * bsize + blk * SECTOR_SIZE; part->EUNInfo[part->bam_index].Free--; part->FreeTotal--; if (set_bam_entry(part, log_addr, 0xfffffffe)) return -EIO; part->EUNInfo[part->bam_index].Deleted++; offset = (part->EUNInfo[part->bam_index].Offset + blk * SECTOR_SIZE); ret = mtd_write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, buffer); if (ret) { printk(KERN_NOTICE "ftl_cs: block write failed!\n"); printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, virt_addr" " = 0x%x, Offset = 0x%zx\n", log_addr, virt_addr, offset); return -EIO; } /* Only delete the old entry when the new entry is ready */ old_addr = part->VirtualBlockMap[sector+i]; if (old_addr != 0xffffffff) { part->VirtualBlockMap[sector+i] = 0xffffffff; part->EUNInfo[old_addr/bsize].Deleted++; if (set_bam_entry(part, old_addr, 0)) return -EIO; } /* Finally, set up the new pointers */ if (set_bam_entry(part, log_addr, virt_addr)) return -EIO; part->VirtualBlockMap[sector+i] = log_addr; part->EUNInfo[part->bam_index].Deleted--; buffer += SECTOR_SIZE; virt_addr += SECTOR_SIZE; } return 0; } /* 
ftl_write */ static int ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo) { partition_t *part = (void *)dev; u_long sect; /* Sort of arbitrary: round size down to 4KiB boundary */ sect = le32_to_cpu(part->header.FormattedSize)/SECTOR_SIZE; geo->heads = 1; geo->sectors = 8; geo->cylinders = sect >> 3; return 0; } static int ftl_readsect(struct mtd_blktrans_dev *dev, unsigned long block, char *buf) { return ftl_read((void *)dev, buf, block, 1); } static int ftl_writesect(struct mtd_blktrans_dev *dev, unsigned long block, char *buf) { return ftl_write((void *)dev, buf, block, 1); } static int ftl_discardsect(struct mtd_blktrans_dev *dev, unsigned long sector, unsigned nr_sects) { partition_t *part = (void *)dev; uint32_t bsize = 1 << part->header.EraseUnitSize; pr_debug("FTL erase sector %ld for %d sectors\n", sector, nr_sects); while (nr_sects) { uint32_t old_addr = part->VirtualBlockMap[sector]; if (old_addr != 0xffffffff) { part->VirtualBlockMap[sector] = 0xffffffff; part->EUNInfo[old_addr/bsize].Deleted++; if (set_bam_entry(part, old_addr, 0)) return -EIO; } nr_sects--; sector++; } return 0; } /*====================================================================*/ static void ftl_freepart(partition_t *part) { vfree(part->VirtualBlockMap); part->VirtualBlockMap = NULL; kfree(part->EUNInfo); part->EUNInfo = NULL; kfree(part->XferInfo); part->XferInfo = NULL; kfree(part->bam_cache); part->bam_cache = NULL; } /* ftl_freepart */ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) { partition_t *partition; partition = kzalloc(sizeof(partition_t), GFP_KERNEL); if (!partition) { printk(KERN_WARNING "No memory to scan for FTL on %s\n", mtd->name); return; } partition->mbd.mtd = mtd; if ((scan_header(partition) == 0) && (build_maps(partition) == 0)) { partition->state = FTL_FORMATTED; #ifdef PCMCIA_DEBUG printk(KERN_INFO "ftl_cs: opening %d KiB FTL partition\n", le32_to_cpu(partition->header.FormattedSize) >> 10); #endif 
partition->mbd.size = le32_to_cpu(partition->header.FormattedSize) >> 9; partition->mbd.tr = tr; partition->mbd.devnum = -1; if (!add_mtd_blktrans_dev((void *)partition)) return; } kfree(partition); } static void ftl_remove_dev(struct mtd_blktrans_dev *dev) { del_mtd_blktrans_dev(dev); ftl_freepart((partition_t *)dev); } static struct mtd_blktrans_ops ftl_tr = { .name = "ftl", .major = FTL_MAJOR, .part_bits = PART_BITS, .blksize = SECTOR_SIZE, .readsect = ftl_readsect, .writesect = ftl_writesect, .discard = ftl_discardsect, .getgeo = ftl_getgeo, .add_mtd = ftl_add_mtd, .remove_dev = ftl_remove_dev, .owner = THIS_MODULE, }; static int __init init_ftl(void) { return register_mtd_blktrans(&ftl_tr); } static void __exit cleanup_ftl(void) { deregister_mtd_blktrans(&ftl_tr); } module_init(init_ftl); module_exit(cleanup_ftl); MODULE_LICENSE("Dual MPL/GPL"); MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); MODULE_DESCRIPTION("Support code for Flash Translation Layer, used on PCMCIA devices");
gpl-2.0
jchuang1977/openwrt_AA
package/ead/src/tinysrp/bn_div.c
776
9734
/* crypto/bn/bn_div.c */ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. 
If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] 
*/ #include <stdio.h> #include "bn_lcl.h" #define NO_ASM /* The old slow way */ #if 0 int BN_div(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m, const BIGNUM *d, BN_CTX *ctx) { int i,nm,nd; int ret = 0; BIGNUM *D; bn_check_top(m); bn_check_top(d); if (BN_is_zero(d)) { return(0); } if (BN_ucmp(m,d) < 0) { if (rem != NULL) { if (BN_copy(rem,m) == NULL) return(0); } if (dv != NULL) BN_zero(dv); return(1); } BN_CTX_start(ctx); D = BN_CTX_get(ctx); if (dv == NULL) dv = BN_CTX_get(ctx); if (rem == NULL) rem = BN_CTX_get(ctx); if (D == NULL || dv == NULL || rem == NULL) goto end; nd=BN_num_bits(d); nm=BN_num_bits(m); if (BN_copy(D,d) == NULL) goto end; if (BN_copy(rem,m) == NULL) goto end; /* The next 2 are needed so we can do a dv->d[0]|=1 later * since BN_lshift1 will only work once there is a value :-) */ BN_zero(dv); bn_wexpand(dv,1); dv->top=1; if (!BN_lshift(D,D,nm-nd)) goto end; for (i=nm-nd; i>=0; i--) { if (!BN_lshift1(dv,dv)) goto end; if (BN_ucmp(rem,D) >= 0) { dv->d[0]|=1; if (!BN_usub(rem,rem,D)) goto end; } /* CAN IMPROVE (and have now :=) */ if (!BN_rshift1(D,D)) goto end; } rem->neg=BN_is_zero(rem)?0:m->neg; dv->neg=m->neg^d->neg; ret = 1; end: BN_CTX_end(ctx); return(ret); } #else #if !defined(NO_ASM) && !defined(NO_INLINE_ASM) && !defined(PEDANTIC) && !defined(BN_DIV3W) # if defined(__GNUC__) && __GNUC__>=2 # if defined(__i386) /* * There were two reasons for implementing this template: * - GNU C generates a call to a function (__udivdi3 to be exact) * in reply to ((((BN_ULLONG)n0)<<BN_BITS2)|n1)/d0 (I fail to * understand why...); * - divl doesn't only calculate quotient, but also leaves * remainder in %edx which we can definitely use here:-) * * <appro@fy.chalmers.se> */ # define bn_div_words(n0,n1,d0) \ ({ asm volatile ( \ "divl %4" \ : "=a"(q), "=d"(rem) \ : "a"(n1), "d"(n0), "g"(d0) \ : "cc"); \ q; \ }) # define REMAINDER_IS_ALREADY_CALCULATED # endif /* __<cpu> */ # endif /* __GNUC__ */ #endif /* NO_ASM */ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM 
*num, const BIGNUM *divisor, BN_CTX *ctx) { int norm_shift,i,j,loop; BIGNUM *tmp,wnum,*snum,*sdiv,*res; BN_ULONG *resp,*wnump; BN_ULONG d0,d1; int num_n,div_n; bn_check_top(num); bn_check_top(divisor); if (BN_is_zero(divisor)) { return(0); } if (BN_ucmp(num,divisor) < 0) { if (rm != NULL) { if (BN_copy(rm,num) == NULL) return(0); } if (dv != NULL) BN_zero(dv); return(1); } BN_CTX_start(ctx); tmp=BN_CTX_get(ctx); tmp->neg=0; snum=BN_CTX_get(ctx); sdiv=BN_CTX_get(ctx); if (dv == NULL) res=BN_CTX_get(ctx); else res=dv; if (res == NULL) goto err; /* First we normalise the numbers */ norm_shift=BN_BITS2-((BN_num_bits(divisor))%BN_BITS2); BN_lshift(sdiv,divisor,norm_shift); sdiv->neg=0; norm_shift+=BN_BITS2; BN_lshift(snum,num,norm_shift); snum->neg=0; div_n=sdiv->top; num_n=snum->top; loop=num_n-div_n; /* Lets setup a 'window' into snum * This is the part that corresponds to the current * 'area' being divided */ BN_init(&wnum); wnum.d= &(snum->d[loop]); wnum.top= div_n; wnum.dmax= snum->dmax+1; /* a bit of a lie */ /* Get the top 2 words of sdiv */ /* i=sdiv->top; */ d0=sdiv->d[div_n-1]; d1=(div_n == 1)?0:sdiv->d[div_n-2]; /* pointer to the 'top' of snum */ wnump= &(snum->d[num_n-1]); /* Setup to 'res' */ res->neg= (num->neg^divisor->neg); if (!bn_wexpand(res,(loop+1))) goto err; res->top=loop; resp= &(res->d[loop-1]); /* space for temp */ if (!bn_wexpand(tmp,(div_n+1))) goto err; if (BN_ucmp(&wnum,sdiv) >= 0) { if (!BN_usub(&wnum,&wnum,sdiv)) goto err; *resp=1; res->d[res->top-1]=1; } else res->top--; resp--; for (i=0; i<loop-1; i++) { BN_ULONG q,l0; #ifdef BN_DIV3W q=bn_div_3_words(wnump,d1,d0); #else BN_ULONG n0,n1,rem=0; n0=wnump[0]; n1=wnump[-1]; if (n0 == d0) q=BN_MASK2; else /* n0 < d0 */ { #ifdef BN_LLONG BN_ULLONG t2; #if defined(BN_LLONG) && defined(BN_DIV2W) && !defined(bn_div_words) q=(BN_ULONG)(((((BN_ULLONG)n0)<<BN_BITS2)|n1)/d0); #else q=bn_div_words(n0,n1,d0); #endif #ifndef REMAINDER_IS_ALREADY_CALCULATED /* * rem doesn't have to be BN_ULLONG. 
The least we * know it's less that d0, isn't it? */ rem=(n1-q*d0)&BN_MASK2; #endif t2=(BN_ULLONG)d1*q; for (;;) { if (t2 <= ((((BN_ULLONG)rem)<<BN_BITS2)|wnump[-2])) break; q--; rem += d0; if (rem < d0) break; /* don't let rem overflow */ t2 -= d1; } #else /* !BN_LLONG */ BN_ULONG t2l,t2h,ql,qh; q=bn_div_words(n0,n1,d0); #ifndef REMAINDER_IS_ALREADY_CALCULATED rem=(n1-q*d0)&BN_MASK2; #endif #ifdef BN_UMULT_HIGH t2l = d1 * q; t2h = BN_UMULT_HIGH(d1,q); #else t2l=LBITS(d1); t2h=HBITS(d1); ql =LBITS(q); qh =HBITS(q); mul64(t2l,t2h,ql,qh); /* t2=(BN_ULLONG)d1*q; */ #endif for (;;) { if ((t2h < rem) || ((t2h == rem) && (t2l <= wnump[-2]))) break; q--; rem += d0; if (rem < d0) break; /* don't let rem overflow */ if (t2l < d1) t2h--; t2l -= d1; } #endif /* !BN_LLONG */ } #endif /* !BN_DIV3W */ l0=bn_mul_words(tmp->d,sdiv->d,div_n,q); wnum.d--; wnum.top++; tmp->d[div_n]=l0; for (j=div_n+1; j>0; j--) if (tmp->d[j-1]) break; tmp->top=j; j=wnum.top; BN_sub(&wnum,&wnum,tmp); snum->top=snum->top+wnum.top-j; if (wnum.neg) { q--; j=wnum.top; BN_add(&wnum,&wnum,sdiv); snum->top+=wnum.top-j; } *(resp--)=q; wnump--; } if (rm != NULL) { BN_rshift(rm,snum,norm_shift); rm->neg=num->neg; } BN_CTX_end(ctx); return(1); err: BN_CTX_end(ctx); return(0); } #endif /* rem != m */ int BN_mod(BIGNUM *rem, const BIGNUM *m, const BIGNUM *d, BN_CTX *ctx) { #if 0 /* The old slow way */ int i,nm,nd; BIGNUM *dv; if (BN_ucmp(m,d) < 0) return((BN_copy(rem,m) == NULL)?0:1); BN_CTX_start(ctx); dv=BN_CTX_get(ctx); if (!BN_copy(rem,m)) goto err; nm=BN_num_bits(rem); nd=BN_num_bits(d); if (!BN_lshift(dv,d,nm-nd)) goto err; for (i=nm-nd; i>=0; i--) { if (BN_cmp(rem,dv) >= 0) { if (!BN_sub(rem,rem,dv)) goto err; } if (!BN_rshift1(dv,dv)) goto err; } BN_CTX_end(ctx); return(1); err: BN_CTX_end(ctx); return(0); #else return(BN_div(NULL,rem,m,d,ctx)); #endif }
gpl-2.0
pbeeler/Linux-stable
arch/arm64/crypto/sha2-ce-glue.c
1288
6190
/* * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions * * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <asm/neon.h> #include <asm/unaligned.h> #include <crypto/internal/hash.h> #include <crypto/sha.h> #include <linux/cpufeature.h> #include <linux/crypto.h> #include <linux/module.h> MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); asmlinkage int sha2_ce_transform(int blocks, u8 const *src, u32 *state, u8 *head, long bytes); static int sha224_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); *sctx = (struct sha256_state){ .state = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, } }; return 0; } static int sha256_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); *sctx = (struct sha256_state){ .state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, } }; return 0; } static int sha2_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha256_state *sctx = shash_desc_ctx(desc); unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; sctx->count += len; if ((partial + len) >= SHA256_BLOCK_SIZE) { int blocks; if (partial) { int p = SHA256_BLOCK_SIZE - partial; memcpy(sctx->buf + partial, data, p); data += p; len -= p; } blocks = len / SHA256_BLOCK_SIZE; len %= SHA256_BLOCK_SIZE; kernel_neon_begin_partial(28); sha2_ce_transform(blocks, data, sctx->state, partial ? 
sctx->buf : NULL, 0); kernel_neon_end(); data += blocks * SHA256_BLOCK_SIZE; partial = 0; } if (len) memcpy(sctx->buf + partial, data, len); return 0; } static void sha2_final(struct shash_desc *desc) { static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, }; struct sha256_state *sctx = shash_desc_ctx(desc); __be64 bits = cpu_to_be64(sctx->count << 3); u32 padlen = SHA256_BLOCK_SIZE - ((sctx->count + sizeof(bits)) % SHA256_BLOCK_SIZE); sha2_update(desc, padding, padlen); sha2_update(desc, (const u8 *)&bits, sizeof(bits)); } static int sha224_final(struct shash_desc *desc, u8 *out) { struct sha256_state *sctx = shash_desc_ctx(desc); __be32 *dst = (__be32 *)out; int i; sha2_final(desc); for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++) put_unaligned_be32(sctx->state[i], dst++); *sctx = (struct sha256_state){}; return 0; } static int sha256_final(struct shash_desc *desc, u8 *out) { struct sha256_state *sctx = shash_desc_ctx(desc); __be32 *dst = (__be32 *)out; int i; sha2_final(desc); for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++) put_unaligned_be32(sctx->state[i], dst++); *sctx = (struct sha256_state){}; return 0; } static void sha2_finup(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha256_state *sctx = shash_desc_ctx(desc); int blocks; if (sctx->count || !len || (len % SHA256_BLOCK_SIZE)) { sha2_update(desc, data, len); sha2_final(desc); return; } /* * Use a fast path if the input is a multiple of 64 bytes. 
In * this case, there is no need to copy data around, and we can * perform the entire digest calculation in a single invocation * of sha2_ce_transform() */ blocks = len / SHA256_BLOCK_SIZE; kernel_neon_begin_partial(28); sha2_ce_transform(blocks, data, sctx->state, NULL, len); kernel_neon_end(); data += blocks * SHA256_BLOCK_SIZE; } static int sha224_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { struct sha256_state *sctx = shash_desc_ctx(desc); __be32 *dst = (__be32 *)out; int i; sha2_finup(desc, data, len); for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++) put_unaligned_be32(sctx->state[i], dst++); *sctx = (struct sha256_state){}; return 0; } static int sha256_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { struct sha256_state *sctx = shash_desc_ctx(desc); __be32 *dst = (__be32 *)out; int i; sha2_finup(desc, data, len); for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++) put_unaligned_be32(sctx->state[i], dst++); *sctx = (struct sha256_state){}; return 0; } static int sha2_export(struct shash_desc *desc, void *out) { struct sha256_state *sctx = shash_desc_ctx(desc); struct sha256_state *dst = out; *dst = *sctx; return 0; } static int sha2_import(struct shash_desc *desc, const void *in) { struct sha256_state *sctx = shash_desc_ctx(desc); struct sha256_state const *src = in; *sctx = *src; return 0; } static struct shash_alg algs[] = { { .init = sha224_init, .update = sha2_update, .final = sha224_final, .finup = sha224_finup, .export = sha2_export, .import = sha2_import, .descsize = sizeof(struct sha256_state), .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha224", .cra_driver_name = "sha224-ce", .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, } }, { .init = sha256_init, .update = sha2_update, .final = sha256_final, .finup = sha256_finup, .export = sha2_export, .import = 
sha2_import, .descsize = sizeof(struct sha256_state), .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", .cra_driver_name = "sha256-ce", .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, } } }; static int __init sha2_ce_mod_init(void) { return crypto_register_shashes(algs, ARRAY_SIZE(algs)); } static void __exit sha2_ce_mod_fini(void) { crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); } module_cpu_feature_match(SHA2, sha2_ce_mod_init); module_exit(sha2_ce_mod_fini);
gpl-2.0
Galland/Linux3188
drivers/staging/iio/accel/adis16240_trigger.c
2312
1972
#include <linux/interrupt.h> #include <linux/irq.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/sysfs.h> #include <linux/list.h> #include <linux/spi/spi.h> #include "../iio.h" #include "../sysfs.h" #include "../trigger.h" #include "adis16240.h" /** * adis16240_data_rdy_trig_poll() the event handler for the data rdy trig **/ static irqreturn_t adis16240_data_rdy_trig_poll(int irq, void *trig) { iio_trigger_poll(trig, iio_get_time_ns()); return IRQ_HANDLED; } /** * adis16240_data_rdy_trigger_set_state() set datardy interrupt state **/ static int adis16240_data_rdy_trigger_set_state(struct iio_trigger *trig, bool state) { struct adis16240_state *st = trig->private_data; struct iio_dev *indio_dev = st->indio_dev; dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state); return adis16240_set_irq(st->indio_dev, state); } int adis16240_probe_trigger(struct iio_dev *indio_dev) { int ret; struct adis16240_state *st = indio_dev->dev_data; st->trig = iio_allocate_trigger("adis16240-dev%d", indio_dev->id); if (st->trig == NULL) { ret = -ENOMEM; goto error_ret; } ret = request_irq(st->us->irq, adis16240_data_rdy_trig_poll, IRQF_TRIGGER_RISING, "adis16240", st->trig); if (ret) goto error_free_trig; st->trig->dev.parent = &st->us->dev; st->trig->owner = THIS_MODULE; st->trig->private_data = st; st->trig->set_trigger_state = &adis16240_data_rdy_trigger_set_state; ret = iio_trigger_register(st->trig); /* select default trigger */ indio_dev->trig = st->trig; if (ret) goto error_free_irq; return 0; error_free_irq: free_irq(st->us->irq, st->trig); error_free_trig: iio_free_trigger(st->trig); error_ret: return ret; } void adis16240_remove_trigger(struct iio_dev *indio_dev) { struct adis16240_state *state = indio_dev->dev_data; iio_trigger_unregister(state->trig); free_irq(state->us->irq, state->trig); iio_free_trigger(state->trig); }
gpl-2.0
abhijeet-dev/ll-arndale-octa
fs/exofs/namei.c
3080
7398
/* * Copyright (C) 2005, 2006 * Avishay Traeger (avishay@gmail.com) * Copyright (C) 2008, 2009 * Boaz Harrosh <bharrosh@panasas.com> * * Copyrights for code taken from ext2: * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * from * linux/fs/minix/inode.c * Copyright (C) 1991, 1992 Linus Torvalds * * This file is part of exofs. * * exofs is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. Since it is based on ext2, and the only * valid version of GPL for the Linux kernel is version 2, the only valid * version of GPL for exofs is version 2. * * exofs is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with exofs; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "exofs.h" static inline int exofs_add_nondir(struct dentry *dentry, struct inode *inode) { int err = exofs_add_link(dentry, inode); if (!err) { d_instantiate(dentry, inode); return 0; } inode_dec_link_count(inode); iput(inode); return err; } static struct dentry *exofs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode; ino_t ino; if (dentry->d_name.len > EXOFS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); ino = exofs_inode_by_name(dir, dentry); inode = ino ? 
exofs_iget(dir->i_sb, ino) : NULL; return d_splice_alias(inode, dentry); } static int exofs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct inode *inode = exofs_new_inode(dir, mode); int err = PTR_ERR(inode); if (!IS_ERR(inode)) { inode->i_op = &exofs_file_inode_operations; inode->i_fop = &exofs_file_operations; inode->i_mapping->a_ops = &exofs_aops; mark_inode_dirty(inode); err = exofs_add_nondir(dentry, inode); } return err; } static int exofs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct inode *inode; int err; if (!new_valid_dev(rdev)) return -EINVAL; inode = exofs_new_inode(dir, mode); err = PTR_ERR(inode); if (!IS_ERR(inode)) { init_special_inode(inode, inode->i_mode, rdev); mark_inode_dirty(inode); err = exofs_add_nondir(dentry, inode); } return err; } static int exofs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { struct super_block *sb = dir->i_sb; int err = -ENAMETOOLONG; unsigned l = strlen(symname)+1; struct inode *inode; struct exofs_i_info *oi; if (l > sb->s_blocksize) goto out; inode = exofs_new_inode(dir, S_IFLNK | S_IRWXUGO); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out; oi = exofs_i(inode); if (l > sizeof(oi->i_data)) { /* slow symlink */ inode->i_op = &exofs_symlink_inode_operations; inode->i_mapping->a_ops = &exofs_aops; memset(oi->i_data, 0, sizeof(oi->i_data)); err = page_symlink(inode, symname, l); if (err) goto out_fail; } else { /* fast symlink */ inode->i_op = &exofs_fast_symlink_inode_operations; memcpy(oi->i_data, symname, l); inode->i_size = l-1; } mark_inode_dirty(inode); err = exofs_add_nondir(dentry, inode); out: return err; out_fail: inode_dec_link_count(inode); iput(inode); goto out; } static int exofs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; inode->i_ctime = CURRENT_TIME; inode_inc_link_count(inode); ihold(inode); return exofs_add_nondir(dentry, inode); 
} static int exofs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct inode *inode; int err; inode_inc_link_count(dir); inode = exofs_new_inode(dir, S_IFDIR | mode); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out_dir; inode->i_op = &exofs_dir_inode_operations; inode->i_fop = &exofs_dir_operations; inode->i_mapping->a_ops = &exofs_aops; inode_inc_link_count(inode); err = exofs_make_empty(inode, dir); if (err) goto out_fail; err = exofs_add_link(dentry, inode); if (err) goto out_fail; d_instantiate(dentry, inode); out: return err; out_fail: inode_dec_link_count(inode); inode_dec_link_count(inode); iput(inode); out_dir: inode_dec_link_count(dir); goto out; } static int exofs_unlink(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; struct exofs_dir_entry *de; struct page *page; int err = -ENOENT; de = exofs_find_entry(dir, dentry, &page); if (!de) goto out; err = exofs_delete_entry(de, page); if (err) goto out; inode->i_ctime = dir->i_ctime; inode_dec_link_count(inode); err = 0; out: return err; } static int exofs_rmdir(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; int err = -ENOTEMPTY; if (exofs_empty_dir(inode)) { err = exofs_unlink(dir, dentry); if (!err) { inode->i_size = 0; inode_dec_link_count(inode); inode_dec_link_count(dir); } } return err; } static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct inode *old_inode = old_dentry->d_inode; struct inode *new_inode = new_dentry->d_inode; struct page *dir_page = NULL; struct exofs_dir_entry *dir_de = NULL; struct page *old_page; struct exofs_dir_entry *old_de; int err = -ENOENT; old_de = exofs_find_entry(old_dir, old_dentry, &old_page); if (!old_de) goto out; if (S_ISDIR(old_inode->i_mode)) { err = -EIO; dir_de = exofs_dotdot(old_inode, &dir_page); if (!dir_de) goto out_old; } if (new_inode) { struct page *new_page; struct exofs_dir_entry *new_de; 
err = -ENOTEMPTY; if (dir_de && !exofs_empty_dir(new_inode)) goto out_dir; err = -ENOENT; new_de = exofs_find_entry(new_dir, new_dentry, &new_page); if (!new_de) goto out_dir; err = exofs_set_link(new_dir, new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME; if (dir_de) drop_nlink(new_inode); inode_dec_link_count(new_inode); if (err) goto out_dir; } else { err = exofs_add_link(new_dentry, old_inode); if (err) goto out_dir; if (dir_de) inode_inc_link_count(new_dir); } old_inode->i_ctime = CURRENT_TIME; exofs_delete_entry(old_de, old_page); mark_inode_dirty(old_inode); if (dir_de) { err = exofs_set_link(old_inode, dir_de, dir_page, new_dir); inode_dec_link_count(old_dir); if (err) goto out_dir; } return 0; out_dir: if (dir_de) { kunmap(dir_page); page_cache_release(dir_page); } out_old: kunmap(old_page); page_cache_release(old_page); out: return err; } const struct inode_operations exofs_dir_inode_operations = { .create = exofs_create, .lookup = exofs_lookup, .link = exofs_link, .unlink = exofs_unlink, .symlink = exofs_symlink, .mkdir = exofs_mkdir, .rmdir = exofs_rmdir, .mknod = exofs_mknod, .rename = exofs_rename, .setattr = exofs_setattr, }; const struct inode_operations exofs_special_inode_operations = { .setattr = exofs_setattr, };
gpl-2.0
mastero9017/Blu_Spark
arch/arm/mach-prima2/irq.c
4872
3239
/* * interrupt controller support for CSR SiRFprimaII * * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. * * Licensed under GPLv2 or later. */ #include <linux/init.h> #include <linux/io.h> #include <linux/irq.h> #include <mach/hardware.h> #include <asm/mach/irq.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/irqdomain.h> #include <linux/syscore_ops.h> #define SIRFSOC_INT_RISC_MASK0 0x0018 #define SIRFSOC_INT_RISC_MASK1 0x001C #define SIRFSOC_INT_RISC_LEVEL0 0x0020 #define SIRFSOC_INT_RISC_LEVEL1 0x0024 void __iomem *sirfsoc_intc_base; static __init void sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) { struct irq_chip_generic *gc; struct irq_chip_type *ct; gc = irq_alloc_generic_chip("SIRFINTC", 1, irq_start, base, handle_level_irq); ct = gc->chip_types; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->regs.mask = SIRFSOC_INT_RISC_MASK0; irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 0); } static __init void sirfsoc_irq_init(void) { sirfsoc_alloc_gc(sirfsoc_intc_base, 0, 32); sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32, SIRFSOC_INTENAL_IRQ_END + 1 - 32); writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL0); writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL1); writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_MASK0); writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_MASK1); } static struct of_device_id intc_ids[] = { { .compatible = "sirf,prima2-intc" }, {}, }; void __init sirfsoc_of_irq_init(void) { struct device_node *np; np = of_find_matching_node(NULL, intc_ids); if (!np) panic("unable to find compatible intc node in dtb\n"); sirfsoc_intc_base = of_iomap(np, 0); if (!sirfsoc_intc_base) panic("unable to map intc cpu registers\n"); irq_domain_add_legacy(np, SIRFSOC_INTENAL_IRQ_END + 1, 0, 0, &irq_domain_simple_ops, NULL); of_node_put(np); sirfsoc_irq_init(); } struct 
sirfsoc_irq_status { u32 mask0; u32 mask1; u32 level0; u32 level1; }; static struct sirfsoc_irq_status sirfsoc_irq_st; static int sirfsoc_irq_suspend(void) { sirfsoc_irq_st.mask0 = readl_relaxed(sirfsoc_intc_base + SIRFSOC_INT_RISC_MASK0); sirfsoc_irq_st.mask1 = readl_relaxed(sirfsoc_intc_base + SIRFSOC_INT_RISC_MASK1); sirfsoc_irq_st.level0 = readl_relaxed(sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL0); sirfsoc_irq_st.level1 = readl_relaxed(sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL1); return 0; } static void sirfsoc_irq_resume(void) { writel_relaxed(sirfsoc_irq_st.mask0, sirfsoc_intc_base + SIRFSOC_INT_RISC_MASK0); writel_relaxed(sirfsoc_irq_st.mask1, sirfsoc_intc_base + SIRFSOC_INT_RISC_MASK1); writel_relaxed(sirfsoc_irq_st.level0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL0); writel_relaxed(sirfsoc_irq_st.level1, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL1); } static struct syscore_ops sirfsoc_irq_syscore_ops = { .suspend = sirfsoc_irq_suspend, .resume = sirfsoc_irq_resume, }; static int __init sirfsoc_irq_pm_init(void) { register_syscore_ops(&sirfsoc_irq_syscore_ops); return 0; } device_initcall(sirfsoc_irq_pm_init);
gpl-2.0
petalyaa/htc-evo3d-shooteru
arch/avr32/mm/fault.c
7176
5841
/* * Copyright (C) 2004-2006 Atmel Corporation * * Based on linux/arch/sh/mm/fault.c: * Copyright (C) 1999 Niibe Yutaka * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/mm.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/kdebug.h> #include <linux/kprobes.h> #include <asm/mmu_context.h> #include <asm/sysreg.h> #include <asm/tlb.h> #include <asm/uaccess.h> #ifdef CONFIG_KPROBES static inline int notify_page_fault(struct pt_regs *regs, int trap) { int ret = 0; if (!user_mode(regs)) { if (kprobe_running() && kprobe_fault_handler(regs, trap)) ret = 1; } return ret; } #else static inline int notify_page_fault(struct pt_regs *regs, int trap) { return 0; } #endif int exception_trace = 1; /* * This routine handles page faults. It determines the address and the * problem, and then passes it off to one of the appropriate routines. * * ecr is the Exception Cause Register. Possible values are: * 6: Protection fault (instruction access) * 15: Protection fault (read access) * 16: Protection fault (write access) * 20: Page not found (instruction access) * 24: Page not found (read access) * 28: Page not found (write access) */ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) { struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct *vma; const struct exception_table_entry *fixup; unsigned long address; unsigned long page; int writeaccess; long signr; int code; int fault; if (notify_page_fault(regs, ecr)) return; address = sysreg_read(TLBEAR); tsk = current; mm = tsk->mm; signr = SIGSEGV; code = SEGV_MAPERR; /* * If we're in an interrupt or have no user context, we must * not take the fault... 
*/ if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM)) goto no_context; local_irq_enable(); down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) goto bad_area; if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if (expand_stack(vma, address)) goto bad_area; /* * Ok, we have a good vm_area for this memory access, so we * can handle it... */ good_area: code = SEGV_ACCERR; writeaccess = 0; switch (ecr) { case ECR_PROTECTION_X: case ECR_TLB_MISS_X: if (!(vma->vm_flags & VM_EXEC)) goto bad_area; break; case ECR_PROTECTION_R: case ECR_TLB_MISS_R: if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) goto bad_area; break; case ECR_PROTECTION_W: case ECR_TLB_MISS_W: if (!(vma->vm_flags & VM_WRITE)) goto bad_area; writeaccess = 1; break; default: panic("Unhandled case %lu in do_page_fault!", ecr); } /* * If for any reason at all we couldn't handle the fault, make * sure we exit gracefully rather than endlessly redo the * fault. */ fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (fault & VM_FAULT_MAJOR) tsk->maj_flt++; else tsk->min_flt++; up_read(&mm->mmap_sem); return; /* * Something tried to access memory that isn't in our memory * map. Fix it, but check if it's kernel or user first... */ bad_area: up_read(&mm->mmap_sem); if (user_mode(regs)) { if (exception_trace && printk_ratelimit()) printk("%s%s[%d]: segfault at %08lx pc %08lx " "sp %08lx ecr %lu\n", is_global_init(tsk) ? KERN_EMERG : KERN_INFO, tsk->comm, tsk->pid, address, regs->pc, regs->sp, ecr); _exception(SIGSEGV, regs, code, address); return; } no_context: /* Are we prepared to handle this kernel fault? */ fixup = search_exception_tables(regs->pc); if (fixup) { regs->pc = fixup->fixup; return; } /* * Oops. The kernel tried to access some bad page. 
We'll have * to terminate things with extreme prejudice. */ if (address < PAGE_SIZE) printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); else printk(KERN_ALERT "Unable to handle kernel paging request"); printk(" at virtual address %08lx\n", address); page = sysreg_read(PTBR); printk(KERN_ALERT "ptbr = %08lx", page); if (address >= TASK_SIZE) page = (unsigned long)swapper_pg_dir; if (page) { page = ((unsigned long *)page)[address >> 22]; printk(" pgd = %08lx", page); if (page & _PAGE_PRESENT) { page &= PAGE_MASK; address &= 0x003ff000; page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT]; printk(" pte = %08lx", page); } } printk("\n"); die("Kernel access of bad area", regs, signr); return; /* * We ran out of memory, or some other thing happened to us * that made us unable to handle the page fault gracefully. */ out_of_memory: up_read(&mm->mmap_sem); pagefault_out_of_memory(); if (!user_mode(regs)) goto no_context; return; do_sigbus: up_read(&mm->mmap_sem); /* Kernel mode? Handle exceptions or die */ signr = SIGBUS; code = BUS_ADRERR; if (!user_mode(regs)) goto no_context; if (exception_trace) printk("%s%s[%d]: bus error at %08lx pc %08lx " "sp %08lx ecr %lu\n", is_global_init(tsk) ? KERN_EMERG : KERN_INFO, tsk->comm, tsk->pid, address, regs->pc, regs->sp, ecr); _exception(SIGBUS, regs, BUS_ADRERR, address); } asmlinkage void do_bus_error(unsigned long addr, int write_access, struct pt_regs *regs) { printk(KERN_ALERT "Bus error at physical address 0x%08lx (%s access)\n", addr, write_access ? "write" : "read"); printk(KERN_INFO "DTLB dump:\n"); dump_dtlb(); die("Bus Error", regs, SIGKILL); }
gpl-2.0
lostemp/lsk-3.4-android-12.09
arch/arm/mach-omap2/clock34xx.c
7688
5175
/* * OMAP3-specific clock framework functions * * Copyright (C) 2007-2008 Texas Instruments, Inc. * Copyright (C) 2007-2011 Nokia Corporation * * Paul Walmsley * Jouni Högander * * Parts of this code are based on code written by * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu, * Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/clk.h> #include <linux/io.h> #include <plat/clock.h> #include "clock.h" #include "clock34xx.h" #include "cm2xxx_3xxx.h" #include "cm-regbits-34xx.h" /** * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI * @clk: struct clk * being enabled * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator * * The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift * from the CM_{I,F}CLKEN bit. Pass back the correct info via * @idlest_reg and @idlest_bit. No return value. 
*/ static void omap3430es2_clk_ssi_find_idlest(struct clk *clk, void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val) { u32 r; r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); *idlest_reg = (__force void __iomem *)r; *idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT; *idlest_val = OMAP34XX_CM_IDLEST_VAL; } const struct clkops clkops_omap3430es2_ssi_wait = { .enable = omap2_dflt_clk_enable, .disable = omap2_dflt_clk_disable, .find_idlest = omap3430es2_clk_ssi_find_idlest, .find_companion = omap2_clk_dflt_find_companion, }; const struct clkops clkops_omap3430es2_iclk_ssi_wait = { .enable = omap2_dflt_clk_enable, .disable = omap2_dflt_clk_disable, .find_idlest = omap3430es2_clk_ssi_find_idlest, .find_companion = omap2_clk_dflt_find_companion, .allow_idle = omap2_clkt_iclk_allow_idle, .deny_idle = omap2_clkt_iclk_deny_idle, }; /** * omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST * @clk: struct clk * being enabled * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator * * Some OMAP modules on OMAP3 ES2+ chips have both initiator and * target IDLEST bits. For our purposes, we are concerned with the * target IDLEST bits, which exist at a different bit position than * the *CLKEN bit position for these modules (DSS and USBHOST) (The * default find_idlest code assumes that they are at the same * position.) No return value. 
*/ static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk *clk, void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val) { u32 r; r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); *idlest_reg = (__force void __iomem *)r; /* USBHOST_IDLE has same shift */ *idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT; *idlest_val = OMAP34XX_CM_IDLEST_VAL; } const struct clkops clkops_omap3430es2_dss_usbhost_wait = { .enable = omap2_dflt_clk_enable, .disable = omap2_dflt_clk_disable, .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest, .find_companion = omap2_clk_dflt_find_companion, }; const struct clkops clkops_omap3430es2_iclk_dss_usbhost_wait = { .enable = omap2_dflt_clk_enable, .disable = omap2_dflt_clk_disable, .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest, .find_companion = omap2_clk_dflt_find_companion, .allow_idle = omap2_clkt_iclk_allow_idle, .deny_idle = omap2_clkt_iclk_deny_idle, }; /** * omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB * @clk: struct clk * being enabled * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator * * The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different * shift from the CM_{I,F}CLKEN bit. Pass back the correct info via * @idlest_reg and @idlest_bit. No return value. 
*/ static void omap3430es2_clk_hsotgusb_find_idlest(struct clk *clk, void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val) { u32 r; r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); *idlest_reg = (__force void __iomem *)r; *idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT; *idlest_val = OMAP34XX_CM_IDLEST_VAL; } const struct clkops clkops_omap3430es2_hsotgusb_wait = { .enable = omap2_dflt_clk_enable, .disable = omap2_dflt_clk_disable, .find_idlest = omap3430es2_clk_hsotgusb_find_idlest, .find_companion = omap2_clk_dflt_find_companion, }; const struct clkops clkops_omap3430es2_iclk_hsotgusb_wait = { .enable = omap2_dflt_clk_enable, .disable = omap2_dflt_clk_disable, .find_idlest = omap3430es2_clk_hsotgusb_find_idlest, .find_companion = omap2_clk_dflt_find_companion, .allow_idle = omap2_clkt_iclk_allow_idle, .deny_idle = omap2_clkt_iclk_deny_idle, };
gpl-2.0
desteam/android_kernel_huawei_msm7x30
drivers/gpu/drm/mga/mga_irq.c
8456
4968
/* mga_irq.c -- IRQ handling for radeon -*- linux-c -*- */ /* * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. * * The Weather Channel (TM) funded Tungsten Graphics to develop the * initial release of the Radeon 8500 driver under the XFree86 license. * This notice must be preserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Keith Whitwell <keith@tungstengraphics.com> * Eric Anholt <anholt@FreeBSD.org> */ #include "drmP.h" #include "drm.h" #include "mga_drm.h" #include "mga_drv.h" u32 mga_get_vblank_counter(struct drm_device *dev, int crtc) { const drm_mga_private_t *const dev_priv = (drm_mga_private_t *) dev->dev_private; if (crtc != 0) return 0; return atomic_read(&dev_priv->vbl_received); } irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; int status; int handled = 0; status = MGA_READ(MGA_STATUS); /* VBLANK interrupt */ if (status & MGA_VLINEPEN) { MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR); atomic_inc(&dev_priv->vbl_received); drm_handle_vblank(dev, 0); handled = 1; } /* SOFTRAP interrupt */ if (status & MGA_SOFTRAPEN) { const u32 prim_start = MGA_READ(MGA_PRIMADDRESS); const u32 prim_end = MGA_READ(MGA_PRIMEND); MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR); /* In addition to clearing the interrupt-pending bit, we * have to write to MGA_PRIMEND to re-start the DMA operation. */ if ((prim_start & ~0x03) != (prim_end & ~0x03)) MGA_WRITE(MGA_PRIMEND, prim_end); atomic_inc(&dev_priv->last_fence_retired); DRM_WAKEUP(&dev_priv->fence_queue); handled = 1; } if (handled) return IRQ_HANDLED; return IRQ_NONE; } int mga_enable_vblank(struct drm_device *dev, int crtc) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; if (crtc != 0) { DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", crtc); return 0; } MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); return 0; } void mga_disable_vblank(struct drm_device *dev, int crtc) { if (crtc != 0) { DRM_ERROR("tried to disable vblank on non-existent crtc %d\n", crtc); } /* Do *NOT* disable the vertical refresh interrupt. 
MGA doesn't have * a nice hardware counter that tracks the number of refreshes when * the interrupt is disabled, and the kernel doesn't know the refresh * rate to calculate an estimate. */ /* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */ } int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; unsigned int cur_fence; int ret = 0; /* Assume that the user has missed the current sequence number * by about a day rather than she wants to wait for years * using fences. */ DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ, (((cur_fence = atomic_read(&dev_priv->last_fence_retired)) - *sequence) <= (1 << 23))); *sequence = cur_fence; return ret; } void mga_driver_irq_preinstall(struct drm_device *dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; /* Disable *all* interrupts */ MGA_WRITE(MGA_IEN, 0); /* Clear bits if they're already high */ MGA_WRITE(MGA_ICLEAR, ~0); } int mga_driver_irq_postinstall(struct drm_device *dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); /* Turn on soft trap interrupt. Vertical blank interrupts are enabled * in mga_enable_vblank. */ MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN); return 0; } void mga_driver_irq_uninstall(struct drm_device *dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; if (!dev_priv) return; /* Disable *all* interrupts */ MGA_WRITE(MGA_IEN, 0); dev->irq_enabled = 0; }
gpl-2.0
jrfastab/Linux-Kernel-QOS
drivers/sh/intc/virq-debugfs.c
13320
1519
/* * Support for virtual IRQ subgroups debugfs mapping. * * Copyright (C) 2010 Paul Mundt * * Modelled after arch/powerpc/kernel/irq.c. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/seq_file.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/debugfs.h> #include "internals.h" static int intc_irq_xlate_debug(struct seq_file *m, void *priv) { int i; seq_printf(m, "%-5s %-7s %-15s\n", "irq", "enum", "chip name"); for (i = 1; i < nr_irqs; i++) { struct intc_map_entry *entry = intc_irq_xlate_get(i); struct intc_desc_int *desc = entry->desc; if (!desc) continue; seq_printf(m, "%5d ", i); seq_printf(m, "0x%05x ", entry->enum_id); seq_printf(m, "%-15s\n", desc->chip.name); } return 0; } static int intc_irq_xlate_open(struct inode *inode, struct file *file) { return single_open(file, intc_irq_xlate_debug, inode->i_private); } static const struct file_operations intc_irq_xlate_fops = { .open = intc_irq_xlate_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init intc_irq_xlate_init(void) { /* * XXX.. use arch_debugfs_dir here when all of the intc users are * converted. */ if (debugfs_create_file("intc_irq_xlate", S_IRUGO, NULL, NULL, &intc_irq_xlate_fops) == NULL) return -ENOMEM; return 0; } fs_initcall(intc_irq_xlate_init);
gpl-2.0
diorahman/linux
drivers/sh/intc/virq-debugfs.c
13320
1519
/* * Support for virtual IRQ subgroups debugfs mapping. * * Copyright (C) 2010 Paul Mundt * * Modelled after arch/powerpc/kernel/irq.c. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/seq_file.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/debugfs.h> #include "internals.h" static int intc_irq_xlate_debug(struct seq_file *m, void *priv) { int i; seq_printf(m, "%-5s %-7s %-15s\n", "irq", "enum", "chip name"); for (i = 1; i < nr_irqs; i++) { struct intc_map_entry *entry = intc_irq_xlate_get(i); struct intc_desc_int *desc = entry->desc; if (!desc) continue; seq_printf(m, "%5d ", i); seq_printf(m, "0x%05x ", entry->enum_id); seq_printf(m, "%-15s\n", desc->chip.name); } return 0; } static int intc_irq_xlate_open(struct inode *inode, struct file *file) { return single_open(file, intc_irq_xlate_debug, inode->i_private); } static const struct file_operations intc_irq_xlate_fops = { .open = intc_irq_xlate_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init intc_irq_xlate_init(void) { /* * XXX.. use arch_debugfs_dir here when all of the intc users are * converted. */ if (debugfs_create_file("intc_irq_xlate", S_IRUGO, NULL, NULL, &intc_irq_xlate_fops) == NULL) return -ENOMEM; return 0; } fs_initcall(intc_irq_xlate_init);
gpl-2.0
zarboz/Evita-Jellybean
drivers/video/sis/sis_accel.c
14856
12136
/* * SiS 300/540/630[S]/730[S], * SiS 315[E|PRO]/550/[M]650/651/[M]661[F|M]X/740/[M]741[GX]/330/[M]760[GX], * XGI V3XT/V5/V8, Z7 * frame buffer driver for Linux kernels >= 2.4.14 and >=2.6.3 * * 2D acceleration part * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the named License, * or any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA * * Based on the XFree86/X.org driver which is * Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria * * Author: Thomas Winischhofer <thomas@winischhofer.net> * (see http://www.winischhofer.net/ * for more information and updates) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/fb.h> #include <linux/ioport.h> #include <linux/types.h> #include <asm/io.h> #include "sis.h" #include "sis_accel.h" static const u8 sisALUConv[] = { 0x00, /* dest = 0; 0, GXclear, 0 */ 0x88, /* dest &= src; DSa, GXand, 0x1 */ 0x44, /* dest = src & ~dest; SDna, GXandReverse, 0x2 */ 0xCC, /* dest = src; S, GXcopy, 0x3 */ 0x22, /* dest &= ~src; DSna, GXandInverted, 0x4 */ 0xAA, /* dest = dest; D, GXnoop, 0x5 */ 0x66, /* dest = ^src; DSx, GXxor, 0x6 */ 0xEE, /* dest |= src; DSo, GXor, 0x7 */ 0x11, /* dest = ~src & ~dest; DSon, GXnor, 0x8 */ 0x99, /* dest ^= ~src ; DSxn, GXequiv, 0x9 */ 0x55, /* dest = ~dest; Dn, GXInvert, 0xA */ 0xDD, /* dest = src|~dest ; SDno, GXorReverse, 0xB */ 0x33, /* dest = ~src; Sn, GXcopyInverted, 0xC */ 0xBB, /* dest |= ~src; DSno, GXorInverted, 
0xD */ 0x77, /* dest = ~src|~dest; DSan, GXnand, 0xE */ 0xFF, /* dest = 0xFF; 1, GXset, 0xF */ }; /* same ROP but with Pattern as Source */ static const u8 sisPatALUConv[] = { 0x00, /* dest = 0; 0, GXclear, 0 */ 0xA0, /* dest &= src; DPa, GXand, 0x1 */ 0x50, /* dest = src & ~dest; PDna, GXandReverse, 0x2 */ 0xF0, /* dest = src; P, GXcopy, 0x3 */ 0x0A, /* dest &= ~src; DPna, GXandInverted, 0x4 */ 0xAA, /* dest = dest; D, GXnoop, 0x5 */ 0x5A, /* dest = ^src; DPx, GXxor, 0x6 */ 0xFA, /* dest |= src; DPo, GXor, 0x7 */ 0x05, /* dest = ~src & ~dest; DPon, GXnor, 0x8 */ 0xA5, /* dest ^= ~src ; DPxn, GXequiv, 0x9 */ 0x55, /* dest = ~dest; Dn, GXInvert, 0xA */ 0xF5, /* dest = src|~dest ; PDno, GXorReverse, 0xB */ 0x0F, /* dest = ~src; Pn, GXcopyInverted, 0xC */ 0xAF, /* dest |= ~src; DPno, GXorInverted, 0xD */ 0x5F, /* dest = ~src|~dest; DPan, GXnand, 0xE */ 0xFF, /* dest = 0xFF; 1, GXset, 0xF */ }; static const int myrops[] = { 3, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }; /* 300 series ----------------------------------------------------- */ #ifdef CONFIG_FB_SIS_300 static void SiS300Sync(struct sis_video_info *ivideo) { SiS300Idle } static void SiS300SetupForScreenToScreenCopy(struct sis_video_info *ivideo, int xdir, int ydir, int rop, int trans_color) { SiS300SetupDSTColorDepth(ivideo->DstColor); SiS300SetupSRCPitch(ivideo->video_linelength) SiS300SetupDSTRect(ivideo->video_linelength, 0xffff) if(trans_color != -1) { SiS300SetupROP(0x0A) SiS300SetupSRCTrans(trans_color) SiS300SetupCMDFlag(TRANSPARENT_BITBLT) } else { SiS300SetupROP(sisALUConv[rop]) } if(xdir > 0) { SiS300SetupCMDFlag(X_INC) } if(ydir > 0) { SiS300SetupCMDFlag(Y_INC) } } static void SiS300SubsequentScreenToScreenCopy(struct sis_video_info *ivideo, int src_x, int src_y, int dst_x, int dst_y, int width, int height) { u32 srcbase = 0, dstbase = 0; if(src_y >= 2048) { srcbase = ivideo->video_linelength * src_y; src_y = 0; } if(dst_y >= 2048) { dstbase = ivideo->video_linelength * dst_y; dst_y = 0; } 
SiS300SetupSRCBase(srcbase); SiS300SetupDSTBase(dstbase); if(!(ivideo->CommandReg & X_INC)) { src_x += width-1; dst_x += width-1; } if(!(ivideo->CommandReg & Y_INC)) { src_y += height-1; dst_y += height-1; } SiS300SetupRect(width, height) SiS300SetupSRCXY(src_x, src_y) SiS300SetupDSTXY(dst_x, dst_y) SiS300DoCMD } static void SiS300SetupForSolidFill(struct sis_video_info *ivideo, u32 color, int rop) { SiS300SetupPATFG(color) SiS300SetupDSTRect(ivideo->video_linelength, 0xffff) SiS300SetupDSTColorDepth(ivideo->DstColor); SiS300SetupROP(sisPatALUConv[rop]) SiS300SetupCMDFlag(PATFG) } static void SiS300SubsequentSolidFillRect(struct sis_video_info *ivideo, int x, int y, int w, int h) { u32 dstbase = 0; if(y >= 2048) { dstbase = ivideo->video_linelength * y; y = 0; } SiS300SetupDSTBase(dstbase) SiS300SetupDSTXY(x,y) SiS300SetupRect(w,h) SiS300SetupCMDFlag(X_INC | Y_INC | BITBLT) SiS300DoCMD } #endif /* 315/330/340 series ---------------------------------------------- */ #ifdef CONFIG_FB_SIS_315 static void SiS310Sync(struct sis_video_info *ivideo) { SiS310Idle } static void SiS310SetupForScreenToScreenCopy(struct sis_video_info *ivideo, int rop, int trans_color) { SiS310SetupDSTColorDepth(ivideo->DstColor); SiS310SetupSRCPitch(ivideo->video_linelength) SiS310SetupDSTRect(ivideo->video_linelength, 0x0fff) if(trans_color != -1) { SiS310SetupROP(0x0A) SiS310SetupSRCTrans(trans_color) SiS310SetupCMDFlag(TRANSPARENT_BITBLT) } else { SiS310SetupROP(sisALUConv[rop]) /* Set command - not needed, both 0 */ /* SiSSetupCMDFlag(BITBLT | SRCVIDEO) */ } SiS310SetupCMDFlag(ivideo->SiS310_AccelDepth) /* The chip is smart enough to know the direction */ } static void SiS310SubsequentScreenToScreenCopy(struct sis_video_info *ivideo, int src_x, int src_y, int dst_x, int dst_y, int width, int height) { u32 srcbase = 0, dstbase = 0; int mymin = min(src_y, dst_y); int mymax = max(src_y, dst_y); /* Although the chip knows the direction to use * if the source and destination areas overlap, * 
that logic fails if we fiddle with the bitmap * addresses. Therefore, we check if the source * and destination blitting areas overlap and * adapt the bitmap addresses synchronously * if the coordinates exceed the valid range. * The the areas do not overlap, we do our * normal check. */ if((mymax - mymin) < height) { if((src_y >= 2048) || (dst_y >= 2048)) { srcbase = ivideo->video_linelength * mymin; dstbase = ivideo->video_linelength * mymin; src_y -= mymin; dst_y -= mymin; } } else { if(src_y >= 2048) { srcbase = ivideo->video_linelength * src_y; src_y = 0; } if(dst_y >= 2048) { dstbase = ivideo->video_linelength * dst_y; dst_y = 0; } } srcbase += ivideo->video_offset; dstbase += ivideo->video_offset; SiS310SetupSRCBase(srcbase); SiS310SetupDSTBase(dstbase); SiS310SetupRect(width, height) SiS310SetupSRCXY(src_x, src_y) SiS310SetupDSTXY(dst_x, dst_y) SiS310DoCMD } static void SiS310SetupForSolidFill(struct sis_video_info *ivideo, u32 color, int rop) { SiS310SetupPATFG(color) SiS310SetupDSTRect(ivideo->video_linelength, 0x0fff) SiS310SetupDSTColorDepth(ivideo->DstColor); SiS310SetupROP(sisPatALUConv[rop]) SiS310SetupCMDFlag(PATFG | ivideo->SiS310_AccelDepth) } static void SiS310SubsequentSolidFillRect(struct sis_video_info *ivideo, int x, int y, int w, int h) { u32 dstbase = 0; if(y >= 2048) { dstbase = ivideo->video_linelength * y; y = 0; } dstbase += ivideo->video_offset; SiS310SetupDSTBase(dstbase) SiS310SetupDSTXY(x,y) SiS310SetupRect(w,h) SiS310SetupCMDFlag(BITBLT) SiS310DoCMD } #endif /* --------------------------------------------------------------------- */ /* The exported routines */ int sisfb_initaccel(struct sis_video_info *ivideo) { #ifdef SISFB_USE_SPINLOCKS spin_lock_init(&ivideo->lockaccel); #endif return 0; } void sisfb_syncaccel(struct sis_video_info *ivideo) { if(ivideo->sisvga_engine == SIS_300_VGA) { #ifdef CONFIG_FB_SIS_300 SiS300Sync(ivideo); #endif } else { #ifdef CONFIG_FB_SIS_315 SiS310Sync(ivideo); #endif } } int fbcon_sis_sync(struct 
fb_info *info) { struct sis_video_info *ivideo = (struct sis_video_info *)info->par; CRITFLAGS if((!ivideo->accel) || (!ivideo->engineok)) return 0; CRITBEGIN sisfb_syncaccel(ivideo); CRITEND return 0; } void fbcon_sis_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct sis_video_info *ivideo = (struct sis_video_info *)info->par; u32 col = 0; u32 vxres = info->var.xres_virtual; u32 vyres = info->var.yres_virtual; int width, height; CRITFLAGS if(info->state != FBINFO_STATE_RUNNING) return; if((!ivideo->accel) || (!ivideo->engineok)) { cfb_fillrect(info, rect); return; } if(!rect->width || !rect->height || rect->dx >= vxres || rect->dy >= vyres) return; /* Clipping */ width = ((rect->dx + rect->width) > vxres) ? (vxres - rect->dx) : rect->width; height = ((rect->dy + rect->height) > vyres) ? (vyres - rect->dy) : rect->height; switch(info->var.bits_per_pixel) { case 8: col = rect->color; break; case 16: case 32: col = ((u32 *)(info->pseudo_palette))[rect->color]; break; } if(ivideo->sisvga_engine == SIS_300_VGA) { #ifdef CONFIG_FB_SIS_300 CRITBEGIN SiS300SetupForSolidFill(ivideo, col, myrops[rect->rop]); SiS300SubsequentSolidFillRect(ivideo, rect->dx, rect->dy, width, height); CRITEND #endif } else { #ifdef CONFIG_FB_SIS_315 CRITBEGIN SiS310SetupForSolidFill(ivideo, col, myrops[rect->rop]); SiS310SubsequentSolidFillRect(ivideo, rect->dx, rect->dy, width, height); CRITEND #endif } sisfb_syncaccel(ivideo); } void fbcon_sis_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct sis_video_info *ivideo = (struct sis_video_info *)info->par; u32 vxres = info->var.xres_virtual; u32 vyres = info->var.yres_virtual; int width = area->width; int height = area->height; CRITFLAGS if(info->state != FBINFO_STATE_RUNNING) return; if((!ivideo->accel) || (!ivideo->engineok)) { cfb_copyarea(info, area); return; } if(!width || !height || area->sx >= vxres || area->sy >= vyres || area->dx >= vxres || area->dy >= vyres) return; /* Clipping */ 
if((area->sx + width) > vxres) width = vxres - area->sx; if((area->dx + width) > vxres) width = vxres - area->dx; if((area->sy + height) > vyres) height = vyres - area->sy; if((area->dy + height) > vyres) height = vyres - area->dy; if(ivideo->sisvga_engine == SIS_300_VGA) { #ifdef CONFIG_FB_SIS_300 int xdir, ydir; if(area->sx < area->dx) xdir = 0; else xdir = 1; if(area->sy < area->dy) ydir = 0; else ydir = 1; CRITBEGIN SiS300SetupForScreenToScreenCopy(ivideo, xdir, ydir, 3, -1); SiS300SubsequentScreenToScreenCopy(ivideo, area->sx, area->sy, area->dx, area->dy, width, height); CRITEND #endif } else { #ifdef CONFIG_FB_SIS_315 CRITBEGIN SiS310SetupForScreenToScreenCopy(ivideo, 3, -1); SiS310SubsequentScreenToScreenCopy(ivideo, area->sx, area->sy, area->dx, area->dy, width, height); CRITEND #endif } sisfb_syncaccel(ivideo); }
gpl-2.0
quang-ha/lammps
lib/kokkos/core/unit_test/cuda/TestCuda_Atomics.cpp
9
2030
/* //@HEADER // ************************************************************************ // // Kokkos v. 2.0 // Copyright (2014) Sandia Corporation // // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, // the U.S. Government retains certain rights in this software. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the Corporation nor the names of the // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact Christian R. Trott (crtrott@sandia.gov) // // ************************************************************************ //@HEADER */ #include <cuda/TestCuda_Category.hpp> #include <TestAtomic.hpp>
gpl-2.0
scs/uclinux
user/mgetty/voice/libmgsm/preprocess.c
9
2882
/* * Copyright 1992 by Jutta Degener and Carsten Bormann, Technische * Universitaet Berlin. See the accompanying file "COPYRIGHT" for * details. THERE IS ABSOLUTELY NO WARRANTY FOR THIS SOFTWARE. */ /* $Id: preprocess.c,v 1.4 1998/09/09 21:06:51 gert Exp $ */ #include <stdio.h> #include <assert.h> #include "private.h" #include "gsm.h" #include "proto.h" /* 4.2.0 .. 4.2.3 PREPROCESSING SECTION * * After A-law to linear conversion (or directly from the * Ato D converter) the following scaling is assumed for * input to the RPE-LTP algorithm: * * in: 0.1.....................12 * S.v.v.v.v.v.v.v.v.v.v.v.v.*.*.* * * Where S is the sign bit, v a valid bit, and * a "don't care" bit. * The original signal is called sop[..] * * out: 0.1................... 12 * S.S.v.v.v.v.v.v.v.v.v.v.v.v.0.0 */ void Gsm_Preprocess P3((S, s, so), struct gsm_state * S, word * s, word * so ) /* [0..159] IN/OUT */ { word z1 = S->z1; longword L_z2 = S->L_z2; word mp = S->mp; word s1; longword L_s2; longword L_temp; word msp, lsp; word SO; longword ltmp; /* for ADD */ ulongword utmp; /* for L_ADD */ register int k = 160; while (k--) { /* 4.2.1 Downscaling of the input signal */ SO = SASR( *s, 3 ) << 2; s++; assert (SO >= -0x4000); /* downscaled by */ assert (SO <= 0x3FFC); /* previous routine. */ /* 4.2.2 Offset compensation * * This part implements a high-pass filter and requires extended * arithmetic precision for the recursive part of this filter. * The input of this procedure is the array so[0...159] and the * output the array sof[ 0...159 ]. 
*/ /* Compute the non-recursive part */ s1 = SO - z1; /* s1 = gsm_sub( *so, z1 ); */ z1 = SO; assert(s1 != MIN_WORD); /* Compute the recursive part */ L_s2 = s1; L_s2 <<= 15; /* Execution of a 31 bv 16 bits multiplication */ msp = SASR( L_z2, 15 ); lsp = L_z2-((longword)msp<<15); /* gsm_L_sub(L_z2,(msp<<15)); */ L_s2 += GSM_MULT_R( lsp, 32735 ); L_temp = (longword)msp * 32735; /* GSM_L_MULT(msp,32735) >> 1;*/ L_z2 = GSM_L_ADD( L_temp, L_s2 ); /* Compute sof[k] with rounding */ L_temp = GSM_L_ADD( L_z2, 16384 ); /* 4.2.3 Preemphasis */ msp = GSM_MULT_R( mp, -28180 ); mp = SASR( L_temp, 15 ); *so++ = GSM_ADD( mp, msp ); } S->z1 = z1; S->L_z2 = L_z2; S->mp = mp; }
gpl-2.0
rneugeba/linux-stable
drivers/gpio/gpio-davinci.c
9
17414
/* * TI DaVinci GPIO Support * * Copyright (c) 2006-2007 David Brownell * Copyright (c) 2007, MontaVista Software, Inc. <source@mvista.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/gpio/driver.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/platform_data/gpio-davinci.h> #include <linux/irqchip/chained_irq.h> #include <linux/spinlock.h> #include <asm-generic/gpio.h> #define MAX_REGS_BANKS 5 #define MAX_INT_PER_BANK 32 struct davinci_gpio_regs { u32 dir; u32 out_data; u32 set_data; u32 clr_data; u32 in_data; u32 set_rising; u32 clr_rising; u32 set_falling; u32 clr_falling; u32 intstat; }; typedef struct irq_chip *(*gpio_get_irq_chip_cb_t)(unsigned int irq); #define BINTEN 0x8 /* GPIO Interrupt Per-Bank Enable Register */ static void __iomem *gpio_base; static unsigned int offset_array[5] = {0x10, 0x38, 0x60, 0x88, 0xb0}; struct davinci_gpio_irq_data { void __iomem *regs; struct davinci_gpio_controller *chip; int bank_num; }; struct davinci_gpio_controller { struct gpio_chip chip; struct irq_domain *irq_domain; /* Serialize access to GPIO registers */ spinlock_t lock; void __iomem *regs[MAX_REGS_BANKS]; int gpio_unbanked; int irqs[MAX_INT_PER_BANK]; }; static inline u32 __gpio_mask(unsigned gpio) { return 1 << (gpio % 32); } static inline struct davinci_gpio_regs __iomem *irq2regs(struct irq_data *d) { struct davinci_gpio_regs __iomem *g; g = (__force struct davinci_gpio_regs __iomem *)irq_data_get_irq_chip_data(d); return g; } static int 
davinci_gpio_irq_setup(struct platform_device *pdev); /*--------------------------------------------------------------------------*/ /* board setup code *MUST* setup pinmux and enable the GPIO clock. */ static inline int __davinci_direction(struct gpio_chip *chip, unsigned offset, bool out, int value) { struct davinci_gpio_controller *d = gpiochip_get_data(chip); struct davinci_gpio_regs __iomem *g; unsigned long flags; u32 temp; int bank = offset / 32; u32 mask = __gpio_mask(offset); g = d->regs[bank]; spin_lock_irqsave(&d->lock, flags); temp = readl_relaxed(&g->dir); if (out) { temp &= ~mask; writel_relaxed(mask, value ? &g->set_data : &g->clr_data); } else { temp |= mask; } writel_relaxed(temp, &g->dir); spin_unlock_irqrestore(&d->lock, flags); return 0; } static int davinci_direction_in(struct gpio_chip *chip, unsigned offset) { return __davinci_direction(chip, offset, false, 0); } static int davinci_direction_out(struct gpio_chip *chip, unsigned offset, int value) { return __davinci_direction(chip, offset, true, value); } /* * Read the pin's value (works even if it's set up as output); * returns zero/nonzero. * * Note that changes are synched to the GPIO clock, so reading values back * right after you've set them may give old values. */ static int davinci_gpio_get(struct gpio_chip *chip, unsigned offset) { struct davinci_gpio_controller *d = gpiochip_get_data(chip); struct davinci_gpio_regs __iomem *g; int bank = offset / 32; g = d->regs[bank]; return !!(__gpio_mask(offset) & readl_relaxed(&g->in_data)); } /* * Assuming the pin is muxed as a gpio output, set its output value. */ static void davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct davinci_gpio_controller *d = gpiochip_get_data(chip); struct davinci_gpio_regs __iomem *g; int bank = offset / 32; g = d->regs[bank]; writel_relaxed(__gpio_mask(offset), value ? 
&g->set_data : &g->clr_data); } static struct davinci_gpio_platform_data * davinci_gpio_get_pdata(struct platform_device *pdev) { struct device_node *dn = pdev->dev.of_node; struct davinci_gpio_platform_data *pdata; int ret; u32 val; if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) return dev_get_platdata(&pdev->dev); pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return NULL; ret = of_property_read_u32(dn, "ti,ngpio", &val); if (ret) goto of_err; pdata->ngpio = val; ret = of_property_read_u32(dn, "ti,davinci-gpio-unbanked", &val); if (ret) goto of_err; pdata->gpio_unbanked = val; return pdata; of_err: dev_err(&pdev->dev, "Populating pdata from DT failed: err %d\n", ret); return NULL; } static int davinci_gpio_probe(struct platform_device *pdev) { int bank, i, ret = 0; unsigned int ngpio, nbank, nirq; struct davinci_gpio_controller *chips; struct davinci_gpio_platform_data *pdata; struct device *dev = &pdev->dev; struct resource *res; pdata = davinci_gpio_get_pdata(pdev); if (!pdata) { dev_err(dev, "No platform data found\n"); return -EINVAL; } dev->platform_data = pdata; /* * The gpio banks conceptually expose a segmented bitmap, * and "ngpio" is one more than the largest zero-based * bit index that's valid. 
*/ ngpio = pdata->ngpio; if (ngpio == 0) { dev_err(dev, "How many GPIOs?\n"); return -EINVAL; } if (WARN_ON(ARCH_NR_GPIOS < ngpio)) ngpio = ARCH_NR_GPIOS; /* * If there are unbanked interrupts then the number of * interrupts is equal to number of gpios else all are banked so * number of interrupts is equal to number of banks(each with 16 gpios) */ if (pdata->gpio_unbanked) nirq = pdata->gpio_unbanked; else nirq = DIV_ROUND_UP(ngpio, 16); chips = devm_kzalloc(dev, sizeof(*chips), GFP_KERNEL); if (!chips) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); gpio_base = devm_ioremap_resource(dev, res); if (IS_ERR(gpio_base)) return PTR_ERR(gpio_base); for (i = 0; i < nirq; i++) { chips->irqs[i] = platform_get_irq(pdev, i); if (chips->irqs[i] < 0) { dev_info(dev, "IRQ not populated, err = %d\n", chips->irqs[i]); return chips->irqs[i]; } } chips->chip.label = dev_name(dev); chips->chip.direction_input = davinci_direction_in; chips->chip.get = davinci_gpio_get; chips->chip.direction_output = davinci_direction_out; chips->chip.set = davinci_gpio_set; chips->chip.ngpio = ngpio; chips->chip.base = -1; #ifdef CONFIG_OF_GPIO chips->chip.of_gpio_n_cells = 2; chips->chip.parent = dev; chips->chip.of_node = dev->of_node; if (of_property_read_bool(dev->of_node, "gpio-ranges")) { chips->chip.request = gpiochip_generic_request; chips->chip.free = gpiochip_generic_free; } #endif spin_lock_init(&chips->lock); nbank = DIV_ROUND_UP(ngpio, 32); for (bank = 0; bank < nbank; bank++) chips->regs[bank] = gpio_base + offset_array[bank]; ret = devm_gpiochip_add_data(dev, &chips->chip, chips); if (ret) return ret; platform_set_drvdata(pdev, chips); ret = davinci_gpio_irq_setup(pdev); if (ret) return ret; return 0; } /*--------------------------------------------------------------------------*/ /* * We expect irqs will normally be set up as input pins, but they can also be * used as output pins ... which is convenient for testing. 
* * NOTE: The first few GPIOs also have direct INTC hookups in addition * to their GPIOBNK0 irq, with a bit less overhead. * * All those INTC hookups (direct, plus several IRQ banks) can also * serve as EDMA event triggers. */ static void gpio_irq_disable(struct irq_data *d) { struct davinci_gpio_regs __iomem *g = irq2regs(d); u32 mask = (u32) irq_data_get_irq_handler_data(d); writel_relaxed(mask, &g->clr_falling); writel_relaxed(mask, &g->clr_rising); } static void gpio_irq_enable(struct irq_data *d) { struct davinci_gpio_regs __iomem *g = irq2regs(d); u32 mask = (u32) irq_data_get_irq_handler_data(d); unsigned status = irqd_get_trigger_type(d); status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING; if (!status) status = IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING; if (status & IRQ_TYPE_EDGE_FALLING) writel_relaxed(mask, &g->set_falling); if (status & IRQ_TYPE_EDGE_RISING) writel_relaxed(mask, &g->set_rising); } static int gpio_irq_type(struct irq_data *d, unsigned trigger) { if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) return -EINVAL; return 0; } static struct irq_chip gpio_irqchip = { .name = "GPIO", .irq_enable = gpio_irq_enable, .irq_disable = gpio_irq_disable, .irq_set_type = gpio_irq_type, .flags = IRQCHIP_SET_TYPE_MASKED, }; static void gpio_irq_handler(struct irq_desc *desc) { struct davinci_gpio_regs __iomem *g; u32 mask = 0xffff; int bank_num; struct davinci_gpio_controller *d; struct davinci_gpio_irq_data *irqdata; irqdata = (struct davinci_gpio_irq_data *)irq_desc_get_handler_data(desc); bank_num = irqdata->bank_num; g = irqdata->regs; d = irqdata->chip; /* we only care about one bank */ if ((bank_num % 2) == 1) mask <<= 16; /* temporarily mask (level sensitive) parent IRQ */ chained_irq_enter(irq_desc_get_chip(desc), desc); while (1) { u32 status; int bit; irq_hw_number_t hw_irq; /* ack any irqs */ status = readl_relaxed(&g->intstat) & mask; if (!status) break; writel_relaxed(status, &g->intstat); /* now demux them to the right 
lowlevel handler */ while (status) { bit = __ffs(status); status &= ~BIT(bit); /* Max number of gpios per controller is 144 so * hw_irq will be in [0..143] */ hw_irq = (bank_num / 2) * 32 + bit; generic_handle_irq( irq_find_mapping(d->irq_domain, hw_irq)); } } chained_irq_exit(irq_desc_get_chip(desc), desc); /* now it may re-trigger */ } static int gpio_to_irq_banked(struct gpio_chip *chip, unsigned offset) { struct davinci_gpio_controller *d = gpiochip_get_data(chip); if (d->irq_domain) return irq_create_mapping(d->irq_domain, offset); else return -ENXIO; } static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset) { struct davinci_gpio_controller *d = gpiochip_get_data(chip); /* * NOTE: we assume for now that only irqs in the first gpio_chip * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs). */ if (offset < d->gpio_unbanked) return d->irqs[offset]; else return -ENODEV; } static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger) { struct davinci_gpio_controller *d; struct davinci_gpio_regs __iomem *g; u32 mask, i; d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data); g = (struct davinci_gpio_regs __iomem *)d->regs[0]; for (i = 0; i < MAX_INT_PER_BANK; i++) if (data->irq == d->irqs[i]) break; if (i == MAX_INT_PER_BANK) return -EINVAL; mask = __gpio_mask(i); if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) return -EINVAL; writel_relaxed(mask, (trigger & IRQ_TYPE_EDGE_FALLING) ? &g->set_falling : &g->clr_falling); writel_relaxed(mask, (trigger & IRQ_TYPE_EDGE_RISING) ? 
&g->set_rising : &g->clr_rising); return 0; } static int davinci_gpio_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct davinci_gpio_controller *chips = (struct davinci_gpio_controller *)d->host_data; struct davinci_gpio_regs __iomem *g = chips->regs[hw / 32]; irq_set_chip_and_handler_name(irq, &gpio_irqchip, handle_simple_irq, "davinci_gpio"); irq_set_irq_type(irq, IRQ_TYPE_NONE); irq_set_chip_data(irq, (__force void *)g); irq_set_handler_data(irq, (void *)__gpio_mask(hw)); return 0; } static const struct irq_domain_ops davinci_gpio_irq_ops = { .map = davinci_gpio_irq_map, .xlate = irq_domain_xlate_onetwocell, }; static struct irq_chip *davinci_gpio_get_irq_chip(unsigned int irq) { static struct irq_chip_type gpio_unbanked; gpio_unbanked = *irq_data_get_chip_type(irq_get_irq_data(irq)); return &gpio_unbanked.chip; }; static struct irq_chip *keystone_gpio_get_irq_chip(unsigned int irq) { static struct irq_chip gpio_unbanked; gpio_unbanked = *irq_get_chip(irq); return &gpio_unbanked; }; static const struct of_device_id davinci_gpio_ids[]; /* * NOTE: for suspend/resume, probably best to make a platform_device with * suspend_late/resume_resume calls hooking into results of the set_wake() * calls ... so if no gpios are wakeup events the clock can be disabled, * with outputs left at previously set levels, and so that VDD3P3V.IOPWDN0 * (dm6446) can be set appropriately for GPIOV33 pins. 
*/ static int davinci_gpio_irq_setup(struct platform_device *pdev) { unsigned gpio, bank; int irq; int ret; struct clk *clk; u32 binten = 0; unsigned ngpio; struct device *dev = &pdev->dev; struct davinci_gpio_controller *chips = platform_get_drvdata(pdev); struct davinci_gpio_platform_data *pdata = dev->platform_data; struct davinci_gpio_regs __iomem *g; struct irq_domain *irq_domain = NULL; const struct of_device_id *match; struct irq_chip *irq_chip; struct davinci_gpio_irq_data *irqdata; gpio_get_irq_chip_cb_t gpio_get_irq_chip; /* * Use davinci_gpio_get_irq_chip by default to handle non DT cases */ gpio_get_irq_chip = davinci_gpio_get_irq_chip; match = of_match_device(of_match_ptr(davinci_gpio_ids), dev); if (match) gpio_get_irq_chip = (gpio_get_irq_chip_cb_t)match->data; ngpio = pdata->ngpio; clk = devm_clk_get(dev, "gpio"); if (IS_ERR(clk)) { dev_err(dev, "Error %ld getting gpio clock\n", PTR_ERR(clk)); return PTR_ERR(clk); } ret = clk_prepare_enable(clk); if (ret) return ret; if (!pdata->gpio_unbanked) { irq = devm_irq_alloc_descs(dev, -1, 0, ngpio, 0); if (irq < 0) { dev_err(dev, "Couldn't allocate IRQ numbers\n"); clk_disable_unprepare(clk); return irq; } irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0, &davinci_gpio_irq_ops, chips); if (!irq_domain) { dev_err(dev, "Couldn't register an IRQ domain\n"); clk_disable_unprepare(clk); return -ENODEV; } } /* * Arrange gpio_to_irq() support, handling either direct IRQs or * banked IRQs. Having GPIOs in the first GPIO bank use direct * IRQs, while the others use banked IRQs, would need some setup * tweaks to recognize hardware which can do that. */ chips->chip.to_irq = gpio_to_irq_banked; chips->irq_domain = irq_domain; /* * AINTC can handle direct/unbanked IRQs for GPIOs, with the GPIO * controller only handling trigger modes. We currently assume no * IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs. 
*/ if (pdata->gpio_unbanked) { /* pass "bank 0" GPIO IRQs to AINTC */ chips->chip.to_irq = gpio_to_irq_unbanked; chips->gpio_unbanked = pdata->gpio_unbanked; binten = GENMASK(pdata->gpio_unbanked / 16, 0); /* AINTC handles mask/unmask; GPIO handles triggering */ irq = chips->irqs[0]; irq_chip = gpio_get_irq_chip(irq); irq_chip->name = "GPIO-AINTC"; irq_chip->irq_set_type = gpio_irq_type_unbanked; /* default trigger: both edges */ g = chips->regs[0]; writel_relaxed(~0, &g->set_falling); writel_relaxed(~0, &g->set_rising); /* set the direct IRQs up to use that irqchip */ for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++) { irq_set_chip(chips->irqs[gpio], irq_chip); irq_set_handler_data(chips->irqs[gpio], chips); irq_set_status_flags(chips->irqs[gpio], IRQ_TYPE_EDGE_BOTH); } goto done; } /* * Or, AINTC can handle IRQs for banks of 16 GPIO IRQs, which we * then chain through our own handler. */ for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 16) { /* disabled by default, enabled only as needed * There are register sets for 32 GPIOs. 2 banks of 16 * GPIOs are covered by each set of registers hence divide by 2 */ g = chips->regs[bank / 2]; writel_relaxed(~0, &g->clr_falling); writel_relaxed(~0, &g->clr_rising); /* * Each chip handles 32 gpios, and each irq bank consists of 16 * gpio irqs. Pass the irq bank's corresponding controller to * the chained irq handler. */ irqdata = devm_kzalloc(&pdev->dev, sizeof(struct davinci_gpio_irq_data), GFP_KERNEL); if (!irqdata) { clk_disable_unprepare(clk); return -ENOMEM; } irqdata->regs = g; irqdata->bank_num = bank; irqdata->chip = chips; irq_set_chained_handler_and_data(chips->irqs[bank], gpio_irq_handler, irqdata); binten |= BIT(bank); } done: /* * BINTEN -- per-bank interrupt enable. genirq would also let these * bits be set/cleared dynamically. 
*/ writel_relaxed(binten, gpio_base + BINTEN); return 0; } static const struct of_device_id davinci_gpio_ids[] = { { .compatible = "ti,keystone-gpio", keystone_gpio_get_irq_chip}, { .compatible = "ti,dm6441-gpio", davinci_gpio_get_irq_chip}, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, davinci_gpio_ids); static struct platform_driver davinci_gpio_driver = { .probe = davinci_gpio_probe, .driver = { .name = "davinci_gpio", .of_match_table = of_match_ptr(davinci_gpio_ids), }, }; /** * GPIO driver registration needs to be done before machine_init functions * access GPIO. Hence davinci_gpio_drv_reg() is a postcore_initcall. */ static int __init davinci_gpio_drv_reg(void) { return platform_driver_register(&davinci_gpio_driver); } postcore_initcall(davinci_gpio_drv_reg);
gpl-2.0
slowfranklin/wireshark
ui/gtk/packet_history.c
9
4472
/* packet_history.c * packet history related functions 2004 Ulf Lamping * * $Id$ * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * Copyright 1998 Gerald Combs * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include <stdio.h> #include <gtk/gtk.h> #include "../file.h" #include "../globals.h" #include "ui/gtk/main.h" #include "ui/gtk/packet_history.h" static GList *history_current = NULL; static GList *history_list = NULL; static gboolean ignore_jump = FALSE; #if 0 /* print the complete packet history to console */ static void history_print(void) { GList *current = g_list_first(history_list); printf(" List:\n"); while(current) { if(current == history_current) { printf(" Row: %u *\n", GPOINTER_TO_INT(current->data)); } else { printf(" Row: %u\n", GPOINTER_TO_INT(current->data)); } current = g_list_next(current); } } #endif /* adjust menu and toolbar sensitivity depending on the history entries */ static void adjust_menus(void) { if(history_current) { main_set_for_packet_history( (g_list_previous(history_current) != NULL), (g_list_next(history_current) != NULL)); } else { /* we don't have any history */ main_set_for_packet_history(FALSE, FALSE); } /* history_print(); */ } /* clear the history list from the given entry to the end of the list */ static void 
clear_list(GList *current) { GList *next_packet; while(current) { next_packet = g_list_next(current); history_list = g_list_remove(history_list, current->data); current = next_packet; } } /* add an entry to the history list */ void packet_history_add(gint row) { if(row < 1) { /* Not a valid row number */ return; } if(ignore_jump) { /* we jumping back and forward in history, so don't change list */ return; } if (history_current) { /* clear list behind current position */ clear_list(g_list_next(history_current)); /* ignore duplicates */ if(GPOINTER_TO_INT(history_current->data) == row) { adjust_menus(); return; } } /* add row */ history_list = g_list_append(history_list, GINT_TO_POINTER(row)); history_current = g_list_last(history_list); adjust_menus(); } void packet_history_clear(void) { /* clear "old" list */ clear_list(history_list); history_current = NULL; /* add the currently selected first row */ packet_history_add(0); adjust_menus(); } static void packet_history_back(void) { GList *previous; if(history_current) { previous = g_list_previous(history_current); /* do we have a previous entry */ if(previous) { history_current = previous; /* goto that packet but don't change history */ ignore_jump = TRUE; cf_goto_frame(&cfile, GPOINTER_TO_INT(previous->data)); ignore_jump = FALSE; } } adjust_menus(); } static void packet_history_forward(void) { GList *next; if(history_current) { next = g_list_next(history_current); /* do we have a forward entry? */ if(next) { history_current = next; /* goto that packet but don't change history */ ignore_jump = TRUE; cf_goto_frame(&cfile, GPOINTER_TO_INT(next->data)); ignore_jump = FALSE; } } adjust_menus(); } void history_forward_cb(GtkWidget *widget _U_, gpointer data _U_) { packet_history_forward(); } void history_back_cb(GtkWidget *widget _U_, gpointer data _U_) { packet_history_back(); }
gpl-2.0
wolflee/coreboot
src/mainboard/samsung/lumpy/gpio.c
9
9953
/* * This file is part of the coreboot project. * * Copyright (C) 2011 The Chromium OS Authors. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef LUMPY_GPIO_H #define LUMPY_GPIO_H #include <southbridge/intel/common/gpio.h> /* * GPIO SET 1 includes GPIO0 to GPIO31 */ const struct pch_gpio_set1 pch_gpio_set1_mode = { .gpio0 = GPIO_MODE_GPIO, /* CHP3_SERDBG */ .gpio1 = GPIO_MODE_GPIO, /* KBC3_EXTSMI# */ .gpio2 = GPIO_MODE_NATIVE, /* CHP3_ALSINT# (Light Sensor) */ .gpio3 = GPIO_MODE_NATIVE, /* CHP3_TP_INT# (Trackpad) */ .gpio4 = GPIO_MODE_NONE, .gpio5 = GPIO_MODE_GPIO, /* SIM3_CARD_DET# */ .gpio6 = GPIO_MODE_NONE, .gpio7 = GPIO_MODE_GPIO, /* KBC3_RUNSCI# */ .gpio8 = GPIO_MODE_GPIO, /* CHP3_INTELBT_OFF# */ .gpio9 = GPIO_MODE_NONE, .gpio10 = GPIO_MODE_NONE, .gpio11 = GPIO_MODE_GPIO, /* CHP3_TP_INT# (Trackpad wake) */ .gpio12 = GPIO_MODE_NONE, .gpio13 = GPIO_MODE_GPIO, /* CHP3_DEBUG13 */ .gpio14 = GPIO_MODE_GPIO, /* KBC3_WAKESCI# */ .gpio15 = GPIO_MODE_NONE, .gpio16 = GPIO_MODE_NONE, .gpio17 = GPIO_MODE_GPIO, /* KBC3_DVP_MODE */ .gpio18 = GPIO_MODE_NATIVE, /* MIN3_CLKREQ1# */ .gpio19 = GPIO_MODE_NONE, .gpio20 = GPIO_MODE_NONE, .gpio21 = GPIO_MODE_GPIO, /* LCD3_SIZE */ .gpio22 = GPIO_MODE_GPIO, /* CHP3_BIOS_CRISIS# */ .gpio23 = GPIO_MODE_NONE, .gpio24 = GPIO_MODE_GPIO, /* KBC3_SPI_WP# */ .gpio25 = GPIO_MODE_NONE, .gpio26 = GPIO_MODE_NATIVE, /* LAN3_CLKREQ# */ .gpio27 = GPIO_MODE_NONE, .gpio28 = GPIO_MODE_NONE, .gpio29 = GPIO_MODE_NONE, .gpio30 = GPIO_MODE_NATIVE, /* CHP3_SUSWARN# */ .gpio31 = GPIO_MODE_NATIVE, /* KBC3_AC_PRESENT */ }; const struct 
pch_gpio_set1 pch_gpio_set1_direction = { .gpio0 = GPIO_DIR_OUTPUT, .gpio1 = GPIO_DIR_INPUT, .gpio2 = GPIO_DIR_INPUT, .gpio3 = GPIO_DIR_INPUT, .gpio4 = GPIO_DIR_INPUT, .gpio5 = GPIO_DIR_INPUT, .gpio6 = GPIO_DIR_INPUT, .gpio7 = GPIO_DIR_INPUT, .gpio8 = GPIO_DIR_OUTPUT, .gpio9 = GPIO_DIR_INPUT, .gpio10 = GPIO_DIR_INPUT, .gpio11 = GPIO_DIR_INPUT, .gpio12 = GPIO_DIR_INPUT, .gpio13 = GPIO_DIR_INPUT, .gpio14 = GPIO_DIR_INPUT, .gpio15 = GPIO_DIR_INPUT, .gpio16 = GPIO_DIR_INPUT, .gpio17 = GPIO_DIR_INPUT, .gpio18 = GPIO_DIR_INPUT, .gpio19 = GPIO_DIR_INPUT, .gpio20 = GPIO_DIR_INPUT, .gpio21 = GPIO_DIR_INPUT, .gpio22 = GPIO_DIR_OUTPUT, .gpio23 = GPIO_DIR_INPUT, .gpio24 = GPIO_DIR_INPUT, .gpio25 = GPIO_DIR_INPUT, .gpio26 = GPIO_DIR_INPUT, .gpio27 = GPIO_DIR_INPUT, .gpio28 = GPIO_DIR_INPUT, .gpio29 = GPIO_DIR_INPUT, .gpio30 = GPIO_DIR_INPUT, .gpio31 = GPIO_DIR_INPUT, }; const struct pch_gpio_set1 pch_gpio_set1_level = { .gpio0 = GPIO_LEVEL_LOW, .gpio1 = GPIO_LEVEL_LOW, .gpio2 = GPIO_LEVEL_LOW, .gpio3 = GPIO_LEVEL_LOW, .gpio4 = GPIO_LEVEL_LOW, .gpio5 = GPIO_LEVEL_LOW, .gpio6 = GPIO_LEVEL_LOW, .gpio7 = GPIO_LEVEL_LOW, .gpio8 = GPIO_LEVEL_LOW, .gpio9 = GPIO_LEVEL_LOW, .gpio10 = GPIO_LEVEL_LOW, .gpio11 = GPIO_LEVEL_LOW, .gpio12 = GPIO_LEVEL_LOW, .gpio13 = GPIO_LEVEL_LOW, .gpio14 = GPIO_LEVEL_LOW, .gpio15 = GPIO_LEVEL_LOW, .gpio16 = GPIO_LEVEL_LOW, .gpio17 = GPIO_LEVEL_LOW, .gpio18 = GPIO_LEVEL_LOW, .gpio19 = GPIO_LEVEL_LOW, .gpio20 = GPIO_LEVEL_LOW, .gpio21 = GPIO_LEVEL_LOW, .gpio22 = GPIO_LEVEL_HIGH, .gpio23 = GPIO_LEVEL_LOW, .gpio24 = GPIO_LEVEL_LOW, .gpio25 = GPIO_LEVEL_LOW, .gpio26 = GPIO_LEVEL_LOW, .gpio27 = GPIO_LEVEL_LOW, .gpio28 = GPIO_LEVEL_LOW, .gpio29 = GPIO_LEVEL_LOW, .gpio30 = GPIO_LEVEL_LOW, .gpio31 = GPIO_LEVEL_LOW, }; const struct pch_gpio_set1 pch_gpio_set1_invert = { .gpio0 = GPIO_NO_INVERT, .gpio1 = GPIO_INVERT, .gpio2 = GPIO_INVERT, .gpio3 = GPIO_INVERT, .gpio4 = GPIO_NO_INVERT, .gpio5 = GPIO_INVERT, .gpio6 = GPIO_NO_INVERT, .gpio7 = GPIO_INVERT, .gpio8 = 
GPIO_NO_INVERT, .gpio9 = GPIO_NO_INVERT, .gpio10 = GPIO_NO_INVERT, .gpio11 = GPIO_INVERT, .gpio12 = GPIO_NO_INVERT, .gpio13 = GPIO_NO_INVERT, .gpio14 = GPIO_INVERT, .gpio15 = GPIO_NO_INVERT, }; /* * GPIO SET 2 includes GPIO32 to GPIO63 */ const struct pch_gpio_set2 pch_gpio_set2_mode = { .gpio32 = GPIO_MODE_NATIVE, /* PCI3_CLKRUN# */ .gpio33 = GPIO_MODE_GPIO, /* Onboard Memory Capacity */ .gpio34 = GPIO_MODE_NONE, .gpio35 = GPIO_MODE_GPIO, /* CHP3_WLAN_OFF# */ .gpio36 = GPIO_MODE_NONE, .gpio37 = GPIO_MODE_GPIO, /* CHP3_FDI_OVRVLTG */ .gpio38 = GPIO_MODE_GPIO, /* CHP3_3G_OFF# */ .gpio39 = GPIO_MODE_NONE, .gpio40 = GPIO_MODE_NATIVE, /* USB3_OC1# */ .gpio41 = GPIO_MODE_GPIO, /* Onboard Memory Revision */ .gpio42 = GPIO_MODE_GPIO, /* CHP3_REC_MODE# */ .gpio43 = GPIO_MODE_GPIO, /* CHP3_HSPA_PWRON# */ .gpio44 = GPIO_MODE_GPIO, /* CHP3_SMRT_CHG0_CTL2# */ .gpio45 = GPIO_MODE_GPIO, /* CHP3_SMRT_CHG0_CTL3# */ .gpio46 = GPIO_MODE_GPIO, /* CHP3_SMRT_CHG1_CTL2# */ .gpio47 = GPIO_MODE_GPIO, /* CHP3_CHG_ENABLE0 */ .gpio48 = GPIO_MODE_GPIO, /* CHP3_BT_OFF# */ .gpio49 = GPIO_MODE_GPIO, /* Onboard Memory Vendor */ .gpio50 = GPIO_MODE_NONE, .gpio51 = GPIO_MODE_NONE, .gpio52 = GPIO_MODE_NONE, .gpio53 = GPIO_MODE_NATIVE, .gpio54 = GPIO_MODE_NONE, .gpio55 = GPIO_MODE_GPIO, /* STP_A16OVR */ .gpio56 = GPIO_MODE_GPIO, /* CHP3_CHG_ENABLE1 */ .gpio57 = GPIO_MODE_GPIO, /* CHP3_DEBUG10 */ .gpio58 = GPIO_MODE_NATIVE, /* SIO3_THERM_SMCLK# */ .gpio59 = GPIO_MODE_NATIVE, /* USB3_OC0# */ .gpio60 = GPIO_MODE_GPIO, /* CHP3_DRAMRST_GATE */ .gpio61 = GPIO_MODE_NATIVE, /* CHP3_SUSSTAT# */ .gpio62 = GPIO_MODE_NATIVE, /* CHP3_SUSCLK */ .gpio63 = GPIO_MODE_NATIVE, /* CHP3_SLPS5# */ }; const struct pch_gpio_set2 pch_gpio_set2_direction = { .gpio32 = GPIO_DIR_INPUT, .gpio33 = GPIO_DIR_INPUT, .gpio34 = GPIO_DIR_INPUT, .gpio35 = GPIO_DIR_OUTPUT, .gpio36 = GPIO_DIR_INPUT, .gpio37 = GPIO_DIR_INPUT, .gpio38 = GPIO_DIR_OUTPUT, .gpio39 = GPIO_DIR_INPUT, .gpio40 = GPIO_DIR_INPUT, .gpio41 = GPIO_DIR_INPUT, .gpio42 = 
GPIO_DIR_INPUT, .gpio43 = GPIO_DIR_OUTPUT, .gpio44 = GPIO_DIR_OUTPUT, .gpio45 = GPIO_DIR_OUTPUT, .gpio46 = GPIO_DIR_OUTPUT, .gpio47 = GPIO_DIR_OUTPUT, .gpio48 = GPIO_DIR_OUTPUT, .gpio49 = GPIO_DIR_INPUT, .gpio50 = GPIO_DIR_INPUT, .gpio51 = GPIO_DIR_INPUT, .gpio52 = GPIO_DIR_INPUT, .gpio53 = GPIO_DIR_INPUT, .gpio54 = GPIO_DIR_INPUT, .gpio55 = GPIO_DIR_INPUT, .gpio56 = GPIO_DIR_OUTPUT, .gpio57 = GPIO_DIR_OUTPUT, .gpio58 = GPIO_DIR_INPUT, .gpio59 = GPIO_DIR_INPUT, .gpio60 = GPIO_DIR_OUTPUT, .gpio61 = GPIO_DIR_INPUT, .gpio62 = GPIO_DIR_INPUT, .gpio63 = GPIO_DIR_INPUT, }; const struct pch_gpio_set2 pch_gpio_set2_level = { .gpio32 = GPIO_LEVEL_LOW, .gpio33 = GPIO_LEVEL_LOW, .gpio34 = GPIO_LEVEL_LOW, .gpio35 = GPIO_LEVEL_HIGH, /* Enable WLAN */ .gpio36 = GPIO_LEVEL_LOW, .gpio37 = GPIO_LEVEL_LOW, .gpio38 = GPIO_LEVEL_HIGH, /* Enable 3G */ .gpio39 = GPIO_LEVEL_LOW, .gpio40 = GPIO_LEVEL_LOW, .gpio41 = GPIO_LEVEL_LOW, .gpio42 = GPIO_LEVEL_LOW, .gpio43 = GPIO_LEVEL_LOW, .gpio44 = GPIO_LEVEL_HIGH, /* CTL2 = 1 for USB0 SDP */ .gpio45 = GPIO_LEVEL_LOW, /* CTL3 = 0 for USB0 SDP */ .gpio46 = GPIO_LEVEL_HIGH, /* CTL2 = 1 for USB1 SDP */ .gpio47 = GPIO_LEVEL_HIGH, /* Enable USB0 */ .gpio48 = GPIO_LEVEL_LOW, /* Disable Bluetooth */ .gpio49 = GPIO_LEVEL_LOW, .gpio50 = GPIO_LEVEL_LOW, .gpio51 = GPIO_LEVEL_LOW, .gpio52 = GPIO_LEVEL_LOW, .gpio53 = GPIO_LEVEL_LOW, .gpio54 = GPIO_LEVEL_LOW, .gpio55 = GPIO_LEVEL_LOW, .gpio56 = GPIO_LEVEL_HIGH, /* Enable USB1 */ .gpio57 = GPIO_LEVEL_LOW, .gpio58 = GPIO_LEVEL_LOW, .gpio59 = GPIO_LEVEL_LOW, .gpio60 = GPIO_LEVEL_HIGH, .gpio61 = GPIO_LEVEL_LOW, .gpio62 = GPIO_LEVEL_LOW, .gpio63 = GPIO_LEVEL_LOW, }; /* * GPIO SET 3 includes GPIO64 to GPIO75 */ const struct pch_gpio_set3 pch_gpio_set3_mode = { .gpio64 = GPIO_MODE_NONE, .gpio65 = GPIO_MODE_NONE, .gpio66 = GPIO_MODE_NONE, .gpio67 = GPIO_MODE_NONE, .gpio68 = GPIO_MODE_NONE, .gpio69 = GPIO_MODE_GPIO, /* PEX3_WWAN_DET# */ .gpio70 = GPIO_MODE_GPIO, /* CHP3_WLAN_RST# */ .gpio71 = GPIO_MODE_GPIO, /* 
CHP3_WLAN_PWRON */ .gpio72 = GPIO_MODE_NATIVE, /* BATLOW# (pullup) */ .gpio73 = GPIO_MODE_GPIO, /* CHP3_SMRT_CHG1_CTL3# */ .gpio74 = GPIO_MODE_NONE, .gpio75 = GPIO_MODE_NATIVE, /* SIO3_THERM_SMDATA# */ }; const struct pch_gpio_set3 pch_gpio_set3_direction = { .gpio64 = GPIO_DIR_INPUT, .gpio65 = GPIO_DIR_INPUT, .gpio66 = GPIO_DIR_INPUT, .gpio67 = GPIO_DIR_INPUT, .gpio68 = GPIO_DIR_INPUT, .gpio69 = GPIO_DIR_INPUT, .gpio70 = GPIO_DIR_OUTPUT, .gpio71 = GPIO_DIR_OUTPUT, .gpio72 = GPIO_DIR_INPUT, .gpio73 = GPIO_DIR_OUTPUT, .gpio74 = GPIO_DIR_INPUT, .gpio75 = GPIO_DIR_INPUT, }; const struct pch_gpio_set3 pch_gpio_set3_level = { .gpio64 = GPIO_LEVEL_LOW, .gpio65 = GPIO_LEVEL_LOW, .gpio66 = GPIO_LEVEL_LOW, .gpio67 = GPIO_LEVEL_LOW, .gpio68 = GPIO_LEVEL_LOW, .gpio69 = GPIO_LEVEL_LOW, .gpio70 = GPIO_LEVEL_HIGH, /* WLAN out of reset */ .gpio71 = GPIO_LEVEL_HIGH, /* WLAN power on */ .gpio72 = GPIO_LEVEL_LOW, .gpio73 = GPIO_LEVEL_LOW, /* USB1 CTL3 = 0 for SDP */ .gpio74 = GPIO_LEVEL_LOW, .gpio75 = GPIO_LEVEL_LOW, }; const struct pch_gpio_set2 pch_gpio_set2_reset = { .gpio38 = GPIO_RESET_RSMRST, .gpio43 = GPIO_RESET_RSMRST, }; const struct pch_gpio_map mainboard_gpio_map = { .set1 = { .mode = &pch_gpio_set1_mode, .direction = &pch_gpio_set1_direction, .level = &pch_gpio_set1_level, .invert = &pch_gpio_set1_invert, }, .set2 = { .mode = &pch_gpio_set2_mode, .direction = &pch_gpio_set2_direction, .level = &pch_gpio_set2_level, .reset = &pch_gpio_set2_reset, }, .set3 = { .mode = &pch_gpio_set3_mode, .direction = &pch_gpio_set3_direction, .level = &pch_gpio_set3_level, }, }; #endif
gpl-2.0
wankdanker/freetds
src/odbc/unittests/earlybind.c
9
1598
#include "common.h"

static char software_version[] = "$Id: earlybind.c,v 1.5 2010-07-05 09:20:33 freddy77 Exp $";
static void *no_unused_var_warn[] = { software_version, no_unused_var_warn };

/* Fetch and throw away any remaining rows of the current result set,
 * then advance past it.  Used between the batched SELECTs below. */
static void
discard_results(void)
{
	SQLFetch(odbc_stmt);
	SQLMoreResults(odbc_stmt);
}

/*
 * Regression test: column bindings installed with SQLBindCol BEFORE any
 * statement is executed must still be honoured by later SQLFetch calls,
 * including across multiple result sets of a batch and across a second,
 * separately executed statement.
 */
int
main(int argc, char *argv[])
{
	SQLINTEGER id;
	SQLLEN ind1, ind2;
	char name[64];

	odbc_connect();

	odbc_command("CREATE TABLE #test(id INT, name VARCHAR(100))");
	odbc_command("INSERT INTO #test(id, name) VALUES(8, 'sysobjects')");

	/* bind before select */
	SQLBindCol(odbc_stmt, 1, SQL_C_SLONG, &id, sizeof(SQLINTEGER), &ind1);
	SQLBindCol(odbc_stmt, 2, SQL_C_CHAR, name, sizeof(name), &ind2);

	/* do select -- a batch producing two result sets */
	odbc_command("SELECT id, name FROM #test WHERE name = 'sysobjects' SELECT 123, 'foo'");

	/* get results from the first result set; the row inserted above is (8, 'sysobjects') */
	id = -1;
	memset(name, 0, sizeof(name));
	SQLFetch(odbc_stmt);
	if (id != 8 || strcmp(name, "sysobjects") != 0) {
		fprintf(stderr, "wrong results\n");
		return 1;
	}

	/* discard others data */
	discard_results();

	/* second result set of the batch */
	id = -1;
	memset(name, 0, sizeof(name));
	SQLFetch(odbc_stmt);
	if (id != 123 || strcmp(name, "foo") != 0) {
		fprintf(stderr, "wrong results\n");
		return 1;
	}

	/* discard others data */
	discard_results();

	/* other select -- bindings must survive a new execution too */
	odbc_command("SELECT 321, 'minni'");

	/* get results */
	id = -1;
	memset(name, 0, sizeof(name));
	SQLFetch(odbc_stmt);
	if (id != 321 || strcmp(name, "minni") != 0) {
		fprintf(stderr, "wrong results\n");
		return 1;
	}

	/* discard others data */
	discard_results();

	odbc_disconnect();

	printf("Done.\n");
	return 0;
}
gpl-2.0
jpoet/gst-plugins-bad
gst/dvdspu/gstspu-vobsub-render.c
9
22462
/* GStreamer DVD Sub-Picture Unit * Copyright (C) 2007 Fluendo S.A. <info@fluendo.com> * Copyright (C) 2009 Jan Schmidt <thaytan@noraisin.net> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301, USA. */ #ifdef HAVE_CONFIG_H # include <config.h> #endif #include <string.h> #include <gst/gst.h> #include "gstdvdspu.h" GST_DEBUG_CATEGORY_EXTERN (dvdspu_debug); #define GST_CAT_DEFAULT dvdspu_debug static void gstspu_vobsub_recalc_palette (GstDVDSpu * dvdspu, SpuColour * dest, guint8 * idx, guint8 * alpha) { SpuState *state = &dvdspu->spu_state; gint i; if (state->vobsub.current_clut[idx[0]] != 0) { for (i = 0; i < 4; i++, dest++) { guint32 col = state->vobsub.current_clut[idx[i]]; /* Convert incoming 4-bit alpha to 8 bit for blending */ dest->A = (alpha[i] << 4) | alpha[i]; dest->Y = ((guint16) ((col >> 16) & 0xff)) * dest->A; /* U/V are stored as V/U in the clut words, so switch them */ dest->V = ((guint16) ((col >> 8) & 0xff)) * dest->A; dest->U = ((guint16) (col & 0xff)) * dest->A; } } else { int y = 240; /* The CLUT presumably hasn't been set, so we'll just guess some * values for the non-transparent colors (white, grey, black) */ for (i = 0; i < 4; i++, dest++) { dest->A = (alpha[i] << 4) | alpha[i]; if (alpha[i] != 0) { dest[0].Y = y * dest[0].A; y -= 112; if (y < 0) y = 0; } dest[0].U = 128 * dest[0].A; 
dest[0].V = 128 * dest[0].A; } } } /* Recalculate the main, HL & ChgCol palettes */ static void gstspu_vobsub_update_palettes (GstDVDSpu * dvdspu, SpuState * state) { guint8 index[4]; /* Indices for the palette */ guint8 alpha[4]; /* Alpha values the palette */ if (state->vobsub.main_pal_dirty) { gstspu_vobsub_recalc_palette (dvdspu, state->vobsub.main_pal, state->vobsub.main_idx, state->vobsub.main_alpha); /* Need to refresh the hl_ctrl info copies of the main palette too */ memcpy (state->vobsub.hl_ctrl_i.pix_ctrl_i[0].pal_cache, state->vobsub.main_pal, 4 * sizeof (SpuColour)); memcpy (state->vobsub.hl_ctrl_i.pix_ctrl_i[2].pal_cache, state->vobsub.main_pal, 4 * sizeof (SpuColour)); state->vobsub.main_pal_dirty = FALSE; } if (state->vobsub.hl_pal_dirty) { gstspu_vobsub_recalc_palette (dvdspu, state->vobsub.hl_ctrl_i.pix_ctrl_i[1].pal_cache, state->vobsub.hl_idx, state->vobsub.hl_alpha); state->vobsub.hl_pal_dirty = FALSE; } /* Update the offset positions for the highlight region */ if (state->vobsub.hl_rect.top != -1) { state->vobsub.hl_ctrl_i.top = state->vobsub.hl_rect.top; state->vobsub.hl_ctrl_i.bottom = state->vobsub.hl_rect.bottom; state->vobsub.hl_ctrl_i.n_changes = 3; state->vobsub.hl_ctrl_i.pix_ctrl_i[0].left = 0; state->vobsub.hl_ctrl_i.pix_ctrl_i[1].left = state->vobsub.hl_rect.left; state->vobsub.hl_ctrl_i.pix_ctrl_i[2].left = state->vobsub.hl_rect.right + 1; } if (state->vobsub.line_ctrl_i_pal_dirty) { gint16 l, c; GST_LOG_OBJECT (dvdspu, "Updating chg-col-con palettes"); for (l = 0; l < state->vobsub.n_line_ctrl_i; l++) { SpuVobsubLineCtrlI *cur_line_ctrl = state->vobsub.line_ctrl_i + l; for (c = 0; c < cur_line_ctrl->n_changes; c++) { SpuVobsubPixCtrlI *cur = cur_line_ctrl->pix_ctrl_i + c; index[3] = (cur->palette >> 28) & 0x0f; index[2] = (cur->palette >> 24) & 0x0f; index[1] = (cur->palette >> 20) & 0x0f; index[0] = (cur->palette >> 16) & 0x0f; alpha[3] = (cur->palette >> 12) & 0x0f; alpha[2] = (cur->palette >> 8) & 0x0f; alpha[1] = (cur->palette 
>> 4) & 0x0f; alpha[0] = (cur->palette) & 0x0f; gstspu_vobsub_recalc_palette (dvdspu, cur->pal_cache, index, alpha); } } state->vobsub.line_ctrl_i_pal_dirty = FALSE; } } static inline guint8 gstspu_vobsub_get_nibble (SpuState * state, guint16 * rle_offset) { guint8 ret; if (G_UNLIKELY (*rle_offset >= state->vobsub.max_offset)) return 0; /* Overran the buffer */ ret = state->vobsub.pix_buf_map.data[(*rle_offset) / 2]; /* If the offset is even, we shift the answer down 4 bits, otherwise not */ if (*rle_offset & 0x01) ret &= 0x0f; else ret = ret >> 4; (*rle_offset)++; return ret; } static guint16 gstspu_vobsub_get_rle_code (SpuState * state, guint16 * rle_offset) { guint16 code; code = gstspu_vobsub_get_nibble (state, rle_offset); if (code < 0x4) { /* 4 .. f */ code = (code << 4) | gstspu_vobsub_get_nibble (state, rle_offset); if (code < 0x10) { /* 1x .. 3x */ code = (code << 4) | gstspu_vobsub_get_nibble (state, rle_offset); if (code < 0x40) { /* 04x .. 0fx */ code = (code << 4) | gstspu_vobsub_get_nibble (state, rle_offset); } } } return code; } static inline gboolean gstspu_vobsub_draw_rle_run (SpuState * state, gint16 x, gint16 end, SpuColour * colour) { #if 0 GST_LOG ("Y: %d x: %d end %d col %d %d %d %d", state->vobsub.cur_Y, x, end, colour->Y, colour->U, colour->V, colour->A); #endif if (colour->A != 0) { guint32 inv_A = 0xff - colour->A; /* FIXME: This could be more efficient */ while (x < end) { state->vobsub.out_Y[x] = (inv_A * state->vobsub.out_Y[x] + colour->Y) / 0xff; state->vobsub.out_U[x / 2] += colour->U; state->vobsub.out_V[x / 2] += colour->V; state->vobsub.out_A[x / 2] += colour->A; x++; } /* Update the compositing buffer so we know how much to blend later */ *(state->vobsub.comp_last_x_ptr) = end - 1; /* end is the start of the *next* run */ return TRUE; } return FALSE; } static inline gint16 rle_end_x (guint16 rle_code, gint16 x, gint16 end) { /* run length = rle_code >> 2 */ if (G_UNLIKELY (((rle_code >> 2) == 0))) return end; else return MIN 
(end, x + (rle_code >> 2)); } static gboolean gstspu_vobsub_render_line_with_chgcol (SpuState * state, guint8 * planes[3], guint16 * rle_offset); static gboolean gstspu_vobsub_update_chgcol (SpuState * state); static gboolean gstspu_vobsub_render_line (SpuState * state, guint8 * planes[3], guint16 * rle_offset) { gint16 x, next_x, end, rle_code, next_draw_x; SpuColour *colour; gboolean visible = FALSE; /* Check for special case of chg_col info to use (either highlight or * ChgCol command */ if (state->vobsub.cur_chg_col != NULL) { if (gstspu_vobsub_update_chgcol (state)) { /* Check the top & bottom, because we might not be within the region yet */ if (state->vobsub.cur_Y >= state->vobsub.cur_chg_col->top && state->vobsub.cur_Y <= state->vobsub.cur_chg_col->bottom) { return gstspu_vobsub_render_line_with_chgcol (state, planes, rle_offset); } } } /* No special case. Render as normal */ /* Set up our output pointers */ state->vobsub.out_Y = planes[0]; state->vobsub.out_U = state->comp_bufs[0]; state->vobsub.out_V = state->comp_bufs[1]; state->vobsub.out_A = state->comp_bufs[2]; /* We always need to start our RLE decoding byte_aligned */ *rle_offset = GST_ROUND_UP_2 (*rle_offset); x = state->vobsub.disp_rect.left; end = state->vobsub.disp_rect.right + 1; while (x < end) { rle_code = gstspu_vobsub_get_rle_code (state, rle_offset); colour = &state->vobsub.main_pal[rle_code & 3]; next_x = rle_end_x (rle_code, x, end); next_draw_x = next_x; if (next_draw_x > state->vobsub.clip_rect.right) next_draw_x = state->vobsub.clip_rect.right; /* ensure no overflow */ /* Now draw the run between [x,next_x) */ if (state->vobsub.cur_Y >= state->vobsub.clip_rect.top && state->vobsub.cur_Y <= state->vobsub.clip_rect.bottom) visible |= gstspu_vobsub_draw_rle_run (state, x, next_draw_x, colour); x = next_x; } return visible; } static gboolean gstspu_vobsub_update_chgcol (SpuState * state) { if (state->vobsub.cur_chg_col == NULL) return FALSE; if (state->vobsub.cur_Y <= 
state->vobsub.cur_chg_col->bottom) return TRUE; while (state->vobsub.cur_chg_col < state->vobsub.cur_chg_col_end) { if (state->vobsub.cur_Y >= state->vobsub.cur_chg_col->top && state->vobsub.cur_Y <= state->vobsub.cur_chg_col->bottom) { #if 0 g_print ("Stopped @ entry %d with top %d bottom %d, cur_y %d", (gint16) (state->vobsub.cur_chg_col - state->vobsub.line_ctrl_i), state->vobsub.cur_chg_col->top, state->vobsub.cur_chg_col->bottom, y); #endif return TRUE; } state->vobsub.cur_chg_col++; } /* Finished all our cur_chg_col entries. Use the main palette from here on */ state->vobsub.cur_chg_col = NULL; return FALSE; } static gboolean gstspu_vobsub_render_line_with_chgcol (SpuState * state, guint8 * planes[3], guint16 * rle_offset) { SpuVobsubLineCtrlI *chg_col = state->vobsub.cur_chg_col; gint16 x, next_x, disp_end, rle_code, run_end, run_draw_end; SpuColour *colour; SpuVobsubPixCtrlI *cur_pix_ctrl; SpuVobsubPixCtrlI *next_pix_ctrl; SpuVobsubPixCtrlI *end_pix_ctrl; SpuVobsubPixCtrlI dummy_pix_ctrl; gboolean visible = FALSE; gint16 cur_reg_end; gint i; state->vobsub.out_Y = planes[0]; state->vobsub.out_U = state->comp_bufs[0]; state->vobsub.out_V = state->comp_bufs[1]; state->vobsub.out_A = state->comp_bufs[2]; /* We always need to start our RLE decoding byte_aligned */ *rle_offset = GST_ROUND_UP_2 (*rle_offset); /* Our run will cover the display rect */ x = state->vobsub.disp_rect.left; disp_end = state->vobsub.disp_rect.right + 1; /* Work out the first pixel control info, which may point to the dummy entry if * the global palette/alpha need using initally */ cur_pix_ctrl = chg_col->pix_ctrl_i; end_pix_ctrl = chg_col->pix_ctrl_i + chg_col->n_changes; if (cur_pix_ctrl->left != 0) { next_pix_ctrl = cur_pix_ctrl; cur_pix_ctrl = &dummy_pix_ctrl; for (i = 0; i < 4; i++) /* Copy the main palette to our dummy entry */ dummy_pix_ctrl.pal_cache[i] = state->vobsub.main_pal[i]; } else { next_pix_ctrl = cur_pix_ctrl + 1; } if (next_pix_ctrl < end_pix_ctrl) cur_reg_end = 
next_pix_ctrl->left; else cur_reg_end = disp_end; /* Render stuff */ while (x < disp_end) { rle_code = gstspu_vobsub_get_rle_code (state, rle_offset); next_x = rle_end_x (rle_code, x, disp_end); /* Now draw the run between [x,next_x), crossing palette regions as needed */ while (x < next_x) { run_end = MIN (next_x, cur_reg_end); run_draw_end = run_end; if (run_draw_end > state->vobsub.clip_rect.right) run_draw_end = state->vobsub.clip_rect.right; /* ensure no overflow */ if (G_LIKELY (x < run_end)) { colour = &cur_pix_ctrl->pal_cache[rle_code & 3]; visible |= gstspu_vobsub_draw_rle_run (state, x, run_draw_end, colour); x = run_end; } if (x >= cur_reg_end) { /* Advance to next region */ cur_pix_ctrl = next_pix_ctrl; next_pix_ctrl++; if (next_pix_ctrl < end_pix_ctrl) cur_reg_end = next_pix_ctrl->left; else cur_reg_end = disp_end; } } } return visible; } static void gstspu_vobsub_blend_comp_buffers (SpuState * state, guint8 * planes[3]) { state->comp_left = state->vobsub.disp_rect.left; state->comp_right = MAX (state->vobsub.comp_last_x[0], state->vobsub.comp_last_x[1]); state->comp_left = MAX (state->comp_left, state->vobsub.clip_rect.left); state->comp_right = MIN (state->comp_right, state->vobsub.clip_rect.right); gstspu_blend_comp_buffers (state, planes); } static void gstspu_vobsub_clear_comp_buffers (SpuState * state) { state->comp_left = state->vobsub.clip_rect.left; state->comp_right = state->vobsub.clip_rect.right; gstspu_clear_comp_buffers (state); state->vobsub.comp_last_x[0] = -1; state->vobsub.comp_last_x[1] = -1; } static void gstspu_vobsub_draw_highlight (SpuState * state, GstVideoFrame * frame, SpuRect * rect) { guint8 *cur; gint16 pos; gint ystride; ystride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0); cur = GST_VIDEO_FRAME_COMP_DATA (frame, 0) + ystride * rect->top; for (pos = rect->left + 1; pos < rect->right; pos++) cur[pos] = (cur[pos] / 2) + 0x8; cur = GST_VIDEO_FRAME_COMP_DATA (frame, 0) + ystride * rect->bottom; for (pos = rect->left + 1; pos < 
rect->right; pos++) cur[pos] = (cur[pos] / 2) + 0x8; cur = GST_VIDEO_FRAME_COMP_DATA (frame, 0) + ystride * rect->top; for (pos = rect->top; pos <= rect->bottom; pos++) { cur[rect->left] = (cur[rect->left] / 2) + 0x8; cur[rect->right] = (cur[rect->right] / 2) + 0x8; cur += ystride; } } void gstspu_vobsub_render (GstDVDSpu * dvdspu, GstVideoFrame * frame) { SpuState *state = &dvdspu->spu_state; guint8 *planes[3]; /* YUV frame pointers */ gint y, last_y; gint width, height; gint strides[3]; gint offset_index = 0; /* Set up our initial state */ if (G_UNLIKELY (state->vobsub.pix_buf == NULL)) return; if (!gst_buffer_map (state->vobsub.pix_buf, &state->vobsub.pix_buf_map, GST_MAP_READ)) return; /* Store the start of each plane */ planes[0] = GST_VIDEO_FRAME_COMP_DATA (frame, 0); planes[1] = GST_VIDEO_FRAME_COMP_DATA (frame, 1); planes[2] = GST_VIDEO_FRAME_COMP_DATA (frame, 2); strides[0] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0); strides[1] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 1); strides[2] = GST_VIDEO_FRAME_COMP_STRIDE (frame, 2); width = GST_VIDEO_FRAME_WIDTH (frame); height = GST_VIDEO_FRAME_HEIGHT (frame); GST_DEBUG_OBJECT (dvdspu, "Rendering SPU. disp_rect %d,%d to %d,%d. hl_rect %d,%d to %d,%d", state->vobsub.disp_rect.left, state->vobsub.disp_rect.top, state->vobsub.disp_rect.right, state->vobsub.disp_rect.bottom, state->vobsub.hl_rect.left, state->vobsub.hl_rect.top, state->vobsub.hl_rect.right, state->vobsub.hl_rect.bottom); GST_DEBUG_OBJECT (dvdspu, "video size %d,%d", width, height); /* When reading RLE data, we track the offset in nibbles... 
*/ state->vobsub.cur_offsets[0] = state->vobsub.pix_data[0] * 2; state->vobsub.cur_offsets[1] = state->vobsub.pix_data[1] * 2; state->vobsub.max_offset = state->vobsub.pix_buf_map.size * 2; /* Update all the palette caches */ gstspu_vobsub_update_palettes (dvdspu, state); /* Set up HL or Change Color & Contrast rect tracking */ if (state->vobsub.hl_rect.top != -1) { state->vobsub.cur_chg_col = &state->vobsub.hl_ctrl_i; state->vobsub.cur_chg_col_end = state->vobsub.cur_chg_col + 1; } else if (state->vobsub.n_line_ctrl_i > 0) { state->vobsub.cur_chg_col = state->vobsub.line_ctrl_i; state->vobsub.cur_chg_col_end = state->vobsub.cur_chg_col + state->vobsub.n_line_ctrl_i; } else state->vobsub.cur_chg_col = NULL; state->vobsub.clip_rect.left = state->vobsub.disp_rect.left; state->vobsub.clip_rect.right = state->vobsub.disp_rect.right; /* center the image when display rectangle exceeds the video width */ if (width <= state->vobsub.disp_rect.right) { gint left, disp_width; disp_width = state->vobsub.disp_rect.right - state->vobsub.disp_rect.left + 1; left = (width - disp_width) / 2; state->vobsub.disp_rect.left = left; state->vobsub.disp_rect.right = left + disp_width - 1; /* if it clips to the right, shift it left, but only till zero */ if (state->vobsub.disp_rect.right >= width) { gint shift = state->vobsub.disp_rect.right - width - 1; if (shift > state->vobsub.disp_rect.left) shift = state->vobsub.disp_rect.left; state->vobsub.disp_rect.left -= shift; state->vobsub.disp_rect.right -= shift; } /* init clip to disp */ state->vobsub.clip_rect.left = state->vobsub.disp_rect.left; state->vobsub.clip_rect.right = state->vobsub.disp_rect.right; /* clip right after the shift */ if (state->vobsub.clip_rect.right >= width) state->vobsub.clip_rect.right = width - 1; GST_DEBUG_OBJECT (dvdspu, "clipping width to %d,%d", state->vobsub.clip_rect.left, state->vobsub.clip_rect.right); } /* for the height, bring it up till it fits as well as it can. 
We * assume the picture is in the lower part. We should better check where it * is and do something more clever. */ state->vobsub.clip_rect.top = state->vobsub.disp_rect.top; state->vobsub.clip_rect.bottom = state->vobsub.disp_rect.bottom; if (height <= state->vobsub.disp_rect.bottom) { /* shift it up, but only till zero */ gint shift = state->vobsub.disp_rect.bottom - height - 1; if (shift > state->vobsub.disp_rect.top) shift = state->vobsub.disp_rect.top; state->vobsub.disp_rect.top -= shift; state->vobsub.disp_rect.bottom -= shift; /* start on even line */ if (state->vobsub.disp_rect.top & 1) { state->vobsub.disp_rect.top--; state->vobsub.disp_rect.bottom--; } /* init clip to disp */ state->vobsub.clip_rect.top = state->vobsub.disp_rect.top; state->vobsub.clip_rect.bottom = state->vobsub.disp_rect.bottom; /* clip bottom after the shift */ if (state->vobsub.clip_rect.bottom >= height) state->vobsub.clip_rect.bottom = height - 1; GST_DEBUG_OBJECT (dvdspu, "clipping height to %d,%d", state->vobsub.clip_rect.top, state->vobsub.clip_rect.bottom); } /* We start rendering from the first line of the display rect */ y = state->vobsub.disp_rect.top; /* We render most lines in pairs starting from an even y, * accumulating 2 lines of chroma then blending it. We might need to render a * single line at the start and end if the display rect starts on an odd line * or ends on an even one */ if (y > state->vobsub.disp_rect.bottom) return; /* Empty clip rect, nothing to do */ /* Update our plane references to the first line of the disp_rect */ planes[0] += strides[0] * y; planes[1] += strides[1] * (y / 2); planes[2] += strides[2] * (y / 2); /* If the render rect starts on an odd line, render that only to start */ state->vobsub.cur_Y = y; if (state->vobsub.cur_Y & 0x1) { gboolean clip, visible = FALSE; clip = (state->vobsub.cur_Y < state->vobsub.clip_rect.top || state->vobsub.cur_Y > state->vobsub.clip_rect.bottom); if (!clip) { /* Render a first odd line. 
*/ gstspu_vobsub_clear_comp_buffers (state); state->vobsub.comp_last_x_ptr = state->vobsub.comp_last_x + 1; visible |= gstspu_vobsub_render_line (state, planes, &state->vobsub.cur_offsets[offset_index]); if (visible) gstspu_vobsub_blend_comp_buffers (state, planes); } /* Update all the output pointers */ state->vobsub.cur_Y++; planes[0] += strides[0]; planes[1] += strides[1]; planes[2] += strides[2]; /* Switch the offset index 0 <=> 1 */ offset_index ^= 0x1; } last_y = (state->vobsub.disp_rect.bottom - 1) & ~(0x01); for (; state->vobsub.cur_Y <= last_y; state->vobsub.cur_Y++) { gboolean clip, visible = FALSE; clip = (state->vobsub.cur_Y < state->vobsub.clip_rect.top || state->vobsub.cur_Y > state->vobsub.clip_rect.bottom); /* Reset the compositing buffer */ gstspu_vobsub_clear_comp_buffers (state); /* Render even line */ state->vobsub.comp_last_x_ptr = state->vobsub.comp_last_x; gstspu_vobsub_render_line (state, planes, &state->vobsub.cur_offsets[offset_index]); /* Advance the luminance output pointer */ planes[0] += strides[0]; /* Switch the offset index 0 <=> 1 */ offset_index ^= 0x1; state->vobsub.cur_Y++; /* Render odd line */ state->vobsub.comp_last_x_ptr = state->vobsub.comp_last_x + 1; visible |= gstspu_vobsub_render_line (state, planes, &state->vobsub.cur_offsets[offset_index]); if (visible && !clip) { /* Blend the accumulated UV compositing buffers onto the output */ gstspu_vobsub_blend_comp_buffers (state, planes); } /* Update all the output pointers */ planes[0] += strides[0]; planes[1] += strides[1]; planes[2] += strides[2]; /* Switch the offset index 0 <=> 1 */ offset_index ^= 0x1; } if (state->vobsub.cur_Y == state->vobsub.disp_rect.bottom) { gboolean clip, visible = FALSE; clip = (state->vobsub.cur_Y < state->vobsub.clip_rect.top || state->vobsub.cur_Y > state->vobsub.clip_rect.bottom); g_return_if_fail ((state->vobsub.disp_rect.bottom & 0x01) == 0); if (!clip) { /* Render a remaining lone last even line. 
y already has the correct value * after the above loop exited. */ gstspu_vobsub_clear_comp_buffers (state); state->vobsub.comp_last_x_ptr = state->vobsub.comp_last_x; visible |= gstspu_vobsub_render_line (state, planes, &state->vobsub.cur_offsets[offset_index]); if (visible) gstspu_vobsub_blend_comp_buffers (state, planes); } } /* for debugging purposes, draw a faint rectangle at the edges of the disp_rect */ if ((dvdspu_debug_flags & GST_DVD_SPU_DEBUG_RENDER_RECTANGLE) != 0) { gstspu_vobsub_draw_highlight (state, frame, &state->vobsub.disp_rect); } /* For debugging purposes, draw a faint rectangle around the highlight rect */ if ((dvdspu_debug_flags & GST_DVD_SPU_DEBUG_HIGHLIGHT_RECTANGLE) != 0 && state->vobsub.hl_rect.top != -1) { gstspu_vobsub_draw_highlight (state, frame, &state->vobsub.hl_rect); } gst_buffer_unmap (state->vobsub.pix_buf, &state->vobsub.pix_buf_map); }
gpl-2.0
tako0910/android_kernel_htc_msm8960
arch/arm/mach-msm/htc/elite/board-elite.c
9
139680
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/i2c.h> #include <linux/i2c/sx150x.h> #include <linux/gpio.h> #include <linux/usb/android.h> #include <linux/msm_ssbi.h> #include <linux/pn544.h> #include <linux/regulator/msm-gpio-regulator.h> #include <linux/mfd/pm8xxx/pm8921.h> #include <linux/regulator/consumer.h> #include <linux/spi/spi.h> #include <linux/slimbus/slimbus.h> #include <linux/bootmem.h> #ifdef CONFIG_ANDROID_PMEM #include <linux/android_pmem.h> #endif #include <linux/synaptics_i2c_rmi.h> #include <linux/dma-contiguous.h> #include <linux/dma-mapping.h> #include <linux/platform_data/qcom_crypto_device.h> #include <linux/platform_data/qcom_wcnss_device.h> #include <linux/leds.h> #include <linux/leds-pm8xxx-htc.h> #include <linux/msm_tsens.h> #include <linux/proc_fs.h> #include <linux/cm3629.h> #include <linux/memblock.h> #include <linux/msm_thermal.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/setup.h> #include <asm/hardware/gic.h> #include <asm/mach/mmc.h> #include <mach/board.h> #include <mach/msm_iomap.h> #include <mach/msm_spi.h> #ifdef CONFIG_USB_MSM_OTG_72K #include <mach/msm_hsusb.h> #else #include <linux/usb/msm_hsusb.h> #endif #include <mach/usbdiag.h> #include <mach/socinfo.h> #include <mach/rpm.h> #include <mach/gpio.h> #include <mach/msm_bus_board.h> #include <mach/msm_memtypes.h> #include <mach/dma.h> #include <mach/msm_dsps.h> 
#include <mach/msm_xo.h> #include <mach/restart.h> #include <mach/panel_id.h> #ifdef CONFIG_WCD9310_CODEC #include <linux/slimbus/slimbus.h> #include <linux/mfd/wcd9xxx/core.h> #include <linux/mfd/wcd9xxx/pdata.h> #endif #include <linux/a1028.h> #include <linux/msm_ion.h> #include <mach/ion.h> #include <mach/msm_rtb.h> #include <mach/msm_cache_dump.h> #include <mach/scm.h> #include <mach/iommu_domains.h> #include <mach/kgsl.h> #include <linux/fmem.h> #include <linux/mpu.h> #include <linux/r3gd20.h> #include <linux/akm8975.h> #include <linux/bma250.h> #include <linux/ewtzmu2.h> #ifdef CONFIG_BT #include <mach/htc_bdaddress.h> #endif #include <mach/htc_headset_mgr.h> #include <mach/htc_headset_pmic.h> #ifdef CONFIG_HTC_HEADSET_ONE_WIRE #include <mach/htc_headset_one_wire.h> #endif #include <mach/cable_detect.h> #include "timer.h" #include "devices.h" #include "devices-msm8x60.h" #include "spm.h" #include "board-elite.h" #include "pm.h" #include <mach/cpuidle.h> #include "rpm_resources.h" #include <mach/mpm.h> #include "acpuclock.h" #include "rpm_log.h" #include "smd_private.h" #include "pm-boot.h" #ifdef CONFIG_FB_MSM_HDMI_MHL #include <mach/mhl.h> #endif #ifdef CONFIG_MSM_CAMERA_FLASH #include <linux/htc_flashlight.h> #endif #include <mach/board_htc.h> #include <linux/mfd/pm8xxx/pm8xxx-vibrator-pwm.h> #ifdef CONFIG_HTC_BATT_8960 #include "mach/htc_battery_8960.h" #include "mach/htc_battery_cell.h" #include "linux/mfd/pm8xxx/pm8921-charger-htc.h" #endif #ifdef CONFIG_PERFLOCK #include <mach/perflock.h> #endif extern unsigned int engineerid; // bit 0 #define HW_VER_ID_VIRT (MSM_TLMM_BASE + 0x00002054) unsigned skuid; #ifdef CONFIG_MSM_CAMERA_FLASH #ifdef CONFIG_FLASHLIGHT_TPS61310 static void config_flashlight_gpios(void) { static uint32_t flashlight_gpio_table[] = { GPIO_CFG(32, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(33, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), }; gpio_tlmm_config(flashlight_gpio_table[0], GPIO_CFG_ENABLE); 
gpio_tlmm_config(flashlight_gpio_table[1], GPIO_CFG_ENABLE);
}

/*
 * TPS61310 flash LED driver data: strobe lines on GPIO 33/32, a single
 * LED, 600 ms maximum flash duration. gpio_init points at the GPIO mux
 * setup above, which the driver runs before touching the pins.
 */
static struct TPS61310_flashlight_platform_data elite_flashlight_data = {
	.gpio_init = config_flashlight_gpios,
	.tps61310_strb0 = 33,
	.tps61310_strb1 = 32,
	.led_count = 1,
	.flash_duration_ms = 600,
};

/* I2C slave entry for the TPS61310 (7-bit address 0x33 == 0x66 >> 1). */
static struct i2c_board_info i2c_tps61310_flashlight[] = {
	{
		I2C_BOARD_INFO("TPS61310_FLASHLIGHT", 0x66 >> 1),
		.platform_data = &elite_flashlight_data,
	},
};
#endif
#endif

/* Platform device matched by the "iris_fm" (FM radio) driver. */
static struct platform_device msm_fm_platform_init = {
	.name = "iris_fm",
	.id = -1,
};

#if defined(CONFIG_GPIO_SX150X) || defined(CONFIG_GPIO_SX150X_MODULE)
/*
 * Virtual IRQ/GPIO numbering for the SX150x camera GPIO expander,
 * allocated directly after the PM8921 IRQ and MPP ranges.
 */
enum {
	GPIO_EXPANDER_IRQ_BASE = (PM8921_IRQ_BASE + PM8921_NR_IRQS),
	GPIO_EXPANDER_GPIO_BASE = (PM8921_MPP_BASE + PM8921_NR_MPPS),
	/* CAM Expander */
	GPIO_CAM_EXPANDER_BASE = GPIO_EXPANDER_GPIO_BASE,
	GPIO_CAM_GP_STROBE_READY = GPIO_CAM_EXPANDER_BASE,
	GPIO_CAM_GP_AFBUSY,
	GPIO_CAM_GP_STROBE_CE,
	GPIO_CAM_GP_CAM1MP_XCLR,
	GPIO_CAM_GP_CAMIF_RESET_N,
	GPIO_CAM_GP_XMT_FLASH_INT,
	GPIO_CAM_GP_LED_EN1,
	GPIO_CAM_GP_LED_EN2,
};
#endif

#ifdef CONFIG_I2C
/* QUP I2C bus numbers for the GSBI cores used on this board. */
#define MSM_8960_GSBI5_QUP_I2C_BUS_ID 5
#define MSM_8960_GSBI4_QUP_I2C_BUS_ID 4
#define MSM_8960_GSBI2_QUP_I2C_BUS_ID 2
#define MSM_8960_GSBI3_QUP_I2C_BUS_ID 3
#define MSM_8960_GSBI8_QUP_I2C_BUS_ID 8
#define MSM_8960_GSBI12_QUP_I2C_BUS_ID 12
#endif

/* Carveout sizes for the legacy PMEM regions. */
#define MSM_PMEM_ADSP_SIZE 0x6D00000
#define MSM_PMEM_AUDIO_SIZE 0x4CF000
#define MSM_PMEM_SIZE 0x2800000 /* 40 Mbytes */
#define MSM_LIQUID_PMEM_SIZE 0x4000000 /* 64 Mbytes */
#define MSM_HDMI_PRIM_PMEM_SIZE 0x4000000 /* 64 Mbytes */

#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
#define HOLE_SIZE 0x20000
#define MSM_CONTIG_MEM_SIZE 0x65000
#ifdef CONFIG_MSM_IOMMU
/* With an IOMMU the SF heap is not carved out (size 0) and one fewer
 * carveout heap is registered than in the non-IOMMU configuration. */
#define MSM_ION_MM_SIZE 0x3800000 /* Need to be multiple of 64K */
#define MSM_ION_SF_SIZE 0x0
#define MSM_ION_QSECOM_SIZE 0x780000 /* (7.5MB) */
#define MSM_ION_HEAP_NUM 7
#else
#define MSM_ION_MM_SIZE MSM_PMEM_ADSP_SIZE
#define MSM_ION_SF_SIZE MSM_PMEM_SIZE
#define MSM_ION_QSECOM_SIZE 0x600000 /* (6MB) */
#define MSM_ION_HEAP_NUM 8
#endif
#define
MSM_ION_MM_FW_SIZE (0x200000 - HOLE_SIZE) /* 2MB minus the 128KB hole */
#define MSM_ION_MFC_SIZE SZ_8K
#define MSM_ION_AUDIO_SIZE MSM_PMEM_AUDIO_SIZE
/* Larger MM/SF heap sizes for the "liquid" variant (selected at runtime
 * via machine_is_msm8960_liquid() in size_pmem_devices()). */
#define MSM_LIQUID_ION_MM_SIZE (MSM_ION_MM_SIZE + 0x600000)
#define MSM_LIQUID_ION_SF_SIZE MSM_LIQUID_PMEM_SIZE
#define MSM_HDMI_PRIM_ION_SF_SIZE MSM_HDMI_PRIM_PMEM_SIZE
#define MSM_MM_FW_SIZE (0x200000 - HOLE_SIZE) /* 2mb -128kb*/
/* The fixed ION area ends at 0xa0000000; the MM firmware heap plus the
 * hole sit at its base. */
#define MSM8960_FIXED_AREA_START (0xa0000000 - (MSM_ION_MM_FW_SIZE + \
	HOLE_SIZE))
#define MAX_FIXED_AREA_SIZE 0x10000000
#define MSM8960_FW_START MSM8960_FIXED_AREA_START
#define MSM_ION_ADSP_SIZE SZ_8M
#else
#define MSM_CONTIG_MEM_SIZE 0x110C000
#define MSM_ION_HEAP_NUM 1
#endif

/* Contiguous-memory region size; overridable from the kernel command
 * line via the early_param hook below. */
static unsigned msm_contig_mem_size = MSM_CONTIG_MEM_SIZE;

#ifdef CONFIG_KERNEL_MSM_CONTIG_MEM_REGION
/* early_param hook: "msm_contig_mem_size=<size>". */
static int __init msm_contig_mem_size_setup(char *p)
{
	msm_contig_mem_size = memparse(p, NULL);
	return 0;
}
early_param("msm_contig_mem_size", msm_contig_mem_size_setup);
#endif

#ifdef CONFIG_ANDROID_PMEM
static unsigned pmem_size = MSM_PMEM_SIZE;
/* Set to 1 when the user passes pmem_size= so size_pmem_devices() does
 * not replace it with the liquid default. */
static unsigned pmem_param_set = 0;

/* early_param hook: "pmem_size=<size>". */
static int __init pmem_size_setup(char *p)
{
	pmem_size = memparse(p, NULL);
	pmem_param_set = 1;
	return 0;
}
early_param("pmem_size", pmem_size_setup);

static unsigned pmem_adsp_size = MSM_PMEM_ADSP_SIZE;

/* early_param hook: "pmem_adsp_size=<size>". */
static int __init pmem_adsp_size_setup(char *p)
{
	pmem_adsp_size = memparse(p, NULL);
	return 0;
}
early_param("pmem_adsp_size", pmem_adsp_size_setup);

static unsigned pmem_audio_size = MSM_PMEM_AUDIO_SIZE;

/* early_param hook: "pmem_audio_size=<size>". */
static int __init pmem_audio_size_setup(char *p)
{
	pmem_audio_size = memparse(p, NULL);
	return 0;
}
early_param("pmem_audio_size", pmem_audio_size_setup);
#endif

#ifdef CONFIG_ANDROID_PMEM
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
/* Legacy PMEM region descriptors, used only when multimedia buffers do
 * not go through ION. */
static struct android_pmem_platform_data android_pmem_pdata = {
	.name = "pmem",
	.allocator_type = PMEM_ALLOCATORTYPE_ALLORNOTHING,
	.cached = 1,
	.memory_type = MEMTYPE_EBI1,
};
static struct platform_device android_pmem_device = {
	.name = "android_pmem",
	.id = 0,
	.dev = {.platform_data = &android_pmem_pdata},
};
static struct
android_pmem_platform_data android_pmem_adsp_pdata = { .name = "pmem_adsp", .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, .cached = 0, .memory_type = MEMTYPE_EBI1, }; static struct platform_device android_pmem_adsp_device = { .name = "android_pmem", .id = 2, .dev = { .platform_data = &android_pmem_adsp_pdata }, }; static struct android_pmem_platform_data android_pmem_audio_pdata = { .name = "pmem_audio", .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, .cached = 0, .memory_type = MEMTYPE_EBI1, }; static struct platform_device android_pmem_audio_device = { .name = "android_pmem", .id = 4, .dev = { .platform_data = &android_pmem_audio_pdata }, }; #endif /*CONFIG_MSM_MULTIMEDIA_USE_ION*/ #endif /*CONFIG_ANDROID_PMEM*/ struct fmem_platform_data fmem_pdata = { }; static struct memtype_reserve msm8960_reserve_table[] __initdata = { [MEMTYPE_SMI] = { }, [MEMTYPE_EBI0] = { .flags = MEMTYPE_FLAGS_1M_ALIGN, }, [MEMTYPE_EBI1] = { .flags = MEMTYPE_FLAGS_1M_ALIGN, }, }; #if defined(CONFIG_MSM_RTB) static struct msm_rtb_platform_data msm_rtb_pdata = { .size = SZ_1K, }; static int __init msm_rtb_set_buffer_size(char *p) { int s; s = memparse(p, NULL); msm_rtb_pdata.size = ALIGN(s, SZ_4K); return 0; } early_param("msm_rtb_size", msm_rtb_set_buffer_size); static struct platform_device msm_rtb_device = { .name = "msm_rtb", .id = -1, .dev = { .platform_data = &msm_rtb_pdata, }, }; #endif static void __init reserve_rtb_memory(void) { #if defined(CONFIG_MSM_RTB) msm8960_reserve_table[MEMTYPE_EBI1].size += msm_rtb_pdata.size; #endif } static void __init size_pmem_devices(void) { #ifdef CONFIG_ANDROID_PMEM #ifndef CONFIG_MSM_MULTIMEDIA_USE_ION android_pmem_adsp_pdata.size = pmem_adsp_size; if (!pmem_param_set && machine_is_msm8960_liquid()) pmem_size = MSM_LIQUID_PMEM_SIZE; android_pmem_pdata.size = pmem_size; android_pmem_audio_pdata.size = MSM_PMEM_AUDIO_SIZE; #endif /*CONFIG_MSM_MULTIMEDIA_USE_ION*/ #endif /*CONFIG_ANDROID_PMEM*/ } #ifdef CONFIG_ANDROID_PMEM #ifndef 
CONFIG_MSM_MULTIMEDIA_USE_ION
/* Add one legacy PMEM region's size to the reserve-table bucket that
 * matches its memory type. */
static void __init reserve_memory_for(struct android_pmem_platform_data *p)
{
	msm8960_reserve_table[p->memory_type].size += p->size;
}
#endif /*CONFIG_MSM_MULTIMEDIA_USE_ION*/
#endif /*CONFIG_ANDROID_PMEM*/

/* Reserve carveout memory for the legacy PMEM regions (only when
 * multimedia is not on ION) plus the contiguous-memory region. */
static void __init reserve_pmem_memory(void)
{
#ifdef CONFIG_ANDROID_PMEM
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
	reserve_memory_for(&android_pmem_adsp_pdata);
	reserve_memory_for(&android_pmem_pdata);
	reserve_memory_for(&android_pmem_audio_pdata);
#endif
	msm8960_reserve_table[MEMTYPE_EBI1].size += msm_contig_mem_size;
#endif
}

/* Every reservable physical address on this board maps to EBI1; the
 * paddr argument is deliberately ignored. */
static int msm8960_paddr_to_memtype(unsigned int paddr)
{
	return MEMTYPE_EBI1;
}

#define FMEM_ENABLED 0

#ifdef CONFIG_ION_MSM
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
/* Content-protected multimedia heap: FIXED_MIDDLE in the fixed area,
 * fully IOMMU-mapped, CMA-backed when CONFIG_CMA is set. */
static struct ion_cp_heap_pdata cp_mm_ion_pdata = {
	.permission_type = IPT_TYPE_MM_CARVEOUT,
	.align = PAGE_SIZE,
	.reusable = FMEM_ENABLED,
	.mem_is_fmem = FMEM_ENABLED,
	.fixed_position = FIXED_MIDDLE,
	.iommu_map_all = 1,
	.iommu_2x_map_domain = VIDEO_DOMAIN,
#ifdef CONFIG_CMA
	.is_cma = 1,
#endif
};
/* Content-protected MFC heap, FIXED_HIGH (top of the fixed area). */
static struct ion_cp_heap_pdata cp_mfc_ion_pdata = {
	.permission_type = IPT_TYPE_MFC_SHAREDMEM,
	.align = PAGE_SIZE,
	.reusable = 0,
	.mem_is_fmem = FMEM_ENABLED,
	.fixed_position = FIXED_HIGH,
};
/* Plain carveout settings shared by the SF/QSECOM/audio heaps. */
static struct ion_co_heap_pdata co_ion_pdata = {
	.adjacent_mem_id = INVALID_HEAP_ID,
	.align = PAGE_SIZE,
	.mem_is_fmem = 0,
};
/* MM firmware heap: FIXED_LOW and declared adjacent to the MM heap
 * (see the ordering comment below). */
static struct ion_co_heap_pdata fw_co_ion_pdata = {
	.adjacent_mem_id = ION_CP_MM_HEAP_ID,
	.align = SZ_128K,
	.mem_is_fmem = FMEM_ENABLED,
	.fixed_position = FIXED_LOW,
};
#endif

static u64 msm_dmamask = DMA_BIT_MASK(32);

/* Stub device whose dev is used as the DMA/CMA target for the ION MM
 * heap (passed to dma_declare_contiguous() in reserve_ion_memory()). */
static struct platform_device ion_mm_heap_device = {
	.name = "ion-mm-heap-device",
	.id = -1,
	.dev = {
		.dma_mask = &msm_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	}
};
#ifdef CONFIG_CMA
/* Stub device serving the same role for the ION ADSP heap. */
static struct platform_device ion_adsp_heap_device = {
	.name = "ion-adsp-heap-device",
	.id = -1,
	.dev = {
		.dma_mask = &msm_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	}
};
#endif
/**
 * These heaps are listed in the order they will be
allocated. Due to * video hardware restrictions and content protection the FW heap has to * be allocated adjacent (below) the MM heap and the MFC heap has to be * allocated after the MM heap to ensure MFC heap is not more than 256MB * away from the base address of the FW heap. * However, the order of FW heap and MM heap doesn't matter since these * two heaps are taken care of by separate code to ensure they are adjacent * to each other. * Don't swap the order unless you know what you are doing! */ struct ion_platform_heap msm8960_heaps[] = { { .id = ION_SYSTEM_HEAP_ID, .type = ION_HEAP_TYPE_SYSTEM, .name = ION_VMALLOC_HEAP_NAME, }, #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION { .id = ION_CP_MM_HEAP_ID, .type = ION_HEAP_TYPE_CP, .name = ION_MM_HEAP_NAME, .size = MSM_ION_MM_SIZE, .memory_type = ION_EBI_TYPE, .extra_data = (void *) &cp_mm_ion_pdata, .priv = &ion_mm_heap_device.dev, }, { .id = ION_MM_FIRMWARE_HEAP_ID, .type = ION_HEAP_TYPE_CARVEOUT, .name = ION_MM_FIRMWARE_HEAP_NAME, .size = MSM_ION_MM_FW_SIZE, .memory_type = ION_EBI_TYPE, .extra_data = (void *) &fw_co_ion_pdata, }, { .id = ION_CP_MFC_HEAP_ID, .type = ION_HEAP_TYPE_CP, .name = ION_MFC_HEAP_NAME, .size = MSM_ION_MFC_SIZE, .memory_type = ION_EBI_TYPE, .extra_data = (void *) &cp_mfc_ion_pdata, }, #ifndef CONFIG_MSM_IOMMU { .id = ION_SF_HEAP_ID, .type = ION_HEAP_TYPE_CARVEOUT, .name = ION_SF_HEAP_NAME, .size = MSM_ION_SF_SIZE, .memory_type = ION_EBI_TYPE, .extra_data = (void *) &co_ion_pdata, }, #endif { .id = ION_IOMMU_HEAP_ID, .type = ION_HEAP_TYPE_IOMMU, .name = ION_IOMMU_HEAP_NAME, }, { .id = ION_QSECOM_HEAP_ID, .type = ION_HEAP_TYPE_CARVEOUT, .name = ION_QSECOM_HEAP_NAME, .size = MSM_ION_QSECOM_SIZE, .memory_type = ION_EBI_TYPE, .extra_data = (void *) &co_ion_pdata, }, { .id = ION_AUDIO_HEAP_ID, .type = ION_HEAP_TYPE_CARVEOUT, .name = ION_AUDIO_HEAP_NAME, .size = MSM_ION_AUDIO_SIZE, .memory_type = ION_EBI_TYPE, .extra_data = (void *) &co_ion_pdata, }, #ifdef CONFIG_CMA { .id = ION_ADSP_HEAP_ID, .type = 
ION_HEAP_TYPE_DMA, .name = ION_ADSP_HEAP_NAME, .size = MSM_ION_ADSP_SIZE, .memory_type = ION_EBI_TYPE, .extra_data = (void *) &co_ion_pdata, .priv = &ion_adsp_heap_device.dev, }, #endif #endif }; static struct ion_platform_data msm8960_ion_pdata = { .nr = MSM_ION_HEAP_NUM, .heaps = msm8960_heaps, }; static struct platform_device ion_dev = { .name = "ion-msm", .id = 1, .dev = { .platform_data = &msm8960_ion_pdata }, }; #endif struct platform_device fmem_device = { .name = "fmem", .id = 1, .dev = { .platform_data = &fmem_pdata }, }; static void __init reserve_mem_for_ion(enum ion_memory_types mem_type, unsigned long size) { msm8960_reserve_table[MEMTYPE_EBI1].size += size; } static void __init msm8960_reserve_fixed_area(unsigned long fixed_area_size) { #if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION) int ret; if (fixed_area_size > MAX_FIXED_AREA_SIZE) panic("fixed area size is larger than %dM\n", MAX_FIXED_AREA_SIZE >> 20); reserve_info->fixed_area_size = fixed_area_size; reserve_info->fixed_area_start = MSM8960_FW_START; ret = memblock_remove(reserve_info->fixed_area_start, reserve_info->fixed_area_size); BUG_ON(ret); #endif } /** * Reserve memory for ION and calculate amount of reusable memory for fmem. * We only reserve memory for heaps that are not reusable. However, we only * support one reusable heap at the moment so we ignore the reusable flag for * other than the first heap with reusable flag set. Also handle special case * for video heaps (MM,FW, and MFC). Video requires heaps MM and MFC to be * at a higher address than FW in addition to not more than 256MB away from the * base address of the firmware. This means that if MM is reusable the other * two heaps must be allocated in the same region as FW. This is handled by the * mem_is_fmem flag in the platform data. In addition the MM heap must be * adjacent to the FW heap for content protection purposes. 
*/ static void __init reserve_ion_memory(void) { #if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION) unsigned int i; int ret; unsigned int fixed_size = 0; unsigned int fixed_low_size, fixed_middle_size, fixed_high_size; unsigned long fixed_low_start, fixed_middle_start, fixed_high_start; unsigned long cma_alignment; unsigned int low_use_cma = 0; unsigned int middle_use_cma = 0; unsigned int high_use_cma = 0; fixed_low_size = 0; fixed_middle_size = 0; fixed_high_size = 0; cma_alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order); for (i = 0; i < msm8960_ion_pdata.nr; ++i) { struct ion_platform_heap *heap = &(msm8960_ion_pdata.heaps[i]); int align = SZ_4K; int iommu_map_all = 0; int adjacent_mem_id = INVALID_HEAP_ID; int use_cma = 0; if (heap->extra_data) { int fixed_position = NOT_FIXED; switch ((int) heap->type) { case ION_HEAP_TYPE_CP: fixed_position = ((struct ion_cp_heap_pdata *) heap->extra_data)->fixed_position; align = ((struct ion_cp_heap_pdata *) heap->extra_data)->align; iommu_map_all = ((struct ion_cp_heap_pdata *) heap->extra_data)->iommu_map_all; if (((struct ion_cp_heap_pdata *) heap->extra_data)->is_cma) { heap->size = ALIGN(heap->size, cma_alignment); use_cma = 1; } break; case ION_HEAP_TYPE_DMA: use_cma = 1; /* Purposely fall through here */ case ION_HEAP_TYPE_CARVEOUT: fixed_position = ((struct ion_co_heap_pdata *) heap->extra_data)->fixed_position; adjacent_mem_id = ((struct ion_co_heap_pdata *) heap->extra_data)->adjacent_mem_id; break; default: break; } if (iommu_map_all) { if (heap->size & (SZ_64K-1)) { heap->size = ALIGN(heap->size, SZ_64K); pr_info("Heap %s not aligned to 64K. 
Adjusting size to %x\n", heap->name, heap->size); } } if (fixed_position != NOT_FIXED) fixed_size += heap->size; else if (!use_cma) reserve_mem_for_ion(MEMTYPE_EBI1, heap->size); if (fixed_position == FIXED_LOW) { fixed_low_size += heap->size; low_use_cma = use_cma; } else if (fixed_position == FIXED_MIDDLE) { fixed_middle_size += heap->size; middle_use_cma = use_cma; } else if (fixed_position == FIXED_HIGH) { fixed_high_size += heap->size; high_use_cma = use_cma; } else if (use_cma) { /* * Heaps that use CMA but are not part of the * fixed set. Create wherever. */ dma_declare_contiguous( heap->priv, heap->size, 0, 0xb0000000); } } } if (!fixed_size) return; /* * Given the setup for the fixed area, we can't round up all sizes. * Some sizes must be set up exactly and aligned correctly. Incorrect * alignments are considered a configuration issue */ fixed_low_start = MSM8960_FIXED_AREA_START; if (low_use_cma) { BUG_ON(!IS_ALIGNED(fixed_low_start, cma_alignment)); BUG_ON(!IS_ALIGNED(fixed_low_size + HOLE_SIZE, cma_alignment)); } else { BUG_ON(!IS_ALIGNED(fixed_low_size + HOLE_SIZE, SECTION_SIZE)); ret = memblock_remove(fixed_low_start, fixed_low_size + HOLE_SIZE); BUG_ON(ret); } fixed_middle_start = fixed_low_start + fixed_low_size + HOLE_SIZE; if (middle_use_cma) { BUG_ON(!IS_ALIGNED(fixed_middle_start, cma_alignment)); BUG_ON(!IS_ALIGNED(fixed_middle_size, cma_alignment)); } else { BUG_ON(!IS_ALIGNED(fixed_middle_size, SECTION_SIZE)); ret = memblock_remove(fixed_middle_start, fixed_middle_size); BUG_ON(ret); } fixed_high_start = fixed_middle_start + fixed_middle_size; if (high_use_cma) { fixed_high_size = ALIGN(fixed_high_size, cma_alignment); BUG_ON(!IS_ALIGNED(fixed_high_start, cma_alignment)); } else { /* This is the end of the fixed area so it's okay to round up */ fixed_high_size = ALIGN(fixed_high_size, SECTION_SIZE); ret = memblock_remove(fixed_high_start, fixed_high_size); BUG_ON(ret); } for (i = 0; i < msm8960_ion_pdata.nr; ++i) { struct ion_platform_heap 
*heap = &(msm8960_ion_pdata.heaps[i]); if (heap->extra_data) { int fixed_position = NOT_FIXED; struct ion_cp_heap_pdata *pdata = NULL; switch ((int) heap->type) { case ION_HEAP_TYPE_CP: pdata = (struct ion_cp_heap_pdata *)heap->extra_data; fixed_position = pdata->fixed_position; break; case ION_HEAP_TYPE_CARVEOUT: case ION_HEAP_TYPE_DMA: fixed_position = ((struct ion_co_heap_pdata *) heap->extra_data)->fixed_position; break; default: break; } switch (fixed_position) { case FIXED_LOW: heap->base = fixed_low_start; break; case FIXED_MIDDLE: heap->base = fixed_middle_start; if (middle_use_cma) { ret = dma_declare_contiguous( &ion_mm_heap_device.dev, heap->size, fixed_middle_start, 0xa0000000); WARN_ON(ret); } pdata->secure_base = fixed_middle_start - HOLE_SIZE; pdata->secure_size = HOLE_SIZE + heap->size; break; case FIXED_HIGH: heap->base = fixed_high_start; break; default: break; } } } #endif } static void __init reserve_mdp_memory(void) { msm8960_mdp_writeback(msm8960_reserve_table); } #if defined(CONFIG_MSM_CACHE_DUMP) static struct msm_cache_dump_platform_data msm_cache_dump_pdata = { .l2_size = L2_BUFFER_SIZE, }; static struct platform_device msm_cache_dump_device = { .name = "msm_cache_dump", .id = -1, .dev = { .platform_data = &msm_cache_dump_pdata, }, }; #endif static void reserve_cache_dump_memory(void) { #ifdef CONFIG_MSM_CACHE_DUMP unsigned int spare; unsigned int l1_size; unsigned int total; int ret; ret = scm_call(L1C_SERVICE_ID, L1C_BUFFER_GET_SIZE_COMMAND_ID, &spare, sizeof(spare), &l1_size, sizeof(l1_size)); if (ret) /* Fall back to something reasonable here */ l1_size = L1_BUFFER_SIZE; total = l1_size + L2_BUFFER_SIZE; msm8960_reserve_table[MEMTYPE_EBI1].size += total; msm_cache_dump_pdata.l1_size = l1_size; #endif } static void __init msm8960_calculate_reserve_sizes(void) { size_pmem_devices(); reserve_pmem_memory(); reserve_ion_memory(); reserve_mdp_memory(); reserve_rtb_memory(); reserve_cache_dump_memory(); } static struct reserve_info 
msm8960_reserve_info __initdata = { .memtype_reserve_table = msm8960_reserve_table, .calculate_reserve_sizes = msm8960_calculate_reserve_sizes, .reserve_fixed_area = msm8960_reserve_fixed_area, .paddr_to_memtype = msm8960_paddr_to_memtype, }; static void __init elite_early_memory(void) { reserve_info = &msm8960_reserve_info; } static void __init elite_reserve(void) { msm_reserve(); } static void __init msm8960_allocate_memory_regions(void) { msm8960_allocate_fb_region(); } #ifdef CONFIG_WCD9310_CODEC #define TABLA_INTERRUPT_BASE (NR_MSM_IRQS + NR_GPIO_IRQS + NR_PM8921_IRQS) /* Micbias setting is based on 8660 CDP/MTP/FLUID requirement * 4 micbiases are used to power various analog and digital * microphones operating at 1800 mV. Technically, all micbiases * can source from single cfilter since all microphones operate * at the same voltage level. The arrangement below is to make * sure all cfilters are exercised. LDO_H regulator ouput level * does not need to be as high as 2.85V. It is choosen for * microphone sensitivity purpose. 
*/ static struct wcd9xxx_pdata tabla_platform_data = { .slimbus_slave_device = { .name = "tabla-slave", .e_addr = {0, 0, 0x10, 0, 0x17, 2}, }, .irq = MSM_GPIO_TO_INT(62), .irq_base = TABLA_INTERRUPT_BASE, .num_irqs = NR_TABLA_IRQS, .reset_gpio = PM8921_GPIO_PM_TO_SYS(34), .micbias = { .ldoh_v = TABLA_LDOH_2P85_V, .cfilt1_mv = 1800, .cfilt2_mv = 1800, .cfilt3_mv = 1800, .bias1_cfilt_sel = TABLA_CFILT1_SEL, .bias2_cfilt_sel = TABLA_CFILT2_SEL, .bias3_cfilt_sel = TABLA_CFILT3_SEL, .bias4_cfilt_sel = TABLA_CFILT3_SEL, }, .regulator = { { .name = "CDC_VDD_CP", .min_uV = 1800000, .max_uV = 1800000, .optimum_uA = WCD9XXX_CDC_VDDA_CP_CUR_MAX, }, { .name = "CDC_VDDA_RX", .min_uV = 1800000, .max_uV = 1800000, .optimum_uA = WCD9XXX_CDC_VDDA_RX_CUR_MAX, }, { .name = "CDC_VDDA_TX", .min_uV = 1800000, .max_uV = 1800000, .optimum_uA = WCD9XXX_CDC_VDDA_TX_CUR_MAX, }, { .name = "VDDIO_CDC", .min_uV = 1800000, .max_uV = 1800000, .optimum_uA = WCD9XXX_VDDIO_CDC_CUR_MAX, }, { .name = "VDDD_CDC_D", .min_uV = 1225000, .max_uV = 1250000, .optimum_uA = WCD9XXX_VDDD_CDC_D_CUR_MAX, }, { .name = "CDC_VDDA_A_1P2V", .min_uV = 1225000, .max_uV = 1250000, .optimum_uA = WCD9XXX_VDDD_CDC_A_CUR_MAX, }, }, }; static struct slim_device msm_slim_tabla = { .name = "tabla-slim", .e_addr = {0, 1, 0x10, 0, 0x17, 2}, .dev = { .platform_data = &tabla_platform_data, }, }; static struct wcd9xxx_pdata tabla20_platform_data = { .slimbus_slave_device = { .name = "tabla-slave", .e_addr = {0, 0, 0x60, 0, 0x17, 2}, }, .irq = MSM_GPIO_TO_INT(62), .irq_base = TABLA_INTERRUPT_BASE, .num_irqs = NR_TABLA_IRQS, .reset_gpio = PM8921_GPIO_PM_TO_SYS(34), .amic_settings = { .legacy_mode = 0x7F, .use_pdata = 0x7F, }, .micbias = { .ldoh_v = TABLA_LDOH_2P85_V, .cfilt1_mv = 1800, .cfilt2_mv = 1800, .cfilt3_mv = 1800, .bias1_cfilt_sel = TABLA_CFILT1_SEL, .bias2_cfilt_sel = TABLA_CFILT2_SEL, .bias3_cfilt_sel = TABLA_CFILT3_SEL, .bias4_cfilt_sel = TABLA_CFILT3_SEL, }, .regulator = { { .name = "CDC_VDD_CP", .min_uV = 1800000, 
.max_uV = 1800000, .optimum_uA = WCD9XXX_CDC_VDDA_CP_CUR_MAX, }, { .name = "CDC_VDDA_RX", .min_uV = 1800000, .max_uV = 1800000, .optimum_uA = WCD9XXX_CDC_VDDA_RX_CUR_MAX, }, { .name = "CDC_VDDA_TX", .min_uV = 1800000, .max_uV = 1800000, .optimum_uA = WCD9XXX_CDC_VDDA_TX_CUR_MAX, }, { .name = "VDDIO_CDC", .min_uV = 1800000, .max_uV = 1800000, .optimum_uA = WCD9XXX_VDDIO_CDC_CUR_MAX, }, { .name = "VDDD_CDC_D", .min_uV = 1225000, .max_uV = 1250000, .optimum_uA = WCD9XXX_VDDD_CDC_D_CUR_MAX, }, { .name = "CDC_VDDA_A_1P2V", .min_uV = 1225000, .max_uV = 1250000, .optimum_uA = WCD9XXX_VDDD_CDC_A_CUR_MAX, }, }, }; static struct slim_device msm_slim_tabla20 = { .name = "tabla2x-slim", .e_addr = {0, 1, 0x60, 0, 0x17, 2}, .dev = { .platform_data = &tabla20_platform_data, }, }; #endif static struct slim_boardinfo msm_slim_devices[] = { #ifdef CONFIG_WCD9310_CODEC { .bus_num = 1, .slim_slave = &msm_slim_tabla, }, { .bus_num = 1, .slim_slave = &msm_slim_tabla20, }, #endif /* add more slimbus slaves as needed */ }; static struct a1028_platform_data a1028_data = { .gpio_a1028_wakeup = ELITE_GPIO_AUD_A1028_WAKE, .gpio_a1028_reset = ELITE_GPIO_AUD_A1028_RSTz, }; #define A1028_I2C_SLAVE_ADDR (0x3E) static struct i2c_board_info msm_i2c_gsbi2_a1028_info[] = { { I2C_BOARD_INFO(A1028_I2C_NAME, A1028_I2C_SLAVE_ADDR), .platform_data = &a1028_data, }, }; #define MSM_WCNSS_PHYS 0x03000000 #define MSM_WCNSS_SIZE 0x280000 static struct resource resources_wcnss_wlan[] = { { .start = RIVA_APPS_WLAN_RX_DATA_AVAIL_IRQ, .end = RIVA_APPS_WLAN_RX_DATA_AVAIL_IRQ, .name = "wcnss_wlanrx_irq", .flags = IORESOURCE_IRQ, }, { .start = RIVA_APPS_WLAN_DATA_XFER_DONE_IRQ, .end = RIVA_APPS_WLAN_DATA_XFER_DONE_IRQ, .name = "wcnss_wlantx_irq", .flags = IORESOURCE_IRQ, }, { .start = MSM_WCNSS_PHYS, .end = MSM_WCNSS_PHYS + MSM_WCNSS_SIZE - 1, .name = "wcnss_mmio", .flags = IORESOURCE_MEM, }, { .start = 84, .end = 88, .name = "wcnss_gpios_5wire", .flags = IORESOURCE_IO, }, }; static struct qcom_wcnss_opts 
qcom_wcnss_pdata = { .has_48mhz_xo = 1, }; static struct platform_device msm_device_wcnss_wlan = { .name = "wcnss_wlan", .id = 0, .num_resources = ARRAY_SIZE(resources_wcnss_wlan), .resource = resources_wcnss_wlan, .dev = {.platform_data = &qcom_wcnss_pdata}, }; #ifdef CONFIG_QSEECOM /* qseecom bus scaling */ static struct msm_bus_vectors qseecom_clks_init_vectors[] = { { .src = MSM_BUS_MASTER_ADM_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_ADM_PORT1, .dst = MSM_BUS_SLAVE_GSBI1_UART, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_SPDM, .dst = MSM_BUS_SLAVE_SPDM, .ib = 0, .ab = 0, }, }; static struct msm_bus_vectors qseecom_enable_dfab_vectors[] = { { .src = MSM_BUS_MASTER_ADM_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 70000000UL, .ib = 70000000UL, }, { .src = MSM_BUS_MASTER_ADM_PORT1, .dst = MSM_BUS_SLAVE_GSBI1_UART, .ab = 2480000000UL, .ib = 2480000000UL, }, { .src = MSM_BUS_MASTER_SPDM, .dst = MSM_BUS_SLAVE_SPDM, .ib = 0, .ab = 0, }, }; static struct msm_bus_vectors qseecom_enable_sfpb_vectors[] = { { .src = MSM_BUS_MASTER_ADM_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_ADM_PORT1, .dst = MSM_BUS_SLAVE_GSBI1_UART, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_SPDM, .dst = MSM_BUS_SLAVE_SPDM, .ib = (64 * 8) * 1000000UL, .ab = (64 * 8) * 100000UL, }, }; static struct msm_bus_vectors qseecom_enable_dfab_sfpb_vectors[] = { { .src = MSM_BUS_MASTER_ADM_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 70000000UL, .ib = 70000000UL, }, { .src = MSM_BUS_MASTER_ADM_PORT1, .dst = MSM_BUS_SLAVE_GSBI1_UART, .ab = 2480000000UL, .ib = 2480000000UL, }, { .src = MSM_BUS_MASTER_SPDM, .dst = MSM_BUS_SLAVE_SPDM, .ib = (64 * 8) * 1000000UL, .ab = (64 * 8) * 100000UL, }, }; static struct msm_bus_paths qseecom_hw_bus_scale_usecases[] = { { ARRAY_SIZE(qseecom_clks_init_vectors), qseecom_clks_init_vectors, }, { ARRAY_SIZE(qseecom_enable_dfab_vectors), qseecom_enable_dfab_vectors, }, { 
ARRAY_SIZE(qseecom_enable_sfpb_vectors), qseecom_enable_sfpb_vectors, }, { ARRAY_SIZE(qseecom_enable_dfab_sfpb_vectors), qseecom_enable_dfab_sfpb_vectors, }, }; static struct msm_bus_scale_pdata qseecom_bus_pdata = { qseecom_hw_bus_scale_usecases, ARRAY_SIZE(qseecom_hw_bus_scale_usecases), .name = "qsee", }; static struct platform_device qseecom_device = { .name = "qseecom", .id = 0, .dev = { .platform_data = &qseecom_bus_pdata, }, }; #endif #if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \ defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE) || \ defined(CONFIG_CRYPTO_DEV_QCEDEV) || \ defined(CONFIG_CRYPTO_DEV_QCEDEV_MODULE) #define QCE_SIZE 0x10000 #define QCE_0_BASE 0x18500000 #define QCE_HW_KEY_SUPPORT 0 #define QCE_SHA_HMAC_SUPPORT 1 #define QCE_SHARE_CE_RESOURCE 1 #define QCE_CE_SHARED 0 /* Begin Bus scaling definitions */ static struct msm_bus_vectors crypto_hw_init_vectors[] = { { .src = MSM_BUS_MASTER_ADM_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_ADM_PORT1, .dst = MSM_BUS_SLAVE_GSBI1_UART, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors crypto_hw_active_vectors[] = { { .src = MSM_BUS_MASTER_ADM_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 70000000UL, .ib = 70000000UL, }, { .src = MSM_BUS_MASTER_ADM_PORT1, .dst = MSM_BUS_SLAVE_GSBI1_UART, .ab = 2480000000UL, .ib = 2480000000UL, }, }; static struct msm_bus_paths crypto_hw_bus_scale_usecases[] = { { ARRAY_SIZE(crypto_hw_init_vectors), crypto_hw_init_vectors, }, { ARRAY_SIZE(crypto_hw_active_vectors), crypto_hw_active_vectors, }, }; static struct msm_bus_scale_pdata crypto_hw_bus_scale_pdata = { crypto_hw_bus_scale_usecases, ARRAY_SIZE(crypto_hw_bus_scale_usecases), .name = "cryptohw", }; /* End Bus Scaling Definitions*/ static struct resource qcrypto_resources[] = { [0] = { .start = QCE_0_BASE, .end = QCE_0_BASE + QCE_SIZE - 1, .flags = IORESOURCE_MEM, }, [1] = { .name = "crypto_channels", .start = DMOV_CE_IN_CHAN, .end = DMOV_CE_OUT_CHAN, .flags = IORESOURCE_DMA, }, [2] = { .name = 
"crypto_crci_in", .start = DMOV_CE_IN_CRCI, .end = DMOV_CE_IN_CRCI, .flags = IORESOURCE_DMA, }, [3] = { .name = "crypto_crci_out", .start = DMOV_CE_OUT_CRCI, .end = DMOV_CE_OUT_CRCI, .flags = IORESOURCE_DMA, }, }; static struct resource qcedev_resources[] = { [0] = { .start = QCE_0_BASE, .end = QCE_0_BASE + QCE_SIZE - 1, .flags = IORESOURCE_MEM, }, [1] = { .name = "crypto_channels", .start = DMOV_CE_IN_CHAN, .end = DMOV_CE_OUT_CHAN, .flags = IORESOURCE_DMA, }, [2] = { .name = "crypto_crci_in", .start = DMOV_CE_IN_CRCI, .end = DMOV_CE_IN_CRCI, .flags = IORESOURCE_DMA, }, [3] = { .name = "crypto_crci_out", .start = DMOV_CE_OUT_CRCI, .end = DMOV_CE_OUT_CRCI, .flags = IORESOURCE_DMA, }, }; #endif #if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \ defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE) static struct msm_ce_hw_support qcrypto_ce_hw_suppport = { .ce_shared = QCE_CE_SHARED, .shared_ce_resource = QCE_SHARE_CE_RESOURCE, .hw_key_support = QCE_HW_KEY_SUPPORT, .sha_hmac = QCE_SHA_HMAC_SUPPORT, .bus_scale_table = &crypto_hw_bus_scale_pdata, }; static struct platform_device qcrypto_device = { .name = "qcrypto", .id = 0, .num_resources = ARRAY_SIZE(qcrypto_resources), .resource = qcrypto_resources, .dev = { .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &qcrypto_ce_hw_suppport, }, }; #endif #if defined(CONFIG_CRYPTO_DEV_QCEDEV) || \ defined(CONFIG_CRYPTO_DEV_QCEDEV_MODULE) static struct msm_ce_hw_support qcedev_ce_hw_suppport = { .ce_shared = QCE_CE_SHARED, .shared_ce_resource = QCE_SHARE_CE_RESOURCE, .hw_key_support = QCE_HW_KEY_SUPPORT, .sha_hmac = QCE_SHA_HMAC_SUPPORT, .bus_scale_table = &crypto_hw_bus_scale_pdata, }; static struct platform_device qcedev_device = { .name = "qce", .id = 0, .num_resources = ARRAY_SIZE(qcedev_resources), .resource = qcedev_resources, .dev = { .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &qcedev_ce_hw_suppport, }, }; #endif #ifdef CONFIG_HTC_BATT_8960 static int critical_alarm_voltage_mv[] = {3000, 3200, 3400}; static struct 
htc_battery_platform_data htc_battery_pdev_data = {
	.guage_driver = 0,
	/* Situations under which charge current is actively limited. */
	.chg_limit_active_mask = HTC_BATT_CHG_LIMIT_BIT_TALK |
								HTC_BATT_CHG_LIMIT_BIT_NAVI,
#ifdef CONFIG_DUTY_CYCLE_LIMIT
	.chg_limit_timer_sub_mask = HTC_BATT_CHG_LIMIT_BIT_THRML,
#endif
	.critical_low_voltage_mv = 3200,
	/* Alarm thresholds defined in critical_alarm_voltage_mv above. */
	.critical_alarm_vol_ptr = critical_alarm_voltage_mv,
	.critical_alarm_vol_cols =
		sizeof(critical_alarm_voltage_mv) / sizeof(int),
	.overload_vol_thr_mv = 4000,
	.overload_curr_thr_ma = 0,
	/* charger: all charger control is delegated to the PM8921 driver. */
	.icharger.name = "pm8921",
	.icharger.get_charging_source = pm8921_get_charging_source,
	.icharger.get_charging_enabled = pm8921_get_charging_enabled,
	.icharger.set_charger_enable = pm8921_charger_enable,
	.icharger.set_pwrsrc_enable = pm8921_pwrsrc_enable,
	.icharger.set_pwrsrc_and_charger_enable =
						pm8921_set_pwrsrc_and_charger_enable,
	.icharger.set_limit_charge_enable = pm8921_limit_charge_enable,
	.icharger.is_ovp = pm8921_is_charger_ovp,
	.icharger.is_batt_temp_fault_disable_chg =
						pm8921_is_batt_temp_fault_disable_chg,
	.icharger.charger_change_notifier_register =
						cable_detect_register_notifier,
	.icharger.dump_all = pm8921_dump_all,
	.icharger.get_attr_text = pm8921_charger_get_attr_text,
	/* gauge: fuel gauging likewise comes from the PM8921 BMS. */
	.igauge.name = "pm8921",
	.igauge.get_battery_voltage = pm8921_get_batt_voltage,
	.igauge.get_battery_current = pm8921_bms_get_batt_current,
	.igauge.get_battery_temperature = pm8921_get_batt_temperature,
	.igauge.get_battery_id = pm8921_get_batt_id,
	.igauge.get_battery_soc = pm8921_bms_get_batt_soc,
	.igauge.get_battery_cc = pm8921_bms_get_batt_cc,
	.igauge.store_battery_data = pm8921_bms_store_battery_data_emmc,
	.igauge.store_battery_ui_soc = pm8921_bms_store_battery_ui_soc,
	.igauge.get_battery_ui_soc = pm8921_bms_get_battery_ui_soc,
	.igauge.is_battery_temp_fault = pm8921_is_batt_temperature_fault,
	.igauge.is_battery_full = pm8921_is_batt_full,
	.igauge.get_attr_text = pm8921_gauge_get_attr_text,
	.igauge.register_lower_voltage_alarm_notifier =
					pm8xxx_batt_lower_alarm_register_notifier,
	.igauge.enable_lower_voltage_alarm = pm8xxx_batt_lower_alarm_enable,
	.igauge.set_lower_voltage_alarm_threshold =
					pm8xxx_batt_lower_alarm_threshold_set,
};

/* Platform device binding the battery platform data to the
 * "htc_battery" driver. */
static struct platform_device htc_battery_pdev = {
	.name = "htc_battery",
	.id = -1,
	.dev = {
		.platform_data = &htc_battery_pdev_data,
	},
};
#endif /* CONFIG_HTC_BATT_8960 */

/* Pairs a (system-numbered) PMIC GPIO with its pin configuration. */
struct pm8xxx_gpio_init {
	unsigned gpio;
	struct pm_gpio config;
};

/* Convenience initializer: translates a PMIC-local GPIO number via
 * PM8921_GPIO_PM_TO_SYS and fills in the full pm_gpio config. */
#define PM8XXX_GPIO_INIT(_gpio, _dir, _buf, _val, _pull, _vin, _out_strength, \
			_func, _inv, _disable) \
{ \
	.gpio = PM8921_GPIO_PM_TO_SYS(_gpio), \
	.config = { \
		.direction = _dir, \
		.output_buffer = _buf, \
		.output_value = _val, \
		.pull = _pull, \
		.vin_sel = _vin, \
		.out_strength = _out_strength, \
		.function = _func, \
		.inv_int_pol = _inv, \
		.disable_pin = _disable, \
	} \
}

/* SoC-side headset GPIO config for hardware revision XB and later. */
static uint32_t headset_gpio_xb[] = {
#ifdef CONFIG_HTC_HEADSET_ONE_WIRE
	GPIO_CFG(ELITE_GPIO_AUD_1WIRE_DO, 1, GPIO_CFG_OUTPUT,
		 GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
	GPIO_CFG(ELITE_GPIO_AUD_1WIRE_DI, 1, GPIO_CFG_INPUT,
		 GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
#endif
	GPIO_CFG(ELITE_GPIO_V_HSMIC_2v85_EN, 0, GPIO_CFG_OUTPUT,
		 GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
};

#ifdef CONFIG_HTC_HEADSET_ONE_WIRE
/* One-wire remote UART pins: entries 0/1 are GPIO-function (func 0,
 * plain I/O), entries 2/3 are func 1 (UART); uart_tx_gpo() switches
 * between them at runtime. */
static uint32_t headset_cpu_gpio[] = {
	GPIO_CFG(ELITE_GPIO_AUD_1WIRE_DI, 0, GPIO_CFG_INPUT,
		 GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
	GPIO_CFG(ELITE_GPIO_AUD_1WIRE_DO, 0, GPIO_CFG_OUTPUT,
		 GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
	GPIO_CFG(ELITE_GPIO_AUD_1WIRE_DI, 1, GPIO_CFG_INPUT,
		 GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
	GPIO_CFG(ELITE_GPIO_AUD_1WIRE_DO, 1, GPIO_CFG_OUTPUT,
		 GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
};
#endif

/* PMIC GPIO config for the remote-key sense line, revision XA. */
struct pm8xxx_gpio_init headset_pmic_gpio_rx_xa[] = {
	PM8XXX_GPIO_INIT(ELITE_PMGPIO_AUD_REMO_PRESz, PM_GPIO_DIR_IN,
			 PM_GPIO_OUT_BUF_CMOS, 0, PM_GPIO_PULL_NO,
			 PM_GPIO_VIN_L17, PM_GPIO_STRENGTH_LOW,
			 PM_GPIO_FUNC_NORMAL, 0, 0),
};

/* PMIC GPIO config for revision XB (entry continues on the next line). */
struct pm8xxx_gpio_init headset_pmic_gpio_rx_xb[] = {
#ifdef CONFIG_HTC_HEADSET_ONE_WIRE
	PM8XXX_GPIO_INIT(ELITE_PMGPIO_AUD_1WIRE_DO, PM_GPIO_DIR_IN,
			 PM_GPIO_OUT_BUF_CMOS, 0, PM_GPIO_PULL_NO,
			 PM_GPIO_VIN_S4,
			 /* continuation of the rx_xb entry opened above */
			 PM_GPIO_STRENGTH_NO, PM_GPIO_FUNC_NORMAL, 0, 0),
	PM8XXX_GPIO_INIT(ELITE_PMGPIO_AUD_1WIRE_O, PM_GPIO_DIR_OUT,
			 PM_GPIO_OUT_BUF_OPEN_DRAIN, 0, PM_GPIO_PULL_NO,
			 PM_GPIO_VIN_L17, PM_GPIO_STRENGTH_HIGH,
			 PM_GPIO_FUNC_NORMAL, 0, 1),
	PM8XXX_GPIO_INIT(ELITE_PMGPIO_AUD_1WIRE_DI, PM_GPIO_DIR_OUT,
			 PM_GPIO_OUT_BUF_CMOS, 0, PM_GPIO_PULL_NO,
			 PM_GPIO_VIN_S4, PM_GPIO_STRENGTH_HIGH,
			 PM_GPIO_FUNC_PAIRED, 0, 0),
#endif
	PM8XXX_GPIO_INIT(ELITE_PMGPIO_AUD_REMO_PRES, PM_GPIO_DIR_IN,
			 PM_GPIO_OUT_BUF_CMOS, 0, PM_GPIO_PULL_NO,
			 PM_GPIO_VIN_L17, PM_GPIO_STRENGTH_LOW,
			 PM_GPIO_FUNC_PAIRED, 0, 0),
};

/* PMIC GPIO config for revision XD: one-wire pins move to PAIRED
 * function and NC_40 is repurposed as the level-shifter enable
 * (see uart_lv_shift_en below). */
struct pm8xxx_gpio_init headset_pmic_gpio_rx_xd[] = {
#ifdef CONFIG_HTC_HEADSET_ONE_WIRE
	PM8XXX_GPIO_INIT(ELITE_PMGPIO_AUD_1WIRE_DO, PM_GPIO_DIR_IN,
			 PM_GPIO_OUT_BUF_CMOS, 0, PM_GPIO_PULL_NO,
			 PM_GPIO_VIN_S4, PM_GPIO_STRENGTH_NO,
			 PM_GPIO_FUNC_PAIRED, 0, 0),
	PM8XXX_GPIO_INIT(ELITE_PMGPIO_AUD_1WIRE_O, PM_GPIO_DIR_OUT,
			 PM_GPIO_OUT_BUF_CMOS, 0, PM_GPIO_PULL_NO,
			 PM_GPIO_VIN_L17, PM_GPIO_STRENGTH_HIGH,
			 PM_GPIO_FUNC_PAIRED, 0, 0),
	PM8XXX_GPIO_INIT(ELITE_PMGPIO_AUD_1WIRE_DI, PM_GPIO_DIR_OUT,
			 PM_GPIO_OUT_BUF_CMOS, 0, PM_GPIO_PULL_NO,
			 PM_GPIO_VIN_S4, PM_GPIO_STRENGTH_HIGH,
			 PM_GPIO_FUNC_PAIRED, 0, 0),
	PM8XXX_GPIO_INIT(ELITE_PMGPIO_NC_40, PM_GPIO_DIR_OUT,
			 PM_GPIO_OUT_BUF_CMOS, 0, PM_GPIO_PULL_NO,
			 PM_GPIO_VIN_S4, PM_GPIO_STRENGTH_LOW,
			 PM_GPIO_FUNC_NORMAL, 0, 0),
#endif
	PM8XXX_GPIO_INIT(ELITE_PMGPIO_AUD_REMO_PRES, PM_GPIO_DIR_IN,
			 PM_GPIO_OUT_BUF_CMOS, 0, PM_GPIO_PULL_NO,
			 PM_GPIO_VIN_L17, PM_GPIO_STRENGTH_LOW,
			 PM_GPIO_FUNC_PAIRED, 0, 0),
};

/*
 * headset_init() - configure headset GPIOs for the detected board rev.
 * Selects the XA, XB or XD pin table based on system_rev and applies it;
 * logs (but does not propagate) any per-pin config failure.
 * (Function body continues on the next source line.)
 */
static void headset_init(void)
{
	int i = 0;
	int rc = 0;

	pr_info("[HS_BOARD] (%s) Headset initiation (system_rev=%d)\n",
		__func__, system_rev);

	if (system_rev < 1) {
		/* XA: only the remote-press sense pin needs configuring. */
		rc = pm8xxx_gpio_config(headset_pmic_gpio_rx_xa[0].gpio,
					&headset_pmic_gpio_rx_xa[0].config);
		if (rc)
			pr_info("[HS_BOARD] %s: Config ERROR: GPIO=%u, rc=%d\n",
				__func__, headset_pmic_gpio_rx_xa[0].gpio, rc);
		return;
	} else if (system_rev < 3) {
		/* XB/XC: SoC pins first, then the PMIC XB table. */
		for (i = 0; i < ARRAY_SIZE(headset_gpio_xb); i++)
			/* continuation of headset_init() from the previous line */
			gpio_tlmm_config(headset_gpio_xb[i], GPIO_CFG_ENABLE);
		/* MIC bias off by default; headset_power() turns it on. */
		gpio_set_value(ELITE_GPIO_V_HSMIC_2v85_EN, 0);
		for (i = 0; i < ARRAY_SIZE(headset_pmic_gpio_rx_xb); i++) {
			rc = pm8xxx_gpio_config(headset_pmic_gpio_rx_xb[i].gpio,
					&headset_pmic_gpio_rx_xb[i].config);
			if (rc)
				pr_info("[HS_BOARD] %s: Config ERROR: GPIO=%u, rc=%d\n",
					__func__,
					headset_pmic_gpio_rx_xb[i].gpio, rc);
		}
	} else {
		/* XD and later: same SoC pins, PMIC XD table. */
		for (i = 0; i < ARRAY_SIZE(headset_gpio_xb); i++)
			gpio_tlmm_config(headset_gpio_xb[i], GPIO_CFG_ENABLE);
		gpio_set_value(ELITE_GPIO_V_HSMIC_2v85_EN, 0);
		for (i = 0; i < ARRAY_SIZE(headset_pmic_gpio_rx_xd); i++) {
			rc = pm8xxx_gpio_config(headset_pmic_gpio_rx_xd[i].gpio,
					&headset_pmic_gpio_rx_xd[i].config);
			if (rc)
				pr_info("[HS_BOARD] %s: Config ERROR: GPIO=%u, rc=%d\n",
					__func__,
					headset_pmic_gpio_rx_xd[i].gpio, rc);
		}
	}
}

/*
 * headset_power() - switch the 2.85 V headset MIC bias rail.
 * No-op on XA boards (system_rev < 1), which lack the enable GPIO.
 */
static void headset_power(int enable)
{
	if (system_rev < 1)
		return;

	pr_info("[HS_BOARD] (%s) Set MIC bias %d\n", __func__, enable);

	if (enable)
		gpio_set_value(ELITE_GPIO_V_HSMIC_2v85_EN, 1);
	else
		gpio_set_value(ELITE_GPIO_V_HSMIC_2v85_EN, 0);
}

#ifdef CONFIG_HTC_HEADSET_ONE_WIRE
/*
 * uart_tx_gpo() - drive the one-wire TX pin as GPIO, or hand it back
 * to the UART. Mode 0/1: GPIO function, output low/high. Mode 2:
 * restore UART function (headset_cpu_gpio[3]).
 */
static void uart_tx_gpo(int mode)
{
	switch (mode) {
	case 0:
		gpio_tlmm_config(headset_cpu_gpio[1], GPIO_CFG_ENABLE);
		gpio_set_value_cansleep(ELITE_GPIO_AUD_1WIRE_DO, 0);
		break;
	case 1:
		gpio_tlmm_config(headset_cpu_gpio[1], GPIO_CFG_ENABLE);
		gpio_set_value_cansleep(ELITE_GPIO_AUD_1WIRE_DO, 1);
		break;
	case 2:
		gpio_tlmm_config(headset_cpu_gpio[3], GPIO_CFG_ENABLE);
		break;
	}
}

/* Enable/disable the one-wire level shifter via PMIC GPIO NC_40. */
static void uart_lv_shift_en(int enable)
{
	gpio_set_value_cansleep(PM8921_GPIO_PM_TO_SYS(ELITE_PMGPIO_NC_40),
				enable);
}
#endif

/* HTC_HEADSET_PMIC Driver */
/* Platform data: hook/key detection GPIOs and the remote-button ADC
 * setup for the PMIC headset driver (struct continues on next line). */
static struct htc_headset_pmic_platform_data htc_headset_pmic_data = {
	.driver_flag = DRIVER_HS_PMIC_ADC,
	.hpin_gpio = PM8921_GPIO_PM_TO_SYS(
			ELITE_PMGPIO_EARPHONE_DETz),
	.hpin_irq = 0,
	/* XA default; headset_device_register() overrides for rev >= 1. */
	.key_gpio = PM8921_GPIO_PM_TO_SYS(
			ELITE_PMGPIO_AUD_REMO_PRESz),
	.key_irq = 0,
	.key_enable_gpio = 0,
	.adc_mic = 0,
	/* Remote key ADC windows: {key1 lo, hi, key2 lo, hi, key3 lo, hi}
	 * — presumably; exact semantics live in the headset driver. */
	.adc_remote = {0, 57, 58, 147, 148, 339},
	.adc_mpp = PM8XXX_AMUX_MPP_10,
	/* continuation of htc_headset_pmic_data from the previous line */
	.adc_amux = ADC_MPP_1_AMUX6,
	.hs_controller = 0,
	.hs_switch = 0,
};

static struct platform_device htc_headset_pmic = {
	.name = "HTC_HEADSET_PMIC",
	.id = -1,
	.dev = {
		.platform_data = &htc_headset_pmic_data,
	},
};

#ifdef CONFIG_HTC_HEADSET_ONE_WIRE
/* One-wire headset remote: level-shifter GPIO, per-key command bytes,
 * and the TTY device the protocol is driven through. */
static struct htc_headset_1wire_platform_data htc_headset_1wire_data = {
	.tx_level_shift_en = PM8921_GPIO_PM_TO_SYS(ELITE_PMGPIO_NC_40),
	.uart_sw = 0,
	.one_wire_remote = {0x7E, 0x7F, 0x7D, 0x7F, 0x7B, 0x7F},
	.remote_press = 0,
	.onewire_tty_dev = "/dev/ttyHSL1",
};

static struct platform_device htc_headset_one_wire = {
	.name = "HTC_HEADSET_1WIRE",
	.id = -1,
	.dev = {
		.platform_data = &htc_headset_1wire_data,
	},
};
#endif

/* HTC_HEADSET_MGR Driver */
static struct platform_device *headset_devices[] = {
	&htc_headset_pmic,
#ifdef CONFIG_HTC_HEADSET_ONE_WIRE
	&htc_headset_one_wire,
#endif
	/* Please put the headset detection driver on the last */
};

/* ADC ranges (in descending order) that classify the attached headset
 * type from the MIC-line ADC reading. */
static struct headset_adc_config htc_headset_mgr_config[] = {
	{
		.type = HEADSET_MIC,
		.adc_max = 1530,
		.adc_min = 1244,
	},
	{
		.type = HEADSET_BEATS,
		.adc_max = 1243,
		.adc_min = 916,
	},
	{
		.type = HEADSET_BEATS_SOLO,
		.adc_max = 915,
		.adc_min = 566,
	},
	{
		.type = HEADSET_MIC, /* Avoid to detect this type in FW */
		.adc_max = 565,
		.adc_min = 255,
	},
	{
		.type = HEADSET_NO_MIC,
		.adc_max = 254,
		.adc_min = 0,
	},
};

/* Top-level headset manager: ties together the child detection devices,
 * the ADC classification table, and the board hooks defined above. */
static struct htc_headset_mgr_platform_data htc_headset_mgr_data = {
	.driver_flag = DRIVER_HS_MGR_FLOAT_DET,
	.headset_devices_num = ARRAY_SIZE(headset_devices),
	.headset_devices = headset_devices,
	.headset_config_num = ARRAY_SIZE(htc_headset_mgr_config),
	.headset_config = htc_headset_mgr_config,
	.headset_init = headset_init,
	.headset_power = headset_power,
#ifdef CONFIG_HTC_HEADSET_ONE_WIRE
	.uart_tx_gpo = uart_tx_gpo,
	.uart_lv_shift_en = uart_lv_shift_en,
#endif
};

static struct platform_device htc_headset_mgr = {
	.name = "HTC_HEADSET_MGR",
	.id = -1,
	.dev = {
		.platform_data = &htc_headset_mgr_data,
	},
};

/*
 * headset_device_register() - fix up rev-dependent platform data and
 * register the headset manager (which probes its child devices).
 * (Function body continues on the next source line.)
 */
static void headset_device_register(void)
{
	pr_info("[HS_BOARD] (%s) Headset device register (system_rev=%d)\n",
		__func__,
system_rev); if (system_rev >= 1) htc_headset_pmic_data.key_gpio = PM8921_GPIO_PM_TO_SYS( ELITE_PMGPIO_AUD_REMO_PRES); platform_device_register(&htc_headset_mgr); } static struct synaptics_i2c_rmi_platform_data syn_ts_3k_2p5D_3030_data[] = { /* Synaptics 2.5D 3030 sensor */ { .version = 0x3332, .packrat_number = 1293981, .abs_x_min = 0, .abs_x_max = 1088, .abs_y_min = 0, .abs_y_max = 1770, .display_width = 720, .display_height = 1280, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 2, .tw_pin_mask = 0x0088, .report_type = SYN_AND_REPORT_TYPE_B, .psensor_detection = 1, .reduce_report_level = {60, 60, 50, 0, 0}, .config = { 0x30, 0x30, 0x00, 0x05, 0x00, 0x7F, 0x03, 0x1E, 0x05, 0x89, 0x00, 0x01, 0x01, 0x00, 0x10, 0x4C, 0x04, 0x75, 0x07, 0x02, 0x14, 0x1E, 0x05, 0x50, 0x7A, 0x2A, 0xEE, 0x02, 0x01, 0x3C, 0x1A, 0x01, 0x1B, 0x01, 0x66, 0x4E, 0x00, 0x50, 0x04, 0xBF, 0xD4, 0xC6, 0x00, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x00, 0x0A, 0x16, 0x0C, 0x0A, 0x00, 0x14, 0x0A, 0x40, 0x64, 0x07, 0xF6, 0xC8, 0xBE, 0x43, 0x2A, 0x05, 0x00, 0x00, 0x00, 0x00, 0x4C, 0x75, 0x74, 0x3C, 0x32, 0x00, 0x00, 0x00, 0x4C, 0x75, 0x74, 0x1E, 0x05, 0x00, 0x02, 0x18, 0x01, 0x80, 0x03, 0x0E, 0x1F, 0x12, 0x62, 0x00, 0x13, 0x04, 0x1B, 0x00, 0x10, 0x0A, 0x60, 0x68, 0x60, 0x68, 0x60, 0x68, 0x60, 0x48, 0x33, 0x31, 0x30, 0x2E, 0x2C, 0x2A, 0x29, 0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0xA0, 0x0F, 0x00, 0x3C, 0x00, 0xC8, 0x00, 0xCD, 0x0A, 0xC0, 0xA0, 0x0F, 0x00, 0xC0, 0x19, 0x05, 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x02, 0x40, 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x10, 0x68, 0x66, 0x5E, 0x62, 0x66, 0x6A, 0x6E, 0x56, 0x00, 0x78, 0x00, 0x10, 0x28, 0x00, 0x00, 0x00, 0x05, 0x0A, 0x10, 0x16, 0x1C, 0x22, 0x24, 0x00, 0x31, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x51, 0x51, 0x51, 0x51, 0x51, 0x51, 0x51, 
0x51, 0xCD, 0x0D, 0x04, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x0F, 0x01, 0x4F, 0x53, }, }, { .version = 0x3330, .packrat_number = 1100755, .abs_x_min = 0, .abs_x_max = 1100, .abs_y_min = 0, .abs_y_max = 1770, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 2, .large_obj_check = 1, .tw_pin_mask = 0x0088, .report_type = SYN_AND_REPORT_TYPE_B, .segmentation_bef_unlock = 0x50, .reduce_report_level = {60, 60, 50, 0, 0}, .customer_register = {0xF9, 0x64, 0x05, 0x64}, .multitouch_calibration = 1, .config = {0x30, 0x32, 0x30, 0x34, 0x00, 0x3F, 0x03, 0x1E, 0x05, 0xB1, 0x09, 0x0B, 0x01, 0x01, 0x00, 0x00, 0x4C, 0x04, 0x75, 0x07, 0x02, 0x14, 0x1E, 0x05, 0x2D, 0x6C, 0x19, 0x7B, 0x07, 0x01, 0x3C, 0x1B, 0x01, 0x1C, 0x01, 0x66, 0x4E, 0x00, 0x50, 0x10, 0xB5, 0x3F, 0xBE, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x00, 0x0A, 0x30, 0x32, 0xA2, 0x02, 0x32, 0x05, 0x0F, 0x96, 0x16, 0x0C, 0x00, 0x02, 0x18, 0x01, 0x80, 0x03, 0x0E, 0x1F, 0x12, 0x63, 0x00, 0x13, 0x04, 0x00, 0x00, 0x08, 0xFF, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0x60, 0x60, 0x60, 0x68, 0x68, 0x68, 0x68, 0x68, 0x32, 0x31, 0x2F, 0x2E, 0x2C, 0x2B, 0x29, 0x28, 0x01, 0x06, 0x0B, 0x10, 0x15, 0x1A, 0x20, 0x26, 0x00, 
0xA0, 0x0F, 0xCD, 0x3C, 0x00, 0xC8, 0x00, 0xB3, 0xC8, 0xCD, 0xA0, 0x0F, 0x00, 0xC0, 0x80, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x03, 0x03, 0x0B, 0x04, 0x03, 0x02, 0x02, 0x05, 0x20, 0x20, 0x70, 0x30, 0x20, 0x10, 0x10, 0x30, 0x58, 0x5C, 0x5B, 0x6F, 0x66, 0x4F, 0x52, 0x66, 0x00, 0xA0, 0x00, 0x10, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x51, 0x51, 0x51, 0x51, 0xCD, 0x0D, 0x04} }, { .version = 0x3330, .packrat_number = 1100755, .abs_x_min = 0, .abs_x_max = 1100, .abs_y_min = 0, .abs_y_max = 1770, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 2, .large_obj_check = 1, .tw_pin_mask = 0x0088, .report_type = SYN_AND_REPORT_TYPE_B, .segmentation_bef_unlock = 0x50, .reduce_report_level = {60, 60, 50, 0, 0}, .customer_register = {0xF9, 0x64, 0x05, 0x64}, .multitouch_calibration = 1, .config = {0x30, 0x32, 0x30, 0x33, 0x00, 0x3F, 0x03, 0x1E, 0x05, 0xB1, 0x09, 0x0B, 0x01, 0x01, 0x00, 0x00, 0x4C, 0x04, 0x75, 0x07, 0x02, 0x14, 0x1E, 0x05, 0x2D, 0x6C, 0x19, 0x7B, 0x07, 0x01, 0x3C, 0x1B, 0x01, 0x1C, 0x01, 0x66, 0x4E, 0x00, 0x50, 0x10, 0xB5, 0x3F, 0xBE, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x00, 0x0A, 0x30, 0x32, 0xA2, 0x02, 0x32, 0x05, 0x0F, 0x96, 0x16, 0x0C, 0x00, 0x02, 0x18, 0x01, 0x80, 0x03, 0x0E, 0x1F, 0x12, 0x63, 0x00, 0x13, 0x04, 0x00, 0x00, 0x08, 0xFF, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xA0, 0xA0, 0xA8, 0xA8, 0xA8, 0xA8, 
0x88, 0x47, 0x46, 0x44, 0x42, 0x40, 0x3F, 0x3D, 0x3B, 0x01, 0x04, 0x08, 0x0C, 0x10, 0x14, 0x18, 0x1C, 0x00, 0xA0, 0x0F, 0xCD, 0x3C, 0x00, 0xC8, 0x00, 0xB3, 0xC8, 0xCD, 0xA0, 0x0F, 0x00, 0xC0, 0x80, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x03, 0x04, 0x04, 0x03, 0x04, 0x08, 0x03, 0x02, 0x20, 0x30, 0x30, 0x20, 0x30, 0x50, 0x20, 0x10, 0x58, 0x66, 0x69, 0x60, 0x6F, 0x5F, 0x68, 0x50, 0x00, 0xA0, 0x00, 0x10, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x51, 0x51, 0x51, 0x51, 0xCD, 0x0D, 0x04} }, { .version = 0x3330, .packrat_number = 1091741, .abs_x_min = 0, .abs_x_max = 1100, .abs_y_min = 0, .abs_y_max = 1770, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 2, .large_obj_check = 1, .tw_pin_mask = 0x0088, .report_type = SYN_AND_REPORT_TYPE_B, .config = {0x30, 0x32, 0x30, 0x32, 0x80, 0x3F, 0x03, 0x1E, 0x05, 0xB1, 0x09, 0x0B, 0x01, 0x01, 0x00, 0x00, 0x4C, 0x04, 0x75, 0x07, 0x02, 0x14, 0x1E, 0x05, 0x2D, 0x9F, 0x0C, 0x5C, 0x02, 0x01, 0x3C, 0x1B, 0x01, 0x1C, 0x01, 0x66, 0x4E, 0x00, 0x50, 0x10, 0xB5, 0x3F, 0xBE, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x00, 0x0A, 0x30, 0x32, 0xA2, 0x02, 0x32, 0x0F, 0x0F, 0x96, 0x16, 0x0C, 0x00, 0x02, 0xFC, 0x00, 0x80, 0x02, 0x0E, 0x1F, 0x12, 0x63, 0x00, 0x19, 0x08, 0x00, 0x00, 0x08, 0xFF, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x80, 0x80, 0x80, 0x80, 0x60, 0x60, 0x60, 0x3C, 0x3A, 0x38, 0x36, 0x34, 0x33, 
0x31, 0x2F, 0x01, 0x06, 0x0C, 0x11, 0x16, 0x1B, 0x21, 0x27, 0x00, 0xA0, 0x0F, 0xCD, 0x64, 0x00, 0x20, 0x4E, 0xB3, 0xC8, 0xCD, 0xA0, 0x0F, 0x00, 0xC0, 0x80, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x03, 0x03, 0x03, 0x05, 0x02, 0x03, 0x05, 0x02, 0x20, 0x20, 0x20, 0x30, 0x10, 0x20, 0x30, 0x10, 0x5C, 0x60, 0x64, 0x5D, 0x50, 0x6E, 0x66, 0x58, 0x00, 0xC8, 0x00, 0x10, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x51, 0x51, 0x51, 0x51, 0xCD, 0x0D, 0x04} }, { .version = 0x3330, .abs_x_min = 0, .abs_x_max = 1100, .abs_y_min = 0, .abs_y_max = 1770, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 2, .large_obj_check = 1, .tw_pin_mask = 0x0088, .report_type = SYN_AND_REPORT_TYPE_B, .config = {0x30, 0x32, 0x30, 0x31, 0x00, 0x3F, 0x03, 0x1E, 0x05, 0xB1, 0x09, 0x0F, 0x32, 0x32, 0x00, 0x00, 0x4C, 0x04, 0x75, 0x07, 0x02, 0x14, 0x1E, 0x05, 0x28, 0xF5, 0x28, 0x1E, 0x05, 0x01, 0x3C, 0x30, 0x00, 0x30, 0x00, 0xCD, 0x4C, 0x00, 0x50, 0xF4, 0xEB, 0x97, 0xED, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x00, 0x0A, 0x00, 0x08, 0xA0, 0x01, 0x31, 0x02, 0x01, 0xA0, 0x16, 0x0C, 0x00, 0x02, 0x05, 0x01, 0x80, 0x03, 0x0E, 0x1F, 0x00, 0x51, 0x00, 0x19, 0x04, 0x00, 0x00, 0x10, 0xFF, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x80, 0x80, 0x80, 0x80, 0x60, 0x60, 0x60, 0x3C, 0x3A, 0x38, 0x36, 0x34, 0x33, 0x31, 0x2F, 0x01, 0x06, 0x0C, 0x11, 0x16, 0x1B, 0x21, 0x27, 0x00, 0xD0, 
0x07, 0xFD, 0x3C, 0x00, 0x64, 0x00, 0xCD, 0xC8, 0x80, 0xD0, 0x07, 0x00, 0xC0, 0x80, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x02, 0x02, 0x04, 0x03, 0x04, 0x04, 0x03, 0x04, 0x20, 0x20, 0x30, 0x20, 0x30, 0x30, 0x20, 0x30, 0x77, 0x7C, 0x60, 0x58, 0x66, 0x69, 0x60, 0x6F, 0x00, 0x3C, 0x00, 0x10, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x51, 0x51, 0x51, 0x51, 0xCD, 0x0D, 0x04} }, { .version = 0x3230, .abs_x_min = 0, .abs_x_max = 1000, .abs_y_min = 0, .abs_y_max = 1770, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 1, .large_obj_check = 1, .tw_pin_mask = 0x0088, .config = {0x30, 0x32, 0x30, 0x30, 0x84, 0x0F, 0x03, 0x1E, 0x05, 0x20, 0xB1, 0x08, 0x0B, 0x19, 0x19, 0x00, 0x00, 0xE8, 0x03, 0x75, 0x07, 0x1E, 0x05, 0x2D, 0x0E, 0x06, 0xD4, 0x01, 0x01, 0x48, 0xFD, 0x41, 0xFE, 0x00, 0x50, 0x65, 0x4E, 0xFF, 0xBA, 0xBF, 0xC0, 0x00, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xB2, 0x00, 0x02, 0xF1, 0x00, 0x80, 0x02, 0x0D, 0x1E, 0x00, 0x4D, 0x00, 0x19, 0x04, 0x1E, 0x00, 0x10, 0xFF, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x80, 0x80, 0x80, 0x80, 0x60, 0x60, 0x60, 0x3C, 0x3A, 0x38, 0x36, 0x34, 0x33, 0x31, 0x2F, 0x01, 0x06, 0x0C, 0x11, 0x16, 0x1B, 0x21, 0x27, 0x00, 0x41, 0x04, 0x80, 0x41, 0x04, 0xE1, 0x28, 0xC0, 0x14, 0xCC, 0x81, 0x0D, 0x00, 0xC0, 0x80, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x02, 0x02, 0x02, 0x03, 0x07, 0x03, 0x0B, 
0x03, 0x20, 0x20, 0x20, 0x20, 0x50, 0x20, 0x70, 0x20, 0x73, 0x77, 0x7B, 0x56, 0x5F, 0x5C, 0x5B, 0x64, 0x48, 0x41, 0x00, 0x1E, 0x19, 0x05, 0xFD, 0xFE, 0x3D, 0x08} }, { .version = 0x0000 }, }; static struct synaptics_i2c_rmi_platform_data syn_ts_3k_2p5D_7070_data[] = { /* Synaptics 2.5D 7070 sensor */ { .version = 0x3332, .packrat_number = 1293981, .abs_x_min = 0, .abs_x_max = 1088, .abs_y_min = 0, .abs_y_max = 1770, .display_width = 720, .display_height = 1280, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 2, .tw_pin_mask = 0x0088, .report_type = SYN_AND_REPORT_TYPE_B, .psensor_detection = 1, .reduce_report_level = {60, 60, 50, 0, 0}, .config = { 0x30, 0x30, 0x30, 0x33, 0x00, 0x7F, 0x03, 0x1E, 0x05, 0x89, 0x00, 0x01, 0x01, 0x00, 0x10, 0x4C, 0x04, 0x75, 0x07, 0x02, 0x14, 0x41, 0x05, 0x50, 0xAE, 0x27, 0x04, 0x03, 0x01, 0x3C, 0x19, 0x01, 0x1E, 0x01, 0x66, 0x4E, 0x00, 0x50, 0x46, 0xBA, 0x1B, 0xC1, 0x00, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x00, 0x0A, 0x16, 0x0C, 0x0A, 0x00, 0x14, 0x0A, 0x40, 0x64, 0x07, 0x56, 0xC8, 0xBE, 0x43, 0x2A, 0x05, 0x00, 0x00, 0x00, 0x00, 0x4C, 0x75, 0x74, 0x3C, 0x32, 0x00, 0x00, 0x00, 0x4C, 0x75, 0x74, 0x1E, 0x05, 0x00, 0x02, 0xFA, 0x00, 0x80, 0x03, 0x0E, 0x1F, 0x12, 0x64, 0x00, 0x13, 0x04, 0x1B, 0x00, 0x10, 0x0A, 0x60, 0x60, 0x68, 0x68, 0x60, 0x68, 0x60, 0x48, 0x32, 0x31, 0x2F, 0x2D, 0x2C, 0x2A, 0x29, 0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xA0, 0x0F, 0x00, 0x3C, 0x00, 0xC8, 0x00, 0xCD, 0x0A, 0xC0, 0xA0, 0x0F, 0x00, 0xC0, 0x19, 0x03, 0x03, 0x0B, 0x04, 0x03, 0x03, 0x03, 0x09, 0x20, 0x20, 0x70, 0x20, 0x20, 0x20, 0x20, 0x50, 0x58, 0x5C, 0x5B, 0x4A, 0x66, 0x6A, 0x6E, 0x5F, 0x00, 0x78, 0x00, 0x10, 0x28, 0x00, 0x00, 0x00, 0x05, 0x0A, 0x0F, 0x14, 0x1A, 0x20, 0x24, 0x00, 0x31, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x51, 0x51, 
0x51, 0x51, 0x51, 0x51, 0x51, 0x51, 0xCD, 0x0D, 0x04, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0xCD, 0x0C, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x7A, 0x7C, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x0F, 0x01, 0x4F, 0x53, }, }, { .version = 0x3330, .packrat_number = 1100755, .abs_x_min = 0, .abs_x_max = 1100, .abs_y_min = 0, .abs_y_max = 1770, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 2, .large_obj_check = 1, .tw_pin_mask = 0x0088, .report_type = SYN_AND_REPORT_TYPE_B, .segmentation_bef_unlock = 0x50, .reduce_report_level = {60, 60, 50, 0, 0}, .customer_register = {0xF9, 0x64, 0x05, 0x64}, .multitouch_calibration = 1, .config = {0x30, 0x31, 0x30, 0x34, 0x00, 0x3F, 0x03, 0x1E, 0x05, 0xB1, 0x09, 0x0B, 0x01, 0x01, 0x00, 0x00, 0x4C, 0x04, 0x75, 0x07, 0x02, 0x14, 0x1E, 0x05, 0x2D, 0x67, 0x12, 0x00, 0x03, 0x01, 0x3C, 0x19, 0x01, 0x1A, 0x01, 0x0A, 0x4F, 0x71, 0x51, 0xE0, 0xAB, 0xC8, 0xAF, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x00, 0x0A, 0x30, 0x32, 0xA2, 0x02, 0x2D, 0x05, 0x0F, 0x60, 0x16, 0x0C, 0x00, 0x02, 0x18, 0x01, 0x80, 0x03, 0x0E, 0x1F, 0x11, 0x62, 0x00, 0x13, 0x04, 0x1B, 0x00, 0x08, 0xFF, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0x60, 0x60, 0x60, 0x60, 0x60, 0x60, 0x60, 0x60, 0x33, 0x32, 0x30, 0x2E, 0x2D, 0x2B, 0x2A, 0x28, 0x00, 0x04, 0x09, 0x0E, 
0x13, 0x18, 0x1E, 0x25, 0x00, 0xA0, 0x0F, 0x02, 0x3C, 0x00, 0xC8, 0x00, 0xDA, 0xC8, 0xCD, 0xA0, 0x0F, 0x00, 0xC0, 0x80, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x05, 0x03, 0x04, 0x05, 0x03, 0x05, 0x07, 0x02, 0x40, 0x20, 0x30, 0x40, 0x20, 0x30, 0x40, 0x10, 0x68, 0x5A, 0x69, 0x74, 0x64, 0x5D, 0x5C, 0x54, 0x00, 0xC8, 0x00, 0x10, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x51, 0x51, 0x51, 0x51, 0xCD, 0x0D, 0x04} }, { .version = 0x3330, .packrat_number = 1100755, .abs_x_min = 0, .abs_x_max = 1100, .abs_y_min = 0, .abs_y_max = 1770, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 2, .large_obj_check = 1, .tw_pin_mask = 0x0088, .report_type = SYN_AND_REPORT_TYPE_B, .segmentation_bef_unlock = 0x50, .reduce_report_level = {60, 60, 50, 0, 0}, .customer_register = {0xF9, 0x64, 0x05, 0x64}, .multitouch_calibration = 1, .config = {0x30, 0x31, 0x30, 0x33, 0x00, 0x3F, 0x03, 0x1E, 0x05, 0xB1, 0x09, 0x0B, 0x01, 0x01, 0x00, 0x00, 0x4C, 0x04, 0x75, 0x07, 0x02, 0x14, 0x1E, 0x05, 0x2D, 0x67, 0x12, 0x00, 0x03, 0x01, 0x3C, 0x19, 0x01, 0x1A, 0x01, 0x0A, 0x4F, 0x71, 0x51, 0xE0, 0xAB, 0xC8, 0xAF, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x00, 0x0A, 0x30, 0x32, 0xA2, 0x02, 0x2D, 0x05, 0x0F, 0x60, 0x16, 0x0C, 0x00, 0x02, 0x18, 0x01, 0x80, 0x03, 0x0E, 0x1F, 0x11, 0x62, 0x00, 0x13, 0x04, 0x1B, 0x00, 0x08, 0xFF, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0xA0, 0xA0, 
0xA0, 0xA0, 0xA0, 0xA0, 0x80, 0x80, 0x45, 0x44, 0x42, 0x40, 0x3F, 0x3D, 0x3B, 0x3A, 0x00, 0x04, 0x09, 0x0E, 0x13, 0x18, 0x1E, 0x25, 0x00, 0xA0, 0x0F, 0x02, 0x3C, 0x00, 0xC8, 0x00, 0xDA, 0xC8, 0xCD, 0xA0, 0x0F, 0x00, 0xC0, 0x80, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x05, 0x03, 0x04, 0x05, 0x03, 0x05, 0x07, 0x02, 0x40, 0x20, 0x30, 0x40, 0x20, 0x30, 0x40, 0x10, 0x68, 0x5A, 0x69, 0x74, 0x64, 0x5D, 0x5C, 0x54, 0x00, 0xC8, 0x00, 0x10, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x51, 0x51, 0x51, 0x51, 0xCD, 0x0D, 0x04} }, { .version = 0x3330, .packrat_number = 1091741, .abs_x_min = 0, .abs_x_max = 1100, .abs_y_min = 0, .abs_y_max = 1770, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 2, .large_obj_check = 1, .tw_pin_mask = 0x0088, .report_type = SYN_AND_REPORT_TYPE_B, .config = {0x30, 0x31, 0x30, 0x32, 0x80, 0x3F, 0x03, 0x1E, 0x05, 0xB1, 0x09, 0x0B, 0x01, 0x01, 0x00, 0x00, 0x4C, 0x04, 0x75, 0x07, 0x02, 0x14, 0x1E, 0x05, 0x2D, 0x9A, 0x0B, 0xD4, 0x01, 0x01, 0x3C, 0x1A, 0x01, 0x1A, 0x01, 0x0A, 0x4F, 0x71, 0x51, 0xE0, 0xAB, 0xC8, 0xAF, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x00, 0x0A, 0x30, 0x32, 0xA2, 0x02, 0x2D, 0x0F, 0x0F, 0x60, 0x16, 0x0C, 0x00, 0x02, 0x18, 0x01, 0x80, 0x01, 0x0E, 0x1F, 0x11, 0x62, 0x00, 0x19, 0x04, 0x1B, 0x00, 0x08, 0xFF, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0x80, 0x80, 0x45, 
0x44, 0x42, 0x40, 0x3F, 0x3D, 0x3B, 0x3A, 0x00, 0x03, 0x07, 0x0B, 0x0F, 0x13, 0x17, 0x1C, 0x00, 0xD0, 0x07, 0x02, 0x3C, 0x00, 0x64, 0x00, 0xCD, 0xC8, 0x80, 0xD0, 0x07, 0x00, 0xC0, 0x80, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x04, 0x03, 0x02, 0x08, 0x02, 0x02, 0x0D, 0x02, 0x30, 0x20, 0x10, 0x50, 0x10, 0x10, 0x70, 0x10, 0x66, 0x5E, 0x49, 0x5F, 0x4F, 0x52, 0x5B, 0x57, 0x00, 0xC8, 0x00, 0x10, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x51, 0x51, 0x51, 0x51, 0xCD, 0x0D, 0x04} }, { .version = 0x3330, .abs_x_min = 0, .abs_x_max = 1100, .abs_y_min = 0, .abs_y_max = 1770, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 2, .large_obj_check = 1, .tw_pin_mask = 0x0088, .report_type = SYN_AND_REPORT_TYPE_B, .config = {0x30, 0x31, 0x30, 0x31, 0x00, 0x3F, 0x03, 0x1E, 0x05, 0xB1, 0x09, 0x0F, 0x32, 0x32, 0x00, 0x00, 0x4C, 0x04, 0x75, 0x07, 0x02, 0x14, 0x1E, 0x05, 0x28, 0xF5, 0x28, 0x1E, 0x05, 0x01, 0x3C, 0x30, 0x00, 0x30, 0x00, 0xCD, 0x4C, 0x00, 0x50, 0xF4, 0xEB, 0x97, 0xED, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x00, 0x0A, 0x00, 0x08, 0xA0, 0x01, 0x31, 0x02, 0x01, 0xA0, 0x16, 0x0C, 0x00, 0x02, 0x05, 0x01, 0x80, 0x03, 0x0E, 0x1F, 0x00, 0x51, 0x00, 0x19, 0x04, 0x00, 0x00, 0x10, 0xFF, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0x80, 0x80, 0x45, 0x44, 0x42, 0x40, 0x3F, 0x3D, 0x3B, 0x3A, 0x00, 0x03, 0x07, 0x0B, 0x0F, 
0x13, 0x17, 0x1C, 0x00, 0xD0, 0x07, 0xFD, 0x3C, 0x00, 0x64, 0x00, 0xCD, 0xC8, 0x80, 0xD0, 0x07, 0x00, 0xC0, 0x80, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x02, 0x02, 0x04, 0x03, 0x04, 0x04, 0x03, 0x04, 0x20, 0x20, 0x30, 0x20, 0x30, 0x30, 0x20, 0x30, 0x77, 0x7C, 0x60, 0x58, 0x66, 0x69, 0x60, 0x6F, 0x00, 0x3C, 0x00, 0x10, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x51, 0x51, 0x51, 0x51, 0xCD, 0x0D, 0x04} }, { .version = 0x3230, .abs_x_min = 0, .abs_x_max = 1000, .abs_y_min = 0, .abs_y_max = 1770, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 1, .large_obj_check = 1, .tw_pin_mask = 0x0088, .config = {0x30, 0x31, 0x30, 0x30, 0x84, 0x0F, 0x03, 0x1E, 0x05, 0x20, 0xB1, 0x08, 0x0B, 0x19, 0x19, 0x00, 0x00, 0xE8, 0x03, 0x75, 0x07, 0x1E, 0x05, 0x2D, 0x0E, 0x06, 0xD4, 0x01, 0x01, 0x48, 0xFD, 0x41, 0xFE, 0x00, 0x50, 0x65, 0x4E, 0xFF, 0xBA, 0xBF, 0xC0, 0x00, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xB2, 0x00, 0x02, 0xF1, 0x00, 0x80, 0x02, 0x0D, 0x1E, 0x00, 0x4D, 0x00, 0x19, 0x04, 0x1E, 0x00, 0x10, 0xFF, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0x80, 0x80, 0x45, 0x44, 0x42, 0x40, 0x3F, 0x3D, 0x3B, 0x3A, 0x00, 0x03, 0x07, 0x0B, 0x0F, 0x13, 0x17, 0x1C, 0x00, 0x41, 0x04, 0x80, 0x41, 0x04, 0xE1, 0x28, 0xC0, 0x14, 0xCC, 0x81, 0x0D, 0x00, 0xC0, 0x80, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x02, 0x02, 
/* Tail of the previous Synaptics config table (that entry begins above this
 * chunk), followed by syn_ts_3k_data[]: per-firmware Synaptics 3200 touch
 * panel configurations.  The driver matches an entry at probe time by
 * .version (and, where present, .packrat_number); the final { .version =
 * 0x0000 } entry is the table terminator.  The .config arrays are raw
 * controller register blobs supplied by the panel vendor -- do not edit
 * individual bytes by hand. */
0x02, 0x03, 0x07, 0x03, 0x0B, 0x03, 0x20, 0x20, 0x20, 0x20, 0x50, 0x20, 0x70, 0x20, 0x73, 0x77, 0x7B, 0x56, 0x5F, 0x5C, 0x5B, 0x64, 0x48, 0x41, 0x00, 0x1E, 0x19, 0x05, 0xFD, 0xFE, 0x3D, 0x08} }, { .version = 0x0000 }, }; static struct synaptics_i2c_rmi_platform_data syn_ts_3k_data[] = { /* Synaptics sensor */ { .version = 0x3332, .packrat_number = 1293981, .abs_x_min = 0, .abs_x_max = 1088, .abs_y_min = 0, .abs_y_max = 1770, .display_width = 720, .display_height = 1280, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 2, .tw_pin_mask = 0x0088, .report_type = SYN_AND_REPORT_TYPE_B, .psensor_detection = 1, .reduce_report_level = {60, 60, 50, 0, 0}, .config = { 0x30, 0x30, 0x00, 0x05, 0x00, 0x7F, 0x03, 0x1E, 0x05, 0x89, 0x00, 0x01, 0x01, 0x00, 0x10, 0x4C, 0x04, 0x75, 0x07, 0x02, 0x14, 0x1E, 0x05, 0x50, 0x7A, 0x2A, 0xEE, 0x02, 0x01, 0x3C, 0x1A, 0x01, 0x1B, 0x01, 0x66, 0x4E, 0x00, 0x50, 0x04, 0xBF, 0xD4, 0xC6, 0x00, 0xC8, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x00, 0x0A, 0x16, 0x0C, 0x0A, 0x00, 0x14, 0x0A, 0x40, 0x64, 0x07, 0xF6, 0xC8, 0xBE, 0x43, 0x2A, 0x05, 0x00, 0x00, 0x00, 0x00, 0x4C, 0x75, 0x74, 0x3C, 0x32, 0x00, 0x00, 0x00, 0x4C, 0x75, 0x74, 0x1E, 0x05, 0x00, 0x02, 0x18, 0x01, 0x80, 0x03, 0x0E, 0x1F, 0x12, 0x62, 0x00, 0x13, 0x04, 0x1B, 0x00, 0x10, 0x0A, 0x60, 0x68, 0x60, 0x68, 0x60, 0x68, 0x60, 0x48, 0x33, 0x31, 0x30, 0x2E, 0x2C, 0x2A, 0x29, 0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0xA0, 0x0F, 0x00, 0x3C, 0x00, 0xC8, 0x00, 0xCD, 0x0A, 0xC0, 0xA0, 0x0F, 0x00, 0xC0, 0x19, 0x05, 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x02, 0x40, 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x10, 0x68, 0x66, 0x5E, 0x62, 0x66, 0x6A, 0x6E, 0x56, 0x00, 0x78, 0x00, 0x10, 0x28, 0x00, 0x00, 0x00, 0x05, 0x0A, 0x10, 0x16, 0x1C, 0x22, 0x24, 0x00, 0x31, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x51, 
/* continuation of the .version == 0x3332 (packrat 1293981) register blob,
 * then the .version == 0x3330 / packrat 1100755 entry (adds large-object
 * check, segmentation-before-unlock, customer registers and multitouch
 * calibration) */
0x51, 0x51, 0x51, 0x51, 0x51, 0x51, 0x51, 0xCD, 0x0D, 0x04, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x0F, 0x01, 0x4F, 0x53, }, }, { .version = 0x3330, .packrat_number = 1100755, .abs_x_min = 0, .abs_x_max = 1100, .abs_y_min = 0, .abs_y_max = 1770, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 2, .large_obj_check = 1, .tw_pin_mask = 0x0088, .report_type = SYN_AND_REPORT_TYPE_B, .segmentation_bef_unlock = 0x50, .reduce_report_level = {60, 60, 50, 0, 0}, .customer_register = {0xF9, 0x64, 0x05, 0x64}, .multitouch_calibration = 1, .config = {0x30, 0x30, 0x30, 0x33, 0x80, 0x3F, 0x03, 0x1E, 0x05, 0xB1, 0x09, 0x0B, 0x01, 0x01, 0x00, 0x00, 0x4C, 0x04, 0x75, 0x07, 0x02, 0x14, 0x24, 0x05, 0x2D, 0x66, 0x26, 0x9C, 0x04, 0x01, 0x3C, 0x1B, 0x01, 0x1A, 0x01, 0x0A, 0x4F, 0x71, 0x51, 0x80, 0xBB, 0x80, 0xBB, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x00, 0x0A, 0x30, 0x32, 0xA1, 0x02, 0x37, 0x05, 0x0F, 0xAE, 0x16, 0x0C, 0x00, 0x02, 0x5E, 0x01, 0x80, 0x02, 0x0E, 0x1F, 0x11, 0x63, 0x00, 0x19, 0x04, 0x1B, 0x00, 0x08, 0xFF, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0x80, 0x80, 0x80, 0x44, 0x43, 0x41, 0x3F, 0x3E, 0x3C, 0x3A, 0x38, 0x01, 0x04, 0x08, 
/* continuation of the packrat-1100755 blob, then the .version == 0x3330 /
 * packrat 1091741 entry (same geometry, no segmentation/customer-register
 * overrides) */
0x0C, 0x10, 0x14, 0x1A, 0x1F, 0x00, 0xD0, 0x07, 0x02, 0x3C, 0x00, 0x64, 0x00, 0xCD, 0xC8, 0x80, 0xD0, 0x07, 0x00, 0xC0, 0x80, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x03, 0x03, 0x03, 0x05, 0x02, 0x03, 0x05, 0x02, 0x20, 0x20, 0x20, 0x30, 0x10, 0x20, 0x30, 0x10, 0x5C, 0x60, 0x64, 0x5D, 0x50, 0x6E, 0x66, 0x58, 0x00, 0xC8, 0x00, 0x10, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x51, 0x51, 0x51, 0x51, 0xCD, 0x0D, 0x04} }, { .version = 0x3330, .packrat_number = 1091741, .abs_x_min = 0, .abs_x_max = 1100, .abs_y_min = 0, .abs_y_max = 1770, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 2, .large_obj_check = 1, .tw_pin_mask = 0x0088, .report_type = SYN_AND_REPORT_TYPE_B, .config = {0x30, 0x30, 0x30, 0x32, 0x80, 0x3F, 0x03, 0x1E, 0x05, 0xB1, 0x09, 0x0B, 0x01, 0x01, 0x00, 0x00, 0x4C, 0x04, 0x75, 0x07, 0x02, 0x14, 0x24, 0x05, 0x2D, 0x66, 0x26, 0x9C, 0x04, 0x01, 0x3C, 0x1B, 0x01, 0x1A, 0x01, 0x0A, 0x4F, 0x71, 0x51, 0x80, 0xBB, 0x80, 0xBB, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x00, 0x0A, 0x30, 0x32, 0xA1, 0x02, 0x37, 0x0F, 0x0F, 0xAE, 0x16, 0x0C, 0x00, 0x02, 0x5E, 0x01, 0x80, 0x02, 0x0E, 0x1F, 0x11, 0x63, 0x00, 0x19, 0x04, 0x1B, 0x00, 0x08, 0xFF, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0x80, 0x80, 0x80, 0x44, 0x43, 0x41, 0x3F, 0x3E, 0x3C, 0x3A, 0x38, 0x01, 0x04, 0x08, 0x0C, 0x10, 0x14, 0x1A, 0x1F, 0x00, 0xD0, 
/* continuation of the packrat-1091741 blob, then a generic .version == 0x3330
 * entry with no packrat number -- the fallback for that firmware family */
0x07, 0x02, 0x3C, 0x00, 0x64, 0x00, 0xCD, 0xC8, 0x80, 0xD0, 0x07, 0x00, 0xC0, 0x80, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x03, 0x03, 0x03, 0x05, 0x02, 0x03, 0x05, 0x02, 0x20, 0x20, 0x20, 0x30, 0x10, 0x20, 0x30, 0x10, 0x5C, 0x60, 0x64, 0x5D, 0x50, 0x6E, 0x66, 0x58, 0x00, 0xC8, 0x00, 0x10, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x51, 0x51, 0x51, 0x51, 0xCD, 0x0D, 0x04} }, { .version = 0x3330, .abs_x_min = 0, .abs_x_max = 1100, .abs_y_min = 0, .abs_y_max = 1770, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 2, .large_obj_check = 1, .tw_pin_mask = 0x0088, .report_type = SYN_AND_REPORT_TYPE_B, .config = {0x30, 0x30, 0x30, 0x31, 0x00, 0x3F, 0x03, 0x1E, 0x05, 0xB1, 0x09, 0x0B, 0x19, 0x19, 0x00, 0x00, 0x4C, 0x04, 0x75, 0x07, 0x02, 0x14, 0x1E, 0x05, 0x2D, 0xA3, 0x07, 0xED, 0x01, 0x01, 0x3C, 0x26, 0x00, 0x26, 0x00, 0x00, 0x50, 0x00, 0x50, 0x30, 0xB9, 0x3E, 0xC5, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xB2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x00, 0x0A, 0x00, 0x08, 0xA2, 0x01, 0x30, 0x09, 0x03, 0x90, 0x16, 0x0C, 0x00, 0x02, 0x2F, 0x01, 0x80, 0x01, 0x0E, 0x1F, 0x12, 0x58, 0x00, 0x19, 0x04, 0x00, 0x00, 0x10, 0xFF, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0x80, 0x80, 0x80, 0x44, 0x43, 0x41, 0x3F, 0x3E, 0x3C, 0x3A, 0x38, 0x01, 0x04, 0x08, 0x0C, 0x10, 0x14, 0x1A, 0x1F, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0x00, 
/* continuation of the 0x3330 fallback blob, then the legacy .version == 0x3230
 * entry (narrower panel, 1000 max X; note it sets no .report_type, so the
 * driver default applies -- presumably type-A reporting; TODO confirm against
 * the synaptics_3k driver) */
0xC0, 0x80, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x02, 0x03, 0x04, 0x04, 0x03, 0x04, 0x08, 0x02, 0x20, 0x20, 0x30, 0x30, 0x20, 0x30, 0x50, 0x10, 0x7E, 0x58, 0x66, 0x69, 0x60, 0x6F, 0x5F, 0x4F, 0x00, 0xFF, 0xFF, 0x10, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x51, 0x51, 0x51, 0x51, 0xCD, 0x0D, 0x04} }, { .version = 0x3230, .abs_x_min = 0, .abs_x_max = 1000, .abs_y_min = 0, .abs_y_max = 1770, .gpio_irq = ELITE_GPIO_TP_ATTz, .default_config = 1, .large_obj_check = 1, .tw_pin_mask = 0x0088, .config = {0x30, 0x30, 0x30, 0x30, 0x84, 0x0F, 0x03, 0x1E, 0x05, 0x20, 0xB1, 0x08, 0x0B, 0x19, 0x19, 0x00, 0x00, 0xE8, 0x03, 0x75, 0x07, 0x1E, 0x05, 0x2D, 0x0E, 0x06, 0xD4, 0x01, 0x01, 0x48, 0xFD, 0x41, 0xFE, 0x00, 0x50, 0x65, 0x4E, 0xFF, 0xBA, 0xBF, 0xC0, 0x00, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x04, 0xB2, 0x00, 0x02, 0xF1, 0x00, 0x80, 0x02, 0x0D, 0x1E, 0x00, 0x4D, 0x00, 0x19, 0x04, 0x1E, 0x00, 0x10, 0xFF, 0x00, 0x06, 0x0C, 0x0D, 0x0B, 0x15, 0x17, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x11, 0x14, 0x12, 0x0F, 0x0E, 0x09, 0x0A, 0x07, 0x02, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x04, 0x05, 0x02, 0x06, 0x01, 0x0C, 0x07, 0x08, 0x0E, 0x10, 0x0F, 0x12, 0xFF, 0xFF, 0xFF, 0xFF, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0x80, 0x80, 0x80, 0x44, 0x43, 0x41, 0x3F, 0x3E, 0x3C, 0x3A, 0x38, 0x01, 0x04, 0x08, 0x0C, 0x10, 0x14, 0x1A, 0x1F, 0x00, 0x41, 0x04, 0x80, 0x41, 0x04, 0xE1, 0x28, 0xC0, 0x14, 0xCC, 0x81, 0x0D, 0x00, 0xC0, 0x80, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x02, 0x02, 0x02, 0x03, 0x07, 0x03, 0x0B, 0x03, 0x20, 0x20, 0x20, 0x20, 0x50, 0x20, 0x70, 0x20, 0x73, 0x77, 0x7B, 
/* End of the 0x3230 blob and the { .version = 0x0000 } terminator.  Then:
 * GSBI3 I2C board info binding syn_ts_3k_data to the SYNAPTICS_3200 device
 * (8-bit addr 0x40 -> 7-bit 0x20, IRQ on ELITE_GPIO_TP_ATTz); the two
 * virtual-key sysfs show() callbacks -- four-key (home/menu/back/search) and
 * three-key (back/home/app-switch) layouts, coordinates encoded as
 * "center_x:center_y:width:height" per the Android virtualkeys convention;
 * their kobj attributes (both expose the same sysfs filename, so only one
 * attribute group is expected to be registered per board revision); the
 * BMA250 G-sensor platform data; and the start of the AKM8975 compass
 * platform data. */
0x56, 0x5F, 0x5C, 0x5B, 0x64, 0x48, 0x41, 0x00, 0x1E, 0x19, 0x05, 0xFD, 0xFE, 0x3D, 0x08} }, { .version = 0x0000 }, }; static struct i2c_board_info msm_i2c_gsbi3_info[] = { { I2C_BOARD_INFO(SYNAPTICS_3200_NAME, 0x40 >> 1), .platform_data = &syn_ts_3k_data, .irq = MSM_GPIO_TO_INT(ELITE_GPIO_TP_ATTz) }, }; static ssize_t virtual_syn_keys_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, __stringify(EV_KEY) ":" __stringify(KEY_HOME) ":87:1345:110:100" ":" __stringify(EV_KEY) ":" __stringify(KEY_MENU) ":273:1345:106:100" ":" __stringify(EV_KEY) ":" __stringify(KEY_BACK) ":470:1345:120:100" ":" __stringify(EV_KEY) ":" __stringify(KEY_SEARCH) ":660:1345:110:100" "\n"); } static ssize_t virtual_syn_three_keys_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, __stringify(EV_KEY) ":" __stringify(KEY_BACK) ":112:1345:120:100" ":" __stringify(EV_KEY) ":" __stringify(KEY_HOME) ":360:1345:120:100" ":" __stringify(EV_KEY) ":" __stringify(KEY_APP_SWITCH) ":595:1345:120:100" "\n"); } static struct kobj_attribute syn_virtual_keys_attr = { .attr = { .name = "virtualkeys.synaptics-rmi-touchscreen", .mode = S_IRUGO, }, .show = &virtual_syn_keys_show, }; static struct kobj_attribute syn_three_virtual_keys_attr = { .attr = { .name = "virtualkeys.synaptics-rmi-touchscreen", .mode = S_IRUGO, }, .show = &virtual_syn_three_keys_show, }; static struct attribute *properties_attrs[] = { &syn_virtual_keys_attr.attr, NULL }; static struct attribute *three_virtual_key_properties_attrs[] = { &syn_three_virtual_keys_attr.attr, NULL }; static struct attribute_group properties_attr_group = { .attrs = properties_attrs, }; static struct attribute_group three_virtual_key_properties_attr_group = { .attrs = three_virtual_key_properties_attrs, }; static struct bma250_platform_data gsensor_bma250_platform_data = { .intr = ELITE_GPIO_GSENSOR_INT, .chip_layout = 1, }; static struct akm8975_platform_data compass_platform_data = { 
/* Remainder of the AKM8975 compass platform data; R3GD20 gyroscope platform
 * data (2000 dps full scale, identity axis map, 50 ms poll interval, FIFO
 * and watermark disabled); the GSBI12 I2C board infos for BMA250 (8-bit
 * 0x30 -> 7-bit 0x18), AKM8975 (0x1A -> 0x0D) and R3GD20 (0xD0 -> 0x68,
 * gyro IRQ line commented out); the MPU3050 platform data, whose accel
 * (secondary slave bus) and gyro use identity orientation matrices while the
 * compass (primary slave bus) uses a mirrored matrix; the MPU3050 board info
 * on GSBI12; and the PN544 NFC platform data plus the start of its I2C board
 * info. */
.layouts = ELITE_LAYOUTS, .use_pana_gyro = 0, }; static struct r3gd20_gyr_platform_data gyro_platform_data = { .fs_range = R3GD20_GYR_FS_2000DPS, .axis_map_x = 0, .axis_map_y = 1, .axis_map_z = 2, .negate_x = 0, .negate_y = 0, .negate_z = 0, .poll_interval = 50, .min_interval = R3GD20_MIN_POLL_PERIOD_MS, /*2 */ /*.gpio_int1 = DEFAULT_INT1_GPIO,*/ /*.gpio_int2 = DEFAULT_INT2_GPIO,*/ /* int for fifo */ .watermark = 0, .fifomode = 0, }; static struct i2c_board_info __initdata msm_i2c_sensor_gsbi12_info[] = { { I2C_BOARD_INFO(BMA250_I2C_NAME, 0x30 >> 1), .platform_data = &gsensor_bma250_platform_data, .irq = MSM_GPIO_TO_INT(ELITE_GPIO_GSENSOR_INT), }, { I2C_BOARD_INFO(AKM8975_I2C_NAME, 0x1A >> 1), .platform_data = &compass_platform_data, .irq = MSM_GPIO_TO_INT(ELITE_GPIO_COMPASS_INT), }, { I2C_BOARD_INFO(R3GD20_GYR_DEV_NAME, 0xD0 >> 1), .platform_data = &gyro_platform_data, /*.irq = MSM_GPIO_TO_INT(ELITE_GYRO_INT),*/ }, }; static struct mpu3050_platform_data mpu3050_data = { .int_config = 0x10, .orientation = { 1, 0, 0, 0, 1, 0, 0, 0, 1 }, .level_shifter = 0, .accel = { .get_slave_descr = get_accel_slave_descr, .adapt_num = MSM_8960_GSBI12_QUP_I2C_BUS_ID, /* The i2c bus to which the mpu device is connected */ .bus = EXT_SLAVE_BUS_SECONDARY, .address = 0x30 >> 1, .orientation = { 1, 0, 0, 0, 1, 0, 0, 0, 1 }, }, .compass = { .get_slave_descr = get_compass_slave_descr, .adapt_num = MSM_8960_GSBI12_QUP_I2C_BUS_ID, /* The i2c bus to which the mpu device is connected */ .bus = EXT_SLAVE_BUS_PRIMARY, .address = 0x1A >> 1, .orientation = { -1, 0, 0, 0, 1, 0, 0, 0, -1 }, }, }; static struct i2c_board_info __initdata mpu3050_GSBI12_boardinfo[] = { { I2C_BOARD_INFO("mpu3050", 0xD0 >> 1), .irq = MSM_GPIO_TO_INT(ELITE_GPIO_GYRO_INT), .platform_data = &mpu3050_data, }, }; static struct pn544_i2c_platform_data nfc_platform_data = { .irq_gpio = ELITE_GPIO_NFC_IRQ, .ven_gpio = ELITE_GPIO_NFC_VEN, .firm_gpio = ELITE_GPIO_NFC_DL_MODE, .ven_isinvert = 1, }; static struct i2c_board_info 
/* End of the PN544 board info (8-bit 0x50 -> 7-bit 0x28).  Then the CM3629
 * proximity/ALS sensor power plumbing: both helpers lazily acquire the
 * "8921_l6" regulator under capella_cm36282_lock and cache it in
 * PL_sensor_pwr (note: on regulator_get() failure the ERR_PTR value stays
 * cached, so every later call re-hits the IS_ERR branch).
 * capella_pl_sensor_lpm_power(1) hints L6 into low-power mode (100 uA
 * optimum-mode request); capella_pl_sensor_lpm_power(0) restores the normal
 * 100 mA request.  capella_cm36282_power(enable==1) programs L6 to 2.85 V
 * and enables it; for enable==0 it does nothing -- the rail is left on and
 * only the LPM hint above is used (presumably intentional; TODO confirm
 * with the cm3629 driver's power sequencing).  Finally the start of the
 * XD-revision CM36282 platform data. */
pn544_i2c_boardinfo[] = { { I2C_BOARD_INFO(PN544_I2C_NAME, 0x50 >> 1), .platform_data = &nfc_platform_data, .irq = MSM_GPIO_TO_INT(ELITE_GPIO_NFC_IRQ), }, }; static DEFINE_MUTEX(capella_cm36282_lock); static struct regulator *PL_sensor_pwr; static int capella_pl_sensor_lpm_power(uint8_t enable) { int ret = 0; int rc; mutex_lock(&capella_cm36282_lock); if (PL_sensor_pwr == NULL) { PL_sensor_pwr = regulator_get(NULL, "8921_l6"); } if (IS_ERR(PL_sensor_pwr)) { pr_err("[PS][cm3629] %s: Unable to get '8921_l6' \n", __func__); mutex_unlock(&capella_cm36282_lock); return -ENODEV; } if (enable == 1) { rc = regulator_set_optimum_mode(PL_sensor_pwr, 100); if (rc < 0) pr_err("[PS][cm3629] %s: enter lmp,set_optimum_mode l6 failed, rc=%d\n", __func__, rc); else pr_info("[PS][cm3629] %s: enter lmp,OK\n", __func__); } else { rc = regulator_set_optimum_mode(PL_sensor_pwr, 100000); if (rc < 0) pr_err("[PS][cm3629] %s: leave lmp,set_optimum_mode l6 failed, rc=%d\n", __func__, rc); else pr_info("[PS][cm3629] %s: leave lmp,OK\n", __func__); } mutex_unlock(&capella_cm36282_lock); return ret; } static int capella_cm36282_power(int pwr_device, uint8_t enable) { int ret = 0; int rc; mutex_lock(&capella_cm36282_lock); if (PL_sensor_pwr == NULL) { PL_sensor_pwr = regulator_get(NULL, "8921_l6"); } if (IS_ERR(PL_sensor_pwr)) { pr_err("[PS][cm3629] %s: Unable to get '8921_l6' \n", __func__); mutex_unlock(&capella_cm36282_lock); return -ENODEV; } if (enable == 1) { rc = regulator_set_voltage(PL_sensor_pwr, 2850000, 2850000); if (rc) pr_err("[PS][cm3629] %s: unable to regulator_set_voltage, rc:%d\n", __func__, rc); rc = regulator_enable(PL_sensor_pwr); if (rc) pr_err("[PS][cm3629]'%s' regulator enable L6 failed, rc=%d\n", __func__,rc); else pr_info("[PS][cm3629]'%s' L6 power on\n", __func__); } mutex_unlock(&capella_cm36282_lock); return ret; } static struct cm3629_platform_data cm36282_XD_pdata = { .model = CAPELLA_CM36282, .ps_select = CM3629_PS1_ONLY, .intr = 
/* End of the XD-revision CM36282 platform data and its I2C board info (8-bit
 * 0xC0 -> 7-bit 0x60, IRQ via PMIC GPIO); the production CM36282 platform
 * data -- identical ALS level table and golden ADC, but higher PS1
 * thresholds (19 vs 11) -- and its board info; the _GET_REGULATOR() helper
 * macro used by the MHL power functions below; and, under
 * CONFIG_FB_MSM_HDMI_MHL, the start of the MHL/USB mux GPIO config table.
 *
 * Fix in _GET_REGULATOR(): the error message printed IS_ERR(var) as the
 * "rc" value, which is just the boolean result of the ERR_PTR test (always
 * 1 on this path).  Print PTR_ERR(var) instead so the log shows the real
 * negative errno from regulator_get(); %ld already matches PTR_ERR's long
 * return type. */
PM8921_GPIO_PM_TO_SYS(ELITE_PMGPIO_PROXIMITY_INTz), .levels = { 0, 0, 23, 352, 1216, 3227, 5538, 8914, 10600, 65535}, .golden_adc = 3754, .power = capella_cm36282_power, .lpm_power = capella_pl_sensor_lpm_power, .cm3629_slave_address = 0xC0>>1, .ps1_thd_set = 11, .ps1_thd_no_cal = 0xF1, .ps1_thd_with_cal = 11, .ps_calibration_rule = 1, .ps_conf1_val = CM3629_PS_DR_1_80 | CM3629_PS_IT_1_6T | CM3629_PS1_PERS_4, .ps_conf2_val = CM3629_PS_ITB_1 | CM3629_PS_ITR_1 | CM3629_PS2_INT_DIS | CM3629_PS1_INT_DIS, .ps_conf3_val = CM3629_PS2_PROL_32, }; static struct i2c_board_info i2c_CM36282_XD_devices[] = { { I2C_BOARD_INFO(CM3629_I2C_NAME, 0xC0 >> 1), .platform_data = &cm36282_XD_pdata, .irq = PM8921_GPIO_IRQ(PM8921_IRQ_BASE, ELITE_PMGPIO_PROXIMITY_INTz), }, }; static struct cm3629_platform_data cm36282_pdata = { .model = CAPELLA_CM36282, .ps_select = CM3629_PS1_ONLY, .intr = PM8921_GPIO_PM_TO_SYS(ELITE_PMGPIO_PROXIMITY_INTz), .levels = { 0, 0, 23, 352, 1216, 3227, 5538, 8914, 10600, 65535}, .golden_adc = 3754, .power = capella_cm36282_power, .lpm_power = capella_pl_sensor_lpm_power, .cm3629_slave_address = 0xC0>>1, .ps1_thd_set = 19, .ps1_thd_no_cal = 0xF1, .ps1_thd_with_cal = 19, .ps_calibration_rule = 1, .ps_conf1_val = CM3629_PS_DR_1_80 | CM3629_PS_IT_1_6T | CM3629_PS1_PERS_4, .ps_conf2_val = CM3629_PS_ITB_1 | CM3629_PS_ITR_1 | CM3629_PS2_INT_DIS | CM3629_PS1_INT_DIS, .ps_conf3_val = CM3629_PS2_PROL_32, }; static struct i2c_board_info i2c_CM36282_devices[] = { { I2C_BOARD_INFO(CM3629_I2C_NAME, 0xC0 >> 1), .platform_data = &cm36282_pdata, .irq = PM8921_GPIO_IRQ(PM8921_IRQ_BASE, ELITE_PMGPIO_PROXIMITY_INTz), }, }; #define _GET_REGULATOR(var, name) do { \ var = regulator_get(NULL, name); \ if (IS_ERR(var)) { \ pr_err("'%s' regulator not found, rc=%ld\n", \ name, PTR_ERR(var)); \ var = NULL; \ return -ENODEV; \ } \ } while (0) #ifdef CONFIG_FB_MSM_HDMI_MHL static uint32_t mhl_usb_switch_output_table[] = { GPIO_CFG(ELITE_GPIO_MHL_USB_SELz, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, 
/* End of the XA MHL/USB mux GPIO table and the XB-revision variant (the mux
 * select moved to a different GPIO on rev XB).  config_gpio_table() applies
 * an array of GPIO_CFG words via gpio_tlmm_config(), stopping at the first
 * failure.  elite_usb_dpdn_switch() routes the D+/D- mux to USB or MHL:
 * active-high select ("polarity" == 1, high = MHL), choosing the XA or XB
 * GPIO by system_rev (< 1 means XA hardware), then notifies the SiI9234
 * driver of the new owner when that driver is built in.  The function body
 * continues on the next chunk line. */
GPIO_CFG_2MA), }; static uint32_t mhl_usb_switch_output_table0_XB[] = { GPIO_CFG(ELITE_GPIO_MHL_USB_SELz_XB, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), }; static void config_gpio_table(uint32_t *table, int len) { int n, rc; for (n = 0; n < len; n++) { rc = gpio_tlmm_config(table[n], GPIO_CFG_ENABLE); if (rc) { pr_err("%s: gpio_tlmm_config(%#x)=%d\n", __func__, table[n], rc); break; } } } static void elite_usb_dpdn_switch(int path) { switch (path) { case PATH_USB: case PATH_MHL: { int polarity = 1; /* high = mhl */ int mhl = (path == PATH_MHL); if (system_rev < 1) { config_gpio_table(mhl_usb_switch_output_table, ARRAY_SIZE(mhl_usb_switch_output_table)); gpio_set_value(ELITE_GPIO_MHL_USB_SELz, (mhl ^ !polarity) ? 1 : 0); pr_info("[CABLE] XA %s: Set %s path\n", __func__, mhl ? "MHL" : "USB"); } else { config_gpio_table(mhl_usb_switch_output_table0_XB, ARRAY_SIZE(mhl_usb_switch_output_table0_XB)); gpio_set_value(ELITE_GPIO_MHL_USB_SELz_XB, (mhl ^ !polarity) ? 1 : 0); pr_info("[CABLE] XB %s: Set %s path\n", __func__, mhl ? "MHL" : "USB"); } break; } } #ifdef CONFIG_FB_MSM_HDMI_MHL_SII9234 sii9234_change_usb_owner((path == PATH_MHL) ? 
/* Completion of elite_usb_dpdn_switch(); HDMI DDC/HPD pin configurations for
 * the powered-off state (inputs, HPD pulled down) and powered-on state (DDC
 * as function-1 outputs at 8 mA, HPD input pulled up);
 * mhl_sii9234_1v2_power(), which applies the matching pin table and toggles
 * the HDMI hot-plug-detect feature, using a static prev_on latch so repeated
 * calls with the same state are no-ops; then (SiI9234 only) the cached L10
 * (3.0 V) / S2 (1.3 V) regulator handles and mhl_sii9234_power_vote(), which
 * lazily gets+programs both rails via _GET_REGULATOR (note: _GET_REGULATOR
 * contains a bare "return -ENODEV" on failure) and enables or disables them
 * together.  The disable half of power_vote continues on the next line. */
1 : 0); #endif } uint32_t msm_hdmi_off_gpio[] = { GPIO_CFG(ELITE_GPIO_HDMI_DDC_CLK, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(ELITE_GPIO_HDMI_DDC_DATA, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(ELITE_GPIO_HDMI_HPD, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), }; uint32_t msm_hdmi_on_gpio[] = { GPIO_CFG(ELITE_GPIO_HDMI_DDC_CLK, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), GPIO_CFG(ELITE_GPIO_HDMI_DDC_DATA, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), GPIO_CFG(ELITE_GPIO_HDMI_HPD, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), }; static void mhl_sii9234_1v2_power(bool enable) { static bool prev_on; if (enable == prev_on) return; if (enable) { config_gpio_table(msm_hdmi_on_gpio, ARRAY_SIZE(msm_hdmi_on_gpio)); hdmi_hpd_feature(1); pr_info("%s(on): success\n", __func__); } else { config_gpio_table(msm_hdmi_off_gpio, ARRAY_SIZE(msm_hdmi_off_gpio)); hdmi_hpd_feature(0); pr_info("%s(off): success\n", __func__); } prev_on = enable; } #ifdef CONFIG_FB_MSM_HDMI_MHL_SII9234 static struct regulator *reg_8921_l10; static struct regulator *reg_8921_s2; static int mhl_sii9234_power_vote(bool enable) { int rc; if (!reg_8921_l10) { _GET_REGULATOR(reg_8921_l10, "8921_l10"); rc = regulator_set_voltage(reg_8921_l10, 3000000, 3000000); if (rc) { pr_err("%s: regulator_set_voltage reg_8921_l10 failed rc=%d\n", __func__, rc); return rc; } } if (!reg_8921_s2) { _GET_REGULATOR(reg_8921_s2, "8921_s2"); rc = regulator_set_voltage(reg_8921_s2, 1300000, 1300000); if (rc) { pr_err("%s: regulator_set_voltage reg_8921_s2 failed rc=%d\n", __func__, rc); return rc; } } if (enable) { if (reg_8921_l10) { rc = regulator_enable(reg_8921_l10); if (rc) pr_warning("'%s' regulator enable failed, rc=%d\n", "reg_8921_l10", rc); } if (reg_8921_s2) { rc = regulator_enable(reg_8921_s2); if (rc) pr_warning("'%s' regulator enable failed, rc=%d\n", "reg_8921_s2", rc); } pr_info("%s(on): success\n", __func__); } else { if (reg_8921_l10) { rc = 
/* Disable half of mhl_sii9234_power_vote(); then mhl_sii9234_all_power(),
 * which (with a prev_on latch) programs and enables -- or disables -- the
 * S4 1.8 V, L16 3.3 V and L12 1.2 V rails as a group for the MHL chip.
 * Enable failures abort with the errno; disable failures only warn so the
 * remaining rails are still turned off. */
regulator_disable(reg_8921_l10); if (rc) pr_warning("'%s' regulator disable failed, rc=%d\n", "reg_8921_l10", rc); } if (reg_8921_s2) { rc = regulator_disable(reg_8921_s2); if (rc) pr_warning("'%s' regulator disable failed, rc=%d\n", "reg_8921_s2", rc); } pr_info("%s(off): success\n", __func__); } return 0; } static struct regulator *reg_8921_l12; static struct regulator *reg_8921_s4; static struct regulator *reg_8921_l16; static int mhl_sii9234_all_power(bool enable) { static bool prev_on; int rc; if (enable == prev_on) return 0; if (!reg_8921_s4) _GET_REGULATOR(reg_8921_s4, "8921_s4"); if (!reg_8921_l16) _GET_REGULATOR(reg_8921_l16, "8921_l16"); if (!reg_8921_l12) _GET_REGULATOR(reg_8921_l12, "8921_l12"); if (enable) { rc = regulator_set_voltage(reg_8921_s4, 1800000, 1800000); if (rc) { pr_err("%s: regulator_set_voltage reg_8921_s4 failed rc=%d\n", __func__, rc); return rc; } rc = regulator_set_voltage(reg_8921_l16, 3300000, 3300000); if (rc) { pr_err("%s: regulator_set_voltage reg_8921_l16 failed rc=%d\n", __func__, rc); return rc; } rc = regulator_set_voltage(reg_8921_l12, 1200000, 1200000); if (rc) { pr_err("%s: regulator_set_voltage reg_8921_l12 failed rc=%d\n", __func__, rc); return rc; } rc = regulator_enable(reg_8921_s4); if (rc) { pr_err("'%s' regulator enable failed, rc=%d\n", "reg_8921_s4", rc); return rc; } rc = regulator_enable(reg_8921_l16); if (rc) { pr_err("'%s' regulator enable failed, rc=%d\n", "reg_8921_l16", rc); return rc; } rc = regulator_enable(reg_8921_l12); if (rc) { pr_err("'%s' regulator enable failed, rc=%d\n", "reg_8921_l12", rc); return rc; } pr_info("%s(on): success\n", __func__); } else { rc = regulator_disable(reg_8921_s4); if (rc) pr_warning("'%s' regulator disable failed, rc=%d\n", "reg_8921_s4", rc); rc = regulator_disable(reg_8921_l16); if (rc) pr_warning("'%s' regulator disable failed, rc=%d\n", "reg_8921_l16", rc); rc = regulator_disable(reg_8921_l12); if (rc) pr_warning("'%s' regulator disable failed, rc=%d\n", 
/* End of mhl_sii9234_all_power(); the MHL reset/interrupt GPIO table;
 * mhl_sii9234_power(), the on/off entry point handed to the SiI9234 driver
 * (1 = power all rails and configure GPIOs, 0 = drop the 1.2 V/HDMI path);
 * the SiI9234 platform data bundling the callbacks defined above (plus
 * touch-forwarding ranges under CONFIG_FB_MSM_HDMI_MHL_SUPERDEMO); its GSBI8
 * I2C board info (8-bit 0x72 -> 7-bit 0x39); then, outside the MHL ifdefs,
 * the USB ID pin input/output pin tables and the start of
 * config_elite_usb_id_gpios(), which drives the ID pin high in output mode
 * ("ouput" spelling in the table name is historical -- kept as-is since it
 * may be referenced elsewhere in the file). */
"reg_8921_l12", rc); pr_info("%s(off): success\n", __func__); } prev_on = enable; return 0; } static uint32_t mhl_gpio_table[] = { GPIO_CFG(ELITE_GPIO_MHL_RSTz, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(ELITE_GPIO_MHL_INT, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), }; static int mhl_sii9234_power(int on) { int rc = 0; switch (on) { case 0: mhl_sii9234_1v2_power(false); break; case 1: mhl_sii9234_all_power(true); config_gpio_table(mhl_gpio_table, ARRAY_SIZE(mhl_gpio_table)); break; default: pr_warning("%s(%d) got unsupport parameter!!!\n", __func__, on); break; } return rc; } static T_MHL_PLATFORM_DATA mhl_sii9234_device_data = { .gpio_intr = ELITE_GPIO_MHL_INT, .gpio_reset = ELITE_GPIO_MHL_RSTz, .ci2ca = 0, .mhl_usb_switch = elite_usb_dpdn_switch, .mhl_1v2_power = mhl_sii9234_1v2_power, .enable_5v = hdmi_enable_5v, .mhl_power_vote = mhl_sii9234_power_vote, #ifdef CONFIG_FB_MSM_HDMI_MHL_SUPERDEMO .abs_x_min = 941,/* 0 */ .abs_x_max = 31664,/* 32767 */ .abs_y_min = 417,/* 0 */ .abs_y_max = 32053,/* 32767 */ .abs_pressure_min = 0, .abs_pressure_max = 255, .abs_width_min = 0, .abs_width_max = 20, #endif .power = mhl_sii9234_power, }; static struct i2c_board_info msm_i2c_gsbi8_mhl_sii9234_info[] = { { I2C_BOARD_INFO(MHL_SII9234_I2C_NAME, 0x72 >> 1), .platform_data = &mhl_sii9234_device_data, .irq = ELITE_GPIO_MHL_INT }, }; #endif /* CONFIG_FB_MSM_HDMI_MHL_SII9234 */ #endif /* CONFIG_FB_MSM_HDMI_MHL */ static uint32_t usb_ID_PIN_input_table[] = { GPIO_CFG(ELITE_GPIO_USB_ID1, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), }; static uint32_t usb_ID_PIN_ouput_table[] = { GPIO_CFG(ELITE_GPIO_USB_ID1, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), }; void config_elite_usb_id_gpios(bool output) { if (output) { gpio_tlmm_config(usb_ID_PIN_ouput_table[0], GPIO_CFG_ENABLE); gpio_set_value(ELITE_GPIO_USB_ID1, 1); pr_info("[CABLE] %s: %d output high\n", __func__, ELITE_GPIO_USB_ID1); } else { gpio_tlmm_config(usb_ID_PIN_input_table[0], 
/* End of config_elite_usb_id_gpios() (input branch: no-pull input);
 * elite_get_usbid_adc(), which reads the USB ID voltage through PMIC MPP7 /
 * AMUX6 and returns result.physical scaled from uV to mV, or the negative
 * errno from the ADC read; the MHL_USB_ENz pull-down (USB) / pull-up (UART)
 * pin tables and elite_usb_uart_switch() that selects between them; the
 * cable_detect platform data wiring the ADC/GPIO callbacks above into the
 * cable_detect driver (ADC-based detection) and its platform device plus
 * the registration helper; elite_pm8xxx_adc_device_register(), which defers
 * to headset_device_register(); and the start of elite_map_io(). */
GPIO_CFG_ENABLE); pr_info("[CABLE] %s: %d input none pull\n", __func__, ELITE_GPIO_USB_ID1); } } int64_t elite_get_usbid_adc(void) { struct pm8xxx_adc_chan_result result; int err = 0, adc = 0; err = pm8xxx_adc_mpp_config_read(PM8XXX_AMUX_MPP_7, ADC_MPP_1_AMUX6, &result); if (err) { pr_info("[CABLE] %s: get adc fail, err %d\n", __func__, err); return err; } pr_info("[CABLE] chan=%d, adc_code=%d, measurement=%lld, \ physical=%lld\n", result.chan, result.adc_code, result.measurement, result.physical); adc = result.physical; return adc/1000; } static uint32_t usbuart_pin_enable_usb_table[] = { GPIO_CFG(ELITE_GPIO_MHL_USB_ENz, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), }; static uint32_t usbuart_pin_enable_uart_table[] = { GPIO_CFG(ELITE_GPIO_MHL_USB_ENz, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), }; static void elite_usb_uart_switch(int nvbus) { printk(KERN_INFO "%s: %s, rev=%d\n", __func__, nvbus ? "uart" : "usb", system_rev); if(nvbus == 1) { gpio_tlmm_config(usbuart_pin_enable_uart_table[0], GPIO_CFG_ENABLE); } else { gpio_tlmm_config(usbuart_pin_enable_usb_table[0], GPIO_CFG_ENABLE); } } static struct cable_detect_platform_data cable_detect_pdata = { .detect_type = CABLE_TYPE_PMIC_ADC, .usb_id_pin_gpio = ELITE_GPIO_USB_ID1, .get_adc_cb = elite_get_usbid_adc, .config_usb_id_gpios = config_elite_usb_id_gpios, .mhl_reset_gpio = ELITE_GPIO_MHL_RSTz, #ifdef CONFIG_FB_MSM_HDMI_MHL .mhl_1v2_power = mhl_sii9234_1v2_power, .usb_dpdn_switch = elite_usb_dpdn_switch, #endif .usb_uart_switch = elite_usb_uart_switch, }; static struct platform_device cable_detect_device = { .name = "cable_detect", .id = -1, .dev = { .platform_data = &cable_detect_pdata, }, }; static void elite_cable_detect_register(void) { pr_info("%s\n", __func__); platform_device_register(&cable_detect_device); } void elite_pm8xxx_adc_device_register(void) { pr_info("%s: Register PM8921 ADC device\n", __func__); headset_device_register(); } #define MSM_SHARED_RAM_PHYS 0x80000000 static void 
/* elite_map_io(): set the shared-RAM physical base, map the MSM8960 static
 * IO regions and initialize socinfo.  elite_init_irq(): hand the MPM device
 * data (when CONFIG_MSM_MPM) to the interrupt extension, init the QGIC
 * distributor/CPU interface, then edge-trigger all PPIs except AVS_SVICINT
 * and AVS_SVICINTSWDONE (mask 0xFFFFD7FF) and enable the first 16 SGIs/PPIs,
 * with mb() ordering the writes before further boot.  msm8960_init_buses():
 * enable RPM control on the apps/system/MM bus fabrics and attach the bus
 * scaling platform data (CONFIG_MSM_BUS_SCALING only).  Then the GSBI10 SPI
 * pdata (27 MHz max) and, when the legacy 72K OTG driver is NOT selected,
 * msm_hsusb_vbus_power(), which requests/drives the 5 V boost-enable GPIO on
 * enable and releases it on disable (prev_on latch makes it idempotent),
 * followed by the start of the USB bus-scaling vectors. */
__init elite_map_io(void) { msm_shared_ram_phys = MSM_SHARED_RAM_PHYS; msm_map_msm8960_io(); if (socinfo_init() < 0) pr_err("socinfo_init() failed!\n"); } static void __init elite_init_irq(void) { struct msm_mpm_device_data *data = NULL; #ifdef CONFIG_MSM_MPM data = &msm8960_mpm_dev_data; #endif msm_mpm_irq_extn_init(data); gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE, (void *)MSM_QGIC_CPU_BASE); /* Edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */ writel_relaxed(0xFFFFD7FF, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4); writel_relaxed(0x0000FFFF, MSM_QGIC_DIST_BASE + GIC_DIST_ENABLE_SET); mb(); } static void __init msm8960_init_buses(void) { #ifdef CONFIG_MSM_BUS_SCALING msm_bus_8960_apps_fabric_pdata.rpm_enabled = 1; msm_bus_8960_sys_fabric_pdata.rpm_enabled = 1; msm_bus_8960_mm_fabric_pdata.rpm_enabled = 1; msm_bus_apps_fabric.dev.platform_data = &msm_bus_8960_apps_fabric_pdata; msm_bus_sys_fabric.dev.platform_data = &msm_bus_8960_sys_fabric_pdata; msm_bus_mm_fabric.dev.platform_data = &msm_bus_8960_mm_fabric_pdata; msm_bus_sys_fpb.dev.platform_data = &msm_bus_8960_sys_fpb_pdata; msm_bus_cpss_fpb.dev.platform_data = &msm_bus_8960_cpss_fpb_pdata; msm_bus_rpm_set_mt_mask(); #endif } static struct msm_spi_platform_data msm8960_qup_spi_gsbi10_pdata = { .max_clock_speed = 27000000, }; #ifdef CONFIG_USB_MSM_OTG_72K static struct msm_otg_platform_data msm_otg_pdata; #else static int msm_hsusb_vbus_power(bool on) { static int prev_on; int rc = 0; if (on == prev_on) return rc; if (on) { rc = gpio_request(ELITE_GPIO_V_BOOST_5V_EN, "USB_BOOST_5V"); if (rc) { pr_err("'%s'(%d) gpio_request failed, rc=%d\n", "USB_BOOST_5V", ELITE_GPIO_V_BOOST_5V_EN, rc); return rc; } gpio_set_value(ELITE_GPIO_V_BOOST_5V_EN, 1); pr_info("%s(on): success\n", __func__); } else { gpio_set_value(ELITE_GPIO_V_BOOST_5V_EN, 0); gpio_free(ELITE_GPIO_V_BOOST_5V_EN); pr_info("%s(off): success\n", __func__); } prev_on = on; return rc; } static struct msm_bus_vectors usb_init_vectors[] = { { .src 
/* USB bus-scaling: idle vectors (0 bandwidth) and max vectors (60 MB/s ab /
 * 960 MB/s ib from SPS to EBI), combined into a two-usecase scale table.
 * Two USB PHY tuning sequences (terminated by -1): the default v3 sequence
 * and a v3.2.1 variant with a different first override value; the OTG
 * platform data selecting PMIC-based OTG control, the Snapdragon 28 nm
 * integrated PHY, a 750 mA power budget and the vbus/bus-scale hooks above.
 * Then the shared-memory "dload" region used by download mode: magic IDs,
 * the dload_struct layout mirrored at physical 0x2A03F0C8, and
 * usb_diag_update_pid_and_serial_num(), which ioremaps that region and
 * writes the USB PID (with its magic) and serial number so the bootloader
 * reports matching identifiers.  The serial-number write path continues on
 * the next line. */
= MSM_BUS_MASTER_SPS, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors usb_max_vectors[] = { { .src = MSM_BUS_MASTER_SPS, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 60000000, .ib = 960000000, }, }; static struct msm_bus_paths usb_bus_scale_usecases[] = { { ARRAY_SIZE(usb_init_vectors), usb_init_vectors, }, { ARRAY_SIZE(usb_max_vectors), usb_max_vectors, }, }; static struct msm_bus_scale_pdata usb_bus_scale_pdata = { usb_bus_scale_usecases, ARRAY_SIZE(usb_bus_scale_usecases), .name = "usb", }; static int phy_init_seq_v3[] = { 0x7c, 0x81, 0x3c, 0x82, -1}; static int phy_init_seq_v3_2_1[] = { 0x5c, 0x81, 0x3c, 0x82, -1}; static struct msm_otg_platform_data msm_otg_pdata = { .phy_init_seq = phy_init_seq_v3, .mode = USB_OTG, .otg_control = OTG_PMIC_CONTROL, .phy_type = SNPS_28NM_INTEGRATED_PHY, /* .pmic_id_irq = PM8921_USB_ID_IN_IRQ(PM8921_IRQ_BASE), */ .vbus_power = msm_hsusb_vbus_power, .power_budget = 750, .bus_scale_table = &usb_bus_scale_pdata, }; #endif /* #ifdef CONFIG_USB_ANDROID_DIAG */ #define PID_MAGIC_ID 0x71432909 #define SERIAL_NUM_MAGIC_ID 0x61945374 #define SERIAL_NUMBER_LENGTH 127 #define DLOAD_USB_BASE_ADD 0x2A03F0C8 struct magic_num_struct { uint32_t pid; uint32_t serial_num; }; struct dload_struct { uint32_t reserved1; uint32_t reserved2; uint32_t reserved3; uint16_t reserved4; uint16_t pid; char serial_number[SERIAL_NUMBER_LENGTH]; uint16_t reserved5; struct magic_num_struct magic_struct; }; static int usb_diag_update_pid_and_serial_num(uint32_t pid, const char *snum) { struct dload_struct __iomem *dload = 0; dload = ioremap(DLOAD_USB_BASE_ADD, sizeof(*dload)); if (!dload) { pr_err("%s: cannot remap I/O memory region: %08x\n", __func__, DLOAD_USB_BASE_ADD); return -ENXIO; } pr_debug("%s: dload:%pK pid:%x serial_num:%s\n", __func__, dload, pid, snum); /* update pid */ dload->magic_struct.pid = PID_MAGIC_ID; dload->pid = pid; /* update serial number */ dload->magic_struct.serial_num = 0; if (!snum) { 
/* End of usb_diag_update_pid_and_serial_num(): a NULL snum clears the field,
 * otherwise the serial-number magic is set and the string copied with
 * strlcpy; the mapping is always iounmap'd.  Then the android_usb platform
 * device carrying that callback; the VERSION_ID macro extracting the top
 * nibble of the hardware version register; elite_add_usb_devices(), which
 * picks the v3.2.1 PHY tuning sequence on new-enough silicon and the v3
 * sequence otherwise; and the SPM (power-manager) command sequences for
 * clock gating, retention (plain and Krait-v3), and power collapse with or
 * without RPM notification (the Krait-v3 variants use a different apc_pdn
 * command byte, per the inline comment).  These byte sequences are
 * SAW2 microcode -- do not modify.  The boot-CPU sequence list starts at the
 * end of the line. */
memset(dload->serial_number, 0, SERIAL_NUMBER_LENGTH); goto out; } dload->magic_struct.serial_num = SERIAL_NUM_MAGIC_ID; strlcpy(dload->serial_number, snum, SERIAL_NUMBER_LENGTH); out: iounmap(dload); return 0; } static struct android_usb_platform_data android_usb_pdata = { .update_pid_and_serial_num = usb_diag_update_pid_and_serial_num, }; static struct platform_device android_usb_device = { .name = "android_usb", .id = -1, .dev = { .platform_data = &android_usb_pdata, }, }; #define VERSION_ID (readl(HW_VER_ID_VIRT) & 0xf0000000) >> 28 #define HW_8960_V3_2_1 0x07 void elite_add_usb_devices(void) { if (VERSION_ID >= HW_8960_V3_2_1) { printk(KERN_INFO "%s rev: %d v3.2.1\n", __func__, system_rev); msm_otg_pdata.phy_init_seq = phy_init_seq_v3_2_1; } else { printk(KERN_INFO "%s rev: %d\n", __func__, system_rev); msm_otg_pdata.phy_init_seq = phy_init_seq_v3; } printk(KERN_INFO "%s: OTG_PMIC_CONTROL in rev: %d\n", __func__, system_rev); } static uint8_t spm_wfi_cmd_sequence[] __initdata = { 0x03, 0x0f, }; static uint8_t spm_retention_cmd_sequence[] __initdata = { 0x00, 0x05, 0x03, 0x0D, 0x0B, 0x00, 0x0f, }; static uint8_t spm_retention_with_krait_v3_cmd_sequence[] __initdata = { 0x42, 0x1B, 0x00, 0x05, 0x03, 0x0D, 0x0B, 0x00, 0x42, 0x1B, 0x0f, }; static uint8_t spm_power_collapse_without_rpm[] __initdata = { 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01, 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0f, }; static uint8_t spm_power_collapse_with_rpm[] __initdata = { 0x00, 0x24, 0x54, 0x10, 0x09, 0x07, 0x01, 0x0B, 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0f, }; /* 8960AB has a different command to assert apc_pdn */ static uint8_t spm_power_collapse_without_rpm_krait_v3[] __initdata = { 0x00, 0x24, 0x84, 0x10, 0x09, 0x03, 0x01, 0x10, 0x84, 0x30, 0x0C, 0x24, 0x30, 0x0f, }; static uint8_t spm_power_collapse_with_rpm_krait_v3[] __initdata = { 0x00, 0x24, 0x84, 0x10, 0x09, 0x07, 0x01, 0x0B, 0x10, 0x84, 0x30, 0x0C, 0x24, 0x30, 0x0f, }; static struct msm_spm_seq_entry msm_spm_boot_cpu_seq_list[] 
/* SPM low-power-mode tables.  Boot-CPU and non-boot-CPU sequence lists are
 * identical here: clock gating (WFI), power retention, and power collapse
 * both standalone and RPM-notified, each pointing at a command sequence
 * defined above.  msm_spm_data[] then binds each CPU's SAW2 base address to
 * its sequence list along with the SAW2 register init values (AVS control/
 * hysteresis only under CONFIG_MSM_AVS_HW, PMIC delay and data words, 50 us
 * vctl timeout).  The register values are hardware calibration constants --
 * do not modify. */
__initdata = { [0] = { .mode = MSM_SPM_MODE_CLOCK_GATING, .notify_rpm = false, .cmd = spm_wfi_cmd_sequence, }, [1] = { .mode = MSM_SPM_MODE_POWER_RETENTION, .notify_rpm = false, .cmd = spm_retention_cmd_sequence, }, [2] = { .mode = MSM_SPM_MODE_POWER_COLLAPSE, .notify_rpm = false, .cmd = spm_power_collapse_without_rpm, }, [3] = { .mode = MSM_SPM_MODE_POWER_COLLAPSE, .notify_rpm = true, .cmd = spm_power_collapse_with_rpm, }, }; static struct msm_spm_seq_entry msm_spm_nonboot_cpu_seq_list[] __initdata = { [0] = { .mode = MSM_SPM_MODE_CLOCK_GATING, .notify_rpm = false, .cmd = spm_wfi_cmd_sequence, }, [1] = { .mode = MSM_SPM_MODE_POWER_RETENTION, .notify_rpm = false, .cmd = spm_retention_cmd_sequence, }, [2] = { .mode = MSM_SPM_MODE_POWER_COLLAPSE, .notify_rpm = false, .cmd = spm_power_collapse_without_rpm, }, [3] = { .mode = MSM_SPM_MODE_POWER_COLLAPSE, .notify_rpm = true, .cmd = spm_power_collapse_with_rpm, }, }; static struct msm_spm_platform_data msm_spm_data[] __initdata = { [0] = { .reg_base_addr = MSM_SAW0_BASE, .reg_init_values[MSM_SPM_REG_SAW2_CFG] = 0x1F, #if defined(CONFIG_MSM_AVS_HW) .reg_init_values[MSM_SPM_REG_SAW2_AVS_CTL] = 0x58589464, .reg_init_values[MSM_SPM_REG_SAW2_AVS_HYSTERESIS] = 0x00020000, #endif .reg_init_values[MSM_SPM_REG_SAW2_SPM_CTL] = 0x01, .reg_init_values[MSM_SPM_REG_SAW2_PMIC_DLY] = 0x03020004, .reg_init_values[MSM_SPM_REG_SAW2_PMIC_DATA_0] = 0x0084009C, .reg_init_values[MSM_SPM_REG_SAW2_PMIC_DATA_1] = 0x00A4001C, .vctl_timeout_us = 50, .num_modes = ARRAY_SIZE(msm_spm_boot_cpu_seq_list), .modes = msm_spm_boot_cpu_seq_list, }, [1] = { .reg_base_addr = MSM_SAW1_BASE, .reg_init_values[MSM_SPM_REG_SAW2_CFG] = 0x1F, #if defined(CONFIG_MSM_AVS_HW) .reg_init_values[MSM_SPM_REG_SAW2_AVS_CTL] = 0x58589464, .reg_init_values[MSM_SPM_REG_SAW2_AVS_HYSTERESIS] = 0x00020000, #endif .reg_init_values[MSM_SPM_REG_SAW2_SPM_CTL] = 0x01, .reg_init_values[MSM_SPM_REG_SAW2_PMIC_DLY] = 0x03020004, .reg_init_values[MSM_SPM_REG_SAW2_PMIC_DATA_0] = 0x0084009C, 
/* Completion of the CPU1 SAW entry; the L2 SPM command sequences (retention/
 * WFI, GDHS, and full power-off -- RPM-notified for the latter two) with the
 * L2 sequence list and the L2 SAW platform data; then, under
 * CONFIG_PERFLOCK, the perflock CPU-frequency tables: five perf-lock levels
 * (810 MHz LOWEST ... 1.512 GHz HIGHEST) and a three-level cpufreq ceiling
 * table, wrapped into perflock_data structs.  The perflock_pdata definition
 * that begins at the end of this line continues beyond this chunk. */
.reg_init_values[MSM_SPM_REG_SAW2_PMIC_DATA_1] = 0x00A4001C, .vctl_timeout_us = 50, .num_modes = ARRAY_SIZE(msm_spm_nonboot_cpu_seq_list), .modes = msm_spm_nonboot_cpu_seq_list, }, }; static uint8_t l2_spm_wfi_cmd_sequence[] __initdata = { 0x00, 0x20, 0x03, 0x20, 0x00, 0x0f, }; static uint8_t l2_spm_gdhs_cmd_sequence[] __initdata = { 0x00, 0x20, 0x34, 0x64, 0x48, 0x07, 0x48, 0x20, 0x50, 0x64, 0x04, 0x34, 0x50, 0x0f, }; static uint8_t l2_spm_power_off_cmd_sequence[] __initdata = { 0x00, 0x10, 0x34, 0x64, 0x48, 0x07, 0x48, 0x10, 0x50, 0x64, 0x04, 0x34, 0x50, 0x0F, }; static struct msm_spm_seq_entry msm_spm_l2_seq_list[] __initdata = { [0] = { .mode = MSM_SPM_L2_MODE_RETENTION, .notify_rpm = false, .cmd = l2_spm_wfi_cmd_sequence, }, [1] = { .mode = MSM_SPM_L2_MODE_GDHS, .notify_rpm = true, .cmd = l2_spm_gdhs_cmd_sequence, }, [2] = { .mode = MSM_SPM_L2_MODE_POWER_COLLAPSE, .notify_rpm = true, .cmd = l2_spm_power_off_cmd_sequence, }, }; static struct msm_spm_platform_data msm_spm_l2_data[] __initdata = { [0] = { .reg_base_addr = MSM_SAW_L2_BASE, .reg_init_values[MSM_SPM_REG_SAW2_SPM_CTL] = 0x00, .reg_init_values[MSM_SPM_REG_SAW2_PMIC_DLY] = 0x02020204, .reg_init_values[MSM_SPM_REG_SAW2_PMIC_DATA_0] = 0x00A000AE, .reg_init_values[MSM_SPM_REG_SAW2_PMIC_DATA_1] = 0x00A00020, .modes = msm_spm_l2_seq_list, .num_modes = ARRAY_SIZE(msm_spm_l2_seq_list), }, }; #ifdef CONFIG_PERFLOCK static unsigned elite_perf_acpu_table[] = { 810000000, /* LOWEST */ 918000000, /* LOW */ 1026000000, /* MEDIUM */ 1242000000,/* HIGH */ 1512000000, /* HIGHEST */ }; static unsigned elite_cpufreq_ceiling_acpu_table[] = { 702000000, 918000000, 1026000000, }; static struct perflock_data elite_perflock_data = { .perf_acpu_table = elite_perf_acpu_table, .table_size = ARRAY_SIZE(elite_perf_acpu_table), }; static struct perflock_data elite_cpufreq_ceiling_data = { .perf_acpu_table = elite_cpufreq_ceiling_acpu_table, .table_size = ARRAY_SIZE(elite_cpufreq_ceiling_acpu_table), }; static struct perflock_pdata 
perflock_pdata = { .perf_floor = &elite_perflock_data, .perf_ceiling = &elite_cpufreq_ceiling_data, }; struct platform_device msm8960_device_perf_lock = { .name = "perf_lock", .id = -1, .dev = { .platform_data = &perflock_pdata, }, }; #endif static uint32_t gsbi3_gpio_table[] = { GPIO_CFG(ELITE_GPIO_TP_I2C_DAT, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), GPIO_CFG(ELITE_GPIO_TP_I2C_CLK, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), }; static uint32_t gsbi3_gpio_table_gpio[] = { GPIO_CFG(ELITE_GPIO_TP_I2C_DAT, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), GPIO_CFG(ELITE_GPIO_TP_I2C_CLK, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), }; /* CAMERA setting */ static uint32_t gsbi4_gpio_table[] = { GPIO_CFG(ELITE_GPIO_CAM_I2C_DAT, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), GPIO_CFG(ELITE_GPIO_CAM_I2C_CLK, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), }; static uint32_t gsbi4_gpio_table_gpio[] = { GPIO_CFG(ELITE_GPIO_CAM_I2C_DAT, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), GPIO_CFG(ELITE_GPIO_CAM_I2C_CLK, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), }; static uint32_t gsbi5_gpio_table[] = { GPIO_CFG(ELITE_GPIO_NFC_I2C_SDA, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), GPIO_CFG(ELITE_GPIO_NFC_I2C_SCL, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), }; static uint32_t gsbi5_gpio_table_gpio[] = { GPIO_CFG(ELITE_GPIO_NFC_I2C_SDA, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), GPIO_CFG(ELITE_GPIO_NFC_I2C_SCL, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), }; static uint32_t gsbi8_gpio_table[] = { GPIO_CFG(ELITE_GPIO_MC_I2C_DAT, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), GPIO_CFG(ELITE_GPIO_MC_I2C_CLK, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), }; static uint32_t gsbi8_gpio_table_gpio[] = { GPIO_CFG(ELITE_GPIO_MC_I2C_DAT, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), GPIO_CFG(ELITE_GPIO_MC_I2C_CLK, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), }; static uint32_t 
gsbi12_gpio_table[] = { GPIO_CFG(ELITE_GPIO_SR_I2C_DAT, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), GPIO_CFG(ELITE_GPIO_SR_I2C_CLK, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), }; static uint32_t gsbi12_gpio_table_gpio[] = { GPIO_CFG(ELITE_GPIO_SR_I2C_DAT, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), GPIO_CFG(ELITE_GPIO_SR_I2C_CLK, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), }; static void gsbi_qup_i2c_gpio_config(int adap_id, int config_type) { printk(KERN_INFO "%s(): adap_id = %d, config_type = %d \n", __func__, adap_id, config_type); if ((adap_id == MSM_8960_GSBI3_QUP_I2C_BUS_ID) && (config_type == 1)) { gpio_tlmm_config(gsbi3_gpio_table[0], GPIO_CFG_ENABLE); gpio_tlmm_config(gsbi3_gpio_table[1], GPIO_CFG_ENABLE); } if ((adap_id == MSM_8960_GSBI3_QUP_I2C_BUS_ID) && (config_type == 0)) { gpio_tlmm_config(gsbi3_gpio_table_gpio[0], GPIO_CFG_ENABLE); gpio_tlmm_config(gsbi3_gpio_table_gpio[1], GPIO_CFG_ENABLE); } /* CAMERA setting */ if ((adap_id == MSM_8960_GSBI4_QUP_I2C_BUS_ID) && (config_type == 1)) { gpio_tlmm_config(gsbi4_gpio_table[0], GPIO_CFG_ENABLE); gpio_tlmm_config(gsbi4_gpio_table[1], GPIO_CFG_ENABLE); } if ((adap_id == MSM_8960_GSBI4_QUP_I2C_BUS_ID) && (config_type == 0)) { gpio_tlmm_config(gsbi4_gpio_table_gpio[0], GPIO_CFG_ENABLE); gpio_tlmm_config(gsbi4_gpio_table_gpio[1], GPIO_CFG_ENABLE); } if ((adap_id == MSM_8960_GSBI5_QUP_I2C_BUS_ID) && (config_type == 1)) { gpio_tlmm_config(gsbi5_gpio_table[0], GPIO_CFG_ENABLE); gpio_tlmm_config(gsbi5_gpio_table[1], GPIO_CFG_ENABLE); } if ((adap_id == MSM_8960_GSBI5_QUP_I2C_BUS_ID) && (config_type == 0)) { gpio_tlmm_config(gsbi5_gpio_table_gpio[0], GPIO_CFG_ENABLE); gpio_tlmm_config(gsbi5_gpio_table_gpio[1], GPIO_CFG_ENABLE); } if ((adap_id == MSM_8960_GSBI8_QUP_I2C_BUS_ID) && (config_type == 1)) { gpio_tlmm_config(gsbi8_gpio_table[0], GPIO_CFG_ENABLE); gpio_tlmm_config(gsbi8_gpio_table[1], GPIO_CFG_ENABLE); } if ((adap_id == MSM_8960_GSBI8_QUP_I2C_BUS_ID) && (config_type == 0)) { 
gpio_tlmm_config(gsbi8_gpio_table_gpio[0], GPIO_CFG_ENABLE); gpio_tlmm_config(gsbi8_gpio_table_gpio[1], GPIO_CFG_ENABLE); } if ((adap_id == MSM_8960_GSBI12_QUP_I2C_BUS_ID) && (config_type == 1)) { gpio_tlmm_config(gsbi12_gpio_table[0], GPIO_CFG_ENABLE); gpio_tlmm_config(gsbi12_gpio_table[1], GPIO_CFG_ENABLE); } if ((adap_id == MSM_8960_GSBI12_QUP_I2C_BUS_ID) && (config_type == 0)) { gpio_tlmm_config(gsbi12_gpio_table_gpio[0], GPIO_CFG_ENABLE); gpio_tlmm_config(gsbi12_gpio_table_gpio[1], GPIO_CFG_ENABLE); } } static struct msm_i2c_platform_data msm8960_i2c_qup_gsbi4_pdata = { .clk_freq = 400000, .src_clk_rate = 24000000, .msm_i2c_config_gpio = gsbi_qup_i2c_gpio_config, }; static struct msm_i2c_platform_data msm8960_i2c_qup_gsbi3_pdata = { .clk_freq = 400000, .src_clk_rate = 24000000, .msm_i2c_config_gpio = gsbi_qup_i2c_gpio_config, }; static struct msm_i2c_platform_data msm8960_i2c_qup_gsbi5_pdata = { .clk_freq = 100000, .src_clk_rate = 24000000, .msm_i2c_config_gpio = gsbi_qup_i2c_gpio_config, }; static struct msm_i2c_platform_data msm8960_i2c_qup_gsbi8_pdata = { .clk_freq = 400000, .src_clk_rate = 24000000, .msm_i2c_config_gpio = gsbi_qup_i2c_gpio_config, .share_uart_flag = 1, /* check if QUP-I2C and Uart share the gisb */ }; static struct msm_i2c_platform_data msm8960_i2c_qup_gsbi12_pdata = { .clk_freq = 400000, .src_clk_rate = 24000000, .msm_i2c_config_gpio = gsbi_qup_i2c_gpio_config, }; static struct platform_device msm_device_saw_core0 = { .name = "saw-regulator", .id = 0, .dev = { .platform_data = &msm_saw_regulator_pdata_s5, }, }; static struct platform_device msm_device_saw_core1 = { .name = "saw-regulator", .id = 1, .dev = { .platform_data = &msm_saw_regulator_pdata_s6, }, }; static struct tsens_platform_data msm_tsens_pdata = { .slope = {910, 910, 910, 910, 910}, .tsens_factor = 1000, .hw_type = MSM_8960, .tsens_num_sensor = 5, }; static struct platform_device msm_tsens_device = { .name = "tsens8960-tm", .id = -1, }; static struct msm_thermal_data 
msm_thermal_pdata = { .sensor_id = 0, .poll_ms = 1000, .limit_temp_degC = 60, .temp_hysteresis_degC = 10, // .limit_freq = 918000, .freq_step = 2, }; #ifdef CONFIG_MSM_FAKE_BATTERY static struct platform_device fish_battery_device = { .name = "fish_battery", }; #endif static struct platform_device scm_memchk_device = { .name = "scm-memchk", .id = -1, }; static struct platform_device elite_device_rpm_regulator __devinitdata = { .name = "rpm-regulator", .id = -1, .dev = { .platform_data = &elite_rpm_regulator_pdata, }, }; static struct pm8xxx_vibrator_pwm_platform_data pm8xxx_vib_pwm_pdata = { .initial_vibrate_ms = 0, .max_timeout_ms = 15000, .duty_us = 49, .PERIOD_US = 62, .bank = 2, .ena_gpio = ELITE_GPIO_HAPTIC_EN, .vdd_gpio = PM8921_GPIO_PM_TO_SYS(ELITE_PMGPIO_HAPTIC_3V3_EN), }; static struct platform_device vibrator_pwm_device = { .name = PM8XXX_VIBRATOR_PWM_DEV_NAME, .dev = { .platform_data = &pm8xxx_vib_pwm_pdata, }, }; static struct platform_device *common_devices[] __initdata = { &msm8960_device_acpuclk, &msm8960_device_dmov, &msm_device_smd, &msm8960_device_uart_gsbi8, &msm_device_uart_dm6, &msm_device_saw_core0, &msm_device_saw_core1, &msm8960_device_ext_5v_vreg, &msm8960_device_qup_i2c_gsbi3, &msm8960_device_qup_i2c_gsbi4, &msm8960_device_qup_i2c_gsbi5, &msm8960_device_qup_i2c_gsbi8, &msm8960_device_qup_spi_gsbi10, #ifndef CONFIG_MSM_DSPS &msm8960_device_qup_i2c_gsbi12, #endif &msm8960_device_ssbi_pmic, &msm_slim_ctrl, &msm_device_wcnss_wlan, #if defined(CONFIG_QSEECOM) &qseecom_device, #endif #if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \ defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE) &qcrypto_device, #endif #if defined(CONFIG_CRYPTO_DEV_QCEDEV) || \ defined(CONFIG_CRYPTO_DEV_QCEDEV_MODULE) &qcedev_device, #endif #ifdef CONFIG_MSM_ROTATOR &msm_rotator_device, #endif &msm8960_cpu_slp_status, &msm_device_sps, #ifdef CONFIG_MSM_FAKE_BATTERY &fish_battery_device, #endif &fmem_device, #ifdef CONFIG_ANDROID_PMEM #ifndef CONFIG_MSM_MULTIMEDIA_USE_ION 
&android_pmem_device, &android_pmem_adsp_device, &android_pmem_audio_device, #endif #endif &msm_device_vidc, &msm_device_bam_dmux, &msm_fm_platform_init, #ifdef CONFIG_HW_RANDOM_MSM &msm_device_rng, #endif #ifdef CONFIG_ION_MSM &ion_dev, #endif &msm8960_rpm_device, &msm8960_rpm_log_device, &msm8960_rpm_stat_device, #ifdef CONFIG_MSM_QDSS &msm_etb_device, &msm_tpiu_device, &msm_funnel_device, &msm_etm_device, #endif &msm8960_device_watchdog, #ifdef CONFIG_MSM_RTB &msm_rtb_device, #endif &msm8960_device_cache_erp, &msm8960_iommu_domain_device, #ifdef CONFIG_MSM_CACHE_DUMP &msm_cache_dump_device, #endif #ifdef CONFIG_HTC_BATT_8960 &htc_battery_pdev, #endif &msm_tsens_device, &vibrator_pwm_device, }; static struct platform_device *elite_devices[] __initdata = { &msm_8960_q6_lpass, &msm_8960_q6_mss_fw, &msm_8960_q6_mss_sw, &msm_8960_riva, &msm_pil_tzapps, &msm8960_device_otg, &msm_device_hsusb_host, &msm8960_device_gadget_peripheral, &android_usb_device, &msm_pcm, &msm_pcm_routing, &msm_multi_ch_pcm, &msm_cpudai0, &msm_cpudai1, &msm8960_cpudai_slimbus_2_tx, &msm8960_cpudai_slimbus_2_rx, &msm_cpudai_hdmi_rx, &msm_cpudai_bt_rx, &msm_cpudai_bt_tx, &msm_cpudai_fm_rx, &msm_cpudai_fm_tx, &msm_cpudai_auxpcm_rx, &msm_cpudai_auxpcm_tx, &msm_cpu_fe, &msm_stub_codec, #ifdef CONFIG_MSM_GEMINI &msm8960_gemini_device, #endif &msm_voice, &msm_voip, &msm_lpa_pcm, &msm_cpudai_afe_01_rx, &msm_cpudai_afe_01_tx, &msm_cpudai_afe_02_rx, &msm_cpudai_afe_02_tx, &msm_pcm_afe, &msm_compr_dsp, &msm_cpudai_incall_music_rx, &msm_cpudai_incall_record_rx, &msm_cpudai_incall_record_tx, &msm_pcm_hostless, &msm_lowlatency_pcm, &msm_bus_apps_fabric, &msm_bus_sys_fabric, &msm_bus_mm_fabric, &msm_bus_sys_fpb, &msm_bus_cpss_fpb, &msm_device_tz_log, #ifdef CONFIG_PERFLOCK &msm8960_device_perf_lock, #endif &scm_memchk_device, }; static void __init msm8960_i2c_init(void) { msm8960_device_qup_i2c_gsbi4.dev.platform_data = &msm8960_i2c_qup_gsbi4_pdata; msm8960_device_qup_i2c_gsbi3.dev.platform_data = 
&msm8960_i2c_qup_gsbi3_pdata; msm8960_device_qup_i2c_gsbi5.dev.platform_data = &msm8960_i2c_qup_gsbi5_pdata; msm8960_device_qup_i2c_gsbi8.dev.platform_data = &msm8960_i2c_qup_gsbi8_pdata; msm8960_device_qup_i2c_gsbi12.dev.platform_data = &msm8960_i2c_qup_gsbi12_pdata; } static void __init msm8960_gfx_init(void) { struct kgsl_device_platform_data *kgsl_3d0_pdata = msm_kgsl_3d0.dev.platform_data; uint32_t soc_platform_version = socinfo_get_version(); /* Fixup data that needs to change based on GPU ID */ if (cpu_is_msm8960ab()) { kgsl_3d0_pdata->chipid = ADRENO_CHIPID(3, 2, 1, 0); /* 8960PRO nominal clock rate is 320Mhz */ kgsl_3d0_pdata->pwrlevel[1].gpu_freq = 320000000; } else { kgsl_3d0_pdata->iommu_count = 1; if (SOCINFO_VERSION_MAJOR(soc_platform_version) == 1) { kgsl_3d0_pdata->pwrlevel[0].gpu_freq = 320000000; kgsl_3d0_pdata->pwrlevel[1].gpu_freq = 266667000; } if (SOCINFO_VERSION_MAJOR(soc_platform_version) >= 3) { /* 8960v3 GPU registers returns 5 for patch release * but it should be 6, so dummy up the chipid here * based the platform type */ kgsl_3d0_pdata->chipid = ADRENO_CHIPID(2, 2, 0, 6); } } /* Register the 3D core */ platform_device_register(&msm_kgsl_3d0); /* Register the 2D cores if we are not 8960PRO */ if (!cpu_is_msm8960ab()) { platform_device_register(&msm_kgsl_2d0); platform_device_register(&msm_kgsl_2d1); } } #ifdef CONFIG_HTC_BATT_8960 static struct pm8921_charger_batt_param chg_batt_params[] = { [0] = { .max_voltage = 4200, .cool_bat_voltage = 4200, .warm_bat_voltage = 4000, }, [1] = { .max_voltage = 4340, .cool_bat_voltage = 4340, .warm_bat_voltage = 4000, }, [2] = { .max_voltage = 4300, .cool_bat_voltage = 4300, .warm_bat_voltage = 4000, }, [3] = { .max_voltage = 4350, .cool_bat_voltage = 4350, .warm_bat_voltage = 4000, }, }; static struct single_row_lut fcc_temp_id_1 = { .x = {-20, -10, 0, 5, 10, 20, 30, 40}, .y = {1268, 1269, 1270, 1470, 1580, 1760, 1801, 1802}, .cols = 8, }; static struct single_row_lut fcc_sf_id_1 = { .x = {100, 200, 
300, 400, 500}, .y = {100, 97, 96, 93, 90}, .cols = 5, }; static struct sf_lut pc_sf_id_1 = { .rows = 10, .cols = 5, .row_entries = {100, 200, 300, 400, 500}, .percent = {100, 90, 80, 70, 60, 50, 40, 30, 20, 10}, .sf = { {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100} }, }; static struct pc_temp_ocv_lut pc_temp_ocv_id_1 = { .rows = 29, .cols = 8, .temp = {-20, -10, 0, 5, 10, 20, 30, 40}, .percent = {100, 95, 90, 85, 80, 75, 70, 65, 60, 55, 50, 45, 40, 35, 30, 25, 20, 15, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }, .ocv = { {4150 , 4150 , 4150 , 4150 , 4150 , 4150 , 4150 , 4150 }, {4130 , 4130 , 4130 , 4141 , 4138 , 4133 , 4130 , 4129 }, {4112 , 4112 , 4112 , 4104 , 4099 , 4090 , 4085 , 4084 }, {4082 , 4082 , 4082 , 4069 , 4062 , 4050 , 4044 , 4042 }, {4053 , 4053 , 4053 , 4037 , 4028 , 4013 , 4006 , 4000 }, {4025 , 4025 , 4025 , 4006 , 3996 , 3980 , 3972 , 3970 }, {3999 , 3999 , 3999 , 3979 , 3968 , 3951 , 3942 , 3940 }, {3975 , 3975 , 3975 , 3953 , 3942 , 3924 , 3915 , 3912 }, {3952 , 3952 , 3952 , 3929 , 3917 , 3898 , 3889 , 3887 }, {3929 , 3929 , 3929 , 3903 , 3889 , 3861 , 3846 , 3844 }, {3906 , 3906 , 3906 , 3874 , 3855 , 3826 , 3817 , 3816 }, {3881 , 3881 , 3881 , 3845 , 3828 , 3808 , 3800 , 3800 }, {3857 , 3857 , 3857 , 3824 , 3810 , 3794 , 3788 , 3787 }, {3836 , 3836 , 3836 , 3808 , 3797 , 3784 , 3777 , 3776 }, {3767 , 3767 , 3820 , 3796 , 3787 , 3776 , 3770 , 3767 }, {3754 , 3754 , 3807 , 3787 , 3779 , 3770 , 3762 , 3754 }, {3734 , 3734 , 3797 , 3781 , 3774 , 3761 , 3743 , 3734 }, {3705 , 3705 , 3789 , 3775 , 3768 , 3743 , 3713 , 3705 }, {3670 , 3670 , 3783 , 3769 , 3759 , 3707 , 3676 , 3670 }, {3668 , 3668 , 3782 , 3768 , 3756 , 3701 , 3674 , 3668 }, {3666 , 3666 , 3781 , 3766 , 3752 , 3695 , 3672 , 3666 }, {3664 , 3664 , 3780 , 3765 , 3749 , 
3689 , 3669 , 3664 }, {3662 , 3662 , 3779 , 3763 , 3745 , 3683 , 3667 , 3662 }, {3660 , 3660 , 3777 , 3761 , 3741 , 3677 , 3664 , 3660 }, {3618 , 3618 , 3776 , 3758 , 3734 , 3674 , 3626 , 3618 }, {3576 , 3576 , 3775 , 3754 , 3726 , 3671 , 3587 , 3576 }, {3534 , 3534 , 3774 , 3751 , 3718 , 3668 , 3548 , 3534 }, {3491 , 3491 , 3772 , 3747 , 3710 , 3664 , 3509 , 3491 }, {3400 , 3400 , 3650 , 3650 , 3550 , 3500 , 3450 , 3400 } }, }; struct pm8921_bms_battery_data bms_battery_data_id_1 = { .fcc = 1800, .fcc_temp_lut = &fcc_temp_id_1, .fcc_sf_lut = &fcc_sf_id_1, .pc_temp_ocv_lut = &pc_temp_ocv_id_1, .pc_sf_lut = &pc_sf_id_1, }; static struct single_row_lut fcc_temp_id_2 = { .x = {-20, -10, 0, 5, 10, 20, 30, 40}, .y = {1540, 1543, 1623, 1715, 1759, 1794, 1785, 1780}, .cols = 8, }; static struct single_row_lut fcc_sf_id_2 = { .x = {100, 200, 300, 400, 500}, .y = {100, 97, 96, 93, 90}, .cols = 5, }; static struct sf_lut pc_sf_id_2 = { .rows = 10, .cols = 5, .row_entries = {100, 200, 300, 400, 500}, .percent = {100, 90, 80, 70, 60, 50, 40, 30, 20, 10}, .sf = { {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100}, {100, 100, 100, 100, 100} }, }; static struct pc_temp_ocv_lut pc_temp_ocv_id_2 = { .rows = 29, .cols = 8, .temp = {-20, -10, 0, 5, 10, 20, 30, 40}, .percent = {100, 95, 90, 85, 80, 75, 70, 65, 60, 55, 50, 45, 40, 35, 30, 25, 20, 15, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }, .ocv = { {4150 , 4150 , 4150 , 4150 , 4150 , 4150 , 4150 , 4150 }, {4141 , 4141 , 4140 , 4137 , 4135 , 4133 , 4132 , 4130 }, {4102 , 4102 , 4098 , 4094 , 4091 , 4088 , 4087 , 4085 }, {4066 , 4066 , 4061 , 4054 , 4051 , 4047 , 4046 , 4044 }, {4024 , 4024 , 4021 , 4013 , 4011 , 4007 , 4008 , 4006 }, {3986 , 3986 , 3987 , 3980 , 3977 , 3974 , 3974 , 3972 }, {3955 , 3955 , 3960 , 3952 , 3948 , 3944 , 3943 , 3941 }, {3926 
, 3926 , 3930 , 3922 , 3920 , 3917 , 3916 , 3914 }, {3897 , 3897 , 3897 , 3887 , 3886 , 3887 , 3890 , 3889 }, {3871 , 3871 , 3866 , 3855 , 3849 , 3845 , 3846 , 3847 }, {3848 , 3848 , 3841 , 3829 , 3823 , 3819 , 3819 , 3819 }, {3829 , 3829 , 3821 , 3811 , 3806 , 3802 , 3802 , 3802 }, {3812 , 3812 , 3805 , 3797 , 3793 , 3790 , 3789 , 3789 }, {3798 , 3798 , 3793 , 3787 , 3784 , 3781 , 3779 , 3778 }, {3788 , 3788 , 3785 , 3780 , 3778 , 3774 , 3771 , 3768 }, {3781 , 3781 , 3780 , 3775 , 3772 , 3766 , 3758 , 3751 }, {3773 , 3773 , 3774 , 3765 , 3756 , 3742 , 3735 , 3731 }, {3763 , 3763 , 3762 , 3738 , 3721 , 3702 , 3697 , 3696 }, {3747 , 3747 , 3736 , 3703 , 3693 , 3684 , 3679 , 3674 }, {3743 , 3743 , 3730 , 3701 , 3691 , 3674 , 3669 , 3668 }, {3739 , 3739 , 3724 , 3698 , 3688 , 3664 , 3659 , 3661 }, {3735 , 3735 , 3718 , 3695 , 3685 , 3653 , 3649 , 3654 }, {3731 , 3731 , 3712 , 3692 , 3682 , 3643 , 3639 , 3647 }, {3726 , 3726 , 3705 , 3689 , 3679 , 3632 , 3628 , 3640 }, {3722 , 3722 , 3702 , 3669 , 3626 , 3592 , 3589 , 3599 }, {3717 , 3717 , 3698 , 3649 , 3573 , 3551 , 3550 , 3558 }, {3713 , 3713 , 3695 , 3629 , 3520 , 3511 , 3511 , 3517 }, {3708 , 3708 , 3691 , 3609 , 3467 , 3470 , 3472 , 3475 }, {3600 , 3600 , 3550 , 3500 , 3300 , 3300 , 3300 , 3300 } }, }; struct pm8921_bms_battery_data bms_battery_data_id_2 = { .fcc = 1780, .fcc_temp_lut = &fcc_temp_id_2, .fcc_sf_lut = &fcc_sf_id_2, .pc_temp_ocv_lut = &pc_temp_ocv_id_2, .pc_sf_lut = &pc_sf_id_2, }; static struct htc_battery_cell htc_battery_cells[] = { [0] = { .model_name = "BJ83100", .capacity = 1800, .id = 1, .id_raw_min = 73, /* unit:mV (10kohm) */ .id_raw_max = 204, .type = HTC_BATTERY_CELL_TYPE_NORMAL, .voltage_max = 4200, .voltage_min = 3200, .chg_param = &chg_batt_params[0], .gauge_param = &bms_battery_data_id_1, }, [1] = { .model_name = "BJ83100", .capacity = 1800, .id = 2, .id_raw_min = 205, /* unit:mV (22kohm) */ .id_raw_max = 595, .type = HTC_BATTERY_CELL_TYPE_NORMAL, .voltage_max = 4200, .voltage_min = 
3200, .chg_param = &chg_batt_params[0], .gauge_param = &bms_battery_data_id_2, }, [2] = { .model_name = "UNKNOWN", .capacity = 1800, .id = 255, .id_raw_min = INT_MIN, .id_raw_max = INT_MAX, .type = HTC_BATTERY_CELL_TYPE_NORMAL, .voltage_max = 4200, .voltage_min = 3200, .chg_param = &chg_batt_params[0], .gauge_param = NULL, }, }; #endif /* CONFIG_HTC_BATT_8960 */ static struct msm_rpmrs_level msm_rpmrs_levels[] = { { MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT, MSM_RPMRS_LIMITS(ON, ACTIVE, MAX, ACTIVE), true, 1, 784, 180000, 100, }, { MSM_PM_SLEEP_MODE_RETENTION, MSM_RPMRS_LIMITS(ON, ACTIVE, MAX, ACTIVE), true, 415, 715, 340827, 475, }, #ifdef CONFIG_MSM_STANDALONE_POWER_COLLAPSE { MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE, MSM_RPMRS_LIMITS(ON, ACTIVE, MAX, ACTIVE), true, 1300, 228, 1200000, 2000, }, #endif { MSM_PM_SLEEP_MODE_POWER_COLLAPSE, MSM_RPMRS_LIMITS(ON, GDHS, MAX, ACTIVE), false, 2000, 138, 1208400, 3200, }, { MSM_PM_SLEEP_MODE_POWER_COLLAPSE, MSM_RPMRS_LIMITS(ON, HSFS_OPEN, ACTIVE, RET_HIGH), false, 6000, 119, 1850300, 9000, }, { MSM_PM_SLEEP_MODE_POWER_COLLAPSE, MSM_RPMRS_LIMITS(OFF, GDHS, MAX, ACTIVE), false, 9200, 68, 2839200, 16400, }, { MSM_PM_SLEEP_MODE_POWER_COLLAPSE, MSM_RPMRS_LIMITS(OFF, HSFS_OPEN, MAX, ACTIVE), false, 10300, 63, 3128000, 18200, }, { MSM_PM_SLEEP_MODE_POWER_COLLAPSE, MSM_RPMRS_LIMITS(OFF, HSFS_OPEN, ACTIVE, RET_HIGH), false, 18000, 10, 4602600, 27000, }, { MSM_PM_SLEEP_MODE_POWER_COLLAPSE, MSM_RPMRS_LIMITS(OFF, HSFS_OPEN, RET_HIGH, RET_LOW), false, 20000, 2, 5752000, 32000, }, }; static struct msm_rpmrs_platform_data msm_rpmrs_data __initdata = { .levels = &msm_rpmrs_levels[0], .num_levels = ARRAY_SIZE(msm_rpmrs_levels), .vdd_mem_levels = { [MSM_RPMRS_VDD_MEM_RET_LOW] = 750000, [MSM_RPMRS_VDD_MEM_RET_HIGH] = 750000, [MSM_RPMRS_VDD_MEM_ACTIVE] = 1050000, [MSM_RPMRS_VDD_MEM_MAX] = 1150000, }, .vdd_dig_levels = { [MSM_RPMRS_VDD_DIG_RET_LOW] = 500000, [MSM_RPMRS_VDD_DIG_RET_HIGH] = 750000, [MSM_RPMRS_VDD_DIG_ACTIVE] = 950000, 
[MSM_RPMRS_VDD_DIG_MAX] = 1150000, }, .vdd_mask = 0x7FFFFF, .rpmrs_target_id = { [MSM_RPMRS_ID_PXO_CLK] = MSM_RPM_ID_PXO_CLK, [MSM_RPMRS_ID_L2_CACHE_CTL] = MSM_RPM_ID_LAST, [MSM_RPMRS_ID_VDD_DIG_0] = MSM_RPM_ID_PM8921_S3_0, [MSM_RPMRS_ID_VDD_DIG_1] = MSM_RPM_ID_PM8921_S3_1, [MSM_RPMRS_ID_VDD_MEM_0] = MSM_RPM_ID_PM8921_L24_0, [MSM_RPMRS_ID_VDD_MEM_1] = MSM_RPM_ID_PM8921_L24_1, [MSM_RPMRS_ID_RPM_CTL] = MSM_RPM_ID_RPM_CTL, }, }; static struct msm_pm_boot_platform_data msm_pm_boot_pdata __initdata = { .mode = MSM_PM_BOOT_CONFIG_TZ, }; #ifdef CONFIG_I2C #define I2C_SURF 1 #define I2C_FFA (1 << 1) #define I2C_RUMI (1 << 2) #define I2C_SIM (1 << 3) #define I2C_FLUID (1 << 4) struct i2c_registry { u8 machs; int bus; struct i2c_board_info *info; int len; }; static struct i2c_registry msm8960_i2c_devices[] __initdata = { #ifdef CONFIG_FB_MSM_HDMI_MHL #ifdef CONFIG_FB_MSM_HDMI_MHL_SII9234 { I2C_SURF | I2C_FFA, MSM_8960_GSBI8_QUP_I2C_BUS_ID, msm_i2c_gsbi8_mhl_sii9234_info, ARRAY_SIZE(msm_i2c_gsbi8_mhl_sii9234_info), }, #endif #endif #ifdef CONFIG_FLASHLIGHT_TPS61310 { I2C_SURF | I2C_FFA, MSM_8960_GSBI12_QUP_I2C_BUS_ID, i2c_tps61310_flashlight, ARRAY_SIZE(i2c_tps61310_flashlight), }, #endif { I2C_SURF | I2C_FFA, MSM_8960_GSBI3_QUP_I2C_BUS_ID, msm_i2c_gsbi3_info, ARRAY_SIZE(msm_i2c_gsbi3_info), }, { I2C_SURF | I2C_FFA, MSM_8960_GSBI2_QUP_I2C_BUS_ID, msm_i2c_gsbi2_a1028_info, ARRAY_SIZE(msm_i2c_gsbi2_a1028_info), }, { I2C_SURF | I2C_FFA, MSM_8960_GSBI5_QUP_I2C_BUS_ID, pn544_i2c_boardinfo, ARRAY_SIZE(pn544_i2c_boardinfo), }, }; #endif /* CONFIG_I2C */ extern int gy_type; /* from devices_htc.c */ static void __init register_i2c_devices(void) { #ifdef CONFIG_I2C u8 mach_mask = 0; int i, rc; mach_mask = I2C_SURF; /* Run the array and install devices as appropriate */ for (i = 0; i < ARRAY_SIZE(msm8960_i2c_devices); ++i) { if (msm8960_i2c_devices[i].machs & mach_mask) { i2c_register_board_info(msm8960_i2c_devices[i].bus, msm8960_i2c_devices[i].info, msm8960_i2c_devices[i].len); } } if 
((engineerid & 0x03) == 1) { for (rc = 0; rc < ARRAY_SIZE(msm_i2c_gsbi3_info); rc++) { if (!strcmp(msm_i2c_gsbi3_info[rc].type, SYNAPTICS_3200_NAME)) msm_i2c_gsbi3_info[rc].platform_data = &syn_ts_3k_2p5D_7070_data; } } else if ((engineerid & 0x03) == 2) { for (rc = 0; rc < ARRAY_SIZE(msm_i2c_gsbi3_info); rc++) { if (!strcmp(msm_i2c_gsbi3_info[rc].type, SYNAPTICS_3200_NAME)) msm_i2c_gsbi3_info[rc].platform_data = &syn_ts_3k_2p5D_3030_data; } } printk(KERN_DEBUG "%s: gy_type = %d\n", __func__, gy_type); if (gy_type == 2) { i2c_register_board_info(MSM_8960_GSBI12_QUP_I2C_BUS_ID, msm_i2c_sensor_gsbi12_info, ARRAY_SIZE(msm_i2c_sensor_gsbi12_info)); } else { i2c_register_board_info(MSM_8960_GSBI12_QUP_I2C_BUS_ID, mpu3050_GSBI12_boardinfo, ARRAY_SIZE(mpu3050_GSBI12_boardinfo)); } if (system_rev < 3) { i2c_register_board_info(MSM_8960_GSBI12_QUP_I2C_BUS_ID, i2c_CM36282_devices, ARRAY_SIZE(i2c_CM36282_devices)); pr_info("%s: cm36282 PL-sensor for XA,XB,XC, system_rev %d ", __func__, system_rev); } else { i2c_register_board_info(MSM_8960_GSBI12_QUP_I2C_BUS_ID, i2c_CM36282_XD_devices, ARRAY_SIZE(i2c_CM36282_XD_devices)); pr_info("%s: cm36282 PL-sensor for XD and newer HW version, system_rev %d ", __func__, system_rev); } #endif } static void __init msm8960ab_update_krait_spm(void) { int i; /* Update the SPM sequences for SPC and PC */ for (i = 0; i < ARRAY_SIZE(msm_spm_data); i++) { int j; struct msm_spm_platform_data *pdata = &msm_spm_data[i]; for (j = 0; j < pdata->num_modes; j++) { if (pdata->modes[j].cmd == spm_power_collapse_without_rpm) pdata->modes[j].cmd = spm_power_collapse_without_rpm_krait_v3; else if (pdata->modes[j].cmd == spm_power_collapse_with_rpm) pdata->modes[j].cmd = spm_power_collapse_with_rpm_krait_v3; } } } static void __init msm8960ab_update_retention_spm(void) { int i; /* Update the SPM sequences for krait retention on all cores */ for (i = 0; i < ARRAY_SIZE(msm_spm_data); i++) { int j; struct msm_spm_platform_data *pdata = &msm_spm_data[i]; for (j = 
0; j < pdata->num_modes; j++) { if (pdata->modes[j].cmd == spm_retention_cmd_sequence) pdata->modes[j].cmd = spm_retention_with_krait_v3_cmd_sequence; } } } /*UART -> GSBI8*/ static uint32_t msm_uart_gpio[] = { GPIO_CFG(34, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), GPIO_CFG(35, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), }; static void msm_uart_gsbi_gpio_init(void) { gpio_tlmm_config(msm_uart_gpio[0], GPIO_CFG_ENABLE); gpio_tlmm_config(msm_uart_gpio[1], GPIO_CFG_ENABLE); } static uint32_t msm_region_gpio[] = { GPIO_CFG(ELITE_GPIO_REGION_ID, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, 0), }; static void msm_region_id_gpio_init(void) { gpio_tlmm_config(msm_region_gpio[0], GPIO_CFG_ENABLE); } #ifdef CONFIG_RAWCHIP static struct spi_board_info rawchip_spi_board_info[] __initdata = { { .modalias = "spi_rawchip", .max_speed_hz = 27000000, .bus_num = 1, .chip_select = 0, .mode = SPI_MODE_0, }, }; #endif static void __init elite_init(void) { int rc; u32 hw_ver_id = 0; struct kobject *properties_kobj; if (meminfo_init(SYS_MEMORY, SZ_256M) < 0) pr_err("meminfo_init() failed!\n"); htc_add_ramconsole_devices(); platform_device_register(&msm_gpio_device); msm_tsens_early_init(&msm_tsens_pdata); msm_thermal_init(&msm_thermal_pdata); BUG_ON(msm_rpm_init(&msm8960_rpm_data)); BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data)); regulator_suppress_info_printing(); if (msm_xo_init()) pr_err("Failed to initialize XO votes\n"); platform_device_register(&elite_device_rpm_regulator); msm_clock_init(&msm8960_clock_init_data); msm8960_device_otg.dev.platform_data = &msm_otg_pdata; android_usb_pdata.swfi_latency = msm_rpmrs_levels[0].latency_us; elite_gpiomux_init(); msm8960_device_qup_spi_gsbi10.dev.platform_data = &msm8960_qup_spi_gsbi10_pdata; #ifdef CONFIG_RAWCHIP spi_register_board_info(rawchip_spi_board_info, ARRAY_SIZE(rawchip_spi_board_info)); #endif elite_init_pmic(); msm8960_i2c_init(); msm8960_gfx_init(); if (cpu_is_msm8960ab()) msm8960ab_update_krait_spm(); if 
(cpu_is_krait_v3()) { struct msm_pm_init_data_type *pdata = msm8960_pm_8x60.dev.platform_data; pdata->retention_calls_tz = false; msm8960ab_update_retention_spm(); } platform_device_register(&msm8960_pm_8x60); msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data)); msm_spm_l2_init(msm_spm_l2_data); msm8960_init_buses(); elite_cable_detect_register(); #ifdef CONFIG_BT bt_export_bd_address(); #endif #ifdef CONFIG_HTC_BATT_8960 htc_battery_cell_init(htc_battery_cells, ARRAY_SIZE(htc_battery_cells)); #endif /* CONFIG_HTC_BATT_8960 */ platform_add_devices(msm8960_footswitch, msm8960_num_footswitch); platform_device_register(&msm8960_device_ext_l2_vreg); platform_add_devices(common_devices, ARRAY_SIZE(common_devices)); msm_uart_gsbi_gpio_init(); elite_pm8921_gpio_mpp_init(); msm_region_id_gpio_init(); platform_add_devices(elite_devices, ARRAY_SIZE(elite_devices)); elite_init_camera(); elite_init_mmc(); register_i2c_devices(); elite_init_fb(); slim_register_board_info(msm_slim_devices, ARRAY_SIZE(msm_slim_devices)); BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata)); // msm_pm_set_rpm_wakeup_irq(RPM_APCC_CPU0_WAKE_UP_IRQ); /*usb driver won't be loaded in MFG 58 station and gift mode*/ if (!(board_mfg_mode() == 6 || board_mfg_mode() == 7)) elite_add_usb_devices(); properties_kobj = kobject_create_and_add("board_properties", NULL); if (properties_kobj) { if (system_rev < 1) rc = sysfs_create_group(properties_kobj, &properties_attr_group); else rc = sysfs_create_group(properties_kobj, &three_virtual_key_properties_attr_group); } #ifdef CONFIG_PERFLOCK perflock_init(&elite_perflock_data); cpufreq_ceiling_init(&elite_cpufreq_ceiling_data); #endif elite_init_keypad(); hw_ver_id = readl(HW_VER_ID_VIRT); printk(KERN_INFO "hw_ver_id = %x\n", hw_ver_id); } #define PHY_BASE_ADDR1 0x80400000 #define SIZE_ADDR1 (132 * 1024 * 1024) #define PHY_BASE_ADDR2 0x90000000 #define SIZE_ADDR2 (768 * 1024 * 1024) static void __init elite_fixup(struct tag *tags, char **cmdline, struct meminfo *mi) { 
engineerid = parse_tag_engineerid(tags); mi->nr_banks = 2; mi->bank[0].start = PHY_BASE_ADDR1; mi->bank[0].size = SIZE_ADDR1; mi->bank[1].start = PHY_BASE_ADDR2; mi->bank[1].size = SIZE_ADDR2; skuid = parse_tag_skuid((const struct tag *)tags); printk(KERN_INFO "Elite_fixup:skuid=0x%x\n", skuid); } static int __init pm8921_late_init(void) { return 0; } late_initcall(pm8921_late_init); MACHINE_START(ELITE, "elite") .fixup = elite_fixup, .map_io = elite_map_io, .reserve = elite_reserve, .init_irq = elite_init_irq, .handle_irq = gic_handle_irq, .timer = &msm_timer, .init_machine = elite_init, .init_early = msm8960_allocate_memory_regions, .init_very_early = elite_early_memory, .restart = msm_restart, MACHINE_END
gpl-2.0
yaymalaga/yayPrime_kernel
drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.c
265
19754
/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "vcd_ddl.h" #include "vcd_ddl_shared_mem.h" #include "vcd_ddl_metadata.h" #include "vcd_res_tracker_api.h" static u32 *ddl_metadata_hdr_entry(struct ddl_client_context *ddl, u32 meta_data) { u32 skip_words = 0; u32 *buffer; if (ddl->decoding) { buffer = (u32 *) ddl->codec_data.decoder.meta_data_input. align_virtual_addr; skip_words = 32 + 1; buffer += skip_words; switch (meta_data) { default: case VCD_METADATA_DATANONE: skip_words = 0; break; case VCD_METADATA_QPARRAY: skip_words = 3; break; case VCD_METADATA_CONCEALMB: skip_words = 6; break; case VCD_METADATA_VC1: skip_words = 9; break; case VCD_METADATA_SEI: skip_words = 12; break; case VCD_METADATA_VUI: skip_words = 15; break; case VCD_METADATA_PASSTHROUGH: skip_words = 18; break; case VCD_METADATA_QCOMFILLER: skip_words = 21; break; case VCD_METADATA_USER_DATA: skip_words = 27; break; case VCD_METADATA_EXT_DATA: skip_words = 30; break; } } else { buffer = (u32 *) ddl->codec_data.encoder.meta_data_input. 
align_virtual_addr; skip_words = 2; buffer += skip_words; switch (meta_data) { default: case VCD_METADATA_DATANONE: skip_words = 0; break; case VCD_METADATA_ENC_SLICE: skip_words = 3; break; case VCD_METADATA_QCOMFILLER: skip_words = 6; break; } } buffer += skip_words; return buffer; } void ddl_set_default_meta_data_hdr(struct ddl_client_context *ddl) { struct ddl_buf_addr *main_buffer = &ddl->ddl_context->metadata_shared_input; struct ddl_buf_addr *client_buffer; u32 *hdr_entry; if (ddl->decoding) client_buffer = &(ddl->codec_data.decoder.meta_data_input); else client_buffer = &(ddl->codec_data.encoder.meta_data_input); DDL_METADATA_CLIENT_INPUTBUF(main_buffer, client_buffer, ddl->instance_id); hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER); hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101; hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1; hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_QCOMFILLER; hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_DATANONE); hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101; hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1; hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_DATANONE; if (ddl->decoding) { hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QPARRAY); hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101; hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1; hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_QPARRAY; hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_CONCEALMB); hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101; hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1; hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_CONCEALMB; hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_SEI); hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101; hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1; hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_SEI; hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_VUI); hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101; 
hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1; hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_VUI; hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_VC1); hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101; hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1; hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_VC1; hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_PASSTHROUGH); hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101; hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1; hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_PASSTHROUGH; hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_USER_DATA); hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101; hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1; hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_USER_DATA; hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_EXT_DATA); hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101; hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1; hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_EXT_DATA; } else { hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_ENC_SLICE); hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101; hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1; hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_ENC_SLICE; } } static u32 ddl_supported_metadata_flag(struct ddl_client_context *ddl) { u32 flag = 0; if (ddl->decoding) { enum vcd_codec codec = ddl->codec_data.decoder.codec.codec; flag |= (VCD_METADATA_CONCEALMB | VCD_METADATA_PASSTHROUGH | VCD_METADATA_QPARRAY | VCD_METADATA_SEPARATE_BUF); if (codec == VCD_CODEC_H264) flag |= (VCD_METADATA_SEI | VCD_METADATA_VUI); else if (codec == VCD_CODEC_VC1 || codec == VCD_CODEC_VC1_RCV) flag |= VCD_METADATA_VC1; else if (codec == VCD_CODEC_MPEG2) flag |= (VCD_METADATA_USER_DATA | VCD_METADATA_EXT_DATA); } else flag |= VCD_METADATA_ENC_SLICE; return flag; } void ddl_set_default_metadata_flag(struct ddl_client_context *ddl) { if (ddl->decoding) ddl->codec_data.decoder.meta_data_enable_flag = 0; else 
ddl->codec_data.encoder.meta_data_enable_flag = 0; } void ddl_set_default_decoder_metadata_buffer_size(struct ddl_decoder_data *decoder, struct vcd_property_frame_size *frame_size, struct vcd_buffer_requirement *output_buf_req) { u32 flag = decoder->meta_data_enable_flag; u32 suffix = 0, size = 0; if (!flag) { output_buf_req->meta_buffer_size = DDL_SECURE_METADATA_DEFAULT_SIZE; decoder->suffix = 0; return; } if (flag & VCD_METADATA_QPARRAY) { u32 num_of_mb = DDL_NO_OF_MB(frame_size->width, frame_size->height); size = DDL_METADATA_HDR_SIZE; size += num_of_mb; DDL_METADATA_ALIGNSIZE(size); suffix += size; } if (flag & VCD_METADATA_CONCEALMB) { u32 num_of_mb = DDL_NO_OF_MB(frame_size->width, frame_size->height); size = DDL_METADATA_HDR_SIZE + (num_of_mb >> 3); DDL_METADATA_ALIGNSIZE(size); suffix += size; } if (flag & VCD_METADATA_VC1) { size = DDL_METADATA_HDR_SIZE; size += DDL_METADATA_VC1_PAYLOAD_SIZE; DDL_METADATA_ALIGNSIZE(size); suffix += size; } if (flag & VCD_METADATA_SEI) { size = DDL_METADATA_HDR_SIZE; size += DDL_METADATA_SEI_PAYLOAD_SIZE; DDL_METADATA_ALIGNSIZE(size); suffix += (size * DDL_METADATA_SEI_MAX); } if (flag & VCD_METADATA_VUI) { size = DDL_METADATA_HDR_SIZE; size += DDL_METADATA_VUI_PAYLOAD_SIZE; DDL_METADATA_ALIGNSIZE(size); suffix += (size); } if (flag & VCD_METADATA_PASSTHROUGH) { size = DDL_METADATA_HDR_SIZE; size += DDL_METADATA_PASSTHROUGH_PAYLOAD_SIZE; DDL_METADATA_ALIGNSIZE(size); suffix += (size); } if (flag & VCD_METADATA_USER_DATA) { size = DDL_METADATA_HDR_SIZE; size += DDL_METADATA_USER_PAYLOAD_SIZE; DDL_METADATA_ALIGNSIZE(size); suffix += (size); } if (flag & VCD_METADATA_EXT_DATA) { size = DDL_METADATA_HDR_SIZE; size += DDL_METADATA_EXT_PAYLOAD_SIZE; DDL_METADATA_ALIGNSIZE(size); suffix += (size); } size = DDL_METADATA_EXTRADATANONE_SIZE; DDL_METADATA_ALIGNSIZE(size); suffix += (size); suffix += DDL_METADATA_EXTRAPAD_SIZE; DDL_METADATA_ALIGNSIZE(suffix); decoder->suffix = suffix; output_buf_req->sz += suffix; 
output_buf_req->meta_buffer_size = suffix; output_buf_req->meta_buffer_size = (output_buf_req->meta_buffer_size + 8191) & (~8191); decoder->meta_data_offset = 0; DDL_MSG_LOW("metadata output buf size : %d", suffix); } void ddl_set_default_encoder_metadata_buffer_size(struct ddl_encoder_data *encoder) { u32 flag = encoder->meta_data_enable_flag; u32 suffix = 0, size = 0; if (!flag) { encoder->suffix = 0; return; } if (flag & VCD_METADATA_ENC_SLICE) { u32 num_of_mb = DDL_NO_OF_MB(encoder->frame_size.width, encoder->frame_size.height); size = DDL_METADATA_HDR_SIZE; size += 4; size += (num_of_mb << 3); DDL_METADATA_ALIGNSIZE(size); suffix += size; } size = DDL_METADATA_EXTRADATANONE_SIZE; DDL_METADATA_ALIGNSIZE(size); suffix += (size); suffix += DDL_METADATA_EXTRAPAD_SIZE; DDL_METADATA_ALIGNSIZE(suffix); encoder->suffix = suffix; encoder->output_buf_req.sz += suffix; encoder->output_buf_req.sz = DDL_ALIGN(encoder->output_buf_req.sz, DDL_KILO_BYTE(4)); } u32 ddl_set_metadata_params(struct ddl_client_context *ddl, struct vcd_property_hdr *property_hdr, void *property_value) { u32 vcd_status = VCD_ERR_ILLEGAL_PARM; if (property_hdr->prop_id == VCD_I_METADATA_ENABLE) { struct vcd_property_meta_data_enable *meta_data_enable = (struct vcd_property_meta_data_enable *) property_value; u32 *meta_data_enable_flag; enum vcd_codec codec; if (ddl->decoding) { meta_data_enable_flag = &(ddl->codec_data.decoder.meta_data_enable_flag); codec = ddl->codec_data.decoder.codec.codec; } else { meta_data_enable_flag = &ddl->codec_data.encoder.meta_data_enable_flag; codec = ddl->codec_data.encoder.codec.codec; } if (sizeof(struct vcd_property_meta_data_enable) == property_hdr->sz && DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) && codec) { u32 flag = ddl_supported_metadata_flag(ddl); flag &= (meta_data_enable->meta_data_enable_flag); if (flag) flag |= DDL_METADATA_MANDATORY; if (*meta_data_enable_flag != flag) { if (VCD_CODEC_MPEG2 == codec) ddl_set_mp2_dump_default( &ddl->codec_data.decoder, 
flag); *meta_data_enable_flag = flag; if (ddl->decoding) ddl_set_default_decoder_buffer_req( &ddl->codec_data.decoder, true); else ddl_set_default_encoder_buffer_req( &ddl->codec_data.encoder); } vcd_status = VCD_S_SUCCESS; } } else if (property_hdr->prop_id == VCD_I_METADATA_HEADER) { struct vcd_property_metadata_hdr *hdr = (struct vcd_property_metadata_hdr *) property_value; if (sizeof(struct vcd_property_metadata_hdr) == property_hdr->sz) { u32 flag = ddl_supported_metadata_flag(ddl); flag |= DDL_METADATA_MANDATORY; flag &= hdr->meta_data_id; if (!(flag & (flag - 1))) { u32 *hdr_entry = ddl_metadata_hdr_entry(ddl, flag); hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = hdr->version; hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = hdr->port_index; hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = hdr->type; vcd_status = VCD_S_SUCCESS; } } } return vcd_status; } u32 ddl_get_metadata_params(struct ddl_client_context *ddl, struct vcd_property_hdr *property_hdr, void *property_value) { u32 vcd_status = VCD_ERR_ILLEGAL_PARM; if (property_hdr->prop_id == VCD_I_METADATA_ENABLE && sizeof(struct vcd_property_meta_data_enable) == property_hdr->sz) { struct vcd_property_meta_data_enable *meta_data_enable = (struct vcd_property_meta_data_enable *) property_value; meta_data_enable->meta_data_enable_flag = ((ddl->decoding) ? 
(ddl->codec_data.decoder.meta_data_enable_flag) : (ddl->codec_data.encoder.meta_data_enable_flag)); vcd_status = VCD_S_SUCCESS; } else if (property_hdr->prop_id == VCD_I_METADATA_HEADER && sizeof(struct vcd_property_metadata_hdr) == property_hdr->sz) { struct vcd_property_metadata_hdr *hdr = (struct vcd_property_metadata_hdr *) property_value; u32 flag = ddl_supported_metadata_flag(ddl); flag |= DDL_METADATA_MANDATORY; flag &= hdr->meta_data_id; if (!(flag & (flag - 1))) { u32 *hdr_entry = ddl_metadata_hdr_entry(ddl, flag); hdr->version = hdr_entry[DDL_METADATA_HDR_VERSION_INDEX]; hdr->port_index = hdr_entry[DDL_METADATA_HDR_PORT_INDEX]; hdr->type = hdr_entry[DDL_METADATA_HDR_TYPE_INDEX]; vcd_status = VCD_S_SUCCESS; } } return vcd_status; } void ddl_vidc_metadata_enable(struct ddl_client_context *ddl) { u32 flag, extradata_enable = false; u32 qp_enable = false, concealed_mb_enable = false; u32 vc1_param_enable = false, sei_nal_enable = false; u32 vui_enable = false, enc_slice_size_enable = false; u32 mp2_data_dump_enable = false; if (ddl->decoding) flag = ddl->codec_data.decoder.meta_data_enable_flag; else flag = ddl->codec_data.encoder.meta_data_enable_flag; if (flag) { if (flag & VCD_METADATA_QPARRAY) qp_enable = true; if (flag & VCD_METADATA_CONCEALMB) concealed_mb_enable = true; if (flag & VCD_METADATA_VC1) vc1_param_enable = true; if (flag & VCD_METADATA_SEI) sei_nal_enable = true; if (flag & VCD_METADATA_VUI) vui_enable = true; if (flag & VCD_METADATA_ENC_SLICE) enc_slice_size_enable = true; if (flag & VCD_METADATA_PASSTHROUGH) extradata_enable = true; } DDL_MSG_LOW("metadata enable flag : %d", sei_nal_enable); if (flag & VCD_METADATA_EXT_DATA || flag & VCD_METADATA_USER_DATA) { mp2_data_dump_enable = true; ddl->codec_data.decoder.extn_user_data_enable = mp2_data_dump_enable; vidc_sm_set_mp2datadump_enable(&ddl->shared_mem [ddl->command_channel], &ddl->codec_data.decoder.mp2_datadump_enable); } else { mp2_data_dump_enable = false; 
ddl->codec_data.decoder.extn_user_data_enable = mp2_data_dump_enable; } vidc_sm_set_metadata_enable(&ddl->shared_mem [ddl->command_channel], extradata_enable, qp_enable, concealed_mb_enable, vc1_param_enable, sei_nal_enable, vui_enable, enc_slice_size_enable, mp2_data_dump_enable); } u32 ddl_vidc_encode_set_metadata_output_buf(struct ddl_client_context *ddl) { struct ddl_encoder_data *encoder = &ddl->codec_data.encoder; struct vcd_frame_data *stream = &ddl->output_frame.vcd_frm; struct ddl_context *ddl_context; u32 ext_buffer_end, hw_metadata_start; u32 *buffer; ddl_context = ddl_get_context(); ext_buffer_end = (u32) stream->physical + stream->alloc_len; if (!encoder->meta_data_enable_flag) { ext_buffer_end &= ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES); return ext_buffer_end; } hw_metadata_start = (ext_buffer_end - encoder->suffix) & ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES); ext_buffer_end = (hw_metadata_start - 1) & ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES); buffer = (u32 *) encoder->meta_data_input.align_virtual_addr; *buffer++ = encoder->suffix; *buffer = DDL_OFFSET(ddl_context->dram_base_a.align_physical_addr, hw_metadata_start); encoder->meta_data_offset = hw_metadata_start - (u32) stream->physical; return ext_buffer_end; } void ddl_vidc_decode_set_metadata_output(struct ddl_decoder_data *decoder) { struct ddl_context *ddl_context; u32 loopc, yuv_size, dpb; u32 *buffer; struct ddl_dec_buffers *dec_buffers = &decoder->hw_bufs; if (!decoder->meta_data_enable_flag) { decoder->meta_data_offset = 0; return; } dpb = decoder->dp_buf.no_of_dec_pic_buf; ddl_context = ddl_get_context(); yuv_size = ddl_get_yuv_buffer_size(&decoder->client_frame_size, &decoder->buf_format, !decoder->progressive_only, decoder->hdr.decoding, NULL); decoder->meta_data_offset = DDL_ALIGN_SIZE(yuv_size, DDL_LINEAR_BUF_ALIGN_GUARD_BYTES, DDL_LINEAR_BUF_ALIGN_MASK); buffer = (u32 *) decoder->meta_data_input.align_virtual_addr; DDL_MSG_LOW("Metadata offset & size : %d/%d", decoder->meta_data_offset, decoder->suffix); 
if (!(decoder->meta_data_enable_flag & VCD_METADATA_SEPARATE_BUF)) { *buffer++ = decoder->suffix; for (loopc = 0; loopc < dpb; ++loopc) { *buffer++ = (u32)(decoder->meta_data_offset + (u8 *) DDL_OFFSET(ddl_context->dram_base_a. align_physical_addr, decoder->dp_buf. dec_pic_buffers[loopc].vcd_frm.physical)); } } else if (res_trk_get_enable_sec_metadata()) { *buffer++ = decoder->actual_output_buf_req.meta_buffer_size; for (loopc = 0; loopc < dpb; ++loopc) { *buffer++ = DDL_ADDR_OFFSET(ddl_context->dram_base_a, dec_buffers->meta_hdr[loopc]); } } } void ddl_process_encoder_metadata(struct ddl_client_context *ddl) { struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder); struct vcd_frame_data *out_frame = &(ddl->output_frame.vcd_frm); u32 *qfiller_hdr, *qfiller, start_addr; u32 qfiller_size; if (!encoder->meta_data_enable_flag) { out_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA); return; } if (!encoder->enc_frame_info.meta_data_exists) { out_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA); return; } out_frame->flags |= VCD_FRAME_FLAG_EXTRADATA; DDL_MSG_LOW("processing metadata for encoder"); start_addr = (u32) ((u8 *)out_frame->virtual + out_frame->offset); qfiller = (u32 *)((out_frame->data_len + start_addr + 3) & ~3); qfiller_size = (u32)((encoder->meta_data_offset + (u8 *) out_frame->virtual) - (u8 *) qfiller); qfiller_hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER); *qfiller++ = qfiller_size; *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_VERSION_INDEX]; *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_PORT_INDEX]; *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_TYPE_INDEX]; *qfiller = (u32)(qfiller_size - DDL_METADATA_HDR_SIZE); } void ddl_process_decoder_metadata(struct ddl_client_context *ddl) { struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder); struct vcd_frame_data *output_frame = &(ddl->output_frame.vcd_frm); u32 *qfiller_hdr, *qfiller; u32 qfiller_size; if (!decoder->meta_data_enable_flag) { output_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA); return; } 
if (!decoder->meta_data_exists) { output_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA); return; } if (!decoder->mp2_datadump_status && decoder->codec.codec == VCD_CODEC_MPEG2 && !decoder->extn_user_data_enable) { output_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA); return; } DDL_MSG_LOW("processing metadata for decoder"); DDL_MSG_LOW("data_len/metadata_offset : %d/%d", output_frame->data_len, decoder->meta_data_offset); output_frame->flags |= VCD_FRAME_FLAG_EXTRADATA; if (!(decoder->meta_data_enable_flag & VCD_METADATA_SEPARATE_BUF) && (output_frame->data_len != decoder->meta_data_offset)) { qfiller = (u32 *)((u32)((output_frame->data_len + output_frame->offset + (u8 *) output_frame->virtual) + 3) & ~3); qfiller_size = (u32)((decoder->meta_data_offset + (u8 *) output_frame->virtual) - (u8 *) qfiller); qfiller_hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER); *qfiller++ = qfiller_size; *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_VERSION_INDEX]; *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_PORT_INDEX]; *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_TYPE_INDEX]; *qfiller = (u32)(qfiller_size - DDL_METADATA_HDR_SIZE); } } void ddl_set_mp2_dump_default(struct ddl_decoder_data *decoder, u32 flag) { if (flag & VCD_METADATA_EXT_DATA) { decoder->mp2_datadump_enable.pictempscalable_extdump_enable = true; decoder->mp2_datadump_enable.picspat_extdump_enable = true; decoder->mp2_datadump_enable.picdisp_extdump_enable = true; decoder->mp2_datadump_enable.copyright_extdump_enable = true; decoder->mp2_datadump_enable.quantmatrix_extdump_enable = true; decoder->mp2_datadump_enable.seqscalable_extdump_enable = true; decoder->mp2_datadump_enable.seqdisp_extdump_enable = true; decoder->mp2_datadump_enable.seq_extdump_enable = true; } if (flag & VCD_METADATA_USER_DATA) decoder->mp2_datadump_enable.userdatadump_enable = DDL_METADATA_USER_DUMP_FULL_MODE; else decoder->mp2_datadump_enable.userdatadump_enable = DDL_METADATA_USER_DUMP_DISABLE_MODE; }
gpl-2.0
SoraBetty/mptcp
drivers/platform/x86/ideapad-laptop.c
265
22887
/* * ideapad-laptop.c - Lenovo IdeaPad ACPI Extras * * Copyright © 2010 Intel Corporation * Copyright © 2010 David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/acpi.h> #include <linux/rfkill.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <linux/backlight.h> #include <linux/fb.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/i8042.h> #define IDEAPAD_RFKILL_DEV_NUM (3) #define CFG_BT_BIT (16) #define CFG_3G_BIT (17) #define CFG_WIFI_BIT (18) #define CFG_CAMERA_BIT (19) enum { VPCCMD_R_VPC1 = 0x10, VPCCMD_R_BL_MAX, VPCCMD_R_BL, VPCCMD_W_BL, VPCCMD_R_WIFI, VPCCMD_W_WIFI, VPCCMD_R_BT, VPCCMD_W_BT, VPCCMD_R_BL_POWER, VPCCMD_R_NOVO, VPCCMD_R_VPC2, VPCCMD_R_TOUCHPAD, VPCCMD_W_TOUCHPAD, VPCCMD_R_CAMERA, VPCCMD_W_CAMERA, VPCCMD_R_3G, VPCCMD_W_3G, VPCCMD_R_ODD, /* 0x21 */ VPCCMD_W_FAN, VPCCMD_R_RF, VPCCMD_W_RF, VPCCMD_R_FAN = 0x2B, VPCCMD_R_SPECIAL_BUTTONS = 0x31, VPCCMD_W_BL_POWER = 0x33, }; struct ideapad_rfk_priv { int dev; struct ideapad_private *priv; }; struct ideapad_private { struct acpi_device *adev; struct rfkill 
*rfk[IDEAPAD_RFKILL_DEV_NUM]; struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM]; struct platform_device *platform_device; struct input_dev *inputdev; struct backlight_device *blightdev; struct dentry *debug; unsigned long cfg; }; static bool no_bt_rfkill; module_param(no_bt_rfkill, bool, 0444); MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth."); /* * ACPI Helpers */ #define IDEAPAD_EC_TIMEOUT (100) /* in ms */ static int read_method_int(acpi_handle handle, const char *method, int *val) { acpi_status status; unsigned long long result; status = acpi_evaluate_integer(handle, (char *)method, NULL, &result); if (ACPI_FAILURE(status)) { *val = -1; return -1; } else { *val = result; return 0; } } static int method_vpcr(acpi_handle handle, int cmd, int *ret) { acpi_status status; unsigned long long result; struct acpi_object_list params; union acpi_object in_obj; params.count = 1; params.pointer = &in_obj; in_obj.type = ACPI_TYPE_INTEGER; in_obj.integer.value = cmd; status = acpi_evaluate_integer(handle, "VPCR", &params, &result); if (ACPI_FAILURE(status)) { *ret = -1; return -1; } else { *ret = result; return 0; } } static int method_vpcw(acpi_handle handle, int cmd, int data) { struct acpi_object_list params; union acpi_object in_obj[2]; acpi_status status; params.count = 2; params.pointer = in_obj; in_obj[0].type = ACPI_TYPE_INTEGER; in_obj[0].integer.value = cmd; in_obj[1].type = ACPI_TYPE_INTEGER; in_obj[1].integer.value = data; status = acpi_evaluate_object(handle, "VPCW", &params, NULL); if (status != AE_OK) return -1; return 0; } static int read_ec_data(acpi_handle handle, int cmd, unsigned long *data) { int val; unsigned long int end_jiffies; if (method_vpcw(handle, 1, cmd)) return -1; for (end_jiffies = jiffies+(HZ)*IDEAPAD_EC_TIMEOUT/1000+1; time_before(jiffies, end_jiffies);) { schedule(); if (method_vpcr(handle, 1, &val)) return -1; if (val == 0) { if (method_vpcr(handle, 0, &val)) return -1; *data = val; return 0; } } pr_err("timeout in 
read_ec_cmd\n"); return -1; } static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data) { int val; unsigned long int end_jiffies; if (method_vpcw(handle, 0, data)) return -1; if (method_vpcw(handle, 1, cmd)) return -1; for (end_jiffies = jiffies+(HZ)*IDEAPAD_EC_TIMEOUT/1000+1; time_before(jiffies, end_jiffies);) { schedule(); if (method_vpcr(handle, 1, &val)) return -1; if (val == 0) return 0; } pr_err("timeout in write_ec_cmd\n"); return -1; } /* * debugfs */ static int debugfs_status_show(struct seq_file *s, void *data) { struct ideapad_private *priv = s->private; unsigned long value; if (!priv) return -EINVAL; if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value)) seq_printf(s, "Backlight max:\t%lu\n", value); if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value)) seq_printf(s, "Backlight now:\t%lu\n", value); if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value)) seq_printf(s, "BL power value:\t%s\n", value ? "On" : "Off"); seq_printf(s, "=====================\n"); if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value)) seq_printf(s, "Radio status:\t%s(%lu)\n", value ? "On" : "Off", value); if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value)) seq_printf(s, "Wifi status:\t%s(%lu)\n", value ? "On" : "Off", value); if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value)) seq_printf(s, "BT status:\t%s(%lu)\n", value ? "On" : "Off", value); if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value)) seq_printf(s, "3G status:\t%s(%lu)\n", value ? "On" : "Off", value); seq_printf(s, "=====================\n"); if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) seq_printf(s, "Touchpad status:%s(%lu)\n", value ? "On" : "Off", value); if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value)) seq_printf(s, "Camera status:\t%s(%lu)\n", value ? 
"On" : "Off", value); return 0; } static int debugfs_status_open(struct inode *inode, struct file *file) { return single_open(file, debugfs_status_show, inode->i_private); } static const struct file_operations debugfs_status_fops = { .owner = THIS_MODULE, .open = debugfs_status_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int debugfs_cfg_show(struct seq_file *s, void *data) { struct ideapad_private *priv = s->private; if (!priv) { seq_printf(s, "cfg: N/A\n"); } else { seq_printf(s, "cfg: 0x%.8lX\n\nCapability: ", priv->cfg); if (test_bit(CFG_BT_BIT, &priv->cfg)) seq_printf(s, "Bluetooth "); if (test_bit(CFG_3G_BIT, &priv->cfg)) seq_printf(s, "3G "); if (test_bit(CFG_WIFI_BIT, &priv->cfg)) seq_printf(s, "Wireless "); if (test_bit(CFG_CAMERA_BIT, &priv->cfg)) seq_printf(s, "Camera "); seq_printf(s, "\nGraphic: "); switch ((priv->cfg)&0x700) { case 0x100: seq_printf(s, "Intel"); break; case 0x200: seq_printf(s, "ATI"); break; case 0x300: seq_printf(s, "Nvidia"); break; case 0x400: seq_printf(s, "Intel and ATI"); break; case 0x500: seq_printf(s, "Intel and Nvidia"); break; } seq_printf(s, "\n"); } return 0; } static int debugfs_cfg_open(struct inode *inode, struct file *file) { return single_open(file, debugfs_cfg_show, inode->i_private); } static const struct file_operations debugfs_cfg_fops = { .owner = THIS_MODULE, .open = debugfs_cfg_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int ideapad_debugfs_init(struct ideapad_private *priv) { struct dentry *node; priv->debug = debugfs_create_dir("ideapad", NULL); if (priv->debug == NULL) { pr_err("failed to create debugfs directory"); goto errout; } node = debugfs_create_file("cfg", S_IRUGO, priv->debug, priv, &debugfs_cfg_fops); if (!node) { pr_err("failed to create cfg in debugfs"); goto errout; } node = debugfs_create_file("status", S_IRUGO, priv->debug, priv, &debugfs_status_fops); if (!node) { pr_err("failed to create status in debugfs"); goto 
errout; } return 0; errout: return -ENOMEM; } static void ideapad_debugfs_exit(struct ideapad_private *priv) { debugfs_remove_recursive(priv->debug); priv->debug = NULL; } /* * sysfs */ static ssize_t show_ideapad_cam(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long result; struct ideapad_private *priv = dev_get_drvdata(dev); if (read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result)) return sprintf(buf, "-1\n"); return sprintf(buf, "%lu\n", result); } static ssize_t store_ideapad_cam(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret, state; struct ideapad_private *priv = dev_get_drvdata(dev); if (!count) return 0; if (sscanf(buf, "%i", &state) != 1) return -EINVAL; ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state); if (ret < 0) return -EIO; return count; } static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam); static ssize_t show_ideapad_fan(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long result; struct ideapad_private *priv = dev_get_drvdata(dev); if (read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result)) return sprintf(buf, "-1\n"); return sprintf(buf, "%lu\n", result); } static ssize_t store_ideapad_fan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret, state; struct ideapad_private *priv = dev_get_drvdata(dev); if (!count) return 0; if (sscanf(buf, "%i", &state) != 1) return -EINVAL; if (state < 0 || state > 4 || state == 3) return -EINVAL; ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state); if (ret < 0) return -EIO; return count; } static DEVICE_ATTR(fan_mode, 0644, show_ideapad_fan, store_ideapad_fan); static struct attribute *ideapad_attributes[] = { &dev_attr_camera_power.attr, &dev_attr_fan_mode.attr, NULL }; static umode_t ideapad_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { struct device *dev = container_of(kobj, struct device, kobj); struct 
ideapad_private *priv = dev_get_drvdata(dev); bool supported; if (attr == &dev_attr_camera_power.attr) supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg)); else if (attr == &dev_attr_fan_mode.attr) { unsigned long value; supported = !read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &value); } else supported = true; return supported ? attr->mode : 0; } static struct attribute_group ideapad_attribute_group = { .is_visible = ideapad_is_visible, .attrs = ideapad_attributes }; /* * Rfkill */ struct ideapad_rfk_data { char *name; int cfgbit; int opcode; int type; }; const struct ideapad_rfk_data ideapad_rfk_data[] = { { "ideapad_wlan", CFG_WIFI_BIT, VPCCMD_W_WIFI, RFKILL_TYPE_WLAN }, { "ideapad_bluetooth", CFG_BT_BIT, VPCCMD_W_BT, RFKILL_TYPE_BLUETOOTH }, { "ideapad_3g", CFG_3G_BIT, VPCCMD_W_3G, RFKILL_TYPE_WWAN }, }; static int ideapad_rfk_set(void *data, bool blocked) { struct ideapad_rfk_priv *priv = data; return write_ec_cmd(priv->priv->adev->handle, priv->dev, !blocked); } static struct rfkill_ops ideapad_rfk_ops = { .set_block = ideapad_rfk_set, }; static void ideapad_sync_rfk_state(struct ideapad_private *priv) { unsigned long hw_blocked; int i; if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, &hw_blocked)) return; hw_blocked = !hw_blocked; for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) if (priv->rfk[i]) rfkill_set_hw_state(priv->rfk[i], hw_blocked); } static int ideapad_register_rfkill(struct ideapad_private *priv, int dev) { int ret; unsigned long sw_blocked; if (no_bt_rfkill && (ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH)) { /* Force to enable bluetooth when no_bt_rfkill=1 */ write_ec_cmd(priv->adev->handle, ideapad_rfk_data[dev].opcode, 1); return 0; } priv->rfk_priv[dev].dev = dev; priv->rfk_priv[dev].priv = priv; priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name, &priv->platform_device->dev, ideapad_rfk_data[dev].type, &ideapad_rfk_ops, &priv->rfk_priv[dev]); if (!priv->rfk[dev]) return -ENOMEM; if (read_ec_data(priv->adev->handle, 
ideapad_rfk_data[dev].opcode-1, &sw_blocked)) { rfkill_init_sw_state(priv->rfk[dev], 0); } else { sw_blocked = !sw_blocked; rfkill_init_sw_state(priv->rfk[dev], sw_blocked); } ret = rfkill_register(priv->rfk[dev]); if (ret) { rfkill_destroy(priv->rfk[dev]); return ret; } return 0; } static void ideapad_unregister_rfkill(struct ideapad_private *priv, int dev) { if (!priv->rfk[dev]) return; rfkill_unregister(priv->rfk[dev]); rfkill_destroy(priv->rfk[dev]); } /* * Platform device */ static int ideapad_sysfs_init(struct ideapad_private *priv) { return sysfs_create_group(&priv->platform_device->dev.kobj, &ideapad_attribute_group); } static void ideapad_sysfs_exit(struct ideapad_private *priv) { sysfs_remove_group(&priv->platform_device->dev.kobj, &ideapad_attribute_group); } /* * input device */ static const struct key_entry ideapad_keymap[] = { { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } }, { KE_KEY, 7, { KEY_CAMERA } }, { KE_KEY, 11, { KEY_F16 } }, { KE_KEY, 13, { KEY_WLAN } }, { KE_KEY, 16, { KEY_PROG1 } }, { KE_KEY, 17, { KEY_PROG2 } }, { KE_KEY, 64, { KEY_PROG3 } }, { KE_KEY, 65, { KEY_PROG4 } }, { KE_KEY, 66, { KEY_TOUCHPAD_OFF } }, { KE_KEY, 67, { KEY_TOUCHPAD_ON } }, { KE_END, 0 }, }; static int ideapad_input_init(struct ideapad_private *priv) { struct input_dev *inputdev; int error; inputdev = input_allocate_device(); if (!inputdev) return -ENOMEM; inputdev->name = "Ideapad extra buttons"; inputdev->phys = "ideapad/input0"; inputdev->id.bustype = BUS_HOST; inputdev->dev.parent = &priv->platform_device->dev; error = sparse_keymap_setup(inputdev, ideapad_keymap, NULL); if (error) { pr_err("Unable to setup input device keymap\n"); goto err_free_dev; } error = input_register_device(inputdev); if (error) { pr_err("Unable to register input device\n"); goto err_free_keymap; } priv->inputdev = inputdev; return 0; err_free_keymap: sparse_keymap_free(inputdev); err_free_dev: input_free_device(inputdev); return error; } static void ideapad_input_exit(struct ideapad_private 
*priv) { sparse_keymap_free(priv->inputdev); input_unregister_device(priv->inputdev); priv->inputdev = NULL; } static void ideapad_input_report(struct ideapad_private *priv, unsigned long scancode) { sparse_keymap_report_event(priv->inputdev, scancode, 1, true); } static void ideapad_input_novokey(struct ideapad_private *priv) { unsigned long long_pressed; if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed)) return; if (long_pressed) ideapad_input_report(priv, 17); else ideapad_input_report(priv, 16); } static void ideapad_check_special_buttons(struct ideapad_private *priv) { unsigned long bit, value; read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value); for (bit = 0; bit < 16; bit++) { if (test_bit(bit, &value)) { switch (bit) { case 0: /* Z580 */ case 6: /* Z570 */ /* Thermal Management button */ ideapad_input_report(priv, 65); break; case 1: /* OneKey Theater button */ ideapad_input_report(priv, 64); break; default: pr_info("Unknown special button: %lu\n", bit); break; } } } } /* * backlight */ static int ideapad_backlight_get_brightness(struct backlight_device *blightdev) { struct ideapad_private *priv = bl_get_data(blightdev); unsigned long now; if (!priv) return -EINVAL; if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now)) return -EIO; return now; } static int ideapad_backlight_update_status(struct backlight_device *blightdev) { struct ideapad_private *priv = bl_get_data(blightdev); if (!priv) return -EINVAL; if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL, blightdev->props.brightness)) return -EIO; if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL_POWER, blightdev->props.power == FB_BLANK_POWERDOWN ? 
0 : 1)) return -EIO; return 0; } static const struct backlight_ops ideapad_backlight_ops = { .get_brightness = ideapad_backlight_get_brightness, .update_status = ideapad_backlight_update_status, }; static int ideapad_backlight_init(struct ideapad_private *priv) { struct backlight_device *blightdev; struct backlight_properties props; unsigned long max, now, power; if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &max)) return -EIO; if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now)) return -EIO; if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power)) return -EIO; memset(&props, 0, sizeof(struct backlight_properties)); props.max_brightness = max; props.type = BACKLIGHT_PLATFORM; blightdev = backlight_device_register("ideapad", &priv->platform_device->dev, priv, &ideapad_backlight_ops, &props); if (IS_ERR(blightdev)) { pr_err("Could not register backlight device\n"); return PTR_ERR(blightdev); } priv->blightdev = blightdev; blightdev->props.brightness = now; blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; backlight_update_status(blightdev); return 0; } static void ideapad_backlight_exit(struct ideapad_private *priv) { if (priv->blightdev) backlight_device_unregister(priv->blightdev); priv->blightdev = NULL; } static void ideapad_backlight_notify_power(struct ideapad_private *priv) { unsigned long power; struct backlight_device *blightdev = priv->blightdev; if (!blightdev) return; if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power)) return; blightdev->props.power = power ? 
FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; } static void ideapad_backlight_notify_brightness(struct ideapad_private *priv) { unsigned long now; /* if we control brightness via acpi video driver */ if (priv->blightdev == NULL) { read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now); return; } backlight_force_update(priv->blightdev, BACKLIGHT_UPDATE_HOTKEY); } /* * module init/exit */ static void ideapad_sync_touchpad_state(struct ideapad_private *priv) { unsigned long value; /* Without reading from EC touchpad LED doesn't switch state */ if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) { /* Some IdeaPads don't really turn off touchpad - they only * switch the LED state. We (de)activate KBC AUX port to turn * touchpad off and on. We send KEY_TOUCHPAD_OFF and * KEY_TOUCHPAD_ON to not to get out of sync with LED */ unsigned char param; i8042_command(&param, value ? I8042_CMD_AUX_ENABLE : I8042_CMD_AUX_DISABLE); ideapad_input_report(priv, value ? 67 : 66); } } static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) { struct ideapad_private *priv = data; unsigned long vpc1, vpc2, vpc_bit; if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1)) return; if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2)) return; vpc1 = (vpc2 << 8) | vpc1; for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) { if (test_bit(vpc_bit, &vpc1)) { switch (vpc_bit) { case 9: ideapad_sync_rfk_state(priv); break; case 13: case 11: case 7: case 6: ideapad_input_report(priv, vpc_bit); break; case 5: ideapad_sync_touchpad_state(priv); break; case 4: ideapad_backlight_notify_brightness(priv); break; case 3: ideapad_input_novokey(priv); break; case 2: ideapad_backlight_notify_power(priv); break; case 0: ideapad_check_special_buttons(priv); break; default: pr_info("Unknown event: %lu\n", vpc_bit); } } } } static int ideapad_acpi_add(struct platform_device *pdev) { int ret, i; int cfg; struct ideapad_private *priv; struct acpi_device *adev; ret = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev); 
if (ret) return -ENODEV; if (read_method_int(adev->handle, "_CFG", &cfg)) return -ENODEV; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; dev_set_drvdata(&pdev->dev, priv); priv->cfg = cfg; priv->adev = adev; priv->platform_device = pdev; ret = ideapad_sysfs_init(priv); if (ret) goto sysfs_failed; ret = ideapad_debugfs_init(priv); if (ret) goto debugfs_failed; ret = ideapad_input_init(priv); if (ret) goto input_failed; for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) { if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg)) ideapad_register_rfkill(priv, i); else priv->rfk[i] = NULL; } ideapad_sync_rfk_state(priv); ideapad_sync_touchpad_state(priv); if (!acpi_video_backlight_support()) { ret = ideapad_backlight_init(priv); if (ret && ret != -ENODEV) goto backlight_failed; } ret = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv); if (ret) goto notification_failed; return 0; notification_failed: ideapad_backlight_exit(priv); backlight_failed: for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) ideapad_unregister_rfkill(priv, i); ideapad_input_exit(priv); input_failed: ideapad_debugfs_exit(priv); debugfs_failed: ideapad_sysfs_exit(priv); sysfs_failed: kfree(priv); return ret; } static int ideapad_acpi_remove(struct platform_device *pdev) { struct ideapad_private *priv = dev_get_drvdata(&pdev->dev); int i; acpi_remove_notify_handler(priv->adev->handle, ACPI_DEVICE_NOTIFY, ideapad_acpi_notify); ideapad_backlight_exit(priv); for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) ideapad_unregister_rfkill(priv, i); ideapad_input_exit(priv); ideapad_debugfs_exit(priv); ideapad_sysfs_exit(priv); dev_set_drvdata(&pdev->dev, NULL); kfree(priv); return 0; } #ifdef CONFIG_PM_SLEEP static int ideapad_acpi_resume(struct device *device) { struct ideapad_private *priv; if (!device) return -EINVAL; priv = dev_get_drvdata(device); ideapad_sync_rfk_state(priv); ideapad_sync_touchpad_state(priv); return 0; } #endif static 
SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume); static const struct acpi_device_id ideapad_device_ids[] = { { "VPC2004", 0}, { "", 0}, }; MODULE_DEVICE_TABLE(acpi, ideapad_device_ids); static struct platform_driver ideapad_acpi_driver = { .probe = ideapad_acpi_add, .remove = ideapad_acpi_remove, .driver = { .name = "ideapad_acpi", .owner = THIS_MODULE, .pm = &ideapad_pm, .acpi_match_table = ACPI_PTR(ideapad_device_ids), }, }; module_platform_driver(ideapad_acpi_driver); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_DESCRIPTION("IdeaPad ACPI Extras"); MODULE_LICENSE("GPL");
gpl-2.0
nhondong/android_kernel_samsung_v2wifixx
drivers/net/wireless/bcmdhd4354/dhd_custom_gpio.c
265
8668
/* * Customer code to add GPIO control during WLAN start/stop * Copyright (C) 1999-2014, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* * $Id: dhd_custom_gpio.c 447089 2014-01-08 04:05:58Z $ */ #include <typedefs.h> #include <linuxver.h> #include <osl.h> #include <bcmutils.h> #include <dngl_stats.h> #include <dhd.h> #include <dhd_linux.h> #include <wlioctl.h> #include <wl_iw.h> #define WL_ERROR(x) printf x #define WL_TRACE(x) #if defined(CUSTOMER_HW2) || defined(CUSTOMER_HW4) #if defined(PLATFORM_MPS) int __attribute__ ((weak)) wifi_get_fw_nv_path(char *fw, char *nv) { return 0;}; #endif #endif /* CUSTOMER_HW2 || CUSTOMER_HW4 */ #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) #if defined(BCMLXSDMMC) extern int sdioh_mmc_irq(int irq); #endif /* (BCMLXSDMMC) */ #if defined(CUSTOMER_HW3) || defined(PLATFORM_MPS) #include <mach/gpio.h> #endif /* Customer specific Host GPIO defintion */ static int dhd_oob_gpio_num = -1; module_param(dhd_oob_gpio_num, int, 0644); MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number"); /* This function will return: * 1) return : Host gpio interrupt number per customer platform * 2) irq_flags_ptr : Type of Host interrupt as Level or Edge * * NOTE : * Customer should check his platform definitions * and his Host Interrupt spec * to figure out the proper setting for his platform. * Broadcom provides just reference settings as example. 
* */ int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr) { int host_oob_irq = 0; #if (defined(CUSTOMER_HW2) || defined(CUSTOMER_HW4)) && !defined(PLATFORM_MPS) host_oob_irq = wifi_platform_get_irq_number(adapter, irq_flags_ptr); #else #if defined(CUSTOM_OOB_GPIO_NUM) if (dhd_oob_gpio_num < 0) { dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM; } #endif /* CUSTOMER_OOB_GPIO_NUM */ if (dhd_oob_gpio_num < 0) { WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n", __FUNCTION__)); return (dhd_oob_gpio_num); } WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n", __FUNCTION__, dhd_oob_gpio_num)); #if defined CUSTOMER_HW3 || defined(PLATFORM_MPS) gpio_request(dhd_oob_gpio_num, "oob irq"); host_oob_irq = gpio_to_irq(dhd_oob_gpio_num); gpio_direction_input(dhd_oob_gpio_num); #endif /* defined CUSTOMER_HW3 || defined(PLATFORM_MPS) */ #endif /* CUSTOMER_HW2 || CUSTOMER_HW4 */ return (host_oob_irq); } #endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */ /* Customer function to control hw specific wlan gpios */ int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff) { int err = 0; return err; } #ifdef GET_CUSTOM_MAC_ENABLE /* Function to get custom MAC address */ int dhd_custom_get_mac_address(void *adapter, unsigned char *buf) { int ret = 0; WL_TRACE(("%s Enter\n", __FUNCTION__)); if (!buf) return -EINVAL; /* Customer access to MAC address stored outside of DHD driver */ #if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) ret = wifi_platform_get_mac_addr(adapter, buf); #endif #ifdef EXAMPLE_GET_MAC /* EXAMPLE code */ { struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}}; bcopy((char *)&ea_example, buf, sizeof(struct ether_addr)); } #endif /* EXAMPLE_GET_MAC */ return ret; } #endif /* GET_CUSTOM_MAC_ENABLE */ #if !defined(CUSTOMER_HW4) || defined(PLATFORM_MPS) /* Customized Locale table : OPTIONAL feature */ const struct cntry_locales_custom translate_custom_table[] = { /* Table should be 
filled out based on custom platform regulatory requirement */ #ifdef EXAMPLE_TABLE {"", "XY", 4}, /* Universal if Country code is unknown or empty */ {"US", "US", 69}, /* input ISO "US" to : US regrev 69 */ {"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */ {"EU", "EU", 5}, /* European union countries to : EU regrev 05 */ {"AT", "EU", 5}, {"BE", "EU", 5}, {"BG", "EU", 5}, {"CY", "EU", 5}, {"CZ", "EU", 5}, {"DK", "EU", 5}, {"EE", "EU", 5}, {"FI", "EU", 5}, {"FR", "EU", 5}, {"DE", "EU", 5}, {"GR", "EU", 5}, {"HU", "EU", 5}, {"IE", "EU", 5}, {"IT", "EU", 5}, {"LV", "EU", 5}, {"LI", "EU", 5}, {"LT", "EU", 5}, {"LU", "EU", 5}, {"MT", "EU", 5}, {"NL", "EU", 5}, {"PL", "EU", 5}, {"PT", "EU", 5}, {"RO", "EU", 5}, {"SK", "EU", 5}, {"SI", "EU", 5}, {"ES", "EU", 5}, {"SE", "EU", 5}, {"GB", "EU", 5}, {"KR", "XY", 3}, {"AU", "XY", 3}, {"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */ {"TW", "XY", 3}, {"AR", "XY", 3}, {"MX", "XY", 3}, {"IL", "IL", 0}, {"CH", "CH", 0}, {"TR", "TR", 0}, {"NO", "NO", 0}, #endif /* EXMAPLE_TABLE */ #if defined(CUSTOMER_HW2) #if defined(BCM4334_CHIP) || defined(BCM4335_CHIP) {"", "XZ", 11}, /* Universal if Country code is unknown or empty */ #endif {"AE", "AE", 1}, {"AR", "AR", 1}, {"AT", "AT", 1}, {"AU", "AU", 2}, {"BE", "BE", 1}, {"BG", "BG", 1}, {"BN", "BN", 1}, {"CA", "CA", 2}, {"CH", "CH", 1}, {"CY", "CY", 1}, {"CZ", "CZ", 1}, {"DE", "DE", 3}, {"DK", "DK", 1}, {"EE", "EE", 1}, {"ES", "ES", 1}, {"FI", "FI", 1}, {"FR", "FR", 1}, {"GB", "GB", 1}, {"GR", "GR", 1}, {"HR", "HR", 1}, {"HU", "HU", 1}, {"IE", "IE", 1}, {"IS", "IS", 1}, {"IT", "IT", 1}, {"ID", "ID", 1}, {"JP", "JP", 8}, {"KR", "KR", 24}, {"KW", "KW", 1}, {"LI", "LI", 1}, {"LT", "LT", 1}, {"LU", "LU", 1}, {"LV", "LV", 1}, {"MA", "MA", 1}, {"MT", "MT", 1}, {"MX", "MX", 1}, {"NL", "NL", 1}, {"NO", "NO", 1}, {"PL", "PL", 1}, {"PT", "PT", 1}, {"PY", "PY", 1}, {"RO", "RO", 1}, {"SE", "SE", 1}, {"SI", "SI", 1}, {"SK", "SK", 1}, {"TR", "TR", 7}, {"TW", "TW", 1}, {"IR", "XZ", 11}, /* 
Universal if Country code is IRAN, (ISLAMIC REPUBLIC OF) */ {"SD", "XZ", 11}, /* Universal if Country code is SUDAN */ {"SY", "XZ", 11}, /* Universal if Country code is SYRIAN ARAB REPUBLIC */ {"GL", "XZ", 11}, /* Universal if Country code is GREENLAND */ {"PS", "XZ", 11}, /* Universal if Country code is PALESTINIAN TERRITORY, OCCUPIED */ {"TL", "XZ", 11}, /* Universal if Country code is TIMOR-LESTE (EAST TIMOR) */ {"MH", "XZ", 11}, /* Universal if Country code is MARSHALL ISLANDS */ #ifdef BCM4334_CHIP {"US", "US", 0} {"RU", "RU", 5}, {"SG", "SG", 4}, {"US", "US", 46} #endif #ifdef BCM4330_CHIP {"RU", "RU", 1}, {"US", "US", 5} #endif #endif /* CUSTOMER_HW2 */ }; /* Customized Locale convertor * input : ISO 3166-1 country abbreviation * output: customized cspec */ void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec) { #if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) struct cntry_locales_custom *cloc_ptr; if (!cspec) return; cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code); if (cloc_ptr) { strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ); cspec->rev = cloc_ptr->custom_locale_rev; } return; #else int size, i; size = ARRAYSIZE(translate_custom_table); if (cspec == 0) return; if (size == 0) return; for (i = 0; i < size; i++) { if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) { memcpy(cspec->ccode, translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ); cspec->rev = translate_custom_table[i].custom_locale_rev; return; } } #ifdef EXAMPLE_TABLE /* if no country code matched return first universal code from translate_custom_table */ memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ); cspec->rev = translate_custom_table[0].custom_locale_rev; #endif /* EXMAPLE_TABLE */ return; #endif /* defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)) */ } #endif /* !CUSTOMER_HW4 */
gpl-2.0
argakon/android_kernel_swift
drivers/video/msm/mdp_ppp22.c
521
29587
/* drivers/video/msm/mdp_ppp22.c * * Copyright (C) 2007 QUALCOMM Incorporated * Copyright (C) 2007 Google Incorporated * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <asm/io.h> #include <linux/msm_mdp.h> #include "mdp_hw.h" #include "mdp_ppp.h" struct mdp_table_entry { uint32_t reg; uint32_t val; }; enum { MDP_DOWNSCALE_PT2TOPT4, MDP_DOWNSCALE_PT4TOPT6, MDP_DOWNSCALE_PT6TOPT8, MDP_DOWNSCALE_PT8TO1, MDP_DOWNSCALE_MAX, /* not technically in the downscale table list */ MDP_DOWNSCALE_BLUR, }; static int downscale_x_table; static int downscale_y_table; static struct mdp_table_entry mdp_upscale_table[] = { { 0x5fffc, 0x0 }, { 0x50200, 0x7fc00000 }, { 0x5fffc, 0xff80000d }, { 0x50204, 0x7ec003f9 }, { 0x5fffc, 0xfec0001c }, { 0x50208, 0x7d4003f3 }, { 0x5fffc, 0xfe40002b }, { 0x5020c, 0x7b8003ed }, { 0x5fffc, 0xfd80003c }, { 0x50210, 0x794003e8 }, { 0x5fffc, 0xfcc0004d }, { 0x50214, 0x76c003e4 }, { 0x5fffc, 0xfc40005f }, { 0x50218, 0x73c003e0 }, { 0x5fffc, 0xfb800071 }, { 0x5021c, 0x708003de }, { 0x5fffc, 0xfac00085 }, { 0x50220, 0x6d0003db }, { 0x5fffc, 0xfa000098 }, { 0x50224, 0x698003d9 }, { 0x5fffc, 0xf98000ac }, { 0x50228, 0x654003d8 }, { 0x5fffc, 0xf8c000c1 }, { 0x5022c, 0x610003d7 }, { 0x5fffc, 0xf84000d5 }, { 0x50230, 0x5c8003d7 }, { 0x5fffc, 0xf7c000e9 }, { 0x50234, 0x580003d7 }, { 0x5fffc, 0xf74000fd }, { 0x50238, 0x534003d8 }, { 0x5fffc, 0xf6c00112 }, { 0x5023c, 0x4e8003d8 }, { 0x5fffc, 0xf6800126 }, { 0x50240, 0x494003da }, { 0x5fffc, 0xf600013a }, { 0x50244, 0x448003db }, { 0x5fffc, 0xf600014d }, { 0x50248, 0x3f4003dd }, { 
0x5fffc, 0xf5c00160 }, { 0x5024c, 0x3a4003df }, { 0x5fffc, 0xf5c00172 }, { 0x50250, 0x354003e1 }, { 0x5fffc, 0xf5c00184 }, { 0x50254, 0x304003e3 }, { 0x5fffc, 0xf6000195 }, { 0x50258, 0x2b0003e6 }, { 0x5fffc, 0xf64001a6 }, { 0x5025c, 0x260003e8 }, { 0x5fffc, 0xf6c001b4 }, { 0x50260, 0x214003eb }, { 0x5fffc, 0xf78001c2 }, { 0x50264, 0x1c4003ee }, { 0x5fffc, 0xf80001cf }, { 0x50268, 0x17c003f1 }, { 0x5fffc, 0xf90001db }, { 0x5026c, 0x134003f3 }, { 0x5fffc, 0xfa0001e5 }, { 0x50270, 0xf0003f6 }, { 0x5fffc, 0xfb4001ee }, { 0x50274, 0xac003f9 }, { 0x5fffc, 0xfcc001f5 }, { 0x50278, 0x70003fb }, { 0x5fffc, 0xfe4001fb }, { 0x5027c, 0x34003fe }, }; static struct mdp_table_entry mdp_downscale_x_table_PT2TOPT4[] = { { 0x5fffc, 0x740008c }, { 0x50280, 0x33800088 }, { 0x5fffc, 0x800008e }, { 0x50284, 0x33400084 }, { 0x5fffc, 0x8400092 }, { 0x50288, 0x33000080 }, { 0x5fffc, 0x9000094 }, { 0x5028c, 0x3300007b }, { 0x5fffc, 0x9c00098 }, { 0x50290, 0x32400077 }, { 0x5fffc, 0xa40009b }, { 0x50294, 0x32000073 }, { 0x5fffc, 0xb00009d }, { 0x50298, 0x31c0006f }, { 0x5fffc, 0xbc000a0 }, { 0x5029c, 0x3140006b }, { 0x5fffc, 0xc8000a2 }, { 0x502a0, 0x31000067 }, { 0x5fffc, 0xd8000a5 }, { 0x502a4, 0x30800062 }, { 0x5fffc, 0xe4000a8 }, { 0x502a8, 0x2fc0005f }, { 0x5fffc, 0xec000aa }, { 0x502ac, 0x2fc0005b }, { 0x5fffc, 0xf8000ad }, { 0x502b0, 0x2f400057 }, { 0x5fffc, 0x108000b0 }, { 0x502b4, 0x2e400054 }, { 0x5fffc, 0x114000b2 }, { 0x502b8, 0x2e000050 }, { 0x5fffc, 0x124000b4 }, { 0x502bc, 0x2d80004c }, { 0x5fffc, 0x130000b6 }, { 0x502c0, 0x2d000049 }, { 0x5fffc, 0x140000b8 }, { 0x502c4, 0x2c800045 }, { 0x5fffc, 0x150000b9 }, { 0x502c8, 0x2c000042 }, { 0x5fffc, 0x15c000bd }, { 0x502cc, 0x2b40003e }, { 0x5fffc, 0x16c000bf }, { 0x502d0, 0x2a80003b }, { 0x5fffc, 0x17c000bf }, { 0x502d4, 0x2a000039 }, { 0x5fffc, 0x188000c2 }, { 0x502d8, 0x29400036 }, { 0x5fffc, 0x19c000c4 }, { 0x502dc, 0x28800032 }, { 0x5fffc, 0x1ac000c5 }, { 0x502e0, 0x2800002f }, { 0x5fffc, 0x1bc000c7 }, { 0x502e4, 0x2740002c 
}, { 0x5fffc, 0x1cc000c8 }, { 0x502e8, 0x26c00029 }, { 0x5fffc, 0x1dc000c9 }, { 0x502ec, 0x26000027 }, { 0x5fffc, 0x1ec000cc }, { 0x502f0, 0x25000024 }, { 0x5fffc, 0x200000cc }, { 0x502f4, 0x24800021 }, { 0x5fffc, 0x210000cd }, { 0x502f8, 0x23800020 }, { 0x5fffc, 0x220000ce }, { 0x502fc, 0x2300001d }, }; static struct mdp_table_entry mdp_downscale_x_table_PT4TOPT6[] = { { 0x5fffc, 0x740008c }, { 0x50280, 0x33800088 }, { 0x5fffc, 0x800008e }, { 0x50284, 0x33400084 }, { 0x5fffc, 0x8400092 }, { 0x50288, 0x33000080 }, { 0x5fffc, 0x9000094 }, { 0x5028c, 0x3300007b }, { 0x5fffc, 0x9c00098 }, { 0x50290, 0x32400077 }, { 0x5fffc, 0xa40009b }, { 0x50294, 0x32000073 }, { 0x5fffc, 0xb00009d }, { 0x50298, 0x31c0006f }, { 0x5fffc, 0xbc000a0 }, { 0x5029c, 0x3140006b }, { 0x5fffc, 0xc8000a2 }, { 0x502a0, 0x31000067 }, { 0x5fffc, 0xd8000a5 }, { 0x502a4, 0x30800062 }, { 0x5fffc, 0xe4000a8 }, { 0x502a8, 0x2fc0005f }, { 0x5fffc, 0xec000aa }, { 0x502ac, 0x2fc0005b }, { 0x5fffc, 0xf8000ad }, { 0x502b0, 0x2f400057 }, { 0x5fffc, 0x108000b0 }, { 0x502b4, 0x2e400054 }, { 0x5fffc, 0x114000b2 }, { 0x502b8, 0x2e000050 }, { 0x5fffc, 0x124000b4 }, { 0x502bc, 0x2d80004c }, { 0x5fffc, 0x130000b6 }, { 0x502c0, 0x2d000049 }, { 0x5fffc, 0x140000b8 }, { 0x502c4, 0x2c800045 }, { 0x5fffc, 0x150000b9 }, { 0x502c8, 0x2c000042 }, { 0x5fffc, 0x15c000bd }, { 0x502cc, 0x2b40003e }, { 0x5fffc, 0x16c000bf }, { 0x502d0, 0x2a80003b }, { 0x5fffc, 0x17c000bf }, { 0x502d4, 0x2a000039 }, { 0x5fffc, 0x188000c2 }, { 0x502d8, 0x29400036 }, { 0x5fffc, 0x19c000c4 }, { 0x502dc, 0x28800032 }, { 0x5fffc, 0x1ac000c5 }, { 0x502e0, 0x2800002f }, { 0x5fffc, 0x1bc000c7 }, { 0x502e4, 0x2740002c }, { 0x5fffc, 0x1cc000c8 }, { 0x502e8, 0x26c00029 }, { 0x5fffc, 0x1dc000c9 }, { 0x502ec, 0x26000027 }, { 0x5fffc, 0x1ec000cc }, { 0x502f0, 0x25000024 }, { 0x5fffc, 0x200000cc }, { 0x502f4, 0x24800021 }, { 0x5fffc, 0x210000cd }, { 0x502f8, 0x23800020 }, { 0x5fffc, 0x220000ce }, { 0x502fc, 0x2300001d }, }; static struct mdp_table_entry 
mdp_downscale_x_table_PT6TOPT8[] = { { 0x5fffc, 0xfe000070 }, { 0x50280, 0x4bc00068 }, { 0x5fffc, 0xfe000078 }, { 0x50284, 0x4bc00060 }, { 0x5fffc, 0xfe000080 }, { 0x50288, 0x4b800059 }, { 0x5fffc, 0xfe000089 }, { 0x5028c, 0x4b000052 }, { 0x5fffc, 0xfe400091 }, { 0x50290, 0x4a80004b }, { 0x5fffc, 0xfe40009a }, { 0x50294, 0x4a000044 }, { 0x5fffc, 0xfe8000a3 }, { 0x50298, 0x4940003d }, { 0x5fffc, 0xfec000ac }, { 0x5029c, 0x48400037 }, { 0x5fffc, 0xff0000b4 }, { 0x502a0, 0x47800031 }, { 0x5fffc, 0xff8000bd }, { 0x502a4, 0x4640002b }, { 0x5fffc, 0xc5 }, { 0x502a8, 0x45000026 }, { 0x5fffc, 0x8000ce }, { 0x502ac, 0x43800021 }, { 0x5fffc, 0x10000d6 }, { 0x502b0, 0x4240001c }, { 0x5fffc, 0x18000df }, { 0x502b4, 0x40800018 }, { 0x5fffc, 0x24000e6 }, { 0x502b8, 0x3f000014 }, { 0x5fffc, 0x30000ee }, { 0x502bc, 0x3d400010 }, { 0x5fffc, 0x40000f5 }, { 0x502c0, 0x3b80000c }, { 0x5fffc, 0x50000fc }, { 0x502c4, 0x39800009 }, { 0x5fffc, 0x6000102 }, { 0x502c8, 0x37c00006 }, { 0x5fffc, 0x7000109 }, { 0x502cc, 0x35800004 }, { 0x5fffc, 0x840010e }, { 0x502d0, 0x33800002 }, { 0x5fffc, 0x9800114 }, { 0x502d4, 0x31400000 }, { 0x5fffc, 0xac00119 }, { 0x502d8, 0x2f4003fe }, { 0x5fffc, 0xc40011e }, { 0x502dc, 0x2d0003fc }, { 0x5fffc, 0xdc00121 }, { 0x502e0, 0x2b0003fb }, { 0x5fffc, 0xf400125 }, { 0x502e4, 0x28c003fa }, { 0x5fffc, 0x11000128 }, { 0x502e8, 0x268003f9 }, { 0x5fffc, 0x12c0012a }, { 0x502ec, 0x244003f9 }, { 0x5fffc, 0x1480012c }, { 0x502f0, 0x224003f8 }, { 0x5fffc, 0x1640012e }, { 0x502f4, 0x200003f8 }, { 0x5fffc, 0x1800012f }, { 0x502f8, 0x1e0003f8 }, { 0x5fffc, 0x1a00012f }, { 0x502fc, 0x1c0003f8 }, }; static struct mdp_table_entry mdp_downscale_x_table_PT8TO1[] = { { 0x5fffc, 0x0 }, { 0x50280, 0x7fc00000 }, { 0x5fffc, 0xff80000d }, { 0x50284, 0x7ec003f9 }, { 0x5fffc, 0xfec0001c }, { 0x50288, 0x7d4003f3 }, { 0x5fffc, 0xfe40002b }, { 0x5028c, 0x7b8003ed }, { 0x5fffc, 0xfd80003c }, { 0x50290, 0x794003e8 }, { 0x5fffc, 0xfcc0004d }, { 0x50294, 0x76c003e4 }, { 0x5fffc, 0xfc40005f 
}, { 0x50298, 0x73c003e0 }, { 0x5fffc, 0xfb800071 }, { 0x5029c, 0x708003de }, { 0x5fffc, 0xfac00085 }, { 0x502a0, 0x6d0003db }, { 0x5fffc, 0xfa000098 }, { 0x502a4, 0x698003d9 }, { 0x5fffc, 0xf98000ac }, { 0x502a8, 0x654003d8 }, { 0x5fffc, 0xf8c000c1 }, { 0x502ac, 0x610003d7 }, { 0x5fffc, 0xf84000d5 }, { 0x502b0, 0x5c8003d7 }, { 0x5fffc, 0xf7c000e9 }, { 0x502b4, 0x580003d7 }, { 0x5fffc, 0xf74000fd }, { 0x502b8, 0x534003d8 }, { 0x5fffc, 0xf6c00112 }, { 0x502bc, 0x4e8003d8 }, { 0x5fffc, 0xf6800126 }, { 0x502c0, 0x494003da }, { 0x5fffc, 0xf600013a }, { 0x502c4, 0x448003db }, { 0x5fffc, 0xf600014d }, { 0x502c8, 0x3f4003dd }, { 0x5fffc, 0xf5c00160 }, { 0x502cc, 0x3a4003df }, { 0x5fffc, 0xf5c00172 }, { 0x502d0, 0x354003e1 }, { 0x5fffc, 0xf5c00184 }, { 0x502d4, 0x304003e3 }, { 0x5fffc, 0xf6000195 }, { 0x502d8, 0x2b0003e6 }, { 0x5fffc, 0xf64001a6 }, { 0x502dc, 0x260003e8 }, { 0x5fffc, 0xf6c001b4 }, { 0x502e0, 0x214003eb }, { 0x5fffc, 0xf78001c2 }, { 0x502e4, 0x1c4003ee }, { 0x5fffc, 0xf80001cf }, { 0x502e8, 0x17c003f1 }, { 0x5fffc, 0xf90001db }, { 0x502ec, 0x134003f3 }, { 0x5fffc, 0xfa0001e5 }, { 0x502f0, 0xf0003f6 }, { 0x5fffc, 0xfb4001ee }, { 0x502f4, 0xac003f9 }, { 0x5fffc, 0xfcc001f5 }, { 0x502f8, 0x70003fb }, { 0x5fffc, 0xfe4001fb }, { 0x502fc, 0x34003fe }, }; struct mdp_table_entry *mdp_downscale_x_table[MDP_DOWNSCALE_MAX] = { [MDP_DOWNSCALE_PT2TOPT4] = mdp_downscale_x_table_PT2TOPT4, [MDP_DOWNSCALE_PT4TOPT6] = mdp_downscale_x_table_PT4TOPT6, [MDP_DOWNSCALE_PT6TOPT8] = mdp_downscale_x_table_PT6TOPT8, [MDP_DOWNSCALE_PT8TO1] = mdp_downscale_x_table_PT8TO1, }; static struct mdp_table_entry mdp_downscale_y_table_PT2TOPT4[] = { { 0x5fffc, 0x740008c }, { 0x50300, 0x33800088 }, { 0x5fffc, 0x800008e }, { 0x50304, 0x33400084 }, { 0x5fffc, 0x8400092 }, { 0x50308, 0x33000080 }, { 0x5fffc, 0x9000094 }, { 0x5030c, 0x3300007b }, { 0x5fffc, 0x9c00098 }, { 0x50310, 0x32400077 }, { 0x5fffc, 0xa40009b }, { 0x50314, 0x32000073 }, { 0x5fffc, 0xb00009d }, { 0x50318, 0x31c0006f }, { 
0x5fffc, 0xbc000a0 }, { 0x5031c, 0x3140006b }, { 0x5fffc, 0xc8000a2 }, { 0x50320, 0x31000067 }, { 0x5fffc, 0xd8000a5 }, { 0x50324, 0x30800062 }, { 0x5fffc, 0xe4000a8 }, { 0x50328, 0x2fc0005f }, { 0x5fffc, 0xec000aa }, { 0x5032c, 0x2fc0005b }, { 0x5fffc, 0xf8000ad }, { 0x50330, 0x2f400057 }, { 0x5fffc, 0x108000b0 }, { 0x50334, 0x2e400054 }, { 0x5fffc, 0x114000b2 }, { 0x50338, 0x2e000050 }, { 0x5fffc, 0x124000b4 }, { 0x5033c, 0x2d80004c }, { 0x5fffc, 0x130000b6 }, { 0x50340, 0x2d000049 }, { 0x5fffc, 0x140000b8 }, { 0x50344, 0x2c800045 }, { 0x5fffc, 0x150000b9 }, { 0x50348, 0x2c000042 }, { 0x5fffc, 0x15c000bd }, { 0x5034c, 0x2b40003e }, { 0x5fffc, 0x16c000bf }, { 0x50350, 0x2a80003b }, { 0x5fffc, 0x17c000bf }, { 0x50354, 0x2a000039 }, { 0x5fffc, 0x188000c2 }, { 0x50358, 0x29400036 }, { 0x5fffc, 0x19c000c4 }, { 0x5035c, 0x28800032 }, { 0x5fffc, 0x1ac000c5 }, { 0x50360, 0x2800002f }, { 0x5fffc, 0x1bc000c7 }, { 0x50364, 0x2740002c }, { 0x5fffc, 0x1cc000c8 }, { 0x50368, 0x26c00029 }, { 0x5fffc, 0x1dc000c9 }, { 0x5036c, 0x26000027 }, { 0x5fffc, 0x1ec000cc }, { 0x50370, 0x25000024 }, { 0x5fffc, 0x200000cc }, { 0x50374, 0x24800021 }, { 0x5fffc, 0x210000cd }, { 0x50378, 0x23800020 }, { 0x5fffc, 0x220000ce }, { 0x5037c, 0x2300001d }, }; static struct mdp_table_entry mdp_downscale_y_table_PT4TOPT6[] = { { 0x5fffc, 0x740008c }, { 0x50300, 0x33800088 }, { 0x5fffc, 0x800008e }, { 0x50304, 0x33400084 }, { 0x5fffc, 0x8400092 }, { 0x50308, 0x33000080 }, { 0x5fffc, 0x9000094 }, { 0x5030c, 0x3300007b }, { 0x5fffc, 0x9c00098 }, { 0x50310, 0x32400077 }, { 0x5fffc, 0xa40009b }, { 0x50314, 0x32000073 }, { 0x5fffc, 0xb00009d }, { 0x50318, 0x31c0006f }, { 0x5fffc, 0xbc000a0 }, { 0x5031c, 0x3140006b }, { 0x5fffc, 0xc8000a2 }, { 0x50320, 0x31000067 }, { 0x5fffc, 0xd8000a5 }, { 0x50324, 0x30800062 }, { 0x5fffc, 0xe4000a8 }, { 0x50328, 0x2fc0005f }, { 0x5fffc, 0xec000aa }, { 0x5032c, 0x2fc0005b }, { 0x5fffc, 0xf8000ad }, { 0x50330, 0x2f400057 }, { 0x5fffc, 0x108000b0 }, { 0x50334, 0x2e400054 }, 
{ 0x5fffc, 0x114000b2 }, { 0x50338, 0x2e000050 }, { 0x5fffc, 0x124000b4 }, { 0x5033c, 0x2d80004c }, { 0x5fffc, 0x130000b6 }, { 0x50340, 0x2d000049 }, { 0x5fffc, 0x140000b8 }, { 0x50344, 0x2c800045 }, { 0x5fffc, 0x150000b9 }, { 0x50348, 0x2c000042 }, { 0x5fffc, 0x15c000bd }, { 0x5034c, 0x2b40003e }, { 0x5fffc, 0x16c000bf }, { 0x50350, 0x2a80003b }, { 0x5fffc, 0x17c000bf }, { 0x50354, 0x2a000039 }, { 0x5fffc, 0x188000c2 }, { 0x50358, 0x29400036 }, { 0x5fffc, 0x19c000c4 }, { 0x5035c, 0x28800032 }, { 0x5fffc, 0x1ac000c5 }, { 0x50360, 0x2800002f }, { 0x5fffc, 0x1bc000c7 }, { 0x50364, 0x2740002c }, { 0x5fffc, 0x1cc000c8 }, { 0x50368, 0x26c00029 }, { 0x5fffc, 0x1dc000c9 }, { 0x5036c, 0x26000027 }, { 0x5fffc, 0x1ec000cc }, { 0x50370, 0x25000024 }, { 0x5fffc, 0x200000cc }, { 0x50374, 0x24800021 }, { 0x5fffc, 0x210000cd }, { 0x50378, 0x23800020 }, { 0x5fffc, 0x220000ce }, { 0x5037c, 0x2300001d }, }; static struct mdp_table_entry mdp_downscale_y_table_PT6TOPT8[] = { { 0x5fffc, 0xfe000070 }, { 0x50300, 0x4bc00068 }, { 0x5fffc, 0xfe000078 }, { 0x50304, 0x4bc00060 }, { 0x5fffc, 0xfe000080 }, { 0x50308, 0x4b800059 }, { 0x5fffc, 0xfe000089 }, { 0x5030c, 0x4b000052 }, { 0x5fffc, 0xfe400091 }, { 0x50310, 0x4a80004b }, { 0x5fffc, 0xfe40009a }, { 0x50314, 0x4a000044 }, { 0x5fffc, 0xfe8000a3 }, { 0x50318, 0x4940003d }, { 0x5fffc, 0xfec000ac }, { 0x5031c, 0x48400037 }, { 0x5fffc, 0xff0000b4 }, { 0x50320, 0x47800031 }, { 0x5fffc, 0xff8000bd }, { 0x50324, 0x4640002b }, { 0x5fffc, 0xc5 }, { 0x50328, 0x45000026 }, { 0x5fffc, 0x8000ce }, { 0x5032c, 0x43800021 }, { 0x5fffc, 0x10000d6 }, { 0x50330, 0x4240001c }, { 0x5fffc, 0x18000df }, { 0x50334, 0x40800018 }, { 0x5fffc, 0x24000e6 }, { 0x50338, 0x3f000014 }, { 0x5fffc, 0x30000ee }, { 0x5033c, 0x3d400010 }, { 0x5fffc, 0x40000f5 }, { 0x50340, 0x3b80000c }, { 0x5fffc, 0x50000fc }, { 0x50344, 0x39800009 }, { 0x5fffc, 0x6000102 }, { 0x50348, 0x37c00006 }, { 0x5fffc, 0x7000109 }, { 0x5034c, 0x35800004 }, { 0x5fffc, 0x840010e }, { 0x50350, 0x33800002 
}, { 0x5fffc, 0x9800114 }, { 0x50354, 0x31400000 }, { 0x5fffc, 0xac00119 }, { 0x50358, 0x2f4003fe }, { 0x5fffc, 0xc40011e }, { 0x5035c, 0x2d0003fc }, { 0x5fffc, 0xdc00121 }, { 0x50360, 0x2b0003fb }, { 0x5fffc, 0xf400125 }, { 0x50364, 0x28c003fa }, { 0x5fffc, 0x11000128 }, { 0x50368, 0x268003f9 }, { 0x5fffc, 0x12c0012a }, { 0x5036c, 0x244003f9 }, { 0x5fffc, 0x1480012c }, { 0x50370, 0x224003f8 }, { 0x5fffc, 0x1640012e }, { 0x50374, 0x200003f8 }, { 0x5fffc, 0x1800012f }, { 0x50378, 0x1e0003f8 }, { 0x5fffc, 0x1a00012f }, { 0x5037c, 0x1c0003f8 }, }; static struct mdp_table_entry mdp_downscale_y_table_PT8TO1[] = { { 0x5fffc, 0x0 }, { 0x50300, 0x7fc00000 }, { 0x5fffc, 0xff80000d }, { 0x50304, 0x7ec003f9 }, { 0x5fffc, 0xfec0001c }, { 0x50308, 0x7d4003f3 }, { 0x5fffc, 0xfe40002b }, { 0x5030c, 0x7b8003ed }, { 0x5fffc, 0xfd80003c }, { 0x50310, 0x794003e8 }, { 0x5fffc, 0xfcc0004d }, { 0x50314, 0x76c003e4 }, { 0x5fffc, 0xfc40005f }, { 0x50318, 0x73c003e0 }, { 0x5fffc, 0xfb800071 }, { 0x5031c, 0x708003de }, { 0x5fffc, 0xfac00085 }, { 0x50320, 0x6d0003db }, { 0x5fffc, 0xfa000098 }, { 0x50324, 0x698003d9 }, { 0x5fffc, 0xf98000ac }, { 0x50328, 0x654003d8 }, { 0x5fffc, 0xf8c000c1 }, { 0x5032c, 0x610003d7 }, { 0x5fffc, 0xf84000d5 }, { 0x50330, 0x5c8003d7 }, { 0x5fffc, 0xf7c000e9 }, { 0x50334, 0x580003d7 }, { 0x5fffc, 0xf74000fd }, { 0x50338, 0x534003d8 }, { 0x5fffc, 0xf6c00112 }, { 0x5033c, 0x4e8003d8 }, { 0x5fffc, 0xf6800126 }, { 0x50340, 0x494003da }, { 0x5fffc, 0xf600013a }, { 0x50344, 0x448003db }, { 0x5fffc, 0xf600014d }, { 0x50348, 0x3f4003dd }, { 0x5fffc, 0xf5c00160 }, { 0x5034c, 0x3a4003df }, { 0x5fffc, 0xf5c00172 }, { 0x50350, 0x354003e1 }, { 0x5fffc, 0xf5c00184 }, { 0x50354, 0x304003e3 }, { 0x5fffc, 0xf6000195 }, { 0x50358, 0x2b0003e6 }, { 0x5fffc, 0xf64001a6 }, { 0x5035c, 0x260003e8 }, { 0x5fffc, 0xf6c001b4 }, { 0x50360, 0x214003eb }, { 0x5fffc, 0xf78001c2 }, { 0x50364, 0x1c4003ee }, { 0x5fffc, 0xf80001cf }, { 0x50368, 0x17c003f1 }, { 0x5fffc, 0xf90001db }, { 0x5036c, 
0x134003f3 }, { 0x5fffc, 0xfa0001e5 }, { 0x50370, 0xf0003f6 }, { 0x5fffc, 0xfb4001ee }, { 0x50374, 0xac003f9 }, { 0x5fffc, 0xfcc001f5 }, { 0x50378, 0x70003fb }, { 0x5fffc, 0xfe4001fb }, { 0x5037c, 0x34003fe }, }; struct mdp_table_entry *mdp_downscale_y_table[MDP_DOWNSCALE_MAX] = { [MDP_DOWNSCALE_PT2TOPT4] = mdp_downscale_y_table_PT2TOPT4, [MDP_DOWNSCALE_PT4TOPT6] = mdp_downscale_y_table_PT4TOPT6, [MDP_DOWNSCALE_PT6TOPT8] = mdp_downscale_y_table_PT6TOPT8, [MDP_DOWNSCALE_PT8TO1] = mdp_downscale_y_table_PT8TO1, }; struct mdp_table_entry mdp_gaussian_blur_table[] = { /* max variance */ { 0x5fffc, 0x20000080 }, { 0x50280, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50284, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50288, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5028c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50290, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50294, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50298, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5029c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502a0, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502a4, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502a8, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502ac, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502b0, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502b4, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502b8, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502bc, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502c0, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502c4, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502c8, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502cc, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502d0, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502d4, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502d8, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502dc, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502e0, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502e4, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502e8, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502ec, 0x20000080 }, { 0x5fffc, 
0x20000080 }, { 0x502f0, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502f4, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502f8, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502fc, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50300, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50304, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50308, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5030c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50310, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50314, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50318, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5031c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50320, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50324, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50328, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5032c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50330, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50334, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50338, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5033c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50340, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50344, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50348, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5034c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50350, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50354, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50358, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5035c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50360, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50364, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50368, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5036c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50370, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50374, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50378, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5037c, 0x20000080 }, }; static void load_table(const struct mdp_info *mdp, struct mdp_table_entry *table, int len) { int i; for (i = 0; i < len; i++) mdp_writel(mdp, table[i].val, table[i].reg); } enum { IMG_LEFT, IMG_RIGHT, 
IMG_TOP, IMG_BOTTOM, }; static void get_edge_info(uint32_t src, uint32_t src_coord, uint32_t dst, uint32_t *interp1, uint32_t *interp2, uint32_t *repeat1, uint32_t *repeat2) { if (src > 3 * dst) { *interp1 = 0; *interp2 = src - 1; *repeat1 = 0; *repeat2 = 0; } else if (src == 3 * dst) { *interp1 = 0; *interp2 = src; *repeat1 = 0; *repeat2 = 1; } else if (src > dst && src < 3 * dst) { *interp1 = -1; *interp2 = src; *repeat1 = 1; *repeat2 = 1; } else if (src == dst) { *interp1 = -1; *interp2 = src + 1; *repeat1 = 1; *repeat2 = 2; } else { *interp1 = -2; *interp2 = src + 1; *repeat1 = 2; *repeat2 = 2; } *interp1 += src_coord; *interp2 += src_coord; } int mdp_ppp_cfg_edge_cond(struct mdp_blit_req *req, struct ppp_regs *regs) { int32_t luma_interp[4]; int32_t luma_repeat[4]; int32_t chroma_interp[4]; int32_t chroma_bound[4]; int32_t chroma_repeat[4]; uint32_t dst_w, dst_h; memset(&luma_interp, 0, sizeof(int32_t) * 4); memset(&luma_repeat, 0, sizeof(int32_t) * 4); memset(&chroma_interp, 0, sizeof(int32_t) * 4); memset(&chroma_bound, 0, sizeof(int32_t) * 4); memset(&chroma_repeat, 0, sizeof(int32_t) * 4); regs->edge = 0; if (req->flags & MDP_ROT_90) { dst_w = req->dst_rect.h; dst_h = req->dst_rect.w; } else { dst_w = req->dst_rect.w; dst_h = req->dst_rect.h; } if (regs->op & (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON)) { get_edge_info(req->src_rect.h, req->src_rect.y, dst_h, &luma_interp[IMG_TOP], &luma_interp[IMG_BOTTOM], &luma_repeat[IMG_TOP], &luma_repeat[IMG_BOTTOM]); get_edge_info(req->src_rect.w, req->src_rect.x, dst_w, &luma_interp[IMG_LEFT], &luma_interp[IMG_RIGHT], &luma_repeat[IMG_LEFT], &luma_repeat[IMG_RIGHT]); } else { luma_interp[IMG_LEFT] = req->src_rect.x; luma_interp[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1; luma_interp[IMG_TOP] = req->src_rect.y; luma_interp[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1; luma_repeat[IMG_LEFT] = 0; luma_repeat[IMG_TOP] = 0; luma_repeat[IMG_RIGHT] = 0; luma_repeat[IMG_BOTTOM] = 0; } chroma_interp[IMG_LEFT] = 
luma_interp[IMG_LEFT]; chroma_interp[IMG_RIGHT] = luma_interp[IMG_RIGHT]; chroma_interp[IMG_TOP] = luma_interp[IMG_TOP]; chroma_interp[IMG_BOTTOM] = luma_interp[IMG_BOTTOM]; chroma_bound[IMG_LEFT] = req->src_rect.x; chroma_bound[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1; chroma_bound[IMG_TOP] = req->src_rect.y; chroma_bound[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1; if (IS_YCRCB(req->src.format)) { chroma_interp[IMG_LEFT] = chroma_interp[IMG_LEFT] >> 1; chroma_interp[IMG_RIGHT] = (chroma_interp[IMG_RIGHT] + 1) >> 1; chroma_bound[IMG_LEFT] = chroma_bound[IMG_LEFT] >> 1; chroma_bound[IMG_RIGHT] = chroma_bound[IMG_RIGHT] >> 1; } if (req->src.format == MDP_Y_CBCR_H2V2 || req->src.format == MDP_Y_CRCB_H2V2) { chroma_interp[IMG_TOP] = (chroma_interp[IMG_TOP] - 1) >> 1; chroma_interp[IMG_BOTTOM] = (chroma_interp[IMG_BOTTOM] + 1) >> 1; chroma_bound[IMG_TOP] = (chroma_bound[IMG_TOP] + 1) >> 1; chroma_bound[IMG_BOTTOM] = chroma_bound[IMG_BOTTOM] >> 1; } chroma_repeat[IMG_LEFT] = chroma_bound[IMG_LEFT] - chroma_interp[IMG_LEFT]; chroma_repeat[IMG_RIGHT] = chroma_interp[IMG_RIGHT] - chroma_bound[IMG_RIGHT]; chroma_repeat[IMG_TOP] = chroma_bound[IMG_TOP] - chroma_interp[IMG_TOP]; chroma_repeat[IMG_BOTTOM] = chroma_interp[IMG_BOTTOM] - chroma_bound[IMG_BOTTOM]; if (chroma_repeat[IMG_LEFT] < 0 || chroma_repeat[IMG_LEFT] > 3 || chroma_repeat[IMG_RIGHT] < 0 || chroma_repeat[IMG_RIGHT] > 3 || chroma_repeat[IMG_TOP] < 0 || chroma_repeat[IMG_TOP] > 3 || chroma_repeat[IMG_BOTTOM] < 0 || chroma_repeat[IMG_BOTTOM] > 3 || luma_repeat[IMG_LEFT] < 0 || luma_repeat[IMG_LEFT] > 3 || luma_repeat[IMG_RIGHT] < 0 || luma_repeat[IMG_RIGHT] > 3 || luma_repeat[IMG_TOP] < 0 || luma_repeat[IMG_TOP] > 3 || luma_repeat[IMG_BOTTOM] < 0 || luma_repeat[IMG_BOTTOM] > 3) return -1; regs->edge |= (chroma_repeat[IMG_LEFT] & 3) << MDP_LEFT_CHROMA; regs->edge |= (chroma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_CHROMA; regs->edge |= (chroma_repeat[IMG_TOP] & 3) << MDP_TOP_CHROMA; regs->edge |= 
(chroma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_CHROMA; regs->edge |= (luma_repeat[IMG_LEFT] & 3) << MDP_LEFT_LUMA; regs->edge |= (luma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_LUMA; regs->edge |= (luma_repeat[IMG_TOP] & 3) << MDP_TOP_LUMA; regs->edge |= (luma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_LUMA; return 0; } #define ONE_HALF (1LL << 32) #define ONE (1LL << 33) #define TWO (2LL << 33) #define THREE (3LL << 33) #define FRAC_MASK (ONE - 1) #define INT_MASK (~FRAC_MASK) static int scale_params(uint32_t dim_in, uint32_t dim_out, uint32_t origin, uint32_t *phase_init, uint32_t *phase_step) { /* to improve precicsion calculations are done in U31.33 and converted * to U3.29 at the end */ int64_t k1, k2, k3, k4, tmp; uint64_t n, d, os, os_p, od, od_p, oreq; unsigned rpa = 0; int64_t ip64, delta; if (dim_out % 3 == 0) rpa = !(dim_in % (dim_out / 3)); n = ((uint64_t)dim_out) << 34; d = dim_in; if (!d) return -1; do_div(n, d); k3 = (n + 1) >> 1; if ((k3 >> 4) < (1LL << 27) || (k3 >> 4) > (1LL << 31)) return -1; n = ((uint64_t)dim_in) << 34; d = (uint64_t)dim_out; if (!d) return -1; do_div(n, d); k1 = (n + 1) >> 1; k2 = (k1 - ONE) >> 1; *phase_init = (int)(k2 >> 4); k4 = (k3 - ONE) >> 1; if (rpa) { os = ((uint64_t)origin << 33) - ONE_HALF; tmp = (dim_out * os) + ONE_HALF; if (!dim_in) return -1; do_div(tmp, dim_in); od = tmp - ONE_HALF; } else { os = ((uint64_t)origin << 1) - 1; od = (((k3 * os) >> 1) + k4); } od_p = od & INT_MASK; if (od_p != od) od_p += ONE; if (rpa) { tmp = (dim_in * od_p) + ONE_HALF; if (!dim_in) return -1; do_div(tmp, dim_in); os_p = tmp - ONE_HALF; } else { os_p = ((k1 * (od_p >> 33)) + k2); } oreq = (os_p & INT_MASK) - ONE; ip64 = os_p - oreq; delta = ((int64_t)(origin) << 33) - oreq; ip64 -= delta; /* limit to valid range before the left shift */ delta = (ip64 & (1LL << 63)) ? 
4 : -4; delta <<= 33; while (abs((int)(ip64 >> 33)) > 4) ip64 += delta; *phase_init = (int)(ip64 >> 4); *phase_step = (uint32_t)(k1 >> 4); return 0; } int mdp_ppp_cfg_scale(const struct mdp_info *mdp, struct ppp_regs *regs, struct mdp_rect *src_rect, struct mdp_rect *dst_rect, uint32_t src_format, uint32_t dst_format) { int downscale; uint32_t phase_init_x, phase_init_y, phase_step_x, phase_step_y; uint32_t scale_factor_x, scale_factor_y; if (scale_params(src_rect->w, dst_rect->w, 1, &phase_init_x, &phase_step_x) || scale_params(src_rect->h, dst_rect->h, 1, &phase_init_y, &phase_step_y)) return -1; regs->phasex_init = phase_init_x; regs->phasey_init = phase_init_y; regs->phasex_step = phase_step_x; regs->phasey_step = phase_step_y; scale_factor_x = (dst_rect->w * 10) / src_rect->w; scale_factor_y = (dst_rect->h * 10) / src_rect->h; if (scale_factor_x > 8) downscale = MDP_DOWNSCALE_PT8TO1; else if (scale_factor_x > 6) downscale = MDP_DOWNSCALE_PT6TOPT8; else if (scale_factor_x > 4) downscale = MDP_DOWNSCALE_PT4TOPT6; else downscale = MDP_DOWNSCALE_PT2TOPT4; if (downscale != downscale_x_table) { load_table(mdp, mdp_downscale_x_table[downscale], 64); downscale_x_table = downscale; } if (scale_factor_y > 8) downscale = MDP_DOWNSCALE_PT8TO1; else if (scale_factor_y > 6) downscale = MDP_DOWNSCALE_PT6TOPT8; else if (scale_factor_y > 4) downscale = MDP_DOWNSCALE_PT4TOPT6; else downscale = MDP_DOWNSCALE_PT2TOPT4; if (downscale != downscale_y_table) { load_table(mdp, mdp_downscale_y_table[downscale], 64); downscale_y_table = downscale; } return 0; } int mdp_ppp_load_blur(const struct mdp_info *mdp) { if (!(downscale_x_table == MDP_DOWNSCALE_BLUR && downscale_y_table == MDP_DOWNSCALE_BLUR)) { load_table(mdp, mdp_gaussian_blur_table, 128); downscale_x_table = MDP_DOWNSCALE_BLUR; downscale_y_table = MDP_DOWNSCALE_BLUR; } return 0; } void mdp_ppp_init_scale(const struct mdp_info *mdp) { downscale_x_table = MDP_DOWNSCALE_MAX; downscale_y_table = MDP_DOWNSCALE_MAX; load_table(mdp, 
mdp_upscale_table, ARRAY_SIZE(mdp_upscale_table)); }
gpl-2.0
kinghaitao/git-core
net/ipv4/tunnel4.c
521
4015
/* tunnel4.c: Generic IP tunnel transformer.
 *
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/xfrm.h>

/*
 * Two priority-ordered, singly-linked handler chains:
 *  - tunnel4_handlers:  IPv4-in-IPvv4 payloads (AF_INET)
 *  - tunnel64_handlers: IPv6-in-IPv4 payloads (any other family)
 *
 * The mutex serializes writers only; the receive/error paths walk the
 * lists without it and rely on synchronize_net() after an unlink.
 */
static struct xfrm_tunnel *tunnel4_handlers;
static struct xfrm_tunnel *tunnel64_handlers;
static DEFINE_MUTEX(tunnel4_mutex);

/* Pick the handler chain that matches the given address family. */
static inline struct xfrm_tunnel **fam_handlers(unsigned short family)
{
	return (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers;
}

/*
 * Insert @handler into the family's chain, keeping it sorted by
 * ascending priority.  Returns 0 on success or -EEXIST if a handler
 * with the same priority is already registered.
 */
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
{
	struct xfrm_tunnel **pprev;
	int ret = -EEXIST;
	int priority = handler->priority;

	mutex_lock(&tunnel4_mutex);

	/* Walk to the insertion point; equal priority is a conflict. */
	for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
		if ((*pprev)->priority > priority)
			break;
		if ((*pprev)->priority == priority)
			goto err;
	}

	handler->next = *pprev;
	*pprev = handler;

	ret = 0;

err:
	mutex_unlock(&tunnel4_mutex);

	return ret;
}
EXPORT_SYMBOL(xfrm4_tunnel_register);

/*
 * Unlink @handler from the family's chain.  Returns 0 on success or
 * -ENOENT if the handler was not registered.  synchronize_net() makes
 * sure no lockless reader still holds a reference before we return.
 */
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
{
	struct xfrm_tunnel **pprev;
	int ret = -ENOENT;

	mutex_lock(&tunnel4_mutex);

	for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
		if (*pprev == handler) {
			*pprev = handler->next;
			ret = 0;
			break;
		}
	}

	mutex_unlock(&tunnel4_mutex);

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(xfrm4_tunnel_deregister);

/*
 * Receive path for IPPROTO_IPIP.  Offer the skb to each handler in
 * priority order; the first one returning 0 consumes it.  If nobody
 * claims it, answer with ICMP port-unreachable and drop.
 */
static int tunnel4_rcv(struct sk_buff *skb)
{
	struct xfrm_tunnel *handler;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto drop;

	for (handler = tunnel4_handlers; handler; handler = handler->next)
		if (!handler->handler(skb))
			return 0;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/* Same as tunnel4_rcv() but for IPv6-in-IPv4 (IPPROTO_IPV6). */
static int tunnel64_rcv(struct sk_buff *skb)
{
	struct xfrm_tunnel *handler;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto drop;

	for (handler = tunnel64_handlers; handler; handler = handler->next)
		if (!handler->handler(skb))
			return 0;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}
#endif

/* ICMP error path: first handler returning 0 stops the walk. */
static void tunnel4_err(struct sk_buff *skb, u32 info)
{
	struct xfrm_tunnel *handler;

	for (handler = tunnel4_handlers; handler; handler = handler->next)
		if (!handler->err_handler(skb, info))
			break;
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static void tunnel64_err(struct sk_buff *skb, u32 info)
{
	struct xfrm_tunnel *handler;

	for (handler = tunnel64_handlers; handler; handler = handler->next)
		if (!handler->err_handler(skb, info))
			break;
}
#endif

static const struct net_protocol tunnel4_protocol = {
	.handler	=	tunnel4_rcv,
	.err_handler	=	tunnel4_err,
	.no_policy	=	1,
	.netns_ok	=	1,
};

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static const struct net_protocol tunnel64_protocol = {
	.handler	=	tunnel64_rcv,
	.err_handler	=	tunnel64_err,
	.no_policy	=	1,
	.netns_ok	=	1,
};
#endif

/* Register both inet protocol hooks; unwind the first on failure. */
static int __init tunnel4_init(void)
{
	if (inet_add_protocol(&tunnel4_protocol, IPPROTO_IPIP)) {
		printk(KERN_ERR "tunnel4 init: can't add protocol\n");
		return -EAGAIN;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) {
		printk(KERN_ERR "tunnel64 init: can't add protocol\n");
		inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
		return -EAGAIN;
	}
#endif
	return 0;
}

static void __exit tunnel4_fini(void)
{
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6))
		printk(KERN_ERR "tunnel64 close: can't remove protocol\n");
#endif
	if (inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP))
		printk(KERN_ERR "tunnel4 close: can't remove protocol\n");
}

module_init(tunnel4_init);
module_exit(tunnel4_fini);
MODULE_LICENSE("GPL");
gpl-2.0
Digilent/linux-Digilent-Dev
arch/arm/mach-ux500/board-mop500-audio.c
1801
1742
/* * Copyright (C) ST-Ericsson SA 2010 * * License terms: GNU General Public License (GPL), version 2 */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/gpio.h> #include <linux/platform_data/dma-ste-dma40.h> #include <linux/platform_data/asoc-ux500-msp.h> #include "ste-dma40-db8500.h" #include "board-mop500.h" static struct stedma40_chan_cfg msp0_dma_rx = { .high_priority = true, .dir = DMA_DEV_TO_MEM, .dev_type = DB8500_DMA_DEV31_MSP0_SLIM0_CH0, }; static struct stedma40_chan_cfg msp0_dma_tx = { .high_priority = true, .dir = DMA_MEM_TO_DEV, .dev_type = DB8500_DMA_DEV31_MSP0_SLIM0_CH0, }; struct msp_i2s_platform_data msp0_platform_data = { .id = 0, .msp_i2s_dma_rx = &msp0_dma_rx, .msp_i2s_dma_tx = &msp0_dma_tx, }; static struct stedma40_chan_cfg msp1_dma_rx = { .high_priority = true, .dir = DMA_DEV_TO_MEM, .dev_type = DB8500_DMA_DEV30_MSP3, }; static struct stedma40_chan_cfg msp1_dma_tx = { .high_priority = true, .dir = DMA_MEM_TO_DEV, .dev_type = DB8500_DMA_DEV30_MSP1, }; struct msp_i2s_platform_data msp1_platform_data = { .id = 1, .msp_i2s_dma_rx = NULL, .msp_i2s_dma_tx = &msp1_dma_tx, }; static struct stedma40_chan_cfg msp2_dma_rx = { .high_priority = true, .dir = DMA_DEV_TO_MEM, .dev_type = DB8500_DMA_DEV14_MSP2, }; static struct stedma40_chan_cfg msp2_dma_tx = { .high_priority = true, .dir = DMA_MEM_TO_DEV, .dev_type = DB8500_DMA_DEV14_MSP2, .use_fixed_channel = true, .phy_channel = 1, }; struct msp_i2s_platform_data msp2_platform_data = { .id = 2, .msp_i2s_dma_rx = &msp2_dma_rx, .msp_i2s_dma_tx = &msp2_dma_tx, }; struct msp_i2s_platform_data msp3_platform_data = { .id = 3, .msp_i2s_dma_rx = &msp1_dma_rx, .msp_i2s_dma_tx = NULL, };
gpl-2.0
scriptZilla/linux
sound/usb/usx2y/us122l.c
1801
19356
/*
 * Copyright (C) 2007, 2008 Karsten Wiese <fzu@wemgehoertderstaat.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/hwdep.h>
#include <sound/pcm.h>
#include <sound/initval.h>
#define MODNAME "US122L"
#include "usb_stream.c"

#include "../usbaudio.h"
#include "../midi.h"
#include "us122l.h"

MODULE_AUTHOR("Karsten Wiese <fzu@wemgehoertderstaat.de>");
MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.5");
MODULE_LICENSE("GPL");

static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */
/* Enable this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for "NAME_ALLCAPS".");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for "NAME_ALLCAPS".");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable "NAME_ALLCAPS".");

/* Marks which card slots are occupied; cleared in snd_us122l_free(). */
static int snd_us122l_card_used[SNDRV_CARDS];

/* Attach the raw-MIDI quirk for a US-122L (MIDI lives on interface 1). */
static int us122l_create_usbmidi(struct snd_card *card)
{
	static struct snd_usb_midi_endpoint_info quirk_data = {
		.out_ep = 4,
		.in_ep = 3,
		.out_cables =	0x001,
		.in_cables =	0x001
	};
	static struct snd_usb_audio_quirk quirk = {
		.vendor_name =	"US122L",
		.product_name =	NAME_ALLCAPS,
		.ifnum = 	1,
		.type = QUIRK_MIDI_US122L,
		.data = &quirk_data
	};
	struct usb_device *dev = US122L(card)->dev;
	struct usb_interface *iface = usb_ifnum_to_if(dev, 1);

	return snd_usbmidi_create(card, iface,
				  &US122L(card)->midi_list, &quirk);
}

/* Same as above for the US-144, whose MIDI sits on interface 0. */
static int us144_create_usbmidi(struct snd_card *card)
{
	static struct snd_usb_midi_endpoint_info quirk_data = {
		.out_ep = 4,
		.in_ep = 3,
		.out_cables =	0x001,
		.in_cables =	0x001
	};
	static struct snd_usb_audio_quirk quirk = {
		.vendor_name =	"US144",
		.product_name =	NAME_ALLCAPS,
		.ifnum = 	0,
		.type = QUIRK_MIDI_US122L,
		.data = &quirk_data
	};
	struct usb_device *dev = US122L(card)->dev;
	struct usb_interface *iface = usb_ifnum_to_if(dev, 0);

	return snd_usbmidi_create(card, iface,
				  &US122L(card)->midi_list, &quirk);
}

/*
 * Wrapper for usb_control_msg().
 * Allocates a temp buffer to prevent dmaing from/to the stack.
 */
static int us122l_ctl_msg(struct usb_device *dev, unsigned int pipe,
			  __u8 request, __u8 requesttype,
			  __u16 value, __u16 index, void *data,
			  __u16 size, int timeout)
{
	int err;
	void *buf = NULL;

	if (size > 0) {
		buf = kmemdup(data, size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	}
	err = usb_control_msg(dev, pipe, request, requesttype,
			      value, index, buf, size, timeout);
	if (size > 0) {
		/* Copy back for IN transfers; harmless no-op for OUT. */
		memcpy(data, buf, size);
		kfree(buf);
	}
	return err;
}

/* Send the vendor-specific 'I' control request carrying value @v. */
static void pt_info_set(struct usb_device *dev, u8 v)
{
	int ret;

	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			      'I',
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      v, 0, NULL, 0, 1000);
	snd_printdd(KERN_DEBUG "%i\n", ret);
}

/* VMA duplicated (fork/split): bump the mapping refcount. */
static void usb_stream_hwdep_vm_open(struct vm_area_struct *area)
{
	struct us122l *us122l = area->vm_private_data;
	atomic_inc(&us122l->mmap_count);
	snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count));
}

/*
 * Page-fault handler for the hwdep mapping: the first PAGE_ALIGNed
 * read_size bytes come from the usb_stream struct itself, the rest from
 * the separately allocated write page.  Faults past the end (or while
 * no stream exists) raise SIGBUS.
 */
static int usb_stream_hwdep_vm_fault(struct vm_area_struct *area,
				     struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	void *vaddr;
	struct us122l *us122l = area->vm_private_data;
	struct usb_stream *s;

	mutex_lock(&us122l->mutex);
	s = us122l->sk.s;
	if (!s)
		goto unlock;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset < PAGE_ALIGN(s->read_size))
		vaddr = (char *)s + offset;
	else {
		offset -= PAGE_ALIGN(s->read_size);
		if (offset >= PAGE_ALIGN(s->write_size))
			goto unlock;

		vaddr = us122l->sk.write_page + offset;
	}
	page = virt_to_page(vaddr);

	get_page(page);
	mutex_unlock(&us122l->mutex);

	vmf->page = page;

	return 0;
unlock:
	mutex_unlock(&us122l->mutex);
	return VM_FAULT_SIGBUS;
}

/* VMA torn down: drop the mapping refcount (disconnect waits on it). */
static void usb_stream_hwdep_vm_close(struct vm_area_struct *area)
{
	struct us122l *us122l = area->vm_private_data;
	atomic_dec(&us122l->mmap_count);
	snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count));
}

static const struct vm_operations_struct usb_stream_hwdep_vm_ops = {
	.open = usb_stream_hwdep_vm_open,
	.fault = usb_stream_hwdep_vm_fault,
	.close = usb_stream_hwdep_vm_close,
};

/*
 * hwdep open: at most two concurrent opens (master + slave client).
 * Takes autopm references so the device stays resumed while in use.
 */
static int usb_stream_hwdep_open(struct snd_hwdep *hw, struct file *file)
{
	struct us122l	*us122l = hw->private_data;
	struct usb_interface *iface;
	snd_printdd(KERN_DEBUG "%p %p\n", hw, file);
	if (hw->used >= 2)
		return -EBUSY;

	if (!us122l->first)
		us122l->first = file;

	if (us122l->dev->descriptor.idProduct == USB_ID_US144 ||
	    us122l->dev->descriptor.idProduct == USB_ID_US144MKII) {
		iface = usb_ifnum_to_if(us122l->dev, 0);
		usb_autopm_get_interface(iface);
	}
	iface = usb_ifnum_to_if(us122l->dev, 1);
	usb_autopm_get_interface(iface);
	return 0;
}

/* hwdep release: drop autopm refs and promote the slave if the master
 * file is the one going away. */
static int usb_stream_hwdep_release(struct snd_hwdep *hw, struct file *file)
{
	struct us122l	*us122l = hw->private_data;
	struct usb_interface *iface;
	snd_printdd(KERN_DEBUG "%p %p\n", hw, file);

	if (us122l->dev->descriptor.idProduct == USB_ID_US144 ||
	    us122l->dev->descriptor.idProduct == USB_ID_US144MKII) {
		iface = usb_ifnum_to_if(us122l->dev, 0);
		usb_autopm_put_interface(iface);
	}
	iface = usb_ifnum_to_if(us122l->dev, 1);
	usb_autopm_put_interface(iface);
	if (us122l->first == file)
		us122l->first = NULL;
	mutex_lock(&us122l->mutex);
	if (us122l->master == file)
		us122l->master = us122l->slave;

	us122l->slave = NULL;
	mutex_unlock(&us122l->mutex);
	return 0;
}

/*
 * mmap the stream buffers into userspace.  Offset 0 maps the read-only
 * region (usb_stream header + captured data); offsets past
 * PAGE_ALIGN(read_size) map the writable playback region.
 *
 * NOTE(review): us122l->sk.s is dereferenced without a NULL check here,
 * unlike in the fault handler above — presumably an mmap can only be
 * issued after SET_PARAMS created the stream; confirm against the
 * hwdep ioctl ordering.
 */
static int usb_stream_hwdep_mmap(struct snd_hwdep *hw,
				 struct file *filp, struct vm_area_struct *area)
{
	unsigned long	size = area->vm_end - area->vm_start;
	struct us122l	*us122l = hw->private_data;
	unsigned long offset;
	struct usb_stream *s;
	int err = 0;
	bool read;

	offset = area->vm_pgoff << PAGE_SHIFT;
	mutex_lock(&us122l->mutex);
	s = us122l->sk.s;
	read = offset < s->read_size;
	if (read && area->vm_flags & VM_WRITE) {
		err = -EPERM;
		goto out;
	}
	snd_printdd(KERN_DEBUG "%lu %u\n", size,
		    read ? s->read_size : s->write_size);
	/* if userspace tries to mmap beyond end of our buffer, fail */
	if (size > PAGE_ALIGN(read ? s->read_size : s->write_size)) {
		snd_printk(KERN_WARNING "%lu > %u\n", size,
			   read ? s->read_size : s->write_size);
		err = -EINVAL;
		goto out;
	}

	area->vm_ops = &usb_stream_hwdep_vm_ops;
	area->vm_flags |= VM_DONTDUMP;
	if (!read)
		area->vm_flags |= VM_DONTEXPAND;
	area->vm_private_data = us122l;
	atomic_inc(&us122l->mmap_count);
out:
	mutex_unlock(&us122l->mutex);
	return err;
}

/*
 * poll: report readiness once a new period completed since this file
 * last polled (master and slave each track their own counter).  If the
 * mutex is contended or the stream is not ready, report all events set
 * (including POLLERR) so the caller re-checks.
 */
static unsigned int usb_stream_hwdep_poll(struct snd_hwdep *hw,
					  struct file *file, poll_table *wait)
{
	struct us122l	*us122l = hw->private_data;
	unsigned	*polled;
	unsigned int	mask;

	poll_wait(file, &us122l->sk.sleep, wait);

	mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
	if (mutex_trylock(&us122l->mutex)) {
		struct usb_stream *s = us122l->sk.s;
		if (s && s->state == usb_stream_ready) {
			if (us122l->first == file)
				polled = &s->periods_polled;
			else
				polled = &us122l->second_periods_polled;
			if (*polled != s->periods_done) {
				*polled = s->periods_done;
				mask = POLLIN | POLLOUT | POLLWRNORM;
			} else
				mask = 0;
		}
		mutex_unlock(&us122l->mutex);
	}
	return mask;
}

/* Stop MIDI input and tear down the audio stream.  Caller holds the
 * device mutex where concurrent access is possible. */
static void us122l_stop(struct us122l *us122l)
{
	struct list_head *p;
	list_for_each(p, &us122l->midi_list)
		snd_usbmidi_input_stop(p);

	usb_stream_stop(&us122l->sk);
	usb_stream_free(&us122l->sk);
}

/* Program the sample rate into iso endpoint 0x81 via a class request. */
static int us122l_set_sample_rate(struct usb_device *dev, int rate)
{
	unsigned int ep = 0x81;
	unsigned char data[3];
	int err;

	/* 24-bit little-endian rate value, per USB audio class. */
	data[0] = rate;
	data[1] = rate >> 8;
	data[2] = rate >> 16;
	err = us122l_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR,
			     USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_OUT,
			     UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep, data, 3, 1000);
	if (err < 0)
		snd_printk(KERN_ERR "%d: cannot set freq %d to ep 0x%x\n",
			   dev->devnum, rate, ep);
	return err;
}

/*
 * Create and start the iso stream at @rate with @period_frames per
 * period, then restart MIDI input.  Returns true on success.
 */
static bool us122l_start(struct us122l *us122l,
			 unsigned rate, unsigned period_frames)
{
	struct list_head *p;
	int err;
	unsigned use_packsize = 0;
	bool success = false;

	if (us122l->dev->speed == USB_SPEED_HIGH) {
		/* The us-122l's descriptor defaults to iso max_packsize 78,
		   which isn't needed for samplerates <= 48000.
		   Lets save some memory:
		*/
		switch (rate) {
		case 44100:
			use_packsize = 36;
			break;
		case 48000:
			use_packsize = 42;
			break;
		case 88200:
			use_packsize = 72;
			break;
		}
	}
	if (!usb_stream_new(&us122l->sk, us122l->dev, 1, 2,
			    rate, use_packsize, period_frames, 6))
		goto out;

	err = us122l_set_sample_rate(us122l->dev, rate);
	if (err < 0) {
		us122l_stop(us122l);
		snd_printk(KERN_ERR "us122l_set_sample_rate error \n");
		goto out;
	}
	err = usb_stream_start(&us122l->sk);
	if (err < 0) {
		us122l_stop(us122l);
		snd_printk(KERN_ERR "us122l_start error %i \n", err);
		goto out;
	}
	list_for_each(p, &us122l->midi_list)
		snd_usbmidi_input_start(p);
	success = true;
out:
	return success;
}

/*
 * SET_PARAMS ioctl: validate the requested stream configuration, elect
 * master/slave clients, and (re)start the stream when the config
 * changed or the stream xrun'ed.  Returns 1 when a new stream was
 * started, 0 when the existing one was kept, negative errno otherwise.
 */
static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
				  unsigned cmd, unsigned long arg)
{
	struct usb_stream_config *cfg;
	struct us122l *us122l = hw->private_data;
	struct usb_stream *s;
	unsigned min_period_frames;
	int err = 0;
	bool high_speed;

	if (cmd != SNDRV_USB_STREAM_IOCTL_SET_PARAMS)
		return -ENOTTY;

	cfg = memdup_user((void *)arg, sizeof(*cfg));
	if (IS_ERR(cfg))
		return PTR_ERR(cfg);

	if (cfg->version != USB_STREAM_INTERFACE_VERSION) {
		err = -ENXIO;
		goto free;
	}
	high_speed = us122l->dev->speed == USB_SPEED_HIGH;
	/* 88.2/96 kHz are only reachable on a high-speed link. */
	if ((cfg->sample_rate != 44100 && cfg->sample_rate != 48000 &&
	     (!high_speed ||
	      (cfg->sample_rate != 88200 && cfg->sample_rate != 96000))) ||
	    cfg->frame_size != 6 ||
	    cfg->period_frames > 0x3000) {
		err = -EINVAL;
		goto free;
	}
	switch (cfg->sample_rate) {
	case 44100:
		min_period_frames = 48;
		break;
	case 48000:
		min_period_frames = 52;
		break;
	default:
		min_period_frames = 104;
		break;
	}
	if (!high_speed)
		min_period_frames <<= 1;
	if (cfg->period_frames < min_period_frames) {
		err = -EINVAL;
		goto free;
	}

	snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);

	mutex_lock(&us122l->mutex);
	s = us122l->sk.s;
	if (!us122l->master)
		us122l->master = file;
	else if (us122l->master != file) {
		/* A slave must request exactly the running config. */
		if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) {
			err = -EIO;
			goto unlock;
		}
		us122l->slave = file;
	}
	if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) ||
	    s->state == usb_stream_xrun) {
		us122l_stop(us122l);
		if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames))
			err = -EIO;
		else
			err = 1;
	}
unlock:
	mutex_unlock(&us122l->mutex);
free:
	kfree(cfg);
	wake_up_all(&us122l->sk.sleep);
	return err;
}

#define SND_USB_STREAM_ID "USB STREAM"
/* Create the hwdep device userspace uses to drive the stream. */
static int usb_stream_hwdep_new(struct snd_card *card)
{
	int err;
	struct snd_hwdep *hw;
	struct usb_device *dev = US122L(card)->dev;

	err = snd_hwdep_new(card, SND_USB_STREAM_ID, 0, &hw);
	if (err < 0)
		return err;

	hw->iface = SNDRV_HWDEP_IFACE_USB_STREAM;
	hw->private_data = US122L(card);
	hw->ops.open = usb_stream_hwdep_open;
	hw->ops.release = usb_stream_hwdep_release;
	hw->ops.ioctl = usb_stream_hwdep_ioctl;
	hw->ops.ioctl_compat = usb_stream_hwdep_ioctl;
	hw->ops.mmap = usb_stream_hwdep_mmap;
	hw->ops.poll = usb_stream_hwdep_poll;

	sprintf(hw->name, "/proc/bus/usb/%03d/%03d/hwdeppcm",
		dev->bus->busnum, dev->devnum);
	return 0;
}

/*
 * Bring the hardware up: select altsettings, poke the vendor init
 * sequence, start a default 44.1 kHz stream, and create the MIDI and
 * hwdep devices.  Returns false (after cleanup) on any failure.
 */
static bool us122l_create_card(struct snd_card *card)
{
	int err;
	struct us122l *us122l = US122L(card);

	if (us122l->dev->descriptor.idProduct == USB_ID_US144 ||
	    us122l->dev->descriptor.idProduct == USB_ID_US144MKII) {
		err = usb_set_interface(us122l->dev, 0, 1);
		if (err) {
			snd_printk(KERN_ERR "usb_set_interface error \n");
			return false;
		}
	}
	err = usb_set_interface(us122l->dev, 1, 1);
	if (err) {
		snd_printk(KERN_ERR "usb_set_interface error \n");
		return false;
	}

	pt_info_set(us122l->dev, 0x11);
	pt_info_set(us122l->dev, 0x10);

	if (!us122l_start(us122l, 44100, 256))
		return false;

	if (us122l->dev->descriptor.idProduct == USB_ID_US144 ||
	    us122l->dev->descriptor.idProduct == USB_ID_US144MKII)
		err = us144_create_usbmidi(card);
	else
		err = us122l_create_usbmidi(card);
	if (err < 0) {
		snd_printk(KERN_ERR "us122l_create_usbmidi error %i \n", err);
		us122l_stop(us122l);
		return false;
	}
	err = usb_stream_hwdep_new(card);
	if (err < 0) {
/* release the midi resources */
		struct list_head *p;
		list_for_each(p, &us122l->midi_list)
			snd_usbmidi_disconnect(p);

		us122l_stop(us122l);
		return false;
	}
	return true;
}

/* Card destructor: release this card's slot in the used table. */
static void snd_us122l_free(struct snd_card *card)
{
	struct us122l	*us122l = US122L(card);
	int		index = us122l->card_index;
	if (index >= 0 &&  index < SNDRV_CARDS)
		snd_us122l_card_used[index] = 0;
}

/* Allocate a free card slot and initialize the snd_card / us122l state. */
static int usx2y_create_card(struct usb_device *device,
			     struct usb_interface *intf,
			     struct snd_card **cardp)
{
	int		dev;
	struct snd_card *card;
	int err;

	for (dev = 0; dev < SNDRV_CARDS; ++dev)
		if (enable[dev] && !snd_us122l_card_used[dev])
			break;
	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	err = snd_card_new(&intf->dev, index[dev], id[dev], THIS_MODULE,
			   sizeof(struct us122l), &card);
	if (err < 0)
		return err;
	/* Claim the slot and remember it for snd_us122l_free(). */
	snd_us122l_card_used[US122L(card)->card_index = dev] = 1;
	card->private_free = snd_us122l_free;
	US122L(card)->dev = device;
	mutex_init(&US122L(card)->mutex);
	init_waitqueue_head(&US122L(card)->sk.sleep);
	INIT_LIST_HEAD(&US122L(card)->midi_list);
	strcpy(card->driver, "USB "NAME_ALLCAPS"");
	sprintf(card->shortname, "TASCAM "NAME_ALLCAPS"");
	sprintf(card->longname, "%s (%x:%x if %d at %03d/%03d)",
		card->shortname,
		le16_to_cpu(device->descriptor.idVendor),
		le16_to_cpu(device->descriptor.idProduct),
		0,
		US122L(card)->dev->bus->busnum,
		US122L(card)->dev->devnum
		);
	*cardp = card;
	return 0;
}

/* Create, configure and register the card; pins interface 0 and the
 * usb_device with extra references for the card's lifetime. */
static int us122l_usb_probe(struct usb_interface *intf,
			    const struct usb_device_id *device_id,
			    struct snd_card **cardp)
{
	struct usb_device *device = interface_to_usbdev(intf);
	struct snd_card *card;
	int err;

	err = usx2y_create_card(device, intf, &card);
	if (err < 0)
		return err;

	if (!us122l_create_card(card)) {
		snd_card_free(card);
		return -EINVAL;
	}

	err = snd_card_register(card);
	if (err < 0) {
		snd_card_free(card);
		return err;
	}

	usb_get_intf(usb_ifnum_to_if(device, 0));
	usb_get_dev(device);
	*cardp = card;
	return 0;
}

/*
 * USB probe entry point.  The US-144 cannot run behind EHCI; only
 * interface 1 carries the card (probes on other interfaces succeed
 * without doing anything).
 */
static int snd_us122l_probe(struct usb_interface *intf,
			    const struct usb_device_id *id)
{
	struct usb_device *device = interface_to_usbdev(intf);
	struct snd_card *card;
	int err;

	if ((device->descriptor.idProduct == USB_ID_US144 ||
	     device->descriptor.idProduct == USB_ID_US144MKII)
		&& device->speed == USB_SPEED_HIGH) {
		snd_printk(KERN_ERR "disable ehci-hcd to run US-144 \n");
		return -ENODEV;
	}

	snd_printdd(KERN_DEBUG"%p:%i\n",
		    intf, intf->cur_altsetting->desc.bInterfaceNumber);
	if (intf->cur_altsetting->desc.bInterfaceNumber != 1)
		return 0;

	err = us122l_usb_probe(usb_get_intf(intf), id, &card);
	if (err < 0) {
		usb_put_intf(intf);
		return err;
	}

	usb_set_intfdata(intf, card);
	return 0;
}

/* Disconnect: stop everything, drop the probe-time references, then
 * wait for all userspace mappings to go away before freeing the card. */
static void snd_us122l_disconnect(struct usb_interface *intf)
{
	struct snd_card *card;
	struct us122l *us122l;
	struct list_head *p;

	card = usb_get_intfdata(intf);
	if (!card)
		return;

	snd_card_disconnect(card);

	us122l = US122L(card);
	mutex_lock(&us122l->mutex);
	us122l_stop(us122l);
	mutex_unlock(&us122l->mutex);

/* release the midi resources */
	list_for_each(p, &us122l->midi_list) {
		snd_usbmidi_disconnect(p);
	}

	usb_put_intf(usb_ifnum_to_if(us122l->dev, 0));
	usb_put_intf(usb_ifnum_to_if(us122l->dev, 1));
	usb_put_dev(us122l->dev);

	while (atomic_read(&us122l->mmap_count))
		msleep(500);

	snd_card_free(card);
}

/* PM suspend: announce D3hot, then quiesce MIDI input and the stream. */
static int snd_us122l_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct snd_card *card;
	struct us122l *us122l;
	struct list_head *p;

	card = usb_get_intfdata(intf);
	if (!card)
		return 0;
	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);

	us122l = US122L(card);
	if (!us122l)
		return 0;

	list_for_each(p, &us122l->midi_list)
		snd_usbmidi_input_stop(p);

	mutex_lock(&us122l->mutex);
	usb_stream_stop(&us122l->sk);
	mutex_unlock(&us122l->mutex);

	return 0;
}

/* PM resume: replay the hardware init sequence, restore the previous
 * sample rate, and restart stream and MIDI input. */
static int snd_us122l_resume(struct usb_interface *intf)
{
	struct snd_card *card;
	struct us122l *us122l;
	struct list_head *p;
	int err;

	card = usb_get_intfdata(intf);
	if (!card)
		return 0;

	us122l = US122L(card);
	if (!us122l)
		return 0;

	mutex_lock(&us122l->mutex);
	/* needed, doesn't restart without: */
	if (us122l->dev->descriptor.idProduct == USB_ID_US144 ||
	    us122l->dev->descriptor.idProduct == USB_ID_US144MKII) {
		err = usb_set_interface(us122l->dev, 0, 1);
		if (err) {
			snd_printk(KERN_ERR "usb_set_interface error \n");
			goto unlock;
		}
	}
	err = usb_set_interface(us122l->dev, 1, 1);
	if (err) {
		snd_printk(KERN_ERR "usb_set_interface error \n");
		goto unlock;
	}

	pt_info_set(us122l->dev, 0x11);
	pt_info_set(us122l->dev, 0x10);

	err = us122l_set_sample_rate(us122l->dev,
				     us122l->sk.s->cfg.sample_rate);
	if (err < 0) {
		snd_printk(KERN_ERR "us122l_set_sample_rate error \n");
		goto unlock;
	}
	err = usb_stream_start(&us122l->sk);
	if (err)
		goto unlock;

	list_for_each(p, &us122l->midi_list)
		snd_usbmidi_input_start(p);
unlock:
	mutex_unlock(&us122l->mutex);
	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
	return err;
}

static struct usb_device_id snd_us122l_usb_id_table[] = {
	{
		.match_flags =	USB_DEVICE_ID_MATCH_DEVICE,
		.idVendor =	0x0644,
		.idProduct =	USB_ID_US122L
	},
	{	/* US-144 only works at USB1.1! Disable module ehci-hcd. */
		.match_flags =	USB_DEVICE_ID_MATCH_DEVICE,
		.idVendor =	0x0644,
		.idProduct =	USB_ID_US144
	},
	{
		.match_flags =	USB_DEVICE_ID_MATCH_DEVICE,
		.idVendor =	0x0644,
		.idProduct =	USB_ID_US122MKII
	},
	{
		.match_flags =	USB_DEVICE_ID_MATCH_DEVICE,
		.idVendor =	0x0644,
		.idProduct =	USB_ID_US144MKII
	},
	{ /* terminator */ }
};
MODULE_DEVICE_TABLE(usb, snd_us122l_usb_id_table);

static struct usb_driver snd_us122l_usb_driver = {
	.name =		"snd-usb-us122l",
	.probe =	snd_us122l_probe,
	.disconnect =	snd_us122l_disconnect,
	.suspend =	snd_us122l_suspend,
	.resume =	snd_us122l_resume,
	.reset_resume =	snd_us122l_resume,
	.id_table =	snd_us122l_usb_id_table,
	.supports_autosuspend = 1
};

module_usb_driver(snd_us122l_usb_driver);
gpl-2.0
jerdog/android_kernel_asus_ze500cl
drivers/pwm/pwm-tiecap.c
2057
9205
/* * ECAP PWM driver * * Copyright (C) 2012 Texas Instruments, Inc. - http://www.ti.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/pm_runtime.h> #include <linux/pwm.h> #include <linux/of_device.h> #include <linux/pinctrl/consumer.h> #include "pwm-tipwmss.h" /* ECAP registers and bits definitions */ #define CAP1 0x08 #define CAP2 0x0C #define CAP3 0x10 #define CAP4 0x14 #define ECCTL2 0x2A #define ECCTL2_APWM_POL_LOW BIT(10) #define ECCTL2_APWM_MODE BIT(9) #define ECCTL2_SYNC_SEL_DISA (BIT(7) | BIT(6)) #define ECCTL2_TSCTR_FREERUN BIT(4) struct ecap_context { u32 cap3; u32 cap4; u16 ecctl2; }; struct ecap_pwm_chip { struct pwm_chip chip; unsigned int clk_rate; void __iomem *mmio_base; struct ecap_context ctx; }; static inline struct ecap_pwm_chip *to_ecap_pwm_chip(struct pwm_chip *chip) { return container_of(chip, struct ecap_pwm_chip, chip); } /* * period_ns = 10^9 * period_cycles / PWM_CLK_RATE * duty_ns = 10^9 * duty_cycles / PWM_CLK_RATE */ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip); unsigned long long c; unsigned long period_cycles, duty_cycles; unsigned int reg_val; if 
(period_ns > NSEC_PER_SEC) return -ERANGE; c = pc->clk_rate; c = c * period_ns; do_div(c, NSEC_PER_SEC); period_cycles = (unsigned long)c; if (period_cycles < 1) { period_cycles = 1; duty_cycles = 1; } else { c = pc->clk_rate; c = c * duty_ns; do_div(c, NSEC_PER_SEC); duty_cycles = (unsigned long)c; } pm_runtime_get_sync(pc->chip.dev); reg_val = readw(pc->mmio_base + ECCTL2); /* Configure APWM mode & disable sync option */ reg_val |= ECCTL2_APWM_MODE | ECCTL2_SYNC_SEL_DISA; writew(reg_val, pc->mmio_base + ECCTL2); if (!test_bit(PWMF_ENABLED, &pwm->flags)) { /* Update active registers if not running */ writel(duty_cycles, pc->mmio_base + CAP2); writel(period_cycles, pc->mmio_base + CAP1); } else { /* * Update shadow registers to configure period and * compare values. This helps current PWM period to * complete on reconfiguring */ writel(duty_cycles, pc->mmio_base + CAP4); writel(period_cycles, pc->mmio_base + CAP3); } if (!test_bit(PWMF_ENABLED, &pwm->flags)) { reg_val = readw(pc->mmio_base + ECCTL2); /* Disable APWM mode to put APWM output Low */ reg_val &= ~ECCTL2_APWM_MODE; writew(reg_val, pc->mmio_base + ECCTL2); } pm_runtime_put_sync(pc->chip.dev); return 0; } static int ecap_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm, enum pwm_polarity polarity) { struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip); unsigned short reg_val; pm_runtime_get_sync(pc->chip.dev); reg_val = readw(pc->mmio_base + ECCTL2); if (polarity == PWM_POLARITY_INVERSED) /* Duty cycle defines LOW period of PWM */ reg_val |= ECCTL2_APWM_POL_LOW; else /* Duty cycle defines HIGH period of PWM */ reg_val &= ~ECCTL2_APWM_POL_LOW; writew(reg_val, pc->mmio_base + ECCTL2); pm_runtime_put_sync(pc->chip.dev); return 0; } static int ecap_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) { struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip); unsigned int reg_val; /* Leave clock enabled on enabling PWM */ pm_runtime_get_sync(pc->chip.dev); /* * Enable 'Free run Time stamp counter 
mode' to start counter * and 'APWM mode' to enable APWM output */ reg_val = readw(pc->mmio_base + ECCTL2); reg_val |= ECCTL2_TSCTR_FREERUN | ECCTL2_APWM_MODE; writew(reg_val, pc->mmio_base + ECCTL2); return 0; } static void ecap_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) { struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip); unsigned int reg_val; /* * Disable 'Free run Time stamp counter mode' to stop counter * and 'APWM mode' to put APWM output to low */ reg_val = readw(pc->mmio_base + ECCTL2); reg_val &= ~(ECCTL2_TSCTR_FREERUN | ECCTL2_APWM_MODE); writew(reg_val, pc->mmio_base + ECCTL2); /* Disable clock on PWM disable */ pm_runtime_put_sync(pc->chip.dev); } static void ecap_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { if (test_bit(PWMF_ENABLED, &pwm->flags)) { dev_warn(chip->dev, "Removing PWM device without disabling\n"); pm_runtime_put_sync(chip->dev); } } static const struct pwm_ops ecap_pwm_ops = { .free = ecap_pwm_free, .config = ecap_pwm_config, .set_polarity = ecap_pwm_set_polarity, .enable = ecap_pwm_enable, .disable = ecap_pwm_disable, .owner = THIS_MODULE, }; static const struct of_device_id ecap_of_match[] = { { .compatible = "ti,am33xx-ecap" }, {}, }; MODULE_DEVICE_TABLE(of, ecap_of_match); static int ecap_pwm_probe(struct platform_device *pdev) { int ret; struct resource *r; struct clk *clk; struct ecap_pwm_chip *pc; u16 status; struct pinctrl *pinctrl; pinctrl = devm_pinctrl_get_select_default(&pdev->dev); if (IS_ERR(pinctrl)) dev_warn(&pdev->dev, "unable to select pin group\n"); pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL); if (!pc) { dev_err(&pdev->dev, "failed to allocate memory\n"); return -ENOMEM; } clk = devm_clk_get(&pdev->dev, "fck"); if (IS_ERR(clk)) { dev_err(&pdev->dev, "failed to get clock\n"); return PTR_ERR(clk); } pc->clk_rate = clk_get_rate(clk); if (!pc->clk_rate) { dev_err(&pdev->dev, "failed to get clock rate\n"); return -EINVAL; } pc->chip.dev = &pdev->dev; pc->chip.ops = &ecap_pwm_ops; 
pc->chip.of_xlate = of_pwm_xlate_with_flags; pc->chip.of_pwm_n_cells = 3; pc->chip.base = -1; pc->chip.npwm = 1; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); pc->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(pc->mmio_base)) return PTR_ERR(pc->mmio_base); ret = pwmchip_add(&pc->chip); if (ret < 0) { dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); return ret; } pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); status = pwmss_submodule_state_change(pdev->dev.parent, PWMSS_ECAPCLK_EN); if (!(status & PWMSS_ECAPCLK_EN_ACK)) { dev_err(&pdev->dev, "PWMSS config space clock enable failed\n"); ret = -EINVAL; goto pwmss_clk_failure; } pm_runtime_put_sync(&pdev->dev); platform_set_drvdata(pdev, pc); return 0; pwmss_clk_failure: pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); pwmchip_remove(&pc->chip); return ret; } static int ecap_pwm_remove(struct platform_device *pdev) { struct ecap_pwm_chip *pc = platform_get_drvdata(pdev); pm_runtime_get_sync(&pdev->dev); /* * Due to hardware misbehaviour, acknowledge of the stop_req * is missing. Hence checking of the status bit skipped. 
*/ pwmss_submodule_state_change(pdev->dev.parent, PWMSS_ECAPCLK_STOP_REQ); pm_runtime_put_sync(&pdev->dev); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); return pwmchip_remove(&pc->chip); } static void ecap_pwm_save_context(struct ecap_pwm_chip *pc) { pm_runtime_get_sync(pc->chip.dev); pc->ctx.ecctl2 = readw(pc->mmio_base + ECCTL2); pc->ctx.cap4 = readl(pc->mmio_base + CAP4); pc->ctx.cap3 = readl(pc->mmio_base + CAP3); pm_runtime_put_sync(pc->chip.dev); } static void ecap_pwm_restore_context(struct ecap_pwm_chip *pc) { writel(pc->ctx.cap3, pc->mmio_base + CAP3); writel(pc->ctx.cap4, pc->mmio_base + CAP4); writew(pc->ctx.ecctl2, pc->mmio_base + ECCTL2); } #ifdef CONFIG_PM_SLEEP static int ecap_pwm_suspend(struct device *dev) { struct ecap_pwm_chip *pc = dev_get_drvdata(dev); struct pwm_device *pwm = pc->chip.pwms; ecap_pwm_save_context(pc); /* Disable explicitly if PWM is running */ if (test_bit(PWMF_ENABLED, &pwm->flags)) pm_runtime_put_sync(dev); return 0; } static int ecap_pwm_resume(struct device *dev) { struct ecap_pwm_chip *pc = dev_get_drvdata(dev); struct pwm_device *pwm = pc->chip.pwms; /* Enable explicitly if PWM was running */ if (test_bit(PWMF_ENABLED, &pwm->flags)) pm_runtime_get_sync(dev); ecap_pwm_restore_context(pc); return 0; } #endif static SIMPLE_DEV_PM_OPS(ecap_pwm_pm_ops, ecap_pwm_suspend, ecap_pwm_resume); static struct platform_driver ecap_pwm_driver = { .driver = { .name = "ecap", .owner = THIS_MODULE, .of_match_table = ecap_of_match, .pm = &ecap_pwm_pm_ops, }, .probe = ecap_pwm_probe, .remove = ecap_pwm_remove, }; module_platform_driver(ecap_pwm_driver); MODULE_DESCRIPTION("ECAP PWM driver"); MODULE_AUTHOR("Texas Instruments"); MODULE_LICENSE("GPL");
gpl-2.0
Coolexe/shooter-ics-crc-3.0.16-294f767
drivers/net/can/softing/softing_fw.c
2569
18770
/* * Copyright (C) 2008-2010 * * - Kurt Van Dijck, EIA Electronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the version 2 of the GNU General Public License * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/firmware.h> #include <linux/sched.h> #include <asm/div64.h> #include "softing.h" /* * low level DPRAM command. * Make sure that card->dpram[DPRAM_FCT_HOST] is preset */ static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector, const char *msg) { int ret; unsigned long stamp; iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]); iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]); iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]); /* be sure to flush this to the card */ wmb(); stamp = jiffies + 1 * HZ; /* wait for card */ do { /* DPRAM_FCT_HOST is _not_ aligned */ ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) + (ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8); /* don't have any cached variables */ rmb(); if (ret == RES_OK) /* read return-value now */ return ioread16(&card->dpram[DPRAM_FCT_RESULT]); if ((ret != vector) || time_after(jiffies, stamp)) break; /* process context => relax */ usleep_range(500, 10000); } while (1); ret = (ret == RES_NONE) ? 
-ETIMEDOUT : -ECANCELED; dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret); return ret; } static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg) { int ret; ret = _softing_fct_cmd(card, cmd, 0, msg); if (ret > 0) { dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret); ret = -EIO; } return ret; } int softing_bootloader_command(struct softing *card, int16_t cmd, const char *msg) { int ret; unsigned long stamp; iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]); iowrite16(cmd, &card->dpram[DPRAM_COMMAND]); /* be sure to flush this to the card */ wmb(); stamp = jiffies + 3 * HZ; /* wait for card */ do { ret = ioread16(&card->dpram[DPRAM_RECEIPT]); /* don't have any cached variables */ rmb(); if (ret == RES_OK) return 0; if (time_after(jiffies, stamp)) break; /* process context => relax */ usleep_range(500, 10000); } while (!signal_pending(current)); ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED; dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret); return ret; } static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr, uint16_t *plen, const uint8_t **pdat) { uint16_t checksum[2]; const uint8_t *mem; const uint8_t *end; /* * firmware records are a binary, unaligned stream composed of: * uint16_t type; * uint32_t addr; * uint16_t len; * uint8_t dat[len]; * uint16_t checksum; * all values in little endian. * We could define a struct for this, with __attribute__((packed)), * but would that solve the alignment in _all_ cases (cfr. the * struct itself may be an odd address)? * * I chose to use leXX_to_cpup() since this solves both * endianness & alignment. 
*/ mem = *pmem; *ptype = le16_to_cpup((void *)&mem[0]); *paddr = le32_to_cpup((void *)&mem[2]); *plen = le16_to_cpup((void *)&mem[6]); *pdat = &mem[8]; /* verify checksum */ end = &mem[8 + *plen]; checksum[0] = le16_to_cpup((void *)end); for (checksum[1] = 0; mem < end; ++mem) checksum[1] += *mem; if (checksum[0] != checksum[1]) return -EINVAL; /* increment */ *pmem += 10 + *plen; return 0; } int softing_load_fw(const char *file, struct softing *card, __iomem uint8_t *dpram, unsigned int size, int offset) { const struct firmware *fw; int ret; const uint8_t *mem, *end, *dat; uint16_t type, len; uint32_t addr; uint8_t *buf = NULL; int buflen = 0; int8_t type_end = 0; ret = request_firmware(&fw, file, &card->pdev->dev); if (ret < 0) return ret; dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes" ", offset %c0x%04x\n", card->pdat->name, file, (unsigned int)fw->size, (offset >= 0) ? '+' : '-', (unsigned int)abs(offset)); /* parse the firmware */ mem = fw->data; end = &mem[fw->size]; /* look for header record */ ret = fw_parse(&mem, &type, &addr, &len, &dat); if (ret < 0) goto failed; if (type != 0xffff) goto failed; if (strncmp("Structured Binary Format, Softing GmbH" , dat, len)) { ret = -EINVAL; goto failed; } /* ok, we had a header */ while (mem < end) { ret = fw_parse(&mem, &type, &addr, &len, &dat); if (ret < 0) goto failed; if (type == 3) { /* start address, not used here */ continue; } else if (type == 1) { /* eof */ type_end = 1; break; } else if (type != 0) { ret = -EINVAL; goto failed; } if ((addr + len + offset) > size) goto failed; memcpy_toio(&dpram[addr + offset], dat, len); /* be sure to flush caches from IO space */ mb(); if (len > buflen) { /* align buflen */ buflen = (len + (1024-1)) & ~(1024-1); buf = krealloc(buf, buflen, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto failed; } } /* verify record data */ memcpy_fromio(buf, &dpram[addr + offset], len); if (memcmp(buf, dat, len)) { /* is not ok */ dev_alert(&card->pdev->dev, "DPRAM readback 
failed\n"); ret = -EIO; goto failed; } } if (!type_end) /* no end record seen */ goto failed; ret = 0; failed: kfree(buf); release_firmware(fw); if (ret < 0) dev_info(&card->pdev->dev, "firmware %s failed\n", file); return ret; } int softing_load_app_fw(const char *file, struct softing *card) { const struct firmware *fw; const uint8_t *mem, *end, *dat; int ret, j; uint16_t type, len; uint32_t addr, start_addr = 0; unsigned int sum, rx_sum; int8_t type_end = 0, type_entrypoint = 0; ret = request_firmware(&fw, file, &card->pdev->dev); if (ret) { dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n", file, ret); return ret; } dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n", file, (unsigned long)fw->size); /* parse the firmware */ mem = fw->data; end = &mem[fw->size]; /* look for header record */ ret = fw_parse(&mem, &type, &addr, &len, &dat); if (ret) goto failed; ret = -EINVAL; if (type != 0xffff) { dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n", type); goto failed; } if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) { dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n", len, dat); goto failed; } /* ok, we had a header */ while (mem < end) { ret = fw_parse(&mem, &type, &addr, &len, &dat); if (ret) goto failed; if (type == 3) { /* start address */ start_addr = addr; type_entrypoint = 1; continue; } else if (type == 1) { /* eof */ type_end = 1; break; } else if (type != 0) { dev_alert(&card->pdev->dev, "unknown record type 0x%04x\n", type); ret = -EINVAL; goto failed; } /* regualar data */ for (sum = 0, j = 0; j < len; ++j) sum += dat[j]; /* work in 16bit (target) */ sum &= 0xffff; memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len); iowrite32(card->pdat->app.offs + card->pdat->app.addr, &card->dpram[DPRAM_COMMAND + 2]); iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]); iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]); iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]); ret = softing_bootloader_command(card, 1, 
"loading app."); if (ret < 0) goto failed; /* verify checksum */ rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]); if (rx_sum != sum) { dev_alert(&card->pdev->dev, "SRAM seems to be damaged" ", wanted 0x%04x, got 0x%04x\n", sum, rx_sum); ret = -EIO; goto failed; } } if (!type_end || !type_entrypoint) goto failed; /* start application in card */ iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]); iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]); ret = softing_bootloader_command(card, 3, "start app."); if (ret < 0) goto failed; ret = 0; failed: release_firmware(fw); if (ret < 0) dev_info(&card->pdev->dev, "firmware %s failed\n", file); return ret; } static int softing_reset_chip(struct softing *card) { int ret; do { /* reset chip */ iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]); iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]); iowrite8(1, &card->dpram[DPRAM_RESET]); iowrite8(0, &card->dpram[DPRAM_RESET+1]); ret = softing_fct_cmd(card, 0, "reset_can"); if (!ret) break; if (signal_pending(current)) /* don't wait any longer */ break; } while (1); card->tx.pending = 0; return ret; } int softing_chip_poweron(struct softing *card) { int ret; /* sync */ ret = _softing_fct_cmd(card, 99, 0x55, "sync-a"); if (ret < 0) goto failed; ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b"); if (ret < 0) goto failed; ret = softing_reset_chip(card); if (ret < 0) goto failed; /* get_serial */ ret = softing_fct_cmd(card, 43, "get_serial_number"); if (ret < 0) goto failed; card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]); /* get_version */ ret = softing_fct_cmd(card, 12, "get_version"); if (ret < 0) goto failed; card->id.fw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]); card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]); card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]); card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]); card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]); return 0; failed: return ret; } static void 
softing_initialize_timestamp(struct softing *card) { uint64_t ovf; card->ts_ref = ktime_get(); /* 16MHz is the reference */ ovf = 0x100000000ULL * 16; do_div(ovf, card->pdat->freq ?: 16); card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf); } ktime_t softing_raw2ktime(struct softing *card, u32 raw) { uint64_t rawl; ktime_t now, real_offset; ktime_t target; ktime_t tmp; now = ktime_get(); real_offset = ktime_sub(ktime_get_real(), now); /* find nsec from card */ rawl = raw * 16; do_div(rawl, card->pdat->freq ?: 16); target = ktime_add_us(card->ts_ref, rawl); /* test for overflows */ tmp = ktime_add(target, card->ts_overflow); while (unlikely(ktime_to_ns(tmp) > ktime_to_ns(now))) { card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow); target = tmp; tmp = ktime_add(target, card->ts_overflow); } return ktime_add(target, real_offset); } static inline int softing_error_reporting(struct net_device *netdev) { struct softing_priv *priv = netdev_priv(netdev); return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) ? 
1 : 0; } int softing_startstop(struct net_device *dev, int up) { int ret; struct softing *card; struct softing_priv *priv; struct net_device *netdev; int bus_bitmask_start; int j, error_reporting; struct can_frame msg; const struct can_bittiming *bt; priv = netdev_priv(dev); card = priv->card; if (!card->fw.up) return -EIO; ret = mutex_lock_interruptible(&card->fw.lock); if (ret) return ret; bus_bitmask_start = 0; if (dev && up) /* prepare to start this bus as well */ bus_bitmask_start |= (1 << priv->index); /* bring netdevs down */ for (j = 0; j < ARRAY_SIZE(card->net); ++j) { netdev = card->net[j]; if (!netdev) continue; priv = netdev_priv(netdev); if (dev != netdev) netif_stop_queue(netdev); if (netif_running(netdev)) { if (dev != netdev) bus_bitmask_start |= (1 << j); priv->tx.pending = 0; priv->tx.echo_put = 0; priv->tx.echo_get = 0; /* * this bus' may just have called open_candev() * which is rather stupid to call close_candev() * already * but we may come here from busoff recovery too * in which case the echo_skb _needs_ flushing too. 
* just be sure to call open_candev() again */ close_candev(netdev); } priv->can.state = CAN_STATE_STOPPED; } card->tx.pending = 0; softing_enable_irq(card, 0); ret = softing_reset_chip(card); if (ret) goto failed; if (!bus_bitmask_start) /* no busses to be brought up */ goto card_done; if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2) && (softing_error_reporting(card->net[0]) != softing_error_reporting(card->net[1]))) { dev_alert(&card->pdev->dev, "err_reporting flag differs for busses\n"); goto invalid; } error_reporting = 0; if (bus_bitmask_start & 1) { netdev = card->net[0]; priv = netdev_priv(netdev); error_reporting += softing_error_reporting(netdev); /* init chip 1 */ bt = &priv->can.bittiming; iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]); iowrite16(bt->phase_seg1 + bt->prop_seg, &card->dpram[DPRAM_FCT_PARAM + 6]); iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]); iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 
1 : 0, &card->dpram[DPRAM_FCT_PARAM + 10]); ret = softing_fct_cmd(card, 1, "initialize_chip[0]"); if (ret < 0) goto failed; /* set mode */ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]); ret = softing_fct_cmd(card, 3, "set_mode[0]"); if (ret < 0) goto failed; /* set filter */ /* 11bit id & mask */ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]); /* 29bit id.lo & mask.lo & id.hi & mask.hi */ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]); iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]); iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]); iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]); ret = softing_fct_cmd(card, 7, "set_filter[0]"); if (ret < 0) goto failed; /* set output control */ iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]); ret = softing_fct_cmd(card, 5, "set_output[0]"); if (ret < 0) goto failed; } if (bus_bitmask_start & 2) { netdev = card->net[1]; priv = netdev_priv(netdev); error_reporting += softing_error_reporting(netdev); /* init chip2 */ bt = &priv->can.bittiming; iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]); iowrite16(bt->phase_seg1 + bt->prop_seg, &card->dpram[DPRAM_FCT_PARAM + 6]); iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]); iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 
1 : 0, &card->dpram[DPRAM_FCT_PARAM + 10]); ret = softing_fct_cmd(card, 2, "initialize_chip[1]"); if (ret < 0) goto failed; /* set mode2 */ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]); ret = softing_fct_cmd(card, 4, "set_mode[1]"); if (ret < 0) goto failed; /* set filter2 */ /* 11bit id & mask */ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]); /* 29bit id.lo & mask.lo & id.hi & mask.hi */ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]); iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]); iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]); iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]); ret = softing_fct_cmd(card, 8, "set_filter[1]"); if (ret < 0) goto failed; /* set output control2 */ iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]); ret = softing_fct_cmd(card, 6, "set_output[1]"); if (ret < 0) goto failed; } /* enable_error_frame */ /* * Error reporting is switched off at the moment since * the receiving of them is not yet 100% verified * This should be enabled sooner or later * if (error_reporting) { ret = softing_fct_cmd(card, 51, "enable_error_frame"); if (ret < 0) goto failed; } */ /* initialize interface */ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]); ret = softing_fct_cmd(card, 17, "initialize_interface"); if (ret < 0) goto failed; /* enable_fifo */ ret = softing_fct_cmd(card, 36, "enable_fifo"); if (ret < 0) goto failed; /* enable fifo tx ack */ ret = softing_fct_cmd(card, 13, 
"fifo_tx_ack[0]"); if (ret < 0) goto failed; /* enable fifo tx ack2 */ ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]"); if (ret < 0) goto failed; /* start_chip */ ret = softing_fct_cmd(card, 11, "start_chip"); if (ret < 0) goto failed; iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]); iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]); if (card->pdat->generation < 2) { iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]); /* flush the DPRAM caches */ wmb(); } softing_initialize_timestamp(card); /* * do socketcan notifications/status changes * from here, no errors should occur, or the failed: part * must be reviewed */ memset(&msg, 0, sizeof(msg)); msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED; msg.can_dlc = CAN_ERR_DLC; for (j = 0; j < ARRAY_SIZE(card->net); ++j) { if (!(bus_bitmask_start & (1 << j))) continue; netdev = card->net[j]; if (!netdev) continue; priv = netdev_priv(netdev); priv->can.state = CAN_STATE_ERROR_ACTIVE; open_candev(netdev); if (dev != netdev) { /* notify other busses on the restart */ softing_netdev_rx(netdev, &msg, ktime_set(0, 0)); ++priv->can.can_stats.restarts; } netif_wake_queue(netdev); } /* enable interrupts */ ret = softing_enable_irq(card, 1); if (ret) goto failed; card_done: mutex_unlock(&card->fw.lock); return 0; invalid: ret = -EINVAL; failed: softing_enable_irq(card, 0); softing_reset_chip(card); mutex_unlock(&card->fw.lock); /* bring all other interfaces down */ for (j = 0; j < ARRAY_SIZE(card->net); ++j) { netdev = card->net[j]; if (!netdev) continue; dev_close(netdev); } return ret; } int softing_default_output(struct net_device *netdev) { struct softing_priv *priv = netdev_priv(netdev); struct softing *card = priv->card; switch (priv->chip) { case 1000: return (card->pdat->generation < 2) ? 0xfb : 0xfa; case 5: return 0x60; default: return 0x40; } }
gpl-2.0
Ninpo/ninphetamine3
arch/mn10300/kernel/irq.c
2569
9974
/* MN10300 Arch-specific interrupt handling * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/seq_file.h> #include <linux/cpumask.h> #include <asm/setup.h> #include <asm/serial-regs.h> unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = { [0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7 }; EXPORT_SYMBOL(__mn10300_irq_enabled_epsw); #ifdef CONFIG_SMP static char irq_affinity_online[NR_IRQS] = { [0 ... NR_IRQS - 1] = 0 }; #define NR_IRQ_WORDS ((NR_IRQS + 31) / 32) static unsigned long irq_affinity_request[NR_IRQ_WORDS] = { [0 ... NR_IRQ_WORDS - 1] = 0 }; #endif /* CONFIG_SMP */ atomic_t irq_err_count; /* * MN10300 interrupt controller operations */ static void mn10300_cpupic_ack(struct irq_data *d) { unsigned int irq = d->irq; unsigned long flags; u16 tmp; flags = arch_local_cli_save(); GxICR_u8(irq) = GxICR_DETECT; tmp = GxICR(irq); arch_local_irq_restore(flags); } static void __mask_and_set_icr(unsigned int irq, unsigned int mask, unsigned int set) { unsigned long flags; u16 tmp; flags = arch_local_cli_save(); tmp = GxICR(irq); GxICR(irq) = (tmp & mask) | set; tmp = GxICR(irq); arch_local_irq_restore(flags); } static void mn10300_cpupic_mask(struct irq_data *d) { __mask_and_set_icr(d->irq, GxICR_LEVEL, 0); } static void mn10300_cpupic_mask_ack(struct irq_data *d) { unsigned int irq = d->irq; #ifdef CONFIG_SMP unsigned long flags; u16 tmp; flags = arch_local_cli_save(); if (!test_and_clear_bit(irq, irq_affinity_request)) { tmp = GxICR(irq); GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT; tmp = GxICR(irq); } else { u16 tmp2; tmp = GxICR(irq); 
GxICR(irq) = (tmp & GxICR_LEVEL); tmp2 = GxICR(irq); irq_affinity_online[irq] = cpumask_any_and(d->affinity, cpu_online_mask); CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT; tmp = CROSS_GxICR(irq, irq_affinity_online[irq]); } arch_local_irq_restore(flags); #else /* CONFIG_SMP */ __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT); #endif /* CONFIG_SMP */ } static void mn10300_cpupic_unmask(struct irq_data *d) { __mask_and_set_icr(d->irq, GxICR_LEVEL, GxICR_ENABLE); } static void mn10300_cpupic_unmask_clear(struct irq_data *d) { unsigned int irq = d->irq; /* the MN10300 PIC latches its interrupt request bit, even after the * device has ceased to assert its interrupt line and the interrupt * channel has been disabled in the PIC, so for level-triggered * interrupts we need to clear the request bit when we re-enable */ #ifdef CONFIG_SMP unsigned long flags; u16 tmp; flags = arch_local_cli_save(); if (!test_and_clear_bit(irq, irq_affinity_request)) { tmp = GxICR(irq); GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; tmp = GxICR(irq); } else { tmp = GxICR(irq); irq_affinity_online[irq] = cpumask_any_and(d->affinity, cpu_online_mask); CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; tmp = CROSS_GxICR(irq, irq_affinity_online[irq]); } arch_local_irq_restore(flags); #else /* CONFIG_SMP */ __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT); #endif /* CONFIG_SMP */ } #ifdef CONFIG_SMP static int mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask, bool force) { unsigned long flags; int err; flags = arch_local_cli_save(); /* check irq no */ switch (d->irq) { case TMJCIRQ: case RESCHEDULE_IPI: case CALL_FUNC_SINGLE_IPI: case LOCAL_TIMER_IPI: case FLUSH_CACHE_IPI: case CALL_FUNCTION_NMI_IPI: case DEBUGGER_NMI_IPI: #ifdef CONFIG_MN10300_TTYSM0 case SC0RXIRQ: case SC0TXIRQ: #ifdef CONFIG_MN10300_TTYSM0_TIMER8 case TM8IRQ: #elif 
CONFIG_MN10300_TTYSM0_TIMER2 case TM2IRQ: #endif /* CONFIG_MN10300_TTYSM0_TIMER8 */ #endif /* CONFIG_MN10300_TTYSM0 */ #ifdef CONFIG_MN10300_TTYSM1 case SC1RXIRQ: case SC1TXIRQ: #ifdef CONFIG_MN10300_TTYSM1_TIMER12 case TM12IRQ: #elif CONFIG_MN10300_TTYSM1_TIMER9 case TM9IRQ: #elif CONFIG_MN10300_TTYSM1_TIMER3 case TM3IRQ: #endif /* CONFIG_MN10300_TTYSM1_TIMER12 */ #endif /* CONFIG_MN10300_TTYSM1 */ #ifdef CONFIG_MN10300_TTYSM2 case SC2RXIRQ: case SC2TXIRQ: case TM10IRQ: #endif /* CONFIG_MN10300_TTYSM2 */ err = -1; break; default: set_bit(d->irq, irq_affinity_request); err = 0; break; } arch_local_irq_restore(flags); return err; } #endif /* CONFIG_SMP */ /* * MN10300 PIC level-triggered IRQ handling. * * The PIC has no 'ACK' function per se. It is possible to clear individual * channel latches, but each latch relatches whether or not the channel is * masked, so we need to clear the latch when we unmask the channel. * * Also for this reason, we don't supply an ack() op (it's unused anyway if * mask_ack() is provided), and mask_ack() just masks. */ static struct irq_chip mn10300_cpu_pic_level = { .name = "cpu_l", .irq_disable = mn10300_cpupic_mask, .irq_enable = mn10300_cpupic_unmask_clear, .irq_ack = NULL, .irq_mask = mn10300_cpupic_mask, .irq_mask_ack = mn10300_cpupic_mask, .irq_unmask = mn10300_cpupic_unmask_clear, #ifdef CONFIG_SMP .irq_set_affinity = mn10300_cpupic_setaffinity, #endif }; /* * MN10300 PIC edge-triggered IRQ handling. * * We use the latch clearing function of the PIC as the 'ACK' function. */ static struct irq_chip mn10300_cpu_pic_edge = { .name = "cpu_e", .irq_disable = mn10300_cpupic_mask, .irq_enable = mn10300_cpupic_unmask, .irq_ack = mn10300_cpupic_ack, .irq_mask = mn10300_cpupic_mask, .irq_mask_ack = mn10300_cpupic_mask_ack, .irq_unmask = mn10300_cpupic_unmask, #ifdef CONFIG_SMP .irq_set_affinity = mn10300_cpupic_setaffinity, #endif }; /* * 'what should we do if we get a hw irq event on an illegal vector'. 
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(int irq)
{
	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * change the level at which an IRQ executes
 * - must not be called whilst interrupts are being processed!
 */
void set_intr_level(int irq, u16 level)
{
	/* changing the priority while a handler may be running on this
	 * channel would be unsafe, hence the hard check */
	BUG_ON(in_interrupt());

	__mask_and_set_icr(irq, GxICR_ENABLE, level);
}

/*
 * mark an interrupt to be ACK'd after interrupt handlers have been run rather
 * than before
 * - see Documentation/mn10300/features.txt
 */
void mn10300_set_lateack_irq_type(int irq)
{
	irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level,
				 handle_level_irq);
}

/*
 * initialise the interrupt system
 * - every IRQ not already claimed by a specific chip is given the
 *   edge-triggered CPU PIC chip with a level-style flow handler
 */
void __init init_IRQ(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_get_chip(irq) == &no_irq_chip)
			/* due to the PIC latching interrupt requests, even
			 * when the IRQ is disabled, IRQ_PENDING is superfluous
			 * and we can use handle_level_irq() for edge-triggered
			 * interrupts */
			irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
						 handle_level_irq);

	/* let the platform/unit code register its own interrupts */
	unit_init_IRQ();
}

/*
 * handle normal device IRQs
 * - repeatedly polls the interrupt acceptance group register (IAGR) and
 *   dispatches each pending IRQ until none remain
 */
asmlinkage void do_IRQ(void)
{
	unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
	unsigned int cpu_id = smp_processor_id();
	int irq;

	/* sanity-check remaining stack space before taking the interrupt */
	sp = current_stack_pointer();
	BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);

	/* make sure local_irq_enable() doesn't muck up the interrupt priority
	 * setting in EPSW */
	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
	local_save_flags(epsw);
	__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
	irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;

#ifdef CONFIG_MN10300_WD_TIMER
	__IRQ_STAT(cpu_id, __irq_count)++;
#endif

	irq_enter();

	for (;;) {
		/* ask the interrupt controller for the next IRQ to process
		 * - the result we get depends on EPSW.IM
		 */
		irq = IAGR & IAGR_GN;
		if (!irq)
			break;

		/* run the handler with this channel's priority masked */
		local_irq_restore(irq_disabled_epsw);

		/* NOTE(review): the IAGR group number appears to be a
		 * vector-table byte offset, hence >> 2 to recover the IRQ
		 * number - confirm against the MN10300 manual */
		generic_handle_irq(irq >> 2);

		/* restore IRQ controls for IAGR access */
		local_irq_restore(epsw);
	}

	__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;

	irq_exit();
}

/*
 * Display interrupt management information through /proc/interrupts
 * - prints the per-CPU NMI counts (watchdog builds only) and the global
 *   spurious-interrupt error count
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_MN10300_WD_TIMER
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "%10u ", nmi_count(j));
	seq_putc(p, '\n');
#endif

	seq_printf(p, "%*s: ", prec, "ERR");
	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Move all IRQs targeted at this (dying) CPU to a CPU that is still
 * online, rewriting the per-channel ICRs so the new CPU receives them.
 */
void migrate_irqs(void)
{
	int irq;
	unsigned int self, new;
	unsigned long flags;

	self = smp_processor_id();
	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_data *data = irq_get_irq_data(irq);

		/* per-CPU IRQs cannot be migrated */
		if (irqd_is_per_cpu(data))
			continue;

		/* if the affinity mask no longer contains any online CPU,
		 * point it at the first one that is */
		if (cpumask_test_cpu(self, &data->affinity) &&
		    !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
			int cpu_id;
			cpu_id = cpumask_first(cpu_online_mask);
			cpumask_set_cpu(cpu_id, &data->affinity);
		}
		/* We need to operate irq_affinity_online atomically. */
		arch_local_cli_save(flags);
		if (irq_affinity_online[irq] == self) {
			u16 x, tmp;

			/* mask the channel locally first; the reads into
			 * tmp appear to be read-backs that flush the posted
			 * ICR writes - confirm against the MN10300 manual */
			x = GxICR(irq);
			GxICR(irq) = x & GxICR_LEVEL;
			tmp = GxICR(irq);

			new = cpumask_any_and(&data->affinity,
					      cpu_online_mask);
			irq_affinity_online[irq] = new;

			CROSS_GxICR(irq, new) =
				(x & GxICR_LEVEL) | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, new);

			/* re-enable on the new CPU, replaying any request
			 * that was latched while we had the channel masked */
			x &= GxICR_LEVEL | GxICR_ENABLE;
			if (GxICR(irq) & GxICR_REQUEST)
				x |= GxICR_REQUEST | GxICR_DETECT;
			CROSS_GxICR(irq, new) = x;
			tmp = CROSS_GxICR(irq, new);
		}
		arch_local_irq_restore(flags);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */
gpl-2.0
fbocharov/au-linux-kernel-spring-2016
linux/arch/arm64/kvm/regmap.c
3337
4812
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/emulate.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/ptrace.h>

/* number of banked-register modes in the table below:
 * USR, FIQ, IRQ, SVC, ABT, UND (SYS reuses the USR bank) */
#define	VCPU_NR_MODES		6

/* index (in unsigned-long units) of a field within struct user_pt_regs */
#define	REG_OFFSET(_reg) \
	(offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))

#define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))

/*
 * For each AArch32 CPU mode, map register number r0..r15 to the index of
 * the 64-bit slot in struct user_pt_regs that shadows it.  Rows are in the
 * order USR, FIQ, IRQ, SVC, ABT, UND, matching the mode decoding done in
 * vcpu_reg32() below.
 */
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
	/* USR Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
		REG_OFFSET(pc)
	},

	/* FIQ Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(compat_r8_fiq),  /* r8 */
		REG_OFFSET(compat_r9_fiq),  /* r9 */
		REG_OFFSET(compat_r10_fiq), /* r10 */
		REG_OFFSET(compat_r11_fiq), /* r11 */
		REG_OFFSET(compat_r12_fiq), /* r12 */
		REG_OFFSET(compat_sp_fiq),  /* r13 */
		REG_OFFSET(compat_lr_fiq),  /* r14 */
		REG_OFFSET(pc)
	},

	/* IRQ Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_irq), /* r13 */
		REG_OFFSET(compat_lr_irq), /* r14 */
		REG_OFFSET(pc)
	},

	/* SVC Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_svc), /* r13 */
		REG_OFFSET(compat_lr_svc), /* r14 */
		REG_OFFSET(pc)
	},

	/* ABT Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_abt), /* r13 */
		REG_OFFSET(compat_lr_abt), /* r14 */
		REG_OFFSET(pc)
	},

	/* UND Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_und), /* r13 */
		REG_OFFSET(compat_lr_und), /* r14 */
		REG_OFFSET(pc)
	},
};

/*
 * Return a pointer to the register number valid in the current mode of
 * the virtual CPU.
 *
 * BUG()s if the CPSR holds a mode with no row in vcpu_reg_offsets[]
 * (e.g. a reserved mode encoding).
 */
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
{
	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
	unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;

	switch (mode) {
	case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
		/* assumes USR..SVC encode as 0x10..0x13, so clearing
		 * PSR_MODE32_BIT yields row indices 0..3 (USR, FIQ, IRQ,
		 * SVC) - matches the table ordering above */
		mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
		break;

	case COMPAT_PSR_MODE_ABT:
		mode = 4;
		break;

	case COMPAT_PSR_MODE_UND:
		mode = 5;
		break;

	case COMPAT_PSR_MODE_SYS:
		mode = 0;	/* SYS maps to USR */
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}

/*
 * Return the SPSR for the current mode of the virtual CPU.
 *
 * BUG()s for modes without a banked SPSR (USR/SYS) or reserved mode
 * encodings - callers must only ask for an SPSR in an exception mode.
 */
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
	switch (mode) {
	case COMPAT_PSR_MODE_SVC:
		mode = KVM_SPSR_SVC;
		break;
	case COMPAT_PSR_MODE_ABT:
		mode = KVM_SPSR_ABT;
		break;
	case COMPAT_PSR_MODE_UND:
		mode = KVM_SPSR_UND;
		break;
	case COMPAT_PSR_MODE_IRQ:
		mode = KVM_SPSR_IRQ;
		break;
	case COMPAT_PSR_MODE_FIQ:
		mode = KVM_SPSR_FIQ;
		break;
	default:
		BUG();
	}

	return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[mode];
}
gpl-2.0
virtuous/kernel-7x30-gingerbread-v3
lib/fault-inject.c
4105
8069
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/fault-inject.h>

/*
 * setup_fault_attr() is a helper function for various __setup handlers, so it
 * returns 0 on error, because that is what __setup handlers do.
 */
int __init setup_fault_attr(struct fault_attr *attr, char *str)
{
	unsigned long probability;
	unsigned long interval;
	int times;
	int space;

	/* "<interval>,<probability>,<space>,<times>" */
	if (sscanf(str, "%lu,%lu,%d,%d",
			&interval, &probability, &space, &times) < 4) {
		printk(KERN_WARNING
			"FAULT_INJECTION: failed to parse arguments\n");
		return 0;
	}

	attr->probability = probability;
	attr->interval = interval;
	atomic_set(&attr->times, times);
	atomic_set(&attr->space, space);

	return 1;
}

/* log (and optionally backtrace) a forced failure, gated on verbosity */
static void fail_dump(struct fault_attr *attr)
{
	if (attr->verbose > 0)
		printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure\n");
	if (attr->verbose > 1)
		dump_stack();
}

#define atomic_dec_not_zero(v)		atomic_add_unless((v), -1, 0)

/* task-based filter: only fail in process context, and only for tasks
 * that have opted in via make_it_fail */
static bool fail_task(struct fault_attr *attr, struct task_struct *task)
{
	return !in_interrupt() && task->make_it_fail;
}

#define MAX_STACK_TRACE_DEPTH 32

#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER

/*
 * Walk the current stack and decide whether this call site may fail:
 * any frame inside [reject_start, reject_end) vetoes the failure; a
 * frame inside [require_start, require_end) is required unless the
 * require range is unset (0..ULONG_MAX).
 */
static bool fail_stacktrace(struct fault_attr *attr)
{
	struct stack_trace trace;
	int depth = attr->stacktrace_depth;
	unsigned long entries[MAX_STACK_TRACE_DEPTH];
	int n;
	bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX);

	if (depth == 0)
		return found;

	trace.nr_entries = 0;
	trace.entries = entries;
	trace.max_entries = depth;
	trace.skip = 1;	/* don't report fail_stacktrace() itself */

	save_stack_trace(&trace);
	for (n = 0; n < trace.nr_entries; n++) {
		if (attr->reject_start <= entries[n] &&
			       entries[n] < attr->reject_end)
			return false;
		if (attr->require_start <= entries[n] &&
			       entries[n] < attr->require_end)
			found = true;
	}
	return found;
}

#else

/* no stacktrace filtering configured: never veto a failure */
static inline bool fail_stacktrace(struct fault_attr *attr)
{
	return true;
}

#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */

/*
 * This code is stolen from failmalloc-1.0
 * http://www.nongnu.org/failmalloc/
 */

/*
 * Decide whether the current operation of @size bytes should be failed,
 * applying (in order): the task filter, the remaining-times budget, the
 * free-space allowance, the 1-in-interval throttle, the percentage
 * probability, and the stacktrace filter.  A times value of -1 means
 * "fail forever" (the counter is not decremented).
 */
bool should_fail(struct fault_attr *attr, ssize_t size)
{
	if (attr->task_filter && !fail_task(attr, current))
		return false;

	if (atomic_read(&attr->times) == 0)
		return false;

	/* consume the configured grace space before failing anything */
	if (atomic_read(&attr->space) > size) {
		atomic_sub(size, &attr->space);
		return false;
	}

	if (attr->interval > 1) {
		attr->count++;
		if (attr->count % attr->interval)
			return false;
	}

	if (attr->probability <= random32() % 100)
		return false;

	if (!fail_stacktrace(attr))
		return false;

	fail_dump(attr);

	if (atomic_read(&attr->times) != -1)
		atomic_dec_not_zero(&attr->times);

	return true;
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

/* debugfs accessors for an unsigned long attribute */
static int debugfs_ul_set(void *data, u64 val)
{
	*(unsigned long *)data = val;
	return 0;
}

#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
/* as debugfs_ul_set(), but clamps the value to MAX_STACK_TRACE_DEPTH */
static int debugfs_ul_set_MAX_STACK_TRACE_DEPTH(void *data, u64 val)
{
	*(unsigned long *)data =
		val < MAX_STACK_TRACE_DEPTH ? val : MAX_STACK_TRACE_DEPTH;
	return 0;
}
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */

static int debugfs_ul_get(void *data, u64 *val)
{
	*val = *(unsigned long *)data;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");

static struct dentry *debugfs_create_ul(const char *name, mode_t mode,
				struct dentry *parent, unsigned long *value)
{
	return debugfs_create_file(name, mode, parent, value, &fops_ul);
}

#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
DEFINE_SIMPLE_ATTRIBUTE(fops_ul_MAX_STACK_TRACE_DEPTH, debugfs_ul_get,
			debugfs_ul_set_MAX_STACK_TRACE_DEPTH, "%llu\n");

static struct dentry *debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
	const char *name, mode_t mode,
	struct dentry *parent, unsigned long *value)
{
	return debugfs_create_file(name, mode, parent, value,
				   &fops_ul_MAX_STACK_TRACE_DEPTH);
}
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */

/* debugfs accessors for an atomic_t attribute */
static int debugfs_atomic_t_set(void *data, u64 val)
{
	atomic_set((atomic_t *)data, val);
	return 0;
}

static int debugfs_atomic_t_get(void *data, u64 *val)
{
	*val = atomic_read((atomic_t *)data);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get,
			debugfs_atomic_t_set, "%lld\n");

static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
				struct dentry *parent, atomic_t *value)
{
	return debugfs_create_file(name, mode, parent, value, &fops_atomic_t);
}

/*
 * Tear down the debugfs files created by init_fault_attr_dentries(),
 * NULLing each dentry pointer as it goes so the function is safe to call
 * on a partially-initialised attr (e.g. from the init failure path).
 */
void cleanup_fault_attr_dentries(struct fault_attr *attr)
{
	debugfs_remove(attr->dentries.probability_file);
	attr->dentries.probability_file = NULL;

	debugfs_remove(attr->dentries.interval_file);
	attr->dentries.interval_file = NULL;

	debugfs_remove(attr->dentries.times_file);
	attr->dentries.times_file = NULL;

	debugfs_remove(attr->dentries.space_file);
	attr->dentries.space_file = NULL;

	debugfs_remove(attr->dentries.verbose_file);
	attr->dentries.verbose_file = NULL;

	debugfs_remove(attr->dentries.task_filter_file);
	attr->dentries.task_filter_file = NULL;

#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER

	debugfs_remove(attr->dentries.stacktrace_depth_file);
	attr->dentries.stacktrace_depth_file = NULL;

	debugfs_remove(attr->dentries.require_start_file);
	attr->dentries.require_start_file = NULL;

	debugfs_remove(attr->dentries.require_end_file);
	attr->dentries.require_end_file = NULL;

	debugfs_remove(attr->dentries.reject_start_file);
	attr->dentries.reject_start_file = NULL;

	debugfs_remove(attr->dentries.reject_end_file);
	attr->dentries.reject_end_file = NULL;

#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */

	if (attr->dentries.dir)
		WARN_ON(!simple_empty(attr->dentries.dir));

	debugfs_remove(attr->dentries.dir);
	attr->dentries.dir = NULL;
}

/*
 * Create the debugfs directory @name and one control file per tunable in
 * @attr.  Returns 0 on success or -ENOMEM, cleaning up any files already
 * created on failure.
 */
int init_fault_attr_dentries(struct fault_attr *attr, const char *name)
{
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	memset(&attr->dentries, 0, sizeof(attr->dentries));

	dir = debugfs_create_dir(name, NULL);
	if (!dir)
		goto fail;
	attr->dentries.dir = dir;

	attr->dentries.probability_file =
		debugfs_create_ul("probability", mode, dir, &attr->probability);

	attr->dentries.interval_file =
		debugfs_create_ul("interval", mode, dir, &attr->interval);

	attr->dentries.times_file =
		debugfs_create_atomic_t("times", mode, dir, &attr->times);

	attr->dentries.space_file =
		debugfs_create_atomic_t("space", mode, dir, &attr->space);

	attr->dentries.verbose_file =
		debugfs_create_ul("verbose", mode, dir, &attr->verbose);

	attr->dentries.task_filter_file = debugfs_create_bool("task-filter",
						mode, dir, &attr->task_filter);

	if (!attr->dentries.probability_file || !attr->dentries.interval_file ||
	    !attr->dentries.times_file || !attr->dentries.space_file ||
	    !attr->dentries.verbose_file || !attr->dentries.task_filter_file)
		goto fail;

#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER

	attr->dentries.stacktrace_depth_file =
		debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
			"stacktrace-depth", mode, dir, &attr->stacktrace_depth);

	attr->dentries.require_start_file =
		debugfs_create_ul("require-start", mode, dir, &attr->require_start);

	attr->dentries.require_end_file =
		debugfs_create_ul("require-end", mode, dir, &attr->require_end);

	attr->dentries.reject_start_file =
		debugfs_create_ul("reject-start", mode, dir, &attr->reject_start);

	attr->dentries.reject_end_file =
		debugfs_create_ul("reject-end", mode, dir, &attr->reject_end);

	if (!attr->dentries.stacktrace_depth_file ||
	    !attr->dentries.require_start_file ||
	    !attr->dentries.require_end_file ||
	    !attr->dentries.reject_start_file ||
	    !attr->dentries.reject_end_file)
		goto fail;

#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */

	return 0;
fail:
	cleanup_fault_attr_dentries(attr);
	return -ENOMEM;
}

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
gpl-2.0
Jovy23/N920TUVU2COJ5_Kernel
arch/powerpc/kernel/clock.c
7177
1772
/*
 * Dummy clk implementations for powerpc.
 * These need to be overridden in platform code.
 */

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <asm/clk_interface.h>

/*
 * Platform code fills this in with its real clk operations.  Any slot
 * left NULL makes the matching wrapper below report -ENOSYS (or act as
 * a no-op / return 0 where the interface has no error path).
 */
struct clk_interface clk_functions;

struct clk *clk_get(struct device *dev, const char *id)
{
	return clk_functions.clk_get ? clk_functions.clk_get(dev, id)
				     : ERR_PTR(-ENOSYS);
}
EXPORT_SYMBOL(clk_get);

void clk_put(struct clk *clk)
{
	if (clk_functions.clk_put)
		clk_functions.clk_put(clk);
}
EXPORT_SYMBOL(clk_put);

int clk_enable(struct clk *clk)
{
	return clk_functions.clk_enable ? clk_functions.clk_enable(clk)
					: -ENOSYS;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	if (clk_functions.clk_disable)
		clk_functions.clk_disable(clk);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
	/* no platform hook: report an unknown (zero) rate */
	return clk_functions.clk_get_rate ? clk_functions.clk_get_rate(clk)
					  : 0;
}
EXPORT_SYMBOL(clk_get_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_functions.clk_round_rate
		? clk_functions.clk_round_rate(clk, rate) : -ENOSYS;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_functions.clk_set_rate
		? clk_functions.clk_set_rate(clk, rate) : -ENOSYS;
}
EXPORT_SYMBOL(clk_set_rate);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk_functions.clk_get_parent
		? clk_functions.clk_get_parent(clk) : ERR_PTR(-ENOSYS);
}
EXPORT_SYMBOL(clk_get_parent);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return clk_functions.clk_set_parent
		? clk_functions.clk_set_parent(clk, parent) : -ENOSYS;
}
EXPORT_SYMBOL(clk_set_parent);
gpl-2.0
davidmueller13/Audax_Kernel
drivers/uwb/drp-ie.c
11529
9767
/* * UWB DRP IE management. * * Copyright (C) 2005-2006 Intel Corporation * Copyright (C) 2008 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/uwb.h> #include "uwb-internal.h" /* * Return the reason code for a reservations's DRP IE. */ int uwb_rsv_reason_code(struct uwb_rsv *rsv) { static const int reason_codes[] = { [UWB_RSV_STATE_O_INITIATED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_O_PENDING] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_O_MODIFIED] = UWB_DRP_REASON_MODIFIED, [UWB_RSV_STATE_O_ESTABLISHED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_O_TO_BE_MOVED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_O_MOVE_COMBINING] = UWB_DRP_REASON_MODIFIED, [UWB_RSV_STATE_O_MOVE_REDUCING] = UWB_DRP_REASON_MODIFIED, [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_CONFLICT] = UWB_DRP_REASON_CONFLICT, [UWB_RSV_STATE_T_PENDING] = UWB_DRP_REASON_PENDING, [UWB_RSV_STATE_T_DENIED] = UWB_DRP_REASON_DENIED, [UWB_RSV_STATE_T_RESIZED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, }; return reason_codes[rsv->state]; } /* * Return the reason code 
for a reservations's companion DRP IE . */ int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv) { static const int companion_reason_codes[] = { [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, }; return companion_reason_codes[rsv->state]; } /* * Return the status bit for a reservations's DRP IE. */ int uwb_rsv_status(struct uwb_rsv *rsv) { static const int statuses[] = { [UWB_RSV_STATE_O_INITIATED] = 0, [UWB_RSV_STATE_O_PENDING] = 0, [UWB_RSV_STATE_O_MODIFIED] = 1, [UWB_RSV_STATE_O_ESTABLISHED] = 1, [UWB_RSV_STATE_O_TO_BE_MOVED] = 0, [UWB_RSV_STATE_O_MOVE_COMBINING] = 1, [UWB_RSV_STATE_O_MOVE_REDUCING] = 1, [UWB_RSV_STATE_O_MOVE_EXPANDING] = 1, [UWB_RSV_STATE_T_ACCEPTED] = 1, [UWB_RSV_STATE_T_CONFLICT] = 0, [UWB_RSV_STATE_T_PENDING] = 0, [UWB_RSV_STATE_T_DENIED] = 0, [UWB_RSV_STATE_T_RESIZED] = 1, [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1, [UWB_RSV_STATE_T_EXPANDING_PENDING] = 1, [UWB_RSV_STATE_T_EXPANDING_DENIED] = 1, }; return statuses[rsv->state]; } /* * Return the status bit for a reservations's companion DRP IE . */ int uwb_rsv_companion_status(struct uwb_rsv *rsv) { static const int companion_statuses[] = { [UWB_RSV_STATE_O_MOVE_EXPANDING] = 0, [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0, [UWB_RSV_STATE_T_EXPANDING_PENDING] = 0, [UWB_RSV_STATE_T_EXPANDING_DENIED] = 0, }; return companion_statuses[rsv->state]; } /* * Allocate a DRP IE. * * To save having to free/allocate a DRP IE when its MAS changes, * enough memory is allocated for the maxiumum number of DRP * allocation fields. This gives an overhead per reservation of up to * (UWB_NUM_ZONES - 1) * 4 = 60 octets. 
*/ static struct uwb_ie_drp *uwb_drp_ie_alloc(void) { struct uwb_ie_drp *drp_ie; drp_ie = kzalloc(sizeof(struct uwb_ie_drp) + UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc), GFP_KERNEL); if (drp_ie) { drp_ie->hdr.element_id = UWB_IE_DRP; } return drp_ie; } /* * Fill a DRP IE's allocation fields from a MAS bitmap. */ static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie, struct uwb_mas_bm *mas) { int z, i, num_fields = 0, next = 0; struct uwb_drp_alloc *zones; __le16 current_bmp; DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS); DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE); zones = drp_ie->allocs; bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS); /* Determine unique MAS bitmaps in zones from bitmap. */ for (z = 0; z < UWB_NUM_ZONES; z++) { bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE); if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) { bool found = false; current_bmp = (__le16) *tmp_mas_bm; for (i = 0; i < next; i++) { if (current_bmp == zones[i].mas_bm) { zones[i].zone_bm |= 1 << z; found = true; break; } } if (!found) { num_fields++; zones[next].zone_bm = 1 << z; zones[next].mas_bm = current_bmp; next++; } } bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS); } /* Store in format ready for transmission (le16). */ for (i = 0; i < num_fields; i++) { drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm); drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm); } drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr) + num_fields * sizeof(struct uwb_drp_alloc); } /** * uwb_drp_ie_update - update a reservation's DRP IE * @rsv: the reservation */ int uwb_drp_ie_update(struct uwb_rsv *rsv) { struct uwb_ie_drp *drp_ie; struct uwb_rsv_move *mv; int unsafe; if (rsv->state == UWB_RSV_STATE_NONE) { kfree(rsv->drp_ie); rsv->drp_ie = NULL; return 0; } unsafe = rsv->mas.unsafe ? 
1 : 0; if (rsv->drp_ie == NULL) { rsv->drp_ie = uwb_drp_ie_alloc(); if (rsv->drp_ie == NULL) return -ENOMEM; } drp_ie = rsv->drp_ie; uwb_ie_drp_set_unsafe(drp_ie, unsafe); uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker); uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv)); uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv)); uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv)); uwb_ie_drp_set_stream_index(drp_ie, rsv->stream); uwb_ie_drp_set_type(drp_ie, rsv->type); if (uwb_rsv_is_owner(rsv)) { switch (rsv->target.type) { case UWB_RSV_TARGET_DEV: drp_ie->dev_addr = rsv->target.dev->dev_addr; break; case UWB_RSV_TARGET_DEVADDR: drp_ie->dev_addr = rsv->target.devaddr; break; } } else drp_ie->dev_addr = rsv->owner->dev_addr; uwb_drp_ie_from_bm(drp_ie, &rsv->mas); if (uwb_rsv_has_two_drp_ies(rsv)) { mv = &rsv->mv; if (mv->companion_drp_ie == NULL) { mv->companion_drp_ie = uwb_drp_ie_alloc(); if (mv->companion_drp_ie == NULL) return -ENOMEM; } drp_ie = mv->companion_drp_ie; /* keep all the same configuration of the main drp_ie */ memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp)); /* FIXME: handle properly the unsafe bit */ uwb_ie_drp_set_unsafe(drp_ie, 1); uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv)); uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv)); uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas); } rsv->ie_valid = true; return 0; } /* * Set MAS bits from given MAS bitmap in a single zone of large bitmap. * * We are given a zone id and the MAS bitmap of bits that need to be set in * this zone. Note that this zone may already have bits set and this only * adds settings - we cannot simply assign the MAS bitmap contents to the * zone contents. We iterate over the the bits (MAS) in the zone and set the * bits that are set in the given MAS bitmap. 
*/ static void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm) { int mas; u16 mas_mask; for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) { mas_mask = 1 << mas; if (mas_bm & mas_mask) set_bit(zone * UWB_NUM_ZONES + mas, bm->bm); } } /** * uwb_drp_ie_zones_to_bm - convert DRP allocation fields to a bitmap * @mas: MAS bitmap that will be populated to correspond to the * allocation fields in the DRP IE * @drp_ie: the DRP IE that contains the allocation fields. * * The input format is an array of MAS allocation fields (16 bit Zone * bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section * 16.8.6. The output is a full 256 bit MAS bitmap. * * We go over all the allocation fields, for each allocation field we * know which zones are impacted. We iterate over all the zones * impacted and call a function that will set the correct MAS bits in * each zone. */ void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie) { int numallocs = (drp_ie->hdr.length - 4) / 4; const struct uwb_drp_alloc *alloc; int cnt; u16 zone_bm, mas_bm; u8 zone; u16 zone_mask; bitmap_zero(bm->bm, UWB_NUM_MAS); for (cnt = 0; cnt < numallocs; cnt++) { alloc = &drp_ie->allocs[cnt]; zone_bm = le16_to_cpu(alloc->zone_bm); mas_bm = le16_to_cpu(alloc->mas_bm); for (zone = 0; zone < UWB_NUM_ZONES; zone++) { zone_mask = 1 << zone; if (zone_bm & zone_mask) uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm); } } }
gpl-2.0
ChronoMonochrome/android_kernel_lenovo_msm8916
drivers/uwb/drp-ie.c
11529
9767
/* * UWB DRP IE management. * * Copyright (C) 2005-2006 Intel Corporation * Copyright (C) 2008 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/uwb.h> #include "uwb-internal.h" /* * Return the reason code for a reservations's DRP IE. */ int uwb_rsv_reason_code(struct uwb_rsv *rsv) { static const int reason_codes[] = { [UWB_RSV_STATE_O_INITIATED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_O_PENDING] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_O_MODIFIED] = UWB_DRP_REASON_MODIFIED, [UWB_RSV_STATE_O_ESTABLISHED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_O_TO_BE_MOVED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_O_MOVE_COMBINING] = UWB_DRP_REASON_MODIFIED, [UWB_RSV_STATE_O_MOVE_REDUCING] = UWB_DRP_REASON_MODIFIED, [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_CONFLICT] = UWB_DRP_REASON_CONFLICT, [UWB_RSV_STATE_T_PENDING] = UWB_DRP_REASON_PENDING, [UWB_RSV_STATE_T_DENIED] = UWB_DRP_REASON_DENIED, [UWB_RSV_STATE_T_RESIZED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, }; return reason_codes[rsv->state]; } /* * Return the reason code 
for a reservations's companion DRP IE . */ int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv) { static const int companion_reason_codes[] = { [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, }; return companion_reason_codes[rsv->state]; } /* * Return the status bit for a reservations's DRP IE. */ int uwb_rsv_status(struct uwb_rsv *rsv) { static const int statuses[] = { [UWB_RSV_STATE_O_INITIATED] = 0, [UWB_RSV_STATE_O_PENDING] = 0, [UWB_RSV_STATE_O_MODIFIED] = 1, [UWB_RSV_STATE_O_ESTABLISHED] = 1, [UWB_RSV_STATE_O_TO_BE_MOVED] = 0, [UWB_RSV_STATE_O_MOVE_COMBINING] = 1, [UWB_RSV_STATE_O_MOVE_REDUCING] = 1, [UWB_RSV_STATE_O_MOVE_EXPANDING] = 1, [UWB_RSV_STATE_T_ACCEPTED] = 1, [UWB_RSV_STATE_T_CONFLICT] = 0, [UWB_RSV_STATE_T_PENDING] = 0, [UWB_RSV_STATE_T_DENIED] = 0, [UWB_RSV_STATE_T_RESIZED] = 1, [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1, [UWB_RSV_STATE_T_EXPANDING_PENDING] = 1, [UWB_RSV_STATE_T_EXPANDING_DENIED] = 1, }; return statuses[rsv->state]; } /* * Return the status bit for a reservations's companion DRP IE . */ int uwb_rsv_companion_status(struct uwb_rsv *rsv) { static const int companion_statuses[] = { [UWB_RSV_STATE_O_MOVE_EXPANDING] = 0, [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0, [UWB_RSV_STATE_T_EXPANDING_PENDING] = 0, [UWB_RSV_STATE_T_EXPANDING_DENIED] = 0, }; return companion_statuses[rsv->state]; } /* * Allocate a DRP IE. * * To save having to free/allocate a DRP IE when its MAS changes, * enough memory is allocated for the maxiumum number of DRP * allocation fields. This gives an overhead per reservation of up to * (UWB_NUM_ZONES - 1) * 4 = 60 octets. 
*/ static struct uwb_ie_drp *uwb_drp_ie_alloc(void) { struct uwb_ie_drp *drp_ie; drp_ie = kzalloc(sizeof(struct uwb_ie_drp) + UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc), GFP_KERNEL); if (drp_ie) { drp_ie->hdr.element_id = UWB_IE_DRP; } return drp_ie; } /* * Fill a DRP IE's allocation fields from a MAS bitmap. */ static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie, struct uwb_mas_bm *mas) { int z, i, num_fields = 0, next = 0; struct uwb_drp_alloc *zones; __le16 current_bmp; DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS); DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE); zones = drp_ie->allocs; bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS); /* Determine unique MAS bitmaps in zones from bitmap. */ for (z = 0; z < UWB_NUM_ZONES; z++) { bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE); if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) { bool found = false; current_bmp = (__le16) *tmp_mas_bm; for (i = 0; i < next; i++) { if (current_bmp == zones[i].mas_bm) { zones[i].zone_bm |= 1 << z; found = true; break; } } if (!found) { num_fields++; zones[next].zone_bm = 1 << z; zones[next].mas_bm = current_bmp; next++; } } bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS); } /* Store in format ready for transmission (le16). */ for (i = 0; i < num_fields; i++) { drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm); drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm); } drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr) + num_fields * sizeof(struct uwb_drp_alloc); } /** * uwb_drp_ie_update - update a reservation's DRP IE * @rsv: the reservation */ int uwb_drp_ie_update(struct uwb_rsv *rsv) { struct uwb_ie_drp *drp_ie; struct uwb_rsv_move *mv; int unsafe; if (rsv->state == UWB_RSV_STATE_NONE) { kfree(rsv->drp_ie); rsv->drp_ie = NULL; return 0; } unsafe = rsv->mas.unsafe ? 
1 : 0; if (rsv->drp_ie == NULL) { rsv->drp_ie = uwb_drp_ie_alloc(); if (rsv->drp_ie == NULL) return -ENOMEM; } drp_ie = rsv->drp_ie; uwb_ie_drp_set_unsafe(drp_ie, unsafe); uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker); uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv)); uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv)); uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv)); uwb_ie_drp_set_stream_index(drp_ie, rsv->stream); uwb_ie_drp_set_type(drp_ie, rsv->type); if (uwb_rsv_is_owner(rsv)) { switch (rsv->target.type) { case UWB_RSV_TARGET_DEV: drp_ie->dev_addr = rsv->target.dev->dev_addr; break; case UWB_RSV_TARGET_DEVADDR: drp_ie->dev_addr = rsv->target.devaddr; break; } } else drp_ie->dev_addr = rsv->owner->dev_addr; uwb_drp_ie_from_bm(drp_ie, &rsv->mas); if (uwb_rsv_has_two_drp_ies(rsv)) { mv = &rsv->mv; if (mv->companion_drp_ie == NULL) { mv->companion_drp_ie = uwb_drp_ie_alloc(); if (mv->companion_drp_ie == NULL) return -ENOMEM; } drp_ie = mv->companion_drp_ie; /* keep all the same configuration of the main drp_ie */ memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp)); /* FIXME: handle properly the unsafe bit */ uwb_ie_drp_set_unsafe(drp_ie, 1); uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv)); uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv)); uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas); } rsv->ie_valid = true; return 0; } /* * Set MAS bits from given MAS bitmap in a single zone of large bitmap. * * We are given a zone id and the MAS bitmap of bits that need to be set in * this zone. Note that this zone may already have bits set and this only * adds settings - we cannot simply assign the MAS bitmap contents to the * zone contents. We iterate over the the bits (MAS) in the zone and set the * bits that are set in the given MAS bitmap. 
*/ static void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm) { int mas; u16 mas_mask; for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) { mas_mask = 1 << mas; if (mas_bm & mas_mask) set_bit(zone * UWB_NUM_ZONES + mas, bm->bm); } } /** * uwb_drp_ie_zones_to_bm - convert DRP allocation fields to a bitmap * @mas: MAS bitmap that will be populated to correspond to the * allocation fields in the DRP IE * @drp_ie: the DRP IE that contains the allocation fields. * * The input format is an array of MAS allocation fields (16 bit Zone * bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section * 16.8.6. The output is a full 256 bit MAS bitmap. * * We go over all the allocation fields, for each allocation field we * know which zones are impacted. We iterate over all the zones * impacted and call a function that will set the correct MAS bits in * each zone. */ void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie) { int numallocs = (drp_ie->hdr.length - 4) / 4; const struct uwb_drp_alloc *alloc; int cnt; u16 zone_bm, mas_bm; u8 zone; u16 zone_mask; bitmap_zero(bm->bm, UWB_NUM_MAS); for (cnt = 0; cnt < numallocs; cnt++) { alloc = &drp_ie->allocs[cnt]; zone_bm = le16_to_cpu(alloc->zone_bm); mas_bm = le16_to_cpu(alloc->mas_bm); for (zone = 0; zone < UWB_NUM_ZONES; zone++) { zone_mask = 1 << zone; if (zone_bm & zone_mask) uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm); } } }
gpl-2.0
shakalaca/ASUS_ZenFone_A400CG
linux/kernel/net/bridge/netfilter/ebt_ip.c
13577
3302
/* * ebt_ip * * Authors: * Bart De Schuymer <bdschuym@pandora.be> * * April, 2002 * * Changes: * added ip-sport and ip-dport * Innominate Security Technologies AG <mhopf@innominate.com> * September, 2002 */ #include <linux/ip.h> #include <net/ip.h> #include <linux/in.h> #include <linux/module.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_bridge/ebt_ip.h> struct tcpudphdr { __be16 src; __be16 dst; }; static bool ebt_ip_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct ebt_ip_info *info = par->matchinfo; const struct iphdr *ih; struct iphdr _iph; const struct tcpudphdr *pptr; struct tcpudphdr _ports; ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); if (ih == NULL) return false; if (info->bitmask & EBT_IP_TOS && FWINV(info->tos != ih->tos, EBT_IP_TOS)) return false; if (info->bitmask & EBT_IP_SOURCE && FWINV((ih->saddr & info->smsk) != info->saddr, EBT_IP_SOURCE)) return false; if ((info->bitmask & EBT_IP_DEST) && FWINV((ih->daddr & info->dmsk) != info->daddr, EBT_IP_DEST)) return false; if (info->bitmask & EBT_IP_PROTO) { if (FWINV(info->protocol != ih->protocol, EBT_IP_PROTO)) return false; if (!(info->bitmask & EBT_IP_DPORT) && !(info->bitmask & EBT_IP_SPORT)) return true; if (ntohs(ih->frag_off) & IP_OFFSET) return false; pptr = skb_header_pointer(skb, ih->ihl*4, sizeof(_ports), &_ports); if (pptr == NULL) return false; if (info->bitmask & EBT_IP_DPORT) { u32 dst = ntohs(pptr->dst); if (FWINV(dst < info->dport[0] || dst > info->dport[1], EBT_IP_DPORT)) return false; } if (info->bitmask & EBT_IP_SPORT) { u32 src = ntohs(pptr->src); if (FWINV(src < info->sport[0] || src > info->sport[1], EBT_IP_SPORT)) return false; } } return true; } static int ebt_ip_mt_check(const struct xt_mtchk_param *par) { const struct ebt_ip_info *info = par->matchinfo; const struct ebt_entry *e = par->entryinfo; if (e->ethproto != htons(ETH_P_IP) || e->invflags & EBT_IPROTO) return -EINVAL; if 
(info->bitmask & ~EBT_IP_MASK || info->invflags & ~EBT_IP_MASK) return -EINVAL; if (info->bitmask & (EBT_IP_DPORT | EBT_IP_SPORT)) { if (info->invflags & EBT_IP_PROTO) return -EINVAL; if (info->protocol != IPPROTO_TCP && info->protocol != IPPROTO_UDP && info->protocol != IPPROTO_UDPLITE && info->protocol != IPPROTO_SCTP && info->protocol != IPPROTO_DCCP) return -EINVAL; } if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1]) return -EINVAL; if (info->bitmask & EBT_IP_SPORT && info->sport[0] > info->sport[1]) return -EINVAL; return 0; } static struct xt_match ebt_ip_mt_reg __read_mostly = { .name = "ip", .revision = 0, .family = NFPROTO_BRIDGE, .match = ebt_ip_mt, .checkentry = ebt_ip_mt_check, .matchsize = sizeof(struct ebt_ip_info), .me = THIS_MODULE, }; static int __init ebt_ip_init(void) { return xt_register_match(&ebt_ip_mt_reg); } static void __exit ebt_ip_fini(void) { xt_unregister_match(&ebt_ip_mt_reg); } module_init(ebt_ip_init); module_exit(ebt_ip_fini); MODULE_DESCRIPTION("Ebtables: IPv4 protocol packet match"); MODULE_LICENSE("GPL");
gpl-2.0
robacklin/uclinux-users
mysql/bdb/btree/bt_compare.c
10
6104
/*- * See the file LICENSE for redistribution information. * * Copyright (c) 1996, 1997, 1998, 1999, 2000 * Sleepycat Software. All rights reserved. */ /* * Copyright (c) 1990, 1993, 1994, 1995, 1996 * Keith Bostic. All rights reserved. */ /* * Copyright (c) 1990, 1993, 1994, 1995 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Mike Olson. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "db_config.h" #ifndef lint static const char revid[] = "$Id: bt_compare.c,v 11.12 2000/10/26 19:00:28 krinsky Exp $"; #endif /* not lint */ #ifndef NO_SYSTEM_INCLUDES #include <sys/types.h> #endif #include "db_int.h" #include "db_page.h" #include "btree.h" /* * __bam_cmp -- * Compare a key to a given record. * * PUBLIC: int __bam_cmp __P((DB *, const DBT *, PAGE *, * PUBLIC: u_int32_t, int (*)(DB *, const DBT *, const DBT *), int *)); */ int __bam_cmp(dbp, dbt, h, indx, func, cmpp) DB *dbp; const DBT *dbt; PAGE *h; u_int32_t indx; int (*func)__P((DB *, const DBT *, const DBT *)); int *cmpp; { BINTERNAL *bi; BKEYDATA *bk; BOVERFLOW *bo; DBT pg_dbt; /* * Returns: * < 0 if dbt is < page record * = 0 if dbt is = page record * > 0 if dbt is > page record * * !!! * We do not clear the pg_dbt DBT even though it's likely to contain * random bits. That should be okay, because the app's comparison * routine had better not be looking at fields other than data/size. * We don't clear it because we go through this path a lot and it's * expensive. */ switch (TYPE(h)) { case P_LBTREE: case P_LDUP: case P_LRECNO: bk = GET_BKEYDATA(h, indx); if (B_TYPE(bk->type) == B_OVERFLOW) bo = (BOVERFLOW *)bk; else { pg_dbt.data = bk->data; pg_dbt.size = bk->len; *cmpp = func(dbp, dbt, &pg_dbt); return (0); } break; case P_IBTREE: /* * The following code guarantees that the left-most key on an * internal page at any place in the tree sorts less than any * user-specified key. The reason is that if we have reached * this internal page, we know the user key must sort greater * than the key we're storing for this page in any internal * pages at levels above us in the tree. It then follows that * any user-specified key cannot sort less than the first page * which we reference, and so there's no reason to call the * comparison routine. 
While this may save us a comparison * routine call or two, the real reason for this is because * we don't maintain a copy of the smallest key in the tree, * so that we don't have to update all the levels of the tree * should the application store a new smallest key. And, so, * we may not have a key to compare, which makes doing the * comparison difficult and error prone. */ if (indx == 0) { *cmpp = 1; return (0); } bi = GET_BINTERNAL(h, indx); if (B_TYPE(bi->type) == B_OVERFLOW) bo = (BOVERFLOW *)(bi->data); else { pg_dbt.data = bi->data; pg_dbt.size = bi->len; *cmpp = func(dbp, dbt, &pg_dbt); return (0); } break; default: return (__db_pgfmt(dbp, PGNO(h))); } /* * Overflow. */ return (__db_moff(dbp, dbt, bo->pgno, bo->tlen, func == __bam_defcmp ? NULL : func, cmpp)); } /* * __bam_defcmp -- * Default comparison routine. * * PUBLIC: int __bam_defcmp __P((DB *, const DBT *, const DBT *)); */ int __bam_defcmp(dbp, a, b) DB *dbp; const DBT *a, *b; { size_t len; u_int8_t *p1, *p2; COMPQUIET(dbp, NULL); /* * Returns: * < 0 if a is < b * = 0 if a is = b * > 0 if a is > b * * XXX * If a size_t doesn't fit into a long, or if the difference between * any two characters doesn't fit into an int, this routine can lose. * What we need is a signed integral type that's guaranteed to be at * least as large as a size_t, and there is no such thing. */ len = a->size > b->size ? b->size : a->size; for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2) if (*p1 != *p2) return ((long)*p1 - (long)*p2); return ((long)a->size - (long)b->size); } /* * __bam_defpfx -- * Default prefix routine. * * PUBLIC: size_t __bam_defpfx __P((DB *, const DBT *, const DBT *)); */ size_t __bam_defpfx(dbp, a, b) DB *dbp; const DBT *a, *b; { size_t cnt, len; u_int8_t *p1, *p2; COMPQUIET(dbp, NULL); cnt = 1; len = a->size > b->size ? 
b->size : a->size; for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2, ++cnt) if (*p1 != *p2) return (cnt); /* * We know that a->size must be <= b->size, or they wouldn't be * in this order. */ return (a->size < b->size ? a->size + 1 : a->size); }
gpl-2.0
knuesel/gst-plugins-bad
ext/openal/gstopenalsrc.c
10
10330
/* * GStreamer * Copyright (C) 2005 Thomas Vander Stichele <thomas@apestaart.org> * Copyright (C) 2005 Ronald S. Bultje <rbultje@ronald.bitfreak.net> * Copyright (C) 2008 Victor Lin <bornstub@gmail.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Alternatively, the contents of this file may be used under the * GNU Lesser General Public License Version 2.1 (the "LGPL"), in * which case the following provisions apply instead of the ones * mentioned above: * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. 
* * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ /** * SECTION:element-openalsrc * @short_description: record sound from your sound card using OpenAL * * <refsect2> * <para> * This element lets you record sound using the OpenAL * </para> * <title>Example pipelines</title> * <para> * <programlisting> * gst-launch -v openalsrc ! audioconvert ! vorbisenc ! oggmux ! filesink location=mymusic.ogg * </programlisting> * will record sound from your sound card using OpenAL and encode it to an Ogg/Vorbis file * </para> * </refsect2> */ #ifdef HAVE_CONFIG_H # include <config.h> #endif #include <gst/gst.h> #include <gst/gsterror.h> #include "gstopenalsrc.h" GST_DEBUG_CATEGORY_STATIC (openalsrc_debug); #define GST_CAT_DEFAULT openalsrc_debug #define DEFAULT_DEVICE NULL #define DEFAULT_DEVICE_NAME NULL /** Filter signals and args **/ enum { /* FILL ME */ LAST_SIGNAL }; /** Properties **/ enum { PROP_0, PROP_DEVICE, PROP_DEVICE_NAME }; static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src", GST_PAD_SRC, GST_PAD_ALWAYS, GST_STATIC_CAPS ("audio/x-raw-int, " "endianness = (int) BYTE_ORDER, " "signed = (boolean) TRUE, " "width = (int) 16, " "depth = (int) 16, " "rate = (int) [ 1, MAX ], " "channels = (int) [ 1, 2 ]; " "audio/x-raw-int, " "signed = (boolean) TRUE, " "width = (int) 8, " "depth = (int) 8, " "rate = (int) [ 1, MAX ], " "channels = (int) [ 1, 2 ]") ); GST_BOILERPLATE (GstOpenalSrc, gst_openal_src, GstAudioSrc, GST_TYPE_AUDIO_SRC); static void gst_openal_src_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec); static void gst_openal_src_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec); static gboolean gst_openal_src_open (GstAudioSrc * src); static gboolean gst_openal_src_prepare (GstAudioSrc * src, 
GstRingBufferSpec * spec); static gboolean gst_openal_src_unprepare (GstAudioSrc * src); static gboolean gst_openal_src_close (GstAudioSrc * src); static guint gst_openal_src_read (GstAudioSrc * src, gpointer data, guint length); static guint gst_openal_src_delay (GstAudioSrc * src); static void gst_openal_src_reset (GstAudioSrc * src); static void gst_openal_src_finalize (GObject * object); static void gst_openal_src_base_init (gpointer gclass) { GstElementClass *element_class = GST_ELEMENT_CLASS (gclass); gst_element_class_set_details_simple (element_class, "OpenAL src", "Source/Audio", "OpenAL source capture audio from device", "Victor Lin <bornstub@gmail.com>"); gst_element_class_add_static_pad_template (element_class, &src_factory); } static void gst_openal_src_class_init (GstOpenalSrcClass * klass) { GObjectClass *gobject_class; GstAudioSrcClass *gstaudio_src_class; gobject_class = G_OBJECT_CLASS (klass); gstaudio_src_class = GST_AUDIO_SRC_CLASS (klass); GST_DEBUG_CATEGORY_INIT (openalsrc_debug, "openalsrc", 0, "OpenAL source capture audio from device"); gobject_class->set_property = gst_openal_src_set_property; gobject_class->get_property = gst_openal_src_get_property; gobject_class->finalize = gst_openal_src_finalize; gstaudio_src_class->open = GST_DEBUG_FUNCPTR (gst_openal_src_open); gstaudio_src_class->prepare = GST_DEBUG_FUNCPTR (gst_openal_src_prepare); gstaudio_src_class->unprepare = GST_DEBUG_FUNCPTR (gst_openal_src_unprepare); gstaudio_src_class->close = GST_DEBUG_FUNCPTR (gst_openal_src_close); gstaudio_src_class->read = GST_DEBUG_FUNCPTR (gst_openal_src_read); gstaudio_src_class->delay = GST_DEBUG_FUNCPTR (gst_openal_src_delay); gstaudio_src_class->reset = GST_DEBUG_FUNCPTR (gst_openal_src_reset); g_object_class_install_property (gobject_class, PROP_DEVICE, g_param_spec_string ("device", "Device", "Specific capture device to open, NULL indicate default device", DEFAULT_DEVICE, G_PARAM_READWRITE) ); g_object_class_install_property (gobject_class, 
PROP_DEVICE_NAME, g_param_spec_string ("device-name", "Device name", "Readable name of device", DEFAULT_DEVICE_NAME, G_PARAM_READABLE) ); } static void gst_openal_src_init (GstOpenalSrc * osrc, GstOpenalSrcClass * gclass) { osrc->deviceName = g_strdup (DEFAULT_DEVICE_NAME); osrc->device = DEFAULT_DEVICE; osrc->deviceHandle = NULL; } static void gst_openal_src_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec) { GstOpenalSrc *osrc = GST_OPENAL_SRC (object); switch (prop_id) { case PROP_DEVICE: osrc->device = g_value_dup_string (value); break; case PROP_DEVICE_NAME: osrc->deviceName = g_value_dup_string (value); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } } static void gst_openal_src_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec) { GstOpenalSrc *osrc = GST_OPENAL_SRC (object); switch (prop_id) { case PROP_DEVICE: g_value_set_string (value, osrc->device); break; case PROP_DEVICE_NAME: g_value_set_string (value, osrc->deviceName); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } } static gboolean gst_openal_src_open (GstAudioSrc * asrc) { /* We don't do anything here */ return TRUE; } static gboolean gst_openal_src_prepare (GstAudioSrc * asrc, GstRingBufferSpec * spec) { GstOpenalSrc *osrc = GST_OPENAL_SRC (asrc); ALenum format; guint64 bufferSize; switch (spec->width) { case 8: format = AL_FORMAT_STEREO8; break; case 16: format = AL_FORMAT_STEREO16; break; default: g_assert_not_reached (); } bufferSize = spec->buffer_time * spec->rate * spec->bytes_per_sample / 1000000; GST_INFO_OBJECT (osrc, "Open device : %s", osrc->deviceName); osrc->deviceHandle = alcCaptureOpenDevice (osrc->device, spec->rate, format, bufferSize); if (!osrc->deviceHandle) { GST_ELEMENT_ERROR (osrc, RESOURCE, FAILED, ("Can't open device \"%s\"", osrc->device), ("Can't open device \"%s\"", osrc->device) ); return FALSE; } osrc->deviceName = g_strdup 
(alcGetString (osrc->deviceHandle, ALC_DEVICE_SPECIFIER)); osrc->bytes_per_sample = spec->bytes_per_sample; GST_INFO_OBJECT (osrc, "Start capture"); alcCaptureStart (osrc->deviceHandle); return TRUE; } static gboolean gst_openal_src_unprepare (GstAudioSrc * asrc) { GstOpenalSrc *osrc = GST_OPENAL_SRC (asrc); GST_INFO_OBJECT (osrc, "Close device : %s", osrc->deviceName); if (osrc->deviceHandle) { alcCaptureStop (osrc->deviceHandle); alcCaptureCloseDevice (osrc->deviceHandle); } return TRUE; } static gboolean gst_openal_src_close (GstAudioSrc * asrc) { /* We don't do anything here */ return TRUE; } static guint gst_openal_src_read (GstAudioSrc * asrc, gpointer data, guint length) { GstOpenalSrc *osrc = GST_OPENAL_SRC (asrc); gint samples; alcGetIntegerv (osrc->deviceHandle, ALC_CAPTURE_SAMPLES, sizeof (samples), &samples); if (samples * osrc->bytes_per_sample > length) { samples = length / osrc->bytes_per_sample; } if (samples) { GST_DEBUG_OBJECT (osrc, "Read samples : %d", samples); alcCaptureSamples (osrc->deviceHandle, data, samples); } return samples * osrc->bytes_per_sample; } static guint gst_openal_src_delay (GstAudioSrc * asrc) { GstOpenalSrc *osrc = GST_OPENAL_SRC (asrc); gint samples; alcGetIntegerv (osrc->deviceHandle, ALC_CAPTURE_SAMPLES, sizeof (samples), &samples); return samples; } static void gst_openal_src_reset (GstAudioSrc * asrc) { /* We don't do anything here */ } static void gst_openal_src_finalize (GObject * object) { GstOpenalSrc *osrc = GST_OPENAL_SRC (object); g_free (osrc->deviceName); g_free (osrc->device); G_OBJECT_CLASS (parent_class)->finalize (object); }
gpl-2.0
jedis/jedioutcast
CODE-mp/game/bg_weapons.c
10
17686
// Copyright (C) 2001-2002 Raven Software // // bg_weapons.c -- part of bg_pmove functionality #include "q_shared.h" #include "bg_public.h" #include "bg_local.h" // Muzzle point table... vec3_t WP_MuzzlePoint[WP_NUM_WEAPONS] = {// Fwd, right, up. {0, 0, 0 }, // WP_NONE, {0 , 8, 0 }, // WP_STUN_BATON, {8 , 16, 0 }, // WP_SABER, {12, 6, -6 }, // WP_BRYAR_PISTOL, {12, 6, -6 }, // WP_BLASTER, {12, 6, -6 }, // WP_DISRUPTOR, {12, 2, -6 }, // WP_BOWCASTER, {12, 4.5, -6 }, // WP_REPEATER, {12, 6, -6 }, // WP_DEMP2, {12, 6, -6 }, // WP_FLECHETTE, {12, 8, 0 }, // WP_ROCKET_LAUNCHER, {12, 0, -4 }, // WP_THERMAL, {12, 0, -10 }, // WP_TRIP_MINE, {12, 0, -4 }, // WP_DET_PACK, }; weaponData_t weaponData[WP_NUM_WEAPONS] = { { // WP_NONE // "No Weapon", // char classname[32]; // Spawning name AMMO_NONE, // int ammoIndex; // Index to proper ammo slot 0, // int ammoLow; // Count when ammo is low 0, // int energyPerShot; // Amount of energy used per shot 0, // int fireTime; // Amount of time between firings 0, // int range; // Range of weapon 0, // int altEnergyPerShot; // Amount of energy used for alt-fire 0, // int altFireTime; // Amount of time between alt-firings 0, // int altRange; // Range of alt-fire 0, // int chargeSubTime; // ms interval for subtracting ammo during charge 0, // int altChargeSubTime; // above for secondary 0, // int chargeSub; // amount to subtract during charge on each interval 0, //int altChargeSub; // above for secondary 0, // int maxCharge; // stop subtracting once charged for this many ms 0 // int altMaxCharge; // above for secondary }, { // WP_STUN_BATON // "Stun Baton", // char classname[32]; // Spawning name AMMO_NONE, // int ammoIndex; // Index to proper ammo slot 5, // int ammoLow; // Count when ammo is low 0, // int energyPerShot; // Amount of energy used per shot 400, // int fireTime; // Amount of time between firings 8192, // int range; // Range of weapon 0, // int altEnergyPerShot; // Amount of energy used for alt-fire 400, // int altFireTime; // 
Amount of time between alt-firings 8192, // int altRange; // Range of alt-fire 0, // int chargeSubTime; // ms interval for subtracting ammo during charge 0, // int altChargeSubTime; // above for secondary 0, // int chargeSub; // amount to subtract during charge on each interval 0, //int altChargeSub; // above for secondary 0, // int maxCharge; // stop subtracting once charged for this many ms 0 // int altMaxCharge; // above for secondary }, { // WP_SABER, // "Lightsaber", // char classname[32]; // Spawning name AMMO_NONE, // int ammoIndex; // Index to proper ammo slot 5, // int ammoLow; // Count when ammo is low 0, // int energyPerShot; // Amount of energy used per shot 100, // int fireTime; // Amount of time between firings 8192, // int range; // Range of weapon 0, // int altEnergyPerShot; // Amount of energy used for alt-fire 100, // int altFireTime; // Amount of time between alt-firings 8192, // int altRange; // Range of alt-fire 0, // int chargeSubTime; // ms interval for subtracting ammo during charge 0, // int altChargeSubTime; // above for secondary 0, // int chargeSub; // amount to subtract during charge on each interval 0, //int altChargeSub; // above for secondary 0, // int maxCharge; // stop subtracting once charged for this many ms 0 // int altMaxCharge; // above for secondary }, { // WP_BRYAR_PISTOL, // "Bryar Pistol", // char classname[32]; // Spawning name AMMO_BLASTER, // int ammoIndex; // Index to proper ammo slot 15, // int ammoLow; // Count when ammo is low 2, // int energyPerShot; // Amount of energy used per shot 400, // int fireTime; // Amount of time between firings 8192, // int range; // Range of weapon 2, // int altEnergyPerShot; // Amount of energy used for alt-fire 400, // int altFireTime; // Amount of time between alt-firings 8192, // int altRange; // Range of alt-fire 0, // int chargeSubTime; // ms interval for subtracting ammo during charge 200, // int altChargeSubTime; // above for secondary 0, // int chargeSub; // amount to subtract 
during charge on each interval 1, //int altChargeSub; // above for secondary 0, // int maxCharge; // stop subtracting once charged for this many ms 1500 // int altMaxCharge; // above for secondary }, { // WP_BLASTER // "E11 Blaster Rifle", // char classname[32]; // Spawning name AMMO_BLASTER, // int ammoIndex; // Index to proper ammo slot 5, // int ammoLow; // Count when ammo is low 2, // int energyPerShot; // Amount of energy used per shot 350, // int fireTime; // Amount of time between firings 8192, // int range; // Range of weapon 3, // int altEnergyPerShot; // Amount of energy used for alt-fire 150, // int altFireTime; // Amount of time between alt-firings 8192, // int altRange; // Range of alt-fire 0, // int chargeSubTime; // ms interval for subtracting ammo during charge 0, // int altChargeSubTime; // above for secondary 0, // int chargeSub; // amount to subtract during charge on each interval 0, //int altChargeSub; // above for secondary 0, // int maxCharge; // stop subtracting once charged for this many ms 0 // int altMaxCharge; // above for secondary }, { // WP_DISRUPTOR // "Tenloss Disruptor Rifle",// char classname[32]; // Spawning name AMMO_POWERCELL, // int ammoIndex; // Index to proper ammo slot 5, // int ammoLow; // Count when ammo is low 5, // int energyPerShot; // Amount of energy used per shot 600, // int fireTime; // Amount of time between firings 8192, // int range; // Range of weapon 6, // int altEnergyPerShot; // Amount of energy used for alt-fire 1300, // int altFireTime; // Amount of time between alt-firings 8192, // int altRange; // Range of alt-fire 0, // int chargeSubTime; // ms interval for subtracting ammo during charge 200, // int altChargeSubTime; // above for secondary 0, // int chargeSub; // amount to subtract during charge on each interval 3, //int altChargeSub; // above for secondary 0, // int maxCharge; // stop subtracting once charged for this many ms 1700 // int altMaxCharge; // above for secondary }, { // WP_BOWCASTER // 
"Wookiee Bowcaster", // char classname[32]; // Spawning name AMMO_POWERCELL, // int ammoIndex; // Index to proper ammo slot 5, // int ammoLow; // Count when ammo is low 5, // int energyPerShot; // Amount of energy used per shot 1000, // int fireTime; // Amount of time between firings 8192, // int range; // Range of weapon 5, // int altEnergyPerShot; // Amount of energy used for alt-fire 750, // int altFireTime; // Amount of time between alt-firings 8192, // int altRange; // Range of alt-fire 400, // int chargeSubTime; // ms interval for subtracting ammo during charge 0, // int altChargeSubTime; // above for secondary 5, // int chargeSub; // amount to subtract during charge on each interval 0, //int altChargeSub; // above for secondary 1700, // int maxCharge; // stop subtracting once charged for this many ms 0 // int altMaxCharge; // above for secondary }, { // WP_REPEATER // "Imperial Heavy Repeater",// char classname[32]; // Spawning name AMMO_METAL_BOLTS, // int ammoIndex; // Index to proper ammo slot 5, // int ammoLow; // Count when ammo is low 1, // int energyPerShot; // Amount of energy used per shot 100, // int fireTime; // Amount of time between firings 8192, // int range; // Range of weapon 15, // int altEnergyPerShot; // Amount of energy used for alt-fire 800, // int altFireTime; // Amount of time between alt-firings 8192, // int altRange; // Range of alt-fire 0, // int chargeSubTime; // ms interval for subtracting ammo during charge 0, // int altChargeSubTime; // above for secondary 0, // int chargeSub; // amount to subtract during charge on each interval 0, //int altChargeSub; // above for secondary 0, // int maxCharge; // stop subtracting once charged for this many ms 0 // int altMaxCharge; // above for secondary }, { // WP_DEMP2 // "DEMP2", // char classname[32]; // Spawning name AMMO_POWERCELL, // int ammoIndex; // Index to proper ammo slot 5, // int ammoLow; // Count when ammo is low 8, // int energyPerShot; // Amount of energy used per shot 500, // 
int fireTime; // Amount of time between firings 8192, // int range; // Range of weapon 6, // int altEnergyPerShot; // Amount of energy used for alt-fire 900, // int altFireTime; // Amount of time between alt-firings 8192, // int altRange; // Range of alt-fire 0, // int chargeSubTime; // ms interval for subtracting ammo during charge 250, // int altChargeSubTime; // above for secondary 0, // int chargeSub; // amount to subtract during charge on each interval 3, // int altChargeSub; // above for secondary 0, // int maxCharge; // stop subtracting once charged for this many ms 2100 // int altMaxCharge; // above for secondary }, { // WP_FLECHETTE // "Golan Arms Flechette", // char classname[32]; // Spawning name AMMO_METAL_BOLTS, // int ammoIndex; // Index to proper ammo slot 5, // int ammoLow; // Count when ammo is low 10, // int energyPerShot; // Amount of energy used per shot 700, // int fireTime; // Amount of time between firings 8192, // int range; // Range of weapon 15, // int altEnergyPerShot; // Amount of energy used for alt-fire 800, // int altFireTime; // Amount of time between alt-firings 8192, // int altRange; // Range of alt-fire 0, // int chargeSubTime; // ms interval for subtracting ammo during charge 0, // int altChargeSubTime; // above for secondary 0, // int chargeSub; // amount to subtract during charge on each interval 0, //int altChargeSub; // above for secondary 0, // int maxCharge; // stop subtracting once charged for this many ms 0 // int altMaxCharge; // above for secondary }, { // WP_ROCKET_LAUNCHER // "Merr-Sonn Missile System", // char classname[32]; // Spawning name AMMO_ROCKETS, // int ammoIndex; // Index to proper ammo slot 5, // int ammoLow; // Count when ammo is low 1, // int energyPerShot; // Amount of energy used per shot 900, // int fireTime; // Amount of time between firings 8192, // int range; // Range of weapon 2, // int altEnergyPerShot; // Amount of energy used for alt-fire 1200, // int altFireTime; // Amount of time between 
alt-firings 8192, // int altRange; // Range of alt-fire 0, // int chargeSubTime; // ms interval for subtracting ammo during charge 0, // int altChargeSubTime; // above for secondary 0, // int chargeSub; // amount to subtract during charge on each interval 0, //int altChargeSub; // above for secondary 0, // int maxCharge; // stop subtracting once charged for this many ms 0 // int altMaxCharge; // above for secondary }, { // WP_THERMAL // "Thermal Detonator", // char classname[32]; // Spawning name AMMO_THERMAL, // int ammoIndex; // Index to proper ammo slot 0, // int ammoLow; // Count when ammo is low 1, // int energyPerShot; // Amount of energy used per shot 800, // int fireTime; // Amount of time between firings 8192, // int range; // Range of weapon 1, // int altEnergyPerShot; // Amount of energy used for alt-fire 400, // int altFireTime; // Amount of time between alt-firings 8192, // int altRange; // Range of alt-fire 0, // int chargeSubTime; // ms interval for subtracting ammo during charge 0, // int altChargeSubTime; // above for secondary 0, // int chargeSub; // amount to subtract during charge on each interval 0, //int altChargeSub; // above for secondary 0, // int maxCharge; // stop subtracting once charged for this many ms 0 // int altMaxCharge; // above for secondary }, { // WP_TRIP_MINE // "Trip Mine", // char classname[32]; // Spawning name AMMO_TRIPMINE, // int ammoIndex; // Index to proper ammo slot 0, // int ammoLow; // Count when ammo is low 1, // int energyPerShot; // Amount of energy used per shot 800, // int fireTime; // Amount of time between firings 8192, // int range; // Range of weapon 1, // int altEnergyPerShot; // Amount of energy used for alt-fire 400, // int altFireTime; // Amount of time between alt-firings 8192, // int altRange; // Range of alt-fire 0, // int chargeSubTime; // ms interval for subtracting ammo during charge 0, // int altChargeSubTime; // above for secondary 0, // int chargeSub; // amount to subtract during charge on each 
interval 0, //int altChargeSub; // above for secondary 0, // int maxCharge; // stop subtracting once charged for this many ms 0 // int altMaxCharge; // above for secondary }, { // WP_DET_PACK // "Det Pack", // char classname[32]; // Spawning name AMMO_DETPACK, // int ammoIndex; // Index to proper ammo slot 0, // int ammoLow; // Count when ammo is low 1, // int energyPerShot; // Amount of energy used per shot 800, // int fireTime; // Amount of time between firings 8192, // int range; // Range of weapon 0, // int altEnergyPerShot; // Amount of energy used for alt-fire 400, // int altFireTime; // Amount of time between alt-firings 8192, // int altRange; // Range of alt-fire 0, // int chargeSubTime; // ms interval for subtracting ammo during charge 0, // int altChargeSubTime; // above for secondary 0, // int chargeSub; // amount to subtract during charge on each interval 0, //int altChargeSub; // above for secondary 0, // int maxCharge; // stop subtracting once charged for this many ms 0 // int altMaxCharge; // above for secondary }, { // WP_EMPLCACED_GUN // "Emplaced Gun", // char classname[32]; // Spawning name /*AMMO_BLASTER*/0, // int ammoIndex; // Index to proper ammo slot /*5*/0, // int ammoLow; // Count when ammo is low /*2*/0, // int energyPerShot; // Amount of energy used per shot 100, // int fireTime; // Amount of time between firings 8192, // int range; // Range of weapon /*3*/0, // int altEnergyPerShot; // Amount of energy used for alt-fire 100, // int altFireTime; // Amount of time between alt-firings 8192, // int altRange; // Range of alt-fire 0, // int chargeSubTime; // ms interval for subtracting ammo during charge 0, // int altChargeSubTime; // above for secondary 0, // int chargeSub; // amount to subtract during charge on each interval 0, //int altChargeSub; // above for secondary 0, // int maxCharge; // stop subtracting once charged for this many ms 0 // int altMaxCharge; // above for secondary } }; ammoData_t ammoData[AMMO_MAX] = { { // AMMO_NONE // 
"", // char icon[32]; // Name of ammo icon file 0 // int max; // Max amount player can hold of ammo }, { // AMMO_FORCE // "", // char icon[32]; // Name of ammo icon file 100 // int max; // Max amount player can hold of ammo }, { // AMMO_BLASTER // "", // char icon[32]; // Name of ammo icon file 300 // int max; // Max amount player can hold of ammo }, { // AMMO_POWERCELL // "", // char icon[32]; // Name of ammo icon file 300 // int max; // Max amount player can hold of ammo }, { // AMMO_METAL_BOLTS // "", // char icon[32]; // Name of ammo icon file 300 // int max; // Max amount player can hold of ammo }, { // AMMO_ROCKETS // "", // char icon[32]; // Name of ammo icon file 25 // int max; // Max amount player can hold of ammo }, { // AMMO_EMPLACED // "", // char icon[32]; // Name of ammo icon file 800 // int max; // Max amount player can hold of ammo }, { // AMMO_THERMAL // "", // char icon[32]; // Name of ammo icon file 10 // int max; // Max amount player can hold of ammo }, { // AMMO_TRIPMINE // "", // char icon[32]; // Name of ammo icon file 10 // int max; // Max amount player can hold of ammo }, { // AMMO_DETPACK // "", // char icon[32]; // Name of ammo icon file 10 // int max; // Max amount player can hold of ammo } };
gpl-2.0
imfoollink/NPLRuntime
Server/trunk/curl-7.47.1/lib/vtls/cyassl.c
10
24758
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ /* * Source file for all CyaSSL-specific code for the TLS/SSL layer. No code * but vtls.c should ever call or use these functions. * */ #include "curl_setup.h" #ifdef USE_CYASSL #define WOLFSSL_OPTIONS_IGNORE_SYS /* CyaSSL's version.h, which should contain only the version, should come before all other CyaSSL includes and be immediately followed by build config aka options.h. https://curl.haxx.se/mail/lib-2015-04/0069.html */ #include <cyassl/version.h> #if defined(HAVE_CYASSL_OPTIONS_H) && (LIBCYASSL_VERSION_HEX > 0x03004008) #if defined(CYASSL_API) || defined(WOLFSSL_API) /* Safety measure. If either is defined some API include was already included and that's a problem since options.h hasn't been included yet. */ #error "CyaSSL API was included before the CyaSSL build options." 
#endif #include <cyassl/options.h> #endif #ifdef HAVE_LIMITS_H #include <limits.h> #endif #include "urldata.h" #include "sendf.h" #include "inet_pton.h" #include "cyassl.h" #include "vtls.h" #include "parsedate.h" #include "connect.h" /* for the connect timeout */ #include "select.h" #include "rawstr.h" #include "x509asn1.h" #include "curl_printf.h" #include <cyassl/ssl.h> #ifdef HAVE_CYASSL_ERROR_SSL_H #include <cyassl/error-ssl.h> #else #include <cyassl/error.h> #endif #include <cyassl/ctaocrypt/random.h> #include <cyassl/ctaocrypt/sha256.h> /* The last #include files should be: */ #include "curl_memory.h" #include "memdebug.h" #if LIBCYASSL_VERSION_HEX < 0x02007002 /* < 2.7.2 */ #define CYASSL_MAX_ERROR_SZ 80 #endif static Curl_recv cyassl_recv; static Curl_send cyassl_send; static int do_file_type(const char *type) { if(!type || !type[0]) return SSL_FILETYPE_PEM; if(Curl_raw_equal(type, "PEM")) return SSL_FILETYPE_PEM; if(Curl_raw_equal(type, "DER")) return SSL_FILETYPE_ASN1; return -1; } /* * This function loads all the client/CA certificates and CRLs. Setup the TLS * layer and do all necessary magic. 
*/ static CURLcode cyassl_connect_step1(struct connectdata *conn, int sockindex) { char error_buffer[CYASSL_MAX_ERROR_SZ]; struct SessionHandle *data = conn->data; struct ssl_connect_data* conssl = &conn->ssl[sockindex]; SSL_METHOD* req_method = NULL; void* ssl_sessionid = NULL; curl_socket_t sockfd = conn->sock[sockindex]; #ifdef HAVE_SNI bool sni = FALSE; #define use_sni(x) sni = (x) #else #define use_sni(x) Curl_nop_stmt #endif if(conssl->state == ssl_connection_complete) return CURLE_OK; /* check to see if we've been told to use an explicit SSL/TLS version */ switch(data->set.ssl.version) { case CURL_SSLVERSION_DEFAULT: case CURL_SSLVERSION_TLSv1: #if LIBCYASSL_VERSION_HEX >= 0x03003000 /* >= 3.3.0 */ /* minimum protocol version is set later after the CTX object is created */ req_method = SSLv23_client_method(); #else infof(data, "CyaSSL <3.3.0 cannot be configured to use TLS 1.0-1.2, " "TLS 1.0 is used exclusively\n"); req_method = TLSv1_client_method(); #endif use_sni(TRUE); break; case CURL_SSLVERSION_TLSv1_0: req_method = TLSv1_client_method(); use_sni(TRUE); break; case CURL_SSLVERSION_TLSv1_1: req_method = TLSv1_1_client_method(); use_sni(TRUE); break; case CURL_SSLVERSION_TLSv1_2: req_method = TLSv1_2_client_method(); use_sni(TRUE); break; case CURL_SSLVERSION_SSLv3: /* before WolfSSL SSLv3 was enabled by default, and starting in WolfSSL we check for its presence since it is built without it by default */ #if !defined(WOLFSSL_VERSION) || defined(HAVE_WOLFSSLV3_CLIENT_METHOD) req_method = SSLv3_client_method(); use_sni(FALSE); #else failf(data, "No support for SSLv3"); return CURLE_NOT_BUILT_IN; #endif break; case CURL_SSLVERSION_SSLv2: failf(data, "CyaSSL does not support SSLv2"); return CURLE_SSL_CONNECT_ERROR; default: failf(data, "Unrecognized parameter passed via CURLOPT_SSLVERSION"); return CURLE_SSL_CONNECT_ERROR; } if(!req_method) { failf(data, "SSL: couldn't create a method!"); return CURLE_OUT_OF_MEMORY; } if(conssl->ctx) 
SSL_CTX_free(conssl->ctx); conssl->ctx = SSL_CTX_new(req_method); if(!conssl->ctx) { failf(data, "SSL: couldn't create a context!"); return CURLE_OUT_OF_MEMORY; } switch(data->set.ssl.version) { case CURL_SSLVERSION_DEFAULT: case CURL_SSLVERSION_TLSv1: #if LIBCYASSL_VERSION_HEX > 0x03004006 /* > 3.4.6 */ /* Versions 3.3.0 to 3.4.6 we know the minimum protocol version is whatever minimum version of TLS was built in and at least TLS 1.0. For later library versions that could change (eg TLS 1.0 built in but defaults to TLS 1.1) so we have this short circuit evaluation to find the minimum supported TLS version. We use wolfSSL_CTX_SetMinVersion and not CyaSSL_SetMinVersion because only the former will work before the user's CTX callback is called. */ if((wolfSSL_CTX_SetMinVersion(conssl->ctx, WOLFSSL_TLSV1) != 1) && (wolfSSL_CTX_SetMinVersion(conssl->ctx, WOLFSSL_TLSV1_1) != 1) && (wolfSSL_CTX_SetMinVersion(conssl->ctx, WOLFSSL_TLSV1_2) != 1)) { failf(data, "SSL: couldn't set the minimum protocol version"); return CURLE_SSL_CONNECT_ERROR; } #endif break; } #ifndef NO_FILESYSTEM /* load trusted cacert */ if(data->set.str[STRING_SSL_CAFILE]) { if(1 != SSL_CTX_load_verify_locations(conssl->ctx, data->set.str[STRING_SSL_CAFILE], data->set.str[STRING_SSL_CAPATH])) { if(data->set.ssl.verifypeer) { /* Fail if we insist on successfully verifying the server. */ failf(data, "error setting certificate verify locations:\n" " CAfile: %s\n CApath: %s", data->set.str[STRING_SSL_CAFILE]? data->set.str[STRING_SSL_CAFILE]: "none", data->set.str[STRING_SSL_CAPATH]? data->set.str[STRING_SSL_CAPATH] : "none"); return CURLE_SSL_CACERT_BADFILE; } else { /* Just continue with a warning if no strict certificate verification is required. */ infof(data, "error setting certificate verify locations," " continuing anyway:\n"); } } else { /* Everything is fine. 
*/ infof(data, "successfully set certificate verify locations:\n"); } infof(data, " CAfile: %s\n" " CApath: %s\n", data->set.str[STRING_SSL_CAFILE] ? data->set.str[STRING_SSL_CAFILE]: "none", data->set.str[STRING_SSL_CAPATH] ? data->set.str[STRING_SSL_CAPATH]: "none"); } /* Load the client certificate, and private key */ if(data->set.str[STRING_CERT] && data->set.str[STRING_KEY]) { int file_type = do_file_type(data->set.str[STRING_CERT_TYPE]); if(SSL_CTX_use_certificate_file(conssl->ctx, data->set.str[STRING_CERT], file_type) != 1) { failf(data, "unable to use client certificate (no key or wrong pass" " phrase?)"); return CURLE_SSL_CONNECT_ERROR; } file_type = do_file_type(data->set.str[STRING_KEY_TYPE]); if(SSL_CTX_use_PrivateKey_file(conssl->ctx, data->set.str[STRING_KEY], file_type) != 1) { failf(data, "unable to set private key"); return CURLE_SSL_CONNECT_ERROR; } } #endif /* !NO_FILESYSTEM */ /* SSL always tries to verify the peer, this only says whether it should * fail to connect if the verification fails, or if it should continue * anyway. In the latter case the result of the verification is checked with * SSL_get_verify_result() below. */ SSL_CTX_set_verify(conssl->ctx, data->set.ssl.verifypeer?SSL_VERIFY_PEER:SSL_VERIFY_NONE, NULL); #ifdef HAVE_SNI if(sni) { struct in_addr addr4; #ifdef ENABLE_IPV6 struct in6_addr addr6; #endif size_t hostname_len = strlen(conn->host.name); if((hostname_len < USHRT_MAX) && (0 == Curl_inet_pton(AF_INET, conn->host.name, &addr4)) && #ifdef ENABLE_IPV6 (0 == Curl_inet_pton(AF_INET6, conn->host.name, &addr6)) && #endif (CyaSSL_CTX_UseSNI(conssl->ctx, CYASSL_SNI_HOST_NAME, conn->host.name, (unsigned short)hostname_len) != 1)) { infof(data, "WARNING: failed to configure server name indication (SNI) " "TLS extension\n"); } } #endif /* give application a chance to interfere with SSL set up. 
*/ if(data->set.ssl.fsslctx) { CURLcode result = CURLE_OK; result = (*data->set.ssl.fsslctx)(data, conssl->ctx, data->set.ssl.fsslctxp); if(result) { failf(data, "error signaled by ssl ctx callback"); return result; } } #ifdef NO_FILESYSTEM else if(data->set.ssl.verifypeer) { failf(data, "SSL: Certificates couldn't be loaded because CyaSSL was built" " with \"no filesystem\". Either disable peer verification" " (insecure) or if you are building an application with libcurl you" " can load certificates via CURLOPT_SSL_CTX_FUNCTION."); return CURLE_SSL_CONNECT_ERROR; } #endif /* Let's make an SSL structure */ if(conssl->handle) SSL_free(conssl->handle); conssl->handle = SSL_new(conssl->ctx); if(!conssl->handle) { failf(data, "SSL: couldn't create a context (handle)!"); return CURLE_OUT_OF_MEMORY; } /* Check if there's a cached ID we can/should use here! */ if(!Curl_ssl_getsessionid(conn, &ssl_sessionid, NULL)) { /* we got a session id, use it! */ if(!SSL_set_session(conssl->handle, ssl_sessionid)) { failf(data, "SSL: SSL_set_session failed: %s", ERR_error_string(SSL_get_error(conssl->handle, 0), error_buffer)); return CURLE_SSL_CONNECT_ERROR; } /* Informational message */ infof (data, "SSL re-using session ID\n"); } /* pass the raw socket into the SSL layer */ if(!SSL_set_fd(conssl->handle, (int)sockfd)) { failf(data, "SSL: SSL_set_fd failed"); return CURLE_SSL_CONNECT_ERROR; } conssl->connecting_state = ssl_connect_2; return CURLE_OK; } static CURLcode cyassl_connect_step2(struct connectdata *conn, int sockindex) { int ret = -1; struct SessionHandle *data = conn->data; struct ssl_connect_data* conssl = &conn->ssl[sockindex]; conn->recv[sockindex] = cyassl_recv; conn->send[sockindex] = cyassl_send; /* Enable RFC2818 checks */ if(data->set.ssl.verifyhost) { ret = CyaSSL_check_domain_name(conssl->handle, conn->host.name); if(ret == SSL_FAILURE) return CURLE_OUT_OF_MEMORY; } ret = SSL_connect(conssl->handle); if(ret != 1) { char error_buffer[CYASSL_MAX_ERROR_SZ]; int 
detail = SSL_get_error(conssl->handle, ret); if(SSL_ERROR_WANT_READ == detail) { conssl->connecting_state = ssl_connect_2_reading; return CURLE_OK; } else if(SSL_ERROR_WANT_WRITE == detail) { conssl->connecting_state = ssl_connect_2_writing; return CURLE_OK; } /* There is no easy way to override only the CN matching. * This will enable the override of both mismatching SubjectAltNames * as also mismatching CN fields */ else if(DOMAIN_NAME_MISMATCH == detail) { #if 1 failf(data, "\tsubject alt name(s) or common name do not match \"%s\"\n", conn->host.dispname); return CURLE_PEER_FAILED_VERIFICATION; #else /* When the CyaSSL_check_domain_name() is used and you desire to continue * on a DOMAIN_NAME_MISMATCH, i.e. 'data->set.ssl.verifyhost == 0', * CyaSSL version 2.4.0 will fail with an INCOMPLETE_DATA error. The only * way to do this is currently to switch the CyaSSL_check_domain_name() * in and out based on the 'data->set.ssl.verifyhost' value. */ if(data->set.ssl.verifyhost) { failf(data, "\tsubject alt name(s) or common name do not match \"%s\"\n", conn->host.dispname); return CURLE_PEER_FAILED_VERIFICATION; } else { infof(data, "\tsubject alt name(s) and/or common name do not match \"%s\"\n", conn->host.dispname); return CURLE_OK; } #endif } #if LIBCYASSL_VERSION_HEX >= 0x02007000 /* 2.7.0 */ else if(ASN_NO_SIGNER_E == detail) { if(data->set.ssl.verifypeer) { failf(data, "\tCA signer not available for verification\n"); return CURLE_SSL_CACERT_BADFILE; } else { /* Just continue with a warning if no strict certificate verification is required. 
*/ infof(data, "CA signer not available for verification, " "continuing anyway\n"); } } #endif else { failf(data, "SSL_connect failed with error %d: %s", detail, ERR_error_string(detail, error_buffer)); return CURLE_SSL_CONNECT_ERROR; } } if(data->set.str[STRING_SSL_PINNEDPUBLICKEY]) { #if defined(HAVE_WOLFSSL_GET_PEER_CERTIFICATE) || \ defined(HAVE_CYASSL_GET_PEER_CERTIFICATE) X509 *x509; const char *x509_der; int x509_der_len; curl_X509certificate x509_parsed; curl_asn1Element *pubkey; CURLcode result; x509 = SSL_get_peer_certificate(conssl->handle); if(!x509) { failf(data, "SSL: failed retrieving server certificate"); return CURLE_SSL_PINNEDPUBKEYNOTMATCH; } x509_der = (const char *)CyaSSL_X509_get_der(x509, &x509_der_len); if(!x509_der) { failf(data, "SSL: failed retrieving ASN.1 server certificate"); return CURLE_SSL_PINNEDPUBKEYNOTMATCH; } memset(&x509_parsed, 0, sizeof x509_parsed); Curl_parseX509(&x509_parsed, x509_der, x509_der + x509_der_len); pubkey = &x509_parsed.subjectPublicKeyInfo; if(!pubkey->header || pubkey->end <= pubkey->header) { failf(data, "SSL: failed retrieving public key from server certificate"); return CURLE_SSL_PINNEDPUBKEYNOTMATCH; } result = Curl_pin_peer_pubkey(data, data->set.str[STRING_SSL_PINNEDPUBLICKEY], (const unsigned char *)pubkey->header, (size_t)(pubkey->end - pubkey->header)); if(result) { failf(data, "SSL: public key does not match pinned public key!"); return result; } #else failf(data, "Library lacks pinning support built-in"); return CURLE_NOT_BUILT_IN; #endif } conssl->connecting_state = ssl_connect_3; infof(data, "SSL connected\n"); return CURLE_OK; } static CURLcode cyassl_connect_step3(struct connectdata *conn, int sockindex) { CURLcode result = CURLE_OK; void *old_ssl_sessionid=NULL; struct SessionHandle *data = conn->data; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; bool incache; SSL_SESSION *our_ssl_sessionid; DEBUGASSERT(ssl_connect_3 == connssl->connecting_state); our_ssl_sessionid = 
SSL_get_session(connssl->handle); incache = !(Curl_ssl_getsessionid(conn, &old_ssl_sessionid, NULL)); if(incache) { if(old_ssl_sessionid != our_ssl_sessionid) { infof(data, "old SSL session ID is stale, removing\n"); Curl_ssl_delsessionid(conn, old_ssl_sessionid); incache = FALSE; } } if(!incache) { result = Curl_ssl_addsessionid(conn, our_ssl_sessionid, 0 /* unknown size */); if(result) { failf(data, "failed to store ssl session"); return result; } } connssl->connecting_state = ssl_connect_done; return result; } static ssize_t cyassl_send(struct connectdata *conn, int sockindex, const void *mem, size_t len, CURLcode *curlcode) { char error_buffer[CYASSL_MAX_ERROR_SZ]; int memlen = (len > (size_t)INT_MAX) ? INT_MAX : (int)len; int rc = SSL_write(conn->ssl[sockindex].handle, mem, memlen); if(rc < 0) { int err = SSL_get_error(conn->ssl[sockindex].handle, rc); switch(err) { case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: /* there's data pending, re-invoke SSL_write() */ *curlcode = CURLE_AGAIN; return -1; default: failf(conn->data, "SSL write: %s, errno %d", ERR_error_string(err, error_buffer), SOCKERRNO); *curlcode = CURLE_SEND_ERROR; return -1; } } return rc; } void Curl_cyassl_close(struct connectdata *conn, int sockindex) { struct ssl_connect_data *conssl = &conn->ssl[sockindex]; if(conssl->handle) { (void)SSL_shutdown(conssl->handle); SSL_free (conssl->handle); conssl->handle = NULL; } if(conssl->ctx) { SSL_CTX_free (conssl->ctx); conssl->ctx = NULL; } } static ssize_t cyassl_recv(struct connectdata *conn, int num, char *buf, size_t buffersize, CURLcode *curlcode) { char error_buffer[CYASSL_MAX_ERROR_SZ]; int buffsize = (buffersize > (size_t)INT_MAX) ? 
INT_MAX : (int)buffersize; int nread = SSL_read(conn->ssl[num].handle, buf, buffsize); if(nread < 0) { int err = SSL_get_error(conn->ssl[num].handle, nread); switch(err) { case SSL_ERROR_ZERO_RETURN: /* no more data */ break; case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: /* there's data pending, re-invoke SSL_read() */ *curlcode = CURLE_AGAIN; return -1; default: failf(conn->data, "SSL read: %s, errno %d", ERR_error_string(err, error_buffer), SOCKERRNO); *curlcode = CURLE_RECV_ERROR; return -1; } } return nread; } void Curl_cyassl_session_free(void *ptr) { (void)ptr; /* CyaSSL reuses sessions on own, no free */ } size_t Curl_cyassl_version(char *buffer, size_t size) { #ifdef WOLFSSL_VERSION return snprintf(buffer, size, "wolfSSL/%s", WOLFSSL_VERSION); #elif defined(CYASSL_VERSION) return snprintf(buffer, size, "CyaSSL/%s", CYASSL_VERSION); #else return snprintf(buffer, size, "CyaSSL/%s", "<1.8.8"); #endif } int Curl_cyassl_init(void) { return (CyaSSL_Init() == SSL_SUCCESS); } bool Curl_cyassl_data_pending(const struct connectdata* conn, int connindex) { if(conn->ssl[connindex].handle) /* SSL is in use */ return (0 != SSL_pending(conn->ssl[connindex].handle)) ? 
TRUE : FALSE; else return FALSE; } /* * This function is called to shut down the SSL layer but keep the * socket open (CCC - Clear Command Channel) */ int Curl_cyassl_shutdown(struct connectdata *conn, int sockindex) { int retval = 0; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; if(connssl->handle) { SSL_free (connssl->handle); connssl->handle = NULL; } return retval; } static CURLcode cyassl_connect_common(struct connectdata *conn, int sockindex, bool nonblocking, bool *done) { CURLcode result; struct SessionHandle *data = conn->data; struct ssl_connect_data *connssl = &conn->ssl[sockindex]; curl_socket_t sockfd = conn->sock[sockindex]; long timeout_ms; int what; /* check if the connection has already been established */ if(ssl_connection_complete == connssl->state) { *done = TRUE; return CURLE_OK; } if(ssl_connect_1==connssl->connecting_state) { /* Find out how much more time we're allowed */ timeout_ms = Curl_timeleft(data, NULL, TRUE); if(timeout_ms < 0) { /* no need to continue if time already is up */ failf(data, "SSL connection timeout"); return CURLE_OPERATION_TIMEDOUT; } result = cyassl_connect_step1(conn, sockindex); if(result) return result; } while(ssl_connect_2 == connssl->connecting_state || ssl_connect_2_reading == connssl->connecting_state || ssl_connect_2_writing == connssl->connecting_state) { /* check allowed time left */ timeout_ms = Curl_timeleft(data, NULL, TRUE); if(timeout_ms < 0) { /* no need to continue if time already is up */ failf(data, "SSL connection timeout"); return CURLE_OPERATION_TIMEDOUT; } /* if ssl is expecting something, check if it's available. 
*/ if(connssl->connecting_state == ssl_connect_2_reading || connssl->connecting_state == ssl_connect_2_writing) { curl_socket_t writefd = ssl_connect_2_writing== connssl->connecting_state?sockfd:CURL_SOCKET_BAD; curl_socket_t readfd = ssl_connect_2_reading== connssl->connecting_state?sockfd:CURL_SOCKET_BAD; what = Curl_socket_ready(readfd, writefd, nonblocking?0:timeout_ms); if(what < 0) { /* fatal error */ failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO); return CURLE_SSL_CONNECT_ERROR; } else if(0 == what) { if(nonblocking) { *done = FALSE; return CURLE_OK; } else { /* timeout */ failf(data, "SSL connection timeout"); return CURLE_OPERATION_TIMEDOUT; } } /* socket is readable or writable */ } /* Run transaction, and return to the caller if it failed or if * this connection is part of a multi handle and this loop would * execute again. This permits the owner of a multi handle to * abort a connection attempt before step2 has completed while * ensuring that a client using select() or epoll() will always * have a valid fdset to wait on. */ result = cyassl_connect_step2(conn, sockindex); if(result || (nonblocking && (ssl_connect_2 == connssl->connecting_state || ssl_connect_2_reading == connssl->connecting_state || ssl_connect_2_writing == connssl->connecting_state))) return result; } /* repeat step2 until all transactions are done. 
*/ if(ssl_connect_3 == connssl->connecting_state) { result = cyassl_connect_step3(conn, sockindex); if(result) return result; } if(ssl_connect_done == connssl->connecting_state) { connssl->state = ssl_connection_complete; conn->recv[sockindex] = cyassl_recv; conn->send[sockindex] = cyassl_send; *done = TRUE; } else *done = FALSE; /* Reset our connect state machine */ connssl->connecting_state = ssl_connect_1; return CURLE_OK; } CURLcode Curl_cyassl_connect_nonblocking(struct connectdata *conn, int sockindex, bool *done) { return cyassl_connect_common(conn, sockindex, TRUE, done); } CURLcode Curl_cyassl_connect(struct connectdata *conn, int sockindex) { CURLcode result; bool done = FALSE; result = cyassl_connect_common(conn, sockindex, FALSE, &done); if(result) return result; DEBUGASSERT(done); return CURLE_OK; } int Curl_cyassl_random(struct SessionHandle *data, unsigned char *entropy, size_t length) { RNG rng; (void)data; if(InitRng(&rng)) return 1; if(length > UINT_MAX) return 1; if(RNG_GenerateBlock(&rng, entropy, (unsigned)length)) return 1; return 0; } void Curl_cyassl_sha256sum(const unsigned char *tmp, /* input */ size_t tmplen, unsigned char *sha256sum /* output */, size_t unused) { Sha256 SHA256pw; (void)unused; InitSha256(&SHA256pw); Sha256Update(&SHA256pw, tmp, (word32)tmplen); Sha256Final(&SHA256pw, sha256sum); } #endif
gpl-2.0
yangjoo/kernel_samsung_smdk4412
drivers/input/touchscreen/mms152_ts.c
10
106502
/* * mms_ts.c - Touchscreen driver for Melfas MMS-series touch controllers * * Copyright (C) 2011 Google Inc. * Author: Dima Zavin <dima@android.com> * Simon Wilson <simonwilson@google.com> * * ISP reflashing code based on original code from Melfas. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #define DEBUG /* #define VERBOSE_DEBUG */ #define SEC_TSP_DEBUG /* #define FORCE_FW_FLASH */ /* #define FORCE_FW_PASS */ /* #define ESD_DEBUG */ #define SEC_TSP_FACTORY_TEST #define SEC_TSP_FW_UPDATE #define TSP_BUF_SIZE 1024 #define FAIL -1 #include <linux/delay.h> #include <linux/earlysuspend.h> #include <linux/firmware.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/input.h> #include <linux/input/mt.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> #include <mach/gpio.h> #include <linux/uaccess.h> #include <mach/cpufreq.h> #include <mach/dev.h> #include <linux/platform_data/mms152_ts.h> #include <asm/unaligned.h> #ifdef CONFIG_INPUT_FBSUSPEND #ifdef CONFIG_DRM #include <drm/drm_backlight.h> #endif #include <linux/fb.h> #endif #ifdef CONFIG_TOUCH_WAKE #include <linux/touch_wake.h> #endif #define MAX_FINGERS 10 #define MAX_WIDTH 30 #define MAX_PRESSURE 255 #define MAX_ANGLE 90 #define MIN_ANGLE -90 /* Registers */ #define MMS_MODE_CONTROL 0x01 #define MMS_XYRES_HI 0x02 #define MMS_XRES_LO 0x03 #define MMS_YRES_LO 0x04 #define MMS_INPUT_EVENT_PKT_SZ 0x0F #define MMS_INPUT_EVENT0 0x10 #define EVENT_SZ_PALM 8 #define EVENT_SZ_OLD 6 #define MMS_CORE_VERSION 0xE1 #define MMS_TSP_REVISION 0xF0 #define MMS_HW_REVISION 0xF1 #define MMS_COMPAT_GROUP 0xF2 #define MMS_FW_VERSION 0xF4 enum { ISP_MODE_FLASH_ERASE = 0x59F3, ISP_MODE_FLASH_WRITE = 0x62CD, 
ISP_MODE_FLASH_READ = 0x6AC9, }; /* each address addresses 4-byte words */ #define ISP_MAX_FW_SIZE (0x1F00 * 4) #define ISP_IC_INFO_ADDR 0x1F00 #define ISP_CAL_DATA_SIZE 256 #ifdef SEC_TSP_FW_UPDATE #define WORD_SIZE 4 #define ISC_PKT_SIZE 1029 #define ISC_PKT_DATA_SIZE 1024 #define ISC_PKT_HEADER_SIZE 3 #define ISC_PKT_NUM 31 #define ISC_ENTER_ISC_CMD 0x5F #define ISC_ENTER_ISC_DATA 0x01 #define ISC_CMD 0xAE #define ISC_ENTER_UPDATE_DATA 0x55 #define ISC_ENTER_UPDATE_DATA_LEN 9 #define ISC_DATA_WRITE_SUB_CMD 0xF1 #define ISC_EXIT_ISC_SUB_CMD 0x0F #define ISC_EXIT_ISC_SUB_CMD2 0xF0 #define ISC_CHECK_STATUS_CMD 0xAF #define ISC_CONFIRM_CRC 0x03 #define ISC_DEFAULT_CRC 0xFFFF #endif #ifdef SEC_TSP_FACTORY_TEST #define TX_NUM_A 29 #define TX_NUM_M 30 #define RX_NUM 17 #define NODE_NUM 510 /* 30x17 */ /* self diagnostic */ #define ADDR_CH_NUM 0x0B #define ADDR_UNIV_CMD 0xA0 #define CMD_ENTER_TEST 0x40 #define CMD_EXIT_TEST 0x4F #define CMD_CM_DELTA 0x41 #define CMD_GET_DELTA 0x42 #define CMD_CM_ABS 0X43 #define CMD_GET_ABS 0X44 #define CMD_CM_JITTER 0X45 #define CMD_GET_JITTER 0X46 #define CMD_GET_INTEN 0x70 #define CMD_RESULT_SZ 0XAE #define CMD_RESULT 0XAF /* VSC(Vender Specific Command) */ #define MMS_VSC_CMD 0xB0 /* vendor specific command */ #define MMS_VSC_MODE 0x1A /* mode of vendor */ #define MMS_VSC_CMD_ENTER 0X01 #define MMS_VSC_CMD_CM_DELTA 0X02 #define MMS_VSC_CMD_CM_ABS 0X03 #define MMS_VSC_CMD_EXIT 0X05 #define MMS_VSC_CMD_INTENSITY 0X04 #define MMS_VSC_CMD_RAW 0X06 #define MMS_VSC_CMD_REFER 0X07 #define TSP_CMD_STR_LEN 32 #define TSP_CMD_RESULT_STR_LEN 512 #define TSP_CMD_PARAM_NUM 8 #endif /* SEC_TSP_FACTORY_TEST */ /* Touch booster */ #if defined(CONFIG_EXYNOS4_CPUFREQ) &&\ defined(CONFIG_BUSFREQ_OPP) #define TOUCH_BOOSTER 1 #define TOUCH_BOOSTER_OFF_TIME 100 #define TOUCH_BOOSTER_CHG_TIME 200 #else #define TOUCH_BOOSTER 0 #endif struct device *sec_touchscreen; static struct device *bus_dev; int touch_is_pressed; static bool knockon_reset = false; 
#define ISC_DL_MODE 1 /* 5.55" OCTA LCD */ #define FW_VERSION_L 0x29 #define FW_VERSION_M 0x50 #define MAX_FW_PATH 255 #define TSP_FW_FILENAME "melfas_fw.bin" #if ISC_DL_MODE /* ISC_DL_MODE start */ /* * Default configuration of ISC mode */ #define DEFAULT_SLAVE_ADDR 0x48 #define SECTION_NUM 3 #define SECTION_NAME_LEN 5 #define PAGE_HEADER 3 #define PAGE_DATA 1024 #define PAGE_TAIL 2 #define PACKET_SIZE (PAGE_HEADER + PAGE_DATA + PAGE_TAIL) #define TS_WRITE_REGS_LEN 1030 #define TIMEOUT_CNT 10 #define STRING_BUF_LEN 100 /* State Registers */ #define MIP_ADDR_INPUT_INFORMATION 0x01 #define ISC_ADDR_VERSION 0xE1 #define ISC_ADDR_SECTION_PAGE_INFO 0xE5 /* Config Update Commands */ #define ISC_CMD_ENTER_ISC 0x5F #define ISC_CMD_ENTER_ISC_PARA1 0x01 #define ISC_CMD_UPDATE_MODE 0xAE #define ISC_SUBCMD_ENTER_UPDATE 0x55 #define ISC_SUBCMD_DATA_WRITE 0XF1 #define ISC_SUBCMD_LEAVE_UPDATE_PARA1 0x0F #define ISC_SUBCMD_LEAVE_UPDATE_PARA2 0xF0 #define ISC_CMD_CONFIRM_STATUS 0xAF #define ISC_STATUS_UPDATE_MODE 0x01 #define ISC_STATUS_CRC_CHECK_SUCCESS 0x03 #define ISC_CHAR_2_BCD(num) (((num/10)<<4) + (num%10)) #define ISC_MAX(x, y) (((x) > (y)) ? 
(x) : (y)) static const char section_name[SECTION_NUM][SECTION_NAME_LEN] = { "BOOT", "CORE", "CONF" }; static const unsigned char crc0_buf[31] = { 0x1D, 0x2C, 0x05, 0x34, 0x95, 0xA4, 0x8D, 0xBC, 0x59, 0x68, 0x41, 0x70, 0xD1, 0xE0, 0xC9, 0xF8, 0x3F, 0x0E, 0x27, 0x16, 0xB7, 0x86, 0xAF, 0x9E, 0x7B, 0x4A, 0x63, 0x52, 0xF3, 0xC2, 0xEB }; static const unsigned char crc1_buf[31] = { 0x1E, 0x9C, 0xDF, 0x5D, 0x76, 0xF4, 0xB7, 0x35, 0x2A, 0xA8, 0xEB, 0x69, 0x42, 0xC0, 0x83, 0x01, 0x04, 0x86, 0xC5, 0x47, 0x6C, 0xEE, 0xAD, 0x2F, 0x30, 0xB2, 0xF1, 0x73, 0x58, 0xDA, 0x99 }; enum { ISC_NONE = -1, ISC_SUCCESS = 0, ISC_FILE_OPEN_ERROR, ISC_FILE_CLOSE_ERROR, ISC_FILE_FORMAT_ERROR, ISC_WRITE_BUFFER_ERROR, ISC_I2C_ERROR, ISC_UPDATE_MODE_ENTER_ERROR, ISC_CRC_ERROR, ISC_VALIDATION_ERROR, ISC_COMPATIVILITY_ERROR, ISC_UPDATE_SECTION_ERROR, ISC_SLAVE_ERASE_ERROR, ISC_SLAVE_DOWNLOAD_ERROR, ISC_DOWNLOAD_WHEN_SLAVE_IS_UPDATED_ERROR, ISC_INITIAL_PACKET_ERROR, ISC_NO_NEED_UPDATE_ERROR, ISC_LIMIT }; enum { EC_NONE = -1, EC_DEPRECATED = 0, EC_BOOTLOADER_RUNNING = 1, EC_BOOT_ON_SUCCEEDED = 2, EC_ERASE_END_MARKER_ON_SLAVE_FINISHED = 3, EC_SLAVE_DOWNLOAD_STARTS = 4, EC_SLAVE_DOWNLOAD_FINISHED = 5, EC_2CHIP_HANDSHAKE_FAILED = 0x0E, EC_ESD_PATTERN_CHECKED = 0x0F, EC_LIMIT }; enum { SEC_NONE = -1, SEC_BOOTLOADER = 0, SEC_CORE, SEC_CONFIG, SEC_LIMIT }; struct tISCFWInfo_t { unsigned char version; unsigned char compatible_version; unsigned char start_addr; unsigned char end_addr; }; static struct tISCFWInfo_t mbin_info[SECTION_NUM]; static struct tISCFWInfo_t ts_info[SECTION_NUM]; static bool section_update_flag[SECTION_NUM]; const struct firmware *fw_mbin[SECTION_NUM]; static unsigned char g_wr_buf[1024 + 3 + 2]; #endif enum fw_flash_mode { ISP_FLASH, ISC_FLASH, }; enum { BUILT_IN = 0, UMS, REQ_FW, }; struct tsp_callbacks { void (*inform_charger)(struct tsp_callbacks *tsp_cb, bool mode); }; #ifdef CONFIG_LCD_FREQ_SWITCH struct tsp_lcd_callbacks { void (*inform_lcd)(struct tsp_lcd_callbacks *tsp_cb, bool 
en); }; #endif struct mms_ts_info { struct i2c_client *client; struct input_dev *input_dev; char phys[32]; int max_x; int max_y; bool invert_x; bool invert_y; u8 palm_flag; const u8 *config_fw_version; int irq; int (*power) (bool on); void (*input_event)(void *data); int tx_num; struct melfas_tsi_platform_data *pdata; struct early_suspend early_suspend; #if TOUCH_BOOSTER struct delayed_work work_dvfs_off; struct delayed_work work_dvfs_chg; bool dvfs_lock_status; int cpufreq_level; struct mutex dvfs_lock; #endif /* protects the enabled flag */ struct mutex lock; bool enabled; enum fw_flash_mode fw_flash_mode; void (*register_cb)(void *); struct tsp_callbacks callbacks; #ifdef CONFIG_LCD_FREQ_SWITCH void (*register_lcd_cb)(void *); struct tsp_lcd_callbacks lcd_callback; bool tsp_lcdfreq_flag; #endif bool ta_status; bool noise_mode; bool sleep_wakeup_ta_check; #if defined(SEC_TSP_DEBUG) unsigned char finger_state[MAX_FINGERS]; #endif #if defined(SEC_TSP_FW_UPDATE) u8 fw_update_state; #endif u8 fw_ic_ver; char panel; char ldi; /* LSI : L, Magna : M */ u8 fw_core_ver; #if defined(SEC_TSP_FACTORY_TEST) struct list_head cmd_list_head; u8 cmd_state; char cmd[TSP_CMD_STR_LEN]; int cmd_param[TSP_CMD_PARAM_NUM]; char cmd_result[TSP_CMD_RESULT_STR_LEN]; struct mutex cmd_lock; bool cmd_is_running; unsigned int reference[NODE_NUM]; unsigned int raw[NODE_NUM]; /* CM_ABS */ unsigned int inspection[NODE_NUM];/* CM_DELTA */ unsigned int intensity[NODE_NUM]; bool ft_flag; #endif /* SEC_TSP_FACTORY_TEST */ int (*lcd_type) (void); #ifdef CONFIG_INPUT_FBSUSPEND struct notifier_block fb_notif; bool was_enabled_at_suspend; #endif }; struct mms_fw_image { __le32 hdr_len; __le32 data_len; __le32 fw_ver; __le32 hdr_ver; u8 data[0]; } __packed; #ifdef CONFIG_HAS_EARLYSUSPEND static void mms_ts_early_suspend(struct early_suspend *h); static void mms_ts_late_resume(struct early_suspend *h); #endif #if defined(SEC_TSP_FACTORY_TEST) #define TSP_CMD(name, func) .cmd_name = name, .cmd_func = func 
struct tsp_cmd { struct list_head list; const char *cmd_name; void (*cmd_func)(void *device_data); }; static void fw_update(void *device_data); static void get_fw_ver_bin(void *device_data); static void get_fw_ver_ic(void *device_data); static void get_config_ver(void *device_data); static void get_threshold(void *device_data); static void module_off_master(void *device_data); static void module_on_master(void *device_data); /*static void module_off_slave(void *device_data); static void module_on_slave(void *device_data);*/ static void get_chip_vendor(void *device_data); static void get_chip_name(void *device_data); static void get_reference(void *device_data); static void get_cm_abs(void *device_data); static void get_cm_delta(void *device_data); static void get_intensity(void *device_data); static void get_x_num(void *device_data); static void get_y_num(void *device_data); static void run_reference_read(void *device_data); static void run_cm_abs_read(void *device_data); static void run_cm_delta_read(void *device_data); static void run_intensity_read(void *device_data); static void not_support_cmd(void *device_data); struct tsp_cmd tsp_cmds[] = { {TSP_CMD("fw_update", fw_update),}, {TSP_CMD("get_fw_ver_bin", get_fw_ver_bin),}, {TSP_CMD("get_fw_ver_ic", get_fw_ver_ic),}, {TSP_CMD("get_config_ver", get_config_ver),}, {TSP_CMD("get_threshold", get_threshold),}, {TSP_CMD("module_off_master", module_off_master),}, {TSP_CMD("module_on_master", module_on_master),}, {TSP_CMD("module_off_slave", not_support_cmd),}, {TSP_CMD("module_on_slave", not_support_cmd),}, {TSP_CMD("get_chip_vendor", get_chip_vendor),}, {TSP_CMD("get_chip_name", get_chip_name),}, {TSP_CMD("get_x_num", get_x_num),}, {TSP_CMD("get_y_num", get_y_num),}, {TSP_CMD("get_reference", get_reference),}, {TSP_CMD("get_cm_abs", get_cm_abs),}, {TSP_CMD("get_cm_delta", get_cm_delta),}, {TSP_CMD("get_intensity", get_intensity),}, {TSP_CMD("run_reference_read", run_reference_read),}, {TSP_CMD("run_cm_abs_read", 
run_cm_abs_read),}, {TSP_CMD("run_cm_delta_read", run_cm_delta_read),}, {TSP_CMD("run_intensity_read", run_intensity_read),}, {TSP_CMD("not_support_cmd", not_support_cmd),}, }; #endif #if TOUCH_BOOSTER static void change_dvfs_lock(struct work_struct *work) { struct mms_ts_info *info = container_of(work, struct mms_ts_info, work_dvfs_chg.work); int ret; mutex_lock(&info->dvfs_lock); ret = dev_lock(bus_dev, sec_touchscreen, 267160); /* 266 Mhz setting */ if (ret < 0) pr_err("%s: dev change bud lock failed(%d)\n",\ __func__, __LINE__); else pr_info("[TSP] change_dvfs_lock"); mutex_unlock(&info->dvfs_lock); } static void set_dvfs_off(struct work_struct *work) { struct mms_ts_info *info = container_of(work, struct mms_ts_info, work_dvfs_off.work); int ret; mutex_lock(&info->dvfs_lock); ret = dev_unlock(bus_dev, sec_touchscreen); if (ret < 0) pr_err("%s: dev unlock failed(%d)\n", __func__, __LINE__); exynos_cpufreq_lock_free(DVFS_LOCK_ID_TSP); info->dvfs_lock_status = false; pr_info("[TSP] DVFS Off!"); mutex_unlock(&info->dvfs_lock); } static void set_dvfs_lock(struct mms_ts_info *info, uint32_t on) { int ret; mutex_lock(&info->dvfs_lock); if (info->cpufreq_level <= 0) { ret = exynos_cpufreq_get_level(800000, &info->cpufreq_level); if (ret < 0) pr_err("[TSP] exynos_cpufreq_get_level error"); goto out; } if (on == 0) { if (info->dvfs_lock_status) { cancel_delayed_work(&info->work_dvfs_chg); schedule_delayed_work(&info->work_dvfs_off, msecs_to_jiffies(TOUCH_BOOSTER_OFF_TIME)); } } else if (on == 1) { cancel_delayed_work(&info->work_dvfs_off); if (!info->dvfs_lock_status) { ret = dev_lock(bus_dev, sec_touchscreen, 400200); if (ret < 0) { pr_err("%s: dev lock failed(%d)\n",\ __func__, __LINE__); } ret = exynos_cpufreq_lock(DVFS_LOCK_ID_TSP, info->cpufreq_level); if (ret < 0) pr_err("%s: cpu lock failed(%d)\n",\ __func__, __LINE__); schedule_delayed_work(&info->work_dvfs_chg, msecs_to_jiffies(TOUCH_BOOSTER_CHG_TIME)); info->dvfs_lock_status = true; pr_info("[TSP] DVFS 
On![%d]", info->cpufreq_level); } } else if (on == 2) { cancel_delayed_work(&info->work_dvfs_off); cancel_delayed_work(&info->work_dvfs_chg); schedule_work(&info->work_dvfs_off.work); } out: mutex_unlock(&info->dvfs_lock); } #endif #ifdef CONFIG_INPUT_FBSUSPEND #ifdef CONFIG_DRM static void melfas_set_power(void *priv, int power) { struct mms_ts_info *info = (struct mms_ts_info *)priv; int i; switch (power) { case FB_BLANK_UNBLANK: if (info->enabled == 0) { info->pdata->power(true); msleep(200); enable_irq(info->client->irq); info->enabled = 1; } else { pr_err("[TSP] touchscreen already on\n"); } break; case FB_BLANK_POWERDOWN: for (i = 0; i < MAX_FINGERS; i++) { input_mt_slot(info->input_dev, i); input_mt_report_slot_state(info->input_dev, MT_TOOL_FINGER, false); input_sync(info->input_dev); } if (info->enabled == 1) { disable_irq(info->client->irq); info->pdata->power(false); info->enabled = 0; } else { pr_err("[TSP] touchscreen already off\n"); } break; default: break; } } static struct drm_bl_notifier bl_notifier = { .set_power = melfas_set_power }; static int tsp_register_fb(struct mms_ts_info *info) { bl_notifier.dev = info->input_dev->dev; bl_notifier.priv = (void *)info; return drm_bl_register(&bl_notifier.dev, BL_TSP_CLASS); } static void tsp_unregister_fb(struct mms_ts_info *info) { drm_bl_unregister(&bl_notifier.dev); } #else static int melfas_fb_notifier_callback(struct notifier_block *self, unsigned long event, void *fb_evdata) { struct mms_ts_info *info; struct fb_event *evdata = fb_evdata; int blank; int i; /* If we aren't interested in this event, skip it immediately ... 
*/ if (event != FB_EVENT_BLANK) return 0; info = container_of(self, struct mms_ts_info, fb_notif); blank = *(int *)evdata->data; switch (blank) { case FB_BLANK_UNBLANK: if (info->enabled == 0) { info->pdata->power(true); msleep(200); enable_irq(info->client->irq); info->enabled = 1; } else { pr_err("[TSP] touchscreen already on\n"); } break; case FB_BLANK_POWERDOWN: for (i = 0; i < MAX_FINGERS; i++) { input_mt_slot(info->input_dev, i); input_mt_report_slot_state(info->input_dev, MT_TOOL_FINGER, false); input_sync(info->input_dev); } if (info->enabled == 1) { disable_irq(info->client->irq); info->pdata->power(false); info->enabled = 0; } else { pr_err("[TSP] touchscreen already off\n"); } break; default: break; } return 0; } static int tsp_register_fb(struct mms_ts_info *info) { memset(&info->fb_notif, 0, sizeof(info->fb_notif)); info->fb_notif.notifier_call = melfas_fb_notifier_callback; return fb_register_client(&info->fb_notif); } static void tsp_unregister_fb(struct mms_ts_info *info) { fb_unregister_client(&info->fb_notif); } #endif #endif static inline void mms_pwr_on_reset(struct mms_ts_info *info) { struct i2c_adapter *adapter = to_i2c_adapter(info->client->dev.parent); if (!info->pdata->mux_fw_flash) { dev_info(&info->client->dev, "missing platform data, can't do power-on-reset\n"); return; } i2c_lock_adapter(adapter); info->pdata->mux_fw_flash(true); info->pdata->power(0); gpio_direction_output(info->pdata->gpio_sda, 0); gpio_direction_output(info->pdata->gpio_scl, 0); gpio_direction_output(info->pdata->gpio_int, 0); msleep(50); info->pdata->power(1); msleep(200); info->pdata->mux_fw_flash(false); i2c_unlock_adapter(adapter); /* TODO: Seems long enough for the firmware to boot. 
* Find the right value */ msleep(250); } static void release_all_fingers(struct mms_ts_info *info) { struct i2c_client *client = info->client; int i; dev_dbg(&client->dev, "[TSP] %s\n", __func__); for (i = 0; i < MAX_FINGERS; i++) { #ifdef SEC_TSP_DEBUG if (info->finger_state[i] == 1) dev_notice(&client->dev, "finger %d up(force)\n", i); #endif info->finger_state[i] = 0; input_mt_slot(info->input_dev, i); input_mt_report_slot_state(info->input_dev, MT_TOOL_FINGER, false); } input_sync(info->input_dev); #if TOUCH_BOOSTER set_dvfs_lock(info, 2); pr_info("[TSP] dvfs_lock free.\n "); #endif } static void mms_set_noise_mode(struct mms_ts_info *info) { struct i2c_client *client = info->client; if (!(info->noise_mode && info->enabled)) return; dev_notice(&client->dev, "%s\n", __func__); if (info->ta_status) { dev_notice(&client->dev, "noise_mode & TA connect!!!\n"); i2c_smbus_write_byte_data(info->client, 0x30, 0x1); } else { dev_notice(&client->dev, "noise_mode & TA disconnect!!!\n"); i2c_smbus_write_byte_data(info->client, 0x30, 0x2); info->noise_mode = 0; } } static void reset_mms_ts(struct mms_ts_info *info) { struct i2c_client *client = info->client; if (info->enabled == false) return; dev_notice(&client->dev, "%s++\n", __func__); disable_irq_nosync(info->irq); info->enabled = false; touch_is_pressed = 0; release_all_fingers(info); mms_pwr_on_reset(info); info->enabled = true; if (info->fw_ic_ver < 0x18) { if (info->ta_status) { dev_notice(&client->dev, "TA connect!!!\n"); i2c_smbus_write_byte_data(info->client, 0x33, 0x1); } else { dev_notice(&client->dev, "TA disconnect!!!\n"); i2c_smbus_write_byte_data(info->client, 0x33, 0x2); } } mms_set_noise_mode(info); enable_irq(info->irq); dev_notice(&client->dev, "%s--\n", __func__); } static void melfas_ta_cb(struct tsp_callbacks *cb, bool ta_status) { struct mms_ts_info *info = container_of(cb, struct mms_ts_info, callbacks); struct i2c_client *client = info->client; dev_notice(&client->dev, "%s\n", __func__); 
info->ta_status = ta_status; if (info->enabled) { if (info->ta_status) { dev_notice(&client->dev, "TA connect!!!\n"); i2c_smbus_write_byte_data(info->client, 0x33, 0x1); } else { dev_notice(&client->dev, "TA disconnect!!!\n"); i2c_smbus_write_byte_data(info->client, 0x33, 0x2); } mms_set_noise_mode(info); } /* if (!ta_status) mms_set_noise_mode(info); */ } #ifdef CONFIG_LCD_FREQ_SWITCH static void melfas_lcd_cb(struct tsp_lcd_callbacks *cb, bool en) { struct mms_ts_info *info = container_of(cb, struct mms_ts_info, lcd_callback); if (info->enabled == false) { dev_err(&info->client->dev, "[TSP] do not excute %s.(touch off)\n", __func__); return; } if (info->fw_ic_ver < 0x21) { dev_err(&info->client->dev, "[TSP] Do not support firmware LCD framerate changing(ver = 0x%x)\n", info->fw_ic_ver); return; } if (en) { if (info->tsp_lcdfreq_flag == 0) { info->tsp_lcdfreq_flag = 1; dev_info(&info->client->dev, "[TSP] LCD framerate to 40 Hz\n"); i2c_smbus_write_byte_data(info->client, 0x34, 0x1); } } else { if (info->tsp_lcdfreq_flag == 1) { info->tsp_lcdfreq_flag = 0; dev_info(&info->client->dev, "[TSP] LCD framreate to 60 Hz\n"); i2c_smbus_write_byte_data(info->client, 0x34, 0x1); } } } #endif static irqreturn_t mms_ts_interrupt(int irq, void *dev_id) { struct mms_ts_info *info = dev_id; struct i2c_client *client = info->client; int ret; int i; int sz; u8 buf[MAX_FINGERS * EVENT_SZ_PALM] = { 0 }; int event_sz; u8 reg = MMS_INPUT_EVENT0; struct i2c_msg msg[] = { { .addr = client->addr, .flags = 0, .buf = &reg, .len = 1, }, { .addr = client->addr, .flags = I2C_M_RD, .buf = buf, }, }; if (info->panel == 'M') event_sz = EVENT_SZ_PALM; else event_sz = EVENT_SZ_OLD; sz = i2c_smbus_read_byte_data(client, MMS_INPUT_EVENT_PKT_SZ); if (sz < 0) { dev_err(&client->dev, "%s bytes=%d\n", __func__, sz); for (i = 0; i < 50; i++) { sz = i2c_smbus_read_byte_data(client, MMS_INPUT_EVENT_PKT_SZ); if (sz > 0) break; } if (i == 50) { dev_dbg(&client->dev, "i2c failed... 
reset!!\n"); reset_mms_ts(info); goto out; } } if (sz == 0) goto out; if (sz > MAX_FINGERS * event_sz) { dev_err(&client->dev, "[TSP] abnormal data inputed.\n"); goto out; } msg[1].len = sz; ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); if (ret != ARRAY_SIZE(msg)) { dev_err(&client->dev, "failed to read %d bytes of touch data (%d)\n", sz, ret); if (ret < 0) { dev_err(&client->dev, "%s bytes=%d\n", __func__, sz); for (i = 0; i < 5; i++) { msleep(20); ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); if (ret > 0) break; } if (i == 5) { dev_dbg(&client->dev, "[TSP] i2c failed E2... reset!!\n"); reset_mms_ts(info); goto out; } } } #if defined(VERBOSE_DEBUG) print_hex_dump(KERN_DEBUG, "mms_ts raw: ", DUMP_PREFIX_OFFSET, 32, 1, buf, sz, false); #endif if (buf[0] == 0x0F) { /* ESD */ dev_dbg(&client->dev, "ESD DETECT.... reset!!\n"); reset_mms_ts(info); goto out; } if (buf[0] == 0x0E) { /* NOISE MODE */ dev_dbg(&client->dev, "[TSP] noise mode enter!!\n"); info->noise_mode = 1; mms_set_noise_mode(info); goto out; } touch_is_pressed = 0; for (i = 0; i < sz; i += event_sz) { u8 *tmp = &buf[i]; int id = (tmp[0] & 0xf) - 1; int x = tmp[2] | ((tmp[1] & 0xf) << 8); int y = tmp[3] | ((tmp[1] >> 4) << 8); int angle = 0; int palm = 0; if (info->panel == 'M') { angle = (tmp[5] >= 127) ? 
(-(256 - tmp[5])) : tmp[5]; palm = (tmp[0] & 0x10) >> 4; } if (info->invert_x) { x = info->max_x - x; if (x < 0) x = 0; } if (info->invert_y) { y = info->max_y - y; if (y < 0) y = 0; } if (palm) { if (info->palm_flag == 3) { info->palm_flag = 1; } else { info->palm_flag = 3; palm = 3; } } else { if (info->palm_flag == 2) { info->palm_flag = 0; } else { info->palm_flag = 2; palm = 2; } } if (id >= MAX_FINGERS) { dev_notice(&client->dev, "finger id error [%d]\n", id); reset_mms_ts(info); goto out; } if ((tmp[0] & 0x80) == 0) { input_mt_slot(info->input_dev, id); input_mt_report_slot_state(info->input_dev, MT_TOOL_FINGER, false); #ifdef CONFIG_SAMSUNG_PRODUCT_SHIP if (info->panel == 'M') { if (info->finger_state[id] != 0) { info->finger_state[id] = 0; #ifdef CONFIG_LCD_FREQ_SWITCH dev_notice(&client->dev, "R(%c)(%d) [%2d]", info->ldi, (info->tsp_lcdfreq_flag ? 40 : 60), id); #else dev_notice(&client->dev, "R(%c) [%2d]", info->ldi, id); #endif } } else { if (info->finger_state[id] != 0) { info->finger_state[id] = 0; dev_notice(&client->dev, "R [%2d]", id); } } #else if (info->panel == 'M') { if (info->finger_state[id] != 0) { info->finger_state[id] = 0; #ifdef CONFIG_LCD_FREQ_SWITCH dev_notice(&client->dev, "R(%c)(%d) [%2d],([%4d],[%3d])", info->ldi, (info->tsp_lcdfreq_flag ? 
40 : 60), id, x, y); #else dev_notice(&client->dev, "R(%c) [%2d],([%4d],[%3d])", info->ldi, id, x, y); #endif } } else { if (info->finger_state[id] != 0) { info->finger_state[id] = 0; dev_notice(&client->dev, "R [%2d],([%4d],[%3d]),S:%d W:%d", id, x, y, tmp[4], tmp[5]); } } #endif continue; } if (info->panel == 'M') { input_mt_slot(info->input_dev, id); input_mt_report_slot_state(info->input_dev, MT_TOOL_FINGER, true); input_report_abs(info->input_dev, ABS_MT_POSITION_X, x); input_report_abs(info->input_dev, ABS_MT_POSITION_Y, y); input_report_abs(info->input_dev, ABS_MT_WIDTH_MAJOR, tmp[4]); input_report_abs(info->input_dev, ABS_MT_TOUCH_MAJOR, tmp[6]); input_report_abs(info->input_dev, ABS_MT_TOUCH_MINOR, tmp[7]); input_report_abs(info->input_dev, ABS_MT_ANGLE, angle); input_report_abs(info->input_dev, ABS_MT_PALM, palm); #ifdef CONFIG_SAMSUNG_PRODUCT_SHIP if (info->finger_state[id] == 0) { info->finger_state[id] = 1; #ifdef CONFIG_LCD_FREQ_SWITCH dev_notice(&client->dev, "P(%c)(%d) [%2d]", info->ldi, (info->tsp_lcdfreq_flag ? 40 : 60), id); #else dev_notice(&client->dev, "P(%c) [%2d]", info->ldi, id); #endif } #else if (info->finger_state[id] == 0) { info->finger_state[id] = 1; #ifdef CONFIG_LCD_FREQ_SWITCH dev_notice(&client->dev, "P(%c)(%d) [%2d],([%4d],[%3d]) w=%d, major=%d, minor=%d, angle=%d, palm=%d", info->ldi, (info->tsp_lcdfreq_flag ? 
40 : 60), id, x, y, tmp[4], tmp[6], tmp[7], angle, palm); #else dev_notice(&client->dev, "P(%c) [%2d],([%4d],[%3d]) w=%d, major=%d, minor=%d, angle=%d, palm=%d", info->ldi, id, x, y, tmp[4], tmp[6], tmp[7], angle, palm); #endif } #endif } else { input_mt_slot(info->input_dev, id); input_mt_report_slot_state(info->input_dev, MT_TOOL_FINGER, true); input_report_abs(info->input_dev, ABS_MT_POSITION_X, x); input_report_abs(info->input_dev, ABS_MT_POSITION_Y, y); input_report_abs(info->input_dev, ABS_MT_TOUCH_MAJOR, tmp[4]); input_report_abs(info->input_dev, ABS_MT_PRESSURE, tmp[5]); #ifdef CONFIG_SAMSUNG_PRODUCT_SHIP if (info->finger_state[id] == 0) { info->finger_state[id] = 1; dev_notice(&client->dev, "P [%2d]", id); } #else if (info->finger_state[id] == 0) { info->finger_state[id] = 1; dev_notice(&client->dev, "P [%2d],([%4d],[%3d]),S:%d W:%d", id, x, y, tmp[4], tmp[5]); } #endif } touch_is_pressed++; #ifdef CONFIG_TOUCH_WAKE if (mms_ts_suspended) { if (knockon) { if (touch_is_pressed == 0) { if (knockon_reset) { knockon_reset = false; touch_press(); } else { knockon_reset = true; } } } else { touch_press(); } } #endif } input_sync(info->input_dev); #if TOUCH_BOOSTER set_dvfs_lock(info, !!touch_is_pressed); #endif out: return IRQ_HANDLED; } int get_tsp_status(void) { return touch_is_pressed; } EXPORT_SYMBOL(get_tsp_status); #if ISC_DL_MODE static int mms100_i2c_read(struct i2c_client *client, u16 addr, u16 length, u8 *value) { struct i2c_adapter *adapter = client->adapter; struct i2c_msg msg; int ret = -1; msg.addr = client->addr; msg.flags = 0x00; msg.len = 1; msg.buf = (u8 *) &addr; ret = i2c_transfer(adapter, &msg, 1); if (ret >= 0) { msg.addr = client->addr; msg.flags = I2C_M_RD; msg.len = length; msg.buf = (u8 *) value; ret = i2c_transfer(adapter, &msg, 1); } if (ret < 0) pr_err("[TSP] : read error : [%d]", ret); return ret; } static int mms100_reset(struct mms_ts_info *info) { info->pdata->power(false); msleep(30); info->pdata->power(true); msleep(300); return 
ISC_SUCCESS;
}

/*
static int mms100_check_operating_mode(struct i2c_client *_client,
		const int _error_code)
{
	int ret;
	unsigned char rd_buf = 0x00;
	unsigned char count = 0;

	if(_client == NULL)
		pr_err("[TSP ISC] _client is null");

	ret = mms100_i2c_read(_client, ISC_ADDR_VERSION, 1, &rd_buf);

	if (ret<0) {
		pr_info("[TSP ISC] %s,%d: i2c read fail[%d]\n",
			__func__, __LINE__, ret);
		return _error_code;
	}

	return ISC_SUCCESS;
}
*/

/*
 * Read the per-section firmware version and page-address layout out of the
 * controller into the file-global ts_info[] table.
 *
 * Fills, for each of the SECTION_NUM (3) sections:
 *   - ts_info[i].version       from a 3-byte burst read at ISC_ADDR_VERSION
 *   - ts_info[i].start_addr / .end_addr
 *                              from an 8-byte read at ISC_ADDR_SECTION_PAGE_INFO
 *     (start addrs in rd_buf[0..2], end addrs in rd_buf[4..6] — note the
 *     "+ SECTION_NUM + 1" skip over rd_buf[3])
 * and derives compatible_version: CORE is compatible with the BOOT version,
 * CONFIG with the CORE version.
 *
 * Returns ISC_SUCCESS, or ISC_I2C_ERROR if either read fails.
 */
static int mms100_get_version_info(struct i2c_client *_client)
{
	int i, ret;
	unsigned char rd_buf[8];

	/* config version burst read (core, private, public) */
	ret = mms100_i2c_read(_client, ISC_ADDR_VERSION, 3, rd_buf);

	if (ret < 0) {
		pr_info("[TSP ISC] %s,%d: i2c read fail[%d]\n",
			__func__, __LINE__, ret);
		return ISC_I2C_ERROR;
	}

	for (i = 0; i < SECTION_NUM; i++)
		ts_info[i].version = rd_buf[i];

	/* compatibility chains downward: CORE<-BOOT, CONFIG<-CORE */
	ts_info[SEC_CORE].compatible_version =
		ts_info[SEC_BOOTLOADER].version;
	ts_info[SEC_CONFIG].compatible_version =
		ts_info[SEC_CORE].version;

	ret = mms100_i2c_read(_client, ISC_ADDR_SECTION_PAGE_INFO, 8, rd_buf);

	if (ret < 0) {
		pr_info("[TSP ISC] %s,%d: i2c read fail[%d]\n",
			__func__, __LINE__, ret);
		return ISC_I2C_ERROR;
	}

	for (i = 0; i < SECTION_NUM; i++) {
		ts_info[i].start_addr = rd_buf[i];
		/* end addrs live at rd_buf[4..6]; rd_buf[3] is skipped */
		ts_info[i].end_addr = rd_buf[i + SECTION_NUM + 1];
	}

	for (i = 0; i < SECTION_NUM; i++) {
		pr_info("TS : Section(%d) version: 0x%02X\n",
			i, ts_info[i].version);
		pr_info("TS : Section(%d) Start Address: 0x%02X\n",
			i, ts_info[i].start_addr);
		pr_info("TS : Section(%d) End Address: 0x%02X\n",
			i, ts_info[i].end_addr);
		pr_info("TS : Section(%d) Compatibility: 0x%02X\n",
			i, ts_info[i].compatible_version);
	}

	return ISC_SUCCESS;
}

/*
 * Parse the three firmware .mbin blobs (fw_mbin[], requested elsewhere) and
 * fill mbin_info[] with each section's version, compatible version and
 * start/end page addresses.
 *
 * The mbin header is a whitespace-separated text stream; each field is found
 * by scanning token-by-token with sscanf("%s") until a keyword
 * (SECTION_NAME, SECTION_VERSION, START_PAGE_ADDR, END_PAGE_ADDR,
 * COMPATIBLE_VERSION, then the "[Binary]" terminator) is hit, then reading
 * the value after it.  Versions are stored BCD-encoded via ISC_CHAR_2_BCD.
 *
 * If a section has no blob (fw_mbin[i] == NULL), its mbin_info[] entry is
 * copied from ts_info[] so later comparisons see "no change".
 *
 * Returns ISC_SUCCESS, or ISC_FILE_FORMAT_ERROR when the section name does
 * not match section_name[i] or the parsed version is 0xFF.
 *
 * NOTE(review): sscanf("%s"...) into str_buf[STRING_BUF_LEN]/name_buf[
 * SECTION_NAME_LEN] has no field-width limit, so a malformed/oversized token
 * in the blob would overflow the stack buffers — verify blob provenance or
 * add width specifiers in a follow-up.
 * NOTE(review): the parse also assumes the stream is NUL-terminated and each
 * keyword exists; a truncated blob would run past the buffer — TODO confirm
 * firmware images always carry the full header.
 */
static int mms100_seek_section_info(void)
{
	int i;
	char str_buf[STRING_BUF_LEN];
	char name_buf[SECTION_NAME_LEN];
	int version;
	int page_num;

	const unsigned char *buf;
	int next_ptr;

	for (i = 0; i < SECTION_NUM; i++) {
		if (fw_mbin[i] == NULL) {
			buf = NULL;
			pr_info("[TSP ISC] fw_mbin[%d]->data is NULL", i);
		} else {
			buf = fw_mbin[i]->data;
		}

		if (buf == NULL) {
			/* no blob: mirror the on-chip info so this section
			 * is treated as up to date */
			mbin_info[i].version = ts_info[i].version;
			mbin_info[i].compatible_version =
				ts_info[i].compatible_version;
			mbin_info[i].start_addr = ts_info[i].start_addr;
			mbin_info[i].end_addr = ts_info[i].end_addr;
		} else {
			next_ptr = 0;

			do {
				sscanf(buf + next_ptr, "%s", str_buf);
				next_ptr += strlen(str_buf) + 1;
			} while (!strstr(str_buf, "SECTION_NAME"));

			sscanf(buf + next_ptr, "%s%s", str_buf, name_buf);

			if (strncmp(section_name[i], name_buf,
				SECTION_NAME_LEN))
				return ISC_FILE_FORMAT_ERROR;

			do {
				sscanf(buf + next_ptr, "%s", str_buf);
				next_ptr += strlen(str_buf) + 1;
			} while (!strstr(str_buf, "SECTION_VERSION"));

			sscanf(buf + next_ptr, "%s%d", str_buf, &version);
			mbin_info[i].version = ISC_CHAR_2_BCD(version);

			do {
				sscanf(buf + next_ptr, "%s", str_buf);
				next_ptr += strlen(str_buf) + 1;
			} while (!strstr(str_buf, "START_PAGE_ADDR"));

			sscanf(buf + next_ptr, "%s%d", str_buf, &page_num);
			mbin_info[i].start_addr = page_num;

			do {
				sscanf(buf + next_ptr, "%s", str_buf);
				next_ptr += strlen(str_buf) + 1;
			} while (!strstr(str_buf, "END_PAGE_ADDR"));

			sscanf(buf + next_ptr, "%s%d", str_buf, &page_num);
			mbin_info[i].end_addr = page_num;

			do {
				sscanf(buf + next_ptr, "%s", str_buf);
				next_ptr += strlen(str_buf) + 1;
			} while (!strstr(str_buf, "COMPATIBLE_VERSION"));

			sscanf(buf + next_ptr, "%s%d", str_buf, &version);
			mbin_info[i].compatible_version =
				ISC_CHAR_2_BCD(version);

			do {
				sscanf(buf + next_ptr, "%s", str_buf);
				next_ptr += strlen(str_buf) + 1;
			} while (!strstr(str_buf, "[Binary]"));

			if (mbin_info[i].version == 0xFF)
				return ISC_FILE_FORMAT_ERROR;
		}
	}

	for (i = 0; i < SECTION_NUM; i++) {
		pr_info("[TSP ISC] MBin : Section(%d) Version: 0x%02X\n",
			i, mbin_info[i].version);
		pr_info("[TSP ISC] MBin : Section(%d) Start Address: 0x%02X\n",
			i, mbin_info[i].start_addr);
		pr_info("[TSP ISC] MBin : Section(%d) End Address: 0x%02X\n",
			i, mbin_info[i].end_addr);
		pr_info("[TSP ISC] MBin : Section(%d) Compatibility: 0x%02X\n",
			i, mbin_info[i].compatible_version);
	}

	return ISC_SUCCESS;
}

static int
mms100_compare_version_info(struct i2c_client *_client)
{
	/*
	 * Decide which firmware sections need flashing by comparing the
	 * on-chip versions (ts_info[], read here via
	 * mms100_get_version_info()) against the .mbin blob versions
	 * (mbin_info[], filled by mms100_seek_section_info()), then verify
	 * the cross-section compatibility chain.
	 *
	 * Side effect: sets the file-global section_update_flag[].
	 * Returns ISC_SUCCESS, ISC_I2C_ERROR, or ISC_COMPATIVILITY_ERROR.
	 */
	int i, ret;
	unsigned char expected_compatibility[SECTION_NUM];

	if (mms100_get_version_info(_client) != ISC_SUCCESS)
		return ISC_I2C_ERROR;

	/*
	 * NOTE(review): return value is stored but never checked, so a
	 * ISC_FILE_FORMAT_ERROR from the mbin parse is silently ignored
	 * here — confirm whether that is intentional.
	 */
	ret = mms100_seek_section_info();

	/* Check update areas , 0 : bootloader 1: core 2: private 3: public */
	for (i = 0; i < SECTION_NUM; i++) {
		/* version 0 (no blob info) or any mismatch marks the
		 * section for update */
		if ((mbin_info[i].version == 0) ||
			(mbin_info[i].version != ts_info[i].version)) {
			section_update_flag[i] = true;
			pr_info("[TSP ISC] [%d] section will be updated!", i);
		}
	}
	/* Bootloader and core updates are force-disabled: only the CONFIG
	 * section (index 2) is ever flashed through this path. */
	section_update_flag[0] = false;
	section_update_flag[1] = false;
	pr_info("[TSP ISC] [%d] [%d] [%d]", section_update_flag[0],
		section_update_flag[1], section_update_flag[2]);

	/* The version each downstream section must be compatible with is the
	 * version its upstream section will have AFTER the update: the mbin
	 * version if that upstream section is being flashed, else the
	 * version already on the chip. */
	if (section_update_flag[SEC_BOOTLOADER]) {
		expected_compatibility[SEC_CORE] =
		mbin_info[SEC_BOOTLOADER].version;
	} else {
		expected_compatibility[SEC_CORE] =
		ts_info[SEC_BOOTLOADER].version;
	}

	if (section_update_flag[SEC_CORE]) {
		expected_compatibility[SEC_CONFIG] =
		mbin_info[SEC_CORE].version;
	} else {
		expected_compatibility[SEC_CONFIG] =
		ts_info[SEC_CORE].version;
	}

	/* NOTE(review): loop bound is "< SEC_CONFIG", so only SEC_CORE is
	 * checked and SEC_CONFIG's compatibility is never verified —
	 * confirm whether "<=" was intended. */
	for (i = SEC_CORE; i < SEC_CONFIG; i++) {
		if (section_update_flag[i]) {
			pr_info("[TSP ISC] section_update_flag(%d), 0x%02x, 0x%02x\n",
				i, expected_compatibility[i],
				mbin_info[i].compatible_version);

			if (expected_compatibility[i] !=
				mbin_info[i].compatible_version)
				return ISC_COMPATIVILITY_ERROR;
		} else {
			pr_info("[TSP ISC] !section_update_flag(%d), 0x%02x, 0x%02x\n",
				i, expected_compatibility[i],
				ts_info[i].compatible_version);

			if (expected_compatibility[i] !=
				ts_info[i].compatible_version)
				return ISC_COMPATIVILITY_ERROR;
		}
	}

	return ISC_SUCCESS;
}

/*
 * Switch the controller from normal operation into ISC (in-system
 * configuration) mode by writing the 2-byte enter command, then wait 50 ms
 * for the mode change to take effect.
 *
 * Returns ISC_SUCCESS or ISC_I2C_ERROR.
 */
static int mms100_enter_ISC_mode(struct i2c_client *_client)
{
	int ret;
	unsigned char wr_buf[2];

	pr_info("[TSP ISC] %s\n", __func__);

	wr_buf[0] = ISC_CMD_ENTER_ISC;
	wr_buf[1] = ISC_CMD_ENTER_ISC_PARA1;

	ret = i2c_master_send(_client, wr_buf, 2);

	if (ret < 0) {
		pr_info("[TSP ISC] %s,%d: i2c write fail[%d]\n",
			__func__, __LINE__, ret);
		return ISC_I2C_ERROR;
	}
	msleep(50);

	return ISC_SUCCESS;
}
static int mms100_enter_config_update(struct i2c_client *_client) { int ret; unsigned char wr_buf[10] = {0,}; unsigned char rd_buf; wr_buf[0] = ISC_CMD_UPDATE_MODE; wr_buf[1] = ISC_SUBCMD_ENTER_UPDATE; ret = i2c_master_send(_client, wr_buf, 10); if (ret < 0) { pr_info("[TSP ISC] %s,%d: i2c write fail[%d]\n", __func__, __LINE__, ret); return ISC_I2C_ERROR; } ret = mms100_i2c_read(_client, ISC_CMD_CONFIRM_STATUS, 1, &rd_buf); if (ret < 0) { pr_info("[TSP ISC] %s,%d: i2c read fail[%d]\n", __func__, __LINE__, ret); return ISC_I2C_ERROR; } if (rd_buf != ISC_STATUS_UPDATE_MODE) return ISC_UPDATE_MODE_ENTER_ERROR; pr_info("[TSP ISC]End mms100_enter_config_update()\n"); return ISC_SUCCESS; } static int mms100_ISC_clear_page(struct i2c_client *_client, unsigned char _page_addr) { int ret; unsigned char rd_buf; memset(&g_wr_buf[3], 0xFF, PAGE_DATA); g_wr_buf[0] = ISC_CMD_UPDATE_MODE; /* command */ g_wr_buf[1] = ISC_SUBCMD_DATA_WRITE; /* sub_command */ g_wr_buf[2] = _page_addr; g_wr_buf[PAGE_HEADER + PAGE_DATA] = crc0_buf[_page_addr]; g_wr_buf[PAGE_HEADER + PAGE_DATA + 1] = crc1_buf[_page_addr]; ret = i2c_master_send(_client, g_wr_buf, PACKET_SIZE); if (ret < 0) { pr_info("[TSP ISC] %s,%d: i2c write fail[%d]\n", __func__, __LINE__, ret); return ISC_I2C_ERROR; } ret = mms100_i2c_read(_client, ISC_CMD_CONFIRM_STATUS, 1, &rd_buf); if (ret < 0) { pr_info("[TSP ISC] %s,%d: i2c read fail[%d]\n", __func__, __LINE__, ret); return ISC_I2C_ERROR; } if (rd_buf != ISC_STATUS_CRC_CHECK_SUCCESS) return ISC_UPDATE_MODE_ENTER_ERROR; pr_info("[TSP ISC]End mms100_ISC_clear_page()\n"); return ISC_SUCCESS; } static int mms100_ISC_clear_validate_markers(struct i2c_client *_client) { int ret_msg; int i, j; bool is_matched_address; for (i = SEC_CORE; i <= SEC_CONFIG; i++) { if (section_update_flag[i]) { if (ts_info[i].end_addr <= 30 && ts_info[i].end_addr > 0) { ret_msg = mms100_ISC_clear_page(_client, ts_info[i].end_addr); if (ret_msg != ISC_SUCCESS) return ret_msg; } } } for (i = SEC_CORE; i <= 
SEC_CONFIG; i++) { if (section_update_flag[i]) { is_matched_address = false; for (j = SEC_CORE; j <= SEC_CONFIG; j++) { if (mbin_info[i].end_addr == ts_info[i].end_addr) { is_matched_address = true; break; } } if (!is_matched_address) { if (mbin_info[i].end_addr <= 30 && mbin_info[i].end_addr > 0) { ret_msg = mms100_ISC_clear_page(_client, mbin_info[i].end_addr); if (ret_msg != ISC_SUCCESS) return ret_msg; } } } } return ISC_SUCCESS; } static void mms100_calc_crc(unsigned char *crc, int page_addr, unsigned char *ptr_fw) { int i, j; unsigned char ucData; unsigned short SeedValue; unsigned short CRC_check_buf; unsigned short CRC_send_buf; unsigned short IN_data; unsigned short XOR_bit_1; unsigned short XOR_bit_2; unsigned short XOR_bit_3; CRC_check_buf = 0xFFFF; SeedValue = (unsigned short)page_addr; for (i = 7; i >= 0; i--) { IN_data = (SeedValue >> i) & 0x01; XOR_bit_1 = (CRC_check_buf & 0x0001) ^ IN_data; XOR_bit_2 = XOR_bit_1^(CRC_check_buf >> 11 & 0x01); XOR_bit_3 = XOR_bit_1^(CRC_check_buf >> 4 & 0x01); CRC_send_buf = (XOR_bit_1 << 4) | (CRC_check_buf >> 12 & 0x0F); CRC_send_buf = (CRC_send_buf << 7) | (XOR_bit_2 << 6) | (CRC_check_buf >> 5 & 0x3F); CRC_send_buf = (CRC_send_buf << 4) | (XOR_bit_3 << 3) | (CRC_check_buf >> 1 & 0x0007); CRC_check_buf = CRC_send_buf; } for (j = 0; j < 1024; j++) { ucData = ptr_fw[j]; for (i = 7; i >= 0; i--) { IN_data = (ucData >> i) & 0x0001; XOR_bit_1 = (CRC_check_buf & 0x0001) ^ IN_data; XOR_bit_2 = XOR_bit_1^(CRC_check_buf >> 11 & 0x01); XOR_bit_3 = XOR_bit_1^(CRC_check_buf >> 4 & 0x01); CRC_send_buf = (XOR_bit_1 << 4) | (CRC_check_buf >> 12 & 0x0F); CRC_send_buf = (CRC_send_buf << 7) | (XOR_bit_2 << 6) | (CRC_check_buf >> 5 & 0x3F); CRC_send_buf = (CRC_send_buf << 4) | (XOR_bit_3 << 3) | (CRC_check_buf >> 1 & 0x0007); CRC_check_buf = CRC_send_buf; } } crc[0] = (unsigned char)((CRC_check_buf >> 8) & 0xFF); crc[1] = (unsigned char)((CRC_check_buf >> 0) & 0xFF); } static int mms100_update_section_data(struct i2c_client *_client) 
/*
 * Body of mms100_update_section_data(): for each flagged section, skips the
 * ASCII header of the MBIN firmware blob (scans tokens until "[Binary]"),
 * then flashes every page from start_addr to end_addr.  Each 1024-byte page
 * is copied into g_wr_buf with 4-byte word order reversed, CRC'd with
 * mms100_calc_crc(), sent via i2c_master_send(), and acknowledged through
 * ISC_CMD_CONFIRM_STATUS (ISC_CRC_ERROR on a bad ack).
 * NOTE(review): section_update_flag[i] = false sits inside the per-page
 * loop; it is idempotent but presumably belongs after the loop — confirm.
 * Also begins mms100_open_mbinary(): request_firmware() of BOOT.fw, CORE.fw
 * and an LDI-dependent config image ('L' -> CONFL.fw; continued next span).
 */
{ int i, j, ret; unsigned char rd_buf; unsigned char crc[2]; const unsigned char *ptr_fw; char str_buf[STRING_BUF_LEN]; int page_addr; for (i = 0; i < SECTION_NUM; i++) { if (section_update_flag[i]) { pr_info("[TSP ISC] section data i2c flash : [%d]", i); ptr_fw = fw_mbin[i]->data; do { sscanf(ptr_fw, "%s", str_buf); ptr_fw += strlen(str_buf) + 1; /* pr_info("[TSP ISC] Section[%d] %s", i, str_buf ); */ } while (!strstr(str_buf, "[Binary]")); ptr_fw += 1; for (page_addr = mbin_info[i].start_addr; page_addr <= mbin_info[i].end_addr; page_addr++) { if (page_addr - mbin_info[i].start_addr > 0) ptr_fw += 1024; g_wr_buf[0] = ISC_CMD_UPDATE_MODE; g_wr_buf[1] = ISC_SUBCMD_DATA_WRITE; g_wr_buf[2] = (unsigned char)page_addr; for (j = 0; j < 1024; j += 4) { g_wr_buf[3+j] = ptr_fw[j+3]; g_wr_buf[3+j+1] = ptr_fw[j+2]; g_wr_buf[3+j+2] = ptr_fw[j+1]; g_wr_buf[3+j+3] = ptr_fw[j+0]; } mms100_calc_crc(crc, page_addr, &g_wr_buf[3]); g_wr_buf[1027] = crc[0]; g_wr_buf[1028] = crc[1]; /* pr_info("[TSP ISC] [%d] DATA %02X %02X %02X %02X CRC %02X %02X ", page_addr, g_wr_buf[3], g_wr_buf[4], g_wr_buf[5], g_wr_buf[6] , crc[0] , crc[1] ); */ ret = i2c_master_send(_client, g_wr_buf, PACKET_SIZE); if (ret < 0) { pr_info("[TSP ISC] %s,%d: i2c write fail[%d]\n", __func__, __LINE__, ret); return ISC_I2C_ERROR; } ret = mms100_i2c_read(_client, ISC_CMD_CONFIRM_STATUS, 1, &rd_buf); if (ret < 0) { pr_info("[TSP ISC] %s,%d: i2c read fail[%d]\n", __func__, __LINE__, ret); return ISC_I2C_ERROR; } if (rd_buf != ISC_STATUS_CRC_CHECK_SUCCESS) return ISC_CRC_ERROR; section_update_flag[i] = false; } } } return ISC_SUCCESS; } static int mms100_open_mbinary(struct mms_ts_info *info) { struct i2c_client *_client = info->client; int ret = 0; ret += request_firmware(&(fw_mbin[0]),\ "tsp_melfas/note/BOOT.fw", &_client->dev); ret += request_firmware(&(fw_mbin[1]),\ "tsp_melfas/note/CORE.fw", &_client->dev); if (info->ldi == 'L') { ret += request_firmware(&(fw_mbin[2]),\ "tsp_melfas/note/CONFL.fw", &_client->dev); } 
/*
 * Tail of mms100_open_mbinary() (non-'L' LDI loads CONFM.fw; any non-zero
 * accumulated request_firmware() error is returned).  Then:
 * - mms100_close_mbinary(): release_firmware() on every non-NULL fw_mbin[].
 * - mms100_ISC_download_mbinary(): top-level ISC flash sequence — reset,
 *   open images, compare versions, enter ISC + update mode, clear validate
 *   markers, write section data; on any step failing jumps to
 *   ISC_ERROR_HANDLE, which logs the code, resets the chip and releases the
 *   firmware images before returning the ISC_* status.
 * - start of hw_reboot(): powers the controller off and drives SDA/SCL per
 *   the 'bootloader' flag (continued in the next span).
 */
else { ret += request_firmware(&(fw_mbin[2]),\ "tsp_melfas/note/CONFM.fw", &_client->dev); } if (!ret) return ISC_SUCCESS; else { pr_info("[TSP ISC] request_firmware fail"); return ret; } } static int mms100_close_mbinary(void) { int i; for (i = 0; i < SECTION_NUM; i++) { if (fw_mbin[i] != NULL) release_firmware(fw_mbin[i]); } return ISC_SUCCESS; } int mms100_ISC_download_mbinary(struct mms_ts_info *info) { struct i2c_client *_client = info->client; int ret_msg = ISC_NONE; pr_info("[TSP ISC] %s\n", __func__); mms100_reset(info); /* ret_msg = mms100_check_operating_mode(_client, EC_BOOT_ON_SUCCEEDED); if (ret_msg != ISC_SUCCESS) goto ISC_ERROR_HANDLE; */ ret_msg = mms100_open_mbinary(info); if (ret_msg != ISC_SUCCESS) goto ISC_ERROR_HANDLE; /*Config version Check*/ ret_msg = mms100_compare_version_info(_client); if (ret_msg != ISC_SUCCESS) goto ISC_ERROR_HANDLE; ret_msg = mms100_enter_ISC_mode(_client); if (ret_msg != ISC_SUCCESS) goto ISC_ERROR_HANDLE; ret_msg = mms100_enter_config_update(_client); if (ret_msg != ISC_SUCCESS) goto ISC_ERROR_HANDLE; ret_msg = mms100_ISC_clear_validate_markers(_client); if (ret_msg != ISC_SUCCESS) goto ISC_ERROR_HANDLE; pr_info("[TSP ISC]mms100_update_section_data start"); ret_msg = mms100_update_section_data(_client); if (ret_msg != ISC_SUCCESS) goto ISC_ERROR_HANDLE; pr_info("[TSP ISC]mms100_update_section_data end"); /* mms100_reset(info); */ pr_info("[TSP ISC]FIRMWARE_UPDATE_FINISHED!!!\n"); ret_msg = ISC_SUCCESS; ISC_ERROR_HANDLE: if (ret_msg != ISC_SUCCESS) pr_info("[TSP ISC]ISC_ERROR_CODE: %d\n", ret_msg); mms100_reset(info); mms100_close_mbinary(); return ret_msg; } #endif /* ISC_DL_MODE start */ static void hw_reboot(struct mms_ts_info *info, bool bootloader) { info->pdata->power(0); gpio_direction_output(info->pdata->gpio_sda, bootloader ? 0 : 1); gpio_direction_output(info->pdata->gpio_scl, bootloader ? 
/*
 * Tail of hw_reboot(): power-cycles the chip with 30 ms settle delays; in
 * bootloader mode holds SCL low / SDA high, otherwise releases INT/SCL/SDA
 * back to inputs for normal I2C operation, then waits 40 ms.  Followed by
 * the hw_reboot_bootloader()/hw_reboot_normal() convenience wrappers and
 * the GPIO bit-bang ISP primitives: isp_toggle_clk() (one SCL pulse with
 * hold_us delays), isp_send_bits() (clocks 'cnt' bits out on SDA, msb
 * first), isp_recv_bits() (clocks 'cnt' bits in from SDA, msb first), and
 * the start of isp_enter_mode() (IRQs disabled while bit-banging the mode
 * word; continued in the next span).
 */
0 : 1); gpio_direction_output(info->pdata->gpio_int, 0); msleep(30); info->pdata->power(1); msleep(30); if (bootloader) { gpio_set_value(info->pdata->gpio_scl, 0); gpio_set_value(info->pdata->gpio_sda, 1); } else { gpio_set_value(info->pdata->gpio_int, 1); gpio_direction_input(info->pdata->gpio_int); gpio_direction_input(info->pdata->gpio_scl); gpio_direction_input(info->pdata->gpio_sda); } msleep(40); } static inline void hw_reboot_bootloader(struct mms_ts_info *info) { hw_reboot(info, true); } static inline void hw_reboot_normal(struct mms_ts_info *info) { hw_reboot(info, false); } static void isp_toggle_clk(struct mms_ts_info *info, int start_lvl, int end_lvl, int hold_us) { gpio_set_value(info->pdata->gpio_scl, start_lvl); udelay(hold_us); gpio_set_value(info->pdata->gpio_scl, end_lvl); udelay(hold_us); } /* 1 <= cnt <= 32 bits to write */ static void isp_send_bits(struct mms_ts_info *info, u32 data, int cnt) { gpio_direction_output(info->pdata->gpio_int, 0); gpio_direction_output(info->pdata->gpio_scl, 0); gpio_direction_output(info->pdata->gpio_sda, 0); /* clock out the bits, msb first */ while (cnt--) { gpio_set_value(info->pdata->gpio_sda, (data >> cnt) & 1); udelay(3); isp_toggle_clk(info, 1, 0, 3); } } /* 1 <= cnt <= 32 bits to read */ static u32 isp_recv_bits(struct mms_ts_info *info, int cnt) { u32 data = 0; gpio_direction_output(info->pdata->gpio_int, 0); gpio_direction_output(info->pdata->gpio_scl, 0); gpio_set_value(info->pdata->gpio_sda, 0); gpio_direction_input(info->pdata->gpio_sda); /* clock in the bits, msb first */ while (cnt--) { isp_toggle_clk(info, 0, 1, 1); data = (data << 1) | (!!gpio_get_value(info->pdata->gpio_sda)); } gpio_direction_output(info->pdata->gpio_sda, 0); return data; } static void isp_enter_mode(struct mms_ts_info *info, u32 mode) { int cnt; unsigned long flags; local_irq_save(flags); gpio_direction_output(info->pdata->gpio_int, 0); gpio_direction_output(info->pdata->gpio_scl, 0); gpio_direction_output(info->pdata->gpio_sda, 
/*
 * Tail of isp_enter_mode(): with IRQs off, shifts the 16-bit mode word out
 * on the INT line (msb first) clocked by isp_toggle_clk().  Then:
 * - isp_exit_mode(): INT low plus ten clock pulses leaves ISP mode.
 * - flash_set_address(): sends the 13-bit page address in bits 13:1 of an
 *   18-bit command word.
 * - flash_erase(): enters ISP_MODE_FLASH_ERASE and issues the four
 *   specially-timed clock pulses the erase sequence requires.
 * - flash_readl(): enters ISP_MODE_FLASH_READ, sets the address, runs six
 *   data-load clocks and shifts in a 32-bit word (IRQs disabled
 *   throughout).
 * - start of flash_writel(): enters ISP_MODE_FLASH_WRITE, sets the address
 *   and shifts out the 32-bit value (continued in the next span).
 */
1); mode &= 0xffff; for (cnt = 15; cnt >= 0; cnt--) { gpio_set_value(info->pdata->gpio_int, (mode >> cnt) & 1); udelay(3); isp_toggle_clk(info, 1, 0, 3); } gpio_set_value(info->pdata->gpio_int, 0); local_irq_restore(flags); } static void isp_exit_mode(struct mms_ts_info *info) { int i; unsigned long flags; local_irq_save(flags); gpio_direction_output(info->pdata->gpio_int, 0); udelay(3); for (i = 0; i < 10; i++) isp_toggle_clk(info, 1, 0, 3); local_irq_restore(flags); } static void flash_set_address(struct mms_ts_info *info, u16 addr) { /* Only 13 bits of addr are valid. * The addr is in bits 13:1 of cmd */ isp_send_bits(info, (u32) (addr & 0x1fff) << 1, 18); } static void flash_erase(struct mms_ts_info *info) { isp_enter_mode(info, ISP_MODE_FLASH_ERASE); gpio_direction_output(info->pdata->gpio_int, 0); gpio_direction_output(info->pdata->gpio_scl, 0); gpio_direction_output(info->pdata->gpio_sda, 1); /* 4 clock cycles with different timings for the erase to * get processed, clk is already 0 from above */ udelay(7); isp_toggle_clk(info, 1, 0, 3); udelay(7); isp_toggle_clk(info, 1, 0, 3); usleep_range(25000, 35000); isp_toggle_clk(info, 1, 0, 3); usleep_range(150, 200); isp_toggle_clk(info, 1, 0, 3); gpio_set_value(info->pdata->gpio_sda, 0); isp_exit_mode(info); } static u32 flash_readl(struct mms_ts_info *info, u16 addr) { int i; u32 val; unsigned long flags; local_irq_save(flags); isp_enter_mode(info, ISP_MODE_FLASH_READ); flash_set_address(info, addr); gpio_direction_output(info->pdata->gpio_scl, 0); gpio_direction_output(info->pdata->gpio_sda, 0); udelay(40); /* data load cycle */ for (i = 0; i < 6; i++) isp_toggle_clk(info, 1, 0, 10); val = isp_recv_bits(info, 32); isp_exit_mode(info); local_irq_restore(flags); return val; } static void flash_writel(struct mms_ts_info *info, u16 addr, u32 val) { unsigned long flags; local_irq_save(flags); isp_enter_mode(info, ISP_MODE_FLASH_WRITE); flash_set_address(info, addr); isp_send_bits(info, val, 32); 
/*
 * Tail of flash_writel(): six specially-timed clock pulses commit the word
 * to flash, then SDA is released and ISP mode exited.  Then:
 * - flash_is_erased(): reads every 32-bit word of the flash and returns
 *   false on the first word that is not 0xffffffff.
 * - fw_write_image(): writes the image word by word (little-endian via
 *   get_unaligned_le32) with up to 3 write+verify retries per word;
 *   returns -ENXIO if a word never verifies.
 * - start of fw_download(): allocates an ISP_CAL_DATA_SIZE word buffer and
 *   validates the image length (multiple of 4, <= ISP_MAX_FW_SIZE), freeing
 *   the buffer on the early-error paths (continued in the next span).
 */
gpio_direction_output(info->pdata->gpio_sda, 1); /* 6 clock cycles with different timings for the data to get written * into flash */ isp_toggle_clk(info, 0, 1, 3); isp_toggle_clk(info, 0, 1, 3); isp_toggle_clk(info, 0, 1, 6); isp_toggle_clk(info, 0, 1, 12); isp_toggle_clk(info, 0, 1, 3); isp_toggle_clk(info, 0, 1, 3); isp_toggle_clk(info, 1, 0, 1); gpio_direction_output(info->pdata->gpio_sda, 0); isp_exit_mode(info); local_irq_restore(flags); usleep_range(300, 400); } static bool flash_is_erased(struct mms_ts_info *info) { struct i2c_client *client = info->client; u32 val; u16 addr; for (addr = 0; addr < (ISP_MAX_FW_SIZE / 4); addr++) { udelay(40); val = flash_readl(info, addr); if (val != 0xffffffff) { dev_dbg(&client->dev, "addr 0x%x not erased: 0x%08x != 0xffffffff\n", addr, val); return false; } } return true; } static int fw_write_image(struct mms_ts_info *info, const u8 * data, size_t len) { struct i2c_client *client = info->client; u16 addr = 0; for (addr = 0; addr < (len / 4); addr++, data += 4) { u32 val = get_unaligned_le32(data); u32 verify_val; int retries = 3; while (retries--) { flash_writel(info, addr, val); verify_val = flash_readl(info, addr); if (val == verify_val) break; dev_err(&client->dev, "mismatch @ addr 0x%x: 0x%x != 0x%x\n", addr, verify_val, val); continue; } if (retries < 0) return -ENXIO; } return 0; } static int fw_download(struct mms_ts_info *info, const u8 * data, size_t len) { struct i2c_client *client = info->client; u32 val; int ret = 0; int i; u32 *buf = kzalloc(ISP_CAL_DATA_SIZE * 4, GFP_KERNEL); if (!buf) { dev_err(&info->client->dev, "%s: failed to allocate memory\n", __func__); return -ENOMEM; } if (len % 4) { dev_err(&client->dev, "fw image size (%d) must be a multiple of 4 bytes\n", len); kfree(buf); return -EINVAL; } else if (len > ISP_MAX_FW_SIZE) { dev_err(&client->dev, "fw image is too big, %d > %d\n", len, ISP_MAX_FW_SIZE); kfree(buf); return -EINVAL; } dev_info(&client->dev, "fw download start\n"); 
/*
 * Tail of fw_download(): powers down, reboots into the bootloader, backs up
 * ISP_CAL_DATA_SIZE words of "calibration data", erases and verifies the
 * flash, rewrites the IC-info word, writes the image via fw_write_image(),
 * restores the backup and reboots normally; the err: path frees the buffer
 * and reboots normally too.
 * NOTE(review): both the backup and restore loops use the constant address
 * ISP_IC_INFO_ADDR for every iteration (never + i) — verify whether the
 * hardware auto-increments or whether this only ever touches one word.
 * Then gen_crc(): bit-serial CRC step over one byte (same tap structure as
 * mms100_calc_crc), and the start of isc_fw_download(): allocates the
 * ISC packet buffer and begins entering ISC mode (continued next span).
 */
info->pdata->power(0); gpio_direction_output(info->pdata->gpio_sda, 0); gpio_direction_output(info->pdata->gpio_scl, 0); gpio_direction_output(info->pdata->gpio_int, 0); hw_reboot_bootloader(info); dev_info(&client->dev, "calibration data backup\n"); for (i = 0; i < ISP_CAL_DATA_SIZE; i++) buf[i] = flash_readl(info, ISP_IC_INFO_ADDR); val = flash_readl(info, ISP_IC_INFO_ADDR); dev_info(&client->dev, "IC info: 0x%02x (%x)\n", val & 0xff, val); dev_info(&client->dev, "fw erase...\n"); flash_erase(info); if (!flash_is_erased(info)) { ret = -ENXIO; goto err; } dev_info(&client->dev, "fw write...\n"); /* XXX: what does this do?! */ flash_writel(info, ISP_IC_INFO_ADDR, 0xffffff00 | (val & 0xff)); usleep_range(1000, 1500); ret = fw_write_image(info, data, len); if (ret) goto err; usleep_range(1000, 1500); dev_info(&client->dev, "restoring data\n"); for (i = 0; i < ISP_CAL_DATA_SIZE; i++) flash_writel(info, ISP_IC_INFO_ADDR, buf[i]); kfree(buf); dev_info(&client->dev, "fw download done...\n"); hw_reboot_normal(info); msleep(200); return 0; err: dev_err(&client->dev, "fw download failed...\n"); kfree(buf); hw_reboot_normal(info); return ret; } #if defined(SEC_TSP_ISC_FW_UPDATE) static u16 gen_crc(u8 data, u16 pre_crc) { u16 crc; u16 cur; u16 temp; u16 bit_1; u16 bit_2; int i; crc = pre_crc; for (i = 7; i >= 0; i--) { cur = ((data >> i) & 0x01) ^ (crc & 0x0001); bit_1 = cur ^ (crc >> 11 & 0x01); bit_2 = cur ^ (crc >> 4 & 0x01); temp = (cur << 4) | (crc >> 12 & 0x0F); temp = (temp << 7) | (bit_1 << 6) | (crc >> 5 & 0x3F); temp = (temp << 4) | (bit_2 << 3) | (crc >> 1 & 0x0007); crc = temp; } return crc; } static int isc_fw_download(struct mms_ts_info *info, const u8 * data, size_t len) { u8 *buff; u16 crc_buf; int src_idx; int dest_idx; int ret; int i, j; buff = kzalloc(ISC_PKT_SIZE, GFP_KERNEL); if (!buff) { dev_err(&info->client->dev, "%s: failed to allocate memory\n", __func__); ret = -1; goto err_mem_alloc; } /* enterring ISC mode */ *buff = ISC_ENTER_ISC_DATA; ret = 
/*
 * Body of isc_fw_download(): enters ISC mode, then ISC update mode, then
 * writes the firmware as ISC_PKT_NUM packets.  Each packet carries a
 * header (ISC_CMD, data-write sub-command, packet index), data copied with
 * per-WORD_SIZE byte order reversed from the source image (offset +3), and
 * a gen_crc() checksum appended high byte first.  After each packet the
 * status register must read ISC_CONFIRM_CRC or the download aborts.  All
 * failure labels fall through the exit-ISC command, a normal reboot, and
 * kfree(buff) in reverse order of acquisition.
 */
i2c_smbus_write_byte_data(info->client, ISC_ENTER_ISC_CMD, *buff); if (ret < 0) { dev_err(&info->client->dev, "fail to enter ISC mode(err=%d)\n", ret); goto fail_to_isc_enter; } usleep_range(10000, 20000); dev_info(&info->client->dev, "Enter ISC mode\n"); /*enter ISC update mode */ *buff = ISC_ENTER_UPDATE_DATA; ret = i2c_smbus_write_i2c_block_data(info->client, ISC_CMD, ISC_ENTER_UPDATE_DATA_LEN, buff); if (ret < 0) { dev_err(&info->client->dev, "fail to enter ISC update mode(err=%d)\n", ret); goto fail_to_isc_update; } dev_info(&info->client->dev, "Enter ISC update mode\n"); /* firmware write */ *buff = ISC_CMD; *(buff + 1) = ISC_DATA_WRITE_SUB_CMD; for (i = 0; i < ISC_PKT_NUM; i++) { *(buff + 2) = i; crc_buf = gen_crc(*(buff + 2), ISC_DEFAULT_CRC); for (j = 0; j < ISC_PKT_DATA_SIZE; j++) { dest_idx = ISC_PKT_HEADER_SIZE + j; src_idx = i * ISC_PKT_DATA_SIZE + ((int)(j / WORD_SIZE)) * WORD_SIZE - (j % WORD_SIZE) + 3; *(buff + dest_idx) = *(data + src_idx); crc_buf = gen_crc(*(buff + dest_idx), crc_buf); } *(buff + ISC_PKT_DATA_SIZE + ISC_PKT_HEADER_SIZE + 1) = crc_buf & 0xFF; *(buff + ISC_PKT_DATA_SIZE + ISC_PKT_HEADER_SIZE) = crc_buf >> 8 & 0xFF; ret = i2c_master_send(info->client, buff, ISC_PKT_SIZE); if (ret < 0) { dev_err(&info->client->dev, "fail to firmware writing on packet %d.(%d)\n", i, ret); goto fail_to_fw_write; } usleep_range(1, 5); /* confirm CRC */ ret = i2c_smbus_read_byte_data(info->client, ISC_CHECK_STATUS_CMD); if (ret == ISC_CONFIRM_CRC) { dev_info(&info->client->dev, "updating %dth firmware data packet.\n", i); } else { dev_err(&info->client->dev, "fail to firmware update on %dth (%X).\n", i, ret); ret = -1; goto fail_to_confirm_crc; } } ret = 0; fail_to_confirm_crc: fail_to_fw_write: /* exit ISC mode */ *buff = ISC_EXIT_ISC_SUB_CMD; *(buff + 1) = ISC_EXIT_ISC_SUB_CMD2; i2c_smbus_write_i2c_block_data(info->client, ISC_CMD, 2, buff); usleep_range(10000, 20000); fail_to_isc_update: hw_reboot_normal(info); fail_to_isc_enter: kfree(buff); 
/*
 * End of isc_fw_download() (err_mem_alloc label).  Then:
 * - get_fw_version(): writes the MMS_CORE_VERSION register address and
 *   reads back 4 bytes; returns buf[0]/[1]/[2] for bootloader/core/config
 *   'area', 0 for anything else, or the negative i2c_transfer() error.
 * - get_panel_version(): reads MMS_COMPAT_GROUP with up to 3 retries,
 *   returning the last smbus result (may still be negative).
 * - a commented-out mms_ts_enable()/mms_ts_disable() pair (kept as-is).
 * - start of mms_ts_fw_info(): caches the config version in fw_ic_ver and
 *   logs it; the ver < 0 error path continues in the next span.
 */
err_mem_alloc: return ret; } #endif /* SEC_TSP_ISC_FW_UPDATE */ static int get_fw_version(struct mms_ts_info *info, u8 area) { struct i2c_client *client = info->client; struct i2c_adapter *adapter = client->adapter; struct i2c_msg msg; u8 reg = MMS_CORE_VERSION; int ret; unsigned char buf[4]; msg.addr = client->addr; msg.flags = 0x00; msg.len = 1; msg.buf = &reg; ret = i2c_transfer(adapter, &msg, 1); if (ret >= 0) { msg.addr = client->addr; msg.flags = I2C_M_RD; msg.len = 4; msg.buf = buf; ret = i2c_transfer(adapter, &msg, 1); } if (ret < 0) { pr_err("[TSP] : read error : [%d]", ret); return ret; } if (area == SEC_BOOTLOADER) return buf[0]; else if (area == SEC_CORE) return buf[1]; else if (area == SEC_CONFIG) return buf[2]; else return 0; } static int get_panel_version(struct mms_ts_info *info) { int ret; int retries = 3; /* this seems to fail sometimes after a reset.. retry a few times */ do { ret = i2c_smbus_read_byte_data(info->client, MMS_COMPAT_GROUP); } while (ret < 0 && retries-- > 0); return ret; } /* static int mms_ts_enable(struct mms_ts_info *info, int wakeupcmd) { mutex_lock(&info->lock); if (info->enabled) goto out; if (wakeupcmd == 1) { i2c_smbus_write_byte_data(info->client, 0, 0); usleep_range(3000, 5000); } info->enabled = true; enable_irq(info->irq); out: mutex_unlock(&info->lock); return 0; } static int mms_ts_disable(struct mms_ts_info *info, int sleepcmd) { mutex_lock(&info->lock); if (!info->enabled) goto out; disable_irq_nosync(info->irq); if (sleepcmd == 1) { i2c_smbus_write_byte_data(info->client, MMS_MODE_CONTROL, 0); usleep_range(10000, 12000); } info->enabled = false; touch_is_pressed = 0; out: mutex_unlock(&info->lock); return 0; } */ static int mms_ts_fw_info(struct mms_ts_info *info) { struct i2c_client *client = info->client; int ret = 0; int ver; ver = get_fw_version(info, SEC_CONFIG); info->fw_ic_ver = ver; dev_info(&client->dev, "[TSP]fw version 0x%02x !!!!\n", ver); if (ver < 0) { ret = 1; dev_err(&client->dev, "i2c fail...tsp 
driver unload.\n"); return ret; }
/*
 * (span starts inside a string literal split across the previous span.)
 * Tail of mms_ts_fw_info(): returns 1 when the version read failed or
 * mux_fw_flash platform data is missing, 0 otherwise.  Then:
 * - mms_ts_fw_load(): picks the expected binary version from the 'ldi'
 *   argument ('N' defers to info->ldi; 'M' -> FW_VERSION_M, else
 *   FW_VERSION_L), skips flashing when !force and the IC version is
 *   already >= that (and != 0xff), otherwise retries
 *   mms100_ISC_download_mbinary() up to 3 times.  NOTE(review): the
 *   adjacent "out:"/"done:" labels fall through to the same return.
 * - set_cmd_result(): appends 'len' bytes of 'buff' to cmd_result.
 * - start of get_data(): register write + variable-size read via
 *   i2c_transfer into a VLA (continued in the next span).
 */
if (!info->pdata || !info->pdata->mux_fw_flash) { ret = 1; dev_err(&client->dev, "fw cannot be updated, missing platform data\n"); return ret; } return ret; } static int mms_ts_fw_load(struct mms_ts_info *info, bool force, char ldi) { struct i2c_client *client = info->client; int ret = 0; int ver; int bin_ver; int retries = 3; ver = get_fw_version(info, SEC_CONFIG); info->fw_ic_ver = ver; dev_info(&client->dev, "[TSP]fw version 0x%02x !!!!\n", ver); if (!info->pdata || !info->pdata->mux_fw_flash) { ret = 1; dev_err(&client->dev, "fw cannot be updated, missing platform data\n"); goto out; } if (ldi == 'N') { if (info->ldi == 'M') bin_ver = FW_VERSION_M; else bin_ver = FW_VERSION_L; } else { if (ldi == 'M') bin_ver = FW_VERSION_M; else bin_ver = FW_VERSION_L; } if (!force) { if ((ver >= bin_ver) && (ver != 0xff)) { dev_info(&client->dev, "fw version update does not need\n"); goto done; } } while (retries--) { ret = mms100_ISC_download_mbinary(info); ver = get_fw_version(info, SEC_CONFIG); info->fw_ic_ver = ver; if (ret == 0) { pr_err("[TSP] mms100_ISC_download_mbinary success"); goto done; } else { pr_err("[TSP] mms100_ISC_download_mbinary fail [%d]", ret); ret = 1; } dev_err(&client->dev, "retrying flashing\n"); } out: done: return ret; } #ifdef SEC_TSP_FACTORY_TEST static void set_cmd_result(struct mms_ts_info *info, char *buff, int len) { strncat(info->cmd_result, buff, len); } static int get_data(struct mms_ts_info *info, u8 addr, u8 size, u8 *array) { struct i2c_client *client = info->client; struct i2c_adapter *adapter = client->adapter; struct i2c_msg msg; u8 reg = addr; unsigned char buf[size]; int ret; msg.addr = client->addr; msg.flags = 0x00; msg.len = 1; msg.buf = &reg; ret = i2c_transfer(adapter, &msg, 1); if (ret >= 0) { msg.addr = client->addr; msg.flags = I2C_M_RD; msg.len = size; msg.buf = buf; ret = i2c_transfer(adapter, &msg, 1); } if (ret < 0) { pr_err("[TSP] : read error : [%d]", ret); return ret; } memcpy(array, 
/*
 * Tail of get_data() (copies the read buffer out, returns 'size').  Then:
 * - get_intensity_data(): with the IRQ disabled, issues CMD_GET_INTEN per
 *   RX line, reads the result length and payload via get_data(), decodes
 *   little-endian 16-bit samples (values > 32767 clamped to 0), stores
 *   them in info->intensity[], tracks min/max and reports "min,max"
 *   through set_cmd_result().
 * - start of get_raw_data(): enters test mode (CMD_ENTER_TEST) and polls
 *   the INT gpio / 0x0F-0x10 registers until event type 0x0C is seen
 *   (continued in the next span).
 */
&buf, size); return size; } static void get_intensity_data(struct mms_ts_info *info) { u8 w_buf[4]; u8 r_buf; u8 read_buffer[60] = {0}; int i, j; int ret; u16 max_value = 0, min_value = 0; u16 raw_data; char buff[TSP_CMD_STR_LEN] = {0}; disable_irq(info->irq); w_buf[0] = ADDR_UNIV_CMD; w_buf[1] = CMD_GET_INTEN; w_buf[2] = 0xFF; for (i = 0; i < RX_NUM; i++) { w_buf[3] = i; ret = i2c_smbus_write_i2c_block_data(info->client, w_buf[0], 3, &w_buf[1]); if (ret < 0) goto err_i2c; usleep_range(1, 5); ret = i2c_smbus_read_i2c_block_data(info->client, CMD_RESULT_SZ, 1, &r_buf); if (ret < 0) goto err_i2c; ret = get_data(info, CMD_RESULT, r_buf, read_buffer); if (ret < 0) goto err_i2c; for (j = 0; j < r_buf/2; j++) { raw_data = read_buffer[2*j] | (read_buffer[2*j+1] << 8); if (raw_data > 32767) raw_data = 0; if (i == 0 && j == 0) { max_value = min_value = raw_data; } else { max_value = max(max_value, raw_data); min_value = min(min_value, raw_data); } info->intensity[i * info->tx_num + j] = raw_data; dev_dbg(&info->client->dev, "[TSP] intensity[%d][%d] = %d\n", j, i, info->intensity[i * info->tx_num + j]); } } snprintf(buff, sizeof(buff), "%d,%d", min_value, max_value); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); enable_irq(info->irq); return; err_i2c: dev_err(&info->client->dev, "%s: fail to i2c (cmd=%d)\n", __func__, MMS_VSC_CMD_INTENSITY); } static void get_raw_data(struct mms_ts_info *info, u8 cmd) { u8 w_buf[4]; u8 r_buf = 0; u8 read_buffer[60] = {0}; int ret; int i, j; int max_value = 0, min_value = 0; int raw_data; int retry; char buff[TSP_CMD_STR_LEN] = {0}; int gpio = info->pdata->gpio_int; disable_irq(info->irq); ret = i2c_smbus_write_byte_data(info->client, ADDR_UNIV_CMD, CMD_ENTER_TEST); if (ret < 0) goto err_i2c; /* event type check */ retry = 1; while (retry) { while (gpio_get_value(gpio)) udelay(100); ret = i2c_smbus_read_i2c_block_data(info->client, 0x0F, 1, &r_buf); if (ret < 0) goto err_i2c; ret = i2c_smbus_read_i2c_block_data(info->client, 0x10, 
/*
 * Middle of get_raw_data(): after event type 0x0C arrives, runs the
 * CM-delta or CM-abs self test (logs PASS/FAIL from the one-byte result),
 * then fetches per-RX-line result blocks with CMD_GET_DELTA/CMD_GET_ABS,
 * decodes little-endian 16-bit samples into info->inspection[] /
 * info->raw[] / info->reference[] depending on 'cmd', and tracks min/max.
 * Each wait loop busy-polls the INT gpio with udelay(100).
 */
1, &r_buf); if (ret < 0) goto err_i2c; dev_info(&info->client->dev, "event type = 0x%x\n", r_buf); if (r_buf == 0x0C) retry = 0; } w_buf[0] = ADDR_UNIV_CMD; if (cmd == MMS_VSC_CMD_CM_DELTA) w_buf[1] = CMD_CM_DELTA; else w_buf[1] = CMD_CM_ABS; ret = i2c_smbus_write_i2c_block_data(info->client, w_buf[0], 1, &w_buf[1]); if (ret < 0) goto err_i2c; while (gpio_get_value(gpio)) udelay(100); ret = i2c_smbus_read_i2c_block_data(info->client, CMD_RESULT_SZ, 1, &r_buf); if (ret < 0) goto err_i2c; ret = i2c_smbus_read_i2c_block_data(info->client, CMD_RESULT, 1, &r_buf); if (ret < 0) goto err_i2c; if (r_buf == 1) dev_info(&info->client->dev, "PASS\n"); else dev_info(&info->client->dev, "FAIL\n"); if (cmd == MMS_VSC_CMD_CM_DELTA) w_buf[1] = CMD_GET_DELTA; else w_buf[1] = CMD_GET_ABS; w_buf[2] = 0xFF; for (i = 0; i < RX_NUM; i++) { w_buf[3] = i; ret = i2c_smbus_write_i2c_block_data(info->client, w_buf[0], 3, &w_buf[1]); if (ret < 0) goto err_i2c; while (gpio_get_value(gpio)) udelay(100); ret = i2c_smbus_read_i2c_block_data(info->client, CMD_RESULT_SZ, 1, &r_buf); if (ret < 0) goto err_i2c; ret = get_data(info, CMD_RESULT, r_buf, read_buffer); if (ret < 0) goto err_i2c; for (j = 0; j < info->tx_num; j++) { raw_data = read_buffer[2*j] | (read_buffer[2*j+1] << 8); if (i == 0 && j == 0) { max_value = min_value = raw_data; } else { max_value = max(max_value, raw_data); min_value = min(min_value, raw_data); } if (cmd == MMS_VSC_CMD_CM_DELTA) { info->inspection[i * info->tx_num + j] = raw_data; dev_dbg(&info->client->dev, "[TSP] delta[%d][%d] = %d\n", j, i, info->inspection[i * info->tx_num + j]); } else if (cmd == MMS_VSC_CMD_CM_ABS) { info->raw[i * info->tx_num + j] = raw_data; dev_dbg(&info->client->dev, "[TSP] raw[%d][%d] = %d\n", j, i, info->raw[i * info->tx_num + j]); } else if (cmd == MMS_VSC_CMD_REFER) { info->reference[i * info->tx_num + j] = raw_data; dev_dbg(&info->client->dev, "[TSP] reference[%d][%d] = %d\n", j, i, info->reference[i * info->tx_num + j]); } } } ret = 
/*
 * Tail of get_raw_data(): exits test mode, reports "min,max", releases all
 * fingers, power-on-resets the chip, reapplies the TA connect/disconnect
 * setting for fw < 0x18 (register 0x33 = 1/2), restores noise mode and
 * re-enables the IRQ.  Then begins get_raw_data_all(): builds the 6-byte
 * vendor-specific command; MMS_VSC_CMD_EXIT just leaves test mode, while
 * CM_DELTA/CM_ABS need an explicit enter (the trailing block comment
 * continues into the next span).
 */
i2c_smbus_write_byte_data(info->client, ADDR_UNIV_CMD, CMD_EXIT_TEST); snprintf(buff, sizeof(buff), "%d,%d", min_value, max_value); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); touch_is_pressed = 0; release_all_fingers(info); mms_pwr_on_reset(info); info->enabled = true; if (info->fw_ic_ver < 0x18) { if (info->ta_status) { dev_notice(&info->client->dev, "TA connect!!!\n"); i2c_smbus_write_byte_data(info->client, 0x33, 0x1); } else { dev_notice(&info->client->dev, "TA disconnect!!!\n"); i2c_smbus_write_byte_data(info->client, 0x33, 0x2); } } mms_set_noise_mode(info); enable_irq(info->irq); return; err_i2c: dev_err(&info->client->dev, "%s: fail to i2c (cmd=%d)\n", __func__, cmd); } static void get_raw_data_all(struct mms_ts_info *info, u8 cmd) { u8 w_buf[6]; u8 read_buffer[2]; /* 52 */ int gpio; int ret; int i, j; u32 max_value = 0, min_value = 0; u32 raw_data; char buff[TSP_CMD_STR_LEN] = {0}; gpio = info->pdata->gpio_int; /* gpio = msm_irq_to_gpio(info->irq); */ disable_irq(info->irq); w_buf[0] = MMS_VSC_CMD; /* vendor specific command id */ w_buf[1] = MMS_VSC_MODE; /* mode of vendor */ w_buf[2] = 0; /* tx line */ w_buf[3] = 0; /* rx line */ w_buf[4] = 0; /* reserved */ w_buf[5] = 0; /* sub command */ if (cmd == MMS_VSC_CMD_EXIT) { w_buf[5] = MMS_VSC_CMD_EXIT; /* exit test mode */ ret = i2c_smbus_write_i2c_block_data(info->client, w_buf[0], 5, &w_buf[1]); if (ret < 0) goto err_i2c; enable_irq(info->irq); msleep(200); return; } /* MMS_VSC_CMD_CM_DELTA or MMS_VSC_CMD_CM_ABS * this two mode need to enter the test mode * exit command must be followed by testing. 
*/
/*
 * (span starts with the close of a block comment begun in the previous
 * span.)  Body of get_raw_data_all(): for CM_DELTA/CM_ABS first sends
 * MMS_VSC_CMD_ENTER and waits for the INT gpio to drop; then for every
 * rx/tx node issues the per-node vendor command, reads two bytes from
 * 0xBF, assembles a 16-bit sample and stores it in intensity[]/
 * inspection[]/raw[]/reference[] according to 'cmd', tracking min/max and
 * reporting "min,max" via set_cmd_result().  Ends with the start of the
 * show_close_tsp_test() sysfs handler (continued in the next span).
 */
if (cmd == MMS_VSC_CMD_CM_DELTA || cmd == MMS_VSC_CMD_CM_ABS) { /* enter the debug mode */ w_buf[2] = 0x0; /* tx */ w_buf[3] = 0x0; /* rx */ w_buf[5] = MMS_VSC_CMD_ENTER; ret = i2c_smbus_write_i2c_block_data(info->client, w_buf[0], 5, &w_buf[1]); if (ret < 0) goto err_i2c; /* wating for the interrupt */ while (gpio_get_value(gpio)) udelay(100); } for (i = 0; i < RX_NUM; i++) { for (j = 0; j < info->tx_num; j++) { w_buf[2] = j; /* tx */ w_buf[3] = i; /* rx */ w_buf[5] = cmd; ret = i2c_smbus_write_i2c_block_data(info->client, w_buf[0], 5, &w_buf[1]); if (ret < 0) goto err_i2c; usleep_range(1, 5); ret = i2c_smbus_read_i2c_block_data(info->client, 0xBF, 2, read_buffer); if (ret < 0) goto err_i2c; raw_data = ((u16) read_buffer[1] << 8) | read_buffer[0]; if (i == 0 && j == 0) { max_value = min_value = raw_data; } else { max_value = max(max_value, raw_data); min_value = min(min_value, raw_data); } if (cmd == MMS_VSC_CMD_INTENSITY) { info->intensity[i * info->tx_num + j] = raw_data; dev_dbg(&info->client->dev, "[TSP] intensity[%d][%d] = %d\n", j, i, info->intensity[i * info->tx_num + j]); } else if (cmd == MMS_VSC_CMD_CM_DELTA) { info->inspection[i * info->tx_num + j] = raw_data; dev_dbg(&info->client->dev, "[TSP] delta[%d][%d] = %d\n", j, i, info->inspection[i * info->tx_num + j]); } else if (cmd == MMS_VSC_CMD_CM_ABS) { info->raw[i * info->tx_num + j] = raw_data; dev_dbg(&info->client->dev, "[TSP] raw[%d][%d] = %d\n", j, i, info->raw[i * info->tx_num + j]); } else if (cmd == MMS_VSC_CMD_REFER) { info->reference[i * info->tx_num + j] = raw_data; dev_dbg(&info->client->dev, "[TSP] reference[%d][%d] = %d\n", j, i, info->reference[i * info->tx_num + j]); } } } snprintf(buff, sizeof(buff), "%d,%d", min_value, max_value); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); enable_irq(info->irq); return; err_i2c: dev_err(&info->client->dev, "%s: fail to i2c (cmd=%d)\n", __func__, cmd); } static ssize_t show_close_tsp_test(struct device *dev, struct device_attribute 
/*
 * Tail of show_close_tsp_test() (exits test mode, clears ft_flag, prints
 * "0").  Then the factory-command helpers:
 * - set_default_result(): resets cmd_result to "<cmd>:".  NOTE(review):
 *   memset uses ARRAY_SIZE(info->cmd_result) — confirm cmd_result is a
 *   char array, not a pointer, or this clears far too little.
 * - check_rx_tx_num(): validates cmd_param[0]/[1] against tx_num/RX_NUM,
 *   reports "NG" and returns -1 on error, else the flat node index.
 * - not_support_cmd(): reports "NA" with cmd_state = 4.
 * - start of mms_ts_core_fw_load(): reads the IC config version and
 *   request_firmware()s the LDI-specific core image (continued next span).
 */
*attr, char *buf) { struct mms_ts_info *info = dev_get_drvdata(dev); get_raw_data_all(info, MMS_VSC_CMD_EXIT); info->ft_flag = 0; return snprintf(buf, TSP_BUF_SIZE, "%u\n", 0); } static void set_default_result(struct mms_ts_info *info) { char delim = ':'; memset(info->cmd_result, 0x00, ARRAY_SIZE(info->cmd_result)); memcpy(info->cmd_result, info->cmd, strlen(info->cmd)); strncat(info->cmd_result, &delim, 1); } static int check_rx_tx_num(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[TSP_CMD_STR_LEN] = {0}; int node; if (info->cmd_param[0] < 0 || info->cmd_param[0] >= info->tx_num || info->cmd_param[1] < 0 || info->cmd_param[1] >= RX_NUM) { snprintf(buff, sizeof(buff) , "%s", "NG"); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 3; dev_info(&info->client->dev, "%s: parameter error: %u,%u\n", __func__, info->cmd_param[0], info->cmd_param[1]); node = -1; return node; } node = info->cmd_param[1] * info->tx_num + info->cmd_param[0]; dev_info(&info->client->dev, "%s: node = %d\n", __func__, node); return node; } static void not_support_cmd(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[16] = {0}; set_default_result(info); sprintf(buff, "%s", "NA"); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 4; dev_info(&info->client->dev, "%s: \"%s(%d)\"\n", __func__, buff, strnlen(buff, sizeof(buff))); return; } static int mms_ts_core_fw_load(struct mms_ts_info *info) { struct i2c_client *client = info->client; struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); int ret = 0; int ver = 0, fw_bin_ver = 0; int retries = 5; const u8 *buff = 0; long fsize = 0; const struct firmware *tsp_fw = NULL; ver = get_fw_version(info, SEC_CONFIG); info->fw_ic_ver = ver; dev_info(&client->dev, "Entered REQ_FW\n"); if (info->ldi == 'L') { fw_bin_ver = FW_VERSION_L; ret = request_firmware(&tsp_fw, "tsp_melfas/note/melfasl.fw", 
/*
 * Middle of mms_ts_core_fw_load(): loads melfasm.fw for non-'L' LDIs,
 * copies the firmware into a kzalloc'd buffer and releases it, then with
 * the IRQ disabled retries fw_download() up to 5 times.  Each attempt
 * locks the I2C adapter and routes the bus pins to GPIO via
 * mux_fw_flash(true/false); success is confirmed by re-reading the config
 * version and comparing it to the expected binary version (comparison
 * concludes in the next span).
 */
&(client->dev)); } else { fw_bin_ver = FW_VERSION_M; ret = request_firmware(&tsp_fw, "tsp_melfas/note/melfasm.fw", &(client->dev)); } dev_info(&client->dev, "fw_ic_ver = 0x%02x, fw_bin_ver = 0x%02x\n", info->fw_ic_ver, fw_bin_ver); if (ret) { dev_err(&client->dev, "request firmware error!!\n"); return 1; } fsize = tsp_fw->size; buff = kzalloc((size_t)fsize, GFP_KERNEL); if (!buff) { dev_err(&client->dev, "fail to alloc buffer for fw\n"); info->cmd_state = 3; release_firmware(tsp_fw); return 1; } memcpy((void *)buff, tsp_fw->data, fsize); release_firmware(tsp_fw); disable_irq(info->irq); while (retries--) { i2c_lock_adapter(adapter); info->pdata->mux_fw_flash(true); ret = fw_download(info, (const u8 *)buff, (const size_t)fsize); info->pdata->mux_fw_flash(false); i2c_unlock_adapter(adapter); if (ret < 0) { dev_err(&client->dev, "retrying flashing\n"); continue; } ver = get_fw_version(info, SEC_CONFIG); info->fw_ic_ver = ver; if (ver == fw_bin_ver) { dev_info(&client->dev, "fw update done. 
ver = 0x%02x\n", ver); enable_irq(info->irq); kfree(buff); return 0; } else { dev_err(&client->dev, "ERROR : fw version is still wrong (0x%x != 0x%x)\n", ver, fw_bin_ver); } dev_err(&client->dev, "retrying flashing\n"); } kfree(buff); return 1; } static void fw_update(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; struct i2c_client *client = info->client; struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); int ret = 0; int ver = 0, fw_bin_ver = 0; int retries = 5; const u8 *buff = 0; mm_segment_t old_fs = {0}; struct file *fp = NULL; long fsize = 0, nread = 0; const struct firmware *tsp_fw = NULL; char fw_path[MAX_FW_PATH+1]; char result[16] = {0}; if (info->panel == 'A') { dev_dbg(&client->dev, "support only Melfas panel\n"); dev_dbg(&client->dev, "fw update do not excute\n"); goto not_support; } set_default_result(info); if (info->ldi == 'L') fw_bin_ver = FW_VERSION_L; else fw_bin_ver = FW_VERSION_M; dev_info(&client->dev, "fw_ic_ver = 0x%02x, fw_bin_ver = 0x%02x\n", info->fw_ic_ver, fw_bin_ver); if (info->cmd_param[0] == 0) { if (info->fw_core_ver > 0x53) { dev_info(&client->dev, "fw version update does not need\n"); info->cmd_state = 2; goto do_not_need_update; } else if (info->fw_core_ver == 0x53) { if (info->fw_ic_ver >= fw_bin_ver) { dev_info(&client->dev, "fw version update does not need\n"); info->cmd_state = 2; goto do_not_need_update; } } else { /* core < 0x53 */ dev_info(&client->dev, "fw version update need(core:0x%x)\n", info->fw_core_ver); } } switch (info->cmd_param[0]) { case BUILT_IN: dev_info(&client->dev, "built in fw is loaded!!\n"); disable_irq(info->irq); while (retries--) { #if 0 ret = mms100_ISC_download_mbinary(info); #else ret = mms_ts_core_fw_load(info); #endif ver = get_fw_version(info, SEC_CONFIG); info->fw_ic_ver = ver; if (ret == 0) { pr_err("[TSP] mms100_ISC_download_mbinary success"); info->cmd_state = 2; enable_irq(info->irq); return; } else { pr_err("[TSP] 
mms100_ISC_download_mbinary fail[%d]", ret); info->cmd_state = 3; } } enable_irq(info->irq); return; break; case UMS: old_fs = get_fs(); set_fs(get_ds()); snprintf(fw_path, MAX_FW_PATH, "/sdcard/%s", TSP_FW_FILENAME); fp = filp_open(fw_path, O_RDONLY, 0); if (IS_ERR(fp)) { dev_err(&client->dev, "file %s open error:%d\n", fw_path, (s32)fp); info->cmd_state = 3; goto err_open; } fsize = fp->f_path.dentry->d_inode->i_size; buff = kzalloc((size_t)fsize, GFP_KERNEL); if (!buff) { dev_err(&client->dev, "fail to alloc buffer for fw\n"); info->cmd_state = 3; goto err_alloc; } nread = vfs_read(fp, (char __user *)buff, fsize, &fp->f_pos); if (nread != fsize) { /*dev_err("fail to read file %s (nread = %d)\n", fw_path, nread);*/ info->cmd_state = 3; goto err_fw_size; } filp_close(fp, current->files); set_fs(old_fs); dev_info(&client->dev, "ums fw is loaded!!\n"); break; case REQ_FW: dev_info(&client->dev, "Entered REQ_FW case\n"); ret = request_firmware(&tsp_fw, TSP_FW_FILENAME, &(client->dev)); if (ret) { dev_err(&client->dev, "request firmware error!!\n"); goto not_support; } fsize = tsp_fw->size; buff = kzalloc((size_t)fsize, GFP_KERNEL); if (!buff) { dev_err(&client->dev, "fail to alloc buffer for fw\n"); info->cmd_state = 3; release_firmware(tsp_fw); goto not_support; } memcpy((void *)buff, tsp_fw->data, fsize); release_firmware(tsp_fw); break; default: dev_err(&client->dev, "invalid fw file type!!\n"); goto not_support; } disable_irq(info->irq); while (retries--) { i2c_lock_adapter(adapter); info->pdata->mux_fw_flash(true); ret = fw_download(info, (const u8 *)buff, (const size_t)fsize); info->pdata->mux_fw_flash(false); i2c_unlock_adapter(adapter); if (ret < 0) { dev_err(&client->dev, "retrying flashing\n"); continue; } ver = get_fw_version(info, SEC_CONFIG); info->fw_ic_ver = ver; if (info->cmd_param[0] == 1 || info->cmd_param[0] == 2) { dev_info(&client->dev, "fw update done. 
ver = 0x%02x\n", ver); info->cmd_state = 2; snprintf(result, sizeof(result) , "%s", "OK"); set_cmd_result(info, result, strnlen(result, sizeof(result))); enable_irq(info->irq); kfree(buff); return; } else if (ver == fw_bin_ver) { dev_info(&client->dev, "fw update done. ver = 0x%02x\n", ver); info->cmd_state = 2; snprintf(result, sizeof(result) , "%s", "OK"); set_cmd_result(info, result, strnlen(result, sizeof(result))); enable_irq(info->irq); return; } else { dev_err(&client->dev, "ERROR : fw version is still wrong (0x%x != 0x%x)\n", ver, fw_bin_ver); } dev_err(&client->dev, "retrying flashing\n"); } if (fp != NULL) { err_fw_size: kfree(buff); err_alloc: filp_close(fp, NULL); err_open: set_fs(old_fs); } not_support: do_not_need_update: snprintf(result, sizeof(result) , "%s", "NG"); set_cmd_result(info, result, strnlen(result, sizeof(result))); return; } static void get_fw_ver_bin(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[16] = {0}; set_default_result(info); if (info->ldi == 'L') snprintf(buff, sizeof(buff), "ME0053%02x", FW_VERSION_L); else snprintf(buff, sizeof(buff), "ME0053%02x", FW_VERSION_M); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 2; dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff, strnlen(buff, sizeof(buff))); } static void get_fw_ver_ic(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[16] = {0}; set_default_result(info); if (info->enabled) { info->fw_core_ver = get_fw_version(info, SEC_CORE); info->fw_ic_ver = get_fw_version(info, SEC_CONFIG); } snprintf(buff, sizeof(buff), "ME00%02x%02x", info->fw_core_ver, info->fw_ic_ver); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 2; dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff, strnlen(buff, sizeof(buff))); } static void get_config_ver(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[20] = 
{0}; set_default_result(info); if (info->ldi == 'L') snprintf(buff, sizeof(buff), "N7100_Me_0921_L"); else snprintf(buff, sizeof(buff), "N7100_Me_0911_M"); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 2; dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff, strnlen(buff, sizeof(buff))); } static void get_threshold(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[16] = {0}; int threshold; set_default_result(info); threshold = i2c_smbus_read_byte_data(info->client, 0x05); if (threshold < 0) { snprintf(buff, sizeof(buff), "%s", "NG"); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 3; return; } snprintf(buff, sizeof(buff), "%d", threshold); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 2; dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff, strnlen(buff, sizeof(buff))); } static void module_off_master(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[3] = {0}; mutex_lock(&info->lock); if (info->enabled) { disable_irq(info->irq); info->enabled = false; touch_is_pressed = 0; } mutex_unlock(&info->lock); info->pdata->power(0); if (info->pdata->is_vdd_on() == 0) snprintf(buff, sizeof(buff), "%s", "OK"); else snprintf(buff, sizeof(buff), "%s", "NG"); set_default_result(info); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); if (strncmp(buff, "OK", 2) == 0) info->cmd_state = 2; else info->cmd_state = 3; dev_info(&info->client->dev, "%s: %s\n", __func__, buff); } static void module_on_master(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[3] = {0}; mms_pwr_on_reset(info); mutex_lock(&info->lock); if (!info->enabled) { enable_irq(info->irq); info->enabled = true; } mutex_unlock(&info->lock); if (info->pdata->is_vdd_on() == 1) snprintf(buff, sizeof(buff), "%s", "OK"); else snprintf(buff, sizeof(buff), "%s", "NG"); set_default_result(info); 
set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); if (strncmp(buff, "OK", 2) == 0) info->cmd_state = 2; else info->cmd_state = 3; dev_info(&info->client->dev, "%s: %s\n", __func__, buff); } /* static void module_off_slave(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; not_support_cmd(info); } static void module_on_slave(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; not_support_cmd(info); } */ static void get_chip_vendor(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[16] = {0}; set_default_result(info); snprintf(buff, sizeof(buff), "%s", "MELFAS"); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 2; dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff, strnlen(buff, sizeof(buff))); } static void get_chip_name(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[16] = {0}; set_default_result(info); snprintf(buff, sizeof(buff), "%s", "MMS152"); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 2; dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff, strnlen(buff, sizeof(buff))); } static void get_reference(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[16] = {0}; unsigned int val; int node; set_default_result(info); node = check_rx_tx_num(info); if (node < 0) return; val = info->reference[node]; snprintf(buff, sizeof(buff), "%u", val); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 2; dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff, strnlen(buff, sizeof(buff))); } static void get_cm_abs(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[16] = {0}; unsigned int val; int node; set_default_result(info); node = check_rx_tx_num(info); if (node < 0) return; val = info->raw[node]; snprintf(buff, sizeof(buff), "%u", val); 
set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 2; dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff, strnlen(buff, sizeof(buff))); } static void get_cm_delta(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[16] = {0}; unsigned int val; int node; set_default_result(info); node = check_rx_tx_num(info); if (node < 0) return; val = info->inspection[node]; snprintf(buff, sizeof(buff), "%u", val); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 2; dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff, strnlen(buff, sizeof(buff))); } static void get_intensity(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[16] = {0}; unsigned int val; int node; set_default_result(info); node = check_rx_tx_num(info); if (node < 0) return; val = info->intensity[node]; snprintf(buff, sizeof(buff), "%u", val); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 2; dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff, strnlen(buff, sizeof(buff))); } static void get_x_num(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[16] = {0}; int val; u8 r_buf[2]; int ret; set_default_result(info); if (info->fw_core_ver == 0x45) { val = i2c_smbus_read_byte_data(info->client, 0xEF); if (val < 0) { snprintf(buff, sizeof(buff), "%s", "NG"); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 3; dev_info(&info->client->dev, "%s: fail to read num of x (%d).\n", __func__, val); return; } } else if (info->fw_ic_ver < 0x29) { ret = i2c_smbus_read_i2c_block_data(info->client, ADDR_CH_NUM, 2, r_buf); val = r_buf[0]; if (ret < 0) { snprintf(buff, sizeof(buff), "%s", "NG"); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 3; dev_info(&info->client->dev, "%s: fail to read num of x (%d).\n", __func__, val); return; } } else { val = 30; } 
snprintf(buff, sizeof(buff), "%u", val); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 2; dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff, strnlen(buff, sizeof(buff))); } static void get_y_num(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; char buff[16] = {0}; int val; u8 r_buf[2]; int ret; set_default_result(info); if (info->fw_core_ver == 0x45) { val = i2c_smbus_read_byte_data(info->client, 0xEE); if (val < 0) { snprintf(buff, sizeof(buff), "%s", "NG"); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 3; dev_info(&info->client->dev, "%s: fail to read num of y (%d).\n", __func__, val); return; } } else if (info->fw_ic_ver < 0x29) { ret = i2c_smbus_read_i2c_block_data(info->client, ADDR_CH_NUM, 2, r_buf); val = r_buf[1]; if (ret < 0) { snprintf(buff, sizeof(buff), "%s", "NG"); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 3; dev_info(&info->client->dev, "%s: fail to read num of x (%d).\n", __func__, val); return; } } else { val = 17; } snprintf(buff, sizeof(buff), "%u", val); set_cmd_result(info, buff, strnlen(buff, sizeof(buff))); info->cmd_state = 2; dev_info(&info->client->dev, "%s: %s(%d)\n", __func__, buff, strnlen(buff, sizeof(buff))); } static void run_reference_read(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; set_default_result(info); if (info->fw_ic_ver == 0x45) get_raw_data_all(info, MMS_VSC_CMD_REFER); else get_raw_data(info, MMS_VSC_CMD_REFER); info->cmd_state = 2; /* dev_info(&info->client->dev, "%s: %s(%d)\n", __func__); */ } static void run_cm_abs_read(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; set_default_result(info); if (info->fw_ic_ver == 0x45) { get_raw_data_all(info, MMS_VSC_CMD_CM_ABS); get_raw_data_all(info, MMS_VSC_CMD_EXIT); } else { get_raw_data(info, MMS_VSC_CMD_CM_ABS); } info->cmd_state = 2; /* dev_info(&info->client->dev, "%s: 
%s(%d)\n", __func__); */ } static void run_cm_delta_read(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; set_default_result(info); if (info->fw_ic_ver == 0x45) { get_raw_data_all(info, MMS_VSC_CMD_CM_DELTA); get_raw_data_all(info, MMS_VSC_CMD_EXIT); } else { get_raw_data(info, MMS_VSC_CMD_CM_DELTA); } info->cmd_state = 2; /* dev_info(&info->client->dev, "%s: %s(%d)\n", __func__); */ } static void run_intensity_read(void *device_data) { struct mms_ts_info *info = (struct mms_ts_info *)device_data; set_default_result(info); if (info->fw_ic_ver == 0x45) get_raw_data_all(info, MMS_VSC_CMD_INTENSITY); else get_intensity_data(info); info->cmd_state = 2; /* dev_info(&info->client->dev, "%s: %s(%d)\n", __func__); */ } static ssize_t store_cmd(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct mms_ts_info *info = dev_get_drvdata(dev); struct i2c_client *client = info->client; char *cur, *start, *end; char buff[TSP_CMD_STR_LEN] = {0}; int len, i; struct tsp_cmd *tsp_cmd_ptr = NULL; char delim = ','; bool cmd_found = false; int param_cnt = 0; int ret; if (info->cmd_is_running == true) { dev_err(&info->client->dev, "tsp_cmd: other cmd is running.\n"); goto err_out; } /* check lock */ mutex_lock(&info->cmd_lock); info->cmd_is_running = true; mutex_unlock(&info->cmd_lock); info->cmd_state = 1; for (i = 0; i < ARRAY_SIZE(info->cmd_param); i++) info->cmd_param[i] = 0; len = (int)count; if (*(buf + len - 1) == '\n') len--; memset(info->cmd, 0x00, ARRAY_SIZE(info->cmd)); memcpy(info->cmd, buf, len); cur = strchr(buf, (int)delim); if (cur) memcpy(buff, buf, cur - buf); else memcpy(buff, buf, len); /* find command */ list_for_each_entry(tsp_cmd_ptr, &info->cmd_list_head, list) { if (!strcmp(buff, tsp_cmd_ptr->cmd_name)) { cmd_found = true; break; } } /* set not_support_cmd */ if (!cmd_found) { list_for_each_entry(tsp_cmd_ptr, &info->cmd_list_head, list) { if (!strcmp("not_support_cmd", 
tsp_cmd_ptr->cmd_name)) break; } } /* parsing parameters */ if (cur && cmd_found) { cur++; start = cur; memset(buff, 0x00, ARRAY_SIZE(buff)); do { if (*cur == delim || cur - buf == len) { end = cur; memcpy(buff, start, end - start); *(buff + strlen(buff)) = '\0'; ret = kstrtoint(buff, 10,\ info->cmd_param + param_cnt); start = cur + 1; memset(buff, 0x00, ARRAY_SIZE(buff)); param_cnt++; } cur++; } while (cur - buf <= len); } dev_info(&client->dev, "cmd = %s\n", tsp_cmd_ptr->cmd_name); for (i = 0; i < param_cnt; i++) dev_info(&client->dev, "cmd param %d= %d\n", i, info->cmd_param[i]); tsp_cmd_ptr->cmd_func(info); err_out: return count; } static ssize_t show_cmd_status(struct device *dev, struct device_attribute *devattr, char *buf) { struct mms_ts_info *info = dev_get_drvdata(dev); char buff[16] = {0}; dev_info(&info->client->dev, "tsp cmd: status:%d\n", info->cmd_state); if (info->cmd_state == 0) snprintf(buff, sizeof(buff), "WAITING"); else if (info->cmd_state == 1) snprintf(buff, sizeof(buff), "RUNNING"); else if (info->cmd_state == 2) snprintf(buff, sizeof(buff), "OK"); else if (info->cmd_state == 3) snprintf(buff, sizeof(buff), "FAIL"); else if (info->cmd_state == 4) snprintf(buff, sizeof(buff), "NOT_APPLICABLE"); return snprintf(buf, TSP_BUF_SIZE, "%s\n", buff); } static ssize_t show_cmd_result(struct device *dev, struct device_attribute *devattr, char *buf) { struct mms_ts_info *info = dev_get_drvdata(dev); dev_info(&info->client->dev, "tsp cmd: result: %s\n", info->cmd_result); mutex_lock(&info->cmd_lock); info->cmd_is_running = false; mutex_unlock(&info->cmd_lock); info->cmd_state = 0; return snprintf(buf, TSP_BUF_SIZE, "%s\n", info->cmd_result); } #ifdef ESD_DEBUG static bool intensity_log_flag; static u32 get_raw_data_one(struct mms_ts_info *info, u16 rx_idx, u16 tx_idx, u8 cmd) { u8 w_buf[6]; u8 read_buffer[2]; int ret; u32 raw_data; w_buf[0] = MMS_VSC_CMD; /* vendor specific command id */ w_buf[1] = MMS_VSC_MODE; /* mode of vendor */ w_buf[2] = 0; /* tx 
line */ w_buf[3] = 0; /* rx line */ w_buf[4] = 0; /* reserved */ w_buf[5] = 0; /* sub command */ if (cmd != MMS_VSC_CMD_INTENSITY && cmd != MMS_VSC_CMD_RAW && cmd != MMS_VSC_CMD_REFER) { dev_err(&info->client->dev, "%s: not profer command(cmd=%d)\n", __func__, cmd); return FAIL; } w_buf[2] = tx_idx; /* tx */ w_buf[3] = rx_idx; /* rx */ w_buf[5] = cmd; /* sub command */ ret = i2c_smbus_write_i2c_block_data(info->client, w_buf[0], 5, &w_buf[1]); if (ret < 0) goto err_i2c; ret = i2c_smbus_read_i2c_block_data(info->client, 0xBF, 2, read_buffer); if (ret < 0) goto err_i2c; raw_data = ((u16) read_buffer[1] << 8) | read_buffer[0]; if (cmd == MMS_VSC_CMD_REFER) raw_data = raw_data >> 4; return raw_data; err_i2c: dev_err(&info->client->dev, "%s: fail to i2c (cmd=%d)\n", __func__, cmd); return FAIL; } static ssize_t show_intensity_logging_on(struct device *dev, struct device_attribute *devattr, char *buf) { struct mms_ts_info *info = dev_get_drvdata(dev); struct i2c_client *client = info->client; struct file *fp; char log_data[160] = { 0, }; char buff[16] = { 0, }; mm_segment_t old_fs; long nwrite; u32 val; int i, y, c; old_fs = get_fs(); set_fs(KERNEL_DS); #define MELFAS_DEBUG_LOG_PATH "/sdcard/melfas_log" dev_info(&client->dev, "%s: start.\n", __func__); fp = filp_open(MELFAS_DEBUG_LOG_PATH, O_RDWR | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); if (IS_ERR(fp)) { dev_err(&client->dev, "%s: fail to open log file\n", __func__); goto open_err; } intensity_log_flag = 1; do { for (y = 0; y < 3; y++) { /* for tx chanel 0~2 */ memset(log_data, 0x00, 160); snprintf(buff, 16, "%1u: ", y); strncat(log_data, buff, strnlen(buff, 16)); for (i = 0; i < RX_NUM; i++) { val = get_raw_data_one(info, i, y, MMS_VSC_CMD_INTENSITY); snprintf(buff, 16, "%5u, ", val); strncat(log_data, buff, strnlen(buff, 16)); } memset(buff, '\n', 2); c = (y == 2) ? 
2 : 1; strncat(log_data, buff, c); nwrite = vfs_write(fp, (const char __user *)log_data, strnlen(log_data, 160), &fp->f_pos); } usleep_range(3000, 5000); } while (intensity_log_flag); filp_close(fp, current->files); set_fs(old_fs); return 0; open_err: set_fs(old_fs); return FAIL; } static ssize_t show_intensity_logging_off(struct device *dev, struct device_attribute *devattr, char *buf) { struct mms_ts_info *info = dev_get_drvdata(dev); intensity_log_flag = 0; usleep_range(10000, 12000); get_raw_data_all(info, MMS_VSC_CMD_EXIT); return 0; } #endif static DEVICE_ATTR(close_tsp_test, S_IRUGO, show_close_tsp_test, NULL); static DEVICE_ATTR(cmd, S_IWUSR | S_IWGRP, NULL, store_cmd); static DEVICE_ATTR(cmd_status, S_IRUGO, show_cmd_status, NULL); static DEVICE_ATTR(cmd_result, S_IRUGO, show_cmd_result, NULL); #ifdef ESD_DEBUG static DEVICE_ATTR(intensity_logging_on, S_IRUGO, show_intensity_logging_on, NULL); static DEVICE_ATTR(intensity_logging_off, S_IRUGO, show_intensity_logging_off, NULL); #endif static struct attribute *sec_touch_facotry_attributes[] = { &dev_attr_close_tsp_test.attr, &dev_attr_cmd.attr, &dev_attr_cmd_status.attr, &dev_attr_cmd_result.attr, #ifdef ESD_DEBUG &dev_attr_intensity_logging_on.attr, &dev_attr_intensity_logging_off.attr, #endif NULL, }; static struct attribute_group sec_touch_factory_attr_group = { .attrs = sec_touch_facotry_attributes, }; #endif /* SEC_TSP_FACTORY_TEST */ static int __devinit mms_ts_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct mms_ts_info *info; struct input_dev *input_dev; int ret = 0; char buf[4] = { 0, }; #ifdef SEC_TSP_FACTORY_TEST int i; struct device *fac_dev_ts; #endif touch_is_pressed = 0; if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) return -EIO; info = kzalloc(sizeof(struct mms_ts_info), GFP_KERNEL); if (!info) { dev_err(&client->dev, "Failed to allocate memory\n"); ret = -ENOMEM; goto err_alloc; } input_dev = 
input_allocate_device(); if (!input_dev) { dev_err(&client->dev, "Failed to allocate memory for input device\n"); ret = -ENOMEM; goto err_input_alloc; } info->client = client; info->input_dev = input_dev; info->pdata = client->dev.platform_data; if (NULL == info->pdata) { pr_err("failed to get platform data\n"); goto err_config; } info->irq = -1; mutex_init(&info->lock); if (info->pdata) { info->max_x = info->pdata->max_x; info->max_y = info->pdata->max_y; info->invert_x = info->pdata->invert_x; info->invert_y = info->pdata->invert_y; info->config_fw_version = info->pdata->config_fw_version; info->lcd_type = info->pdata->lcd_type; info->input_event = info->pdata->input_event; info->register_cb = info->pdata->register_cb; #ifdef CONFIG_LCD_FREQ_SWITCH info->register_lcd_cb = info->pdata->register_lcd_cb; #endif } else { info->max_x = 720; info->max_y = 1280; } i2c_set_clientdata(client, info); info->pdata->power(true); msleep(250); if (gpio_get_value(GPIO_OLED_ID)) { info->ldi = 'L'; dev_info(&client->dev, "LSI LDI\n"); } else { info->ldi = 'M'; dev_info(&client->dev, "Magna LDI\n"); } ret = i2c_master_recv(client, buf, 1); if (ret < 0) { /* tsp connect check */ pr_err("%s: i2c fail...tsp driver unload [%d], Add[%d]\n", __func__, ret, info->client->addr); goto err_config; } info->fw_core_ver = get_fw_version(info, SEC_CORE); dev_info(&client->dev, "core version : 0x%02x\n", info->fw_core_ver); if (info->fw_core_ver == 0x50) { dev_err(&client->dev, "Do not use 0x50 core version\n"); dev_err(&client->dev, "excute core firmware update\n"); ret = mms_ts_core_fw_load(info); if (ret) { dev_err(&client->dev, "failed to initialize (%d)\n", ret); goto err_config; } info->fw_core_ver = get_fw_version(info, SEC_CORE); } if (info->ldi == 'L') { if ((info->fw_core_ver < 0x53) || (info->fw_core_ver == 0xff)) { dev_err(&client->dev, "core version must be 0x53\n"); dev_err(&client->dev, "excute core firmware update\n"); ret = mms_ts_core_fw_load(info); if (ret) { 
dev_err(&client->dev, "failed to initialize (%d)\n", ret); goto err_config; } info->fw_core_ver = get_fw_version(info, SEC_CORE); } info->panel = get_panel_version(info); if (info->panel != 'M') { if (info->fw_core_ver == 0x53) { dev_err(&client->dev, "cannot read panel info\n"); dev_err(&client->dev, "excute core firmware update\n"); ret = mms_ts_fw_load(info, true, 'L'); } else { dev_err(&client->dev, "excute core firmware update\n"); ret = mms_ts_core_fw_load(info); } if (ret) { dev_err(&client->dev, "failed to initialize (%d)\n", ret); } } info->fw_ic_ver = get_fw_version(info, SEC_CONFIG); if (((info->fw_ic_ver < FW_VERSION_L) || (info->fw_ic_ver == 0xff)) && (info->fw_core_ver == 0x53)) { dev_err(&client->dev, "firmware update\n"); dev_err(&client->dev, "ic:0x%x, bin:0x%x\n", info->fw_ic_ver, FW_VERSION_L); if ((info->fw_ic_ver >= 0x21) || (info->fw_ic_ver == 0) || (info->fw_ic_ver == 0xff)) ret = mms_ts_fw_load(info, false, 'N'); else ret = mms_ts_core_fw_load(info); if (ret) { dev_err(&client->dev, "failed to initialize (%d)\n", ret); goto err_config; } } if ((info->fw_ic_ver >= 0x50) && (info->fw_ic_ver <= 0x69)) { dev_err(&client->dev, "LSI panel, Magna firmware written\n"); dev_err(&client->dev, "ic:0x%x, bin:0x%x\n", info->fw_ic_ver, FW_VERSION_L); ret = mms_ts_fw_load(info, true, 'L'); if (ret) { dev_err(&client->dev, "failed to initialize (%d)\n", ret); goto err_config; } } } else { info->panel = get_panel_version(info); dev_info(&client->dev, "%c panel\n", info->panel); if (info->panel == 'M') { if ((info->fw_core_ver < 0x53) || (info->fw_core_ver == 0xff)) { dev_err(&client->dev, "core version must be 0x53\n"); dev_err(&client->dev, "excute core firmware update\n"); ret = mms_ts_core_fw_load(info); if (ret) { dev_err(&client->dev, "failed to initialize (%d)\n", ret); goto err_config; } info->fw_core_ver = get_fw_version(info, SEC_CORE); } info->fw_ic_ver = get_fw_version(info, SEC_CONFIG); if ((info->fw_ic_ver < FW_VERSION_M) && (info->fw_core_ver 
== 0x53)) { dev_err(&client->dev, "firmware update\n"); dev_err(&client->dev, "ic:0x%x, bin:0x%x\n", info->fw_ic_ver, FW_VERSION_M); if ((info->fw_ic_ver >= 0x24) || (info->fw_ic_ver == 0)) ret = mms_ts_fw_load(info, false, 'N'); else ret = mms_ts_core_fw_load(info); if (ret) { dev_err(&client->dev, "failed to initialize (%d)\n", ret); goto err_config; } } } else if (info->panel == 'A') { dev_info(&client->dev, "A panel. Do not firm update\n"); } else { dev_err(&client->dev, "cannot read panel info\n"); info->fw_ic_ver = get_fw_version(info, SEC_CONFIG); if (info->fw_core_ver == 0x53) { dev_err(&client->dev, "firmware update\n"); dev_err(&client->dev, "ic:0x%x, bin:0x%x\n", info->fw_ic_ver, FW_VERSION_M); ret = mms_ts_fw_load(info, true, 'N'); } else { dev_err(&client->dev, "excute core firmware update\n"); ret = mms_ts_core_fw_load(info); } if (ret) { dev_err(&client->dev, "failed to initialize (%d)\n", ret); goto err_config; } } if ((info->fw_ic_ver >= 0x30) && (info->fw_ic_ver <= 0x49)) { dev_err(&client->dev, "Magna panel, LSI firmware written\n"); dev_err(&client->dev, "ic:0x%x, bin:0x%x\n", info->fw_ic_ver, FW_VERSION_M); ret = mms_ts_fw_load(info, true, 'M'); if (ret) { dev_err(&client->dev, "failed to initialize (%d)\n", ret); goto err_config; } } } info->panel = get_panel_version(info); dev_info(&client->dev, "%c panel\n", info->panel); ret = mms_ts_fw_info(info); if (ret) { dev_err(&client->dev, "failed to initialize (%d)\n", ret); goto err_config; } if (info->panel == 'M') info->tx_num = TX_NUM_M; else info->tx_num = TX_NUM_A; info->callbacks.inform_charger = melfas_ta_cb; if (info->register_cb) info->register_cb(&info->callbacks); #ifdef CONFIG_LCD_FREQ_SWITCH info->lcd_callback.inform_lcd = melfas_lcd_cb; if (info->register_lcd_cb) info->register_lcd_cb(&info->lcd_callback); info->tsp_lcdfreq_flag = 0; #endif snprintf(info->phys, sizeof(info->phys), "%s/input0", dev_name(&client->dev)); input_dev->name = "sec_touchscreen"; input_dev->phys = info->phys; 
input_dev->id.bustype = BUS_I2C; input_dev->dev.parent = &client->dev; __set_bit(EV_ABS, input_dev->evbit); __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); input_mt_init_slots(input_dev, MAX_FINGERS); input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, MAX_PRESSURE, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0, MAX_PRESSURE, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, (info->max_x)-1, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, (info->max_y)-1, 0, 0); if (info->panel == 'M') { input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, MAX_WIDTH, 0, 0); input_set_abs_params(input_dev, ABS_MT_ANGLE, MIN_ANGLE, MAX_ANGLE, 0, 0); input_set_abs_params(input_dev, ABS_MT_PALM, 0, 1, 0, 0); } else { input_set_abs_params(input_dev, ABS_MT_PRESSURE, 0, MAX_WIDTH, 0, 0); } input_set_drvdata(input_dev, info); ret = input_register_device(input_dev); if (ret) { dev_err(&client->dev, "failed to register input dev (%d)\n", ret); goto err_reg_input_dev; } #if TOUCH_BOOSTER mutex_init(&info->dvfs_lock); INIT_DELAYED_WORK(&info->work_dvfs_off, set_dvfs_off); INIT_DELAYED_WORK(&info->work_dvfs_chg, change_dvfs_lock); bus_dev = dev_get("exynos-busfreq"); info->cpufreq_level = -1; info->dvfs_lock_status = false; #endif info->enabled = true; ret = request_threaded_irq(client->irq, NULL, mms_ts_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT, MELFAS_TS_NAME, info); if (ret < 0) { dev_err(&client->dev, "Failed to register interrupt\n"); goto err_req_irq; } info->irq = client->irq; #ifdef CONFIG_HAS_EARLYSUSPEND info->early_suspend.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING; info->early_suspend.suspend = mms_ts_early_suspend; info->early_suspend.resume = mms_ts_late_resume; register_early_suspend(&info->early_suspend); #endif #ifdef CONFIG_TOUCH_WAKE touchwake_data = info; if (touchwake_data == NULL) pr_err("[TOUCHWAKE] Failed to set touchwake_data\n"); #endif #ifdef CONFIG_INPUT_FBSUSPEND ret = tsp_register_fb(info); if (ret) pr_err("[TSP] Failed 
to register fb\n"); #endif sec_touchscreen = device_create(sec_class, NULL, 0, info, "sec_touchscreen"); if (IS_ERR(sec_touchscreen)) { dev_err(&client->dev, "Failed to create device for the sysfs1\n"); ret = -ENODEV; } #ifdef SEC_TSP_FACTORY_TEST INIT_LIST_HEAD(&info->cmd_list_head); for (i = 0; i < ARRAY_SIZE(tsp_cmds); i++) list_add_tail(&tsp_cmds[i].list, &info->cmd_list_head); mutex_init(&info->cmd_lock); info->cmd_is_running = false; fac_dev_ts = device_create(sec_class, NULL, 0, info, "tsp"); if (IS_ERR(fac_dev_ts)) dev_err(&client->dev, "Failed to create device for the sysfs\n"); ret = sysfs_create_group(&fac_dev_ts->kobj, &sec_touch_factory_attr_group); if (ret) dev_err(&client->dev, "Failed to create sysfs group\n"); #endif return 0; err_req_irq: input_unregister_device(input_dev); err_reg_input_dev: err_config: input_free_device(input_dev); /*input_dev = NULL;*/ err_input_alloc: kfree(info); err_alloc: return ret; } static int __devexit mms_ts_remove(struct i2c_client *client) { struct mms_ts_info *info = i2c_get_clientdata(client); unregister_early_suspend(&info->early_suspend); #ifdef CONFIG_INPUT_FBSUSPEND tsp_unregister_fb(info); #endif if (info->irq >= 0) free_irq(info->irq, info); input_unregister_device(info->input_dev); kfree(info); return 0; } #if defined(CONFIG_PM) || defined(CONFIG_HAS_EARLYSUSPEND) static int mms_ts_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct mms_ts_info *info = i2c_get_clientdata(client); if (!info->enabled) { #ifdef CONFIG_INPUT_FBSUSPEND info->was_enabled_at_suspend = false; #endif return 0; } #ifdef CONFIG_INPUT_FBSUSPEND info->was_enabled_at_suspend = true; #endif dev_notice(&info->client->dev, "%s: users=%d\n", __func__, info->input_dev->users); disable_irq(info->irq); info->enabled = false; touch_is_pressed = 0; #ifdef CONFIG_LCD_FREQ_SWITCH info->tsp_lcdfreq_flag = 0; #endif release_all_fingers(info); info->pdata->power(0); info->sleep_wakeup_ta_check = info->ta_status; /* This 
delay needs to prevent unstable POR by rapid frequently pressing of PWR key. */ msleep(50); return 0; } static int mms_ts_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct mms_ts_info *info = i2c_get_clientdata(client); if (info->enabled) return 0; #ifdef CONFIG_INPUT_FBSUSPEND if (!info->was_enabled_at_suspend) return 0; #endif dev_notice(&info->client->dev, "%s: users=%d\n", __func__, info->input_dev->users); info->pdata->power(1); msleep(120); if (info->fw_ic_ver < 0x18) { if (info->ta_status) { dev_notice(&client->dev, "TA connect!!!\n"); i2c_smbus_write_byte_data(info->client, 0x33, 0x1); } else { dev_notice(&client->dev, "TA disconnect!!!\n"); i2c_smbus_write_byte_data(info->client, 0x33, 0x2); } } info->enabled = true; mms_set_noise_mode(info); if (info->fw_ic_ver >= 0x21) { if ((info->ta_status == 1) && (info->sleep_wakeup_ta_check == 0)) { dev_notice(&client->dev, "TA connect!!! %s\n", __func__); i2c_smbus_write_byte_data(info->client, 0x32, 0x1); } } /* Because irq_type by EXT_INTxCON register is changed to low_level * after wakeup, irq_type set to falling edge interrupt again. 
*/ enable_irq(info->irq); return 0; } #endif #ifdef CONFIG_HAS_EARLYSUSPEND static void mms_ts_early_suspend(struct early_suspend *h) { #ifndef CONFIG_TOUCH_WAKE struct mms_ts_info *info; info = container_of(h, struct mms_ts_info, early_suspend); mms_ts_suspend(&info->client->dev); #endif } static void mms_ts_late_resume(struct early_suspend *h) { #ifndef CONFIG_TOUCH_WAKE struct mms_ts_info *info; info = container_of(h, struct mms_ts_info, early_suspend); mms_ts_resume(&info->client->dev); #endif } #endif #ifdef CONFIG_TOUCH_WAKE static struct mms_ts_info * touchwake_data; void touchscreen_disable(void) { if (likely(touchwake_data != NULL)) mms_ts_suspend(&touchwake_data->client->dev); return; } EXPORT_SYMBOL(touchscreen_disable); void touchscreen_enable(void) { if (likely(touchwake_data != NULL)) mms_ts_resume(&touchwake_data->client->dev); return; } EXPORT_SYMBOL(touchscreen_enable); #endif #if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND) static const struct dev_pm_ops mms_ts_pm_ops = { .suspend = mms_ts_suspend, .resume = mms_ts_resume, #ifdef CONFIG_HIBERNATION .freeze = mms_ts_suspend, .thaw = mms_ts_resume, .restore = mms_ts_resume, #endif }; #endif static const struct i2c_device_id mms_ts_id[] = { {MELFAS_TS_NAME, 0}, {} }; MODULE_DEVICE_TABLE(i2c, mms_ts_id); static struct i2c_driver mms_ts_driver = { .probe = mms_ts_probe, .remove = __devexit_p(mms_ts_remove), .driver = { .name = MELFAS_TS_NAME, #if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND) .pm = &mms_ts_pm_ops, #endif }, .id_table = mms_ts_id, }; static int __init mms_ts_init(void) { return i2c_add_driver(&mms_ts_driver); } static void __exit mms_ts_exit(void) { i2c_del_driver(&mms_ts_driver); } module_init(mms_ts_init); module_exit(mms_ts_exit); /* Module information */ MODULE_DESCRIPTION("Touchscreen driver for Melfas MMS-series controllers"); MODULE_LICENSE("GPL");
gpl-2.0
fedya/aircam-openwrt
build_dir/host/bison-2.4.3/lib/argmatch.c
10
7873
/* argmatch.c -- find a match for a string in an array

   Copyright (C) 1990, 1998, 1999, 2001, 2002, 2003, 2004, 2005, 2006,
   2007, 2009, 2010 Free Software Foundation, Inc.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* Written by David MacKenzie <djm@ai.mit.edu>
   Modified by Akim Demaille <demaille@inf.enst.fr> */

#include <config.h>

/* Specification.  */
#include "argmatch.h"

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "gettext.h"
#define _(msgid) gettext (msgid)

#include "error.h"
#include "quotearg.h"
#include "quote.h"

#if USE_UNLOCKED_IO
# include "unlocked-io.h"
#endif

/* When reporting an invalid argument, show nonprinting characters
   by using the quoting style ARGMATCH_QUOTING_STYLE.  Do not use
   literal_quoting_style.  */
#ifndef ARGMATCH_QUOTING_STYLE
# define ARGMATCH_QUOTING_STYLE locale_quoting_style
#endif

/* The non-failing wrappers (XARGMATCH etc.) invoke ARGMATCH_DIE after a
   failed match; by default it exits the program.  A client may override
   it at compile time by defining ARGMATCH_DIE (and ARGMATCH_DIE_DECL for
   any declaration the replacement needs).  */
#ifndef ARGMATCH_DIE
# include "exitfail.h"
# define ARGMATCH_DIE exit (exit_failure)
#endif

#ifdef ARGMATCH_DIE_DECL
ARGMATCH_DIE_DECL;
#endif

/* Default exit hook: simply expand ARGMATCH_DIE (never returns).  */
static void
__argmatch_die (void)
{
  ARGMATCH_DIE;
}

/* Used by XARGMATCH and XARGCASEMATCH.  See description in argmatch.h.
   Default to __argmatch_die, but allow caller to change this at
   run-time.  */
argmatch_exit_fn argmatch_die = __argmatch_die;


/* If ARG is an unambiguous match for an element of the
   NULL-terminated array ARGLIST, return the index in ARGLIST
   of the matched element, else -1 if it does not match any element
   or -2 if it is ambiguous (is a prefix of more than one element).

   If VALLIST is non-null, use it to resolve ambiguities limited to
   synonyms, i.e., for
     "yes", "yop" -> 0
     "no", "nope" -> 1
   "y" is a valid argument, for `0', and "n" for `1'.
   VALSIZE is the size in bytes of one element of VALLIST; two prefix
   matches are treated as synonyms when their VALLIST entries compare
   equal with memcmp.  */

ptrdiff_t
argmatch (const char *arg, const char *const *arglist,
          const char *vallist, size_t valsize)
{
  size_t i;                     /* Temporary index in ARGLIST.  */
  size_t arglen;                /* Length of ARG.  */
  ptrdiff_t matchind = -1;      /* Index of first nonexact match.  */
  bool ambiguous = false;       /* If true, multiple nonexact match(es).  */

  arglen = strlen (arg);

  /* Test all elements for either exact match or abbreviated matches.  */
  for (i = 0; arglist[i]; i++)
    {
      if (!strncmp (arglist[i], arg, arglen))
        {
          if (strlen (arglist[i]) == arglen)
            /* Exact match found.  */
            return i;
          else if (matchind == -1)
            /* First nonexact match found.  */
            matchind = i;
          else
            {
              /* Second nonexact match found.  */
              if (vallist == NULL
                  || memcmp (vallist + valsize * matchind,
                             vallist + valsize * i, valsize))
                {
                  /* There is a real ambiguity, or we could not
                     disambiguate.  */
                  ambiguous = true;
                }
            }
        }
    }
  if (ambiguous)
    return -2;
  else
    return matchind;
}

/* Error reporting for argmatch.
   CONTEXT is a description of the type of entity that was being matched.
   VALUE is the invalid value that was given.
   PROBLEM is the return value from argmatch (-1 for no match,
   -2 for an ambiguous match).  Reports via error() without exiting.  */
void
argmatch_invalid (const char *context, const char *value, ptrdiff_t problem)
{
  char const *format = (problem == -1
                        ? _("invalid argument %s for %s")
                        : _("ambiguous argument %s for %s"));

  error (0, 0, format,
         quotearg_n_style (0, ARGMATCH_QUOTING_STYLE, value),
         quote_n (1, context));
}

/* List the valid arguments for argmatch on stderr.
   ARGLIST is the same as in argmatch.
   VALLIST is a pointer to an array of values.
   VALSIZE is the size of the elements of VALLIST.  */
void
argmatch_valid (const char *const *arglist,
                const char *vallist, size_t valsize)
{
  size_t i;
  const char *last_val = NULL;

  /* We try to put synonyms on the same line.  The assumption is that
     synonyms follow each other.  */
  fprintf (stderr, _("Valid arguments are:"));
  for (i = 0; arglist[i]; i++)
    if ((i == 0)
        || memcmp (last_val, vallist + valsize * i, valsize))
      {
        /* New value: start a fresh line and remember the value so the
           following synonyms can be appended to it.  */
        fprintf (stderr, "\n - `%s'", arglist[i]);
        last_val = vallist + valsize * i;
      }
    else
      {
        /* Same value as the previous argument: it is a synonym.  */
        fprintf (stderr, ", `%s'", arglist[i]);
      }
  putc ('\n', stderr);
}

/* Never failing versions of the previous functions.

   CONTEXT is the context for which argmatch is called (e.g.,
   "--version-control", or "$VERSION_CONTROL" etc.).  Upon failure,
   reports the problem, lists the valid arguments, and then calls the
   (supposed never to return) function EXIT_FN.  */
ptrdiff_t
__xargmatch_internal (const char *context,
                      const char *arg, const char *const *arglist,
                      const char *vallist, size_t valsize,
                      argmatch_exit_fn exit_fn)
{
  ptrdiff_t res = argmatch (arg, arglist, vallist, valsize);
  if (res >= 0)
    /* Success.  */
    return res;

  /* We failed.  Explain why.  */
  argmatch_invalid (context, arg, res);
  argmatch_valid (arglist, vallist, valsize);
  (*exit_fn) ();

  return -1; /* To please the compilers.  */
}

/* Look for VALUE in VALLIST, an array of objects of size VALSIZE and
   return the first corresponding argument in ARGLIST, or NULL if no
   entry of VALLIST compares equal to VALUE.  */
const char *
argmatch_to_argument (const char *value,
                      const char *const *arglist,
                      const char *vallist, size_t valsize)
{
  size_t i;

  for (i = 0; arglist[i]; i++)
    if (!memcmp (value, vallist + valsize * i, valsize))
      return arglist[i];
  return NULL;
}

#ifdef TEST
/*
 * Self-test driver: maps VERSION_CONTROL-style keywords to backup
 * policies.  Based on "getversion.c" by David MacKenzie
 * <djm@gnu.ai.mit.edu>.
 */
char *program_name;

/* When to make backup files.  */
enum backup_type
{
  /* Never make backups.  */
  no_backups,

  /* Make simple backups of every file.  */
  simple_backups,

  /* Make numbered backups of files that already have numbered backups,
     and simple backups of the others.  */
  numbered_existing_backups,

  /* Make numbered backups of every file.  */
  numbered_backups
};

/* Two tables describing arguments (keys) and their corresponding
   values; entries at the same index are paired, and several keys may
   map to the same value (synonyms).  */
static const char *const backup_args[] =
{
  "no", "none", "off",
  "simple", "never",
  "existing", "nil",
  "numbered", "t",
  0
};

static const enum backup_type backup_vals[] =
{
  no_backups, no_backups, no_backups,
  simple_backups, simple_backups,
  numbered_existing_backups, numbered_existing_backups,
  numbered_backups, numbered_backups
};

int
main (int argc, const char *const *argv)
{
  const char *cp;
  enum backup_type backup_type = no_backups;

  program_name = (char *) argv[0];

  if (argc > 2)
    {
      fprintf (stderr, "Usage: %s [VERSION_CONTROL]\n", program_name);
      exit (1);
    }

  /* The environment variable supplies the default; a command-line
     argument, if given, overrides it.  */
  if ((cp = getenv ("VERSION_CONTROL")))
    backup_type = XARGMATCH ("$VERSION_CONTROL", cp,
                             backup_args, backup_vals);

  if (argc == 2)
    backup_type = XARGMATCH (program_name, argv[1],
                             backup_args, backup_vals);

  printf ("The version control is `%s'\n",
          ARGMATCH_TO_ARGUMENT (backup_type,
                                backup_args, backup_vals));

  return 0;
}
#endif
gpl-2.0
zales/RamosW17pro-kernel-common
net/mac80211/mlme.c
522
74893
/* * BSS client mode implementation * Copyright 2003-2008, Jouni Malinen <j@w1.fi> * Copyright 2004, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <linux/pm_qos_params.h> #include <linux/crc32.h> #include <linux/slab.h> #include <net/mac80211.h> #include <asm/unaligned.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #include "led.h" static int max_nullfunc_tries = 2; module_param(max_nullfunc_tries, int, 0644); MODULE_PARM_DESC(max_nullfunc_tries, "Maximum nullfunc tx tries before disconnecting (reason 4)."); static int max_probe_tries = 5; module_param(max_probe_tries, int, 0644); MODULE_PARM_DESC(max_probe_tries, "Maximum probe tries before disconnecting (reason 4)."); /* * Beacon loss timeout is calculated as N frames times the * advertised beacon interval. This may need to be somewhat * higher than what hardware might detect to account for * delays in the host processing frames. But since we also * probe on beacon miss before declaring the connection lost * default to what we want. */ #define IEEE80211_BEACON_LOSS_COUNT 7 /* * Time the connection can be idle before we probe * it to see if we can still talk to the AP. */ #define IEEE80211_CONNECTION_IDLE_TIME (30 * HZ) /* * Time we wait for a probe response after sending * a probe request because of beacon loss or for * checking the connection still works. 
*/ static int probe_wait_ms = 500; module_param(probe_wait_ms, int, 0644); MODULE_PARM_DESC(probe_wait_ms, "Maximum time(ms) to wait for probe response" " before disconnecting (reason 4)."); /* * Weight given to the latest Beacon frame when calculating average signal * strength for Beacon frames received in the current BSS. This must be * between 1 and 15. */ #define IEEE80211_SIGNAL_AVE_WEIGHT 3 /* * How many Beacon frames need to have been used in average signal strength * before starting to indicate signal change events. */ #define IEEE80211_SIGNAL_AVE_MIN_COUNT 4 #define TMR_RUNNING_TIMER 0 #define TMR_RUNNING_CHANSW 1 /* * All cfg80211 functions have to be called outside a locked * section so that they can acquire a lock themselves... This * is much simpler than queuing up things in cfg80211, but we * do need some indirection for that here. */ enum rx_mgmt_action { /* no action required */ RX_MGMT_NONE, /* caller must call cfg80211_send_deauth() */ RX_MGMT_CFG80211_DEAUTH, /* caller must call cfg80211_send_disassoc() */ RX_MGMT_CFG80211_DISASSOC, }; /* utils */ static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd) { lockdep_assert_held(&ifmgd->mtx); } /* * We can have multiple work items (and connection probing) * scheduling this timer, but we need to take care to only * reschedule it when it should fire _earlier_ than it was * asked for before, or if it's not pending right now. This * function ensures that. Note that it then is required to * run this function for all timeouts after the first one * has happened -- the work that runs from this timer will * do that. 
*/ static void run_again(struct ieee80211_if_managed *ifmgd, unsigned long timeout) { ASSERT_MGD_MTX(ifmgd); if (!timer_pending(&ifmgd->timer) || time_before(timeout, ifmgd->timer.expires)) mod_timer(&ifmgd->timer, timeout); } void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata) { if (sdata->local->hw.flags & IEEE80211_HW_BEACON_FILTER) return; mod_timer(&sdata->u.mgd.bcn_mon_timer, round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout)); } void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; if (unlikely(!sdata->u.mgd.associated)) return; if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) return; mod_timer(&sdata->u.mgd.conn_mon_timer, round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); ifmgd->probe_send_count = 0; } static int ecw2cw(int ecw) { return (1 << ecw) - 1; } /* * ieee80211_enable_ht should be called only after the operating band * has been determined as ht configuration depends on the hw's * HT abilities for a specific band. */ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, struct ieee80211_ht_info *hti, const u8 *bssid, u16 ap_ht_cap_flags) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; struct sta_info *sta; u32 changed = 0; int hti_cfreq; u16 ht_opmode; bool enable_ht = true; enum nl80211_channel_type prev_chantype; enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; prev_chantype = sdata->vif.bss_conf.channel_type; /* HT is not supported */ if (!sband->ht_cap.ht_supported) enable_ht = false; if (enable_ht) { hti_cfreq = ieee80211_channel_to_frequency(hti->control_chan, sband->band); /* check that channel matches the right operating channel */ if (local->hw.conf.channel->center_freq != hti_cfreq) { /* Some APs mess this up, evidently. 
* Netgear WNDR3700 sometimes reports 4 higher than * the actual channel, for instance. */ printk(KERN_DEBUG "%s: Wrong control channel in association" " response: configured center-freq: %d" " hti-cfreq: %d hti->control_chan: %d" " band: %d. Disabling HT.\n", sdata->name, local->hw.conf.channel->center_freq, hti_cfreq, hti->control_chan, sband->band); enable_ht = false; } } if (enable_ht) { channel_type = NL80211_CHAN_HT20; if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) && (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: if (!(local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40PLUS)) channel_type = NL80211_CHAN_HT40PLUS; break; case IEEE80211_HT_PARAM_CHA_SEC_BELOW: if (!(local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40MINUS)) channel_type = NL80211_CHAN_HT40MINUS; break; } } } if (local->tmp_channel) local->tmp_channel_type = channel_type; if (!ieee80211_set_channel_type(local, sdata, channel_type)) { /* can only fail due to HT40+/- mismatch */ channel_type = NL80211_CHAN_HT20; WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type)); } /* channel_type change automatically detected */ ieee80211_hw_config(local, 0); if (prev_chantype != channel_type) { rcu_read_lock(); sta = sta_info_get(sdata, bssid); if (sta) rate_control_rate_update(local, sband, sta, IEEE80211_RC_HT_CHANGED, channel_type); rcu_read_unlock(); } ht_opmode = le16_to_cpu(hti->operation_mode); /* if bss configuration changed store the new one */ if (sdata->ht_opmode_valid != enable_ht || sdata->vif.bss_conf.ht_operation_mode != ht_opmode || prev_chantype != channel_type) { changed |= BSS_CHANGED_HT; sdata->vif.bss_conf.ht_operation_mode = ht_opmode; sdata->ht_opmode_valid = enable_ht; } return changed; } /* frame sending functions */ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data 
*sdata, const u8 *bssid, u16 stype, u16 reason, void *cookie, bool send_frame) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); if (!skb) { printk(KERN_DEBUG "%s: failed to allocate buffer for " "deauth/disassoc frame\n", sdata->name); return; } skb_reserve(skb, local->hw.extra_tx_headroom); mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); memset(mgmt, 0, 24); memcpy(mgmt->da, bssid, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype); skb_put(skb, 2); /* u.deauth.reason_code == u.disassoc.reason_code */ mgmt->u.deauth.reason_code = cpu_to_le16(reason); if (stype == IEEE80211_STYPE_DEAUTH) if (cookie) __cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); else cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); else if (cookie) __cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); else cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED)) IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; if (send_frame) ieee80211_tx_skb(sdata, skb); else kfree_skb(skb); } void ieee80211_send_pspoll(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { struct ieee80211_pspoll *pspoll; struct sk_buff *skb; skb = ieee80211_pspoll_get(&local->hw, &sdata->vif); if (!skb) return; pspoll = (struct ieee80211_pspoll *) skb->data; pspoll->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; ieee80211_tx_skb(sdata, skb); } void ieee80211_send_nullfunc(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, int powersave) { struct sk_buff *skb; struct ieee80211_hdr_3addr *nullfunc; skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif); if (!skb) return; 
nullfunc = (struct ieee80211_hdr_3addr *) skb->data; if (powersave) nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; ieee80211_tx_skb(sdata, skb); } static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { struct sk_buff *skb; struct ieee80211_hdr *nullfunc; __le16 fc; if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return; skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30); if (!skb) { printk(KERN_DEBUG "%s: failed to allocate buffer for 4addr " "nullfunc frame\n", sdata->name); return; } skb_reserve(skb, local->hw.extra_tx_headroom); nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30); memset(nullfunc, 0, 30); fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); nullfunc->frame_control = fc; memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN); memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN); memcpy(nullfunc->addr4, sdata->vif.addr, ETH_ALEN); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; ieee80211_tx_skb(sdata, skb); } /* spectrum management related things */ static void ieee80211_chswitch_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; if (!ieee80211_sdata_running(sdata)) return; mutex_lock(&ifmgd->mtx); if (!ifmgd->associated) goto out; sdata->local->oper_channel = sdata->local->csa_channel; if (!sdata->local->ops->channel_switch) { /* call "hw_config" only if doing sw channel switch */ ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL); } /* XXX: shouldn't really modify cfg80211-owned data! 
*/ ifmgd->associated->channel = sdata->local->oper_channel; ieee80211_wake_queues_by_reason(&sdata->local->hw, IEEE80211_QUEUE_STOP_REASON_CSA); out: ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; mutex_unlock(&ifmgd->mtx); } void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) { struct ieee80211_sub_if_data *sdata; struct ieee80211_if_managed *ifmgd; sdata = vif_to_sdata(vif); ifmgd = &sdata->u.mgd; trace_api_chswitch_done(sdata, success); if (!success) { /* * If the channel switch was not successful, stay * around on the old channel. We currently lack * good handling of this situation, possibly we * should just drop the association. */ sdata->local->csa_channel = sdata->local->oper_channel; } ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); } EXPORT_SYMBOL(ieee80211_chswitch_done); static void ieee80211_chswitch_timer(unsigned long data) { struct ieee80211_sub_if_data *sdata = (struct ieee80211_sub_if_data *) data; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; if (sdata->local->quiescing) { set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); return; } ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); } void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_sw_ie *sw_elem, struct ieee80211_bss *bss, u64 timestamp) { struct cfg80211_bss *cbss = container_of((void *)bss, struct cfg80211_bss, priv); struct ieee80211_channel *new_ch; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num, cbss->channel->band); ASSERT_MGD_MTX(ifmgd); if (!ifmgd->associated) return; if (sdata->local->scanning) return; /* Disregard subsequent beacons if we are already running a timer processing a CSA */ if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED) return; new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq); if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED) return; sdata->local->csa_channel = new_ch; if 
(sdata->local->ops->channel_switch) { /* use driver's channel switch callback */ struct ieee80211_channel_switch ch_switch; memset(&ch_switch, 0, sizeof(ch_switch)); ch_switch.timestamp = timestamp; if (sw_elem->mode) { ch_switch.block_tx = true; ieee80211_stop_queues_by_reason(&sdata->local->hw, IEEE80211_QUEUE_STOP_REASON_CSA); } ch_switch.channel = new_ch; ch_switch.count = sw_elem->count; ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED; drv_channel_switch(sdata->local, &ch_switch); return; } /* channel switch handled in software */ if (sw_elem->count <= 1) { ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); } else { if (sw_elem->mode) ieee80211_stop_queues_by_reason(&sdata->local->hw, IEEE80211_QUEUE_STOP_REASON_CSA); ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED; mod_timer(&ifmgd->chswitch_timer, jiffies + msecs_to_jiffies(sw_elem->count * cbss->beacon_interval)); } } static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, u16 capab_info, u8 *pwr_constr_elem, u8 pwr_constr_elem_len) { struct ieee80211_conf *conf = &sdata->local->hw.conf; if (!(capab_info & WLAN_CAPABILITY_SPECTRUM_MGMT)) return; /* Power constraint IE length should be 1 octet */ if (pwr_constr_elem_len != 1) return; if ((*pwr_constr_elem <= conf->channel->max_power) && (*pwr_constr_elem != sdata->local->power_constr_level)) { sdata->local->power_constr_level = *pwr_constr_elem; ieee80211_hw_config(sdata->local, 0); } } void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct ieee80211_conf *conf = &local->hw.conf; WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION || !(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) || (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)); local->disable_dynamic_ps = false; conf->dynamic_ps_timeout = local->dynamic_ps_user_timeout; } EXPORT_SYMBOL(ieee80211_enable_dyn_ps); void ieee80211_disable_dyn_ps(struct ieee80211_vif *vif) 
{ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct ieee80211_conf *conf = &local->hw.conf; WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION || !(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) || (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)); local->disable_dynamic_ps = true; conf->dynamic_ps_timeout = 0; del_timer_sync(&local->dynamic_ps_timer); ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work); } EXPORT_SYMBOL(ieee80211_disable_dyn_ps); /* powersave */ static void ieee80211_enable_ps(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { struct ieee80211_conf *conf = &local->hw.conf; /* * If we are scanning right now then the parameters will * take effect when scan finishes. */ if (local->scanning) return; if (conf->dynamic_ps_timeout > 0 && !(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)) { mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies(conf->dynamic_ps_timeout)); } else { if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) ieee80211_send_nullfunc(local, sdata, 1); if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) return; conf->flags |= IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); } } static void ieee80211_change_ps(struct ieee80211_local *local) { struct ieee80211_conf *conf = &local->hw.conf; if (local->ps_sdata) { ieee80211_enable_ps(local, local->ps_sdata); } else if (conf->flags & IEEE80211_CONF_PS) { conf->flags &= ~IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); del_timer_sync(&local->dynamic_ps_timer); cancel_work_sync(&local->dynamic_ps_enable_work); } } static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *mgd = &sdata->u.mgd; struct sta_info *sta = NULL; u32 sta_flags = 0; if (!mgd->powersave) return false; if (!mgd->associated) return false; if 
(!mgd->associated->beacon_ies) return false; if (mgd->flags & (IEEE80211_STA_BEACON_POLL | IEEE80211_STA_CONNECTION_POLL)) return false; rcu_read_lock(); sta = sta_info_get(sdata, mgd->bssid); if (sta) sta_flags = get_sta_flags(sta); rcu_read_unlock(); if (!(sta_flags & WLAN_STA_AUTHORIZED)) return false; return true; } /* need to hold RTNL or interface lock */ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency) { struct ieee80211_sub_if_data *sdata, *found = NULL; int count = 0; int timeout; if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) { local->ps_sdata = NULL; return; } if (!list_empty(&local->work_list)) { local->ps_sdata = NULL; goto change; } list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type == NL80211_IFTYPE_AP) { /* If an AP vif is found, then disable PS * by setting the count to zero thereby setting * ps_sdata to NULL. */ count = 0; break; } if (sdata->vif.type != NL80211_IFTYPE_STATION) continue; found = sdata; count++; } if (count == 1 && ieee80211_powersave_allowed(found)) { struct ieee80211_conf *conf = &local->hw.conf; s32 beaconint_us; if (latency < 0) latency = pm_qos_request(PM_QOS_NETWORK_LATENCY); beaconint_us = ieee80211_tu_to_usec( found->vif.bss_conf.beacon_int); timeout = local->dynamic_ps_forced_timeout; if (timeout < 0) { /* * Go to full PSM if the user configures a very low * latency requirement. * The 2000 second value is there for compatibility * until the PM_QOS_NETWORK_LATENCY is configured * with real values. 
*/ if (latency > (1900 * USEC_PER_MSEC) && latency != (2000 * USEC_PER_SEC)) timeout = 0; else timeout = 100; } local->dynamic_ps_user_timeout = timeout; if (!local->disable_dynamic_ps) conf->dynamic_ps_timeout = local->dynamic_ps_user_timeout; if (beaconint_us > latency) { local->ps_sdata = NULL; } else { struct ieee80211_bss *bss; int maxslp = 1; u8 dtimper; bss = (void *)found->u.mgd.associated->priv; dtimper = bss->dtim_period; /* If the TIM IE is invalid, pretend the value is 1 */ if (!dtimper) dtimper = 1; else if (dtimper > 1) maxslp = min_t(int, dtimper, latency / beaconint_us); local->hw.conf.max_sleep_period = maxslp; local->hw.conf.ps_dtim_period = dtimper; local->ps_sdata = found; } } else { local->ps_sdata = NULL; } change: ieee80211_change_ps(local); } void ieee80211_dynamic_ps_disable_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, dynamic_ps_disable_work); if (local->hw.conf.flags & IEEE80211_CONF_PS) { local->hw.conf.flags &= ~IEEE80211_CONF_PS; ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); } ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_QUEUE_STOP_REASON_PS); } void ieee80211_dynamic_ps_enable_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, dynamic_ps_enable_work); struct ieee80211_sub_if_data *sdata = local->ps_sdata; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; unsigned long flags; int q; /* can only happen when PS was just disabled anyway */ if (!sdata) return; if (local->hw.conf.flags & IEEE80211_CONF_PS) return; /* * transmission can be stopped by others which leads to * dynamic_ps_timer expiry. Postpond the ps timer if it * is not the actual idle state. 
*/
	/*
	 * NOTE(review): tail of a function whose start lies before this
	 * chunk -- presumably the dynamic-PS enable work item (the timer
	 * below queues local->dynamic_ps_enable_work); confirm upstream.
	 * If any queue is still stopped, retry later via the timer rather
	 * than entering powersave now.
	 */
	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	for (q = 0; q < local->hw.queues; q++) {
		if (local->queue_stop_reasons[q]) {
			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
					       flags);
			mod_timer(&local->dynamic_ps_timer, jiffies +
				  msecs_to_jiffies(
				  local->hw.conf.dynamic_ps_timeout));
			return;
		}
	}
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

	/*
	 * If the stack must send the nullfunc frame itself and it has not
	 * been ACKed yet, stop TX and either wait for pending driver frames
	 * (retry via timer) or send the nullfunc now and flush for its
	 * TX status.
	 */
	if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
	    (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) {
		netif_tx_stop_all_queues(sdata->dev);

		if (drv_tx_frames_pending(local))
			mod_timer(&local->dynamic_ps_timer, jiffies +
				  msecs_to_jiffies(
				  local->hw.conf.dynamic_ps_timeout));
		else {
			ieee80211_send_nullfunc(local, sdata, 1);
			/* Flush to get the tx status of nullfunc frame */
			drv_flush(local, false);
		}
	}

	/*
	 * Enable PS either when no nullfunc ACK tracking is possible
	 * (hardware lacks ACK-status reporting or stack-nullfunc) or once
	 * the nullfunc has been ACKed.
	 */
	if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
	      (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) ||
	    (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
		ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
		local->hw.conf.flags |= IEEE80211_CONF_PS;
		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
	}

	netif_tx_wake_all_queues(sdata->dev);
}

/*
 * Dynamic-PS timer: defer the actual powersave transition to process
 * context by queueing the dynamic_ps_enable_work item. Skipped while
 * quiescing or suspended.
 */
void ieee80211_dynamic_ps_timer(unsigned long data)
{
	struct ieee80211_local *local = (void *) data;

	if (local->quiescing || local->suspended)
		return;

	ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work);
}

/* MLME */

/*
 * Parse a WMM parameter element received from the AP and program the
 * hardware TX queues via drv_conf_tx(). Also records admission-control
 * (ACM) bits in local->wmm_acm and per-AC U-APSD enablement. Bails out
 * early if the driver cannot configure queues, fewer than 4 queues
 * exist, the element is absent/short, or the parameter-set count is
 * unchanged since last time.
 */
static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
				     struct ieee80211_sub_if_data *sdata,
				     u8 *wmm_param, size_t wmm_param_len)
{
	struct ieee80211_tx_queue_params params;
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	size_t left;
	int count;
	u8 *pos, uapsd_queues = 0;

	if (!local->ops->conf_tx)
		return;

	if (local->hw.queues < 4)
		return;

	if (!wmm_param)
		return;

	if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
		return;

	if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
		uapsd_queues = local->uapsd_queues;

	/* parameter-set count: nothing to do if the AP didn't change it */
	count = wmm_param[6] & 0x0f;
	if (count == ifmgd->wmm_last_param_set)
		return;
	ifmgd->wmm_last_param_set = count;

	pos = wmm_param + 8;
	left = wmm_param_len - 8;

	memset(&params, 0, sizeof(params));

	local->wmm_acm = 0;
	/* one 4-byte AC parameter record per iteration */
	for (; left >= 4; left -= 4, pos += 4) {
		int aci = (pos[0] >> 5) & 0x03;
		int acm = (pos[0] >> 4) & 0x01;
		bool uapsd = false;
		int queue;

		/* map WMM ACI to mac80211 queue number */
		switch (aci) {
		case 1: /* AC_BK */
			queue = 3;
			if (acm)
				local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
			if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
				uapsd = true;
			break;
		case 2: /* AC_VI */
			queue = 1;
			if (acm)
				local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
			if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
				uapsd = true;
			break;
		case 3: /* AC_VO */
			queue = 0;
			if (acm)
				local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
			if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
				uapsd = true;
			break;
		case 0: /* AC_BE */
		default:
			queue = 2;
			if (acm)
				local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
			if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
				uapsd = true;
			break;
		}

		params.aifs = pos[0] & 0x0f;
		params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
		params.cw_min = ecw2cw(pos[1] & 0x0f);
		params.txop = get_unaligned_le16(pos + 2);
		params.uapsd = uapsd;
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
		wiphy_debug(local->hw.wiphy,
			    "WMM queue=%d aci=%d acm=%d aifs=%d "
			    "cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
			    queue, aci, acm,
			    params.aifs, params.cw_min, params.cw_max,
			    params.txop, params.uapsd);
#endif
		if (drv_conf_tx(local, queue, &params))
			wiphy_debug(local->hw.wiphy,
				    "failed to set TX queue parameters for queue %d\n",
				    queue);
	}

	/* enable WMM or activate new settings */
	sdata->vif.bss_conf.qos = true;
	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS);
}

/*
 * Recompute ERP-related BSS settings (CTS protection, short preamble,
 * short slot time) from the capability field and, when available, the
 * ERP information element. Returns the BSS_CHANGED_* bits for every
 * setting that actually changed.
 */
static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
					   u16 capab, bool erp_valid, u8 erp)
{
	struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
	u32 changed = 0;
	bool use_protection;
	bool use_short_preamble;
	bool use_short_slot;

	if (erp_valid) {
		use_protection = (erp & WLAN_ERP_USE_PROTECTION) != 0;
		use_short_preamble = (erp & WLAN_ERP_BARKER_PREAMBLE) == 0;
	} else {
		use_protection = false;
		use_short_preamble = !!(capab & WLAN_CAPABILITY_SHORT_PREAMBLE);
	}

	use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
	/* 5 GHz always uses short slot time */
	if (sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
		use_short_slot = true;

	if (use_protection != bss_conf->use_cts_prot) {
		bss_conf->use_cts_prot = use_protection;
		changed |= BSS_CHANGED_ERP_CTS_PROT;
	}

	if (use_short_preamble != bss_conf->use_short_preamble) {
		bss_conf->use_short_preamble = use_short_preamble;
		changed |= BSS_CHANGED_ERP_PREAMBLE;
	}

	if (use_short_slot != bss_conf->use_short_slot) {
		bss_conf->use_short_slot = use_short_slot;
		changed |= BSS_CHANGED_ERP_SLOT;
	}

	return changed;
}

/*
 * Record a completed association with @cbss: copy timing/capability
 * info into the BSS configuration, arm the beacon-loss timeout, notify
 * the driver of all accumulated BSS changes, recalculate PS/SMPS state,
 * and finally start TX queues and carrier.
 */
static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
				     struct cfg80211_bss *cbss,
				     u32 bss_info_changed)
{
	struct ieee80211_bss *bss = (void *)cbss->priv;
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;

	bss_info_changed |= BSS_CHANGED_ASSOC;
	/* set timing information */
	bss_conf->beacon_int = cbss->beacon_interval;
	bss_conf->timestamp = cbss->tsf;

	bss_info_changed |= BSS_CHANGED_BEACON_INT;
	bss_info_changed |= ieee80211_handle_bss_capability(sdata,
		cbss->capability, bss->has_erp_value, bss->erp_value);

	/* beacon-loss timeout: IEEE80211_BEACON_LOSS_COUNT beacon intervals */
	sdata->u.mgd.beacon_timeout = usecs_to_jiffies(ieee80211_tu_to_usec(
		IEEE80211_BEACON_LOSS_COUNT * bss_conf->beacon_int));

	sdata->u.mgd.associated = cbss;
	memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);

	/* restart the beacon signal-strength averaging */
	sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;

	/* just to be sure */
	sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
				IEEE80211_STA_BEACON_POLL);

	ieee80211_led_assoc(local, 1);

	if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
		bss_conf->dtim_period = bss->dtim_period;
	else
		bss_conf->dtim_period = 0;

	bss_conf->assoc = 1;
	/*
	 * For now just always ask the driver to update the basic rateset
	 * when we have associated, we aren't checking whether it actually
	 * changed or not.
	 */
	bss_info_changed |= BSS_CHANGED_BASIC_RATES;

	/* And the BSSID changed - we're associated now */
	bss_info_changed |= BSS_CHANGED_BSSID;

	/* Tell the driver to monitor connection quality (if supported) */
	if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) &&
	    bss_conf->cqm_rssi_thold)
		bss_info_changed |= BSS_CHANGED_CQM;

	/* Enable ARP filtering */
	if (bss_conf->arp_filter_enabled != sdata->arp_filter_state) {
		bss_conf->arp_filter_enabled = sdata->arp_filter_state;
		bss_info_changed |= BSS_CHANGED_ARP_FILTER;
	}

	ieee80211_bss_info_change_notify(sdata, bss_info_changed);

	mutex_lock(&local->iflist_mtx);
	ieee80211_recalc_ps(local, -1);
	ieee80211_recalc_smps(local);
	mutex_unlock(&local->iflist_mtx);

	netif_tx_start_all_queues(sdata->dev);
	netif_carrier_on(sdata->dev);
}

/*
 * Tear down the current association: clear association state, stop
 * queues/carrier, tear down BA sessions, reset ERP/WMM/HT state,
 * disable powersave, notify the driver, optionally destroy the AP's
 * STA entry, and stop all MLME timers. Caller holds ifmgd->mtx.
 */
static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
				   bool remove_sta, bool tx)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	u32 changed = 0, config_changed = 0;
	u8 bssid[ETH_ALEN];

	ASSERT_MGD_MTX(ifmgd);

	if (WARN_ON(!ifmgd->associated))
		return;

	memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);

	ifmgd->associated = NULL;
	memset(ifmgd->bssid, 0, ETH_ALEN);

	/*
	 * we need to commit the associated = NULL change because the
	 * scan code uses that to determine whether this iface should
	 * go to/wake up from powersave or not -- and could otherwise
	 * wake the queues erroneously.
	 */
	smp_mb();

	/*
	 * Thus, we can only afterwards stop the queues -- to account
	 * for the case where another CPU is finishing a scan at this
	 * time -- we don't want the scan code to enable queues.
	 */

	netif_tx_stop_all_queues(sdata->dev);
	netif_carrier_off(sdata->dev);

	mutex_lock(&local->sta_mtx);
	sta = sta_info_get(sdata, bssid);
	if (sta) {
		set_sta_flags(sta, WLAN_STA_BLOCK_BA);
		ieee80211_sta_tear_down_BA_sessions(sta, tx);
	}
	mutex_unlock(&local->sta_mtx);

	changed |= ieee80211_reset_erp_info(sdata);

	ieee80211_led_assoc(local, 0);
	changed |= BSS_CHANGED_ASSOC;
	sdata->vif.bss_conf.assoc = false;

	ieee80211_set_wmm_default(sdata);

	/* channel(_type) changes are handled by ieee80211_hw_config */
	WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));

	/* on the next assoc, re-program HT parameters */
	sdata->ht_opmode_valid = false;

	local->power_constr_level = 0;

	del_timer_sync(&local->dynamic_ps_timer);
	cancel_work_sync(&local->dynamic_ps_enable_work);

	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
		local->hw.conf.flags &= ~IEEE80211_CONF_PS;
		config_changed |= IEEE80211_CONF_CHANGE_PS;
	}
	local->ps_sdata = NULL;

	ieee80211_hw_config(local, config_changed);

	/* Disable ARP filtering */
	if (sdata->vif.bss_conf.arp_filter_enabled) {
		sdata->vif.bss_conf.arp_filter_enabled = false;
		changed |= BSS_CHANGED_ARP_FILTER;
	}

	/* The BSSID (not really interesting) and HT changed */
	changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
	ieee80211_bss_info_change_notify(sdata, changed);

	if (remove_sta)
		sta_info_destroy_addr(sdata, bssid);

	del_timer_sync(&sdata->u.mgd.conn_mon_timer);
	del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
	del_timer_sync(&sdata->u.mgd.timer);
	del_timer_sync(&sdata->u.mgd.chswitch_timer);
}

void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
			     struct ieee80211_hdr *hdr)
{
	/*
	 * We can postpone the mgd.timer whenever receiving unicast frames
	 * from AP because we know that the connection is working both ways
	 * at that time. But multicast frames (and hence also beacons) must
	 * be ignored here, because we need to trigger the timer during
	 * data idle periods for sending the periodic probe request to the
	 * AP we're connected to.
	 */

	if (is_multicast_ether_addr(hdr->addr1))
		return;

	ieee80211_sta_reset_conn_monitor(sdata);
}

/*
 * Connection probe succeeded: clear the poll flags, recalculate PS,
 * and (unless the hardware monitors the connection itself) re-arm the
 * beacon and connection monitor timers.
 */
static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;

	if (!(ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
			      IEEE80211_STA_CONNECTION_POLL)))
		return;

	ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
			  IEEE80211_STA_BEACON_POLL);
	mutex_lock(&sdata->local->iflist_mtx);
	ieee80211_recalc_ps(sdata->local, -1);
	mutex_unlock(&sdata->local->iflist_mtx);

	if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
		return;

	/*
	 * We've received a probe response, but are not sure whether
	 * we have or will be receiving any beacons or data, so let's
	 * schedule the timers again, just in case.
	 */
	ieee80211_sta_reset_beacon_monitor(sdata);

	mod_timer(&ifmgd->conn_mon_timer,
		  round_jiffies_up(jiffies +
				   IEEE80211_CONNECTION_IDLE_TIME));
}

/*
 * TX status feedback for data frames: an ACK resets the connection
 * monitor; for nullfunc probes, the ACK (or its absence) drives the
 * probing state machine via the interface work.
 */
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
			     struct ieee80211_hdr *hdr, bool ack)
{
	if (!ieee80211_is_data(hdr->frame_control))
		return;

	if (ack)
		ieee80211_sta_reset_conn_monitor(sdata);

	if (ieee80211_is_nullfunc(hdr->frame_control) &&
	    sdata->u.mgd.probe_send_count > 0) {
		if (ack)
			sdata->u.mgd.probe_send_count = 0;
		else
			sdata->u.mgd.nullfunc_failed = true;
		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
	}
}

/*
 * Send one connection probe to the associated AP: a nullfunc frame when
 * the hardware reports accurate TX ACK status, otherwise a (unicast,
 * later broadcast) probe request. Arms the probe timeout.
 */
static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	const u8 *ssid;
	u8 *dst = ifmgd->associated->bssid;
	u8 unicast_limit = max(1, max_probe_tries - 3);

	/*
	 * Try sending broadcast probe requests for the last three
	 * probe requests after the first ones failed since some
	 * buggy APs only support broadcast probe requests.
	 */
	if (ifmgd->probe_send_count >= unicast_limit)
		dst = NULL;

	/*
	 * When the hardware reports an accurate Tx ACK status, it's
	 * better to send a nullfunc frame instead of a probe request,
	 * as it will kick us off the AP quickly if we aren't associated
	 * anymore. The timeout will be reset if the frame is ACKed by
	 * the AP.
	 */
	if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
		ifmgd->nullfunc_failed = false;
		ieee80211_send_nullfunc(sdata->local, sdata, 0);
	} else {
		ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
		ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0);
	}

	ifmgd->probe_send_count++;
	ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
	run_again(ifmgd, ifmgd->probe_timeout);
}

/*
 * Begin probing the AP after suspected beacon loss (@beacon true) or a
 * connection-monitor event. Sets the corresponding poll flag and sends
 * the first probe unless a poll is already in progress.
 */
static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
				   bool beacon)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	bool already = false;

	if (!ieee80211_sdata_running(sdata))
		return;

	if (sdata->local->scanning)
		return;

	if (sdata->local->tmp_channel)
		return;

	mutex_lock(&ifmgd->mtx);

	if (!ifmgd->associated)
		goto out;

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	if (beacon && net_ratelimit())
		printk(KERN_DEBUG "%s: detected beacon loss from AP "
		       "- sending probe request\n", sdata->name);
#endif

	/*
	 * The driver/our work has already reported this event or the
	 * connection monitoring has kicked in and we have already sent
	 * a probe request. Or maybe the AP died and the driver keeps
	 * reporting until we disassociate...
	 *
	 * In either case we have to ignore the current call to this
	 * function (except for setting the correct probe reason bit)
	 * because otherwise we would reset the timer every time and
	 * never check whether we received a probe response!
	 */
	if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
			    IEEE80211_STA_CONNECTION_POLL))
		already = true;

	if (beacon)
		ifmgd->flags |= IEEE80211_STA_BEACON_POLL;
	else
		ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL;

	if (already)
		goto out;

	mutex_lock(&sdata->local->iflist_mtx);
	ieee80211_recalc_ps(sdata->local, -1);
	mutex_unlock(&sdata->local->iflist_mtx);

	ifmgd->probe_send_count = 0;
	ieee80211_mgd_probe_ap_send(sdata);
 out:
	mutex_unlock(&ifmgd->mtx);
}

/*
 * Build (for the driver) a probe request skb directed at the currently
 * associated AP, using the SSID element from the association. Returns
 * NULL when not a station interface or not associated.
 */
struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
					  struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	struct sk_buff *skb;
	const u8 *ssid;

	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
		return NULL;

	ASSERT_MGD_MTX(ifmgd);

	if (!ifmgd->associated)
		return NULL;

	ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
	skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid,
					ssid + 2, ssid[1], NULL, 0);

	return skb;
}
EXPORT_SYMBOL(ieee80211_ap_probereq_get);

/*
 * Drop the connection after a confirmed loss: disassociate, recalc
 * idle state, and send a deauth frame (locally only -- sent outside
 * the MGD mutex, see comment below).
 */
static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	struct ieee80211_local *local = sdata->local;
	u8 bssid[ETH_ALEN];

	mutex_lock(&ifmgd->mtx);
	if (!ifmgd->associated) {
		mutex_unlock(&ifmgd->mtx);
		return;
	}

	memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);

	printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n",
	       sdata->name, bssid);

	ieee80211_set_disassoc(sdata, true, true);
	mutex_unlock(&ifmgd->mtx);

	mutex_lock(&local->mtx);
	ieee80211_recalc_idle(local);
	mutex_unlock(&local->mtx);
	/*
	 * must be outside lock due to cfg80211,
	 * but that's not a problem.
	 */
	ieee80211_send_deauth_disassoc(sdata, bssid,
				       IEEE80211_STYPE_DEAUTH,
				       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
				       NULL, true);
}

/*
 * Work item for driver-reported beacon loss: either drop the
 * connection immediately (hardware connection monitor) or start
 * software probing of the AP.
 */
void ieee80211_beacon_connection_loss_work(struct work_struct *work)
{
	struct ieee80211_sub_if_data *sdata =
		container_of(work, struct ieee80211_sub_if_data,
			     u.mgd.beacon_connection_loss_work);

	if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
		__ieee80211_connection_loss(sdata);
	else
		ieee80211_mgd_probe_ap(sdata, true);
}

/* Driver API: report beacon loss (software connection monitoring). */
void ieee80211_beacon_loss(struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_hw *hw = &sdata->local->hw;

	trace_api_beacon_loss(sdata);

	WARN_ON(hw->flags & IEEE80211_HW_CONNECTION_MONITOR);
	ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
}
EXPORT_SYMBOL(ieee80211_beacon_loss);

/* Driver API: report connection loss (hardware connection monitoring). */
void ieee80211_connection_loss(struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_hw *hw = &sdata->local->hw;

	trace_api_connection_loss(sdata);

	WARN_ON(!(hw->flags & IEEE80211_HW_CONNECTION_MONITOR));
	ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
}
EXPORT_SYMBOL(ieee80211_connection_loss);

/*
 * Handle a deauthentication frame from the current AP: tear down the
 * association and tell cfg80211.
 */
static enum rx_mgmt_action __must_check
ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
			 struct ieee80211_mgmt *mgmt, size_t len)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	const u8 *bssid = NULL;
	u16 reason_code;

	if (len < 24 + 2)
		return RX_MGMT_NONE;

	ASSERT_MGD_MTX(ifmgd);

	bssid = ifmgd->associated->bssid;

	reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);

	printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
			sdata->name, bssid, reason_code);

	ieee80211_set_disassoc(sdata, true, false);
	mutex_lock(&sdata->local->mtx);
	ieee80211_recalc_idle(sdata->local);
	mutex_unlock(&sdata->local->mtx);

	return RX_MGMT_CFG80211_DEAUTH;
}

/*
 * Handle a disassociation frame from the current AP: tear down the
 * association and tell cfg80211.
 */
static enum rx_mgmt_action __must_check
ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
			   struct ieee80211_mgmt *mgmt, size_t len)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	u16 reason_code;

	if (len < 24 + 2)
		return RX_MGMT_NONE;

	ASSERT_MGD_MTX(ifmgd);

	if (WARN_ON(!ifmgd->associated))
		return RX_MGMT_NONE;

	if (WARN_ON(memcmp(ifmgd->associated->bssid, mgmt->sa, ETH_ALEN)))
		return RX_MGMT_NONE;

	reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);

	printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
			sdata->name, mgmt->sa, reason_code);

	ieee80211_set_disassoc(sdata, true, false);
	mutex_lock(&sdata->local->mtx);
	ieee80211_recalc_idle(sdata->local);
	mutex_unlock(&sdata->local->mtx);
	return RX_MGMT_CFG80211_DISASSOC;
}

/*
 * Process a successful (Re)Association Response: parse AID, supported
 * rates and HT capabilities, create and insert the AP's STA entry,
 * apply WMM parameters, enable HT where possible, and switch the
 * interface into associated state. Returns false on any failure.
 */
static bool ieee80211_assoc_success(struct ieee80211_work *wk,
				    struct ieee80211_mgmt *mgmt, size_t len)
{
	struct ieee80211_sub_if_data *sdata = wk->sdata;
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_supported_band *sband;
	struct sta_info *sta;
	struct cfg80211_bss *cbss = wk->assoc.bss;
	u8 *pos;
	u32 rates, basic_rates;
	u16 capab_info, aid;
	struct ieee802_11_elems elems;
	struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
	u32 changed = 0;
	int i, j, err;
	bool have_higher_than_11mbit = false;
	u16 ap_ht_cap_flags;

	/* AssocResp and ReassocResp have identical structure */

	aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
	capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);

	if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
		printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
		       "set\n", sdata->name, aid);
	aid &= ~(BIT(15) | BIT(14));

	pos = mgmt->u.assoc_resp.variable;
	ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);

	if (!elems.supp_rates) {
		printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
		       sdata->name);
		return false;
	}

	ifmgd->aid = aid;

	sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
	if (!sta) {
		printk(KERN_DEBUG "%s: failed to alloc STA entry for"
		       " the AP\n", sdata->name);
		return false;
	}

	set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
			   WLAN_STA_ASSOC_AP);
	if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
		set_sta_flags(sta, WLAN_STA_AUTHORIZED);

	rates = 0;
	basic_rates = 0;
	sband = local->hw.wiphy->bands[wk->chan->band];

	/* collect supported/basic rate bitmaps from the SuppRates element */
	for (i = 0; i < elems.supp_rates_len; i++) {
		int rate = (elems.supp_rates[i] & 0x7f) * 5;
		bool is_basic = !!(elems.supp_rates[i] & 0x80);

		if (rate > 110)
			have_higher_than_11mbit = true;

		for (j = 0; j < sband->n_bitrates; j++) {
			if (sband->bitrates[j].bitrate == rate) {
				rates |= BIT(j);
				if (is_basic)
					basic_rates |= BIT(j);
				break;
			}
		}
	}

	/* ... and from the ExtSuppRates element */
	for (i = 0; i < elems.ext_supp_rates_len; i++) {
		int rate = (elems.ext_supp_rates[i] & 0x7f) * 5;
		bool is_basic = !!(elems.ext_supp_rates[i] & 0x80);

		if (rate > 110)
			have_higher_than_11mbit = true;

		for (j = 0; j < sband->n_bitrates; j++) {
			if (sband->bitrates[j].bitrate == rate) {
				rates |= BIT(j);
				if (is_basic)
					basic_rates |= BIT(j);
				break;
			}
		}
	}

	sta->sta.supp_rates[wk->chan->band] = rates;
	sdata->vif.bss_conf.basic_rates = basic_rates;

	/* cf. IEEE 802.11 9.2.12 */
	if (wk->chan->band == IEEE80211_BAND_2GHZ &&
	    have_higher_than_11mbit)
		sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
	else
		sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;

	if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
		ieee80211_ht_cap_ie_to_sta_ht_cap(sband,
				elems.ht_cap_elem, &sta->sta.ht_cap);

	ap_ht_cap_flags = sta->sta.ht_cap.cap;

	rate_control_rate_init(sta);

	if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
		set_sta_flags(sta, WLAN_STA_MFP);

	if (elems.wmm_param)
		set_sta_flags(sta, WLAN_STA_WME);

	err = sta_info_insert(sta);
	sta = NULL;
	if (err) {
		printk(KERN_DEBUG "%s: failed to insert STA entry for"
		       " the AP (error %d)\n", sdata->name, err);
		return false;
	}

	/*
	 * Always handle WMM once after association regardless
	 * of the first value the AP uses. Setting -1 here has
	 * that effect because the AP values is an unsigned
	 * 4-bit value.
	 */
	ifmgd->wmm_last_param_set = -1;

	if (elems.wmm_param)
		ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
					 elems.wmm_param_len);
	else
		ieee80211_set_wmm_default(sdata);

	local->oper_channel = wk->chan;

	if (elems.ht_info_elem && elems.wmm_param &&
	    (sdata->local->hw.queues >= 4) &&
	    !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
		changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
					       cbss->bssid, ap_ht_cap_flags);

	/* set AID and assoc capability,
	 * ieee80211_set_associated() will tell the driver */
	bss_conf->aid = aid;
	bss_conf->assoc_capability = capab_info;
	ieee80211_set_associated(sdata, cbss, changed);

	/*
	 * If we're using 4-addr mode, let the AP know that we're
	 * doing so, so that it can create the STA VLAN on its side
	 */
	if (ifmgd->use_4addr)
		ieee80211_send_4addr_nullfunc(local, sdata);

	/*
	 * Start timer to probe the connection to the AP now.
	 * Also start the timer that will detect beacon loss.
	 */
	ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
	ieee80211_sta_reset_beacon_monitor(sdata);

	return true;
}

/*
 * Feed scan/BSS information from a received beacon or probe response
 * into the BSS table; when associated with the sender, also handle
 * DTIM-period discovery (PS recalc) and channel-switch announcements.
 */
static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
				  struct ieee80211_mgmt *mgmt,
				  size_t len,
				  struct ieee80211_rx_status *rx_status,
				  struct ieee802_11_elems *elems,
				  bool beacon)
{
	struct ieee80211_local *local = sdata->local;
	int freq;
	struct ieee80211_bss *bss;
	struct ieee80211_channel *channel;
	bool need_ps = false;

	if (sdata->u.mgd.associated) {
		bss = (void *)sdata->u.mgd.associated->priv;
		/* not previously set so we may need to recalc */
		need_ps = !bss->dtim_period;
	}

	if (elems->ds_params && elems->ds_params_len == 1)
		freq = ieee80211_channel_to_frequency(elems->ds_params[0], rx_status->band);
	else
		freq = rx_status->freq;

	channel = ieee80211_get_channel(local->hw.wiphy, freq);

	if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
		return;

	bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
					channel, beacon);
	if (bss)
		ieee80211_rx_bss_put(local, bss);

	if (!sdata->u.mgd.associated)
		return;

	if (need_ps) {
		mutex_lock(&local->iflist_mtx);
		ieee80211_recalc_ps(local, -1);
		mutex_unlock(&local->iflist_mtx);
	}

	if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) &&
	    (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid,
							ETH_ALEN) == 0)) {
		struct ieee80211_channel_sw_ie *sw_elem =
			(struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
		ieee80211_sta_process_chanswitch(sdata, sw_elem,
						 bss, rx_status->mactime);
	}
}

/*
 * Handle a probe response addressed to us; if it came from the AP we
 * are associated with, treat it as a successful connection probe.
 */
static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
					 struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt = (void *)skb->data;
	struct ieee80211_if_managed *ifmgd;
	struct ieee80211_rx_status *rx_status = (void *) skb->cb;
	size_t baselen, len = skb->len;
	struct ieee802_11_elems elems;

	ifmgd = &sdata->u.mgd;

	ASSERT_MGD_MTX(ifmgd);

	if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
		return; /* ignore ProbeResp to foreign address */

	baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
	if (baselen > len)
		return;

	ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
				&elems);

	ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);

	if (ifmgd->associated &&
	    memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0)
		ieee80211_reset_ap_probe(sdata);
}

/*
 * This is the canonical list of information elements we care about,
 * the filter code also gives us all changes to the Microsoft OUI
 * (00:50:F2) vendor IE which is used for WMM which we need to track.
 *
 * We implement beacon filtering in software since that means we can
 * avoid processing the frame here and in cfg80211, and userspace
 * will not be able to tell whether the hardware supports it or not.
 *
 * XXX: This list needs to be dynamic -- userspace needs to be able to
 *	add items it requires. It also needs to be able to tell us to
 *	look out for other vendor IEs.
 */
static const u64 care_about_ies =
	(1ULL << WLAN_EID_COUNTRY) |
	(1ULL << WLAN_EID_ERP_INFO) |
	(1ULL << WLAN_EID_CHANNEL_SWITCH) |
	(1ULL << WLAN_EID_PWR_CONSTRAINT) |
	(1ULL << WLAN_EID_HT_CAPABILITY) |
	(1ULL << WLAN_EID_HT_INFORMATION);

/*
 * Process a beacon from the currently associated AP: track signal
 * average and CQM events, cancel probing, re-arm beacon-loss
 * monitoring, handle directed TIM for powersave, and -- when the IE
 * CRC changed -- re-evaluate WMM/ERP/HT/country information.
 */
static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
				     struct ieee80211_mgmt *mgmt,
				     size_t len,
				     struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
	size_t baselen;
	struct ieee802_11_elems elems;
	struct ieee80211_local *local = sdata->local;
	u32 changed = 0;
	bool erp_valid, directed_tim = false;
	u8 erp_value = 0;
	u32 ncrc;
	u8 *bssid;

	ASSERT_MGD_MTX(ifmgd);

	/* Process beacon from the current BSS */
	baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
	if (baselen > len)
		return;

	if (rx_status->freq != local->hw.conf.channel->center_freq)
		return;

	/*
	 * We might have received a number of frames, among them a
	 * disassoc frame and a beacon...
	 */
	if (!ifmgd->associated)
		return;

	bssid = ifmgd->associated->bssid;

	/*
	 * And in theory even frames from a different AP we were just
	 * associated to a split-second ago!
	 */
	if (memcmp(bssid, mgmt->bssid, ETH_ALEN) != 0)
		return;

	/* Track average RSSI from the Beacon frames of the current AP */
	ifmgd->last_beacon_signal = rx_status->signal;
	if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) {
		ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE;
		/* average kept scaled by 16 for precision */
		ifmgd->ave_beacon_signal = rx_status->signal * 16;
		ifmgd->last_cqm_event_signal = 0;
		ifmgd->count_beacon_signal = 1;
	} else {
		ifmgd->ave_beacon_signal =
			(IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 +
			 (16 - IEEE80211_SIGNAL_AVE_WEIGHT) *
			 ifmgd->ave_beacon_signal) / 16;
		ifmgd->count_beacon_signal++;
	}

	/* software CQM RSSI events when the hardware doesn't do them */
	if (bss_conf->cqm_rssi_thold &&
	    ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT &&
	    !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
		int sig = ifmgd->ave_beacon_signal / 16;
		int last_event = ifmgd->last_cqm_event_signal;
		int thold = bss_conf->cqm_rssi_thold;
		int hyst = bss_conf->cqm_rssi_hyst;
		if (sig < thold &&
		    (last_event == 0 || sig < last_event - hyst)) {
			ifmgd->last_cqm_event_signal = sig;
			ieee80211_cqm_rssi_notify(
				&sdata->vif,
				NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
				GFP_KERNEL);
		} else if (sig > thold &&
			   (last_event == 0 || sig > last_event + hyst)) {
			ifmgd->last_cqm_event_signal = sig;
			ieee80211_cqm_rssi_notify(
				&sdata->vif,
				NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
				GFP_KERNEL);
		}
	}

	/* a beacon arrived: any beacon-loss probing can be cancelled */
	if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s: cancelling probereq poll due "
			       "to a received beacon\n", sdata->name);
		}
#endif
		ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
		mutex_lock(&local->iflist_mtx);
		ieee80211_recalc_ps(local, -1);
		mutex_unlock(&local->iflist_mtx);
	}

	/*
	 * Push the beacon loss detection into the future since
	 * we are processing a beacon from the AP just now.
	 */
	ieee80211_sta_reset_beacon_monitor(sdata);

	/* CRC over the cared-about IEs: skip re-processing if unchanged */
	ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
	ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable,
					  len - baselen, &elems,
					  care_about_ies, ncrc);

	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
		directed_tim = ieee80211_check_tim(elems.tim, elems.tim_len,
						   ifmgd->aid);

	if (ncrc != ifmgd->beacon_crc || !ifmgd->beacon_crc_valid) {
		ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems,
				      true);

		ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
					 elems.wmm_param_len);
	}

	/* directed TIM: wake up to retrieve buffered frames */
	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
		if (directed_tim) {
			if (local->hw.conf.dynamic_ps_timeout > 0) {
				local->hw.conf.flags &= ~IEEE80211_CONF_PS;
				ieee80211_hw_config(local,
						    IEEE80211_CONF_CHANGE_PS);
				ieee80211_send_nullfunc(local, sdata, 0);
			} else {
				local->pspolling = true;

				/*
				 * Here is assumed that the driver will be
				 * able to send ps-poll frame and receive a
				 * response even though power save mode is
				 * enabled, but some drivers might require
				 * to disable power save here. This needs
				 * to be investigated.
				 */
				ieee80211_send_pspoll(local, sdata);
			}
		}
	}

	if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid)
		return;
	ifmgd->beacon_crc = ncrc;
	ifmgd->beacon_crc_valid = true;

	if (elems.erp_info && elems.erp_info_len >= 1) {
		erp_valid = true;
		erp_value = elems.erp_info[0];
	} else {
		erp_valid = false;
	}
	changed |= ieee80211_handle_bss_capability(sdata,
			le16_to_cpu(mgmt->u.beacon.capab_info),
			erp_valid, erp_value);

	if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param &&
	    !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) {
		struct sta_info *sta;
		struct ieee80211_supported_band *sband;
		u16 ap_ht_cap_flags;

		rcu_read_lock();

		sta = sta_info_get(sdata, bssid);
		if (WARN_ON(!sta)) {
			rcu_read_unlock();
			return;
		}

		sband = local->hw.wiphy->bands[local->hw.conf.channel->band];

		ieee80211_ht_cap_ie_to_sta_ht_cap(sband,
				elems.ht_cap_elem, &sta->sta.ht_cap);

		ap_ht_cap_flags = sta->sta.ht_cap.cap;

		rcu_read_unlock();

		changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
					       bssid, ap_ht_cap_flags);
	}

	/* Note: country IE parsing is done for us by cfg80211 */
	if (elems.country_elem) {
		/* TODO: IBSS also needs this */
		if (elems.pwr_constr_elem)
			ieee80211_handle_pwr_constr(sdata,
				le16_to_cpu(mgmt->u.probe_resp.capab_info),
				elems.pwr_constr_elem,
				elems.pwr_constr_elem_len);
	}

	ieee80211_bss_info_change_notify(sdata, changed);
}

/*
 * Dispatch a queued management frame. Frames from the associated AP go
 * to the per-subtype handlers; deauth/disassoc results are forwarded to
 * cfg80211 outside the MGD mutex. A deauth matching a pending
 * association work item aborts that work item.
 */
void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
				  struct sk_buff *skb)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	struct ieee80211_rx_status *rx_status;
	struct ieee80211_mgmt *mgmt;
	enum rx_mgmt_action rma = RX_MGMT_NONE;
	u16 fc;

	rx_status = (struct ieee80211_rx_status *) skb->cb;
	mgmt = (struct ieee80211_mgmt *) skb->data;
	fc = le16_to_cpu(mgmt->frame_control);

	mutex_lock(&ifmgd->mtx);

	if (ifmgd->associated &&
	    memcmp(ifmgd->associated->bssid, mgmt->bssid, ETH_ALEN) == 0) {
		switch (fc & IEEE80211_FCTL_STYPE) {
		case IEEE80211_STYPE_BEACON:
			ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
						 rx_status);
			break;
		case IEEE80211_STYPE_PROBE_RESP:
			ieee80211_rx_mgmt_probe_resp(sdata, skb);
			break;
		case IEEE80211_STYPE_DEAUTH:
			rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
			break;
		case IEEE80211_STYPE_DISASSOC:
			rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
			break;
		case IEEE80211_STYPE_ACTION:
			switch (mgmt->u.action.category) {
			case WLAN_CATEGORY_SPECTRUM_MGMT:
				ieee80211_sta_process_chanswitch(sdata,
						&mgmt->u.action.u.chan_switch.sw_elem,
						(void *)ifmgd->associated->priv,
						rx_status->mactime);
				break;
			}
		}
		mutex_unlock(&ifmgd->mtx);

		switch (rma) {
		case RX_MGMT_NONE:
			/* no action */
			break;
		case RX_MGMT_CFG80211_DEAUTH:
			cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
			break;
		case RX_MGMT_CFG80211_DISASSOC:
			cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt,
					       skb->len);
			break;
		default:
			WARN(1, "unexpected: %d", rma);
		}
		return;
	}

	mutex_unlock(&ifmgd->mtx);

	if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
	    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) {
		struct ieee80211_local *local = sdata->local;
		struct ieee80211_work *wk;

		mutex_lock(&local->mtx);
		list_for_each_entry(wk, &local->work_list, list) {
			if (wk->sdata != sdata)
				continue;

			if (wk->type != IEEE80211_WORK_ASSOC &&
			    wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
				continue;

			if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN))
				continue;
			if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN))
				continue;

			/*
			 * Printing the message only here means we can't
			 * spuriously print it, but it also means that it
			 * won't be printed when the frame comes in before
			 * we even tried to associate or in similar cases.
			 *
			 * Ultimately, I suspect cfg80211 should print the
			 * messages instead.
			 */
			printk(KERN_DEBUG
			       "%s: deauthenticated from %pM (Reason: %u)\n",
			       sdata->name, mgmt->bssid,
			       le16_to_cpu(mgmt->u.deauth.reason_code));

			list_del_rcu(&wk->list);
			free_work(wk);
			break;
		}
		mutex_unlock(&local->mtx);

		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
	}
}

/*
 * General MLME timer: kick the interface work; while quiescing, just
 * remember that it fired so it can be re-armed on restart.
 */
static void ieee80211_sta_timer(unsigned long data)
{
	struct ieee80211_sub_if_data *sdata =
		(struct ieee80211_sub_if_data *) data;
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	struct ieee80211_local *local = sdata->local;

	if (local->quiescing) {
		set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
		return;
	}

	ieee80211_queue_work(&local->hw, &sdata->work);
}

/*
 * Give up on the AP after failed probing: disassociate and send a
 * deauth frame. Called and returns with ifmgd->mtx held; the mutex is
 * dropped around the cfg80211-visible parts (see comment below).
 */
static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
					  u8 *bssid)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;

	ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
			  IEEE80211_STA_BEACON_POLL);

	ieee80211_set_disassoc(sdata, true, true);
	mutex_unlock(&ifmgd->mtx);
	mutex_lock(&local->mtx);
	ieee80211_recalc_idle(local);
	mutex_unlock(&local->mtx);
	/*
	 * must be outside lock due to cfg80211,
	 * but that's not a problem.
	 */
	ieee80211_send_deauth_disassoc(sdata, bssid,
			IEEE80211_STYPE_DEAUTH,
			WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
			NULL, true);
	mutex_lock(&ifmgd->mtx);
}

/*
 * Interface work for station mode: drive the AP-probing state machine
 * (nullfunc or probe-request based), retrying up to the per-method
 * limit and declaring connection loss when all tries fail.
 */
void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;

	/* then process the rest of the work */
	mutex_lock(&ifmgd->mtx);

	if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
			    IEEE80211_STA_CONNECTION_POLL) &&
	    ifmgd->associated) {
		u8 bssid[ETH_ALEN];
		int max_tries;

		memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);

		if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
			max_tries = max_nullfunc_tries;
		else
			max_tries = max_probe_tries;

		/* ACK received for nullfunc probing frame */
		if (!ifmgd->probe_send_count)
			ieee80211_reset_ap_probe(sdata);
		else if (ifmgd->nullfunc_failed) {
			if (ifmgd->probe_send_count < max_tries) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
				wiphy_debug(local->hw.wiphy,
					    "%s: No ack for nullfunc frame to"
					    " AP %pM, try %d/%i\n",
					    sdata->name, bssid,
					    ifmgd->probe_send_count, max_tries);
#endif
				ieee80211_mgd_probe_ap_send(sdata);
			} else {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
				wiphy_debug(local->hw.wiphy,
					    "%s: No ack for nullfunc frame to"
					    " AP %pM, disconnecting.\n",
					    sdata->name, bssid);
#endif
				ieee80211_sta_connection_lost(sdata, bssid);
			}
		} else if (time_is_after_jiffies(ifmgd->probe_timeout))
			run_again(ifmgd, ifmgd->probe_timeout);
		else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
			wiphy_debug(local->hw.wiphy,
				    "%s: Failed to send nullfunc to AP %pM"
				    " after %dms, disconnecting.\n",
				    sdata->name,
				    bssid, probe_wait_ms);
#endif
			ieee80211_sta_connection_lost(sdata, bssid);
		} else if (ifmgd->probe_send_count < max_tries) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
			wiphy_debug(local->hw.wiphy,
				    "%s: No probe response from AP %pM"
				    " after %dms, try %d/%i\n",
				    sdata->name,
				    bssid, probe_wait_ms,
				    ifmgd->probe_send_count, max_tries);
#endif
			ieee80211_mgd_probe_ap_send(sdata);
		} else {
			/*
			 * We actually lost the connection ... or did we?
			 * Let's make sure!
			 */
			wiphy_debug(local->hw.wiphy,
				    "%s: No probe response from AP %pM"
				    " after %dms, disconnecting.\n",
				    sdata->name,
				    bssid, probe_wait_ms);

			ieee80211_sta_connection_lost(sdata, bssid);
		}
	}

	mutex_unlock(&ifmgd->mtx);
}

/* Beacon monitor timer: report beacon loss via the work item. */
static void ieee80211_sta_bcn_mon_timer(unsigned long data)
{
	struct ieee80211_sub_if_data *sdata =
		(struct ieee80211_sub_if_data *) data;
	struct ieee80211_local *local = sdata->local;

	if (local->quiescing)
		return;

	ieee80211_queue_work(&sdata->local->hw,
			     &sdata->u.mgd.beacon_connection_loss_work);
}

/* Connection (data-idle) monitor timer: trigger AP probing. */
static void ieee80211_sta_conn_mon_timer(unsigned long data)
{
	struct ieee80211_sub_if_data *sdata =
		(struct ieee80211_sub_if_data *) data;
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	struct ieee80211_local *local = sdata->local;

	if (local->quiescing)
		return;

	ieee80211_queue_work(&local->hw, &ifmgd->monitor_work);
}

/* Work item behind the connection monitor timer. */
static void ieee80211_sta_monitor_work(struct work_struct *work)
{
	struct ieee80211_sub_if_data *sdata =
		container_of(work, struct ieee80211_sub_if_data,
			     u.mgd.monitor_work);

	ieee80211_mgd_probe_ap(sdata, false);
}

/* After scan/restart: clear poll state and re-kick monitoring + work. */
static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
{
	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
		sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL |
					IEEE80211_STA_CONNECTION_POLL);

		/* let's probe the connection once */
		ieee80211_queue_work(&sdata->local->hw,
			   &sdata->u.mgd.monitor_work);
		/* and do all the other regular work too */
		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
	}
}

#ifdef CONFIG_PM
/*
 * Suspend path: cancel pending MLME work and stop the timers,
 * remembering (via TMR_RUNNING_* bits) which ones must be re-armed
 * by ieee80211_sta_restart().
 */
void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;

	/*
	 * we need to use atomic bitops for the running bits
	 * only because both timers might fire at the same
	 * time -- the code here is properly synchronised.
	 */

	cancel_work_sync(&ifmgd->request_smps_work);

	cancel_work_sync(&ifmgd->beacon_connection_loss_work);
	if (del_timer_sync(&ifmgd->timer))
		set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);

	cancel_work_sync(&ifmgd->chswitch_work);
	if (del_timer_sync(&ifmgd->chswitch_timer))
		set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);

	cancel_work_sync(&ifmgd->monitor_work);
	/* these will just be re-established on connection */
	del_timer_sync(&ifmgd->conn_mon_timer);
	del_timer_sync(&ifmgd->bcn_mon_timer);
}

/* Resume path: re-arm whichever timers were running at quiesce time. */
void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;

	if (!ifmgd->associated)
		return;

	if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running))
		add_timer(&ifmgd->timer);
	if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
		add_timer(&ifmgd->chswitch_timer);
	ieee80211_sta_reset_beacon_monitor(sdata);
	ieee80211_restart_sta_timer(sdata);
}
#endif

/* interface setup */

/*
 * Initialize all station-mode MLME state for a new interface: work
 * items, timers, flags, the MGD mutex and the default SMPS mode.
 */
void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_managed *ifmgd;

	ifmgd = &sdata->u.mgd;
	INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work);
	INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
	INIT_WORK(&ifmgd->beacon_connection_loss_work,
		  ieee80211_beacon_connection_loss_work);
	INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_work);
	setup_timer(&ifmgd->timer, ieee80211_sta_timer,
		    (unsigned long) sdata);
	setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer,
		    (unsigned long) sdata);
	setup_timer(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer,
		    (unsigned long) sdata);
	setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer,
		    (unsigned long) sdata);

	ifmgd->flags = 0;

	mutex_init(&ifmgd->mtx);

	if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
		ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC;
	else
		ifmgd->req_smps = IEEE80211_SMPS_OFF;
}

/* scan finished notification */
void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
{
	struct ieee80211_sub_if_data *sdata = local->scan_sdata;

	/* Restart STA timers */
	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list)
		ieee80211_restart_sta_timer(sdata);
	rcu_read_unlock();
}

/*
 * PM-QoS notifier: a changed network-latency requirement may allow or
 * forbid powersave, so recalculate PS with the new latency budget.
 */
int ieee80211_max_network_latency(struct notifier_block *nb,
				  unsigned long data, void *dummy)
{
	s32 latency_usec = (s32) data;
	struct ieee80211_local *local =
		container_of(nb, struct ieee80211_local,
			     network_latency_notifier);

	mutex_lock(&local->iflist_mtx);
	ieee80211_recalc_ps(local, latency_usec);
	mutex_unlock(&local->iflist_mtx);

	return 0;
}

/* config hooks */

/*
 * Completion handler for probe/auth work: report timeouts and auth
 * results to cfg80211; after a successful probe, requeue the work as
 * an authentication attempt.
 */
static enum work_done_result
ieee80211_probe_auth_done(struct ieee80211_work *wk,
			  struct sk_buff *skb)
{
	if (!skb) {
		cfg80211_send_auth_timeout(wk->sdata->dev, wk->filter_ta);
		return WORK_DONE_DESTROY;
	}

	if (wk->type == IEEE80211_WORK_AUTH) {
		cfg80211_send_rx_auth(wk->sdata->dev, skb->data, skb->len);
		return WORK_DONE_DESTROY;
	}

	mutex_lock(&wk->sdata->u.mgd.mtx);
	ieee80211_rx_mgmt_probe_resp(wk->sdata, skb);
	mutex_unlock(&wk->sdata->u.mgd.mtx);

	wk->type = IEEE80211_WORK_AUTH;
	wk->probe_auth.tries = 0;
	return WORK_DONE_REQUEUE;
}

/*
 * cfg80211 authenticate hook: map the requested auth type to an
 * 802.11 auth algorithm and set up an authentication work item.
 * NOTE(review): this function continues beyond this chunk.
 */
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
		       struct cfg80211_auth_request *req)
{
	const u8 *ssid;
	struct ieee80211_work *wk;
	u16 auth_alg;

	if (req->local_state_change)
		return 0; /* no need to update mac80211 state */

	switch (req->auth_type) {
	case NL80211_AUTHTYPE_OPEN_SYSTEM:
		auth_alg = WLAN_AUTH_OPEN;
		break;
	case NL80211_AUTHTYPE_SHARED_KEY:
		if (IS_ERR(sdata->local->wep_tx_tfm))
			return -EOPNOTSUPP;
		auth_alg = WLAN_AUTH_SHARED_KEY;
		break;
	case NL80211_AUTHTYPE_FT:
		auth_alg = WLAN_AUTH_FT;
		break;
	case NL80211_AUTHTYPE_NETWORK_EAP:
		auth_alg = WLAN_AUTH_LEAP;
		break;
	default:
		return -EOPNOTSUPP;
	}

	wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL);
	if (!wk)
		return -ENOMEM;

	memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);

	if (req->ie && req->ie_len) {
		memcpy(wk->ie, req->ie, req->ie_len);
		wk->ie_len = req->ie_len;
	}

	if (req->key && req->key_len) {
wk->probe_auth.key_len = req->key_len; wk->probe_auth.key_idx = req->key_idx; memcpy(wk->probe_auth.key, req->key, req->key_len); } ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); memcpy(wk->probe_auth.ssid, ssid + 2, ssid[1]); wk->probe_auth.ssid_len = ssid[1]; wk->probe_auth.algorithm = auth_alg; wk->probe_auth.privacy = req->bss->capability & WLAN_CAPABILITY_PRIVACY; /* if we already have a probe, don't probe again */ if (req->bss->proberesp_ies) wk->type = IEEE80211_WORK_AUTH; else wk->type = IEEE80211_WORK_DIRECT_PROBE; wk->chan = req->bss->channel; wk->chan_type = NL80211_CHAN_NO_HT; wk->sdata = sdata; wk->done = ieee80211_probe_auth_done; ieee80211_add_work(wk); return 0; } static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk, struct sk_buff *skb) { struct ieee80211_mgmt *mgmt; struct ieee80211_rx_status *rx_status; struct ieee802_11_elems elems; u16 status; if (!skb) { cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta); return WORK_DONE_DESTROY; } if (wk->type == IEEE80211_WORK_ASSOC_BEACON_WAIT) { mutex_lock(&wk->sdata->u.mgd.mtx); rx_status = (void *) skb->cb; ieee802_11_parse_elems(skb->data + 24 + 12, skb->len - 24 - 12, &elems); ieee80211_rx_bss_info(wk->sdata, (void *)skb->data, skb->len, rx_status, &elems, true); mutex_unlock(&wk->sdata->u.mgd.mtx); wk->type = IEEE80211_WORK_ASSOC; /* not really done yet */ return WORK_DONE_REQUEUE; } mgmt = (void *)skb->data; status = le16_to_cpu(mgmt->u.assoc_resp.status_code); if (status == WLAN_STATUS_SUCCESS) { mutex_lock(&wk->sdata->u.mgd.mtx); if (!ieee80211_assoc_success(wk, mgmt, skb->len)) { mutex_unlock(&wk->sdata->u.mgd.mtx); /* oops -- internal error -- send timeout for now */ cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta); return WORK_DONE_DESTROY; } mutex_unlock(&wk->sdata->u.mgd.mtx); } cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len); return WORK_DONE_DESTROY; } int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, struct 
cfg80211_assoc_request *req) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_bss *bss = (void *)req->bss->priv; struct ieee80211_work *wk; const u8 *ssid; int i; mutex_lock(&ifmgd->mtx); if (ifmgd->associated) { if (!req->prev_bssid || memcmp(req->prev_bssid, ifmgd->associated->bssid, ETH_ALEN)) { /* * We are already associated and the request was not a * reassociation request from the current BSS, so * reject it. */ mutex_unlock(&ifmgd->mtx); return -EALREADY; } /* Trying to reassociate - clear previous association state */ ieee80211_set_disassoc(sdata, true, false); } mutex_unlock(&ifmgd->mtx); wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL); if (!wk) return -ENOMEM; ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N; ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; ifmgd->beacon_crc_valid = false; for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP || req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) ifmgd->flags |= IEEE80211_STA_DISABLE_11N; if (req->ie && req->ie_len) { memcpy(wk->ie, req->ie, req->ie_len); wk->ie_len = req->ie_len; } else wk->ie_len = 0; wk->assoc.bss = req->bss; memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN); /* new association always uses requested smps mode */ if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) { if (ifmgd->powersave) ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC; else ifmgd->ap_smps = IEEE80211_SMPS_OFF; } else ifmgd->ap_smps = ifmgd->req_smps; wk->assoc.smps = ifmgd->ap_smps; /* * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode. * We still associate in non-HT mode (11a/b/g) if any one of these * ciphers is configured as pairwise. * We can set this to true for non-11n hardware, that'll be checked * separately along with the peer capabilities. 
*/ wk->assoc.use_11n = !(ifmgd->flags & IEEE80211_STA_DISABLE_11N); wk->assoc.capability = req->bss->capability; wk->assoc.wmm_used = bss->wmm_used; wk->assoc.supp_rates = bss->supp_rates; wk->assoc.supp_rates_len = bss->supp_rates_len; wk->assoc.ht_information_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION); if (bss->wmm_used && bss->uapsd_supported && (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) { wk->assoc.uapsd_used = true; ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED; } else { wk->assoc.uapsd_used = false; ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED; } ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); memcpy(wk->assoc.ssid, ssid + 2, ssid[1]); wk->assoc.ssid_len = ssid[1]; if (req->prev_bssid) memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN); wk->chan = req->bss->channel; wk->chan_type = NL80211_CHAN_NO_HT; wk->sdata = sdata; wk->done = ieee80211_assoc_done; if (!bss->dtim_period && sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) wk->type = IEEE80211_WORK_ASSOC_BEACON_WAIT; else wk->type = IEEE80211_WORK_ASSOC; if (req->use_mfp) { ifmgd->mfp = IEEE80211_MFP_REQUIRED; ifmgd->flags |= IEEE80211_STA_MFP_ENABLED; } else { ifmgd->mfp = IEEE80211_MFP_DISABLED; ifmgd->flags &= ~IEEE80211_STA_MFP_ENABLED; } if (req->crypto.control_port) ifmgd->flags |= IEEE80211_STA_CONTROL_PORT; else ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT; sdata->control_port_protocol = req->crypto.control_port_ethertype; sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt; ieee80211_add_work(wk); return 0; } int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, struct cfg80211_deauth_request *req, void *cookie) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_work *wk; u8 bssid[ETH_ALEN]; bool assoc_bss = false; mutex_lock(&ifmgd->mtx); memcpy(bssid, req->bss->bssid, ETH_ALEN); if (ifmgd->associated == req->bss) { ieee80211_set_disassoc(sdata, false, true); 
mutex_unlock(&ifmgd->mtx); assoc_bss = true; } else { bool not_auth_yet = false; mutex_unlock(&ifmgd->mtx); mutex_lock(&local->mtx); list_for_each_entry(wk, &local->work_list, list) { if (wk->sdata != sdata) continue; if (wk->type != IEEE80211_WORK_DIRECT_PROBE && wk->type != IEEE80211_WORK_AUTH && wk->type != IEEE80211_WORK_ASSOC && wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT) continue; if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN)) continue; not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE; list_del_rcu(&wk->list); free_work(wk); break; } mutex_unlock(&local->mtx); /* * If somebody requests authentication and we haven't * sent out an auth frame yet there's no need to send * out a deauth frame either. If the state was PROBE, * then this is the case. If it's AUTH we have sent a * frame, and if it's IDLE we have completed the auth * process already. */ if (not_auth_yet) { __cfg80211_auth_canceled(sdata->dev, bssid); return 0; } } printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n", sdata->name, bssid, req->reason_code); ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, req->reason_code, cookie, !req->local_state_change); if (assoc_bss) sta_info_destroy_addr(sdata, bssid); mutex_lock(&sdata->local->mtx); ieee80211_recalc_idle(sdata->local); mutex_unlock(&sdata->local->mtx); return 0; } int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, struct cfg80211_disassoc_request *req, void *cookie) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 bssid[ETH_ALEN]; mutex_lock(&ifmgd->mtx); /* * cfg80211 should catch this ... but it's racy since * we can receive a disassoc frame, process it, hand it * to cfg80211 while that's in a locked section already * trying to tell us that the user wants to disconnect. 
*/ if (ifmgd->associated != req->bss) { mutex_unlock(&ifmgd->mtx); return -ENOLINK; } printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n", sdata->name, req->bss->bssid, req->reason_code); memcpy(bssid, req->bss->bssid, ETH_ALEN); ieee80211_set_disassoc(sdata, false, true); mutex_unlock(&ifmgd->mtx); ieee80211_send_deauth_disassoc(sdata, req->bss->bssid, IEEE80211_STYPE_DISASSOC, req->reason_code, cookie, !req->local_state_change); sta_info_destroy_addr(sdata, bssid); mutex_lock(&sdata->local->mtx); ieee80211_recalc_idle(sdata->local); mutex_unlock(&sdata->local->mtx); return 0; } void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, enum nl80211_cqm_rssi_threshold_event rssi_event, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); trace_api_cqm_rssi_notify(sdata, rssi_event); cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp); } EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
gpl-2.0
infected-lp/kernel_sony_msm8974
drivers/devfreq/governor_msm_adreno_tz.c
1034
8833
/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/errno.h> #include <linux/module.h> #include <linux/devfreq.h> #include <linux/math64.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/ftrace.h> #include <linux/msm_adreno_devfreq.h> #include <mach/scm.h> #include "governor.h" static DEFINE_SPINLOCK(tz_lock); /* * FLOOR is 5msec to capture up to 3 re-draws * per frame for 60fps content. */ #define FLOOR 5000 #define LONG_FLOOR 50000 #define HIST 5 #define TARGET 80 #define CAP 75 /* * CEILING is 50msec, larger than any standard * frame length, but less than the idle timer. */ #define CEILING 50000 #define TZ_RESET_ID 0x3 #define TZ_UPDATE_ID 0x4 #define TZ_INIT_ID 0x6 #define TAG "msm_adreno_tz: " /* Trap into the TrustZone, and call funcs there. 
*/ static int __secure_tz_entry2(u32 cmd, u32 val1, u32 val2) { int ret; spin_lock(&tz_lock); /* sync memory before sending the commands to tz*/ __iowmb(); ret = scm_call_atomic2(SCM_SVC_IO, cmd, val1, val2); spin_unlock(&tz_lock); return ret; } static int __secure_tz_entry3(u32 cmd, u32 val1, u32 val2, u32 val3) { int ret; spin_lock(&tz_lock); /* sync memory before sending the commands to tz*/ __iowmb(); ret = scm_call_atomic3(SCM_SVC_IO, cmd, val1, val2, val3); spin_unlock(&tz_lock); return ret; } static void _update_cutoff(struct devfreq_msm_adreno_tz_data *priv, unsigned int norm_max) { int i; priv->bus.max = norm_max; for (i = 0; i < priv->bus.num; i++) { priv->bus.up[i] = priv->bus.p_up[i] * norm_max / 100; priv->bus.down[i] = priv->bus.p_down[i] * norm_max / 100; } } static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq, u32 *flag) { int result = 0; struct devfreq_msm_adreno_tz_data *priv = devfreq->data; struct devfreq_dev_status stats; struct xstats b; int val, level = 0; int act_level; int norm_cycles; int gpu_percent; if (priv->bus.num) stats.private_data = &b; else stats.private_data = NULL; result = devfreq->profile->get_dev_status(devfreq->dev.parent, &stats); if (result) { pr_err(TAG "get_status failed %d\n", result); return result; } *freq = stats.current_frequency; *flag = 0; priv->bin.total_time += stats.total_time; priv->bin.busy_time += stats.busy_time; if (priv->bus.num) { priv->bus.total_time += stats.total_time; priv->bus.gpu_time += stats.busy_time; priv->bus.ram_time += b.ram_time; priv->bus.ram_time += b.ram_wait; } /* * Do not waste CPU cycles running this algorithm if * the GPU just started, or if less than FLOOR time * has passed since the last run. 
*/ if ((stats.total_time == 0) || (priv->bin.total_time < FLOOR)) { return 1; } level = devfreq_get_freq_level(devfreq, stats.current_frequency); if (level < 0) { pr_err(TAG "bad freq %ld\n", stats.current_frequency); return level; } /* * If there is an extended block of busy processing, * increase frequency. Otherwise run the normal algorithm. */ if (priv->bin.busy_time > CEILING) { val = -1 * level; } else { val = __secure_tz_entry3(TZ_UPDATE_ID, level, priv->bin.total_time, priv->bin.busy_time); } priv->bin.total_time = 0; priv->bin.busy_time = 0; /* * If the decision is to move to a different level, make sure the GPU * frequency changes. */ if (val) { level += val; level = max(level, 0); level = min_t(int, level, devfreq->profile->max_state); goto clear; } if (priv->bus.total_time < LONG_FLOOR) goto end; norm_cycles = (unsigned int)priv->bus.ram_time / (unsigned int) priv->bus.total_time; gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) / (unsigned int) priv->bus.total_time; /* * If there's a new high watermark, update the cutoffs and send the * FAST hint. Otherwise check the current value against the current * cutoffs. */ if (norm_cycles > priv->bus.max) { _update_cutoff(priv, norm_cycles); *flag = DEVFREQ_FLAG_FAST_HINT; } else { /* * Normalize by gpu_time unless it is a small fraction of * the total time interval. */ norm_cycles = (100 * norm_cycles) / TARGET; act_level = priv->bus.index[level] + b.mod; act_level = (act_level < 0) ? 0 : act_level; act_level = (act_level >= priv->bus.num) ? 
(priv->bus.num - 1) : act_level; if (norm_cycles > priv->bus.up[act_level] && gpu_percent > CAP) *flag = DEVFREQ_FLAG_FAST_HINT; else if (norm_cycles < priv->bus.down[act_level] && level) *flag = DEVFREQ_FLAG_SLOW_HINT; } clear: priv->bus.total_time = 0; priv->bus.gpu_time = 0; priv->bus.ram_time = 0; end: *freq = devfreq->profile->freq_table[level]; return 0; } static int tz_notify(struct notifier_block *nb, unsigned long type, void *devp) { int result = 0; struct devfreq *devfreq = devp; switch (type) { case ADRENO_DEVFREQ_NOTIFY_IDLE: case ADRENO_DEVFREQ_NOTIFY_RETIRE: mutex_lock(&devfreq->lock); result = update_devfreq(devfreq); mutex_unlock(&devfreq->lock); break; /* ignored by this governor */ case ADRENO_DEVFREQ_NOTIFY_SUBMIT: default: break; } return notifier_from_errno(result); } static int tz_start(struct devfreq *devfreq) { struct devfreq_msm_adreno_tz_data *priv; unsigned int tz_pwrlevels[MSM_ADRENO_MAX_PWRLEVELS + 1]; unsigned int t1, t2 = 2 * HIST; int i, out, ret; if (devfreq->data == NULL) { pr_err(TAG "data is required for this governor\n"); return -EINVAL; } priv = devfreq->data; priv->nb.notifier_call = tz_notify; out = 1; if (devfreq->profile->max_state < MSM_ADRENO_MAX_PWRLEVELS) { for (i = 0; i < devfreq->profile->max_state; i++) tz_pwrlevels[out++] = devfreq->profile->freq_table[i]; tz_pwrlevels[0] = i; } else { pr_err(TAG "tz_pwrlevels[] is too short\n"); return -EINVAL; } ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels, sizeof(tz_pwrlevels), NULL, 0); if (ret != 0) pr_err(TAG "tz_init failed\n"); /* Set up the cut-over percentages for the bus calculation. */ if (priv->bus.num) { for (i = 0; i < priv->bus.num; i++) { t1 = (u32)(100 * priv->bus.ib[i]) / (u32)priv->bus.ib[priv->bus.num - 1]; priv->bus.p_up[i] = t1 - HIST; priv->bus.p_down[i] = t2 - 2 * HIST; t2 = t1; } /* Set the upper-most and lower-most bounds correctly. */ priv->bus.p_down[0] = 0; priv->bus.p_down[1] = (priv->bus.p_down[1] > (2 * HIST)) ? 
priv->bus.p_down[1] : (2 * HIST); if (priv->bus.num - 1 >= 0) priv->bus.p_up[priv->bus.num - 1] = 100; _update_cutoff(priv, priv->bus.max); } return kgsl_devfreq_add_notifier(devfreq->dev.parent, &priv->nb); } static int tz_stop(struct devfreq *devfreq) { struct devfreq_msm_adreno_tz_data *priv = devfreq->data; kgsl_devfreq_del_notifier(devfreq->dev.parent, &priv->nb); return 0; } static int tz_resume(struct devfreq *devfreq) { struct devfreq_dev_profile *profile = devfreq->profile; unsigned long freq; freq = profile->initial_freq; return profile->target(devfreq->dev.parent, &freq, 0); } static int tz_suspend(struct devfreq *devfreq) { struct devfreq_msm_adreno_tz_data *priv = devfreq->data; __secure_tz_entry2(TZ_RESET_ID, 0, 0); priv->bin.total_time = 0; priv->bin.busy_time = 0; priv->bus.total_time = 0; priv->bus.gpu_time = 0; priv->bus.ram_time = 0; return 0; } static int tz_handler(struct devfreq *devfreq, unsigned int event, void *data) { int result; BUG_ON(devfreq == NULL); switch (event) { case DEVFREQ_GOV_START: result = tz_start(devfreq); break; case DEVFREQ_GOV_STOP: result = tz_stop(devfreq); break; case DEVFREQ_GOV_SUSPEND: result = tz_suspend(devfreq); break; case DEVFREQ_GOV_RESUME: result = tz_resume(devfreq); break; case DEVFREQ_GOV_INTERVAL: /* ignored, this governor doesn't use polling */ default: result = 0; break; } return result; } static struct devfreq_governor msm_adreno_tz = { .name = "msm-adreno-tz", .get_target_freq = tz_get_target_freq, .event_handler = tz_handler, }; static int __init msm_adreno_tz_init(void) { return devfreq_add_governor(&msm_adreno_tz); } subsys_initcall(msm_adreno_tz_init); static void __exit msm_adreno_tz_exit(void) { int ret; ret = devfreq_remove_governor(&msm_adreno_tz); if (ret) pr_err(TAG "failed to remove governor %d\n", ret); return; } module_exit(msm_adreno_tz_exit); MODULE_LICENSE("GPLv2");
gpl-2.0
aznair/mptcp
drivers/net/ethernet/natsemi/natsemi.c
1290
95083
/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */ /* Written/copyright 1999-2001 by Donald Becker. Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com) Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com) Portions copyright 2004 Harald Welte <laforge@gnumonks.org> This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL. License for under other terms may be available. Contact the original author for details. The original author may be reached as becker@scyld.com, or at Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 Support information and updates available at http://www.scyld.com/network/netsemi.html [link no longer provides useful info -jgarzik] TODO: * big endian support with CFG:BEM instead of cpu_to_le32 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/delay.h> #include <linux/rtnetlink.h> #include <linux/mii.h> #include <linux/crc32.h> #include <linux/bitops.h> #include <linux/prefetch.h> #include <asm/processor.h> /* Processor type for cache alignment. */ #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> #define DRV_NAME "natsemi" #define DRV_VERSION "2.1" #define DRV_RELDATE "Sept 11, 2006" #define RX_OFFSET 2 /* Updated to recommendations in pci-skeleton v2.03. 
*/ /* The user-configurable values. These may be modified when a driver module is loaded.*/ #define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \ NETIF_MSG_LINK | \ NETIF_MSG_WOL | \ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) static int debug = -1; static int mtu; /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). This chip uses a 512 element hash table based on the Ethernet CRC. */ static const int multicast_filter_limit = 100; /* Set the copy breakpoint for the copy-only-tiny-frames scheme. Setting to > 1518 effectively disables this feature. */ static int rx_copybreak; static int dspcfg_workaround = 1; /* Used to pass the media type, etc. Both 'options[]' and 'full_duplex[]' should exist for driver interoperability. The media type is usually passed in 'options[]'. */ #define MAX_UNITS 8 /* More are supported, limit only on options */ static int options[MAX_UNITS]; static int full_duplex[MAX_UNITS]; /* Operational parameters that are set at compile time. */ /* Keep the ring sizes a power of two for compile efficiency. The compiler will convert <unsigned>'%'<2^N> into a bit mask. Making the Tx ring too large decreases the effectiveness of channel bonding and packet priority. There are no ill effects from too-large receive rings. */ #define TX_RING_SIZE 16 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */ #define RX_RING_SIZE 32 /* Operational parameters that usually are not changed. */ /* Time in jiffies before concluding the transmitter is hung. 
*/ #define TX_TIMEOUT (2*HZ) #define NATSEMI_HW_TIMEOUT 400 #define NATSEMI_TIMER_FREQ 5*HZ #define NATSEMI_PG0_NREGS 64 #define NATSEMI_RFDR_NREGS 8 #define NATSEMI_PG1_NREGS 4 #define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \ NATSEMI_PG1_NREGS) #define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */ #define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32)) /* Buffer sizes: * The nic writes 32-bit values, even if the upper bytes of * a 32-bit value are beyond the end of the buffer. */ #define NATSEMI_HEADERS 22 /* 2*mac,type,vlan,crc */ #define NATSEMI_PADDING 16 /* 2 bytes should be sufficient */ #define NATSEMI_LONGPKT 1518 /* limit for normal packets */ #define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */ /* These identify the driver base version and may not be removed. */ static const char version[] = KERN_INFO DRV_NAME " dp8381x driver, version " DRV_VERSION ", " DRV_RELDATE "\n" " originally by Donald Becker <becker@scyld.com>\n" " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n"; MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver"); MODULE_LICENSE("GPL"); module_param(mtu, int, 0); module_param(debug, int, 0); module_param(rx_copybreak, int, 0); module_param(dspcfg_workaround, int, 0); module_param_array(options, int, NULL, 0); module_param_array(full_duplex, int, NULL, 0); MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)"); MODULE_PARM_DESC(debug, "DP8381x default debug level"); MODULE_PARM_DESC(rx_copybreak, "DP8381x copy breakpoint for copy-only-tiny-frames"); MODULE_PARM_DESC(dspcfg_workaround, "DP8381x: control DspCfg workaround"); MODULE_PARM_DESC(options, "DP8381x: Bits 0-3: media type, bit 17: full duplex"); MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)"); /* Theory of Operation I. Board Compatibility This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC. 
It also works with other chips in in the DP83810 series. II. Board-specific settings This driver requires the PCI interrupt line to be valid. It honors the EEPROM-set values. III. Driver operation IIIa. Ring buffers This driver uses two statically allocated fixed-size descriptor lists formed into rings by a branch from the final descriptor to the beginning of the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. The NatSemi design uses a 'next descriptor' pointer that the driver forms into a list. IIIb/c. Transmit/Receive Structure This driver uses a zero-copy receive and transmit scheme. The driver allocates full frame size skbuffs for the Rx ring buffers at open() time and passes the skb->data field to the chip as receive data buffers. When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is copied to the new skbuff. When the incoming frame is larger, the skbuff is passed directly up the protocol stack. Buffers consumed this way are replaced by newly allocated skbuffs in a later phase of receives. The RX_COPYBREAK value is chosen to trade-off the memory wasted by using a full-sized skbuff for small frames vs. the copying costs of larger frames. New boards are typically used in generously configured machines and the underfilled buffers have negligible impact compared to the benefit of a single allocation size, so the default value of zero results in never copying packets. When copying is done, the cost is usually mitigated by using a combined copy/checksum routine. Copying also preloads the cache, which is most useful with small frames. A subtle aspect of the operation is that unaligned buffers are not permitted by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't longword aligned for further processing. On copies frames are put into the skbuff at an offset of "+2", 16-byte aligning the IP header. IIId. 
Synchronization Most operations are synchronized on the np->lock irq spinlock, except the receive and transmit paths which are synchronised using a combination of hardware descriptor ownership, disabling interrupts and NAPI poll scheduling. IVb. References http://www.scyld.com/expert/100mbps.html http://www.scyld.com/expert/NWay.html Datasheet is available from: http://www.national.com/pf/DP/DP83815.html IVc. Errata None characterised. */ /* * Support for fibre connections on Am79C874: * This phy needs a special setup when connected to a fibre cable. * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf */ #define PHYID_AM79C874 0x0022561b enum { MII_MCTRL = 0x15, /* mode control register */ MII_FX_SEL = 0x0001, /* 100BASE-FX (fiber) */ MII_EN_SCRM = 0x0004, /* enable scrambler (tp) */ }; enum { NATSEMI_FLAG_IGNORE_PHY = 0x1, }; /* array of board data directly indexed by pci_tbl[x].driver_data */ static struct { const char *name; unsigned long flags; unsigned int eeprom_size; } natsemi_pci_info[] = { { "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 }, { "NatSemi DP8381[56]", 0, 24 }, }; static const struct pci_device_id natsemi_pci_tbl[] = { { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 }, { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, { } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl); /* Offsets to the device registers. Unlike software-only systems, device drivers interact with complex hardware. It's not useful to define symbolic names for every register bit in the device. 
*/ enum register_offsets { ChipCmd = 0x00, ChipConfig = 0x04, EECtrl = 0x08, PCIBusCfg = 0x0C, IntrStatus = 0x10, IntrMask = 0x14, IntrEnable = 0x18, IntrHoldoff = 0x1C, /* DP83816 only */ TxRingPtr = 0x20, TxConfig = 0x24, RxRingPtr = 0x30, RxConfig = 0x34, ClkRun = 0x3C, WOLCmd = 0x40, PauseCmd = 0x44, RxFilterAddr = 0x48, RxFilterData = 0x4C, BootRomAddr = 0x50, BootRomData = 0x54, SiliconRev = 0x58, StatsCtrl = 0x5C, StatsData = 0x60, RxPktErrs = 0x60, RxMissed = 0x68, RxCRCErrs = 0x64, BasicControl = 0x80, BasicStatus = 0x84, AnegAdv = 0x90, AnegPeer = 0x94, PhyStatus = 0xC0, MIntrCtrl = 0xC4, MIntrStatus = 0xC8, PhyCtrl = 0xE4, /* These are from the spec, around page 78... on a separate table. * The meaning of these registers depend on the value of PGSEL. */ PGSEL = 0xCC, PMDCSR = 0xE4, TSTDAT = 0xFC, DSPCFG = 0xF4, SDCFG = 0xF8 }; /* the values for the 'magic' registers above (PGSEL=1) */ #define PMDCSR_VAL 0x189c /* enable preferred adaptation circuitry */ #define TSTDAT_VAL 0x0 #define DSPCFG_VAL 0x5040 #define SDCFG_VAL 0x008c /* set voltage thresholds for Signal Detect */ #define DSPCFG_LOCK 0x20 /* coefficient lock bit in DSPCFG */ #define DSPCFG_COEF 0x1000 /* see coefficient (in TSTDAT) bit in DSPCFG */ #define TSTDAT_FIXED 0xe8 /* magic number for bad coefficients */ /* misc PCI space registers */ enum pci_register_offsets { PCIPM = 0x44, }; enum ChipCmd_bits { ChipReset = 0x100, RxReset = 0x20, TxReset = 0x10, RxOff = 0x08, RxOn = 0x04, TxOff = 0x02, TxOn = 0x01, }; enum ChipConfig_bits { CfgPhyDis = 0x200, CfgPhyRst = 0x400, CfgExtPhy = 0x1000, CfgAnegEnable = 0x2000, CfgAneg100 = 0x4000, CfgAnegFull = 0x8000, CfgAnegDone = 0x8000000, CfgFullDuplex = 0x20000000, CfgSpeed100 = 0x40000000, CfgLink = 0x80000000, }; enum EECtrl_bits { EE_ShiftClk = 0x04, EE_DataIn = 0x01, EE_ChipSelect = 0x08, EE_DataOut = 0x02, MII_Data = 0x10, MII_Write = 0x20, MII_ShiftClk = 0x40, }; enum PCIBusCfg_bits { EepromReload = 0x4, }; /* Bits in the interrupt status/mask 
   registers. */
enum IntrStatus_bits {
	IntrRxDone		= 0x0001,
	IntrRxIntr		= 0x0002,
	IntrRxErr		= 0x0004,
	IntrRxEarly		= 0x0008,
	IntrRxIdle		= 0x0010,
	IntrRxOverrun		= 0x0020,
	IntrTxDone		= 0x0040,
	IntrTxIntr		= 0x0080,
	IntrTxErr		= 0x0100,
	IntrTxIdle		= 0x0200,
	IntrTxUnderrun		= 0x0400,
	StatsMax		= 0x0800,
	SWInt			= 0x1000,
	WOLPkt			= 0x2000,
	LinkChange		= 0x4000,
	IntrHighBits		= 0x8000,
	RxStatusFIFOOver	= 0x10000,
	IntrPCIErr		= 0xf00000,
	RxResetDone		= 0x1000000,
	TxResetDone		= 0x2000000,
	IntrAbnormalSummary	= 0xCD20,	/* mask of "abnormal" events above */
};

/*
 * Default Interrupts:
 * Rx OK, Rx Packet Error, Rx Overrun,
 * Tx OK, Tx Packet Error, Tx Underrun,
 * MIB Service, Phy Interrupt, High Bits,
 * Rx Status FIFO overrun,
 * Received Target Abort, Received Master Abort,
 * Signalled System Error, Received Parity Error
 */
#define DEFAULT_INTR 0x00f1cd65

/* TxConfig: DMA burst size, FIFO fill/drain thresholds, loopback, etc. */
enum TxConfig_bits {
	TxDrthMask		= 0x3f,
	TxFlthMask		= 0x3f00,
	TxMxdmaMask		= 0x700000,
	TxMxdma_512		= 0x0,
	TxMxdma_4		= 0x100000,
	TxMxdma_8		= 0x200000,
	TxMxdma_16		= 0x300000,
	TxMxdma_32		= 0x400000,
	TxMxdma_64		= 0x500000,
	TxMxdma_128		= 0x600000,
	TxMxdma_256		= 0x700000,
	TxCollRetry		= 0x800000,
	TxAutoPad		= 0x10000000,
	TxMacLoop		= 0x20000000,
	TxHeartIgn		= 0x40000000,
	TxCarrierIgn		= 0x80000000
};

/*
 * Tx Configuration:
 * - 256 byte DMA burst length
 * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free)
 * - 64 bytes initial drain threshold (i.e. begin actual transmission
 *   when 64 byte are in the fifo)
 * - on tx underruns, increase drain threshold by 64.
 * - at most use a drain threshold of 1472 bytes: The sum of the fill
 *   threshold and the drain threshold must be less than 2016 bytes.
 * */
/* thresholds are in hardware units of 32 bytes (tx) / 8 bytes (rx) */
#define TX_FLTH_VAL		((512/32) << 8)
#define TX_DRTH_VAL_START	(64/32)
#define TX_DRTH_VAL_INC 	2
#define TX_DRTH_VAL_LIMIT	(1472/32)

/* RxConfig: DMA burst size, drain threshold and packet-acceptance policy. */
enum RxConfig_bits {
	RxDrthMask		= 0x3e,
	RxMxdmaMask		= 0x700000,
	RxMxdma_512		= 0x0,
	RxMxdma_4		= 0x100000,
	RxMxdma_8		= 0x200000,
	RxMxdma_16		= 0x300000,
	RxMxdma_32		= 0x400000,
	RxMxdma_64		= 0x500000,
	RxMxdma_128		= 0x600000,
	RxMxdma_256		= 0x700000,
	RxAcceptLong		= 0x8000000,
	RxAcceptTx		= 0x10000000,	/* accept own transmissions (full duplex) */
	RxAcceptRunt		= 0x40000000,
	RxAcceptErr		= 0x80000000
};
#define RX_DRTH_VAL		(128/8)

enum ClkRun_bits {
	PMEEnable		= 0x100,
	PMEStatus		= 0x8000,
};

/* Wake-on-LAN command/status bits: Wake* arm a wake source, Woke* report
 * which source fired. */
enum WolCmd_bits {
	WakePhy			= 0x1,
	WakeUnicast		= 0x2,
	WakeMulticast		= 0x4,
	WakeBroadcast		= 0x8,
	WakeArp			= 0x10,
	WakePMatch0		= 0x20,
	WakePMatch1		= 0x40,
	WakePMatch2		= 0x80,
	WakePMatch3		= 0x100,
	WakeMagic		= 0x200,
	WakeMagicSecure		= 0x400,
	SecureHack		= 0x100000,
	WokePhy			= 0x400000,
	WokeUnicast		= 0x800000,
	WokeMulticast		= 0x1000000,
	WokeBroadcast		= 0x2000000,
	WokeArp			= 0x4000000,
	WokePMatch0		= 0x8000000,
	WokePMatch1		= 0x10000000,
	WokePMatch2		= 0x20000000,
	WokePMatch3		= 0x40000000,
	WokeMagic		= 0x80000000,
	WakeOptsSummary		= 0x7ff
};

/* Receive filter control: address window plus acceptance-mode bits. */
enum RxFilterAddr_bits {
	RFCRAddressMask		= 0x3ff,
	AcceptMulticast		= 0x00200000,
	AcceptMyPhys		= 0x08000000,
	AcceptAllPhys		= 0x10000000,
	AcceptAllMulticast	= 0x20000000,
	AcceptBroadcast		= 0x40000000,
	RxFilterEnable		= 0x80000000
};

enum StatsCtrl_bits {
	StatsWarn		= 0x1,
	StatsFreeze		= 0x2,
	StatsClear		= 0x4,
	StatsStrobe		= 0x8,
};

enum MIntrCtrl_bits {
	MICRIntEn		= 0x2,	/* enable PHY-event interrupts */
};

enum PhyCtrl_bits {
	PhyAddrMask		= 0x1f,
};

#define PHY_ADDR_NONE		32	/* sentinel: no external phy found */
#define PHY_ADDR_INTERNAL	1

/* values we might find in the silicon revision register */
#define SRR_DP83815_C		0x0302
#define SRR_DP83815_D		0x0403
#define SRR_DP83816_A4		0x0504
#define SRR_DP83816_A5		0x0505

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures.
 */
/* Hardware DMA descriptor; fields are little-endian as seen by the chip. */
struct netdev_desc {
	__le32 next_desc;	/* bus address of next descriptor in the ring */
	__le32 cmd_status;	/* DescOwn + status/command bits + length */
	__le32 addr;		/* bus address of the packet buffer */
	__le32 software_use;	/* not touched by hardware */
};

/* Bits in network_desc.status */
enum desc_status_bits {
	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
	DescNoCRC=0x10000000, DescPktOK=0x08000000,
	DescSizeMask=0xfff,

	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,

	DescRxAbort=0x04000000, DescRxOver=0x02000000,
	DescRxDest=0x01800000, DescRxLong=0x00400000,
	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
	DescRxLoop=0x00020000, DesRxColl=0x00010000,
};

/* Per-device private state, hung off the net_device. */
struct netdev_private {
	/* Descriptor rings first for alignment */
	dma_addr_t ring_dma;		/* bus address of the combined rx+tx ring */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	/* The addresses of receive-in-place skbuffs */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_dma[RX_RING_SIZE];
	/* address of a sent-in-place packet/buffer, for later free() */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_dma[TX_RING_SIZE];
	struct net_device *dev;
	void __iomem *ioaddr;
	struct napi_struct napi;
	/* Media monitoring timer */
	struct timer_list timer;
	/* Frequently used values: keep some adjacent for cache effect */
	struct pci_dev *pci_dev;
	struct netdev_desc *rx_head_desc;
	/* Producer/consumer ring indices */
	unsigned int cur_rx, dirty_rx;
	unsigned int cur_tx, dirty_tx;
	/* Based on MTU+slack. */
	unsigned int rx_buf_sz;
	int oom;			/* set when rx buffer allocation failed */
	/* Interrupt status */
	u32 intr_status;
	/* Do not touch the nic registers */
	int hands_off;
	/* Don't pay attention to the reported link state.
*/
	int ignore_phy;
	/* external phy that is used: only valid if dev->if_port != PORT_TP */
	int mii;			/* PHY id (PHYSID1<<16 | PHYSID2) */
	int phy_addr_external;
	unsigned int full_duplex;
	/* Rx filter */
	u32 cur_rx_mode;
	u32 rx_filter[16];		/* software copy of RFCR data window */
	/* FIFO and PCI burst thresholds */
	u32 tx_config, rx_config;
	/* original contents of ClkRun register */
	u32 SavedClkRun;
	/* silicon revision */
	u32 srr;
	/* expected DSPCFG value */
	u16 dspcfg;
	int dspcfg_workaround;
	/* parms saved in ethtool format */
	u16 speed;		/* The forced speed, 10Mb, 100Mb, gigabit */
	u8 duplex;		/* Duplex, half or full */
	u8 autoneg;		/* Autonegotiation enabled */
	/* MII transceiver section */
	u16 advertising;
	unsigned int iosize;
	spinlock_t lock;	/* protects registers and most fields above */
	u32 msg_enable;
	/* EEPROM data */
	int eeprom_size;
};

/* forward declarations */
static void move_int_phy(struct net_device *dev, int addr);
static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int reg);
static void mdio_write(struct net_device *dev, int reg, u16 data);
static void init_phy_fixup(struct net_device *dev);
static int miiport_read(struct net_device *dev, int phy_id, int reg);
static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
static int find_mii(struct net_device *dev);
static void natsemi_reset(struct net_device *dev);
static void natsemi_reload_eeprom(struct net_device *dev);
static void natsemi_stop_rxtx(struct net_device *dev);
static int netdev_open(struct net_device *dev);
static void do_cable_magic(struct net_device *dev);
static void undo_cable_magic(struct net_device *dev);
static void check_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void dump_ring(struct net_device *dev);
static void ns_tx_timeout(struct net_device *dev);
static int alloc_ring(struct net_device *dev);
static void refill_rx(struct net_device *dev);
static void init_ring(struct net_device *dev);
static void drain_tx(struct net_device *dev);
static void drain_ring(struct net_device *dev);
static void free_ring(struct net_device *dev);
static void reinit_ring(struct net_device *dev);
static void init_registers(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int natsemi_poll(struct napi_struct *napi, int budget);
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
static void netdev_tx_done(struct net_device *dev);
static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void natsemi_poll_controller(struct net_device *dev);
#endif
static void __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __get_stats(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_set_wol(struct net_device *dev, u32 newval);
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
static int netdev_set_sopass(struct net_device *dev, u8 *newval);
static int netdev_get_sopass(struct net_device *dev, u8 *data);
static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
static void enable_wol_mode(struct net_device *dev, int enable_intr);
static int netdev_close(struct net_device *dev);
static int netdev_get_regs(struct net_device *dev, u8 *buf);
static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
static const struct ethtool_ops ethtool_ops;

/* Boilerplate for a read/write sysfs attribute named _name. */
#define NATSEMI_ATTR(_name) \
static ssize_t natsemi_show_##_name(struct device *dev, \
	 struct device_attribute *attr, char *buf); \
static ssize_t natsemi_set_##_name(struct device *dev, \
		struct device_attribute *attr, \
		const char *buf, size_t count); \
	 static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name)

#define NATSEMI_CREATE_FILE(_dev, _name) \
	 device_create_file(&_dev->dev, &dev_attr_##_name)
#define NATSEMI_REMOVE_FILE(_dev, _name) \
	 device_remove_file(&_dev->dev, &dev_attr_##_name)

NATSEMI_ATTR(dspcfg_workaround);

/* sysfs show: report whether the DSPCFG re-write workaround is active. */
static ssize_t natsemi_show_dspcfg_workaround(struct device *dev,
				  	      struct device_attribute *attr,
					      char *buf)
{
	struct netdev_private *np = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off");
}

/* sysfs store: parse "on"/"1"/"off"/"0"; unrecognised input is silently
 * ignored (count is still returned so the write appears to succeed). */
static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct netdev_private *np = netdev_priv(to_net_dev(dev));
	int new_setting;
	unsigned long flags;

	/* Find out the new setting */
	if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
		new_setting = 1;
	else if (!strncmp("off", buf, count - 1)
		 || !strncmp("0", buf, count - 1))
		new_setting = 0;
	else
		return count;

	spin_lock_irqsave(&np->lock, flags);

	np->dspcfg_workaround = new_setting;

	spin_unlock_irqrestore(&np->lock, flags);

	return count;
}

/* Convenience accessor for the mapped register base. */
static inline void __iomem *ns_ioaddr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return np->ioaddr;
}

/* The readl() after each write flushes the posted PCI write. */
static inline void natsemi_irq_enable(struct net_device *dev)
{
	writel(1, ns_ioaddr(dev) + IntrEnable);
	readl(ns_ioaddr(dev) + IntrEnable);
}

static inline void natsemi_irq_disable(struct net_device *dev)
{
	writel(0, ns_ioaddr(dev) + IntrEnable);
	readl(ns_ioaddr(dev) + IntrEnable);
}

/* Park the internal phy at an unused MII address so an external phy at
 * address 'addr' can be accessed without a collision. */
static void move_int_phy(struct net_device *dev, int addr)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int target = 31;

	/*
	 * The internal phy is visible on the external mii bus. Therefore we must
	 * move it away before we can send commands to an external phy.
	 * There are two addresses we must avoid:
	 * - the address on the external phy that is used for transmission.
	 * - the address that we want to access.
	 *   User space can access phys on the mii bus with
	 *   SIOCGMIIREG/SIOCSMIIREG, independent from the phy that is used
	 *   for transmission.
	 */
	if (target == addr)
		target--;
	if (target == np->phy_addr_external)
		target--;
	writew(target, ioaddr + PhyCtrl);
	readw(ioaddr + PhyCtrl);	/* flush posted write */
	udelay(1);
}

/* Read the current speed/duplex/autoneg settings out of the transceiver
 * into ethtool-format fields, and set the initial carrier state. */
static void natsemi_init_media(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 tmp;

	if (np->ignore_phy)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	/* get the initial settings from hardware */
	tmp            = mdio_read(dev, MII_BMCR);
	np->speed      = (tmp & BMCR_SPEED100)? SPEED_100     : SPEED_10;
	np->duplex     = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL   : DUPLEX_HALF;
	np->autoneg    = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
	np->advertising= mdio_read(dev, MII_ADVERTISE);

	if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL &&
	    netif_msg_probe(np)) {
		printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
			"10%s %s duplex.\n",
			pci_name(np->pci_dev),
			(mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
			  "enabled, advertise" : "disabled, force",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_100HALF))?
			    "0" : "",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_10FULL))?
			    "full" : "half");
	}
	if (netif_msg_probe(np))
		printk(KERN_INFO
			"natsemi %s: Transceiver status %#04x advertising %#04x.\n",
			pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
			np->advertising);
}

static const struct net_device_ops natsemi_netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_change_mtu		= natsemi_change_mtu,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= ns_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= natsemi_poll_controller,
#endif
};

/* One-time probe/setup of a single board: map registers, read the MAC
 * address from EEPROM, pick the media port and register the netdev. */
static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	int i, option, irq, chip_idx = ent->driver_data;
	static int find_cnt = -1;	/* counts probed boards for options[] */
	resource_size_t iostart;
	unsigned long iosize;
	void __iomem *ioaddr;
	const int pcibar = 1; /* PCI base address register */
	int prev_eedata;
	u32 tmp;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	i = pci_enable_device(pdev);
	if (i) return i;

	/* natsemi has a non-standard PM control register
	 * in PCI config space.  Some boards apparently need
	 * to be brought to D0 in this manner.
	 */
	pci_read_config_dword(pdev, PCIPM, &tmp);
	if (tmp & PCI_PM_CTRL_STATE_MASK) {
		/* D0 state, disable PME assertion */
		u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
		pci_write_config_dword(pdev, PCIPM, newtmp);
	}

	find_cnt++;
	iostart = pci_resource_start(pdev, pcibar);
	iosize = pci_resource_len(pdev, pcibar);
	irq = pdev->irq;

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof (struct netdev_private));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	i = pci_request_regions(pdev, DRV_NAME);
	if (i)
		goto err_pci_request_regions;

	ioaddr = ioremap(iostart, iosize);
	if (!ioaddr) {
		i = -ENOMEM;
		goto err_ioremap;
	}

	/* Work around the dropped serial bit: the MAC address is stored in
	 * EEPROM words 6..9 shifted by one bit; reassemble it here. */
	prev_eedata = eeprom_read(ioaddr, 6);
	for (i = 0; i < 3; i++) {
		int eedata = eeprom_read(ioaddr, i + 7);
		dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
		dev->dev_addr[i*2+1] = eedata >> 7;
		prev_eedata = eedata;
	}

	np = netdev_priv(dev);
	np->ioaddr = ioaddr;

	netif_napi_add(dev, &np->napi, natsemi_poll, 64);
	np->dev = dev;

	np->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	np->iosize = iosize;
	spin_lock_init(&np->lock);
	np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
	np->hands_off = 0;
	np->intr_status = 0;
	np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
	if (natsemi_pci_info[chip_idx].flags & NATSEMI_FLAG_IGNORE_PHY)
		np->ignore_phy = 1;
	else
		np->ignore_phy = 0;
	np->dspcfg_workaround = dspcfg_workaround;

	/* Initial port:
	 * - If configured to ignore the PHY set up for external.
	 * - If the nic was configured to use an external phy and if find_mii
	 *   finds a phy: use external port, first phy that replies.
	 * - Otherwise: internal port.
	 * Note that the phy address for the internal phy doesn't matter:
	 * The address would be used to access a phy over the mii bus, but
	 * the internal phy is accessed through mapped registers.
	 */
	if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy)
		dev->if_port = PORT_MII;
	else
		dev->if_port = PORT_TP;
	/* Reset the chip to erase previous misconfiguration. */
	natsemi_reload_eeprom(dev);
	natsemi_reset(dev);

	if (dev->if_port != PORT_TP) {
		np->phy_addr_external = find_mii(dev);
		/* If we're ignoring the PHY it doesn't matter if we can't
		 * find one. */
		if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) {
			dev->if_port = PORT_TP;
			np->phy_addr_external = PHY_ADDR_INTERNAL;
		}
	} else {
		np->phy_addr_external = PHY_ADDR_INTERNAL;
	}

	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	/* The lower four bits are the media type. */
	if (option) {
		if (option & 0x200)
			np->full_duplex = 1;
		if (option & 15)
			printk(KERN_INFO
				"natsemi %s: ignoring user supplied media type %d",
				pci_name(np->pci_dev), option & 15);
	}
	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt])
		np->full_duplex = 1;

	dev->netdev_ops = &natsemi_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ethtool_ops = &ethtool_ops;

	if (mtu)
		dev->mtu = mtu;

	natsemi_init_media(dev);

	/* save the silicon revision for later querying */
	np->srr = readl(ioaddr + SiliconRev);
	if (netif_msg_hw(np))
		printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
				pci_name(np->pci_dev), np->srr);

	i = register_netdev(dev);
	if (i)
		goto err_register_netdev;
	i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
	if (i)
		goto err_create_file;

	if (netif_msg_drv(np)) {
		printk(KERN_INFO "natsemi %s: %s at %#08llx "
		       "(%s), %pM, IRQ %d",
		       dev->name, natsemi_pci_info[chip_idx].name,
		       (unsigned long long)iostart, pci_name(np->pci_dev),
		       dev->dev_addr, irq);
		if (dev->if_port == PORT_TP)
			printk(", port TP.\n");
		else if (np->ignore_phy)
			printk(", port MII, ignoring PHY\n");
		else
			printk(", port MII, phy ad %d.\n", np->phy_addr_external);
	}
	return 0;

 err_create_file:
	unregister_netdev(dev);

 err_register_netdev:
	iounmap(ioaddr);

 err_ioremap:
	pci_release_regions(pdev);

 err_pci_request_regions:
	free_netdev(dev);
	return i;
}

/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
   The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated. */
#define eeprom_delay(ee_addr)	readl(ee_addr)

#define EE_Write0 (EE_ChipSelect)
#define EE_Write1 (EE_ChipSelect | EE_DataIn)

/* The EEPROM commands include the alway-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};

/* Bit-bang a 16-bit word out of the serial EEPROM at 'location'. */
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	writel(EE_Write0, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		writel(dataval, ee_addr);
		eeprom_delay(ee_addr);
		writel(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	writel(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	/* Shift 16 data bits in, LSB first. */
	for (i = 0; i < 16; i++) {
		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
		writel(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(EE_Write0, ee_addr);
	writel(0, ee_addr);
	return retval;
}

/* MII transceiver control section.
 * The 83815 series has an internal transceiver, and we present the
 * internal management registers as if they were MII connected.
 * External Phy registers are referenced through the MII interface.
 */

/* clock transitions >= 20ns (25MHz)
 * One readl should be good to PCI @ 100MHz
 */
#define mii_delay(ioaddr)  readl(ioaddr + EECtrl)

/* Clock one bit in from the MII management bus. */
static int mii_getbit (struct net_device *dev)
{
	int data;
	void __iomem *ioaddr = ns_ioaddr(dev);

	writel(MII_ShiftClk, ioaddr + EECtrl);
	data = readl(ioaddr + EECtrl);
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
	return (data & MII_Data)? 1 : 0;
}

/* Clock the top 'len' bits of 'data' out onto the MII management bus,
 * MSB first. */
static void mii_send_bits (struct net_device *dev, u32 data, int len)
{
	u32 i;
	void __iomem *ioaddr = ns_ioaddr(dev);

	for (i = (1 << (len-1)); i; i >>= 1) {
		u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0);
		writel(mdio_val, ioaddr + EECtrl);
		mii_delay(ioaddr);
		writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
		mii_delay(ioaddr);
	}
	writel(0, ioaddr + EECtrl);
	mii_delay(ioaddr);
}

/* Read a register from an external phy over the bit-banged MII bus.
 * Returns 0 if the phy does not drive the turnaround bit low. */
static int miiport_read(struct net_device *dev, int phy_id, int reg)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Ensure sync */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP = 0110'b for read operation */
	cmd = (0x06 << 10) | (phy_id << 5) | reg;
	mii_send_bits (dev, cmd, 14);
	/* Turnaround */
	if (mii_getbit (dev))
		return 0;
	/* Read data */
	for (i = 0; i < 16; i++) {
		retval <<= 1;
		retval |= mii_getbit (dev);
	}
	/* End cycle */
	mii_getbit (dev);
	return retval;
}

/* Write a register on an external phy over the bit-banged MII bus. */
static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
{
	u32 cmd;

	/* Ensure sync */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
}

/* Read an MII register from whichever transceiver is in use. */
static int mdio_read(struct net_device *dev, int reg)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/* The 83815 series has two ports:
	 * - an internal transceiver (memory mapped at BasicControl + reg*4)
	 * - an external mii bus
	 */
	if (dev->if_port == PORT_TP)
		return readw(ioaddr+BasicControl+(reg<<2));
	else
		return miiport_read(dev, np->phy_addr_external, reg);
}

/* Write an MII register on whichever transceiver is in use. */
static void mdio_write(struct net_device *dev, int reg, u16 data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/* The 83815 series has an internal transceiver; handle separately */
	if (dev->if_port == PORT_TP)
		writew(data, ioaddr+BasicControl+(reg<<2));
	else
miiport_write(dev, np->phy_addr_external, reg, data);
}

/* Re-apply phy configuration that is lost across resets/power-down:
 * autoneg/speed/duplex, phy-specific setup (Am79C874 fibre vs TP), and
 * the "magic" DSP register sequence for the internal transceiver. */
static void init_phy_fixup(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 tmp;

	/* restore stuff lost when power was out */
	tmp = mdio_read(dev, MII_BMCR);
	if (np->autoneg == AUTONEG_ENABLE) {
		/* renegotiate if something changed */
		if ((tmp & BMCR_ANENABLE) == 0 ||
		    np->advertising != mdio_read(dev, MII_ADVERTISE))
		{
			/* turn on autonegotiation and force negotiation */
			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mdio_write(dev, MII_ADVERTISE, np->advertising);
		}
	} else {
		/* turn off auto negotiation, set speed and duplexity */
		tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
		if (np->speed == SPEED_100)
			tmp |= BMCR_SPEED100;
		if (np->duplex == DUPLEX_FULL)
			tmp |= BMCR_FULLDPLX;
		/*
		 * Note: there is no good way to inform the link partner
		 * that our capabilities changed. The user has to unplug
		 * and replug the network cable after some changes, e.g.
		 * after switching from 10HD, autoneg off to 100 HD,
		 * autoneg off.
		 */
	}
	mdio_write(dev, MII_BMCR, tmp);
	readl(ioaddr + ChipConfig);	/* flush posted writes */
	udelay(1);

	/* find out what phy this is */
	np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
				+ mdio_read(dev, MII_PHYSID2);

	/* handle external phys here */
	switch (np->mii) {
	case PHYID_AM79C874:
		/* phy specific configuration for fibre/tp operation */
		tmp = mdio_read(dev, MII_MCTRL);
		tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
		if (dev->if_port == PORT_FIBRE)
			tmp |= MII_FX_SEL;
		else
			tmp |= MII_EN_SCRM;
		mdio_write(dev, MII_MCTRL, tmp);
		break;
	default:
		break;
	}
	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return;	/* the remaining fixups only apply to the internal phy */

	/* On page 78 of the spec, they recommend some settings for "optimum
	   performance" to be done in sequence.  These settings optimize some
	   of the 100Mbit autodetection circuitry.  They say we only want to
	   do this for rev C of the chip, but engineers at NSC (Bradley
	   Kennedy) recommends always setting them.  If you don't, you get
	   errors on some autonegotiations that make the device unusable.

	   It seems that the DSP needs a few usec to reinitialize after
	   the start of the phy. Just retry writing these values until they
	   stick.
	*/
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {

		int dspcfg;
		/* PGSEL=1 selects the hidden register page */
		writew(1, ioaddr + PGSEL);
		writew(PMDCSR_VAL, ioaddr + PMDCSR);
		writew(TSTDAT_VAL, ioaddr + TSTDAT);
		np->dspcfg = (np->srr <= SRR_DP83815_C)?
			DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
		writew(np->dspcfg, ioaddr + DSPCFG);
		writew(SDCFG_VAL, ioaddr + SDCFG);
		writew(0, ioaddr + PGSEL);
		readl(ioaddr + ChipConfig);
		udelay(10);

		/* re-read to verify the value stuck */
		writew(1, ioaddr + PGSEL);
		dspcfg = readw(ioaddr + DSPCFG);
		writew(0, ioaddr + PGSEL);
		if (np->dspcfg == dspcfg)
			break;
	}

	if (netif_msg_link(np)) {
		if (i==NATSEMI_HW_TIMEOUT) {
			printk(KERN_INFO
				"%s: DSPCFG mismatch after retrying for %d usec.\n",
				dev->name, i*10);
		} else {
			printk(KERN_INFO
				"%s: DSPCFG accepted after %d usec.\n",
				dev->name, i*10);
		}
	}
	/*
	 * Enable PHY Specific event based interrupts.  Link state change
	 * and Auto-Negotiation Completion are among the affected.
	 * Read the intr status to clear it (needed for wake events).
	 */
	readw(ioaddr + MIntrStatus);
	writew(MICRIntEn, ioaddr + MIntrCtrl);
}

/* Select the external transceiver. Returns 1 if a switch was performed,
 * 0 if the external phy was already active. */
static int switch_port_external(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 cfg;

	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to external transceiver.\n",
				dev->name);
	}

	/* 1) switch back to external phy */
	writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* 2) reset the external phy: */
	/* resetting the external PHY has been known to cause a hub supplying
	 * power over Ethernet to kill the power.  We don't want to kill
	 * power to this computer, so we avoid resetting the phy.
	 */

	/* 3) reinit the phy fixup, it got lost during power down.
	 */
	move_int_phy(dev, np->phy_addr_external);
	init_phy_fixup(dev);

	return 1;
}

/* Select the internal transceiver and reset it. Returns 1 if a switch was
 * performed, 0 if the internal phy was already active. */
static int switch_port_internal(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 bmcr;

	cfg = readl(ioaddr + ChipConfig);
	if (!(cfg &CfgExtPhy))
		return 0;

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to internal transceiver.\n",
				dev->name);
	}
	/* 1) switch back to internal phy: */
	cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);
	udelay(1);

	/* 2) reset the internal phy: */
	bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
	writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
	readl(ioaddr + ChipConfig);
	udelay(10);
	/* wait for the self-clearing reset bit */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
		if (!(bmcr & BMCR_RESET))
			break;
		udelay(10);
	}
	if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
		printk(KERN_INFO
			"%s: phy reset did not complete in %d usec.\n",
			dev->name, i*10);
	}
	/* 3) reinit the phy fixup, it got lost during power down. */
	init_phy_fixup(dev);

	return 1;
}

/* Scan for a PHY on the external mii bus.
 * There are two tricky points:
 * - Do not scan while the internal phy is enabled. The internal phy will
 *   crash: e.g. reads from the DSPCFG register will return odd values and
 *   the nasty random phy reset code will reset the nic every few seconds.
 * - The internal phy must be moved around, an external phy could
 *   have the same address as the internal phy.
 */
static int find_mii(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int tmp;
	int i;
	int did_switch;

	/* Switch to external phy */
	did_switch = switch_port_external(dev);

	/* Scan the possible phy addresses:
	 *
	 * PHY address 0 means that the phy is in isolate mode. Not yet
	 * supported due to lack of test hardware. User space should
	 * handle it through ethtool.
	 */
	for (i = 1; i <= 31; i++) {
		move_int_phy(dev, i);
		tmp = miiport_read(dev, i, MII_BMSR);
		if (tmp != 0xffff && tmp != 0x0000) {
			/* found something! */
			np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
					+ mdio_read(dev, MII_PHYSID2);
			if (netif_msg_probe(np)) {
				printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
						pci_name(np->pci_dev), np->mii, i);
			}
			break;
		}
	}
	/* And switch back to internal phy: */
	if (did_switch)
		switch_port_internal(dev);

	/* NOTE: returns 32 (== PHY_ADDR_NONE) when no phy replied */
	return i;
}

/* CFG bits [13:16] [18:23] */
#define CFG_RESET_SAVE 0xfde000
/* WCSR bits [0:4] [9:10] */
#define WCSR_RESET_SAVE 0x61f
/* RFCR bits [20] [22] [27:31] */
#define RFCR_RESET_SAVE 0xf8500000

/* Full chip reset, preserving the EEPROM-derived register state listed
 * above across the reset. */
static void natsemi_reset(struct net_device *dev)
{
	int i;
	u32 cfg;
	u32 wcsr;
	u32 rfcr;
	u16 pmatch[3];
	u16 sopass[3];
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/*
	 * Resetting the chip causes some registers to be lost.
	 * Natsemi suggests NOT reloading the EEPROM while live, so instead
	 * we save the state that would have been loaded from EEPROM
	 * on a normal power-up (see the spec EEPROM map).  This assumes
	 * whoever calls this will follow up with init_registers() eventually.
	 */

	/* CFG */
	cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;
	/* WCSR */
	wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;
	/* RFCR */
	rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
	/* PMATCH */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		pmatch[i] = readw(ioaddr + RxFilterData);
	}
	/* SOPAS */
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		sopass[i] = readw(ioaddr + RxFilterData);
	}

	/* now whack the chip */
	writel(ChipReset, ioaddr + ChipCmd);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		if (!(readl(ioaddr + ChipCmd) & ChipReset))
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
			dev->name, i*5);
	}

	/* restore CFG */
	cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
	/* turn on external phy if it was selected */
	if (dev->if_port == PORT_TP)
		cfg &= ~(CfgExtPhy | CfgPhyDis);
	else
		cfg |= (CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	/* restore WCSR */
	wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
	writel(wcsr, ioaddr + WOLCmd);
	/* read RFCR */
	rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
	/* restore PMATCH */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		writew(pmatch[i], ioaddr + RxFilterData);
	}
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		writew(sopass[i], ioaddr + RxFilterData);
	}
	/* restore RFCR */
	writel(rfcr, ioaddr + RxFilterAddr);
}

/* Reset only the receiver, accumulating any other pending interrupt
 * status bits into np->intr_status along the way. */
static void reset_rx(struct net_device *dev)
{
	int i;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	np->intr_status &= ~RxResetDone;

	writel(RxReset, ioaddr + ChipCmd);

	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		np->intr_status |= readl(ioaddr + IntrStatus);
		if (np->intr_status & RxResetDone)
			break;
		udelay(15);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
		       dev->name, i*15);
	} else if (netif_msg_hw(np)) {
		printk(KERN_WARNING "%s: RX reset took %d usec.\n",
		       dev->name, i*15);
	}
}

/* Ask the chip to reload its configuration from the EEPROM and wait for
 * the self-clearing EepromReload bit. */
static void natsemi_reload_eeprom(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;

	writel(EepromReload, ioaddr + PCIBusCfg);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		udelay(50);
		if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
			break;
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	}
}

/* Stop both DMA engines and wait until the chip reports them idle. */
static void natsemi_stop_rxtx(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	int i;

	writel(RxOff | TxOff, ioaddr + ChipCmd);
	for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
		if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
			dev->name, i*5);
	}
}

/* ndo_open: reset, allocate rings/IRQ, program registers and MAC address,
 * start the queue and the link-monitoring timer. */
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;
	int i;

	/* Reset the chip, just in case.
*/ natsemi_reset(dev); i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); if (i) return i; if (netif_msg_ifup(np)) printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", dev->name, irq); i = alloc_ring(dev); if (i < 0) { free_irq(irq, dev); return i; } napi_enable(&np->napi); init_ring(dev); spin_lock_irq(&np->lock); init_registers(dev); /* now set the MAC address according to dev->dev_addr */ for (i = 0; i < 3; i++) { u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i]; writel(i*2, ioaddr + RxFilterAddr); writew(mac, ioaddr + RxFilterData); } writel(np->cur_rx_mode, ioaddr + RxFilterAddr); spin_unlock_irq(&np->lock); netif_start_queue(dev); if (netif_msg_ifup(np)) printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n", dev->name, (int)readl(ioaddr + ChipCmd)); /* Set the timer to check for link beat. */ init_timer(&np->timer); np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ); np->timer.data = (unsigned long)dev; np->timer.function = netdev_timer; /* timer handler */ add_timer(&np->timer); return 0; } static void do_cable_magic(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = ns_ioaddr(dev); if (dev->if_port != PORT_TP) return; if (np->srr >= SRR_DP83816_A5) return; /* * 100 MBit links with short cables can trip an issue with the chip. * The problem manifests as lots of CRC errors and/or flickering * activity LED while idle. This process is based on instructions * from engineers at National. 
	 */
	if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
		u16 data;

		writew(1, ioaddr + PGSEL);
		/*
		 * coefficient visibility should already be enabled via
		 * DSPCFG | 0x1000
		 */
		data = readw(ioaddr + TSTDAT) & 0xff;
		/*
		 * the value must be negative, and within certain values
		 * (these values all come from National)
		 */
		if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
			np = netdev_priv(dev);

			/* the bug has been triggered - fix the coefficient */
			writew(TSTDAT_FIXED, ioaddr + TSTDAT);

			/* lock the value */
			data = readw(ioaddr + DSPCFG);
			np->dspcfg = data | DSPCFG_LOCK;
			writew(np->dspcfg, ioaddr + DSPCFG);
		}
		writew(0, ioaddr + PGSEL);
	}
}

/*
 * undo_cable_magic - revert do_cable_magic(): clear the DSPCFG lock bit
 * so the PHY's own coefficients are used again.  Same TP-port /
 * pre-DP83816-A5 guard as do_cable_magic().
 */
static void undo_cable_magic(struct net_device *dev)
{
	u16 data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	if (dev->if_port != PORT_TP)
		return;

	if (np->srr >= SRR_DP83816_A5)
		return;

	writew(1, ioaddr + PGSEL);
	/* make sure the lock bit is clear */
	data = readw(ioaddr + DSPCFG);
	np->dspcfg = data & ~DSPCFG_LOCK;
	writew(np->dspcfg, ioaddr + DSPCFG);
	writew(0, ioaddr + PGSEL);
}

/*
 * check_link - update carrier state and duplex from the PHY and push the
 * result into the chip's Tx/Rx configuration registers.
 *
 * Callers in this file invoke it with np->lock held (netdev_timer,
 * init_registers, netdev_rx recovery path).
 */
static void check_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int duplex = np->duplex;
	u16 bmsr;

	/* If we are ignoring the PHY then don't try reading it. */
	if (np->ignore_phy)
		goto propagate_state;

	/* The link status field is latched: it remains low after a temporary
	 * link failure until it's read. We need the current link status,
	 * thus read twice.
	 */
	mdio_read(dev, MII_BMSR);
	bmsr = mdio_read(dev, MII_BMSR);

	if (!(bmsr & BMSR_LSTATUS)) {
		if (netif_carrier_ok(dev)) {
			if (netif_msg_link(np))
				printk(KERN_NOTICE "%s: link down.\n",
					dev->name);
			netif_carrier_off(dev);
			undo_cable_magic(dev);
		}
		return;
	}
	if (!netif_carrier_ok(dev)) {
		if (netif_msg_link(np))
			printk(KERN_NOTICE "%s: link up.\n", dev->name);
		netif_carrier_on(dev);
		do_cable_magic(dev);
	}

	duplex = np->full_duplex;
	if (!duplex) {
		/* not forced full: derive duplex from autoneg result, or
		 * from BMCR if autoneg has not completed */
		if (bmsr & BMSR_ANEGCOMPLETE) {
			int tmp = mii_nway_result(
				np->advertising & mdio_read(dev, MII_LPA));
			if (tmp == LPA_100FULL || tmp == LPA_10FULL)
				duplex = 1;
		} else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
			duplex = 1;
	}

propagate_state:
	/* if duplex is set then bit 28 must be set, too */
	if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
		if (netif_msg_link(np))
			printk(KERN_INFO
				"%s: Setting %s-duplex based on negotiated "
				"link capability.\n", dev->name,
				duplex ? "full" : "half");
		if (duplex) {
			np->rx_config |= RxAcceptTx;
			np->tx_config |= TxCarrierIgn | TxHeartIgn;
		} else {
			np->rx_config &= ~RxAcceptTx;
			np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
		}
		writel(np->tx_config, ioaddr + TxConfig);
		writel(np->rx_config, ioaddr + RxConfig);
	}
}

/*
 * init_registers - program the chip for operation.
 *
 * Loads the ring base addresses, Tx/Rx configuration, disables PME,
 * restores the rx filter mode, unmasks interrupts and starts both
 * engines.  Called with np->lock held.
 */
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	init_phy_fixup(dev);

	/* clear any interrupts that are pending, such as wake events */
	readl(ioaddr + IntrStatus);

	writel(np->ring_dma, ioaddr + RxRingPtr);
	/* the tx ring sits directly after the rx ring in the same
	 * DMA allocation (see alloc_ring) */
	writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
		ioaddr + TxRingPtr);

	/* Initialize other registers.
	 * Configure the PCI bus bursts and FIFO thresholds.
	 * Configure for standard, in-spec Ethernet.
	 * Start with half-duplex. check_link will update
	 * to the correct settings.
	 */

	/* DRTH: 2: start tx if 64 bytes are in the fifo
	 * FLTH: 0x10: refill with next packet if 512 bytes are free
	 * MXDMA: 0: up to 256 byte bursts.
	 * MXDMA must be <= FLTH
	 * ECRETRY=1
	 * ATP=1
	 */
	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
				TX_FLTH_VAL | TX_DRTH_VAL_START;
	writel(np->tx_config, ioaddr + TxConfig);

	/* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
	 * MXDMA 0: up to 256 byte bursts
	 */
	np->rx_config = RxMxdma_256 | RX_DRTH_VAL;
	/* if receive ring now has bigger buffers than normal, enable jumbo */
	if (np->rx_buf_sz > NATSEMI_LONGPKT)
		np->rx_config |= RxAcceptLong;
	writel(np->rx_config, ioaddr + RxConfig);

	/* Disable PME:
	 * The PME bit is initialized from the EEPROM contents.
	 * PCI cards probably have PME disabled, but motherboard
	 * implementations may have PME set to enable WakeOnLan.
	 * With PME set the chip will scan incoming packets but
	 * nothing will be written to memory. */
	np->SavedClkRun = readl(ioaddr + ClkRun);
	writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
	if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
		printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
			dev->name, readl(ioaddr + WOLCmd));
	}

	check_link(dev);
	__set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	writel(DEFAULT_INTR, ioaddr + IntrMask);
	natsemi_irq_enable(dev);

	writel(RxOn | TxOn, ioaddr + ChipCmd);
	writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
}

/*
 * netdev_timer:
 * Purpose:
 * 1) check for link changes. Usually they are handled by the MII interrupt
 *    but it doesn't hurt to check twice.
 * 2) check for sudden death of the NIC:
 *    It seems that a reference set for this chip went out with incorrect info,
 *    and there exist boards that aren't quite right.  An unexpected voltage
 *    drop can cause the PHY to get itself in a weird state (basically reset).
 *    NOTE: this only seems to affect revC chips.  The user can disable
 *    this check via dspcfg_workaround sysfs option.
 * 3) check of death of the RX path due to OOM
 */
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int next_tick = NATSEMI_TIMER_FREQ;
	const int irq = np->pci_dev->irq;

	if (netif_msg_timer(np)) {
		/* DO NOT read the IntrStatus register,
		 * a read clears any pending interrupts.
		 */
		printk(KERN_DEBUG "%s: Media selection timer tick.\n",
			dev->name);
	}

	if (dev->if_port == PORT_TP) {
		u16 dspcfg;

		spin_lock_irq(&np->lock);
		/* check for a nasty random phy-reset - use dspcfg as a flag */
		writew(1, ioaddr+PGSEL);
		dspcfg = readw(ioaddr+DSPCFG);
		writew(0, ioaddr+PGSEL);
		if (np->dspcfg_workaround && dspcfg != np->dspcfg) {
			if (!netif_queue_stopped(dev)) {
				/* drop the lock before disable_irq(), then
				 * retake it for the full re-initialization */
				spin_unlock_irq(&np->lock);
				if (netif_msg_drv(np))
					printk(KERN_NOTICE "%s: possible phy reset: "
						"re-initializing\n", dev->name);
				disable_irq(irq);
				spin_lock_irq(&np->lock);
				natsemi_stop_rxtx(dev);
				dump_ring(dev);
				reinit_ring(dev);
				init_registers(dev);
				spin_unlock_irq(&np->lock);
				enable_irq(irq);
			} else {
				/* hurry back */
				next_tick = HZ;
				spin_unlock_irq(&np->lock);
			}
		} else {
			/* init_registers() calls check_link() for the above case */
			check_link(dev);
			spin_unlock_irq(&np->lock);
		}
	} else {
		spin_lock_irq(&np->lock);
		check_link(dev);
		spin_unlock_irq(&np->lock);
	}

	/* if refill_rx() previously ran out of memory, try again and
	 * restart the rx engine on success; otherwise retry very soon */
	if (np->oom) {
		disable_irq(irq);
		np->oom = 0;
		refill_rx(dev);
		enable_irq(irq);
		if (!np->oom) {
			writel(RxOn, ioaddr + ChipCmd);
		} else {
			next_tick = 1;
		}
	}

	if (next_tick > 1)
		mod_timer(&np->timer, round_jiffies(jiffies + next_tick));
	else
		mod_timer(&np->timer, jiffies + next_tick);
}

/*
 * dump_ring - log the raw Tx and Rx descriptor rings (pktdata debug
 * level only).
 */
static void dump_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_pktdata(np)) {
		int i;
		printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
				i, np->tx_ring[i].next_desc,
				np->tx_ring[i].cmd_status,
				np->tx_ring[i].addr);
		}
		printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
				i, np->rx_ring[i].next_desc,
				np->rx_ring[i].cmd_status,
				np->rx_ring[i].addr);
		}
	}
}

/*
 * ns_tx_timeout - ndo_tx_timeout handler: dump state, reset the chip,
 * rebuild the rings and reprogram the registers, then wake the queue.
 */
static void ns_tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;

	disable_irq(irq);
	spin_lock_irq(&np->lock);
	if (!np->hands_off) {
		if (netif_msg_tx_err(np))
			printk(KERN_WARNING
				"%s: Transmit timed out, status %#08x,"
				" resetting...\n",
				dev->name, readl(ioaddr + IntrStatus));
		dump_ring(dev);

		natsemi_reset(dev);
		reinit_ring(dev);
		init_registers(dev);
	} else {
		printk(KERN_WARNING
			"%s: tx_timeout while in hands_off state?\n",
			dev->name);
	}
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}

/*
 * alloc_ring - allocate one coherent DMA area holding the Rx ring
 * followed by the Tx ring.  Returns 0 or -ENOMEM.
 */
static int alloc_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	np->rx_ring = pci_alloc_consistent(np->pci_dev,
		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
		&np->ring_dma);
	if (!np->rx_ring)
		return -ENOMEM;
	np->tx_ring = &np->rx_ring[RX_RING_SIZE];
	return 0;
}

/*
 * refill_rx - (re)populate empty Rx ring slots with freshly allocated,
 * DMA-mapped skbs; sets np->oom if allocation fails for a full ring.
 */
static void refill_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		int entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
			skb = netdev_alloc_skb(dev, buflen);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break; /* Better luck next round.
				        */
			np->rx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data, buflen, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
		}
		/* hand the descriptor back to the chip; cmd_status carries
		 * the buffer size */
		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
	}
	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
		if (netif_msg_rx_err(np))
			printk(KERN_WARNING "%s: going OOM.\n", dev->name);
		np->oom = 1;
	}
}

/*
 * set_bufsize - derive np->rx_buf_sz from the current MTU (never
 * smaller than a standard Ethernet frame plus headers).
 */
static void set_bufsize(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* 1) TX ring */
	np->dirty_tx = np->cur_tx = 0;
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		/* descriptors are chained in a ring; the tx ring starts
		 * RX_RING_SIZE descriptors into the DMA area */
		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
		np->tx_ring[i].cmd_status = 0;
	}

	/* 2) RX ring */
	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->oom = 0;
	set_bufsize(dev);

	np->rx_head_desc = &np->rx_ring[0];

	/* Please be careful before changing this loop - at least gcc-2.95.1
	 * miscompiles it otherwise.
	 */

	/* Initialize all Rx descriptors.
	 */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
				+sizeof(struct netdev_desc)
				*((i+1)%RX_RING_SIZE));
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
		np->rx_skbuff[i] = NULL;
	}
	refill_rx(dev);
	dump_ring(dev);
}

/*
 * drain_tx - free every queued tx skb (unmapping its DMA buffer) and
 * count the packets as dropped.
 */
static void drain_tx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
				np->tx_dma[i], np->tx_skbuff[i]->len,
				PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
			dev->stats.tx_dropped++;
		}
		np->tx_skbuff[i] = NULL;
	}
}

/*
 * drain_rx - release every rx buffer and poison the descriptor
 * addresses so a stale DMA shows up as an obvious bad pointer.
 */
static void drain_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int buflen = np->rx_buf_sz;
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].cmd_status = 0;
		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev, np->rx_dma[i],
				buflen + NATSEMI_PADDING,
				PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
}

/* drain_ring - release all rx and tx buffers. */
static void drain_ring(struct net_device *dev)
{
	drain_rx(dev);
	drain_tx(dev);
}

/* free_ring - free the coherent DMA area allocated by alloc_ring(). */
static void free_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	pci_free_consistent(np->pci_dev,
		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
		np->rx_ring, np->ring_dma);
}

/*
 * reinit_rx - reset the rx ring bookkeeping, give all descriptors back
 * to the chip and refill the buffers.
 */
static void reinit_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* RX Ring */
	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->rx_head_desc = &np->rx_ring[0];
	/* Initialize all Rx descriptors.
	 */
	for (i = 0; i < RX_RING_SIZE; i++)
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);

	refill_rx(dev);
}

/*
 * reinit_ring - drop all pending tx packets and rebuild both rings for
 * a fresh start after a reset.
 */
static void reinit_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* drain TX ring */
	drain_tx(dev);
	np->dirty_tx = np->cur_tx = 0;
	for (i=0;i<TX_RING_SIZE;i++)
		np->tx_ring[i].cmd_status = 0;

	reinit_rx(dev);
}

/*
 * start_tx - ndo_start_xmit: map the skb, fill the next tx descriptor,
 * hand it to the chip and kick the transmitter.  Stops the queue when
 * the ring approaches TX_QUEUE_LEN outstanding packets.
 */
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	unsigned entry;
	unsigned long flags;

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	np->tx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);

	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

	spin_lock_irqsave(&np->lock, flags);

	if (!np->hands_off) {
		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
		/* StrongARM: Explicitly cache flush np->tx_ring and
		 * skb->data,skb->len. */
		wmb();
		np->cur_tx++;
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
			netdev_tx_done(dev);
			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
				netif_stop_queue(dev);
		}
		/* Wake the potentially-idle transmit channel.
		 */
		writel(TxOn, ioaddr + ChipCmd);
	} else {
		/* device is being shut down or reconfigured - drop the
		 * packet rather than touch the hardware */
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	if (netif_msg_tx_queued(np)) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}

/*
 * netdev_tx_done - reap completed tx descriptors: update statistics,
 * unmap and free the skbs, and wake the queue once enough room exists.
 */
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
			break;
		if (netif_msg_tx_done(np))
			printk(KERN_DEBUG
				"%s: tx frame #%d finished, status %#08x.\n",
				dev->name, np->dirty_tx,
				le32_to_cpu(np->tx_ring[entry].cmd_status));
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
		} else { /* Various Tx errors */
			int tx_status =
				le32_to_cpu(np->tx_ring[entry].cmd_status);
			if (tx_status & (DescTxAbort|DescTxExcColl))
				dev->stats.tx_aborted_errors++;
			if (tx_status & DescTxFIFO)
				dev->stats.tx_fifo_errors++;
			if (tx_status & DescTxCarrier)
				dev->stats.tx_carrier_errors++;
			if (tx_status & DescTxOOWCol)
				dev->stats.tx_window_errors++;
			dev->stats.tx_errors++;
		}
		pci_unmap_single(np->pci_dev,np->tx_dma[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);
		/* Free the original skb. */
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, wake queue. */
		netif_wake_queue(dev);
	}
}

/* The interrupt handler doesn't actually handle interrupts itself, it
 * schedules a NAPI poll if there is anything to do.
 */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* Reading IntrStatus automatically acknowledges so don't do
	 * that while interrupts are disabled, (for example, while a
	 * poll is scheduled).  */
	if (np->hands_off || !readl(ioaddr + IntrEnable))
		return IRQ_NONE;

	np->intr_status = readl(ioaddr + IntrStatus);

	if (!np->intr_status)
		return IRQ_NONE;

	if (netif_msg_intr(np))
		printk(KERN_DEBUG
		       "%s: Interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);

	if (napi_schedule_prep(&np->napi)) {
		/* Disable interrupts and register for poll */
		natsemi_irq_disable(dev);
		__napi_schedule(&np->napi);
	} else
		printk(KERN_WARNING
	       	       "%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	return IRQ_HANDLED;
}

/* This is the NAPI poll routine.  As well as the standard RX handling
 * it also handles all other interrupts that the chip might raise.
 */
static int natsemi_poll(struct napi_struct *napi, int budget)
{
	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
	struct net_device *dev = np->dev;
	void __iomem * ioaddr = ns_ioaddr(dev);
	int work_done = 0;

	do {
		if (netif_msg_intr(np))
			printk(KERN_DEBUG
			       "%s: Poll, status %#08x, mask %#08x.\n",
			       dev->name, np->intr_status,
			       readl(ioaddr + IntrMask));

		/* netdev_rx() may read IntrStatus again if the RX state
		 * machine falls over so do it first. */
		if (np->intr_status &
		    (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
		     IntrRxErr | IntrRxOverrun)) {
			netdev_rx(dev, &work_done, budget);
		}

		if (np->intr_status &
		    (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers.
		 */
		if (np->intr_status & IntrAbnormalSummary)
			netdev_error(dev, np->intr_status);

		if (work_done >= budget)
			return work_done;

		np->intr_status = readl(ioaddr + IntrStatus);
	} while (np->intr_status);

	napi_complete(napi);

	/* Reenable interrupts providing nothing is trying to shut
	 * the chip down. */
	spin_lock(&np->lock);
	if (!np->hands_off)
		natsemi_irq_enable(dev);
	spin_unlock(&np->lock);

	return work_done;
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	unsigned int buflen = np->rx_buf_sz;
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* If the driver owns the next entry it's a new packet. Send it up. */
	while (desc_status < 0) { /* e.g. & DescOwn */
		int pkt_len;
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG
				"  netdev_rx() entry %d status was %#08x.\n",
				entry, desc_status);
		if (--boguscnt < 0)
			break;

		if (*work_done >= work_to_do)
			break;

		(*work_done)++;

		pkt_len = (desc_status & DescSizeMask) - 4;
		if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
			if (desc_status & DescMore) {
				unsigned long flags;

				if (netif_msg_rx_err(np))
					printk(KERN_WARNING
						"%s: Oversized(?) Ethernet "
						"frame spanned multiple "
						"buffers, entry %#08x "
						"status %#08x.\n", dev->name,
						np->cur_rx, desc_status);
				dev->stats.rx_length_errors++;

				/* The RX state machine has probably
				 * locked up beneath us.  Follow the
				 * reset procedure documented in
				 * AN-1287. */

				spin_lock_irqsave(&np->lock, flags);
				reset_rx(dev);
				reinit_rx(dev);
				writel(np->ring_dma, ioaddr + RxRingPtr);
				check_link(dev);
				spin_unlock_irqrestore(&np->lock, flags);

				/* We'll enable RX on exit from this
				 * function. */
				break;

			} else {
				/* There was an error.
				 */
				dev->stats.rx_errors++;
				if (desc_status & (DescRxAbort|DescRxOver))
					dev->stats.rx_over_errors++;
				if (desc_status & (DescRxLong|DescRxRunt))
					dev->stats.rx_length_errors++;
				if (desc_status & (DescRxInvalid|DescRxAlign))
					dev->stats.rx_frame_errors++;
				if (desc_status & DescRxCRC)
					dev->stats.rx_crc_errors++;
			}
		} else if (pkt_len > np->rx_buf_sz) {
			/* if this is the tail of a double buffer
			 * packet, we've already counted the error
			 * on the first part.  Ignore the second half.
			 */
		} else {
			struct sk_buff *skb;
			/* Omit CRC size. */
			/* Check if the packet is long enough to accept
			 * without copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
				/* 16 byte align the IP header */
				skb_reserve(skb, RX_OFFSET);
				pci_dma_sync_single_for_cpu(np->pci_dev,
					np->rx_dma[entry],
					buflen,
					PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb,
					np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,
					np->rx_dma[entry],
					buflen,
					PCI_DMA_FROMDEVICE);
			} else {
				/* large packet: hand the ring skb itself up
				 * and let refill_rx() replace it */
				pci_unmap_single(np->pci_dev, np->rx_dma[entry],
						 buflen + NATSEMI_PADDING,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	}
	refill_rx(dev);

	/* Restart Rx engine if stopped.
	 */
	if (np->oom)
		mod_timer(&np->timer, jiffies + 1);
	else
		writel(RxOn, ioaddr + ChipCmd);
}

/*
 * netdev_error - handle the "abnormal" interrupt causes: link change,
 * statistics overflow, tx underrun (raising the tx drain threshold),
 * wake-on-lan packets, rx status FIFO overruns and PCI errors.
 * Runs from natsemi_poll(); takes np->lock itself.
 */
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	spin_lock(&np->lock);
	if (intr_status & LinkChange) {
		u16 lpa = mdio_read(dev, MII_LPA);
		if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE &&
		    netif_msg_link(np)) {
			printk(KERN_INFO
				"%s: Autonegotiation advertising"
				" %#04x  partner %#04x.\n", dev->name,
				np->advertising, lpa);
		}

		/* read MII int status to clear the flag */
		readw(ioaddr + MIntrStatus);
		check_link(dev);
	}
	if (intr_status & StatsMax) {
		__get_stats(dev);
	}
	if (intr_status & IntrTxUnderrun) {
		if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
			np->tx_config += TX_DRTH_VAL_INC;
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: increased tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		} else {
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		}
		writel(np->tx_config, ioaddr + TxConfig);
	}
	if (intr_status & WOLPkt && netif_msg_wol(np)) {
		int wol_status = readl(ioaddr + WOLCmd);
		printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
			dev->name, wol_status);
	}
	if (intr_status & RxStatusFIFOOver) {
		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
				dev->name);
		}
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	/* Hmmmmm, it's not clear how to recover from PCI faults. */
	if (intr_status & IntrPCIErr) {
		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
			intr_status & IntrPCIErr);
		dev->stats.tx_fifo_errors++;
		dev->stats.tx_errors++;
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	spin_unlock(&np->lock);
}

static void __get_stats(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* The chip only need report frame silently dropped.
	 */
	dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
	dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
}

/*
 * get_stats - ndo_get_stats: fold the chip's drop counters into
 * dev->stats under np->lock (only while the device is running).
 */
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* The chip only need report frame silently dropped. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && !np->hands_off)
		__get_stats(dev);
	spin_unlock_irq(&np->lock);

	return &dev->stats;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: run the interrupt handler with the IRQ masked. */
static void natsemi_poll_controller(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pci_dev->irq;

	disable_irq(irq);
	intr_handler(irq, dev);
	enable_irq(irq);
}
#endif

#define HASH_TABLE	0x200
/*
 * __set_rx_mode - program the rx filter for the current promiscuous /
 * multicast configuration and remember it in np->cur_rx_mode.
 * Caller holds np->lock (see set_rx_mode / init_registers).
 */
static void __set_rx_mode(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	u8 mc_filter[64]; /* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptMyPhys;
	} else {
		struct netdev_hw_addr *ha;
		int i;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* 9-bit hash index from the top of the CRC */
			int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
			mc_filter[b/8] |= (1 << (b & 0x07));
		}
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptMulticast | AcceptMyPhys;
		for (i = 0; i < 64; i += 2) {
			writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
			writel((mc_filter[i + 1] << 8) + mc_filter[i],
			       ioaddr + RxFilterData);
		}
	}
	writel(rx_mode, ioaddr + RxFilterAddr);
	np->cur_rx_mode = rx_mode;
}

/*
 * natsemi_change_mtu - ndo_change_mtu: validate the new MTU and, if the
 * device is up, stop the engines, rebuild the rx ring with the new
 * buffer size and restart.
 */
static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS)
		return -EINVAL;

	dev->mtu = new_mtu;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		struct
		netdev_private *np = netdev_priv(dev);
		void __iomem * ioaddr = ns_ioaddr(dev);
		const int irq = np->pci_dev->irq;

		disable_irq(irq);
		spin_lock(&np->lock);
		/* stop engines */
		natsemi_stop_rxtx(dev);
		/* drain rx queue */
		drain_rx(dev);
		/* change buffers */
		set_bufsize(dev);
		reinit_rx(dev);
		writel(np->ring_dma, ioaddr + RxRingPtr);
		/* restart engines */
		writel(RxOn | TxOn, ioaddr + ChipCmd);
		spin_unlock(&np->lock);
		enable_irq(irq);
	}
	return 0;
}

/* ndo_set_rx_mode wrapper: take the lock, skip if shutting down. */
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	if (!np->hands_off)
		__set_rx_mode(dev);
	spin_unlock_irq(&np->lock);
}

/* ethtool: report driver name, version and PCI bus location. */
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

/* ethtool: size of the register dump returned by get_regs(). */
static int get_regs_len(struct net_device *dev)
{
	return NATSEMI_REGS_SIZE;
}

/* ethtool: size of the EEPROM (chip-revision dependent). */
static int get_eeprom_len(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->eeprom_size;
}

/* ethtool: read current link settings under np->lock. */
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	netdev_get_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

/* ethtool: apply new link settings under np->lock. */
static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = netdev_set_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}

/* ethtool: report Wake-on-LAN capabilities, mode and SecureOn pass. */
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	netdev_get_wol(dev, &wol->supported, &wol->wolopts);
	netdev_get_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
}

/* ethtool: set Wake-on-LAN mode and SecureOn password. */
static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	netdev_set_wol(dev, wol->wolopts);
	res = netdev_set_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
	return res;
}

/* ethtool: dump the chip registers under np->lock. */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		void *buf)
{
	struct netdev_private *np = netdev_priv(dev);
	regs->version = NATSEMI_REGS_VER;
	spin_lock_irq(&np->lock);
	netdev_get_regs(dev, buf);
	spin_unlock_irq(&np->lock);
}

/* ethtool: current debug message level. */
static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

/* ethtool: set debug message level. */
static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

/*
 * ethtool: restart autonegotiation; -EINVAL if autoneg is disabled
 * in the PHY.
 */
static int nway_reset(struct net_device *dev)
{
	int tmp;
	int r = -EINVAL;
	/* if autoneg is off, it's an error */
	tmp = mdio_read(dev, MII_BMCR);
	if (tmp & BMCR_ANENABLE) {
		tmp |= (BMCR_ANRESTART);
		mdio_write(dev, MII_BMCR, tmp);
		r = 0;
	}
	return r;
}

/* ethtool: current link state from the PHY. */
static u32 get_link(struct net_device *dev)
{
	/* LSTATUS is latched low until a read - so read twice */
	mdio_read(dev, MII_BMSR);
	return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ?
		1:0;
}

/*
 * ethtool: copy out a window of the EEPROM contents.  The whole EEPROM
 * is read into a scratch buffer under np->lock, then the requested
 * offset/len slice is copied to the caller.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	u8 *eebuf;
	int res;

	eebuf = kmalloc(np->eeprom_size, GFP_KERNEL);
	if (!eebuf)
		return -ENOMEM;

	eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
	spin_lock_irq(&np->lock);
	res = netdev_get_eeprom(dev, eebuf);
	spin_unlock_irq(&np->lock);
	if (!res)
		memcpy(data, eebuf+eeprom->offset, eeprom->len);
	kfree(eebuf);
	return res;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = get_drvinfo,
	.get_regs_len = get_regs_len,
	.get_eeprom_len = get_eeprom_len,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_wol = get_wol,
	.set_wol = set_wol,
	.get_regs = get_regs,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_eeprom = get_eeprom,
};

/*
 * netdev_set_wol - translate WAKE_* flags into the chip's WOLCmd bits.
 * WAKE_MAGICSECURE is only honoured on DP83815 rev D and later.
 * Caller holds np->lock.
 */
static int netdev_set_wol(struct net_device *dev, u32 newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;

	/* translate to bitmasks this chip understands */
	if (newval & WAKE_PHY)
		data |= WakePhy;
	if (newval & WAKE_UCAST)
		data |= WakeUnicast;
	if (newval & WAKE_MCAST)
		data |= WakeMulticast;
	if (newval & WAKE_BCAST)
		data |= WakeBroadcast;
	if (newval & WAKE_ARP)
		data |= WakeArp;
	if (newval & WAKE_MAGIC)
		data |= WakeMagic;
	if (np->srr >= SRR_DP83815_D) {
		if (newval & WAKE_MAGICSECURE) {
			data |= WakeMagicSecure;
		}
	}

	writel(data, ioaddr + WOLCmd);

	return 0;
}

/*
 * netdev_get_wol - report supported and currently enabled WAKE_* modes
 * from the WOLCmd register.  Caller holds np->lock.
 */
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u32 regval = readl(ioaddr + WOLCmd);

	*supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
			| WAKE_ARP | WAKE_MAGIC);

	if (np->srr >= SRR_DP83815_D) {
		/* SOPASS works on revD and higher */
		*supported |= WAKE_MAGICSECURE;
	}
	*cur = 0;

	/* translate from chip bitmasks */
	if (regval & WakePhy)
		*cur
			|= WAKE_PHY;
	if (regval & WakeUnicast)
		*cur |= WAKE_UCAST;
	if (regval & WakeMulticast)
		*cur |= WAKE_MCAST;
	if (regval & WakeBroadcast)
		*cur |= WAKE_BCAST;
	if (regval & WakeArp)
		*cur |= WAKE_ARP;
	if (regval & WakeMagic)
		*cur |= WAKE_MAGIC;
	if (regval & WakeMagicSecure) {
		/* this can be on in revC, but it's broken */
		*cur |= WAKE_MAGICSECURE;
	}

	return 0;
}

/*
 * netdev_set_sopass - store the 6-byte SecureOn password in the
 * (undocumented) RFCR slots 0xa/0xc/0xe.  No-op before DP83815 rev D.
 * The rx filter is disabled around the writes and re-enabled after.
 * Caller holds np->lock.
 */
static int netdev_set_sopass(struct net_device *dev, u8 *newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)newval;
	u32 addr;

	if (np->srr < SRR_DP83815_D) {
		return 0;
	}

	/* enable writing to these registers by disabling the RX filter */
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
	addr &= ~RxFilterEnable;
	writel(addr, ioaddr + RxFilterAddr);

	/* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */
	writel(addr | 0xa, ioaddr + RxFilterAddr);
	writew(sval[0], ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	writew(sval[1], ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	writew(sval[2], ioaddr + RxFilterData);

	/* re-enable the RX filter */
	writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);

	return 0;
}

/*
 * netdev_get_sopass - read back the SecureOn password (zeroed on
 * pre-rev-D chips).  Caller holds np->lock.
 */
static int netdev_get_sopass(struct net_device *dev, u8 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)data;
	u32 addr;

	if (np->srr < SRR_DP83815_D) {
		sval[0] = sval[1] = sval[2] = 0;
		return 0;
	}

	/* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;

	writel(addr | 0xa, ioaddr + RxFilterAddr);
	sval[0] = readw(ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	sval[1] = readw(ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	sval[2] = readw(ioaddr + RxFilterData);

	writel(addr, ioaddr + RxFilterAddr);

	return 0;
}

/*
 * netdev_get_ecmd - fill an ethtool_cmd with the current link
 * parameters (speed, duplex, autoneg, advertised modes).
 * Caller holds np->lock.
 */
static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np =
		netdev_priv(dev);
	u32 tmp;

	ecmd->port        = dev->if_port;
	ethtool_cmd_speed_set(ecmd, np->speed);
	ecmd->duplex      = np->duplex;
	ecmd->autoneg     = np->autoneg;
	ecmd->advertising = 0;

	if (np->advertising & ADVERTISE_10HALF)
		ecmd->advertising |= ADVERTISED_10baseT_Half;
	if (np->advertising & ADVERTISE_10FULL)
		ecmd->advertising |= ADVERTISED_10baseT_Full;
	if (np->advertising & ADVERTISE_100HALF)
		ecmd->advertising |= ADVERTISED_100baseT_Half;
	if (np->advertising & ADVERTISE_100FULL)
		ecmd->advertising |= ADVERTISED_100baseT_Full;
	ecmd->supported   = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half  | SUPPORTED_10baseT_Full  |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
	ecmd->phy_address = np->phy_addr_external;
	/*
	 * We intentionally report the phy address of the external
	 * phy, even if the internal phy is used. This is necessary
	 * to work around a deficiency of the ethtool interface:
	 * It's only possible to query the settings of the active
	 * port. Therefore
	 * # ethtool -s ethX port mii
	 * actually sends an ioctl to switch to port mii with the
	 * settings that are used for the current active port.
	 * If we would report a different phy address in this
	 * command, then
	 * # ethtool -s ethX port tp;ethtool -s ethX port mii
	 * would unintentionally change the phy address.
	 *
	 * Fortunately the phy address doesn't matter with the
	 * internal phy...
*/ /* set information based on active port type */ switch (ecmd->port) { default: case PORT_TP: ecmd->advertising |= ADVERTISED_TP; ecmd->transceiver = XCVR_INTERNAL; break; case PORT_MII: ecmd->advertising |= ADVERTISED_MII; ecmd->transceiver = XCVR_EXTERNAL; break; case PORT_FIBRE: ecmd->advertising |= ADVERTISED_FIBRE; ecmd->transceiver = XCVR_EXTERNAL; break; } /* if autonegotiation is on, try to return the active speed/duplex */ if (ecmd->autoneg == AUTONEG_ENABLE) { ecmd->advertising |= ADVERTISED_Autoneg; tmp = mii_nway_result( np->advertising & mdio_read(dev, MII_LPA)); if (tmp == LPA_100FULL || tmp == LPA_100HALF) ethtool_cmd_speed_set(ecmd, SPEED_100); else ethtool_cmd_speed_set(ecmd, SPEED_10); if (tmp == LPA_100FULL || tmp == LPA_10FULL) ecmd->duplex = DUPLEX_FULL; else ecmd->duplex = DUPLEX_HALF; } /* ignore maxtxpkt, maxrxpkt for now */ return 0; } static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) { struct netdev_private *np = netdev_priv(dev); if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE) return -EINVAL; if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL) return -EINVAL; if (ecmd->autoneg == AUTONEG_ENABLE) { if ((ecmd->advertising & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full)) == 0) { return -EINVAL; } } else if (ecmd->autoneg == AUTONEG_DISABLE) { u32 speed = ethtool_cmd_speed(ecmd); if (speed != SPEED_10 && speed != SPEED_100) return -EINVAL; if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) return -EINVAL; } else { return -EINVAL; } /* * If we're ignoring the PHY then autoneg and the internal * transceiver are really not going to work so don't let the * user select them. */ if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE || ecmd->port == PORT_TP)) return -EINVAL; /* * maxtxpkt, maxrxpkt: ignored for now. 
* * transceiver: * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and * selects based on ecmd->port. * * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre * phys that are connected to the mii bus. It's used to apply fibre * specific updates. */ /* WHEW! now lets bang some bits */ /* save the parms */ dev->if_port = ecmd->port; np->autoneg = ecmd->autoneg; np->phy_addr_external = ecmd->phy_address & PhyAddrMask; if (np->autoneg == AUTONEG_ENABLE) { /* advertise only what has been requested */ np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (ecmd->advertising & ADVERTISED_10baseT_Half) np->advertising |= ADVERTISE_10HALF; if (ecmd->advertising & ADVERTISED_10baseT_Full) np->advertising |= ADVERTISE_10FULL; if (ecmd->advertising & ADVERTISED_100baseT_Half) np->advertising |= ADVERTISE_100HALF; if (ecmd->advertising & ADVERTISED_100baseT_Full) np->advertising |= ADVERTISE_100FULL; } else { np->speed = ethtool_cmd_speed(ecmd); np->duplex = ecmd->duplex; /* user overriding the initial full duplex parm? 
*/ if (np->duplex == DUPLEX_HALF) np->full_duplex = 0; } /* get the right phy enabled */ if (ecmd->port == PORT_TP) switch_port_internal(dev); else switch_port_external(dev); /* set parms and see how this affected our link status */ init_phy_fixup(dev); check_link(dev); return 0; } static int netdev_get_regs(struct net_device *dev, u8 *buf) { int i; int j; u32 rfcr; u32 *rbuf = (u32 *)buf; void __iomem * ioaddr = ns_ioaddr(dev); /* read non-mii page 0 of registers */ for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) { rbuf[i] = readl(ioaddr + i*4); } /* read current mii registers */ for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++) rbuf[i] = mdio_read(dev, i & 0x1f); /* read only the 'magic' registers from page 1 */ writew(1, ioaddr + PGSEL); rbuf[i++] = readw(ioaddr + PMDCSR); rbuf[i++] = readw(ioaddr + TSTDAT); rbuf[i++] = readw(ioaddr + DSPCFG); rbuf[i++] = readw(ioaddr + SDCFG); writew(0, ioaddr + PGSEL); /* read RFCR indexed registers */ rfcr = readl(ioaddr + RxFilterAddr); for (j = 0; j < NATSEMI_RFDR_NREGS; j++) { writel(j*2, ioaddr + RxFilterAddr); rbuf[i++] = readw(ioaddr + RxFilterData); } writel(rfcr, ioaddr + RxFilterAddr); /* the interrupt status is clear-on-read - see if we missed any */ if (rbuf[4] & rbuf[5]) { printk(KERN_WARNING "%s: shoot, we dropped an interrupt (%#08x)\n", dev->name, rbuf[4] & rbuf[5]); } return 0; } #define SWAP_BITS(x) ( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \ | (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9) \ | (((x) & 0x0010) << 7) | (((x) & 0x0020) << 5) \ | (((x) & 0x0040) << 3) | (((x) & 0x0080) << 1) \ | (((x) & 0x0100) >> 1) | (((x) & 0x0200) >> 3) \ | (((x) & 0x0400) >> 5) | (((x) & 0x0800) >> 7) \ | (((x) & 0x1000) >> 9) | (((x) & 0x2000) >> 11) \ | (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) ) static int netdev_get_eeprom(struct net_device *dev, u8 *buf) { int i; u16 *ebuf = (u16 *)buf; void __iomem * ioaddr = ns_ioaddr(dev); struct netdev_private *np = netdev_priv(dev); /* eeprom_read reads 16 bits, 
and indexes by 16 bits */ for (i = 0; i < np->eeprom_size/2; i++) { ebuf[i] = eeprom_read(ioaddr, i); /* The EEPROM itself stores data bit-swapped, but eeprom_read * reads it back "sanely". So we swap it back here in order to * present it to userland as it is stored. */ ebuf[i] = SWAP_BITS(ebuf[i]); } return 0; } static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct mii_ioctl_data *data = if_mii(rq); struct netdev_private *np = netdev_priv(dev); switch(cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = np->phy_addr_external; /* Fall Through */ case SIOCGMIIREG: /* Read MII PHY register. */ /* The phy_id is not enough to uniquely identify * the intended target. Therefore the command is sent to * the given mii on the current port. */ if (dev->if_port == PORT_TP) { if ((data->phy_id & 0x1f) == np->phy_addr_external) data->val_out = mdio_read(dev, data->reg_num & 0x1f); else data->val_out = 0; } else { move_int_phy(dev, data->phy_id & 0x1f); data->val_out = miiport_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f); } return 0; case SIOCSMIIREG: /* Write MII PHY register. */ if (dev->if_port == PORT_TP) { if ((data->phy_id & 0x1f) == np->phy_addr_external) { if ((data->reg_num & 0x1f) == MII_ADVERTISE) np->advertising = data->val_in; mdio_write(dev, data->reg_num & 0x1f, data->val_in); } } else { if ((data->phy_id & 0x1f) == np->phy_addr_external) { if ((data->reg_num & 0x1f) == MII_ADVERTISE) np->advertising = data->val_in; } move_int_phy(dev, data->phy_id & 0x1f); miiport_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); } return 0; default: return -EOPNOTSUPP; } } static void enable_wol_mode(struct net_device *dev, int enable_intr) { void __iomem * ioaddr = ns_ioaddr(dev); struct netdev_private *np = netdev_priv(dev); if (netif_msg_wol(np)) printk(KERN_INFO "%s: remaining active for wake-on-lan\n", dev->name); /* For WOL we must restart the rx process in silent mode. 
* Write NULL to the RxRingPtr. Only possible if * rx process is stopped */ writel(0, ioaddr + RxRingPtr); /* read WoL status to clear */ readl(ioaddr + WOLCmd); /* PME on, clear status */ writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun); /* and restart the rx process */ writel(RxOn, ioaddr + ChipCmd); if (enable_intr) { /* enable the WOL interrupt. * Could be used to send a netlink message. */ writel(WOLPkt | LinkChange, ioaddr + IntrMask); natsemi_irq_enable(dev); } } static int netdev_close(struct net_device *dev) { void __iomem * ioaddr = ns_ioaddr(dev); struct netdev_private *np = netdev_priv(dev); const int irq = np->pci_dev->irq; if (netif_msg_ifdown(np)) printk(KERN_DEBUG "%s: Shutting down ethercard, status was %#04x.\n", dev->name, (int)readl(ioaddr + ChipCmd)); if (netif_msg_pktdata(np)) printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); napi_disable(&np->napi); /* * FIXME: what if someone tries to close a device * that is suspended? * Should we reenable the nic to switch to * the final WOL settings? */ del_timer_sync(&np->timer); disable_irq(irq); spin_lock_irq(&np->lock); natsemi_irq_disable(dev); np->hands_off = 1; spin_unlock_irq(&np->lock); enable_irq(irq); free_irq(irq, dev); /* Interrupt disabled, interrupt handler released, * queue stopped, timer deleted, rtnl_lock held * All async codepaths that access the driver are disabled. */ spin_lock_irq(&np->lock); np->hands_off = 0; readl(ioaddr + IntrMask); readw(ioaddr + MIntrStatus); /* Freeze Stats */ writel(StatsFreeze, ioaddr + StatsCtrl); /* Stop the chip's Tx and Rx processes. 
*/ natsemi_stop_rxtx(dev); __get_stats(dev); spin_unlock_irq(&np->lock); /* clear the carrier last - an interrupt could reenable it otherwise */ netif_carrier_off(dev); netif_stop_queue(dev); dump_ring(dev); drain_ring(dev); free_ring(dev); { u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary; if (wol) { /* restart the NIC in WOL mode. * The nic must be stopped for this. */ enable_wol_mode(dev, 0); } else { /* Restore PME enable bit unmolested */ writel(np->SavedClkRun, ioaddr + ClkRun); } } return 0; } static void natsemi_remove1(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); void __iomem * ioaddr = ns_ioaddr(dev); NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround); unregister_netdev (dev); pci_release_regions (pdev); iounmap(ioaddr); free_netdev (dev); } #ifdef CONFIG_PM /* * The ns83815 chip doesn't have explicit RxStop bits. * Kicking the Rx or Tx process for a new packet reenables the Rx process * of the nic, thus this function must be very careful: * * suspend/resume synchronization: * entry points: * netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler, * start_tx, ns_tx_timeout * * No function accesses the hardware without checking np->hands_off. * the check occurs under spin_lock_irq(&np->lock); * exceptions: * * netdev_ioctl: noncritical access. * * netdev_open: cannot happen due to the device_detach * * netdev_close: doesn't hurt. * * netdev_timer: timer stopped by natsemi_suspend. * * intr_handler: doesn't acquire the spinlock. suspend calls * disable_irq() to enforce synchronization. * * natsemi_poll: checks before reenabling interrupts. suspend * sets hands_off, disables interrupts and then waits with * napi_disable(). * * Interrupts must be disabled, otherwise hands_off can cause irq storms. 
*/ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata (pdev); struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); rtnl_lock(); if (netif_running (dev)) { const int irq = np->pci_dev->irq; del_timer_sync(&np->timer); disable_irq(irq); spin_lock_irq(&np->lock); natsemi_irq_disable(dev); np->hands_off = 1; natsemi_stop_rxtx(dev); netif_stop_queue(dev); spin_unlock_irq(&np->lock); enable_irq(irq); napi_disable(&np->napi); /* Update the error counts. */ __get_stats(dev); /* pci_power_off(pdev, -1); */ drain_ring(dev); { u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary; /* Restore PME enable bit */ if (wol) { /* restart the NIC in WOL mode. * The nic must be stopped for this. * FIXME: use the WOL interrupt */ enable_wol_mode(dev, 0); } else { /* Restore PME enable bit unmolested */ writel(np->SavedClkRun, ioaddr + ClkRun); } } } netif_device_detach(dev); rtnl_unlock(); return 0; } static int natsemi_resume (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); struct netdev_private *np = netdev_priv(dev); int ret = 0; rtnl_lock(); if (netif_device_present(dev)) goto out; if (netif_running(dev)) { const int irq = np->pci_dev->irq; BUG_ON(!np->hands_off); ret = pci_enable_device(pdev); if (ret < 0) { dev_err(&pdev->dev, "pci_enable_device() failed: %d\n", ret); goto out; } /* pci_power_on(pdev); */ napi_enable(&np->napi); natsemi_reset(dev); init_ring(dev); disable_irq(irq); spin_lock_irq(&np->lock); np->hands_off = 0; init_registers(dev); netif_device_attach(dev); spin_unlock_irq(&np->lock); enable_irq(irq); mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ)); } netif_device_attach(dev); out: rtnl_unlock(); return ret; } #endif /* CONFIG_PM */ static struct pci_driver natsemi_driver = { .name = DRV_NAME, .id_table = natsemi_pci_tbl, .probe = natsemi_probe1, .remove = natsemi_remove1, #ifdef CONFIG_PM .suspend = natsemi_suspend, .resume = natsemi_resume, 
#endif }; static int __init natsemi_init_mod (void) { /* when a module, this is printed whether or not devices are found in probe */ #ifdef MODULE printk(version); #endif return pci_register_driver(&natsemi_driver); } static void __exit natsemi_exit_mod (void) { pci_unregister_driver (&natsemi_driver); } module_init(natsemi_init_mod); module_exit(natsemi_exit_mod);
gpl-2.0
wwenigma/cocktail-kernel-msm7x30
net/mac80211/offchannel.c
1802
8721
/*
 * Off-channel operation helpers
 *
 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
 * Copyright 2004, Instant802 Networks, Inc.
 * Copyright 2005, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-trace.h"

/*
 * Tell our hardware to disable PS.
 * Optionally inform AP that we will go to sleep so that it will buffer
 * the frames while we are doing off-channel work. This is optional
 * because we *may* be doing work on-operating channel, and want our
 * hardware unconditionally awake, but still let the AP send us normal frames.
 */
static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
					   bool tell_ap)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;

	local->offchannel_ps_enabled = false;

	/* FIXME: what to do when local->pspolling is true? */

	del_timer_sync(&local->dynamic_ps_timer);
	del_timer_sync(&ifmgd->bcn_mon_timer);
	del_timer_sync(&ifmgd->conn_mon_timer);

	cancel_work_sync(&local->dynamic_ps_enable_work);

	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
		/* remember that PS was on so it can be restored on return */
		local->offchannel_ps_enabled = true;
		local->hw.conf.flags &= ~IEEE80211_CONF_PS;
		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
	}

	if (tell_ap && (!local->offchannel_ps_enabled ||
			!(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)))
		/*
		 * If power save was enabled, no need to send a nullfunc
		 * frame because AP knows that we are sleeping. But if the
		 * hardware is creating the nullfunc frame for power save
		 * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
		 * enabled) and power save was enabled, the firmware just
		 * sent a null frame with power save disabled. So we need
		 * to send a new nullfunc frame to inform the AP that we
		 * are again sleeping.
		 */
		ieee80211_send_nullfunc(local, sdata, 1);
}

/* inform AP that we are awake again, unless power save is enabled */
static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;

	if (!local->ps_sdata)
		ieee80211_send_nullfunc(local, sdata, 0);
	else if (local->offchannel_ps_enabled) {
		/*
		 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
		 * will send a nullfunc frame with the powersave bit set
		 * even though the AP already knows that we are sleeping.
		 * This could be avoided by sending a null frame with power
		 * save bit disabled before enabling the power save, but
		 * this doesn't gain anything.
		 *
		 * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
		 * to send a nullfunc frame because AP already knows that
		 * we are sleeping, let's just enable power save mode in
		 * hardware.
		 */
		/* TODO: Only set hardware if CONF_PS changed?
		 * TODO: Should we set offchannel_ps_enabled to false?
		 */
		local->hw.conf.flags |= IEEE80211_CONF_PS;
		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
	} else if (local->hw.conf.dynamic_ps_timeout > 0) {
		/*
		 * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
		 * had been running before leaving the operating channel,
		 * restart the timer now and send a nullfunc frame to inform
		 * the AP that we are awake.
		 */
		ieee80211_send_nullfunc(local, sdata, 0);
		mod_timer(&local->dynamic_ps_timer, jiffies +
			  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
	}

	ieee80211_sta_reset_beacon_monitor(sdata);
	ieee80211_sta_reset_conn_monitor(sdata);
}

/* Stop all running (non-monitor) interfaces before going off-channel:
 * mark them offchannel, disable beaconing, stop their tx queues and
 * optionally put associated STA interfaces into off-channel PS. */
void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
				    bool offchannel_ps_enable)
{
	struct ieee80211_sub_if_data *sdata;

	/*
	 * notify the AP about us leaving the channel and stop all
	 * STA interfaces.
	 */
	mutex_lock(&local->iflist_mtx);
	list_for_each_entry(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;

		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
			set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);

		/* Check to see if we should disable beaconing. */
		if (sdata->vif.type == NL80211_IFTYPE_AP ||
		    sdata->vif.type == NL80211_IFTYPE_ADHOC ||
		    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
			ieee80211_bss_info_change_notify(
				sdata, BSS_CHANGED_BEACON_ENABLED);

		if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
			netif_tx_stop_all_queues(sdata->dev);
			if (offchannel_ps_enable &&
			    (sdata->vif.type == NL80211_IFTYPE_STATION) &&
			    sdata->u.mgd.associated)
				ieee80211_offchannel_ps_enable(sdata, true);
		}
	}
	mutex_unlock(&local->iflist_mtx);
}

/* Enable off-channel PS on every running, associated STA interface. */
void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
					bool tell_ap)
{
	struct ieee80211_sub_if_data *sdata;

	mutex_lock(&local->iflist_mtx);
	list_for_each_entry(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;

		if (sdata->vif.type == NL80211_IFTYPE_STATION &&
		    sdata->u.mgd.associated)
			ieee80211_offchannel_ps_enable(sdata, tell_ap);
	}
	mutex_unlock(&local->iflist_mtx);
}

/* Undo ieee80211_offchannel_stop_vifs() after returning to the operating
 * channel: leave PS, wake tx queues and optionally re-enable beaconing. */
void ieee80211_offchannel_return(struct ieee80211_local *local,
				 bool enable_beaconing,
				 bool offchannel_ps_disable)
{
	struct ieee80211_sub_if_data *sdata;

	mutex_lock(&local->iflist_mtx);
	list_for_each_entry(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;

		/* Tell AP we're back */
		if (offchannel_ps_disable &&
		    sdata->vif.type == NL80211_IFTYPE_STATION) {
			if (sdata->u.mgd.associated)
				ieee80211_offchannel_ps_disable(sdata);
		}

		if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
			clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
			/*
			 * This may wake up queues even though the driver
			 * currently has them stopped. This is not very
			 * likely, since the driver won't have gotten any
			 * (or hardly any) new packets while we weren't
			 * on the right channel, and even if it happens
			 * it will at most lead to queueing up one more
			 * packet per queue in mac80211 rather than on
			 * the interface qdisc.
			 */
			netif_tx_wake_all_queues(sdata->dev);
		}

		/* Check to see if we should re-enable beaconing */
		if (enable_beaconing &&
		    (sdata->vif.type == NL80211_IFTYPE_AP ||
		     sdata->vif.type == NL80211_IFTYPE_ADHOC ||
		     sdata->vif.type == NL80211_IFTYPE_MESH_POINT))
			ieee80211_bss_info_change_notify(
				sdata, BSS_CHANGED_BEACON_ENABLED);
	}
	mutex_unlock(&local->iflist_mtx);
}

/* Work item run once the hardware is on the remain-on-channel channel:
 * either transmit the queued offchannel frame or notify cfg80211. */
static void ieee80211_hw_roc_start(struct work_struct *work)
{
	struct ieee80211_local *local =
		container_of(work, struct ieee80211_local, hw_roc_start);
	struct ieee80211_sub_if_data *sdata;

	mutex_lock(&local->mtx);

	/* the ROC may have been cancelled before the work ran */
	if (!local->hw_roc_channel) {
		mutex_unlock(&local->mtx);
		return;
	}

	ieee80211_recalc_idle(local);

	if (local->hw_roc_skb) {
		sdata = IEEE80211_DEV_TO_SUB_IF(local->hw_roc_dev);
		ieee80211_tx_skb(sdata, local->hw_roc_skb);
		local->hw_roc_skb = NULL;
	} else {
		cfg80211_ready_on_channel(local->hw_roc_dev,
					  local->hw_roc_cookie,
					  local->hw_roc_channel,
					  local->hw_roc_channel_type,
					  local->hw_roc_duration,
					  GFP_KERNEL);
	}

	mutex_unlock(&local->mtx);
}

/* Driver-facing entry point: queue the ROC-start work from irq-safe
 * context. */
void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	trace_api_ready_on_channel(local);

	ieee80211_queue_work(hw, &local->hw_roc_start);
}
EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);

/* Work item run when the remain-on-channel period ended: notify cfg80211
 * (unless the ROC was only for a tx) and clear the ROC state. */
static void ieee80211_hw_roc_done(struct work_struct *work)
{
	struct ieee80211_local *local =
		container_of(work, struct ieee80211_local, hw_roc_done);

	mutex_lock(&local->mtx);

	if (!local->hw_roc_channel) {
		mutex_unlock(&local->mtx);
		return;
	}

	if (!local->hw_roc_for_tx)
		cfg80211_remain_on_channel_expired(local->hw_roc_dev,
						   local->hw_roc_cookie,
						   local->hw_roc_channel,
						   local->hw_roc_channel_type,
						   GFP_KERNEL);

	local->hw_roc_channel = NULL;
	local->hw_roc_cookie = 0;

	ieee80211_recalc_idle(local);

	mutex_unlock(&local->mtx);
}

/* Driver-facing entry point: queue the ROC-done work from irq-safe
 * context. */
void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	trace_api_remain_on_channel_expired(local);

	ieee80211_queue_work(hw, &local->hw_roc_done);
}
EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);

/* One-time init of the two remain-on-channel work structs. */
void ieee80211_hw_roc_setup(struct ieee80211_local *local)
{
	INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
	INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
}
gpl-2.0
Pauliecoon/android_kernel_moto_shamu
drivers/scsi/sym53c8xx_2/sym_glue.c
2314
53958
/* * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family * of PCI-SCSI IO processors. * * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr> * Copyright (c) 2003-2005 Matthew Wilcox <matthew@wil.cx> * * This driver is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-2000 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. * * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier <wolf@cologne.de> * Stefan Esser <se@mi.Uni-Koeln.de> * Copyright (C) 1994 Wolfgang Stanglmeier * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk> * *----------------------------------------------------------------------------- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/ctype.h> #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/spinlock.h> #include <scsi/scsi.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport.h> #include "sym_glue.h" #include "sym_nvram.h" #define NAME53C "sym53c" #define NAME53C8XX "sym53c8xx" struct sym_driver_setup sym_driver_setup = SYM_LINUX_DRIVER_SETUP; unsigned int sym_debug_flags = 0; static char *excl_string; static char *safe_string; module_param_named(cmd_per_lun, sym_driver_setup.max_tag, ushort, 0); module_param_named(burst, sym_driver_setup.burst_order, byte, 0); module_param_named(led, sym_driver_setup.scsi_led, byte, 0); module_param_named(diff, sym_driver_setup.scsi_diff, byte, 0); module_param_named(irqm, sym_driver_setup.irq_mode, byte, 0); module_param_named(buschk, sym_driver_setup.scsi_bus_check, byte, 0); module_param_named(hostid, sym_driver_setup.host_id, byte, 0); module_param_named(verb, sym_driver_setup.verbose, byte, 0); module_param_named(debug, sym_debug_flags, uint, 0); module_param_named(settle, sym_driver_setup.settle_delay, byte, 0); module_param_named(nvram, sym_driver_setup.use_nvram, byte, 0); module_param_named(excl, excl_string, charp, 0); module_param_named(safe, safe_string, charp, 0); MODULE_PARM_DESC(cmd_per_lun, "The maximum number of tags to use by default"); MODULE_PARM_DESC(burst, "Maximum burst. 
0 to disable, 255 to read from registers"); MODULE_PARM_DESC(led, "Set to 1 to enable LED support"); MODULE_PARM_DESC(diff, "0 for no differential mode, 1 for BIOS, 2 for always, 3 for not GPIO3"); MODULE_PARM_DESC(irqm, "0 for open drain, 1 to leave alone, 2 for totem pole"); MODULE_PARM_DESC(buschk, "0 to not check, 1 for detach on error, 2 for warn on error"); MODULE_PARM_DESC(hostid, "The SCSI ID to use for the host adapters"); MODULE_PARM_DESC(verb, "0 for minimal verbosity, 1 for normal, 2 for excessive"); MODULE_PARM_DESC(debug, "Set bits to enable debugging"); MODULE_PARM_DESC(settle, "Settle delay in seconds. Default 3"); MODULE_PARM_DESC(nvram, "Option currently not used"); MODULE_PARM_DESC(excl, "List ioport addresses here to prevent controllers from being attached"); MODULE_PARM_DESC(safe, "Set other settings to a \"safe mode\""); MODULE_LICENSE("GPL"); MODULE_VERSION(SYM_VERSION); MODULE_AUTHOR("Matthew Wilcox <matthew@wil.cx>"); MODULE_DESCRIPTION("NCR, Symbios and LSI 8xx and 1010 PCI SCSI adapters"); static void sym2_setup_params(void) { char *p = excl_string; int xi = 0; while (p && (xi < 8)) { char *next_p; int val = (int) simple_strtoul(p, &next_p, 0); sym_driver_setup.excludes[xi++] = val; p = next_p; } if (safe_string) { if (*safe_string == 'y') { sym_driver_setup.max_tag = 0; sym_driver_setup.burst_order = 0; sym_driver_setup.scsi_led = 0; sym_driver_setup.scsi_diff = 1; sym_driver_setup.irq_mode = 0; sym_driver_setup.scsi_bus_check = 2; sym_driver_setup.host_id = 7; sym_driver_setup.verbose = 2; sym_driver_setup.settle_delay = 10; sym_driver_setup.use_nvram = 1; } else if (*safe_string != 'n') { printk(KERN_WARNING NAME53C8XX "Ignoring parameter %s" " passed to safe option", safe_string); } } } static struct scsi_transport_template *sym2_transport_template = NULL; /* * Driver private area in the SCSI command structure. 
*/ struct sym_ucmd { /* Override the SCSI pointer structure */ struct completion *eh_done; /* SCSI error handling */ }; #define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)(&(cmd)->SCp)) #define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host) /* * Complete a pending CAM CCB. */ void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd) { struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); BUILD_BUG_ON(sizeof(struct scsi_pointer) < sizeof(struct sym_ucmd)); if (ucmd->eh_done) complete(ucmd->eh_done); scsi_dma_unmap(cmd); cmd->scsi_done(cmd); } /* * Tell the SCSI layer about a BUS RESET. */ void sym_xpt_async_bus_reset(struct sym_hcb *np) { printf_notice("%s: SCSI BUS has been reset.\n", sym_name(np)); np->s.settle_time = jiffies + sym_driver_setup.settle_delay * HZ; np->s.settle_time_valid = 1; if (sym_verbose >= 2) printf_info("%s: command processing suspended for %d seconds\n", sym_name(np), sym_driver_setup.settle_delay); } /* * Choose the more appropriate CAM status if * the IO encountered an extended error. */ static int sym_xerr_cam_status(int cam_status, int x_status) { if (x_status) { if (x_status & XE_PARITY_ERR) cam_status = DID_PARITY; else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) cam_status = DID_ERROR; else if (x_status & XE_BAD_PHASE) cam_status = DID_ERROR; else cam_status = DID_ERROR; } return cam_status; } /* * Build CAM result for a failed or auto-sensed IO. 
*/ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid) { struct scsi_cmnd *cmd = cp->cmd; u_int cam_status, scsi_status, drv_status; drv_status = 0; cam_status = DID_OK; scsi_status = cp->ssss_status; if (cp->host_flags & HF_SENSE) { scsi_status = cp->sv_scsi_status; resid = cp->sv_resid; if (sym_verbose && cp->sv_xerr_status) sym_print_xerr(cmd, cp->sv_xerr_status); if (cp->host_status == HS_COMPLETE && cp->ssss_status == S_GOOD && cp->xerr_status == 0) { cam_status = sym_xerr_cam_status(DID_OK, cp->sv_xerr_status); drv_status = DRIVER_SENSE; /* * Bounce back the sense data to user. */ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); memcpy(cmd->sense_buffer, cp->sns_bbuf, min(SCSI_SENSE_BUFFERSIZE, SYM_SNS_BBUF_LEN)); #if 0 /* * If the device reports a UNIT ATTENTION condition * due to a RESET condition, we should consider all * disconnect CCBs for this unit as aborted. */ if (1) { u_char *p; p = (u_char *) cmd->sense_data; if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29) sym_clear_tasks(np, DID_ABORT, cp->target,cp->lun, -1); } #endif } else { /* * Error return from our internal request sense. This * is bad: we must clear the contingent allegiance * condition otherwise the device will always return * BUSY. Use a big stick. */ sym_reset_scsi_target(np, cmd->device->id); cam_status = DID_ERROR; } } else if (cp->host_status == HS_COMPLETE) /* Bad SCSI status */ cam_status = DID_OK; else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */ cam_status = DID_NO_CONNECT; else if (cp->host_status == HS_UNEXPECTED) /* Unexpected BUS FREE*/ cam_status = DID_ERROR; else { /* Extended error */ if (sym_verbose) { sym_print_addr(cmd, "COMMAND FAILED (%x %x %x).\n", cp->host_status, cp->ssss_status, cp->xerr_status); } /* * Set the most appropriate value for CAM status. 
 */
		cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
	}
	scsi_set_resid(cmd, resid);
	cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status;
}

/*
 *  Build the chip's scatter/gather move table for a command.
 *
 *  Maps the command for DMA and fills cp->phys.data[] (from the END of
 *  the table backwards, so the SCRIPTS processor can start at the first
 *  used entry).  Returns the number of segments, -1 if the list does not
 *  fit in SYM_CONF_MAX_SG entries, or -2 if the DMA mapping failed.
 */
static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
{
	int segment;
	int use_sg;

	cp->data_len = 0;

	use_sg = scsi_dma_map(cmd);
	if (use_sg > 0) {
		struct scatterlist *sg;
		struct sym_tcb *tp = &np->target[cp->target];
		struct sym_tblmove *data;

		if (use_sg > SYM_CONF_MAX_SG) {
			scsi_dma_unmap(cmd);
			return -1;
		}

		/* Fill the tail of the table so the first segment used is
		 * data[SYM_CONF_MAX_SG - use_sg]. */
		data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];

		scsi_for_each_sg(cmd, sg, use_sg, segment) {
			dma_addr_t baddr = sg_dma_address(sg);
			unsigned int len = sg_dma_len(sg);

			if ((len & 1) && (tp->head.wval & EWS)) {
				/* Wide (16-bit) transfers need an even byte
				 * count; pad odd segments by one byte and
				 * remember how many were adjusted. */
				len++;
				cp->odd_byte_adjustment++;
			}

			sym_build_sge(np, &data[segment], baddr, len);
			cp->data_len += len;
		}
	} else {
		/* DMA mapping failed (or zero segments). */
		segment = -2;
	}

	return segment;
}

/*
 *  Queue a SCSI command.
 *
 *  Returns 0 on success, 1 on CCB resource shortage (caller requeues).
 */
static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct sym_tcb *tp;
	struct sym_lcb *lp;
	struct sym_ccb *cp;
	int	order;

	/*
	 *  Retrieve the target descriptor.
	 */
	tp = &np->target[sdev->id];

	/*
	 *  Select tagged/untagged.
	 */
	lp = sym_lp(tp, sdev->lun);
	order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0;

	/*
	 *  Queue the SCSI IO.
	 */
	cp = sym_get_ccb(np, cmd, order);
	if (!cp)
		return 1;	/* Means resource shortage */
	sym_queue_scsiio(np, cmd, cp);
	return 0;
}

/*
 *  Setup buffers and pointers that address the CDB.
 */
static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
{
	memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len);

	cp->phys.cmd.addr = CCB_BA(cp, cdb_buf[0]);
	cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len);

	return 0;
}

/*
 *  Setup pointers that address the data and start the I/O.
 */
int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
{
	u32 lastp, goalp;
	int dir;

	/*
	 *  Build the CDB.
	 */
	if (sym_setup_cdb(np, cmd, cp))
		goto out_abort;

	/*
	 *  No direction means no data.
	 */
	dir = cmd->sc_data_direction;
	if (dir != DMA_NONE) {
		cp->segments = sym_scatter(np, cp, cmd);
		if (cp->segments < 0) {
			sym_set_cam_status(cmd, DID_ERROR);
			goto out_abort;
		}

		/*
		 *  No segments means no data.
		 */
		if (!cp->segments)
			dir = DMA_NONE;
	} else {
		cp->data_len = 0;
		cp->segments = 0;
	}

	/*
	 *  Set the data pointer.
	 *
	 *  goalp/lastp are SCRIPTS addresses: lastp points at the first
	 *  MOVE entry to execute, goalp at the end of the data phase;
	 *  each table entry is 2 32-bit words (hence segments * (2*4)).
	 */
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		scmd_printk(KERN_INFO, cmd, "got DMA_BIDIRECTIONAL command");
		sym_set_cam_status(cmd, DID_ERROR);
		goto out_abort;
	case DMA_TO_DEVICE:
		goalp = SCRIPTA_BA(np, data_out2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case DMA_FROM_DEVICE:
		cp->host_flags |= HF_DATA_IN;
		goalp = SCRIPTA_BA(np, data_in2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case DMA_NONE:
	default:
		lastp = goalp = SCRIPTB_BA(np, no_data);
		break;
	}

	/*
	 *  Set all pointers values needed by SCRIPTS.
	 */
	cp->phys.head.lastp = cpu_to_scr(lastp);
	cp->phys.head.savep = cpu_to_scr(lastp);
	cp->startp	    = cp->phys.head.savep;
	cp->goalp	    = cpu_to_scr(goalp);

	/*
	 *  When `#ifed 1', the code below makes the driver
	 *  panic on the first attempt to write to a SCSI device.
	 *  It is the first test we want to do after a driver
	 *  change that does not seem obviously safe. :)
	 */
#if 0
	switch (cp->cdb_buf[0]) {
	case 0x0A: case 0x2A: case 0xAA:
		panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n");
		break;
	default:
		break;
	}
#endif

	/*
	 *	activate this job.
	 */
	sym_put_start_queue(np, cp);
	return 0;

out_abort:
	sym_free_ccb(np, cp);
	sym_xpt_done(np, cmd);
	return 0;
}

/*
 *  timer daemon.
 *
 *  Misused to keep the driver running when
 *  interrupts are not configured correctly.
 */
static void sym_timer(struct sym_hcb *np)
{
	unsigned long thistime = jiffies;

	/*
	 *  Restart the timer.
	 */
	np->s.timer.expires = thistime + SYM_CONF_TIMER_INTERVAL;
	add_timer(&np->s.timer);

	/*
	 *  If we are resetting the ncr, wait for settle_time before
	 *  clearing it.  Then command processing will be resumed.
	 */
	if (np->s.settle_time_valid) {
		if (time_before_eq(np->s.settle_time, thistime)) {
			if (sym_verbose >= 2 )
				printk("%s: command processing resumed\n",
				       sym_name(np));
			np->s.settle_time_valid = 0;
		}
		return;
	}

	/*
	 *	Nothing to do for now, but that may come.
	 */
	if (np->s.lasttime + 4*HZ < thistime) {
		np->s.lasttime = thistime;
	}

#ifdef SYM_CONF_PCIQ_MAY_MISS_COMPLETIONS
	/*
	 *  Some way-broken PCI bridges may lead to
	 *  completions being lost when the clearing
	 *  of the INTFLY flag by the CPU occurs
	 *  concurrently with the chip raising this flag.
	 *  If this ever happen, lost completions will
	 *  be reaped here.
	 */
	sym_wakeup_done(np);
#endif
}

/*
 *  PCI BUS error handler.
 *
 *  Logs (and clears, by writing back the error bits) any error bits
 *  set in the device's PCI status register.
 */
void sym_log_bus_error(struct Scsi_Host *shost)
{
	struct sym_data *sym_data = shost_priv(shost);
	struct pci_dev *pdev = sym_data->pdev;
	unsigned short pci_sts;
	pci_read_config_word(pdev, PCI_STATUS, &pci_sts);
	if (pci_sts & 0xf900) {
		pci_write_config_word(pdev, PCI_STATUS, pci_sts);
		shost_printk(KERN_WARNING, shost,
			"PCI bus error: status = 0x%04x\n", pci_sts & 0xf900);
	}
}

/*
 * queuecommand method.  Entered with the host adapter lock held and
 * interrupts disabled.
 */
static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd,
					void (*done)(struct scsi_cmnd *))
{
	struct sym_hcb *np = SYM_SOFTC_PTR(cmd);
	struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd);
	int sts = 0;

	cmd->scsi_done = done;
	memset(ucp, 0, sizeof(*ucp));

	/*
	 *  Shorten our settle_time if needed for
	 *  this command not to time out.
	 */
	if (np->s.settle_time_valid && cmd->request->timeout) {
		unsigned long tlimit = jiffies + cmd->request->timeout;
		tlimit -= SYM_CONF_TIMER_INTERVAL*2;
		if (time_after(np->s.settle_time, tlimit)) {
			np->s.settle_time = tlimit;
		}
	}

	/* Bus is settling after a reset; ask the midlayer to retry later. */
	if (np->s.settle_time_valid)
		return SCSI_MLQUEUE_HOST_BUSY;

	sts = sym_queue_command(np, cmd);
	if (sts)
		return SCSI_MLQUEUE_HOST_BUSY;
	return 0;
}

static DEF_SCSI_QCMD(sym53c8xx_queue_command)

/*
 *  Linux entry point of the interrupt handler.
 */
static irqreturn_t sym53c8xx_intr(int irq, void *dev_id)
{
	struct Scsi_Host *shost = dev_id;
	struct sym_data *sym_data = shost_priv(shost);
	irqreturn_t result;

	/* Avoid spinloop trying to handle interrupts on frozen device */
	if (pci_channel_offline(sym_data->pdev))
		return IRQ_NONE;

	if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("[");

	spin_lock(shost->host_lock);
	result = sym_interrupt(shost);
	spin_unlock(shost->host_lock);

	if (DEBUG_FLAGS & DEBUG_TINY) printf_debug ("]\n");

	return result;
}

/*
 *  Linux entry point of the timer handler
 */
static void sym53c8xx_timer(unsigned long npref)
{
	struct sym_hcb *np = (struct sym_hcb *)npref;
	unsigned long flags;

	spin_lock_irqsave(np->s.host->host_lock, flags);
	sym_timer(np);
	spin_unlock_irqrestore(np->s.host->host_lock, flags);
}

/*
 *  What the eh thread wants us to perform.
 */
#define SYM_EH_ABORT		0
#define SYM_EH_DEVICE_RESET	1
#define SYM_EH_BUS_RESET	2
#define SYM_EH_HOST_RESET	3

/*
 *  Generic method for our eh processing.
 *  The 'op' argument tells what we have to do.
 *
 *  Returns SCSI_SUCCESS / SCSI_FAILED to the SCSI eh thread.  If the
 *  command is still queued on the HBA, we wait (up to 5s) for its
 *  completion before reporting success.
 */
static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
{
	struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
	struct Scsi_Host *shost = cmd->device->host;
	struct sym_data *sym_data = shost_priv(shost);
	struct pci_dev *pdev = sym_data->pdev;
	struct sym_hcb *np = sym_data->ncb;
	SYM_QUEHEAD *qp;
	int cmd_queued = 0;
	int sts = -1;
	struct completion eh_done;

	scmd_printk(KERN_WARNING, cmd, "%s operation started\n", opname);

	/* We may be in an error condition because the PCI bus
	 * went down. In this case, we need to wait until the
	 * PCI bus is reset, the card is reset, and only then
	 * proceed with the scsi error recovery.  There's no
	 * point in hurrying; take a leisurely wait.
	 */
#define WAIT_FOR_PCI_RECOVERY	35
	if (pci_channel_offline(pdev)) {
		int finished_reset = 0;
		init_completion(&eh_done);
		spin_lock_irq(shost->host_lock);
		/* Make sure we didn't race */
		if (pci_channel_offline(pdev)) {
			BUG_ON(sym_data->io_reset);
			/* sym2_io_resume() will complete this for us. */
			sym_data->io_reset = &eh_done;
		} else {
			finished_reset = 1;
		}
		spin_unlock_irq(shost->host_lock);
		if (!finished_reset)
			finished_reset = wait_for_completion_timeout
				(sym_data->io_reset,
				 WAIT_FOR_PCI_RECOVERY*HZ);
		spin_lock_irq(shost->host_lock);
		sym_data->io_reset = NULL;
		spin_unlock_irq(shost->host_lock);
		if (!finished_reset)
			return SCSI_FAILED;
	}

	spin_lock_irq(shost->host_lock);
	/* This one is queued in some place -> to wait for completion */
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp->cmd == cmd) {
			cmd_queued = 1;
			break;
		}
	}

	/* Try to proceed the operation we have been asked for */
	sts = -1;
	switch(op) {
	case SYM_EH_ABORT:
		sts = sym_abort_scsiio(np, cmd, 1);
		break;
	case SYM_EH_DEVICE_RESET:
		sts = sym_reset_scsi_target(np, cmd->device->id);
		break;
	case SYM_EH_BUS_RESET:
		sym_reset_scsi_bus(np, 1);
		sts = 0;
		break;
	case SYM_EH_HOST_RESET:
		sym_reset_scsi_bus(np, 0);
		sym_start_up(shost, 1);
		sts = 0;
		break;
	default:
		break;
	}

	/* On error, restore everything and cross fingers :) */
	if (sts)
		cmd_queued = 0;

	if (cmd_queued) {
		init_completion(&eh_done);
		ucmd->eh_done = &eh_done;
		spin_unlock_irq(shost->host_lock);
		if (!wait_for_completion_timeout(&eh_done, 5*HZ)) {
			ucmd->eh_done = NULL;
			sts = -2;
		}
	} else {
		spin_unlock_irq(shost->host_lock);
	}

	dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname,
			sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed");
	return sts ? SCSI_FAILED : SCSI_SUCCESS;
}

/*
 * Error handlers called from the eh thread (one thread per HBA).
 */
static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd)
{
	return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd);
}

static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd);
}

static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd);
}

static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd);
}

/*
 *  Tune device queuing depth, according to various limits.
 *  (user limit, driver limit, and SCSI device queue depth.)
 */
static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags)
{
	struct sym_lcb *lp = sym_lp(tp, lun);
	u_short	oldtags;

	if (!lp)
		return;

	oldtags = lp->s.reqtags;

	if (reqtags > lp->s.scdev_depth)
		reqtags = lp->s.scdev_depth;

	lp->s.reqtags     = reqtags;

	if (reqtags != oldtags) {
		dev_info(&tp->starget->dev,
		         "tagged command queuing %s, command queue depth %d.\n",
		          lp->s.reqtags ? "enabled" : "disabled", reqtags);
	}
}

/*
 *  Linux entry point for device (LUN) allocation.
 *  Validates target/LUN limits, honours the NVRAM scan flags and
 *  allocates the per-LUN control block.
 */
static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
{
	struct sym_hcb *np = sym_get_hcb(sdev->host);
	struct sym_tcb *tp = &np->target[sdev->id];
	struct sym_lcb *lp;
	unsigned long flags;
	int error;

	if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN)
		return -ENXIO;

	spin_lock_irqsave(np->s.host->host_lock, flags);

	/*
	 * Fail the device init if the device is flagged NOSCAN at BOOT in
	 * the NVRAM.  This may speed up boot and maintain coherency with
	 * BIOS device numbering.  Clearing the flag allows the user to
	 * rescan skipped devices later.  We also return an error for
	 * devices not flagged for SCAN LUNS in the NVRAM since some single
	 * lun devices behave badly when asked for a non zero LUN.
	 */
	if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
		tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
		starget_printk(KERN_INFO, sdev->sdev_target,
				"Scan at boot disabled in NVRAM\n");
		error = -ENXIO;
		goto out;
	}

	if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
		if (sdev->lun != 0) {
			error = -ENXIO;
			goto out;
		}
		starget_printk(KERN_INFO, sdev->sdev_target,
				"Multiple LUNs disabled in NVRAM\n");
	}

	lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
	if (!lp) {
		error = -ENOMEM;
		goto out;
	}
	/* First LUN on this target: record the starget and seed the
	 * transport goal limits from the NVRAM user settings. */
	if (tp->nlcb == 1)
		tp->starget = sdev->sdev_target;

	spi_min_period(tp->starget) = tp->usr_period;
	spi_max_width(tp->starget) = tp->usr_width;

	error = 0;
out:
	spin_unlock_irqrestore(np->s.host->host_lock, flags);

	return error;
}

/*
 *  Linux entry point for device queue sizing.
 */
static int sym53c8xx_slave_configure(struct scsi_device *sdev)
{
	struct sym_hcb *np = sym_get_hcb(sdev->host);
	struct sym_tcb *tp = &np->target[sdev->id];
	struct sym_lcb *lp = sym_lp(tp, sdev->lun);
	int reqtags, depth_to_use;

	/*
	 *  Get user flags.
	 */
	lp->curr_flags = lp->user_flags;

	/*
	 *  Select queue depth from driver setup.
	 *  Do not use more than configured by user.
	 *  Use at least 1.
	 *  Do not use more than our maximum.
	 */
	reqtags = sym_driver_setup.max_tag;
	if (reqtags > tp->usrtags)
		reqtags = tp->usrtags;
	if (!sdev->tagged_supported)
		reqtags = 0;
	if (reqtags > SYM_CONF_MAX_TAG)
		reqtags = SYM_CONF_MAX_TAG;
	depth_to_use = reqtags ? reqtags : 1;
	scsi_adjust_queue_depth(sdev,
				sdev->tagged_supported ? MSG_SIMPLE_TAG : 0,
				depth_to_use);
	lp->s.scdev_depth = depth_to_use;
	sym_tune_dev_queuing(tp, sdev->lun, reqtags);

	/* Kick off domain validation unless it has already been done. */
	if (!spi_initial_dv(sdev->sdev_target))
		spi_dv_device(sdev);

	return 0;
}

/*
 *  Linux entry point for device teardown.
 *  Frees the per-LUN control block; on the last LUN of a target the
 *  target state is reset to defaults.
 */
static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
{
	struct sym_hcb *np = sym_get_hcb(sdev->host);
	struct sym_tcb *tp = &np->target[sdev->id];
	struct sym_lcb *lp = sym_lp(tp, sdev->lun);
	unsigned long flags;

	/* if slave_alloc returned before allocating a sym_lcb, return */
	if (!lp)
		return;

	spin_lock_irqsave(np->s.host->host_lock, flags);

	if (lp->busy_itlq || lp->busy_itl) {
		/*
		 * This really shouldn't happen, but we can't return an error
		 * so let's try to stop all on-going I/O.
		 */
		starget_printk(KERN_WARNING, tp->starget,
			       "Removing busy LCB (%d)\n", sdev->lun);
		sym_reset_scsi_bus(np, 1);
	}

	if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) {
		/*
		 * It was the last unit for this target.
		 */
		tp->head.sval = 0;
		tp->head.wval = np->rv_scntl3;
		tp->head.uval = 0;
		tp->tgoal.check_nego = 1;
		tp->starget = NULL;
	}

	spin_unlock_irqrestore(np->s.host->host_lock, flags);
}

/*
 *  Linux entry point for info() function
 */
static const char *sym53c8xx_info (struct Scsi_Host *host)
{
	return SYM_DRIVER_NAME;
}


#ifdef SYM_LINUX_PROC_INFO_SUPPORT
/*
 *  Proc file system stuff
 *
 *  A read operation returns adapter information.
 *  A write operation is a control command.
 *  The string is parsed in the driver code and the command is passed
 *  to the sym_usercmd() function.
 */

#ifdef SYM_LINUX_USER_COMMAND_SUPPORT
/* Parsed form of a proc-fs control command. */
struct	sym_usrcmd {
	u_long	target;		/* bitmask of target ids the command applies to */
	u_long	lun;
	u_long	data;		/* numeric/flag argument */
	u_long	cmd;		/* one of the UC_* codes below */
};

#define UC_SETSYNC      10
#define UC_SETTAGS	11
#define UC_SETDEBUG	12
#define UC_SETWIDE	14
#define UC_SETFLAG	15
#define UC_SETVERBOSE	17
#define UC_RESETDEV	18
#define UC_CLEARDEV	19

/*
 *  Apply an already-parsed user command to the HCB / targets.
 *  Called with the host lock held (see sym_user_command()).
 */
static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
{
	struct sym_tcb *tp;
	int t, l;

	switch (uc->cmd) {
	case 0: return;

#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
	case UC_SETDEBUG:
		sym_debug_flags = uc->data;
		break;
#endif
	case UC_SETVERBOSE:
		np->verbose = uc->data;
		break;
	default:
		/*
		 * We assume that other commands apply to targets.
		 * This should always be the case and avoid the below
		 * 4 lines to be repeated 6 times.
		 */
		for (t = 0; t < SYM_CONF_MAX_TARGET; t++) {
			if (!((uc->target >> t) & 1))
				continue;
			tp = &np->target[t];
			if (!tp->nlcb)
				continue;

			switch (uc->cmd) {

			case UC_SETSYNC:
				if (!uc->data || uc->data >= 255) {
					/* 0 or >=255: async transfers. */
					tp->tgoal.iu = tp->tgoal.dt =
						tp->tgoal.qas = 0;
					tp->tgoal.offset = 0;
				} else if (uc->data <= 9 && np->minsync_dt) {
					/* Fast periods: negotiate DT/IU/QAS wide. */
					if (uc->data < np->minsync_dt)
						uc->data = np->minsync_dt;
					tp->tgoal.iu = tp->tgoal.dt =
						tp->tgoal.qas = 1;
					tp->tgoal.width = 1;
					tp->tgoal.period = uc->data;
					tp->tgoal.offset = np->maxoffs_dt;
				} else {
					if (uc->data < np->minsync)
						uc->data = np->minsync;
					tp->tgoal.iu = tp->tgoal.dt =
						tp->tgoal.qas = 0;
					tp->tgoal.period = uc->data;
					tp->tgoal.offset = np->maxoffs;
				}
				tp->tgoal.check_nego = 1;
				break;
			case UC_SETWIDE:
				tp->tgoal.width = uc->data ? 1 : 0;
				tp->tgoal.check_nego = 1;
				break;
			case UC_SETTAGS:
				for (l = 0; l < SYM_CONF_MAX_LUN; l++)
					sym_tune_dev_queuing(tp, l, uc->data);
				break;
			case UC_RESETDEV:
				tp->to_reset = 1;
				/* Signal the SCRIPTS processor. */
				np->istat_sem = SEM;
				OUTB(np, nc_istat, SIGP|SEM);
				break;
			case UC_CLEARDEV:
				for (l = 0; l < SYM_CONF_MAX_LUN; l++) {
					struct sym_lcb *lp = sym_lp(tp, l);
					if (lp)
						lp->to_clear = 1;
				}
				np->istat_sem = SEM;
				OUTB(np, nc_istat, SIGP|SEM);
				break;
			case UC_SETFLAG:
				tp->usrflags = uc->data;
				break;
			}
		}
		break;
	}
}

/* Count leading whitespace; returns number of characters consumed. */
static int sym_skip_spaces(char *ptr, int len)
{
	int cnt, c;

	for (cnt = len; cnt > 0 && (c = *ptr++) && isspace(c); cnt--);

	return (len - cnt);
}

/* Parse a decimal integer; returns number of characters consumed. */
static int get_int_arg(char *ptr, int len, u_long *pv)
{
	char *end;

	*pv = simple_strtoul(ptr, &end, 10);
	return (end - ptr);
}

/* Return strlen(verb) if ptr starts with verb, else 0. */
static int is_keyword(char *ptr, int len, char *verb)
{
	int verb_len = strlen(verb);

	if (len >= verb_len && !memcmp(verb, ptr, verb_len))
		return verb_len;
	else
		return 0;
}

#define SKIP_SPACES(ptr, len)						\
	if ((arg_len = sym_skip_spaces(ptr, len)) < 1)			\
		return -EINVAL;						\
	ptr += arg_len; len -= arg_len;

#define GET_INT_ARG(ptr, len, v)					\
	if (!(arg_len = get_int_arg(ptr, len, &(v))))			\
		return -EINVAL;						\
	ptr += arg_len; len -= arg_len;


/*
 *  Parse a control command
 */
static int sym_user_command(struct Scsi_Host *shost, char *buffer, int length)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	char *ptr	= buffer;
	int len		= length;
	struct sym_usrcmd cmd, *uc = &cmd;
	int		arg_len;
	u_long 		target;

	memset(uc, 0, sizeof(*uc));

	if (len > 0 && ptr[len-1] == '\n')
		--len;

	if ((arg_len = is_keyword(ptr, len, "setsync")) != 0)
		uc->cmd = UC_SETSYNC;
	else if ((arg_len = is_keyword(ptr, len, "settags")) != 0)
		uc->cmd = UC_SETTAGS;
	else if ((arg_len = is_keyword(ptr, len, "setverbose")) != 0)
		uc->cmd = UC_SETVERBOSE;
	else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0)
		uc->cmd = UC_SETWIDE;
#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
	else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0)
		uc->cmd = UC_SETDEBUG;
#endif
	else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0)
		uc->cmd = UC_SETFLAG;
	else if ((arg_len = is_keyword(ptr, len, "resetdev")) != 0)
		uc->cmd = UC_RESETDEV;
	else if ((arg_len = is_keyword(ptr, len, "cleardev")) != 0)
		uc->cmd = UC_CLEARDEV;
	else
		arg_len = 0;

#ifdef DEBUG_PROC_INFO
	printk("sym_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd);
#endif

	if (!arg_len)
		return -EINVAL;
	ptr += arg_len; len -= arg_len;

	/* First argument: target selector ("all" or a target number). */
	switch(uc->cmd) {
	case UC_SETSYNC:
	case UC_SETTAGS:
	case UC_SETWIDE:
	case UC_SETFLAG:
	case UC_RESETDEV:
	case UC_CLEARDEV:
		SKIP_SPACES(ptr, len);
		if ((arg_len = is_keyword(ptr, len, "all")) != 0) {
			ptr += arg_len; len -= arg_len;
			uc->target = ~0;
		} else {
			GET_INT_ARG(ptr, len, target);
			uc->target = (1<<target);
#ifdef DEBUG_PROC_INFO
			printk("sym_user_command: target=%ld\n", target);
#endif
		}
		break;
	}

	/* Second argument: numeric value or flag keywords. */
	switch(uc->cmd) {
	case UC_SETVERBOSE:
	case UC_SETSYNC:
	case UC_SETTAGS:
	case UC_SETWIDE:
		SKIP_SPACES(ptr, len);
		GET_INT_ARG(ptr, len, uc->data);
#ifdef DEBUG_PROC_INFO
		printk("sym_user_command: data=%ld\n", uc->data);
#endif
		break;
#ifdef SYM_LINUX_DEBUG_CONTROL_SUPPORT
	case UC_SETDEBUG:
		while (len > 0) {
			SKIP_SPACES(ptr, len);
			if ((arg_len = is_keyword(ptr, len, "alloc")))
				uc->data |= DEBUG_ALLOC;
			else if ((arg_len = is_keyword(ptr, len, "phase")))
				uc->data |= DEBUG_PHASE;
			else if ((arg_len = is_keyword(ptr, len, "queue")))
				uc->data |= DEBUG_QUEUE;
			else if ((arg_len = is_keyword(ptr, len, "result")))
				uc->data |= DEBUG_RESULT;
			else if ((arg_len = is_keyword(ptr, len, "scatter")))
				uc->data |= DEBUG_SCATTER;
			else if ((arg_len = is_keyword(ptr, len, "script")))
				uc->data |= DEBUG_SCRIPT;
			else if ((arg_len = is_keyword(ptr, len, "tiny")))
				uc->data |= DEBUG_TINY;
			else if ((arg_len = is_keyword(ptr, len, "timing")))
				uc->data |= DEBUG_TIMING;
			else if ((arg_len = is_keyword(ptr, len, "nego")))
				uc->data |= DEBUG_NEGO;
			else if ((arg_len = is_keyword(ptr, len, "tags")))
				uc->data |= DEBUG_TAGS;
			else if ((arg_len = is_keyword(ptr, len, "pointer")))
				uc->data |= DEBUG_POINTER;
			else
				return -EINVAL;
			ptr += arg_len; len -= arg_len;
		}
#ifdef DEBUG_PROC_INFO
		printk("sym_user_command: data=%ld\n", uc->data);
#endif
		break;
#endif /* SYM_LINUX_DEBUG_CONTROL_SUPPORT */
	case UC_SETFLAG:
		while (len > 0) {
			SKIP_SPACES(ptr, len);
			if ((arg_len = is_keyword(ptr, len, "no_disc")))
				uc->data &= ~SYM_DISC_ENABLED;
			else
				return -EINVAL;
			ptr += arg_len; len -= arg_len;
		}
		break;
	default:
		break;
	}

	if (len)
		return -EINVAL;
	else {
		unsigned long flags;

		spin_lock_irqsave(shost->host_lock, flags);
		sym_exec_user_command(np, uc);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	return length;
}

#endif	/* SYM_LINUX_USER_COMMAND_SUPPORT */


/*
 *  Copy formatted information into the input buffer.
 */
static int sym_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
#ifdef SYM_LINUX_USER_INFO_SUPPORT
	struct sym_data *sym_data = shost_priv(shost);
	struct pci_dev *pdev = sym_data->pdev;
	struct sym_hcb *np = sym_data->ncb;

	seq_printf(m, "Chip " NAME53C "%s, device id 0x%x, "
		 "revision id 0x%x\n", np->s.chip_name,
		 pdev->device, pdev->revision);
	seq_printf(m, "At PCI address %s, IRQ %u\n",
			 pci_name(pdev), pdev->irq);
	seq_printf(m, "Min. period factor %d, %s SCSI BUS%s\n",
		 (int) (np->minsync_dt ? np->minsync_dt : np->minsync),
		 np->maxwide ? "Wide" : "Narrow",
		 np->minsync_dt ? ", DT capable" : "");

	seq_printf(m, "Max. started commands %d, "
		 "max. commands per LUN %d\n",
		 SYM_CONF_MAX_START, SYM_CONF_MAX_TAG);

	return 0;
#else
	return -EINVAL;
#endif /* SYM_LINUX_USER_INFO_SUPPORT */
}
#endif /* SYM_LINUX_PROC_INFO_SUPPORT */

/*
 * Free resources claimed by sym_iomap_device().  Note that
 * sym_free_resources() should be used instead of this function after calling
 * sym_attach().
 */
static void sym_iounmap_device(struct sym_device *device)
{
	if (device->s.ioaddr)
		pci_iounmap(device->pdev, device->s.ioaddr);
	if (device->s.ramaddr)
		pci_iounmap(device->pdev, device->s.ramaddr);
}

/*
 *  Free controller resources.
 */
static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev,
		int do_free_irq)
{
	/*
	 *  Free O/S specific resources.
	 */
	if (do_free_irq)
		free_irq(pdev->irq, np->s.host);
	if (np->s.ioaddr)
		pci_iounmap(pdev, np->s.ioaddr);
	if (np->s.ramaddr)
		pci_iounmap(pdev, np->s.ramaddr);
	/*
	 *  Free O/S independent resources.
	 */
	sym_hcb_free(np);

	sym_mfree_dma(np, sizeof(*np), "HCB");
}

/*
 *  Host attach and initialisations.
 *
 *  Allocate host data and ncb structure.
 *  Remap MMIO region.
 *  Do chip initialization.
 *  If all is OK, install interrupt handling and
 *  start the timer daemon.
 */
static struct Scsi_Host *sym_attach(struct scsi_host_template *tpnt, int unit,
				    struct sym_device *dev)
{
	struct sym_data *sym_data;
	struct sym_hcb *np = NULL;
	struct Scsi_Host *shost = NULL;
	struct pci_dev *pdev = dev->pdev;
	unsigned long flags;
	struct sym_fw *fw;
	int do_free_irq = 0;

	printk(KERN_INFO "sym%d: <%s> rev 0x%x at pci %s irq %u\n",
		unit, dev->chip.name, pdev->revision, pci_name(pdev),
		pdev->irq);

	/*
	 *  Get the firmware for this chip.
	 */
	fw = sym_find_firmware(&dev->chip);
	if (!fw)
		goto attach_failed;

	shost = scsi_host_alloc(tpnt, sizeof(*sym_data));
	if (!shost)
		goto attach_failed;
	sym_data = shost_priv(shost);

	/*
	 *  Allocate immediately the host control block,
	 *  since we are only expecting to succeed. :)
	 *  We keep track in the HCB of all the resources that
	 *  are to be released on error.
	 */
	np = __sym_calloc_dma(&pdev->dev, sizeof(*np), "HCB");
	if (!np)
		goto attach_failed;
	np->bus_dmat = &pdev->dev; /* Result in 1 DMA pool per HBA */
	sym_data->ncb = np;
	sym_data->pdev = pdev;
	np->s.host = shost;

	pci_set_drvdata(pdev, shost);

	/*
	 *  Copy some useful infos to the HCB.
	 */
	np->hcb_ba	= vtobus(np);
	np->verbose	= sym_driver_setup.verbose;
	np->s.unit	= unit;
	np->features	= dev->chip.features;
	np->clock_divn	= dev->chip.nr_divisor;
	np->maxoffs	= dev->chip.offset_max;
	np->maxburst	= dev->chip.burst_max;
	np->myaddr	= dev->host_id;
	np->mmio_ba	= (u32)dev->mmio_base;
	np->ram_ba	= (u32)dev->ram_base;
	np->s.ioaddr	= dev->s.ioaddr;
	np->s.ramaddr	= dev->s.ramaddr;

	/*
	 *  Edit its name.
	 */
	strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name));
	sprintf(np->s.inst_name, "sym%d", np->s.unit);

	/* Prefer 64-bit DAC addressing when the chip supports it,
	 * otherwise fall back to a 32-bit DMA mask. */
	if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) &&
			!pci_set_dma_mask(pdev, DMA_DAC_MASK)) {
		set_dac(np);
	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printf_warning("%s: No suitable DMA available\n", sym_name(np));
		goto attach_failed;
	}

	if (sym_hcb_attach(shost, fw, dev->nvram))
		goto attach_failed;

	/*
	 *  Install the interrupt handler.
	 *  If we synchonize the C code with SCRIPTS on interrupt,
	 *  we do not want to share the INTR line at all.
	 */
	if (request_irq(pdev->irq, sym53c8xx_intr, IRQF_SHARED, NAME53C8XX,
			shost)) {
		printf_err("%s: request irq %u failure\n",
			sym_name(np), pdev->irq);
		goto attach_failed;
	}
	do_free_irq = 1;

	/*
	 *  After SCSI devices have been opened, we cannot
	 *  reset the bus safely, so we do it here.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (sym_reset_scsi_bus(np, 0))
		goto reset_failed;

	/*
	 *  Start the SCRIPTS.
	 */
	sym_start_up(shost, 1);

	/*
	 *  Start the timer daemon
	 */
	init_timer(&np->s.timer);
	np->s.timer.data     = (unsigned long) np;
	np->s.timer.function = sym53c8xx_timer;
	np->s.lasttime=0;
	sym_timer (np);

	/*
	 *  Fill Linux host instance structure
	 *  and return success.
	 */
	shost->max_channel	= 0;
	shost->this_id		= np->myaddr;
	shost->max_id		= np->maxwide ? 16 : 8;
	shost->max_lun		= SYM_CONF_MAX_LUN;
	shost->unique_id	= pci_resource_start(pdev, 0);
	shost->cmd_per_lun	= SYM_CONF_MAX_TAG;
	shost->can_queue	= (SYM_CONF_MAX_START-2);
	shost->sg_tablesize	= SYM_CONF_MAX_SG;
	shost->max_cmd_len	= 16;
	BUG_ON(sym2_transport_template == NULL);
	shost->transportt	= sym2_transport_template;

	/* 53c896 rev 1 errata: DMA may not cross 16MB boundary */
	if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 2)
		shost->dma_boundary = 0xFFFFFF;

	spin_unlock_irqrestore(shost->host_lock, flags);

	return shost;

 reset_failed:
	printf_err("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, "
		   "TERMINATION, DEVICE POWER etc.!\n", sym_name(np));
	spin_unlock_irqrestore(shost->host_lock, flags);
 attach_failed:
	printf_info("sym%d: giving up ...\n", unit);
	if (np)
		sym_free_resources(np, pdev, do_free_irq);
	else
		sym_iounmap_device(dev);
	if (shost)
		scsi_host_put(shost);

	return NULL;
}


/*
 *    Detect and try to read SYMBIOS and TEKRAM NVRAM.
 */
#if SYM_CONF_NVRAM_SUPPORT
static void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
{
	devp->nvram = nvp;
	nvp->type = 0;

	sym_read_nvram(devp, nvp);
}
#else
static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
{
}
#endif	/* SYM_CONF_NVRAM_SUPPORT */

/*
 *  Check that the chip is supported and not excluded by the user,
 *  then copy its chip description into the device structure.
 */
static int sym_check_supported(struct sym_device *device)
{
	struct sym_chip *chip;
	struct pci_dev *pdev = device->pdev;
	unsigned long io_port = pci_resource_start(pdev, 0);
	int i;

	/*
	 *  If user excluded this chip, do not initialize it.
	 *  I hate this code so much.  Must kill it.
	 */
	if (io_port) {
		for (i = 0 ; i < 8 ; i++) {
			if (sym_driver_setup.excludes[i] == io_port)
				return -ENODEV;
		}
	}

	/*
	 * Check if the chip is supported.  Then copy the chip description
	 * to our device structure so we can make it match the actual device
	 * and options.
	 */
	chip = sym_lookup_chip_table(pdev->device, pdev->revision);
	if (!chip) {
		dev_info(&pdev->dev, "device not supported\n");
		return -ENODEV;
	}
	memcpy(&device->chip, chip, sizeof(device->chip));

	return 0;
}

/*
 * Ignore Symbios chips controlled by various RAID controllers.
 * These controllers set value 0x52414944 at RAM end - 16.
 */
static int sym_check_raid(struct sym_device *device)
{
	unsigned int ram_size, ram_val;

	if (!device->s.ramaddr)
		return 0;

	if (device->chip.features & FE_RAM8K)
		ram_size = 8192;
	else
		ram_size = 4096;

	/* 0x52414944 is ASCII "RAID". */
	ram_val = readl(device->s.ramaddr + ram_size - 16);
	if (ram_val != 0x52414944)
		return 0;

	dev_info(&device->pdev->dev,
			"not initializing, driven by RAID controller.\n");
	return -ENODEV;
}

/*
 *  Apply chip-specific PCI work-arounds before first use.
 */
static int sym_set_workarounds(struct sym_device *device)
{
	struct sym_chip *chip = &device->chip;
	struct pci_dev *pdev = device->pdev;
	u_short status_reg;

	/*
	 *  (ITEM 12 of a DEL about the 896 I haven't yet).
	 *  We must ensure the chip will use WRITE AND INVALIDATE.
	 *  The revision number limit is for now arbitrary.
	 */
	if (pdev->device == PCI_DEVICE_ID_NCR_53C896 && pdev->revision < 0x4) {
		chip->features	|= (FE_WRIE | FE_CLSE);
	}

	/* If the chip can do Memory Write Invalidate, enable it */
	if (chip->features & FE_WRIE) {
		if (pci_set_mwi(pdev))
			return -ENODEV;
	}

	/*
	 *  Work around for errant bit in 895A. The 66Mhz
	 *  capable bit is set erroneously. Clear this bit.
	 *  (Item 1 DEL 533)
	 *
	 *  Make sure Config space and Features agree.
	 *
	 *  Recall: writes are not normal to status register -
	 *  write a 1 to clear and a 0 to leave unchanged.
	 *  Can only reset bits.
	 */
	pci_read_config_word(pdev, PCI_STATUS, &status_reg);
	if (chip->features & FE_66MHZ) {
		if (!(status_reg & PCI_STATUS_66MHZ))
			chip->features &= ~FE_66MHZ;
	} else {
		if (status_reg & PCI_STATUS_66MHZ) {
			status_reg = PCI_STATUS_66MHZ;
			pci_write_config_word(pdev, PCI_STATUS, status_reg);
			pci_read_config_word(pdev, PCI_STATUS, &status_reg);
		}
	}

	return 0;
}

/*
 *  Map HBA registers and on-chip SRAM (if present).
 */
static int sym_iomap_device(struct sym_device *device)
{
	struct pci_dev *pdev = device->pdev;
	struct pci_bus_region bus_addr;
	int i = 2;

	pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[1]);
	device->mmio_base = bus_addr.start;

	if (device->chip.features & FE_RAM) {
		/*
		 * If the BAR is 64-bit, resource 2 will be occupied by the
		 * upper 32 bits
		 */
		if (!pdev->resource[i].flags)
			i++;
		pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[i]);
		device->ram_base = bus_addr.start;
	}

#ifdef CONFIG_SCSI_SYM53C8XX_MMIO
	if (device->mmio_base)
		device->s.ioaddr = pci_iomap(pdev, 1,
						pci_resource_len(pdev, 1));
#endif
	/* Fall back to I/O port space (BAR 0) if MMIO is unavailable. */
	if (!device->s.ioaddr)
		device->s.ioaddr = pci_iomap(pdev, 0,
						pci_resource_len(pdev, 0));
	if (!device->s.ioaddr) {
		dev_err(&pdev->dev, "could not map registers; giving up.\n");
		return -EIO;
	}
	if (device->ram_base) {
		/* SRAM mapping is optional; continue without it on failure. */
		device->s.ramaddr = pci_iomap(pdev, i,
						pci_resource_len(pdev, i));
		if (!device->s.ramaddr) {
			dev_warn(&pdev->dev,
				"could not map SRAM; continuing anyway.\n");
			device->ram_base = 0;
		}
	}

	return 0;
}

/*
 * The NCR PQS and PDS cards are constructed as a DEC bridge
 * behind which sits a proprietary NCR memory controller and
 * either four or two 53c875s as separate devices.  We can tell
 * if an 875 is part of a PQS/PDS or not since if it is, it will
 * be on the same bus as the memory controller.  In its usual
 * mode of operation, the 875s are slaved to the memory
 * controller for all transfers.  To operate with the Linux
 * driver, the memory controller is disabled and the 875s
 * freed to function independently.  The only wrinkle is that
 * the preset SCSI ID (which may be zero) must be read in from
 * a special configuration space register of the 875.
 */
static void sym_config_pqs(struct pci_dev *pdev, struct sym_device *sym_dev)
{
	int slot;
	u8 tmp;

	for (slot = 0; slot < 256; slot++) {
		struct pci_dev *memc = pci_get_slot(pdev->bus, slot);

		/* Look for the NCR memory controller (vendor 0x101a),
		 * skipping device id 0x0009. */
		if (!memc || memc->vendor != 0x101a || memc->device == 0x0009) {
			pci_dev_put(memc);
			continue;
		}

		/* bit 1: allow individual 875 configuration */
		pci_read_config_byte(memc, 0x44, &tmp);
		if ((tmp & 0x2) == 0) {
			tmp |= 0x2;
			pci_write_config_byte(memc, 0x44, tmp);
		}

		/* bit 2: drive individual 875 interrupts to the bus */
		pci_read_config_byte(memc, 0x45, &tmp);
		if ((tmp & 0x4) == 0) {
			tmp |= 0x4;
			pci_write_config_byte(memc, 0x45, tmp);
		}

		pci_dev_put(memc);
		break;
	}

	/* Read the preset SCSI ID from the 875's special config register. */
	pci_read_config_byte(pdev, 0x84, &tmp);
	sym_dev->host_id = tmp;
}

/*
 *  Called before unloading the module.
 *  Detach the host.
 *  We have to free resources and halt the NCR chip.
 */
static int sym_detach(struct Scsi_Host *shost, struct pci_dev *pdev)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	printk("%s: detaching ...\n", sym_name(np));

	del_timer_sync(&np->s.timer);

	/*
	 * Reset NCR chip.
	 * We should use sym_soft_reset(), but we don't want to do
	 * so, since we may not be safe if interrupts occur.
	 */
	printk("%s: resetting chip\n", sym_name(np));
	OUTB(np, nc_istat, SRST);
	INB(np, nc_mbox1);
	udelay(10);
	OUTB(np, nc_istat, 0);

	sym_free_resources(np, pdev, 1);
	scsi_host_put(shost);

	return 1;
}

/*
 * Driver host template.
 */
static struct scsi_host_template sym2_template = {
	.module			= THIS_MODULE,
	.name			= "sym53c8xx",
	.info			= sym53c8xx_info,
	.queuecommand		= sym53c8xx_queue_command,
	.slave_alloc		= sym53c8xx_slave_alloc,
	.slave_configure	= sym53c8xx_slave_configure,
	.slave_destroy		= sym53c8xx_slave_destroy,
	.eh_abort_handler	= sym53c8xx_eh_abort_handler,
	.eh_device_reset_handler = sym53c8xx_eh_device_reset_handler,
	.eh_bus_reset_handler	= sym53c8xx_eh_bus_reset_handler,
	.eh_host_reset_handler	= sym53c8xx_eh_host_reset_handler,
	.this_id		= 7,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xFFFF,
#ifdef SYM_LINUX_PROC_INFO_SUPPORT
	.show_info		= sym_show_info,
#ifdef	SYM_LINUX_USER_COMMAND_SUPPORT
	.write_info		= sym_user_command,
#endif
	.proc_name		= NAME53C8XX,
#endif
};

static int attach_count;

/*
 *  PCI probe entry point: bring up one adapter instance.
 *  The goto-based cleanup tiers mirror the setup order; do_iounmap and
 *  do_disable_device track which cleanup steps are still owed.
 */
static int sym2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct sym_device sym_dev;
	struct sym_nvram nvram;
	struct Scsi_Host *shost;
	int do_iounmap = 0;
	int do_disable_device = 1;

	memset(&sym_dev, 0, sizeof(sym_dev));
	memset(&nvram, 0, sizeof(nvram));
	sym_dev.pdev = pdev;
	sym_dev.host_id = SYM_SETUP_HOST_ID;

	if (pci_enable_device(pdev))
		goto leave;

	pci_set_master(pdev);

	if (pci_request_regions(pdev, NAME53C8XX))
		goto disable;

	if (sym_check_supported(&sym_dev))
		goto free;

	if (sym_iomap_device(&sym_dev))
		goto free;
	do_iounmap = 1;

	if (sym_check_raid(&sym_dev)) {
		do_disable_device = 0;	/* Don't disable the device */
		goto free;
	}

	if (sym_set_workarounds(&sym_dev))
		goto free;

	sym_config_pqs(pdev, &sym_dev);

	sym_get_nvram(&sym_dev, &nvram);

	do_iounmap = 0; /* Don't sym_iounmap_device() after sym_attach(). */
	shost = sym_attach(&sym2_template, attach_count, &sym_dev);
	if (!shost)
		goto free;

	if (scsi_add_host(shost, &pdev->dev))
		goto detach;
	scsi_scan_host(shost);

	attach_count++;

	return 0;

 detach:
	sym_detach(pci_get_drvdata(pdev), pdev);
 free:
	if (do_iounmap)
		sym_iounmap_device(&sym_dev);
	pci_release_regions(pdev);
 disable:
	if (do_disable_device)
		pci_disable_device(pdev);
 leave:
	return -ENODEV;
}

/*
 *  PCI remove entry point: tear down one adapter instance.
 */
static void sym2_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);

	scsi_remove_host(shost);
	sym_detach(shost, pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	attach_count--;
}

/**
 * sym2_io_error_detected() - called when PCI error is detected
 * @pdev: pointer to PCI device
 * @state: current state of the PCI slot
 */
static pci_ers_result_t sym2_io_error_detected(struct pci_dev *pdev,
                                         enum pci_channel_state state)
{
	/* If slot is permanently frozen, turn everything off */
	if (state == pci_channel_io_perm_failure) {
		sym2_remove(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	disable_irq(pdev->irq);
	pci_disable_device(pdev);

	/* Request that MMIO be enabled, so register dump can be taken. */
	return PCI_ERS_RESULT_CAN_RECOVER;
}

/**
 * sym2_io_slot_dump - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
static pci_ers_result_t sym2_io_slot_dump(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);

	sym_dump_registers(shost);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * sym2_reset_workarounds - hardware-specific work-arounds
 *
 * This routine is similar to sym_set_workarounds(), except
 * that, at this point, we already know that the device was
 * successfully initialized at least once before, and so most
 * of the steps taken there are un-needed here.
 */
static void sym2_reset_workarounds(struct pci_dev *pdev)
{
	u_short status_reg;
	struct sym_chip *chip;

	chip = sym_lookup_chip_table(pdev->device, pdev->revision);

	/* Work around for errant bit in 895A, in a fashion
	 * similar to what is done in sym_set_workarounds().
	 */
	pci_read_config_word(pdev, PCI_STATUS, &status_reg);
	if (!(chip->features & FE_66MHZ) && (status_reg & PCI_STATUS_66MHZ)) {
		status_reg = PCI_STATUS_66MHZ;
		pci_write_config_word(pdev, PCI_STATUS, status_reg);
		pci_read_config_word(pdev, PCI_STATUS, &status_reg);
	}
}

/**
 * sym2_io_slot_reset() - called when the pci bus has been reset.
 * @pdev: pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t sym2_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct sym_hcb *np = sym_get_hcb(shost);

	printk(KERN_INFO "%s: recovering from a PCI slot reset\n",
	          sym_name(np));

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "%s: Unable to enable after PCI reset\n",
		        sym_name(np));
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	enable_irq(pdev->irq);

	/* If the chip can do Memory Write Invalidate, enable it */
	if (np->features & FE_WRIE) {
		if (pci_set_mwi(pdev))
			return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Perform work-arounds, analogous to sym_set_workarounds() */
	sym2_reset_workarounds(pdev);

	/* Perform host reset only on one instance of the card */
	if (PCI_FUNC(pdev->devfn) == 0) {
		if (sym_reset_scsi_bus(np, 0)) {
			printk(KERN_ERR "%s: Unable to reset scsi host\n",
			        sym_name(np));
			return PCI_ERS_RESULT_DISCONNECT;
		}
		sym_start_up(shost, 1);
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * sym2_io_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that its
 * OK to resume normal operation. Use completion to allow
 * halted scsi ops to resume.
*/
static void sym2_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct sym_data *sym_data = shost_priv(shost);

	/* Wake everyone waiting on the io_reset completion; done under
	 * the host lock so the check-and-complete is atomic w.r.t. the
	 * code that sets sym_data->io_reset. */
	spin_lock_irq(shost->host_lock);
	if (sym_data->io_reset)
		complete_all(sym_data->io_reset);
	spin_unlock_irq(shost->host_lock);
}

/* Report the bus signalling mode (SE/LVD/HVD) of this host to the
 * SPI transport class. */
static void sym2_get_signalling(struct Scsi_Host *shost)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	enum spi_signal_type type;

	switch (np->scsi_mode) {
	case SMODE_SE:
		type = SPI_SIGNAL_SE;
		break;
	case SMODE_LVD:
		type = SPI_SIGNAL_LVD;
		break;
	case SMODE_HVD:
		type = SPI_SIGNAL_HVD;
		break;
	default:
		type = SPI_SIGNAL_UNKNOWN;
		break;
	}
	spi_signalling(shost) = type;
}

/* SPI transport attribute setter: record the requested sync offset for
 * this target and flag a renegotiation. */
static void sym2_set_offset(struct scsi_target *starget, int offset)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	tp->tgoal.offset = offset;
	tp->tgoal.check_nego = 1;
}

/* SPI transport attribute setter: record the requested sync period for
 * this target and flag a renegotiation. */
static void sym2_set_period(struct scsi_target *starget, int period)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	/* have to have DT for these transfers, but DT will also
	 * set width, so check that this is allowed */
	if (period <= np->minsync && spi_width(starget))
		tp->tgoal.dt = 1;

	tp->tgoal.period = period;
	tp->tgoal.check_nego = 1;
}

/* SPI transport attribute setter: record the requested bus width.
 * It is illegal to have DT set on narrow transfers.  If DT is
 * clear, we must also clear IU and QAS.
*/
	if (width == 0)
		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;

	tp->tgoal.width = width;
	tp->tgoal.check_nego = 1;
}

/* SPI transport attribute setter: enable/disable DT clocking.
 * Clearing DT also forces IU and QAS off (they require DT). */
static void sym2_set_dt(struct scsi_target *starget, int dt)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	/* We must clear QAS and IU if DT is clear */
	if (dt)
		tp->tgoal.dt = 1;
	else
		tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
	tp->tgoal.check_nego = 1;
}

/* IU/QAS setters are compiled out; left for reference. */
#if 0
static void sym2_set_iu(struct scsi_target *starget, int iu)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	if (iu)
		tp->tgoal.iu = tp->tgoal.dt = 1;	/* IU requires DT */
	else
		tp->tgoal.iu = 0;
	tp->tgoal.check_nego = 1;
}

static void sym2_set_qas(struct scsi_target *starget, int qas)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct sym_hcb *np = sym_get_hcb(shost);
	struct sym_tcb *tp = &np->target[starget->id];

	if (qas)
		tp->tgoal.dt = tp->tgoal.qas = 1;	/* QAS requires DT */
	else
		tp->tgoal.qas = 0;
	tp->tgoal.check_nego = 1;
}
#endif

/* Hooks exported to the SPI transport class (sysfs attributes). */
static struct spi_function_template sym2_transport_functions = {
	.set_offset	= sym2_set_offset,
	.show_offset	= 1,
	.set_period	= sym2_set_period,
	.show_period	= 1,
	.set_width	= sym2_set_width,
	.show_width	= 1,
	.set_dt		= sym2_set_dt,
	.show_dt	= 1,
#if 0
	.set_iu		= sym2_set_iu,
	.show_iu	= 1,
	.set_qas	= sym2_set_qas,
	.show_qas	= 1,
#endif
	.get_signalling	= sym2_get_signalling,
};

/* PCI IDs of all supported sym53c8xx-family chips. */
static struct pci_device_id sym2_id_table[] = {
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* new */
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C825,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C815,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C810AP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL
}, /* new */
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	/* 53C1510 entries match on class code as well as device id. */
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C885,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C1510,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, /* new */
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C895A,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C875A,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_33,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1010_66,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C875J,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }		/* terminator */
};

MODULE_DEVICE_TABLE(pci, sym2_id_table);

/* PCI error-recovery callbacks (see sym2_io_* above). */
static const struct pci_error_handlers sym2_err_handler = {
	.error_detected	= sym2_io_error_detected,
	.mmio_enabled	= sym2_io_slot_dump,
	.slot_reset	= sym2_io_slot_reset,
	.resume		= sym2_io_resume,
};

static struct pci_driver sym2_driver = {
	.name		= NAME53C8XX,
	.id_table	= sym2_id_table,
	.probe		= sym2_probe,
	.remove		= sym2_remove,
	.err_handler	= &sym2_err_handler,
};

/* Module init: register with the SPI transport class first, then with
 * the PCI core; drop the transport reference if PCI registration fails. */
static int __init sym2_init(void)
{
	int error;

	sym2_setup_params();
	sym2_transport_template = spi_attach_transport(&sym2_transport_functions);
	if (!sym2_transport_template)
		return -ENODEV;

	error = pci_register_driver(&sym2_driver);
	if (error)
		spi_release_transport(sym2_transport_template);
	return error;
}

static void __exit sym2_exit(void)
{
	pci_unregister_driver(&sym2_driver);
spi_release_transport(sym2_transport_template);	/* undo spi_attach_transport() */
}

module_init(sym2_init);
module_exit(sym2_exit);
gpl-2.0
Pafcholini/kernel-msm-3.10
drivers/isdn/hisax/amd7930_fn.c
2314
20800
/* gerdes_amd7930.c,v 0.99 2001/10/02
 *
 * gerdes_amd7930.c     Amd 79C30A and 79C32A specific routines
 *                      (based on HiSax driver by Karsten Keil)
 *
 * Author               Christoph Ersfeld <info@formula-n.de>
 *                      Formula-n Europe AG (www.formula-n.com)
 *                      previously Gerdes AG
 *
 *
 *                      This file is (c) under GNU PUBLIC LICENSE
 *
 *
 * Notes:
 * Version 0.99 is the first release of this driver and there are
 * certainly a few bugs.
 *
 * Please don't report any malfunction to me without sending
 * (compressed) debug-logs.
 * It would be nearly impossible to retrace it otherwise.
 *
 * Log D-channel-processing as follows:
 *
 * 1. Load hisax with card-specific parameters; this example is for
 *    Formula-n enter:now ISDN PCI and compatible cards
 *    (e.g. Gerdes Power ISDN PCI)
 *
 *    modprobe hisax type=41 protocol=2 id=gerdes
 *
 *    If you chose another value for id, you need to modify the
 *    code below, too.
 *
 * 2. Set the debug-level:
 *
 *    hisaxctrl gerdes 1 0x3ff
 *    hisaxctrl gerdes 11 0x4f
 *    cat /dev/isdnctrl >> ~/log &
 *
 * Please also take a look into /var/log/messages if there is
 * anything important concerning HISAX.
 *
 *
 * Credits:
 * Programming the driver for the Formula-n enter:now ISDN PCI — and
 * with it this driver for the Amd 7930 D-channel controller it uses —
 * was sponsored by Formula-n Europe AG.
 * Thanks to Karsten Keil and Petr Novak, who gave me support on
 * Hisax-specific questions.
 * I want to say special thanks to Carl-Friedrich Braun, who had to
 * answer a lot of questions about ISDN in general and about the handling
 * of the Amd chip.
* */ #include "hisax.h" #include "isdnl1.h" #include "isac.h" #include "amd7930_fn.h" #include <linux/interrupt.h> #include <linux/init.h> #include <linux/gfp.h> static void Amd7930_new_ph(struct IsdnCardState *cs); static WORD initAMD[] = { 0x0100, 0x00A5, 3, 0x01, 0x40, 0x58, // LPR, LMR1, LMR2 0x0086, 1, 0x0B, // DMR1 (D-Buffer TH-Interrupts on) 0x0087, 1, 0xFF, // DMR2 0x0092, 1, 0x03, // EFCR (extended mode d-channel-fifo on) 0x0090, 4, 0xFE, 0xFF, 0x02, 0x0F, // FRAR4, SRAR4, DMR3, DMR4 (address recognition ) 0x0084, 2, 0x80, 0x00, // DRLR 0x00C0, 1, 0x47, // PPCR1 0x00C8, 1, 0x01, // PPCR2 0x0102, 0x0107, 0x01A1, 1, 0x0121, 1, 0x0189, 2, 0x0045, 4, 0x61, 0x72, 0x00, 0x00, // MCR1, MCR2, MCR3, MCR4 0x0063, 2, 0x08, 0x08, // GX 0x0064, 2, 0x08, 0x08, // GR 0x0065, 2, 0x99, 0x00, // GER 0x0066, 2, 0x7C, 0x8B, // STG 0x0067, 2, 0x00, 0x00, // FTGR1, FTGR2 0x0068, 2, 0x20, 0x20, // ATGR1, ATGR2 0x0069, 1, 0x4F, // MMR1 0x006A, 1, 0x00, // MMR2 0x006C, 1, 0x40, // MMR3 0x0021, 1, 0x02, // INIT 0x00A3, 1, 0x40, // LMR1 0xFFFF }; static void /* macro wWordAMD */ WriteWordAmd7930(struct IsdnCardState *cs, BYTE reg, WORD val) { wByteAMD(cs, 0x00, reg); wByteAMD(cs, 0x01, LOBYTE(val)); wByteAMD(cs, 0x01, HIBYTE(val)); } static WORD /* macro rWordAMD */ ReadWordAmd7930(struct IsdnCardState *cs, BYTE reg) { WORD res; /* direct access register */ if (reg < 8) { res = rByteAMD(cs, reg); res += 256 * rByteAMD(cs, reg); } /* indirect access register */ else { wByteAMD(cs, 0x00, reg); res = rByteAMD(cs, 0x01); res += 256 * rByteAMD(cs, 0x01); } return (res); } static void Amd7930_ph_command(struct IsdnCardState *cs, u_char command, char *s) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "AMD7930: %s: ph_command 0x%02X", s, command); cs->dc.amd7930.lmr1 = command; wByteAMD(cs, 0xA3, command); } static BYTE i430States[] = { // to reset F3 F4 F5 F6 F7 F8 AR from 0x01, 0x02, 0x00, 0x00, 0x00, 0x07, 0x05, 0x00, // init 0x01, 0x02, 0x00, 0x00, 0x00, 0x07, 0x05, 0x00, // reset 0x01, 
0x02, 0x00, 0x00, 0x00, 0x09, 0x05, 0x04, // F3 0x01, 0x02, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, // F4 0x01, 0x02, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, // F5 0x01, 0x03, 0x00, 0x00, 0x00, 0x06, 0x05, 0x00, // F6 0x11, 0x13, 0x00, 0x00, 0x1B, 0x00, 0x15, 0x00, // F7 0x01, 0x03, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, // F8 0x01, 0x03, 0x00, 0x00, 0x00, 0x09, 0x00, 0x0A}; // AR /* Row init - reset F3 F4 F5 F6 F7 F8 AR */ static BYTE stateHelper[] = { 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 }; static void Amd7930_get_state(struct IsdnCardState *cs) { BYTE lsr = rByteAMD(cs, 0xA1); cs->dc.amd7930.ph_state = (lsr & 0x7) + 2; Amd7930_new_ph(cs); } static void Amd7930_new_ph(struct IsdnCardState *cs) { u_char index = stateHelper[cs->dc.amd7930.old_state] * 8 + stateHelper[cs->dc.amd7930.ph_state] - 1; u_char message = i430States[index]; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "AMD7930: new_ph %d, old_ph %d, message %d, index %d", cs->dc.amd7930.ph_state, cs->dc.amd7930.old_state, message & 0x0f, index); cs->dc.amd7930.old_state = cs->dc.amd7930.ph_state; /* abort transmit if nessesary */ if ((message & 0xf0) && (cs->tx_skb)) { wByteAMD(cs, 0x21, 0xC2); wByteAMD(cs, 0x21, 0x02); } switch (message & 0x0f) { case (1): l1_msg(cs, HW_RESET | INDICATION, NULL); Amd7930_get_state(cs); break; case (2): /* init, Card starts in F3 */ l1_msg(cs, HW_DEACTIVATE | CONFIRM, NULL); break; case (3): l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL); break; case (4): l1_msg(cs, HW_POWERUP | CONFIRM, NULL); Amd7930_ph_command(cs, 0x50, "HW_ENABLE REQUEST"); break; case (5): l1_msg(cs, HW_RSYNC | INDICATION, NULL); break; case (6): l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL); break; case (7): /* init, Card starts in F7 */ l1_msg(cs, HW_RSYNC | INDICATION, NULL); l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL); break; case (8): l1_msg(cs, HW_POWERUP | CONFIRM, NULL); /* fall through */ case (9): Amd7930_ph_command(cs, 0x40, "HW_ENABLE REQ cleared if set"); l1_msg(cs, HW_RSYNC | INDICATION, 
NULL); l1_msg(cs, HW_INFO2 | INDICATION, NULL); l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL); break; case (10): Amd7930_ph_command(cs, 0x40, "T3 expired, HW_ENABLE REQ cleared"); cs->dc.amd7930.old_state = 3; break; case (11): l1_msg(cs, HW_INFO2 | INDICATION, NULL); break; default: break; } } static void Amd7930_bh(struct work_struct *work) { struct IsdnCardState *cs = container_of(work, struct IsdnCardState, tqueue); struct PStack *stptr; if (test_and_clear_bit(D_CLEARBUSY, &cs->event)) { if (cs->debug) debugl1(cs, "Amd7930: bh, D-Channel Busy cleared"); stptr = cs->stlist; while (stptr != NULL) { stptr->l1.l1l2(stptr, PH_PAUSE | CONFIRM, NULL); stptr = stptr->next; } } if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "AMD7930: bh, D_L1STATECHANGE"); Amd7930_new_ph(cs); } if (test_and_clear_bit(D_RCVBUFREADY, &cs->event)) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "AMD7930: bh, D_RCVBUFREADY"); DChannel_proc_rcv(cs); } if (test_and_clear_bit(D_XMTBUFREADY, &cs->event)) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "AMD7930: bh, D_XMTBUFREADY"); DChannel_proc_xmt(cs); } } static void Amd7930_empty_Dfifo(struct IsdnCardState *cs, int flag) { BYTE stat, der; BYTE *ptr; struct sk_buff *skb; if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO)) debugl1(cs, "Amd7930: empty_Dfifo"); ptr = cs->rcvbuf + cs->rcvidx; /* AMD interrupts off */ AmdIrqOff(cs); /* read D-Channel-Fifo*/ stat = rByteAMD(cs, 0x07); // DSR2 /* while Data in Fifo ... */ while ((stat & 2) && ((ptr-cs->rcvbuf) < MAX_DFRAME_LEN_L1)) { *ptr = rByteAMD(cs, 0x04); // DCRB ptr++; stat = rByteAMD(cs, 0x07); // DSR2 cs->rcvidx = ptr - cs->rcvbuf; /* Paket ready? 
*/ if (stat & 1) { der = rWordAMD(cs, 0x03); /* no errors, packet ok */ if (!der && !flag) { rWordAMD(cs, 0x89); // clear DRCR if ((cs->rcvidx) > 0) { if (!(skb = alloc_skb(cs->rcvidx, GFP_ATOMIC))) printk(KERN_WARNING "HiSax: Amd7930: empty_Dfifo, D receive out of memory!\n"); else { /* Debugging */ if (cs->debug & L1_DEB_ISAC_FIFO) { char *t = cs->dlog; t += sprintf(t, "Amd7930: empty_Dfifo cnt: %d |", cs->rcvidx); QuickHex(t, cs->rcvbuf, cs->rcvidx); debugl1(cs, cs->dlog); } /* moves received data in sk-buffer */ memcpy(skb_put(skb, cs->rcvidx), cs->rcvbuf, cs->rcvidx); skb_queue_tail(&cs->rq, skb); } } } /* throw damaged packets away, reset receive-buffer, indicate RX */ ptr = cs->rcvbuf; cs->rcvidx = 0; schedule_event(cs, D_RCVBUFREADY); } } /* Packet to long, overflow */ if (cs->rcvidx >= MAX_DFRAME_LEN_L1) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "AMD7930: empty_Dfifo L2-Framelength overrun"); cs->rcvidx = 0; return; } /* AMD interrupts on */ AmdIrqOn(cs); } static void Amd7930_fill_Dfifo(struct IsdnCardState *cs) { WORD dtcrr, dtcrw, len, count; BYTE txstat, dmr3; BYTE *ptr, *deb_ptr; if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO)) debugl1(cs, "Amd7930: fill_Dfifo"); if ((!cs->tx_skb) || (cs->tx_skb->len <= 0)) return; dtcrw = 0; if (!cs->dc.amd7930.tx_xmtlen) /* new Frame */ len = dtcrw = cs->tx_skb->len; /* continue frame */ else len = cs->dc.amd7930.tx_xmtlen; /* AMD interrupts off */ AmdIrqOff(cs); deb_ptr = ptr = cs->tx_skb->data; /* while free place in tx-fifo available and data in sk-buffer */ txstat = 0x10; while ((txstat & 0x10) && (cs->tx_cnt < len)) { wByteAMD(cs, 0x04, *ptr); ptr++; cs->tx_cnt++; txstat = rByteAMD(cs, 0x07); } count = ptr - cs->tx_skb->data; skb_pull(cs->tx_skb, count); dtcrr = rWordAMD(cs, 0x85); // DTCR dmr3 = rByteAMD(cs, 0x8E); if (cs->debug & L1_DEB_ISAC) { debugl1(cs, "Amd7930: fill_Dfifo, DMR3: 0x%02X, DTCR read: 0x%04X write: 0x%02X 0x%02X", dmr3, dtcrr, LOBYTE(dtcrw), HIBYTE(dtcrw)); } /* writeing of 
dtcrw starts transmit */ if (!cs->dc.amd7930.tx_xmtlen) { wWordAMD(cs, 0x85, dtcrw); cs->dc.amd7930.tx_xmtlen = dtcrw; } if (test_and_set_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) { debugl1(cs, "Amd7930: fill_Dfifo dbusytimer running"); del_timer(&cs->dbusytimer); } init_timer(&cs->dbusytimer); cs->dbusytimer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ) / 1000); add_timer(&cs->dbusytimer); if (cs->debug & L1_DEB_ISAC_FIFO) { char *t = cs->dlog; t += sprintf(t, "Amd7930: fill_Dfifo cnt: %d |", count); QuickHex(t, deb_ptr, count); debugl1(cs, cs->dlog); } /* AMD interrupts on */ AmdIrqOn(cs); } void Amd7930_interrupt(struct IsdnCardState *cs, BYTE irflags) { BYTE dsr1, dsr2, lsr; WORD der; while (irflags) { dsr1 = rByteAMD(cs, 0x02); der = rWordAMD(cs, 0x03); dsr2 = rByteAMD(cs, 0x07); lsr = rByteAMD(cs, 0xA1); if (cs->debug & L1_DEB_ISAC) debugl1(cs, "Amd7930: interrupt: flags: 0x%02X, DSR1: 0x%02X, DSR2: 0x%02X, LSR: 0x%02X, DER=0x%04X", irflags, dsr1, dsr2, lsr, der); /* D error -> read DER and DSR2 bit 2 */ if (der || (dsr2 & 4)) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "Amd7930: interrupt: D error DER=0x%04X", der); /* RX, TX abort if collision detected */ if (der & 2) { wByteAMD(cs, 0x21, 0xC2); wByteAMD(cs, 0x21, 0x02); if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); /* restart frame */ if (cs->tx_skb) { skb_push(cs->tx_skb, cs->tx_cnt); cs->tx_cnt = 0; cs->dc.amd7930.tx_xmtlen = 0; Amd7930_fill_Dfifo(cs); } else { printk(KERN_WARNING "HiSax: Amd7930 D-Collision, no skb\n"); debugl1(cs, "Amd7930: interrupt: D-Collision, no skb"); } } /* remove damaged data from fifo */ Amd7930_empty_Dfifo(cs, 1); if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); /* restart TX-Frame */ if (cs->tx_skb) { skb_push(cs->tx_skb, cs->tx_cnt); 
cs->tx_cnt = 0; cs->dc.amd7930.tx_xmtlen = 0; Amd7930_fill_Dfifo(cs); } } /* D TX FIFO empty -> fill */ if (irflags & 1) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "Amd7930: interrupt: clear Timer and fill D-TX-FIFO if data"); /* AMD interrupts off */ AmdIrqOff(cs); if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); if (cs->tx_skb) { if (cs->tx_skb->len) Amd7930_fill_Dfifo(cs); } /* AMD interrupts on */ AmdIrqOn(cs); } /* D RX FIFO full or tiny packet in Fifo -> empty */ if ((irflags & 2) || (dsr1 & 2)) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "Amd7930: interrupt: empty D-FIFO"); Amd7930_empty_Dfifo(cs, 0); } /* D-Frame transmit complete */ if (dsr1 & 64) { if (cs->debug & L1_DEB_ISAC) { debugl1(cs, "Amd7930: interrupt: transmit packet ready"); } /* AMD interrupts off */ AmdIrqOff(cs); if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); if (cs->tx_skb) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "Amd7930: interrupt: TX-Packet ready, freeing skb"); dev_kfree_skb_irq(cs->tx_skb); cs->tx_cnt = 0; cs->dc.amd7930.tx_xmtlen = 0; cs->tx_skb = NULL; } if ((cs->tx_skb = skb_dequeue(&cs->sq))) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "Amd7930: interrupt: TX-Packet ready, next packet dequeued"); cs->tx_cnt = 0; cs->dc.amd7930.tx_xmtlen = 0; Amd7930_fill_Dfifo(cs); } else schedule_event(cs, D_XMTBUFREADY); /* AMD interrupts on */ AmdIrqOn(cs); } /* LIU status interrupt -> read LSR, check statechanges */ if (lsr & 0x38) { /* AMD interrupts off */ AmdIrqOff(cs); if (cs->debug & L1_DEB_ISAC) debugl1(cs, "Amd: interrupt: LSR=0x%02X, LIU is in state %d", lsr, ((lsr & 0x7) + 2)); cs->dc.amd7930.ph_state = (lsr & 0x7) + 2; schedule_event(cs, D_L1STATECHANGE); /* AMD interrupts on */ AmdIrqOn(cs); } /* reads Interrupt-Register again. 
If there is a new interrupt-flag: restart handler */ irflags = rByteAMD(cs, 0x00); } } static void Amd7930_l1hw(struct PStack *st, int pr, void *arg) { struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware; struct sk_buff *skb = arg; u_long flags; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "Amd7930: l1hw called, pr: 0x%04X", pr); switch (pr) { case (PH_DATA | REQUEST): if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len); if (cs->debug & DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0); spin_lock_irqsave(&cs->lock, flags); if (cs->tx_skb) { skb_queue_tail(&cs->sq, skb); #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "Amd7930: l1hw: PH_DATA Queued", 0); #endif } else { cs->tx_skb = skb; cs->tx_cnt = 0; cs->dc.amd7930.tx_xmtlen = 0; #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "Amd7930: l1hw: PH_DATA", 0); #endif Amd7930_fill_Dfifo(cs); } spin_unlock_irqrestore(&cs->lock, flags); break; case (PH_PULL | INDICATION): spin_lock_irqsave(&cs->lock, flags); if (cs->tx_skb) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "Amd7930: l1hw: l2l1 tx_skb exist this shouldn't happen"); skb_queue_tail(&cs->sq, skb); spin_unlock_irqrestore(&cs->lock, flags); break; } if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len); if (cs->debug & DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0); cs->tx_skb = skb; cs->tx_cnt = 0; cs->dc.amd7930.tx_xmtlen = 0; #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "Amd7930: l1hw: PH_DATA_PULLED", 0); #endif Amd7930_fill_Dfifo(cs); spin_unlock_irqrestore(&cs->lock, flags); break; case (PH_PULL | REQUEST): #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) debugl1(cs, "Amd7930: l1hw: -> PH_REQUEST_PULL, skb: %s", (cs->tx_skb) ? 
"yes" : "no"); #endif if (!cs->tx_skb) { test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags); st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } else test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; case (HW_RESET | REQUEST): spin_lock_irqsave(&cs->lock, flags); if ((cs->dc.amd7930.ph_state == 8)) { /* b-channels off, PH-AR cleared * change to F3 */ Amd7930_ph_command(cs, 0x20, "HW_RESET REQUEST"); //LMR1 bit 5 spin_unlock_irqrestore(&cs->lock, flags); } else { Amd7930_ph_command(cs, 0x40, "HW_RESET REQUEST"); cs->dc.amd7930.ph_state = 2; spin_unlock_irqrestore(&cs->lock, flags); Amd7930_new_ph(cs); } break; case (HW_ENABLE | REQUEST): cs->dc.amd7930.ph_state = 9; Amd7930_new_ph(cs); break; case (HW_INFO3 | REQUEST): // automatic break; case (HW_TESTLOOP | REQUEST): /* not implemented yet */ break; case (HW_DEACTIVATE | RESPONSE): skb_queue_purge(&cs->rq); skb_queue_purge(&cs->sq); if (cs->tx_skb) { dev_kfree_skb(cs->tx_skb); cs->tx_skb = NULL; } if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); break; default: if (cs->debug & L1_DEB_WARN) debugl1(cs, "Amd7930: l1hw: unknown %04x", pr); break; } } static void setstack_Amd7930(struct PStack *st, struct IsdnCardState *cs) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "Amd7930: setstack called"); st->l1.l1hw = Amd7930_l1hw; } static void DC_Close_Amd7930(struct IsdnCardState *cs) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "Amd7930: DC_Close called"); } static void dbusy_timer_handler(struct IsdnCardState *cs) { u_long flags; struct PStack *stptr; WORD dtcr, der; BYTE dsr1, dsr2; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "Amd7930: dbusy_timer expired!"); if (test_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) { spin_lock_irqsave(&cs->lock, flags); /* D Transmit Byte Count Register: * Counts down packet's number of Bytes, 0 if packet ready */ dtcr = rWordAMD(cs, 0x85); dsr1 = rByteAMD(cs, 0x02); dsr2 = rByteAMD(cs, 
0x07); der = rWordAMD(cs, 0x03); if (cs->debug & L1_DEB_ISAC) debugl1(cs, "Amd7930: dbusy_timer_handler: DSR1=0x%02X, DSR2=0x%02X, DER=0x%04X, cs->tx_skb->len=%u, tx_stat=%u, dtcr=%u, cs->tx_cnt=%u", dsr1, dsr2, der, cs->tx_skb->len, cs->dc.amd7930.tx_xmtlen, dtcr, cs->tx_cnt); if ((cs->dc.amd7930.tx_xmtlen - dtcr) < cs->tx_cnt) { /* D-Channel Busy */ test_and_set_bit(FLG_L1_DBUSY, &cs->HW_Flags); stptr = cs->stlist; spin_unlock_irqrestore(&cs->lock, flags); while (stptr != NULL) { stptr->l1.l1l2(stptr, PH_PAUSE | INDICATION, NULL); stptr = stptr->next; } } else { /* discard frame; reset transceiver */ test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags); if (cs->tx_skb) { dev_kfree_skb_any(cs->tx_skb); cs->tx_cnt = 0; cs->tx_skb = NULL; cs->dc.amd7930.tx_xmtlen = 0; } else { printk(KERN_WARNING "HiSax: Amd7930: D-Channel Busy no skb\n"); debugl1(cs, "Amd7930: D-Channel Busy no skb"); } /* Transmitter reset, abort transmit */ wByteAMD(cs, 0x21, 0x82); wByteAMD(cs, 0x21, 0x02); spin_unlock_irqrestore(&cs->lock, flags); cs->irq_func(cs->irq, cs); if (cs->debug & L1_DEB_ISAC) debugl1(cs, "Amd7930: dbusy_timer_handler: Transmitter reset"); } } } void Amd7930_init(struct IsdnCardState *cs) { WORD *ptr; BYTE cmd, cnt; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "Amd7930: initamd called"); cs->dc.amd7930.tx_xmtlen = 0; cs->dc.amd7930.old_state = 0; cs->dc.amd7930.lmr1 = 0x40; cs->dc.amd7930.ph_command = Amd7930_ph_command; cs->setstack_d = setstack_Amd7930; cs->DC_Close = DC_Close_Amd7930; /* AMD Initialisation */ for (ptr = initAMD; *ptr != 0xFFFF; ) { cmd = LOBYTE(*ptr); /* read */ if (*ptr++ >= 0x100) { if (cmd < 8) /* reset register */ rByteAMD(cs, cmd); else { wByteAMD(cs, 0x00, cmd); for (cnt = *ptr++; cnt > 0; cnt--) rByteAMD(cs, 0x01); } } /* write */ else if (cmd < 8) wByteAMD(cs, cmd, LOBYTE(*ptr++)); else { wByteAMD(cs, 0x00, cmd); for (cnt = *ptr++; cnt > 0; cnt--) wByteAMD(cs, 0x01, LOBYTE(*ptr++)); } } } void setup_Amd7930(struct IsdnCardState *cs) { 
INIT_WORK(&cs->tqueue, Amd7930_bh);	/* bottom half for deferred D-channel events */
	/* Arm the D-channel busy watchdog (started in Amd7930_fill_Dfifo). */
	cs->dbusytimer.function = (void *) dbusy_timer_handler;
	cs->dbusytimer.data = (long) cs;
	init_timer(&cs->dbusytimer);
}
gpl-2.0
matthew-l-weber/linux-3-10-rc1-moxart
fs/affs/file.c
2314
24765
/* * linux/fs/affs/file.c * * (c) 1996 Hans-Joachim Widmaier - Rewritten * * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem. * * (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem. * * (C) 1991 Linus Torvalds - minix filesystem * * affs regular file handling primitives */ #include "affs.h" #if PAGE_SIZE < 4096 #error PAGE_SIZE must be at least 4096 #endif static int affs_grow_extcache(struct inode *inode, u32 lc_idx); static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext); static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext); static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext); static int affs_file_open(struct inode *inode, struct file *filp); static int affs_file_release(struct inode *inode, struct file *filp); const struct file_operations affs_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .aio_read = generic_file_aio_read, .write = do_sync_write, .aio_write = generic_file_aio_write, .mmap = generic_file_mmap, .open = affs_file_open, .release = affs_file_release, .fsync = affs_file_fsync, .splice_read = generic_file_splice_read, }; const struct inode_operations affs_file_inode_operations = { .setattr = affs_notify_change, }; static int affs_file_open(struct inode *inode, struct file *filp) { pr_debug("AFFS: open(%lu,%d)\n", inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt)); atomic_inc(&AFFS_I(inode)->i_opencnt); return 0; } static int affs_file_release(struct inode *inode, struct file *filp) { pr_debug("AFFS: release(%lu, %d)\n", inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt)); if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) { mutex_lock(&inode->i_mutex); if (inode->i_size != AFFS_I(inode)->mmu_private) affs_truncate(inode); affs_free_prealloc(inode); mutex_unlock(&inode->i_mutex); } return 0; } static int affs_grow_extcache(struct inode *inode, u32 lc_idx) { struct super_block *sb = inode->i_sb; struct 
buffer_head *bh; u32 lc_max; int i, j, key; if (!AFFS_I(inode)->i_lc) { char *ptr = (char *)get_zeroed_page(GFP_NOFS); if (!ptr) return -ENOMEM; AFFS_I(inode)->i_lc = (u32 *)ptr; AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2); } lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift; if (AFFS_I(inode)->i_extcnt > lc_max) { u32 lc_shift, lc_mask, tmp, off; /* need to recalculate linear cache, start from old size */ lc_shift = AFFS_I(inode)->i_lc_shift; tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift; for (; tmp; tmp >>= 1) lc_shift++; lc_mask = (1 << lc_shift) - 1; /* fix idx and old size to new shift */ lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift); AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift); /* first shrink old cache to make more space */ off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift); for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off) AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j]; AFFS_I(inode)->i_lc_shift = lc_shift; AFFS_I(inode)->i_lc_mask = lc_mask; } /* fill cache to the needed index */ i = AFFS_I(inode)->i_lc_size; AFFS_I(inode)->i_lc_size = lc_idx + 1; for (; i <= lc_idx; i++) { if (!i) { AFFS_I(inode)->i_lc[0] = inode->i_ino; continue; } key = AFFS_I(inode)->i_lc[i - 1]; j = AFFS_I(inode)->i_lc_mask + 1; // unlock cache for (; j > 0; j--) { bh = affs_bread(sb, key); if (!bh) goto err; key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension); affs_brelse(bh); } // lock cache AFFS_I(inode)->i_lc[i] = key; } return 0; err: // lock cache return -EIO; } static struct buffer_head * affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext) { struct super_block *sb = inode->i_sb; struct buffer_head *new_bh; u32 blocknr, tmp; blocknr = affs_alloc_block(inode, bh->b_blocknr); if (!blocknr) return ERR_PTR(-ENOSPC); new_bh = affs_getzeroblk(sb, blocknr); if (!new_bh) { affs_free_block(sb, blocknr); return ERR_PTR(-EIO); } AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST); AFFS_HEAD(new_bh)->key 
= cpu_to_be32(blocknr); AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE); AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino); affs_fix_checksum(sb, new_bh); mark_buffer_dirty_inode(new_bh, inode); tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension); if (tmp) affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp); AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr); affs_adjust_checksum(bh, blocknr - tmp); mark_buffer_dirty_inode(bh, inode); AFFS_I(inode)->i_extcnt++; mark_inode_dirty(inode); return new_bh; } static inline struct buffer_head * affs_get_extblock(struct inode *inode, u32 ext) { /* inline the simplest case: same extended block as last time */ struct buffer_head *bh = AFFS_I(inode)->i_ext_bh; if (ext == AFFS_I(inode)->i_ext_last) get_bh(bh); else /* we have to do more (not inlined) */ bh = affs_get_extblock_slow(inode, ext); return bh; } static struct buffer_head * affs_get_extblock_slow(struct inode *inode, u32 ext) { struct super_block *sb = inode->i_sb; struct buffer_head *bh; u32 ext_key; u32 lc_idx, lc_off, ac_idx; u32 tmp, idx; if (ext == AFFS_I(inode)->i_ext_last + 1) { /* read the next extended block from the current one */ bh = AFFS_I(inode)->i_ext_bh; ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension); if (ext < AFFS_I(inode)->i_extcnt) goto read_ext; if (ext > AFFS_I(inode)->i_extcnt) BUG(); bh = affs_alloc_extblock(inode, bh, ext); if (IS_ERR(bh)) return bh; goto store_ext; } if (ext == 0) { /* we seek back to the file header block */ ext_key = inode->i_ino; goto read_ext; } if (ext >= AFFS_I(inode)->i_extcnt) { struct buffer_head *prev_bh; /* allocate a new extended block */ if (ext > AFFS_I(inode)->i_extcnt) BUG(); /* get previous extended block */ prev_bh = affs_get_extblock(inode, ext - 1); if (IS_ERR(prev_bh)) return prev_bh; bh = affs_alloc_extblock(inode, prev_bh, ext); affs_brelse(prev_bh); if (IS_ERR(bh)) return bh; goto store_ext; } again: /* check if there is an extended cache and whether it's large enough */ 
lc_idx = ext >> AFFS_I(inode)->i_lc_shift; lc_off = ext & AFFS_I(inode)->i_lc_mask; if (lc_idx >= AFFS_I(inode)->i_lc_size) { int err; err = affs_grow_extcache(inode, lc_idx); if (err) return ERR_PTR(err); goto again; } /* every n'th key we find in the linear cache */ if (!lc_off) { ext_key = AFFS_I(inode)->i_lc[lc_idx]; goto read_ext; } /* maybe it's still in the associative cache */ ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK; if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) { ext_key = AFFS_I(inode)->i_ac[ac_idx].key; goto read_ext; } /* try to find one of the previous extended blocks */ tmp = ext; idx = ac_idx; while (--tmp, --lc_off > 0) { idx = (idx - 1) & AFFS_AC_MASK; if (AFFS_I(inode)->i_ac[idx].ext == tmp) { ext_key = AFFS_I(inode)->i_ac[idx].key; goto find_ext; } } /* fall back to the linear cache */ ext_key = AFFS_I(inode)->i_lc[lc_idx]; find_ext: /* read all extended blocks until we find the one we need */ //unlock cache do { bh = affs_bread(sb, ext_key); if (!bh) goto err_bread; ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension); affs_brelse(bh); tmp++; } while (tmp < ext); //lock cache /* store it in the associative cache */ // recalculate ac_idx? 
AFFS_I(inode)->i_ac[ac_idx].ext = ext; AFFS_I(inode)->i_ac[ac_idx].key = ext_key; read_ext: /* finally read the right extended block */ //unlock cache bh = affs_bread(sb, ext_key); if (!bh) goto err_bread; //lock cache store_ext: /* release old cached extended block and store the new one */ affs_brelse(AFFS_I(inode)->i_ext_bh); AFFS_I(inode)->i_ext_last = ext; AFFS_I(inode)->i_ext_bh = bh; get_bh(bh); return bh; err_bread: affs_brelse(bh); return ERR_PTR(-EIO); } static int affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { struct super_block *sb = inode->i_sb; struct buffer_head *ext_bh; u32 ext; pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block); BUG_ON(block > (sector_t)0x7fffffffUL); if (block >= AFFS_I(inode)->i_blkcnt) { if (block > AFFS_I(inode)->i_blkcnt || !create) goto err_big; } else create = 0; //lock cache affs_lock_ext(inode); ext = (u32)block / AFFS_SB(sb)->s_hashsize; block -= ext * AFFS_SB(sb)->s_hashsize; ext_bh = affs_get_extblock(inode, ext); if (IS_ERR(ext_bh)) goto err_ext; map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block))); if (create) { u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr); if (!blocknr) goto err_alloc; set_buffer_new(bh_result); AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize; AFFS_I(inode)->i_blkcnt++; /* store new block */ if (bh_result->b_blocknr) affs_warning(sb, "get_block", "block already set (%x)", bh_result->b_blocknr); AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr); AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1); affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1); bh_result->b_blocknr = blocknr; if (!block) { /* insert first block into header block */ u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data); if (tmp) affs_warning(sb, "get_block", "first block already set (%d)", tmp); AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr); affs_adjust_checksum(ext_bh, blocknr - 
tmp); } } affs_brelse(ext_bh); //unlock cache affs_unlock_ext(inode); return 0; err_big: affs_error(inode->i_sb,"get_block","strange block request %d", block); return -EIO; err_ext: // unlock cache affs_unlock_ext(inode); return PTR_ERR(ext_bh); err_alloc: brelse(ext_bh); clear_buffer_mapped(bh_result); bh_result->b_bdev = NULL; // unlock cache affs_unlock_ext(inode); return -ENOSPC; } static int affs_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, affs_get_block, wbc); } static int affs_readpage(struct file *file, struct page *page) { return block_read_full_page(page, affs_get_block); } static void affs_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) { truncate_pagecache(inode, to, inode->i_size); affs_truncate(inode); } } static int affs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; *pagep = NULL; ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, affs_get_block, &AFFS_I(mapping->host)->mmu_private); if (unlikely(ret)) affs_write_failed(mapping, pos + len); return ret; } static sector_t _affs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,affs_get_block); } const struct address_space_operations affs_aops = { .readpage = affs_readpage, .writepage = affs_writepage, .write_begin = affs_write_begin, .write_end = generic_write_end, .bmap = _affs_bmap }; static inline struct buffer_head * affs_bread_ino(struct inode *inode, int block, int create) { struct buffer_head *bh, tmp_bh; int err; tmp_bh.b_state = 0; err = affs_get_block(inode, block, &tmp_bh, create); if (!err) { bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr); if (bh) { bh->b_state |= tmp_bh.b_state; return bh; } err = -EIO; } return ERR_PTR(err); } static inline struct buffer_head * affs_getzeroblk_ino(struct inode *inode, 
int block) { struct buffer_head *bh, tmp_bh; int err; tmp_bh.b_state = 0; err = affs_get_block(inode, block, &tmp_bh, 1); if (!err) { bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr); if (bh) { bh->b_state |= tmp_bh.b_state; return bh; } err = -EIO; } return ERR_PTR(err); } static inline struct buffer_head * affs_getemptyblk_ino(struct inode *inode, int block) { struct buffer_head *bh, tmp_bh; int err; tmp_bh.b_state = 0; err = affs_get_block(inode, block, &tmp_bh, 1); if (!err) { bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr); if (bh) { bh->b_state |= tmp_bh.b_state; return bh; } err = -EIO; } return ERR_PTR(err); } static int affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to) { struct inode *inode = page->mapping->host; struct super_block *sb = inode->i_sb; struct buffer_head *bh; char *data; u32 bidx, boff, bsize; u32 tmp; pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to); BUG_ON(from > to || to > PAGE_CACHE_SIZE); kmap(page); data = page_address(page); bsize = AFFS_SB(sb)->s_data_blksize; tmp = (page->index << PAGE_CACHE_SHIFT) + from; bidx = tmp / bsize; boff = tmp % bsize; while (from < to) { bh = affs_bread_ino(inode, bidx, 0); if (IS_ERR(bh)) return PTR_ERR(bh); tmp = min(bsize - boff, to - from); BUG_ON(from + tmp > to || tmp > bsize); memcpy(data + from, AFFS_DATA(bh) + boff, tmp); affs_brelse(bh); bidx++; from += tmp; boff = 0; } flush_dcache_page(page); kunmap(page); return 0; } static int affs_extent_file_ofs(struct inode *inode, u32 newsize) { struct super_block *sb = inode->i_sb; struct buffer_head *bh, *prev_bh; u32 bidx, boff; u32 size, bsize; u32 tmp; pr_debug("AFFS: extent_file(%u, %d)\n", (u32)inode->i_ino, newsize); bsize = AFFS_SB(sb)->s_data_blksize; bh = NULL; size = AFFS_I(inode)->mmu_private; bidx = size / bsize; boff = size % bsize; if (boff) { bh = affs_bread_ino(inode, bidx, 0); if (IS_ERR(bh)) return PTR_ERR(bh); tmp = min(bsize - boff, newsize - size); 
BUG_ON(boff + tmp > bsize || tmp > bsize); memset(AFFS_DATA(bh) + boff, 0, tmp); be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp); affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); size += tmp; bidx++; } else if (bidx) { bh = affs_bread_ino(inode, bidx - 1, 0); if (IS_ERR(bh)) return PTR_ERR(bh); } while (size < newsize) { prev_bh = bh; bh = affs_getzeroblk_ino(inode, bidx); if (IS_ERR(bh)) goto out; tmp = min(bsize, newsize - size); BUG_ON(tmp > bsize); AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp); affs_fix_checksum(sb, bh); bh->b_state &= ~(1UL << BH_New); mark_buffer_dirty_inode(bh, inode); if (prev_bh) { u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next); if (tmp) affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp); AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr); affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp); mark_buffer_dirty_inode(prev_bh, inode); affs_brelse(prev_bh); } size += bsize; bidx++; } affs_brelse(bh); inode->i_size = AFFS_I(inode)->mmu_private = newsize; return 0; out: inode->i_size = AFFS_I(inode)->mmu_private = newsize; return PTR_ERR(bh); } static int affs_readpage_ofs(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; u32 to; int err; pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index); to = PAGE_CACHE_SIZE; if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) { to = inode->i_size & ~PAGE_CACHE_MASK; memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to); } err = affs_do_readpage_ofs(file, page, 0, to); if (!err) SetPageUptodate(page); unlock_page(page); return err; } static int affs_write_begin_ofs(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; struct page 
*page; pgoff_t index; int err = 0; pr_debug("AFFS: write_begin(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len); if (pos > AFFS_I(inode)->mmu_private) { /* XXX: this probably leaves a too-big i_size in case of * failure. Should really be updating i_size at write_end time */ err = affs_extent_file_ofs(inode, pos); if (err) return err; } index = pos >> PAGE_CACHE_SHIFT; page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; if (PageUptodate(page)) return 0; /* XXX: inefficient but safe in the face of short writes */ err = affs_do_readpage_ofs(file, page, 0, PAGE_CACHE_SIZE); if (err) { unlock_page(page); page_cache_release(page); } return err; } static int affs_write_end_ofs(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = mapping->host; struct super_block *sb = inode->i_sb; struct buffer_head *bh, *prev_bh; char *data; u32 bidx, boff, bsize; unsigned from, to; u32 tmp; int written; from = pos & (PAGE_CACHE_SIZE - 1); to = pos + len; /* * XXX: not sure if this can handle short copies (len < copied), but * we don't have to, because the page should always be uptodate here, * due to write_begin. 
*/ pr_debug("AFFS: write_begin(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len); bsize = AFFS_SB(sb)->s_data_blksize; data = page_address(page); bh = NULL; written = 0; tmp = (page->index << PAGE_CACHE_SHIFT) + from; bidx = tmp / bsize; boff = tmp % bsize; if (boff) { bh = affs_bread_ino(inode, bidx, 0); if (IS_ERR(bh)) return PTR_ERR(bh); tmp = min(bsize - boff, to - from); BUG_ON(boff + tmp > bsize || tmp > bsize); memcpy(AFFS_DATA(bh) + boff, data + from, tmp); be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp); affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); written += tmp; from += tmp; bidx++; } else if (bidx) { bh = affs_bread_ino(inode, bidx - 1, 0); if (IS_ERR(bh)) return PTR_ERR(bh); } while (from + bsize <= to) { prev_bh = bh; bh = affs_getemptyblk_ino(inode, bidx); if (IS_ERR(bh)) goto out; memcpy(AFFS_DATA(bh), data + from, bsize); if (buffer_new(bh)) { AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize); AFFS_DATA_HEAD(bh)->next = 0; bh->b_state &= ~(1UL << BH_New); if (prev_bh) { u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next); if (tmp) affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp); AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr); affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp); mark_buffer_dirty_inode(prev_bh, inode); } } affs_brelse(prev_bh); affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); written += bsize; from += bsize; bidx++; } if (from < to) { prev_bh = bh; bh = affs_bread_ino(inode, bidx, 1); if (IS_ERR(bh)) goto out; tmp = min(bsize, to - from); BUG_ON(tmp > bsize); memcpy(AFFS_DATA(bh), data + from, tmp); if (buffer_new(bh)) { AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); AFFS_DATA_HEAD(bh)->sequence = 
cpu_to_be32(bidx); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp); AFFS_DATA_HEAD(bh)->next = 0; bh->b_state &= ~(1UL << BH_New); if (prev_bh) { u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next); if (tmp) affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp); AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr); affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp); mark_buffer_dirty_inode(prev_bh, inode); } } else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp) AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp); affs_brelse(prev_bh); affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); written += tmp; from += tmp; bidx++; } SetPageUptodate(page); done: affs_brelse(bh); tmp = (page->index << PAGE_CACHE_SHIFT) + from; if (tmp > inode->i_size) inode->i_size = AFFS_I(inode)->mmu_private = tmp; unlock_page(page); page_cache_release(page); return written; out: bh = prev_bh; if (!written) written = PTR_ERR(bh); goto done; } const struct address_space_operations affs_aops_ofs = { .readpage = affs_readpage_ofs, //.writepage = affs_writepage_ofs, .write_begin = affs_write_begin_ofs, .write_end = affs_write_end_ofs }; /* Free any preallocated blocks. */ void affs_free_prealloc(struct inode *inode) { struct super_block *sb = inode->i_sb; pr_debug("AFFS: free_prealloc(ino=%lu)\n", inode->i_ino); while (AFFS_I(inode)->i_pa_cnt) { AFFS_I(inode)->i_pa_cnt--; affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc); } } /* Truncate (or enlarge) a file to the requested size. 
*/ void affs_truncate(struct inode *inode) { struct super_block *sb = inode->i_sb; u32 ext, ext_key; u32 last_blk, blkcnt, blk; u32 size; struct buffer_head *ext_bh; int i; pr_debug("AFFS: truncate(inode=%d, oldsize=%u, newsize=%u)\n", (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size); last_blk = 0; ext = 0; if (inode->i_size) { last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize; ext = last_blk / AFFS_SB(sb)->s_hashsize; } if (inode->i_size > AFFS_I(inode)->mmu_private) { struct address_space *mapping = inode->i_mapping; struct page *page; void *fsdata; u32 size = inode->i_size; int res; res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata); if (!res) res = mapping->a_ops->write_end(NULL, mapping, size, 0, 0, page, fsdata); else inode->i_size = AFFS_I(inode)->mmu_private; mark_inode_dirty(inode); return; } else if (inode->i_size == AFFS_I(inode)->mmu_private) return; // lock cache ext_bh = affs_get_extblock(inode, ext); if (IS_ERR(ext_bh)) { affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)", ext, PTR_ERR(ext_bh)); return; } if (AFFS_I(inode)->i_lc) { /* clear linear cache */ i = (ext + 1) >> AFFS_I(inode)->i_lc_shift; if (AFFS_I(inode)->i_lc_size > i) { AFFS_I(inode)->i_lc_size = i; for (; i < AFFS_LC_SIZE; i++) AFFS_I(inode)->i_lc[i] = 0; } /* clear associative cache */ for (i = 0; i < AFFS_AC_SIZE; i++) if (AFFS_I(inode)->i_ac[i].ext >= ext) AFFS_I(inode)->i_ac[i].ext = 0; } ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension); blkcnt = AFFS_I(inode)->i_blkcnt; i = 0; blk = last_blk; if (inode->i_size) { i = last_blk % AFFS_SB(sb)->s_hashsize + 1; blk++; } else AFFS_HEAD(ext_bh)->first_data = 0; AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i); size = AFFS_SB(sb)->s_hashsize; if (size > blkcnt - blk + i) size = blkcnt - blk + i; for (; i < size; i++, blk++) { affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i))); AFFS_BLOCK(sb, ext_bh, i) = 0; } AFFS_TAIL(sb, 
ext_bh)->extension = 0; affs_fix_checksum(sb, ext_bh); mark_buffer_dirty_inode(ext_bh, inode); affs_brelse(ext_bh); if (inode->i_size) { AFFS_I(inode)->i_blkcnt = last_blk + 1; AFFS_I(inode)->i_extcnt = ext + 1; if (AFFS_SB(sb)->s_flags & SF_OFS) { struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0); u32 tmp; if (IS_ERR(bh)) { affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)", ext, PTR_ERR(bh)); return; } tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next); AFFS_DATA_HEAD(bh)->next = 0; affs_adjust_checksum(bh, -tmp); affs_brelse(bh); } } else { AFFS_I(inode)->i_blkcnt = 0; AFFS_I(inode)->i_extcnt = 1; } AFFS_I(inode)->mmu_private = inode->i_size; // unlock cache while (ext_key) { ext_bh = affs_bread(sb, ext_key); size = AFFS_SB(sb)->s_hashsize; if (size > blkcnt - blk) size = blkcnt - blk; for (i = 0; i < size; i++, blk++) affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i))); affs_free_block(sb, ext_key); ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension); affs_brelse(ext_bh); } affs_free_prealloc(inode); } int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { struct inode *inode = filp->f_mapping->host; int ret, err; err = filemap_write_and_wait_range(inode->i_mapping, start, end); if (err) return err; mutex_lock(&inode->i_mutex); ret = write_inode_now(inode, 0); err = sync_blockdev(inode->i_sb->s_bdev); if (!ret) ret = err; mutex_unlock(&inode->i_mutex); return ret; }
gpl-2.0
shankarathi07/linux_motorola_lollipop
drivers/i2c/busses/i2c-amd756-s4882.c
2314
7583
/* * i2c-amd756-s4882.c - i2c-amd756 extras for the Tyan S4882 motherboard * * Copyright (C) 2004, 2008 Jean Delvare <khali@linux-fr.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * We select the channels by sending commands to the Philips * PCA9556 chip at I2C address 0x18. The main adapter is used for * the non-multiplexed part of the bus, and 4 virtual adapters * are defined for the multiplexed addresses: 0x50-0x53 (memory * module EEPROM) located on channels 1-4, and 0x4c (LM63) * located on multiplexed channels 0 and 5-7. 
We define one * virtual adapter per CPU, which corresponds to two multiplexed * channels: * CPU0: virtual adapter 1, channels 1 and 0 * CPU1: virtual adapter 2, channels 2 and 5 * CPU2: virtual adapter 3, channels 3 and 6 * CPU3: virtual adapter 4, channels 4 and 7 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/mutex.h> extern struct i2c_adapter amd756_smbus; static struct i2c_adapter *s4882_adapter; static struct i2c_algorithm *s4882_algo; /* Wrapper access functions for multiplexed SMBus */ static DEFINE_MUTEX(amd756_lock); static s32 amd756_access_virt0(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { int error; /* We exclude the multiplexed addresses */ if (addr == 0x4c || (addr & 0xfc) == 0x50 || (addr & 0xfc) == 0x30 || addr == 0x18) return -ENXIO; mutex_lock(&amd756_lock); error = amd756_smbus.algo->smbus_xfer(adap, addr, flags, read_write, command, size, data); mutex_unlock(&amd756_lock); return error; } /* We remember the last used channels combination so as to only switch channels when it is really needed. This greatly reduces the SMBus overhead, but also assumes that nobody will be writing to the PCA9556 in our back. 
*/ static u8 last_channels; static inline s32 amd756_access_channel(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data, u8 channels) { int error; /* We exclude the non-multiplexed addresses */ if (addr != 0x4c && (addr & 0xfc) != 0x50 && (addr & 0xfc) != 0x30) return -ENXIO; mutex_lock(&amd756_lock); if (last_channels != channels) { union i2c_smbus_data mplxdata; mplxdata.byte = channels; error = amd756_smbus.algo->smbus_xfer(adap, 0x18, 0, I2C_SMBUS_WRITE, 0x01, I2C_SMBUS_BYTE_DATA, &mplxdata); if (error) goto UNLOCK; last_channels = channels; } error = amd756_smbus.algo->smbus_xfer(adap, addr, flags, read_write, command, size, data); UNLOCK: mutex_unlock(&amd756_lock); return error; } static s32 amd756_access_virt1(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { /* CPU0: channels 1 and 0 enabled */ return amd756_access_channel(adap, addr, flags, read_write, command, size, data, 0x03); } static s32 amd756_access_virt2(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { /* CPU1: channels 2 and 5 enabled */ return amd756_access_channel(adap, addr, flags, read_write, command, size, data, 0x24); } static s32 amd756_access_virt3(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { /* CPU2: channels 3 and 6 enabled */ return amd756_access_channel(adap, addr, flags, read_write, command, size, data, 0x48); } static s32 amd756_access_virt4(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { /* CPU3: channels 4 and 7 enabled */ return amd756_access_channel(adap, addr, flags, read_write, command, size, data, 0x90); } static int __init amd756_s4882_init(void) { int i, error; union i2c_smbus_data 
ioconfig; if (!amd756_smbus.dev.parent) return -ENODEV; /* Configure the PCA9556 multiplexer */ ioconfig.byte = 0x00; /* All I/O to output mode */ error = i2c_smbus_xfer(&amd756_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03, I2C_SMBUS_BYTE_DATA, &ioconfig); if (error) { dev_err(&amd756_smbus.dev, "PCA9556 configuration failed\n"); error = -EIO; goto ERROR0; } /* Unregister physical bus */ i2c_del_adapter(&amd756_smbus); printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4882\n"); /* Define the 5 virtual adapters and algorithms structures */ if (!(s4882_adapter = kzalloc(5 * sizeof(struct i2c_adapter), GFP_KERNEL))) { error = -ENOMEM; goto ERROR1; } if (!(s4882_algo = kzalloc(5 * sizeof(struct i2c_algorithm), GFP_KERNEL))) { error = -ENOMEM; goto ERROR2; } /* Fill in the new structures */ s4882_algo[0] = *(amd756_smbus.algo); s4882_algo[0].smbus_xfer = amd756_access_virt0; s4882_adapter[0] = amd756_smbus; s4882_adapter[0].algo = s4882_algo; s4882_adapter[0].dev.parent = amd756_smbus.dev.parent; for (i = 1; i < 5; i++) { s4882_algo[i] = *(amd756_smbus.algo); s4882_adapter[i] = amd756_smbus; snprintf(s4882_adapter[i].name, sizeof(s4882_adapter[i].name), "SMBus 8111 adapter (CPU%d)", i-1); s4882_adapter[i].algo = s4882_algo+i; s4882_adapter[i].dev.parent = amd756_smbus.dev.parent; } s4882_algo[1].smbus_xfer = amd756_access_virt1; s4882_algo[2].smbus_xfer = amd756_access_virt2; s4882_algo[3].smbus_xfer = amd756_access_virt3; s4882_algo[4].smbus_xfer = amd756_access_virt4; /* Register virtual adapters */ for (i = 0; i < 5; i++) { error = i2c_add_adapter(s4882_adapter+i); if (error) { printk(KERN_ERR "i2c-amd756-s4882: " "Virtual adapter %d registration " "failed, module not inserted\n", i); for (i--; i >= 0; i--) i2c_del_adapter(s4882_adapter+i); goto ERROR3; } } return 0; ERROR3: kfree(s4882_algo); s4882_algo = NULL; ERROR2: kfree(s4882_adapter); s4882_adapter = NULL; ERROR1: /* Restore physical bus */ i2c_add_adapter(&amd756_smbus); ERROR0: return error; } static void 
__exit amd756_s4882_exit(void) { if (s4882_adapter) { int i; for (i = 0; i < 5; i++) i2c_del_adapter(s4882_adapter+i); kfree(s4882_adapter); s4882_adapter = NULL; } kfree(s4882_algo); s4882_algo = NULL; /* Restore physical bus */ if (i2c_add_adapter(&amd756_smbus)) printk(KERN_ERR "i2c-amd756-s4882: " "Physical bus restoration failed\n"); } MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>"); MODULE_DESCRIPTION("S4882 SMBus multiplexing"); MODULE_LICENSE("GPL"); module_init(amd756_s4882_init); module_exit(amd756_s4882_exit);
gpl-2.0
furiousanger/FuriousKernel
arch/arm/plat-omap/io.c
2826
5219
/* * Common io.c file * This file is created by Russell King <rmk+kernel@arm.linux.org.uk> * * Copyright (C) 2009 Texas Instruments * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/io.h> #include <linux/mm.h> #include <plat/omap7xx.h> #include <plat/omap1510.h> #include <plat/omap16xx.h> #include <plat/omap24xx.h> #include <plat/omap34xx.h> #include <plat/omap44xx.h> #define BETWEEN(p,st,sz) ((p) >= (st) && (p) < ((st) + (sz))) #define XLATE(p,pst,vst) ((void __iomem *)((p) - (pst) + (vst))) /* * Intercept ioremap() requests for addresses in our fixed mapping regions. */ void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type) { #ifdef CONFIG_ARCH_OMAP1 if (cpu_class_is_omap1()) { if (BETWEEN(p, OMAP1_IO_PHYS, OMAP1_IO_SIZE)) return XLATE(p, OMAP1_IO_PHYS, OMAP1_IO_VIRT); } if (cpu_is_omap7xx()) { if (BETWEEN(p, OMAP7XX_DSP_BASE, OMAP7XX_DSP_SIZE)) return XLATE(p, OMAP7XX_DSP_BASE, OMAP7XX_DSP_START); if (BETWEEN(p, OMAP7XX_DSPREG_BASE, OMAP7XX_DSPREG_SIZE)) return XLATE(p, OMAP7XX_DSPREG_BASE, OMAP7XX_DSPREG_START); } if (cpu_is_omap15xx()) { if (BETWEEN(p, OMAP1510_DSP_BASE, OMAP1510_DSP_SIZE)) return XLATE(p, OMAP1510_DSP_BASE, OMAP1510_DSP_START); if (BETWEEN(p, OMAP1510_DSPREG_BASE, OMAP1510_DSPREG_SIZE)) return XLATE(p, OMAP1510_DSPREG_BASE, OMAP1510_DSPREG_START); } if (cpu_is_omap16xx()) { if (BETWEEN(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_SIZE)) return XLATE(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_START); if (BETWEEN(p, OMAP16XX_DSPREG_BASE, OMAP16XX_DSPREG_SIZE)) return XLATE(p, OMAP16XX_DSPREG_BASE, OMAP16XX_DSPREG_START); } #endif #ifdef CONFIG_ARCH_OMAP2 if (cpu_is_omap24xx()) { if (BETWEEN(p, L3_24XX_PHYS, L3_24XX_SIZE)) return XLATE(p, L3_24XX_PHYS, L3_24XX_VIRT); if (BETWEEN(p, L4_24XX_PHYS, 
L4_24XX_SIZE)) return XLATE(p, L4_24XX_PHYS, L4_24XX_VIRT); } if (cpu_is_omap2420()) { if (BETWEEN(p, DSP_MEM_2420_PHYS, DSP_MEM_2420_SIZE)) return XLATE(p, DSP_MEM_2420_PHYS, DSP_MEM_2420_VIRT); if (BETWEEN(p, DSP_IPI_2420_PHYS, DSP_IPI_2420_SIZE)) return XLATE(p, DSP_IPI_2420_PHYS, DSP_IPI_2420_SIZE); if (BETWEEN(p, DSP_MMU_2420_PHYS, DSP_MMU_2420_SIZE)) return XLATE(p, DSP_MMU_2420_PHYS, DSP_MMU_2420_VIRT); } if (cpu_is_omap2430()) { if (BETWEEN(p, L4_WK_243X_PHYS, L4_WK_243X_SIZE)) return XLATE(p, L4_WK_243X_PHYS, L4_WK_243X_VIRT); if (BETWEEN(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_SIZE)) return XLATE(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_VIRT); if (BETWEEN(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_SIZE)) return XLATE(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_VIRT); if (BETWEEN(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_SIZE)) return XLATE(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_VIRT); } #endif #ifdef CONFIG_ARCH_OMAP3 if (cpu_is_ti816x()) { if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE)) return XLATE(p, L4_34XX_PHYS, L4_34XX_VIRT); } else if (cpu_is_omap34xx()) { if (BETWEEN(p, L3_34XX_PHYS, L3_34XX_SIZE)) return XLATE(p, L3_34XX_PHYS, L3_34XX_VIRT); if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE)) return XLATE(p, L4_34XX_PHYS, L4_34XX_VIRT); if (BETWEEN(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_SIZE)) return XLATE(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_VIRT); if (BETWEEN(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_SIZE)) return XLATE(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_VIRT); if (BETWEEN(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_SIZE)) return XLATE(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_VIRT); if (BETWEEN(p, L4_PER_34XX_PHYS, L4_PER_34XX_SIZE)) return XLATE(p, L4_PER_34XX_PHYS, L4_PER_34XX_VIRT); if (BETWEEN(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_SIZE)) return XLATE(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_VIRT); } #endif #ifdef CONFIG_ARCH_OMAP4 if (cpu_is_omap44xx()) { if (BETWEEN(p, L3_44XX_PHYS, L3_44XX_SIZE)) return XLATE(p, L3_44XX_PHYS, L3_44XX_VIRT); if (BETWEEN(p, L4_44XX_PHYS, L4_44XX_SIZE)) return XLATE(p, L4_44XX_PHYS, 
L4_44XX_VIRT); if (BETWEEN(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_SIZE)) return XLATE(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_VIRT); if (BETWEEN(p, OMAP44XX_EMIF1_PHYS, OMAP44XX_EMIF1_SIZE)) return XLATE(p, OMAP44XX_EMIF1_PHYS, \ OMAP44XX_EMIF1_VIRT); if (BETWEEN(p, OMAP44XX_EMIF2_PHYS, OMAP44XX_EMIF2_SIZE)) return XLATE(p, OMAP44XX_EMIF2_PHYS, \ OMAP44XX_EMIF2_VIRT); if (BETWEEN(p, OMAP44XX_DMM_PHYS, OMAP44XX_DMM_SIZE)) return XLATE(p, OMAP44XX_DMM_PHYS, OMAP44XX_DMM_VIRT); if (BETWEEN(p, L4_PER_44XX_PHYS, L4_PER_44XX_SIZE)) return XLATE(p, L4_PER_44XX_PHYS, L4_PER_44XX_VIRT); if (BETWEEN(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_SIZE)) return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT); } #endif return __arm_ioremap_caller(p, size, type, __builtin_return_address(0)); } EXPORT_SYMBOL(omap_ioremap); void omap_iounmap(volatile void __iomem *addr) { unsigned long virt = (unsigned long)addr; if (virt >= VMALLOC_START && virt < VMALLOC_END) __iounmap(addr); } EXPORT_SYMBOL(omap_iounmap);
gpl-2.0
coolcpu/matisse_kernel_crespo
fs/gfs2/trans.c
3082
4713
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_trans_begin - Prepare to start a transaction
 * @sdp: The GFS2 superblock
 * @blocks: The number of independent metadata blocks to protect
 * @revokes: The number of revokes to allow for
 *
 * Allocates and sets up a transaction, takes the shared transaction
 * glock, and reserves log space for it.  On success the transaction is
 * recorded in current->journal_info (exactly one transaction may be
 * active per task; the BUG_ON enforces this).
 *
 * Returns: 0 on success, -EROFS if the journal is dead, or another
 *          negative errno on failure.
 */
int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
		     unsigned int revokes)
{
	struct gfs2_trans *tr;
	int error;

	/* Nested transactions are not allowed, and a transaction that
	   touches neither blocks nor revokes is meaningless. */
	BUG_ON(current->journal_info);
	BUG_ON(blocks == 0 && revokes == 0);

	if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
		return -EROFS;

	tr = kzalloc(sizeof(struct gfs2_trans), GFP_NOFS);
	if (!tr)
		return -ENOMEM;

	/* Remember the caller for diagnostics (printed via print_symbol
	   when the reservation is overrun in gfs2_trans_end). */
	tr->tr_ip = (unsigned long)__builtin_return_address(0);
	tr->tr_blocks = blocks;
	tr->tr_revokes = revokes;
	/* Reservation: 1 block plus, if any data blocks are touched,
	   a fixed overhead of 6 blocks plus the blocks themselves, plus
	   however many blocks the packed revoke records need.
	   NOTE(review): the meaning of the constant 6 is not visible
	   here — presumably log header/metadata overhead; confirm
	   against the log accounting code. */
	tr->tr_reserved = 1;
	if (blocks)
		tr->tr_reserved += 6 + blocks;
	if (revokes)
		tr->tr_reserved += gfs2_struct2blk(sdp, revokes,
						   sizeof(u64));
	INIT_LIST_HEAD(&tr->tr_list_buf);

	/* Hold the transaction glock shared for the whole transaction. */
	gfs2_holder_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &tr->tr_t_gh);

	error = gfs2_glock_nq(&tr->tr_t_gh);
	if (error)
		goto fail_holder_uninit;

	error = gfs2_log_reserve(sdp, tr->tr_reserved);
	if (error)
		goto fail_gunlock;

	current->journal_info = tr;

	return 0;

fail_gunlock:
	gfs2_glock_dq(&tr->tr_t_gh);

fail_holder_uninit:
	gfs2_holder_uninit(&tr->tr_t_gh);
	kfree(tr);

	return error;
}

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 * Returns @blks to the pool of free log blocks and drops the shared
 * hold on sd_log_flush_lock taken by the matching reservation.
 */
static void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{

	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	/* The free count must never exceed the journal size. */
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	up_read(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_trans_end - End (commit) the current task's transaction
 * @sdp: The GFS2 superblock
 *
 * If the transaction never touched anything, the log reservation is
 * simply released; otherwise the transaction is committed to the log.
 * Either way the transaction glock is dropped and the struct freed.
 * Overruns of the block/revoke reservations are reported (with the
 * address recorded by gfs2_trans_begin) before withdrawing.
 */
void gfs2_trans_end(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr = current->journal_info;

	BUG_ON(!tr);
	current->journal_info = NULL;

	if (!tr->tr_touched) {
		/* Nothing was modified: give the reservation back. */
		gfs2_log_release(sdp, tr->tr_reserved);
		if (tr->tr_t_gh.gh_gl) {
			gfs2_glock_dq(&tr->tr_t_gh);
			gfs2_holder_uninit(&tr->tr_t_gh);
			kfree(tr);
		}
		return;
	}

	/* Report reservation overruns — these indicate a bug in the
	   caller's accounting when it started the transaction. */
	if (gfs2_assert_withdraw(sdp, tr->tr_num_buf <= tr->tr_blocks)) {
		fs_err(sdp, "tr_num_buf = %u, tr_blocks = %u ",
		       tr->tr_num_buf, tr->tr_blocks);
		print_symbol(KERN_WARNING "GFS2: Transaction created at: %s\n",
			     tr->tr_ip);
	}
	if (gfs2_assert_withdraw(sdp, tr->tr_num_revoke <= tr->tr_revokes)) {
		fs_err(sdp, "tr_num_revoke = %u, tr_revokes = %u ",
		       tr->tr_num_revoke, tr->tr_revokes);
		print_symbol(KERN_WARNING "GFS2: Transaction created at: %s\n",
			     tr->tr_ip);
	}

	gfs2_log_commit(sdp, tr);
	if (tr->tr_t_gh.gh_gl) {
		gfs2_glock_dq(&tr->tr_t_gh);
		gfs2_holder_uninit(&tr->tr_t_gh);
		kfree(tr);
	}

	/* Mounted synchronously: flush the log on every commit. */
	if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS)
		gfs2_log_flush(sdp, NULL);
}

/**
 * gfs2_trans_add_bh - Add a to-be-modified buffer to the current transaction
 * @gl: the glock the buffer belongs to
 * @bh: The buffer to add
 * @meta: True in the case of adding metadata
 *
 * Attaches bufdata to @bh on first use (asserting that an existing
 * bufdata belongs to the same glock) and queues it on the log via the
 * log operations vector.
 */
void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_bufdata *bd;

	bd = bh->b_private;
	if (bd)
		gfs2_assert(sdp, bd->bd_gl == gl);
	else {
		gfs2_attach_bufdata(gl, bh, meta);
		bd = bh->b_private;
	}
	lops_add(sdp, &bd->bd_le);
}

/**
 * gfs2_trans_add_revoke - Add a revoke to the current transaction
 * @sdp: The GFS2 superblock
 * @bd: The bufdata describing the block being revoked
 *
 * @bd must not be on any log or AIL list (the BUG_ONs check this)
 * before it is re-initialised as a revoke log element and queued.
 */
void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	BUG_ON(!list_empty(&bd->bd_le.le_list));
	BUG_ON(!list_empty(&bd->bd_ail_st_list));
	BUG_ON(!list_empty(&bd->bd_ail_gl_list));
	lops_init_le(&bd->bd_le, &gfs2_revoke_lops);
	lops_add(sdp, &bd->bd_le);
}

/**
 * gfs2_trans_add_unrevoke - Cancel pending revokes for a block range
 * @sdp: The GFS2 superblock
 * @blkno: The first block of the range
 * @len: The number of blocks in the range
 *
 * Walks the pending-revoke list under the log lock and removes every
 * revoke whose block falls inside [@blkno, @blkno + @len), crediting
 * the current transaction's tr_num_revoke_rm.  Stops early once @len
 * matches have been removed.
 */
void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
{
	struct gfs2_bufdata *bd, *tmp;
	struct gfs2_trans *tr = current->journal_info;
	unsigned int n = len;

	gfs2_log_lock(sdp);
	list_for_each_entry_safe(bd, tmp, &sdp->sd_log_le_revoke, bd_le.le_list) {
		if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) {
			list_del_init(&bd->bd_le.le_list);
			gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
			sdp->sd_log_num_revoke--;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
			tr->tr_num_revoke_rm++;
			/* At most one revoke can exist per block, so a
			   range of len blocks yields at most len hits. */
			if (--n == 0)
				break;
		}
	}
	gfs2_log_unlock(sdp);
}

/**
 * gfs2_trans_add_rg - Add a resource group to the current transaction
 * @rgd: The resource group descriptor
 */
void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd)
{
	lops_add(rgd->rd_sbd, &rgd->rd_le);
}
gpl-2.0
lostemp/lsk-3.4.47
drivers/infiniband/hw/mthca/mthca_main.c
5642
35476
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>

#include "mthca_dev.h"
#include "mthca_config_reg.h"
#include "mthca_cmd.h"
#include "mthca_profile.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG

/* Debug tracing level; writable at runtime via sysfs (mode 0644). */
int mthca_debug_level = 0;
module_param_named(debug_level, mthca_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_INFINIBAND_MTHCA_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

/* Without CONFIG_PCI_MSI the option is compiled out entirely. */
#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static int tune_pci = 0;
module_param(tune_pci, int, 0444);
MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");

DEFINE_MUTEX(mthca_device_mutex);

/* Default resource-table sizes used to build the HCA profile; each is
   overridable through the module parameters declared below. */
#define MTHCA_DEFAULT_NUM_QP            (1 << 16)
#define MTHCA_DEFAULT_RDB_PER_QP        (1 << 2)
#define MTHCA_DEFAULT_NUM_CQ            (1 << 16)
#define MTHCA_DEFAULT_NUM_MCG           (1 << 13)
#define MTHCA_DEFAULT_NUM_MPT           (1 << 17)
#define MTHCA_DEFAULT_NUM_MTT           (1 << 20)
#define MTHCA_DEFAULT_NUM_UDAV          (1 << 15)
#define MTHCA_DEFAULT_NUM_RESERVED_MTTS (1 << 18)
#define MTHCA_DEFAULT_NUM_UARC_SIZE     (1 << 18)

static struct mthca_profile hca_profile = {
	.num_qp             = MTHCA_DEFAULT_NUM_QP,
	.rdb_per_qp         = MTHCA_DEFAULT_RDB_PER_QP,
	.num_cq             = MTHCA_DEFAULT_NUM_CQ,
	.num_mcg            = MTHCA_DEFAULT_NUM_MCG,
	.num_mpt            = MTHCA_DEFAULT_NUM_MPT,
	.num_mtt            = MTHCA_DEFAULT_NUM_MTT,
	.num_udav           = MTHCA_DEFAULT_NUM_UDAV,          /* Tavor only */
	.fmr_reserved_mtts  = MTHCA_DEFAULT_NUM_RESERVED_MTTS, /* Tavor only */
	.uarc_size          = MTHCA_DEFAULT_NUM_UARC_SIZE,     /* Arbel only */
};

module_param_named(num_qp, hca_profile.num_qp, int, 0444);
MODULE_PARM_DESC(num_qp, "maximum number of QPs per HCA");

module_param_named(rdb_per_qp, hca_profile.rdb_per_qp, int, 0444);
MODULE_PARM_DESC(rdb_per_qp, "number of RDB buffers per QP");

module_param_named(num_cq, hca_profile.num_cq, int, 0444);
MODULE_PARM_DESC(num_cq, "maximum number of CQs per HCA");

module_param_named(num_mcg, hca_profile.num_mcg, int, 0444);
MODULE_PARM_DESC(num_mcg, "maximum number of multicast groups per HCA");

module_param_named(num_mpt, hca_profile.num_mpt, int, 0444);
MODULE_PARM_DESC(num_mpt,
		"maximum number of memory protection table entries per HCA");

module_param_named(num_mtt, hca_profile.num_mtt, int, 0444);
MODULE_PARM_DESC(num_mtt,
		 "maximum number of memory translation table segments per HCA");

module_param_named(num_udav, hca_profile.num_udav, int, 0444);
MODULE_PARM_DESC(num_udav, "maximum number of UD address vectors per HCA");

module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
MODULE_PARM_DESC(fmr_reserved_mtts,
		 "number of memory translation table segments reserved for FMR");

static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");

static char mthca_version[] __devinitdata =
	DRV_NAME ": Mellanox InfiniBand HCA driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

/*
 * Optionally raise the device's PCI/PCI-X/PCIe burst parameters above
 * what the BIOS configured.  No-op unless the tune_pci module
 * parameter is set.  Returns 0 on success or -ENODEV on failure.
 */
static int mthca_tune_pci(struct mthca_dev *mdev)
{
	if (!tune_pci)
		return 0;

	/* First try to max out Read Byte Count */
	if (pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) {
		if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) {
			mthca_err(mdev, "Couldn't set PCI-X max read count, "
				"aborting.\n");
			return -ENODEV;
		}
	} else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
		mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");

	if (pci_is_pcie(mdev->pdev)) {
		if (pcie_set_readrq(mdev->pdev, 4096)) {
			mthca_err(mdev, "Couldn't write PCI Express read request, "
				"aborting.\n");
			return -ENODEV;
		}
	} else if (mdev->mthca_flags & MTHCA_FLAG_PCIE)
		mthca_info(mdev, "No PCI Express capability, "
			   "not setting Max Read Request Size.\n");

	return 0;
}

/*
 * Query the device limits from firmware, validate them against what
 * this driver/kernel can handle, and translate them into the
 * mdev->limits and mdev->device_cap_flags used everywhere else.
 */
static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
{
	int err;

	mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;

	err = mthca_QUERY_DEV_LIM(mdev, dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command returned %d"
				", aborting.\n", err);
		return err;
	}
	if (dev_lim->min_page_sz > PAGE_SIZE) {
		mthca_err(mdev, "HCA minimum page size of %d bigger than "
			  "kernel PAGE_SIZE of %ld, aborting.\n",
			  dev_lim->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_lim->num_ports > MTHCA_MAX_PORTS) {
		mthca_err(mdev, "HCA has %d ports, but we only support %d, "
			  "aborting.\n",
			  dev_lim->num_ports, MTHCA_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) {
		mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than "
			  "PCI resource 2 size of 0x%llx, aborting.\n",
			  dev_lim->uar_size,
			  (unsigned long long)pci_resource_len(mdev->pdev, 2));
		return -ENODEV;
	}

	mdev->limits.num_ports          = dev_lim->num_ports;
	mdev->limits.vl_cap             = dev_lim->max_vl;
	mdev->limits.mtu_cap            = dev_lim->max_mtu;
	mdev->limits.gid_table_len      = dev_lim->max_gids;
	mdev->limits.pkey_table_len     = dev_lim->max_pkeys;
	mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
	/*
	 * Need to allow for worst case send WQE overhead and check
	 * whether max_desc_sz imposes a lower limit than max_sg; UD
	 * send has the biggest overhead.
	 */
	mdev->limits.max_sg		= min_t(int, dev_lim->max_sg,
					      (dev_lim->max_desc_sz -
					       sizeof (struct mthca_next_seg) -
					       (mthca_is_memfree(mdev) ?
						sizeof (struct mthca_arbel_ud_seg) :
						sizeof (struct mthca_tavor_ud_seg))) /
						sizeof (struct mthca_data_seg));
	mdev->limits.max_wqes           = dev_lim->max_qp_sz;
	mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
	mdev->limits.reserved_qps       = dev_lim->reserved_qps;
	mdev->limits.max_srq_wqes       = dev_lim->max_srq_sz;
	mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
	mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
	mdev->limits.max_desc_sz        = dev_lim->max_desc_sz;
	mdev->limits.max_srq_sge	= mthca_max_srq_sge(mdev);
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	mdev->limits.max_cqes           = dev_lim->max_cq_sz - 1;
	mdev->limits.reserved_cqs       = dev_lim->reserved_cqs;
	mdev->limits.reserved_eqs       = dev_lim->reserved_eqs;
	mdev->limits.reserved_mtts      = dev_lim->reserved_mtts;
	mdev->limits.reserved_mrws      = dev_lim->reserved_mrws;
	mdev->limits.reserved_uars      = dev_lim->reserved_uars;
	mdev->limits.reserved_pds       = dev_lim->reserved_pds;
	mdev->limits.port_width_cap     = dev_lim->max_port_width;
	mdev->limits.page_size_cap      = ~(u32) (dev_lim->min_page_sz - 1);
	mdev->limits.flags              = dev_lim->flags;
	/*
	 * For old FW that doesn't return static rate support, use a
	 * value of 0x3 (only static rate values of 0 or 1 are handled),
	 * except on Sinai, where even old FW can handle static rate
	 * values of 2 and 3.
	 */
	if (dev_lim->stat_rate_support)
		mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
	else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		mdev->limits.stat_rate_support = 0xf;
	else
		mdev->limits.stat_rate_support = 0x3;

	/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
	   May be doable since hardware supports it for SRQ.

	   IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver.

	   IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not
	   supported by driver. */
	mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI)
		mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI;

	if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG)
		mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;

	if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE)
		mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;

	if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
		mdev->mthca_flags |= MTHCA_FLAG_SRQ;

	if (mthca_is_memfree(mdev))
		if (dev_lim->flags & DEV_LIM_FLAG_IPOIB_CSUM)
			mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	return 0;
}

/*
 * Bring up a Tavor-family HCA (firmware running from HCA-attached
 * DDR): enable the device, query firmware/DDR/limits, build the
 * resource profile, and issue INIT_HCA.  On any failure the device is
 * disabled again via SYS_DIS.
 */
static int mthca_init_tavor(struct mthca_dev *mdev)
{
	s64 size;
	int err;
	struct mthca_dev_lim        dev_lim;
	struct mthca_profile        profile;
	struct mthca_init_hca_param init_hca;

	err = mthca_SYS_EN(mdev);
	if (err) {
		mthca_err(mdev, "SYS_EN command returned %d, aborting.\n", err);
		return err;
	}

	err = mthca_QUERY_FW(mdev);
	if (err) {
		mthca_err(mdev, "QUERY_FW command returned %d,"
			  " aborting.\n", err);
		goto err_disable;
	}
	err = mthca_QUERY_DDR(mdev);
	if (err) {
		mthca_err(mdev, "QUERY_DDR command returned %d, aborting.\n", err);
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command returned %d, aborting.\n", err);
		goto err_disable;
	}

	profile = hca_profile;
	profile.num_uar   = dev_lim.uar_size / PAGE_SIZE;
	profile.uarc_size = 0;	/* UAR context is an Arbel-only concept */
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		profile.num_srq = dev_lim.max_srqs;

	size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (size < 0) {
		err = size;
		goto err_disable;
	}

	err = mthca_INIT_HCA(mdev, &init_hca);
	if (err) {
		mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
		goto err_disable;
	}

	return 0;

err_disable:
	mthca_SYS_DIS(mdev);

	return err;
}

/*
 * MemFree (Arbel) path: allocate host memory for the firmware image,
 * map it into the device and start the firmware running.
 */
static int mthca_load_fw(struct mthca_dev *mdev)
{
	int err;

	/* FIXME: use HCA-attached memory for FW if present */

	mdev->fw.arbel.fw_icm =
		mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
				GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.fw_icm) {
		mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm);
	if (err) {
		mthca_err(mdev, "MAP_FA command returned %d, aborting.\n", err);
		goto err_free;
	}
	err = mthca_RUN_FW(mdev);
	if (err) {
		mthca_err(mdev, "RUN_FW command returned %d, aborting.\n", err);
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mthca_UNMAP_FA(mdev);

err_free:
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
	return err;
}

/*
 * Allocate and map all the InfiniHost Context Memory (ICM) tables a
 * MemFree HCA needs: aux area, EQ/MTT/MPT/QP/EQP/RDB/CQ/SRQ/MCG
 * contexts.  Each table that fails unwinds every table mapped before
 * it via the goto chain.  @icm_size comes from mthca_make_profile().
 */
static int mthca_init_icm(struct mthca_dev *mdev,
			  struct mthca_dev_lim *dev_lim,
			  struct mthca_init_hca_param *init_hca,
			  u64 icm_size)
{
	u64 aux_pages;
	int err;

	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
	if (err) {
		mthca_err(mdev, "SET_ICM_SIZE command returned %d, aborting.\n", err);
		return err;
	}

	mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		  (unsigned long long) icm_size >> 10,
		  (unsigned long long) aux_pages << 2);

	mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
						 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.aux_icm) {
		mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm);
	if (err) {
		mthca_err(mdev, "MAP_ICM_AUX returned %d, aborting.\n", err);
		goto err_free_aux;
	}

	err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
	if (err) {
		mthca_err(mdev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_aux;
	}

	/* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts *
					   mdev->limits.mtt_seg_size,
					   dma_get_cache_alignment()) /
		mdev->limits.mtt_seg_size;

	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
							 mdev->limits.mtt_seg_size,
							 mdev->limits.num_mtt_segs,
							 mdev->limits.reserved_mtts,
							 1, 0);
	if (!mdev->mr_table.mtt_table) {
		mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_eq;
	}

	mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
							 dev_lim->mpt_entry_sz,
							 mdev->limits.num_mpts,
							 mdev->limits.reserved_mrws,
							 1, 1);
	if (!mdev->mr_table.mpt_table) {
		mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mtt;
	}

	mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
							dev_lim->qpc_entry_sz,
							mdev->limits.num_qps,
							mdev->limits.reserved_qps,
							0, 0);
	if (!mdev->qp_table.qp_table) {
		mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mpt;
	}

	mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
							 dev_lim->eqpc_entry_sz,
							 mdev->limits.num_qps,
							 mdev->limits.reserved_qps,
							 0, 0);
	if (!mdev->qp_table.eqp_table) {
		mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_qp;
	}

	mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
							 MTHCA_RDB_ENTRY_SIZE,
							 mdev->limits.num_qps <<
							 mdev->qp_table.rdb_shift, 0,
							 0, 0);
	if (!mdev->qp_table.rdb_table) {
		mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
		err = -ENOMEM;
		goto err_unmap_eqp;
	}

	mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
						     dev_lim->cqc_entry_sz,
						     mdev->limits.num_cqs,
						     mdev->limits.reserved_cqs,
						     0, 0);
	if (!mdev->cq_table.table) {
		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_rdb;
	}

	if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
		mdev->srq_table.table =
			mthca_alloc_icm_table(mdev, init_hca->srqc_base,
					      dev_lim->srq_entry_sz,
					      mdev->limits.num_srqs,
					      mdev->limits.reserved_srqs,
					      0, 0);
		if (!mdev->srq_table.table) {
			mthca_err(mdev, "Failed to map SRQ context memory, "
				  "aborting.\n");
			err = -ENOMEM;
			goto err_unmap_cq;
		}
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
						      MTHCA_MGM_ENTRY_SIZE,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      0, 0);
	if (!mdev->mcg_table.table) {
		mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		mthca_free_icm_table(mdev, mdev->srq_table.table);

err_unmap_cq:
	mthca_free_icm_table(mdev, mdev->cq_table.table);

err_unmap_rdb:
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);

err_unmap_eqp:
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);

err_unmap_qp:
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);

err_unmap_mpt:
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);

err_unmap_mtt:
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);

err_unmap_eq:
	mthca_unmap_eq_icm(mdev);

err_unmap_aux:
	mthca_UNMAP_ICM_AUX(mdev);

err_free_aux:
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);

	return err;
}

/* Tear down everything mthca_init_icm() set up, in reverse order. */
static void mthca_free_icms(struct mthca_dev *mdev)
{

	mthca_free_icm_table(mdev, mdev->mcg_table.table);
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		mthca_free_icm_table(mdev, mdev->srq_table.table);
	mthca_free_icm_table(mdev, mdev->cq_table.table);
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
	mthca_unmap_eq_icm(mdev);

	mthca_UNMAP_ICM_AUX(mdev);
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
}

/*
 * Bring up an Arbel-family (MemFree) HCA: load firmware into host
 * memory, query limits, build the profile and the ICM tables, then
 * issue INIT_HCA.  ENABLE_LAM returning -EAGAIN just means there is
 * no HCA-attached memory, which is the normal MemFree case.
 */
static int mthca_init_arbel(struct mthca_dev *mdev)
{
	struct mthca_dev_lim        dev_lim;
	struct mthca_profile        profile;
	struct mthca_init_hca_param init_hca;
	s64 icm_size;
	int err;

	err = mthca_QUERY_FW(mdev);
	if (err) {
		mthca_err(mdev, "QUERY_FW command failed %d, aborting.\n", err);
		return err;
	}

	err = mthca_ENABLE_LAM(mdev);
	if (err == -EAGAIN) {
		mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
		mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
	} else if (err) {
		mthca_err(mdev, "ENABLE_LAM returned %d, aborting.\n", err);
		return err;
	}

	err = mthca_load_fw(mdev);
	if (err) {
		mthca_err(mdev, "Loading FW returned %d, aborting.\n", err);
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM returned %d, aborting.\n", err);
		goto err_stop_fw;
	}

	profile = hca_profile;
	profile.num_uar  = dev_lim.uar_size / PAGE_SIZE;
	profile.num_udav = 0;	/* UD address vectors are Tavor-only */
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		profile.num_srq = dev_lim.max_srqs;

	icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mthca_INIT_HCA(mdev, &init_hca);
	if (err) {
		mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
		goto err_free_icm;
	}

	return 0;

err_free_icm:
	mthca_free_icms(mdev);

err_stop_fw:
	mthca_UNMAP_FA(mdev);
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

err_disable:
	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
		mthca_DISABLE_LAM(mdev);

	return err;
}

/*
 * Shut the HCA down, undoing mthca_init_arbel() or mthca_init_tavor()
 * depending on which family the device is.
 */
static void mthca_close_hca(struct mthca_dev *mdev)
{
	mthca_CLOSE_HCA(mdev, 0);

	if (mthca_is_memfree(mdev)) {
		mthca_free_icms(mdev);

		mthca_UNMAP_FA(mdev);
		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

		if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
			mthca_DISABLE_LAM(mdev);
	} else
		mthca_SYS_DIS(mdev);
}

/*
 * Family-dispatching HCA init: run the Arbel or Tavor bring-up, then
 * query adapter identification (INTA pin, revision, board id).
 */
static int mthca_init_hca(struct mthca_dev *mdev)
{
	int err;
	struct mthca_adapter adapter;

	if (mthca_is_memfree(mdev))
		err = mthca_init_arbel(mdev);
	else
		err = mthca_init_tavor(mdev);

	if (err)
		return err;

	err = mthca_QUERY_ADAPTER(mdev, &adapter);
	if (err) {
		mthca_err(mdev, "QUERY_ADAPTER command returned %d, aborting.\n", err);
		goto err_close;
	}

	mdev->eq_table.inta_pin = adapter.inta_pin;
	if (!mthca_is_memfree(mdev))
		mdev->rev_id = adapter.revision_id;
	memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);

	return 0;

err_close:
	mthca_close_hca(mdev);
	return err;
}

/*
 * Set up all driver-side resource tables (UAR, PD, MR, EQ, CQ, SRQ,
 * QP, AV, MCG) on an already-initialized HCA, switching command
 * processing to event-driven mode along the way and verifying
 * interrupt delivery with a NOP command.  Failures unwind in reverse
 * order through the goto chain.
 */
static int mthca_setup_hca(struct mthca_dev *dev)
{
	int err;

	MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);

	err = mthca_init_uar_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "user access region table, aborting.\n");
		return err;
	}

	err = mthca_uar_alloc(dev, &dev->driver_uar);
	if (err) {
		mthca_err(dev, "Failed to allocate driver access region, "
			  "aborting.\n");
		goto err_uar_table_free;
	}

	dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->kar) {
		mthca_err(dev, "Couldn't map kernel access region, "
			  "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mthca_init_pd_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mthca_init_mr_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mthca_pd_alloc(dev, 1, &dev->driver_pd);
	if (err) {
		mthca_err(dev, "Failed to create driver PD, "
			  "aborting.\n");
		goto err_mr_table_free;
	}

	err = mthca_init_eq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "event queue table, aborting.\n");
		goto err_pd_free;
	}

	err = mthca_cmd_use_events(dev);
	if (err) {
		mthca_err(dev, "Failed to switch to event-driven "
			  "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	/* NOP doubles as an interrupt-delivery smoke test: if it never
	   raises an IRQ, the caller retries without MSI-X. */
	err = mthca_NOP(dev);
	if (err) {
		if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
			mthca_warn(dev, "NOP command failed to generate interrupt "
				   "(IRQ %d).\n",
				   dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector);
			mthca_warn(dev, "Trying again with MSI-X disabled.\n");
		} else {
			mthca_err(dev, "NOP command failed to generate interrupt "
				  "(IRQ %d), aborting.\n",
				  dev->pdev->irq);
			mthca_err(dev, "BIOS or ACPI interrupt routing "
				  "problem?\n");
		}

		goto err_cmd_poll;
	}

	mthca_dbg(dev, "NOP command IRQ test passed\n");

	err = mthca_init_cq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mthca_init_srq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mthca_init_qp_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mthca_init_av_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "address vector table, aborting.\n");
		goto err_qp_table_free;
	}

	err = mthca_init_mcg_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "multicast group table, aborting.\n");
		goto err_av_table_free;
	}

	return 0;

err_av_table_free:
	mthca_cleanup_av_table(dev);

err_qp_table_free:
	mthca_cleanup_qp_table(dev);

err_srq_table_free:
	mthca_cleanup_srq_table(dev);

err_cq_table_free:
	mthca_cleanup_cq_table(dev);

err_cmd_poll:
	mthca_cmd_use_polling(dev);

err_eq_table_free:
	mthca_cleanup_eq_table(dev);

err_pd_free:
	mthca_pd_free(dev, &dev->driver_pd);

err_mr_table_free:
	mthca_cleanup_mr_table(dev);

err_pd_table_free:
	mthca_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(dev->kar);

err_uar_free:
	mthca_uar_free(dev, &dev->driver_uar);

err_uar_table_free:
	mthca_cleanup_uar_table(dev);
	return err;
}

/*
 * Try to enable the three MSI-X vectors (completion, async, command
 * EQs).  On success the vector numbers are stored in the EQ table; on
 * failure (including "fewer than 3 vectors available") the caller
 * falls back to legacy interrupts.
 */
static int mthca_enable_msi_x(struct mthca_dev *mdev)
{
	struct msix_entry entries[3];
	int err;

	entries[0].entry = 0;
	entries[1].entry = 1;
	entries[2].entry = 2;

	err = pci_enable_msix(mdev->pdev, entries, ARRAY_SIZE(entries));
	if (err) {
		if (err > 0)
			mthca_info(mdev, "Only %d MSI-X vectors available, "
				   "not using MSI-X\n", err);
		return err;
	}

	mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
	mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
	mdev->eq_table.eq[MTHCA_EQ_CMD  ].msi_x_vector = entries[2].vector;

	return 0;
}

/* Types of supported HCA */
enum { TAVOR, /* MT23108 */ ARBEL_COMPAT, /* MT25208 in Tavor compat mode */ ARBEL_NATIVE, /* MT25208 with extended features */ SINAI /* MT25204 */ }; #define MTHCA_FW_VER(major, minor, subminor) \ (((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor)) static struct { u64 latest_fw; u32 flags; } mthca_hca_table[] = { [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 5, 0), .flags = 0 }, [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200), .flags = MTHCA_FLAG_PCIE }, [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 3, 0), .flags = MTHCA_FLAG_MEMFREE | MTHCA_FLAG_PCIE }, [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 2, 0), .flags = MTHCA_FLAG_MEMFREE | MTHCA_FLAG_PCIE | MTHCA_FLAG_SINAI_OPT } }; static int __mthca_init_one(struct pci_dev *pdev, int hca_type) { int ddr_hidden = 0; int err; struct mthca_dev *mdev; printk(KERN_INFO PFX "Initializing %s\n", pci_name(pdev)); err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device, " "aborting.\n"); return err; } /* * Check for BARs. 
We expect 0: 1MB, 2: 8MB, 4: DDR (may not * be present) */ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || pci_resource_len(pdev, 0) != 1 << 20) { dev_err(&pdev->dev, "Missing DCS, aborting.\n"); err = -ENODEV; goto err_disable_pdev; } if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Missing UAR, aborting.\n"); err = -ENODEV; goto err_disable_pdev; } if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM)) ddr_hidden = 1; err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(&pdev->dev, "Cannot obtain PCI resources, " "aborting.\n"); goto err_disable_pdev; } pci_set_master(pdev); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); goto err_free_res; } } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " "consistent PCI DMA mask.\n"); err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " "aborting.\n"); goto err_free_res; } } /* We can handle large RDMA requests, so allow larger segments. */ dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev); if (!mdev) { dev_err(&pdev->dev, "Device struct alloc failed, " "aborting.\n"); err = -ENOMEM; goto err_free_res; } mdev->pdev = pdev; mdev->mthca_flags = mthca_hca_table[hca_type].flags; if (ddr_hidden) mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN; /* * Now reset the HCA before we touch the PCI capabilities or * attempt a firmware command, since a boot ROM may have left * the HCA in an undefined state. 
*/ err = mthca_reset(mdev); if (err) { mthca_err(mdev, "Failed to reset HCA, aborting.\n"); goto err_free_dev; } if (mthca_cmd_init(mdev)) { mthca_err(mdev, "Failed to init command interface, aborting.\n"); goto err_free_dev; } err = mthca_tune_pci(mdev); if (err) goto err_cmd; err = mthca_init_hca(mdev); if (err) goto err_cmd; if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) { mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n", (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, (int) (mdev->fw_ver & 0xffff), (int) (mthca_hca_table[hca_type].latest_fw >> 32), (int) (mthca_hca_table[hca_type].latest_fw >> 16) & 0xffff, (int) (mthca_hca_table[hca_type].latest_fw & 0xffff)); mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n"); } if (msi_x && !mthca_enable_msi_x(mdev)) mdev->mthca_flags |= MTHCA_FLAG_MSI_X; err = mthca_setup_hca(mdev); if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) { if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) pci_disable_msix(pdev); mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X; err = mthca_setup_hca(mdev); } if (err) goto err_close; err = mthca_register_device(mdev); if (err) goto err_cleanup; err = mthca_create_agents(mdev); if (err) goto err_unregister; pci_set_drvdata(pdev, mdev); mdev->hca_type = hca_type; mdev->active = true; return 0; err_unregister: mthca_unregister_device(mdev); err_cleanup: mthca_cleanup_mcg_table(mdev); mthca_cleanup_av_table(mdev); mthca_cleanup_qp_table(mdev); mthca_cleanup_srq_table(mdev); mthca_cleanup_cq_table(mdev); mthca_cmd_use_polling(mdev); mthca_cleanup_eq_table(mdev); mthca_pd_free(mdev, &mdev->driver_pd); mthca_cleanup_mr_table(mdev); mthca_cleanup_pd_table(mdev); mthca_cleanup_uar_table(mdev); err_close: if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) pci_disable_msix(pdev); mthca_close_hca(mdev); err_cmd: mthca_cmd_cleanup(mdev); err_free_dev: ib_dealloc_device(&mdev->ib_dev); err_free_res: pci_release_regions(pdev); err_disable_pdev: 
pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return err; } static void __mthca_remove_one(struct pci_dev *pdev) { struct mthca_dev *mdev = pci_get_drvdata(pdev); int p; if (mdev) { mthca_free_agents(mdev); mthca_unregister_device(mdev); for (p = 1; p <= mdev->limits.num_ports; ++p) mthca_CLOSE_IB(mdev, p); mthca_cleanup_mcg_table(mdev); mthca_cleanup_av_table(mdev); mthca_cleanup_qp_table(mdev); mthca_cleanup_srq_table(mdev); mthca_cleanup_cq_table(mdev); mthca_cmd_use_polling(mdev); mthca_cleanup_eq_table(mdev); mthca_pd_free(mdev, &mdev->driver_pd); mthca_cleanup_mr_table(mdev); mthca_cleanup_pd_table(mdev); iounmap(mdev->kar); mthca_uar_free(mdev, &mdev->driver_uar); mthca_cleanup_uar_table(mdev); mthca_close_hca(mdev); mthca_cmd_cleanup(mdev); if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) pci_disable_msix(pdev); ib_dealloc_device(&mdev->ib_dev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } } int __mthca_restart_one(struct pci_dev *pdev) { struct mthca_dev *mdev; int hca_type; mdev = pci_get_drvdata(pdev); if (!mdev) return -ENODEV; hca_type = mdev->hca_type; __mthca_remove_one(pdev); return __mthca_init_one(pdev, hca_type); } static int __devinit mthca_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { int ret; mutex_lock(&mthca_device_mutex); printk_once(KERN_INFO "%s", mthca_version); if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) { printk(KERN_ERR PFX "%s has invalid driver data %lx\n", pci_name(pdev), id->driver_data); mutex_unlock(&mthca_device_mutex); return -ENODEV; } ret = __mthca_init_one(pdev, id->driver_data); mutex_unlock(&mthca_device_mutex); return ret; } static void __devexit mthca_remove_one(struct pci_dev *pdev) { mutex_lock(&mthca_device_mutex); __mthca_remove_one(pdev); mutex_unlock(&mthca_device_mutex); } static struct pci_device_id mthca_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR), .driver_data = TAVOR }, { 
PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_TAVOR), .driver_data = TAVOR }, { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT), .driver_data = ARBEL_COMPAT }, { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT), .driver_data = ARBEL_COMPAT }, { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL), .driver_data = ARBEL_NATIVE }, { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL), .driver_data = ARBEL_NATIVE }, { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI), .driver_data = SINAI }, { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI), .driver_data = SINAI }, { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI_OLD), .driver_data = SINAI }, { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI_OLD), .driver_data = SINAI }, { 0, } }; MODULE_DEVICE_TABLE(pci, mthca_pci_table); static struct pci_driver mthca_driver = { .name = DRV_NAME, .id_table = mthca_pci_table, .probe = mthca_init_one, .remove = __devexit_p(mthca_remove_one) }; static void __init __mthca_check_profile_val(const char *name, int *pval, int pval_default) { /* value must be positive and power of 2 */ int old_pval = *pval; if (old_pval <= 0) *pval = pval_default; else *pval = roundup_pow_of_two(old_pval); if (old_pval != *pval) { printk(KERN_WARNING PFX "Invalid value %d for %s in module parameter.\n", old_pval, name); printk(KERN_WARNING PFX "Corrected %s to %d.\n", name, *pval); } } #define mthca_check_profile_val(name, default) \ __mthca_check_profile_val(#name, &hca_profile.name, default) static void __init mthca_validate_profile(void) { mthca_check_profile_val(num_qp, MTHCA_DEFAULT_NUM_QP); mthca_check_profile_val(rdb_per_qp, MTHCA_DEFAULT_RDB_PER_QP); mthca_check_profile_val(num_cq, MTHCA_DEFAULT_NUM_CQ); mthca_check_profile_val(num_mcg, MTHCA_DEFAULT_NUM_MCG); mthca_check_profile_val(num_mpt, MTHCA_DEFAULT_NUM_MPT); mthca_check_profile_val(num_mtt, 
MTHCA_DEFAULT_NUM_MTT); mthca_check_profile_val(num_udav, MTHCA_DEFAULT_NUM_UDAV); mthca_check_profile_val(fmr_reserved_mtts, MTHCA_DEFAULT_NUM_RESERVED_MTTS); if (hca_profile.fmr_reserved_mtts >= hca_profile.num_mtt) { printk(KERN_WARNING PFX "Invalid fmr_reserved_mtts module parameter %d.\n", hca_profile.fmr_reserved_mtts); printk(KERN_WARNING PFX "(Must be smaller than num_mtt %d)\n", hca_profile.num_mtt); hca_profile.fmr_reserved_mtts = hca_profile.num_mtt / 2; printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n", hca_profile.fmr_reserved_mtts); } if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) { printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n", log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8)); log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8); } } static int __init mthca_init(void) { int ret; mthca_validate_profile(); ret = mthca_catas_init(); if (ret) return ret; ret = pci_register_driver(&mthca_driver); if (ret < 0) { mthca_catas_cleanup(); return ret; } return 0; } static void __exit mthca_cleanup(void) { pci_unregister_driver(&mthca_driver); mthca_catas_cleanup(); } module_init(mthca_init); module_exit(mthca_cleanup);
gpl-2.0
zaventh/android_kernel_lge_hammerhead
drivers/staging/android/binder.c
11
104827
/* binder.c * * Android IPC Subsystem * * Copyright (C) 2007-2008 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <asm/cacheflush.h> #include <linux/fdtable.h> #include <linux/file.h> #include <linux/freezer.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/miscdevice.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/nsproxy.h> #include <linux/poll.h> #include <linux/debugfs.h> #include <linux/rbtree.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/security.h> #include "binder.h" static DEFINE_MUTEX(binder_lock); static DEFINE_MUTEX(binder_deferred_lock); static DEFINE_MUTEX(binder_mmap_lock); static HLIST_HEAD(binder_procs); static HLIST_HEAD(binder_deferred_list); static HLIST_HEAD(binder_dead_nodes); static struct dentry *binder_debugfs_dir_entry_root; static struct dentry *binder_debugfs_dir_entry_proc; static struct binder_node *binder_context_mgr_node; static uid_t binder_context_mgr_uid = -1; static int binder_last_id; static struct workqueue_struct *binder_deferred_workqueue; #define BINDER_DEBUG_ENTRY(name) \ static int binder_##name##_open(struct inode *inode, struct file *file) \ { \ return single_open(file, binder_##name##_show, inode->i_private); \ } \ \ static const struct file_operations binder_##name##_fops = { \ .owner = THIS_MODULE, \ .open = binder_##name##_open, \ .read = seq_read, \ .llseek = seq_lseek, \ .release = single_release, \ } static int binder_proc_show(struct 
seq_file *m, void *unused); BINDER_DEBUG_ENTRY(proc); /* This is only defined in include/asm-arm/sizes.h */ #ifndef SZ_1K #define SZ_1K 0x400 #endif #ifndef SZ_4M #define SZ_4M 0x400000 #endif #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) enum { BINDER_DEBUG_USER_ERROR = 1U << 0, BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, BINDER_DEBUG_OPEN_CLOSE = 1U << 3, BINDER_DEBUG_DEAD_BINDER = 1U << 4, BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, BINDER_DEBUG_READ_WRITE = 1U << 6, BINDER_DEBUG_USER_REFS = 1U << 7, BINDER_DEBUG_THREADS = 1U << 8, BINDER_DEBUG_TRANSACTION = 1U << 9, BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, BINDER_DEBUG_FREE_BUFFER = 1U << 11, BINDER_DEBUG_INTERNAL_REFS = 1U << 12, BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, BINDER_DEBUG_PRIORITY_CAP = 1U << 14, BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, BINDER_DEBUG_TOP_ERRORS = 1U << 16, }; static uint32_t binder_debug_mask; module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); static bool binder_debug_no_lock; module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); static int binder_stop_on_user_error; static int binder_set_stop_on_user_error(const char *val, struct kernel_param *kp) { int ret; ret = param_set_int(val, kp); if (binder_stop_on_user_error < 2) wake_up(&binder_user_error_wait); return ret; } module_param_call(stop_on_user_error, binder_set_stop_on_user_error, param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); #define binder_debug(mask, x...) \ do { \ if (binder_debug_mask & mask) \ printk(KERN_INFO x); \ } while (0) #define binder_user_error(x...) 
\ do { \ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ printk(KERN_INFO x); \ if (binder_stop_on_user_error) \ binder_stop_on_user_error = 2; \ } while (0) enum binder_stat_types { BINDER_STAT_PROC, BINDER_STAT_THREAD, BINDER_STAT_NODE, BINDER_STAT_REF, BINDER_STAT_DEATH, BINDER_STAT_TRANSACTION, BINDER_STAT_TRANSACTION_COMPLETE, BINDER_STAT_COUNT }; struct binder_stats { int br[_IOC_NR(BR_FAILED_REPLY) + 1]; int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; int obj_created[BINDER_STAT_COUNT]; int obj_deleted[BINDER_STAT_COUNT]; }; static struct binder_stats binder_stats; static inline void binder_stats_deleted(enum binder_stat_types type) { binder_stats.obj_deleted[type]++; } static inline void binder_stats_created(enum binder_stat_types type) { binder_stats.obj_created[type]++; } struct binder_transaction_log_entry { int debug_id; int call_type; int from_proc; int from_thread; int target_handle; int to_proc; int to_thread; int to_node; int data_size; int offsets_size; }; struct binder_transaction_log { int next; int full; struct binder_transaction_log_entry entry[32]; }; static struct binder_transaction_log binder_transaction_log; static struct binder_transaction_log binder_transaction_log_failed; static struct binder_transaction_log_entry *binder_transaction_log_add( struct binder_transaction_log *log) { struct binder_transaction_log_entry *e; e = &log->entry[log->next]; memset(e, 0, sizeof(*e)); log->next++; if (log->next == ARRAY_SIZE(log->entry)) { log->next = 0; log->full = 1; } return e; } struct binder_work { struct list_head entry; enum { BINDER_WORK_TRANSACTION = 1, BINDER_WORK_TRANSACTION_COMPLETE, BINDER_WORK_NODE, BINDER_WORK_DEAD_BINDER, BINDER_WORK_DEAD_BINDER_AND_CLEAR, BINDER_WORK_CLEAR_DEATH_NOTIFICATION, } type; }; struct binder_node { int debug_id; struct binder_work work; union { struct rb_node rb_node; struct hlist_node dead_node; }; struct binder_proc *proc; struct hlist_head refs; int internal_strong_refs; int local_weak_refs; int 
local_strong_refs; void __user *ptr; void __user *cookie; unsigned has_strong_ref:1; unsigned pending_strong_ref:1; unsigned has_weak_ref:1; unsigned pending_weak_ref:1; unsigned has_async_transaction:1; unsigned accept_fds:1; unsigned min_priority:8; struct list_head async_todo; }; struct binder_ref_death { struct binder_work work; void __user *cookie; }; struct binder_ref { /* Lookups needed: */ /* node + proc => ref (transaction) */ /* desc + proc => ref (transaction, inc/dec ref) */ /* node => refs + procs (proc exit) */ int debug_id; struct rb_node rb_node_desc; struct rb_node rb_node_node; struct hlist_node node_entry; struct binder_proc *proc; struct binder_node *node; uint32_t desc; int strong; int weak; struct binder_ref_death *death; }; struct binder_buffer { struct list_head entry; /* free and allocated entries by address */ struct rb_node rb_node; /* free entry by size or allocated entry */ /* by address */ unsigned free:1; unsigned allow_user_free:1; unsigned async_transaction:1; unsigned debug_id:29; struct binder_transaction *transaction; struct binder_node *target_node; size_t data_size; size_t offsets_size; uint8_t data[0]; }; enum binder_deferred_state { BINDER_DEFERRED_PUT_FILES = 0x01, BINDER_DEFERRED_FLUSH = 0x02, BINDER_DEFERRED_RELEASE = 0x04, }; struct binder_proc { struct hlist_node proc_node; struct rb_root threads; struct rb_root nodes; struct rb_root refs_by_desc; struct rb_root refs_by_node; int pid; struct vm_area_struct *vma; struct mm_struct *vma_vm_mm; struct task_struct *tsk; struct files_struct *files; struct hlist_node deferred_work_node; int deferred_work; void *buffer; ptrdiff_t user_buffer_offset; struct list_head buffers; struct rb_root free_buffers; struct rb_root allocated_buffers; size_t free_async_space; struct page **pages; size_t buffer_size; uint32_t buffer_free; struct list_head todo; wait_queue_head_t wait; struct binder_stats stats; struct list_head delivered_death; int max_threads; int requested_threads; int 
requested_threads_started; int ready_threads; long default_priority; struct dentry *debugfs_entry; }; enum { BINDER_LOOPER_STATE_REGISTERED = 0x01, BINDER_LOOPER_STATE_ENTERED = 0x02, BINDER_LOOPER_STATE_EXITED = 0x04, BINDER_LOOPER_STATE_INVALID = 0x08, BINDER_LOOPER_STATE_WAITING = 0x10, BINDER_LOOPER_STATE_NEED_RETURN = 0x20 }; struct binder_thread { struct binder_proc *proc; struct rb_node rb_node; int pid; int looper; struct binder_transaction *transaction_stack; struct list_head todo; uint32_t return_error; /* Write failed, return error code in read buf */ uint32_t return_error2; /* Write failed, return error code in read */ /* buffer. Used when sending a reply to a dead process that */ /* we are also waiting on */ wait_queue_head_t wait; struct binder_stats stats; }; struct binder_transaction { int debug_id; struct binder_work work; struct binder_thread *from; struct binder_transaction *from_parent; struct binder_proc *to_proc; struct binder_thread *to_thread; struct binder_transaction *to_parent; unsigned need_reply:1; /* unsigned is_dead:1; */ /* not used at the moment */ struct binder_buffer *buffer; unsigned int code; unsigned int flags; long priority; long saved_priority; uid_t sender_euid; }; static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); /* * copied from get_unused_fd_flags */ int task_get_unused_fd_flags(struct binder_proc *proc, int flags) { struct files_struct *files = proc->files; int fd, error; struct fdtable *fdt; unsigned long rlim_cur; unsigned long irqs; if (files == NULL) return -ESRCH; error = -EMFILE; spin_lock(&files->file_lock); repeat: fdt = files_fdtable(files); fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, files->next_fd); /* * N.B. For clone tasks sharing a files structure, this test * will limit the total number of files that can be opened. 
*/ rlim_cur = 0; if (lock_task_sighand(proc->tsk, &irqs)) { rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur; unlock_task_sighand(proc->tsk, &irqs); } if (fd >= rlim_cur) goto out; /* Do we need to expand the fd array or fd set? */ error = expand_files(files, fd); if (error < 0) goto out; if (error) { /* * If we needed to expand the fs array we * might have blocked - try again. */ error = -EMFILE; goto repeat; } __set_open_fd(fd, fdt); if (flags & O_CLOEXEC) __set_close_on_exec(fd, fdt); else __clear_close_on_exec(fd, fdt); files->next_fd = fd + 1; #if 1 /* Sanity check */ if (fdt->fd[fd] != NULL) { printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd); fdt->fd[fd] = NULL; } #endif error = fd; out: spin_unlock(&files->file_lock); return error; } /* * copied from fd_install */ static void task_fd_install( struct binder_proc *proc, unsigned int fd, struct file *file) { struct files_struct *files = proc->files; struct fdtable *fdt; if (files == NULL) return; spin_lock(&files->file_lock); fdt = files_fdtable(files); BUG_ON(fdt->fd[fd] != NULL); rcu_assign_pointer(fdt->fd[fd], file); spin_unlock(&files->file_lock); } /* * copied from __put_unused_fd in open.c */ static void __put_unused_fd(struct files_struct *files, unsigned int fd) { struct fdtable *fdt = files_fdtable(files); __clear_open_fd(fd, fdt); if (fd < files->next_fd) files->next_fd = fd; } /* * copied from sys_close */ static long task_close_fd(struct binder_proc *proc, unsigned int fd) { struct file *filp; struct files_struct *files = proc->files; struct fdtable *fdt; int retval; if (files == NULL) return -ESRCH; spin_lock(&files->file_lock); fdt = files_fdtable(files); if (fd >= fdt->max_fds) goto out_unlock; filp = fdt->fd[fd]; if (!filp) goto out_unlock; rcu_assign_pointer(fdt->fd[fd], NULL); __clear_close_on_exec(fd, fdt); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); retval = filp_close(filp, files); /* can't restart close syscall because file table entry was cleared */ 
if (unlikely(retval == -ERESTARTSYS || retval == -ERESTARTNOINTR || retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK)) retval = -EINTR; return retval; out_unlock: spin_unlock(&files->file_lock); return -EBADF; } static void binder_set_nice(long nice) { long min_nice; if (can_nice(current, nice)) { set_user_nice(current, nice); return; } min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur; binder_debug(BINDER_DEBUG_PRIORITY_CAP, "binder: %d: nice value %ld not allowed use " "%ld instead\n", current->pid, nice, min_nice); set_user_nice(current, min_nice); if (min_nice < 20) return; binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid); } static size_t binder_buffer_size(struct binder_proc *proc, struct binder_buffer *buffer) { if (list_is_last(&buffer->entry, &proc->buffers)) return proc->buffer + proc->buffer_size - (void *)buffer->data; else return (size_t)list_entry(buffer->entry.next, struct binder_buffer, entry) - (size_t)buffer->data; } static void binder_insert_free_buffer(struct binder_proc *proc, struct binder_buffer *new_buffer) { struct rb_node **p = &proc->free_buffers.rb_node; struct rb_node *parent = NULL; struct binder_buffer *buffer; size_t buffer_size; size_t new_buffer_size; BUG_ON(!new_buffer->free); new_buffer_size = binder_buffer_size(proc, new_buffer); binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: add free buffer, size %zd, " "at %p\n", proc->pid, new_buffer_size, new_buffer); while (*p) { parent = *p; buffer = rb_entry(parent, struct binder_buffer, rb_node); BUG_ON(!buffer->free); buffer_size = binder_buffer_size(proc, buffer); if (new_buffer_size < buffer_size) p = &parent->rb_left; else p = &parent->rb_right; } rb_link_node(&new_buffer->rb_node, parent, p); rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); } static void binder_insert_allocated_buffer(struct binder_proc *proc, struct binder_buffer *new_buffer) { struct rb_node **p = &proc->allocated_buffers.rb_node; struct rb_node *parent = 
NULL; struct binder_buffer *buffer; BUG_ON(new_buffer->free); while (*p) { parent = *p; buffer = rb_entry(parent, struct binder_buffer, rb_node); BUG_ON(buffer->free); if (new_buffer < buffer) p = &parent->rb_left; else if (new_buffer > buffer) p = &parent->rb_right; else BUG(); } rb_link_node(&new_buffer->rb_node, parent, p); rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); } static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, void __user *user_ptr) { struct rb_node *n = proc->allocated_buffers.rb_node; struct binder_buffer *buffer; struct binder_buffer *kern_ptr; kern_ptr = user_ptr - proc->user_buffer_offset - offsetof(struct binder_buffer, data); while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(buffer->free); if (kern_ptr < buffer) n = n->rb_left; else if (kern_ptr > buffer) n = n->rb_right; else return buffer; } return NULL; } static int binder_update_page_range(struct binder_proc *proc, int allocate, void *start, void *end, struct vm_area_struct *vma) { void *page_addr; unsigned long user_page_addr; struct vm_struct tmp_area; struct page **page; struct mm_struct *mm; binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: %s pages %p-%p\n", proc->pid, allocate ? 
"allocate" : "free", start, end); if (end <= start) return 0; if (vma) mm = NULL; else mm = get_task_mm(proc->tsk); if (mm) { down_write(&mm->mmap_sem); vma = proc->vma; if (vma && mm != proc->vma_vm_mm) { pr_err("binder: %d: vma mm and task mm mismatch\n", proc->pid); vma = NULL; } } if (allocate == 0) goto free_range; if (vma == NULL) { binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder: %d: binder_alloc_buf failed to " "map pages in userspace, no vma\n", proc->pid); goto err_no_vma; } for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { int ret; struct page **page_array_ptr; page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; BUG_ON(*page); *page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (*page == NULL) { binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder: %d: binder_alloc_buf failed " "for page at %p\n", proc->pid, page_addr); goto err_alloc_page_failed; } tmp_area.addr = page_addr; tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */; page_array_ptr = page; ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr); if (ret) { binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder: %d: binder_alloc_buf failed " "to map page at %p in kernel\n", proc->pid, page_addr); goto err_map_kernel_failed; } user_page_addr = (uintptr_t)page_addr + proc->user_buffer_offset; ret = vm_insert_page(vma, user_page_addr, page[0]); if (ret) { binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder: %d: binder_alloc_buf failed " "to map page at %lx in userspace\n", proc->pid, user_page_addr); goto err_vm_insert_page_failed; } /* vm_insert_page does not seem to increment the refcount */ } if (mm) { up_write(&mm->mmap_sem); mmput(mm); } return 0; free_range: for (page_addr = end - PAGE_SIZE; page_addr >= start; page_addr -= PAGE_SIZE) { page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; if (vma) zap_page_range(vma, (uintptr_t)page_addr + proc->user_buffer_offset, PAGE_SIZE, NULL); err_vm_insert_page_failed: unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); 
err_map_kernel_failed: __free_page(*page); *page = NULL; err_alloc_page_failed: ; } err_no_vma: if (mm) { up_write(&mm->mmap_sem); mmput(mm); } return -ENOMEM; } static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, size_t data_size, size_t offsets_size, int is_async) { struct rb_node *n = proc->free_buffers.rb_node; struct binder_buffer *buffer; size_t buffer_size; struct rb_node *best_fit = NULL; void *has_page_addr; void *end_page_addr; size_t size; if (proc->vma == NULL) { binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder: %d: binder_alloc_buf, no vma\n", proc->pid); return NULL; } size = ALIGN(data_size, sizeof(void *)) + ALIGN(offsets_size, sizeof(void *)); if (size < data_size || size < offsets_size) { binder_user_error("binder: %d: got transaction with invalid " "size %zd-%zd\n", proc->pid, data_size, offsets_size); return NULL; } if (is_async && proc->free_async_space < size + sizeof(struct binder_buffer)) { binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: binder_alloc_buf size %zd" "failed, no async space left\n", proc->pid, size); return NULL; } while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(!buffer->free); buffer_size = binder_buffer_size(proc, buffer); if (size < buffer_size) { best_fit = n; n = n->rb_left; } else if (size > buffer_size) n = n->rb_right; else { best_fit = n; break; } } if (best_fit == NULL) { binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder: %d: binder_alloc_buf size %zd failed, " "no address space\n", proc->pid, size); return NULL; } if (n == NULL) { buffer = rb_entry(best_fit, struct binder_buffer, rb_node); buffer_size = binder_buffer_size(proc, buffer); } binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: binder_alloc_buf size %zd got buff" "er %p size %zd\n", proc->pid, size, buffer, buffer_size); has_page_addr = (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); if (n == NULL) { if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) buffer_size = size; /* no room for 
other buffers */ else buffer_size = size + sizeof(struct binder_buffer); } end_page_addr = (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); if (end_page_addr > has_page_addr) end_page_addr = has_page_addr; if (binder_update_page_range(proc, 1, (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) return NULL; rb_erase(best_fit, &proc->free_buffers); buffer->free = 0; binder_insert_allocated_buffer(proc, buffer); if (buffer_size != size) { struct binder_buffer *new_buffer = (void *)buffer->data + size; list_add(&new_buffer->entry, &buffer->entry); new_buffer->free = 1; binder_insert_free_buffer(proc, new_buffer); } binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: binder_alloc_buf size %zd got " "%p\n", proc->pid, size, buffer); buffer->data_size = data_size; buffer->offsets_size = offsets_size; buffer->async_transaction = is_async; if (is_async) { proc->free_async_space -= size + sizeof(struct binder_buffer); binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "binder: %d: binder_alloc_buf size %zd " "async free %zd\n", proc->pid, size, proc->free_async_space); } return buffer; } static void *buffer_start_page(struct binder_buffer *buffer) { return (void *)((uintptr_t)buffer & PAGE_MASK); } static void *buffer_end_page(struct binder_buffer *buffer) { return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); } static void binder_delete_free_buffer(struct binder_proc *proc, struct binder_buffer *buffer) { struct binder_buffer *prev, *next = NULL; int free_page_end = 1; int free_page_start = 1; BUG_ON(proc->buffers.next == &buffer->entry); prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); BUG_ON(!prev->free); if (buffer_end_page(prev) == buffer_start_page(buffer)) { free_page_start = 0; if (buffer_end_page(prev) == buffer_end_page(buffer)) free_page_end = 0; binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: merge free, buffer %p " "share page with %p\n", proc->pid, buffer, prev); } if (!list_is_last(&buffer->entry, 
&proc->buffers)) { next = list_entry(buffer->entry.next, struct binder_buffer, entry); if (buffer_start_page(next) == buffer_end_page(buffer)) { free_page_end = 0; if (buffer_start_page(next) == buffer_start_page(buffer)) free_page_start = 0; binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: merge free, buffer" " %p share page with %p\n", proc->pid, buffer, prev); } } list_del(&buffer->entry); if (free_page_start || free_page_end) { binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: merge free, buffer %p do " "not share page%s%s with with %p or %p\n", proc->pid, buffer, free_page_start ? "" : " end", free_page_end ? "" : " start", prev, next); binder_update_page_range(proc, 0, free_page_start ? buffer_start_page(buffer) : buffer_end_page(buffer), (free_page_end ? buffer_end_page(buffer) : buffer_start_page(buffer)) + PAGE_SIZE, NULL); } } static void binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer) { size_t size, buffer_size; buffer_size = binder_buffer_size(proc, buffer); size = ALIGN(buffer->data_size, sizeof(void *)) + ALIGN(buffer->offsets_size, sizeof(void *)); binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: binder_free_buf %p size %zd buffer" "_size %zd\n", proc->pid, buffer, size, buffer_size); BUG_ON(buffer->free); BUG_ON(size > buffer_size); BUG_ON(buffer->transaction != NULL); BUG_ON((void *)buffer < proc->buffer); BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); if (buffer->async_transaction) { proc->free_async_space += size + sizeof(struct binder_buffer); binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "binder: %d: binder_free_buf size %zd " "async free %zd\n", proc->pid, size, proc->free_async_space); } binder_update_page_range(proc, 0, (void *)PAGE_ALIGN((uintptr_t)buffer->data), (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), NULL); rb_erase(&buffer->rb_node, &proc->allocated_buffers); buffer->free = 1; if (!list_is_last(&buffer->entry, &proc->buffers)) { struct binder_buffer *next = 
			list_entry(buffer->entry.next,
				   struct binder_buffer, entry);
		if (next->free) {
			/* Coalesce with the following free buffer. */
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);
		if (prev->free) {
			/* Coalesce with the preceding free buffer; the
			 * merged region is re-inserted under 'prev'. */
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}

/*
 * Look up the binder node keyed by the owner's user-space object
 * pointer @ptr in @proc's node tree; NULL if not present.
 */
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   void __user *ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Create a node for user object @ptr in @proc and insert it into the
 * process's node tree.  Returns NULL if the node already exists or if
 * allocation fails.
 */
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   void __user *ptr,
					   void __user *cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;	/* already exists */
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "binder: %d:%d node %d u%p c%p created\n",
		     proc->pid, current->pid, node->debug_id,
		     node->ptr, node->cookie);
	return node;
}

/*
 * Take a strong or weak reference on @node.  'internal' distinguishes
 * remote (ref-held) counts from local ones; when the owner has not yet
 * acknowledged the ref, work is queued on @target_list to tell it.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node ==
			      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				binder_debug(BINDER_DEBUG_TOP_ERRORS,
					     "binder: invalid inc strong "
					     "node for %d\n", node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			/* Owner has not acked the strong ref yet: (re)queue
			 * the node work so it is told to take one. */
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				binder_debug(BINDER_DEBUG_TOP_ERRORS,
					     "binder: invalid inc weak node "
					     "for %d\n", node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

/*
 * Drop a strong or weak count on @node.  When all counts reach zero
 * the owner is notified (live node) or the node is freed outright
 * (dead node, or owner never took a ref).
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		/* Owner still holds refs: queue node work so it drops them. */
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "binder: refless node %d deleted\n",
					     node->debug_id);
			} else {
				/* Dead node: owner already gone. */
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "binder: dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}

/*
 * Look up @proc's reference by its user-visible descriptor; NULL if
 * the process holds no such ref.
 */
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 uint32_t desc)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc)
			n = n->rb_left;
		else if (desc >
			 ref->desc)
			n = n->rb_right;
		else
			return ref;
	}
	return NULL;
}

/*
 * Find @proc's reference to @node, creating one (with a freshly
 * assigned descriptor) if none exists yet.  Returns NULL only on
 * allocation failure.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;	/* existing ref to this node */
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/* Pick the lowest unused descriptor; 0 is reserved for the
	 * context manager. */
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();	/* descriptor chosen above must be unused */
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "binder: %d new ref %d desc %d for "
			     "node %d\n", proc->pid, new_ref->debug_id,
			     new_ref->desc, node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "binder: %d new ref %d desc %d for "
			     "dead node\n", proc->pid, new_ref->debug_id,
			     new_ref->desc);
	}
	return new_ref;
}

/*
 * Tear down a reference: unlink it from both rb-trees and the node's
 * ref list, drop the node counts it held, and cancel any pending death
 * notification (continued on the next line).
 */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "binder: %d delete ref %d desc %d for "
		     "node %d\n", ref->proc->pid, ref->debug_id,
		     ref->desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	/* Drop the implicit weak count every ref holds on its node. */
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "binder: %d delete ref %d desc %d "
			     "has death notification\n", ref->proc->pid,
			     ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}

/*
 * Increment a ref's strong or weak count, propagating the first
 * increment of either kind to the underlying node.
 */
static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}

/*
 * Decrement a ref's strong or weak count; deletes the ref entirely
 * once both counts reach zero.  Underflow is a user error (-EINVAL).
 */
static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("binder: %d invalid dec strong, "
					  "ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;
			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("binder: %d invalid dec weak, "
					  "ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}

/*
 * Pop transaction @t off @target_thread's transaction stack (if any)
 * and free it (continued on the next line).
 */
static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction =
			NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

/*
 * Deliver @error_code as a failed reply for synchronous transaction
 * @t, walking up the call chain (from_parent) if the immediate sender
 * thread is already gone.
 */
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				/* Stash the existing error in the second
				 * slot so the new one is seen first. */
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "binder: send failed reply for "
					     "transaction %d to %d:%d\n",
					     t->debug_id,
					     target_thread->proc->pid,
					     target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				binder_debug(BINDER_DEBUG_TOP_ERRORS,
					     "binder: reply failed, target "
					     "thread, %d:%d, has error code %d "
					     "already\n",
					     target_thread->proc->pid,
					     target_thread->pid,
					     target_thread->return_error);
			}
			return;
		} else {
			/* Sender thread is gone: retry one level up. */
			struct binder_transaction *next = t->from_parent;

			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "binder: send failed reply "
				     "for transaction %d, target dead\n",
				     t->debug_id);

			binder_pop_transaction(target_thread, t);
			if (next == NULL) {
				binder_debug(BINDER_DEBUG_DEAD_BINDER,
					     "binder: reply failed,"
					     " no target thread at root\n");
				return;
			}
			t = next;
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "binder: reply failed, no target "
				     "thread -- retry %d\n", t->debug_id);
		}
	}
}

/*
 * Undo the reference/fd effects of the objects embedded in a
 * transaction buffer.  If @failed_at is non-NULL only the offsets
 * before it are released (used when binder_transaction() fails part
 * way through translating objects).
 */
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      size_t *failed_at)
{
	size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	/* Offsets array sits after the (pointer-aligned) data area. */
	offp = (size_t *)(buffer->data + ALIGN(buffer->data_size,
					       sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;
		/* Validate each offset before trusting the object there. */
		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(void *))) {
			binder_debug(BINDER_DEBUG_TOP_ERRORS,
				     "binder: transaction release %d bad"
				     "offset %zd, size %zd\n", debug_id,
				     *offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_node *node = binder_get_node(proc,
								   fp->binder);
			if (node == NULL) {
				binder_debug(BINDER_DEBUG_TOP_ERRORS,
					     "binder: transaction release %d"
					     " bad node %p\n", debug_id,
					     fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " node %d u%p\n",
				     node->debug_id, node->ptr);
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER,
					0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc,
								fp->handle);
			if (ref == NULL) {
				binder_debug(BINDER_DEBUG_TOP_ERRORS,
					     "binder: transaction release %d"
					     " bad handle %ld\n", debug_id,
					     fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc,
				     ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD:
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " fd %ld\n", fp->handle);
			/* Only close the installed fd on the failure path. */
			if (failed_at)
				task_close_fd(proc, fp->handle);
			break;

		default:
			binder_debug(BINDER_DEBUG_TOP_ERRORS,
				     "binder: transaction release %d bad "
				     "object type %lx\n", debug_id, fp->type);
			break;
		}
	}
}

/*
 * Core of the binder protocol: carry one BC_TRANSACTION or BC_REPLY
 * from @thread in @proc to the target process/thread, copying the data
 * buffer and translating every embedded binder object, handle and fd
 * into the target's namespace.
 */
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	size_t *offp, *off_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct
	       list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;

	if (reply) {
		/* Replies target the transaction on top of our stack. */
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("binder: %d:%d got reply transaction "
					  "with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("binder: %d:%d got reply transaction "
				"with bad transaction stack,"
				" transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("binder: %d:%d got reply transaction "
				"with bad target transaction stack %d, "
				"expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		/* New transaction: resolve the target node from the
		 * handle (0 == context manager). */
		if (tr->target.handle) {
			struct binder_ref *ref;
			ref = binder_get_ref(proc, tr->target.handle);
			if (ref == NULL) {
				binder_user_error("binder: %d:%d got "
					"transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			target_node = binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			goto err_invalid_target_handle;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			/* Synchronous nested call: if the target process
			 * already called into us, reuse its waiting thread
			 * to avoid deadlock. */
			struct binder_transaction *tmp;
			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("binder: %d:%d got new "
					"transaction with bad transaction stack"
					", transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "binder: %d:%d BC_REPLY %d -> %d:%d, "
			     "data %p-%p size %zd-%zd\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     tr->data.ptr.buffer, tr->data.ptr.offsets,
			     tr->data_size, tr->offsets_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "binder: %d:%d BC_TRANSACTION %d -> "
			     "%d - node %d, data %p-%p size %zd-%zd\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     tr->data.ptr.buffer, tr->data.ptr.offsets,
			     tr->data_size, tr->offsets_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = proc->tsk->cred->euid;
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);
	/* Allocate the kernel-side buffer in the TARGET's address space. */
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size,
						  sizeof(void *)));

	if (copy_from_user(t->buffer->data, tr->data.ptr.buffer,
			   tr->data_size)) {
		binder_user_error("binder: %d:%d got transaction with invalid "
			"data ptr\n", proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("binder: %d:%d got transaction with invalid "
			"offsets ptr\n", proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
		binder_user_error("binder: %d:%d got transaction with "
			"invalid offsets size, %zd\n",
			proc->pid, thread->pid, tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	/* Translate every flattened object into the target's namespace. */
	off_end = (void *)offp + tr->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;
		if (*offp > t->buffer->data_size - sizeof(*fp) ||
		    t->buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(void *))) {
			binder_user_error("binder: %d:%d got transaction with "
				"invalid offset, %zd\n",
				proc->pid, thread->pid, *offp);
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}
		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			/* Sender passed one of its own objects: ensure a
			 * node exists and hand the target a handle. */
			struct binder_ref *ref;
			struct binder_node *node = binder_get_node(proc,
								   fp->binder);
			if (node == NULL) {
				node = binder_new_node(proc, fp->binder,
						       fp->cookie);
				if (node == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_new_node_failed;
				}
				node->min_priority = fp->flags &
					FLAT_BINDER_FLAG_PRIORITY_MASK;
				node->accept_fds = !!(fp->flags &
					FLAT_BINDER_FLAG_ACCEPTS_FDS);
			}
			if (fp->cookie != node->cookie) {
				binder_user_error("binder: %d:%d sending u%p "
					"node %d, cookie mismatch %p != %p\n",
					proc->pid, thread->pid,
					fp->binder,
					node->debug_id,
					fp->cookie, node->cookie);
				goto err_binder_get_ref_for_node_failed;
			}
			if (security_binder_transfer_binder(proc->tsk,
							    target_proc->tsk)) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			ref = binder_get_ref_for_node(target_proc, node);
			if (ref == NULL) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			if (fp->type == BINDER_TYPE_BINDER)
				fp->type = BINDER_TYPE_HANDLE;
			else
				fp->type = BINDER_TYPE_WEAK_HANDLE;
			fp->handle = ref->desc;
			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
				       &thread->todo);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " node %d u%p -> ref %d desc %d\n",
				     node->debug_id, node->ptr, ref->debug_id,
				     ref->desc);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			/* Sender passed a handle: either convert back to a
			 * local object (target owns the node) or mint a
			 * handle in the target's ref table. */
			struct binder_ref *ref = binder_get_ref(proc,
								fp->handle);
			if (ref == NULL) {
				binder_user_error("binder: %d:%d got "
					"transaction with invalid "
					"handle, %ld\n", proc->pid,
					thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_failed;
			}
			if (security_binder_transfer_binder(proc->tsk,
							    target_proc->tsk)) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_failed;
			}
			if (ref->node->proc == target_proc) {
				if (fp->type == BINDER_TYPE_HANDLE)
					fp->type = BINDER_TYPE_BINDER;
				else
					fp->type = BINDER_TYPE_WEAK_BINDER;
				fp->binder = ref->node->ptr;
				fp->cookie = ref->node->cookie;
				binder_inc_node(ref->node,
						fp->type == BINDER_TYPE_BINDER,
						0, NULL);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     " ref %d desc %d -> node %d u%p\n",
					     ref->debug_id, ref->desc,
					     ref->node->debug_id,
					     ref->node->ptr);
			} else {
				struct binder_ref *new_ref;
				new_ref = binder_get_ref_for_node(target_proc,
								  ref->node);
				if (new_ref == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_get_ref_for_node_failed;
				}
				fp->handle = new_ref->desc;
				binder_inc_ref(new_ref,
					       fp->type == BINDER_TYPE_HANDLE,
					       NULL);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     " ref %d desc %d -> ref %d desc %d (node %d)\n",
					     ref->debug_id, ref->desc,
					     new_ref->debug_id,
					     new_ref->desc,
					     ref->node->debug_id);
			}
		} break;

		case BINDER_TYPE_FD: {
			/* Duplicate the sender's fd into the target. */
			int target_fd;
			struct file *file;

			if (reply) {
				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
					binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
						proc->pid, thread->pid,
						fp->handle);
					return_error = BR_FAILED_REPLY;
					goto err_fd_not_allowed;
				}
			} else if (!target_node->accept_fds) {
				binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
					proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fd_not_allowed;
			}

			file = fget(fp->handle);
			if (file == NULL) {
				binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
					proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fget_failed;
			}
			if (security_binder_transfer_file(proc->tsk,
							  target_proc->tsk,
							  file) < 0) {
				fput(file);
				return_error = BR_FAILED_REPLY;
				goto err_get_unused_fd_failed;
			}
			target_fd = task_get_unused_fd_flags(target_proc,
							     O_CLOEXEC);
			if (target_fd < 0) {
				fput(file);
				return_error = BR_FAILED_REPLY;
				goto err_get_unused_fd_failed;
			}
			task_fd_install(target_proc, target_fd, file);
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " fd %ld -> %d\n", fp->handle,
				     target_fd);
			/* TODO: fput? */
			fp->handle = target_fd;
		} break;

		default:
			binder_user_error("binder: %d:%d got transactio"
				"n with invalid object type, %lx\n",
				proc->pid, thread->pid, fp->type);
			return_error = BR_FAILED_REPLY;
			goto err_bad_object_type;
		}
	}
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		/* Synchronous call: push onto the sender's stack so the
		 * reply can find its way back. */
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		/* Async: serialize per-node if one is already in flight. */
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;

	/* Error unwinding: labels are ordered so each entry point frees
	 * exactly what had been acquired by the time it failed. */
err_get_unused_fd_failed:
err_fget_failed:
err_fd_not_allowed:
err_binder_get_ref_for_node_failed:
err_binder_get_ref_failed:
err_binder_new_node_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	t->buffer->transaction = NULL;
	binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "binder: %d:%d transaction failed %d, size %zd-%zd\n",
		     proc->pid, thread->pid, return_error,
		     tr->data_size, tr->offsets_size);

	{
		struct binder_transaction_log_entry *fe;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
	}

	BUG_ON(thread->return_error != BR_OK);
	if
	   (in_reply_to) {
		/* A failed reply still completes the caller's BC_REPLY;
		 * the error is sent back to the original sender. */
		thread->return_error = BR_TRANSACTION_COMPLETE;
		binder_send_failed_reply(in_reply_to, return_error);
	} else
		thread->return_error = return_error;
}

/*
 * Consume BC_* commands from the user-space write buffer until it is
 * exhausted (or a pending return error stops us).  *consumed is
 * advanced past each fully-processed command.
 */
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
			void __user *buffer, int size, signed long *consumed)
{
	uint32_t cmd;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			binder_stats.bc[_IOC_NR(cmd)]++;
			proc->stats.bc[_IOC_NR(cmd)]++;
			thread->stats.bc[_IOC_NR(cmd)]++;
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			struct binder_ref *ref;
			const char *debug_string;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (target == 0 && binder_context_mgr_node &&
			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
				/* Desc 0 is the context manager; create the
				 * ref on demand. */
				ref = binder_get_ref_for_node(proc,
					       binder_context_mgr_node);
				if (ref->desc != target) {
					binder_user_error("binder: %d:"
						"%d tried to acquire "
						"reference to desc 0, "
						"got %d instead\n",
						proc->pid, thread->pid,
						ref->desc);
				}
			} else
				ref = binder_get_ref(proc, target);
			if (ref == NULL) {
				binder_user_error("binder: %d:%d refcou"
					"nt change on invalid ref %d\n",
					proc->pid, thread->pid, target);
				break;
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				binder_inc_ref(ref, 0, NULL);
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				binder_inc_ref(ref, 1, NULL);
				break;
			case BC_RELEASE:
				debug_string = "Release";
				binder_dec_ref(ref, 1);
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				binder_dec_ref(ref, 0);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid, debug_string,
				     ref->debug_id, ref->desc, ref->strong,
				     ref->weak, ref->node->debug_id);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			/* Node owner acknowledges a ref request queued by
			 * binder_inc_node(); drop our temporary count. */
			void __user *node_ptr;
			void *cookie;
			struct binder_node *node;

			if (get_user(node_ptr, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			if (get_user(cookie, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("binder: %d:%d "
					"%s u%p no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("binder: %d:%d %s u%p node %d"
					" cookie mismatch %p != %p\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					node_ptr, node->debug_id,
					cookie, node->cookie);
				break;
			}
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("binder: %d:%d "
						"BC_ACQUIRE_DONE node %d has "
						"no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("binder: %d:%d "
						"BC_INCREFS_DONE node %d has "
						"no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_weak_ref = 0;
			}
			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "binder: %d:%d %s node %d ls %d lw %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ?
				     "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			binder_debug(BINDER_DEBUG_TOP_ERRORS,
				     "binder: BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			binder_debug(BINDER_DEBUG_TOP_ERRORS,
				     "binder: BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			/* User space is done with a delivered transaction
			 * buffer: release its objects and free it. */
			void __user *data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);

			buffer = binder_buffer_lookup(proc, data_ptr);
			if (buffer == NULL) {
				binder_user_error("binder: %d:%d "
					"BC_FREE_BUFFER u%p no match\n",
					proc->pid, thread->pid, data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("binder: %d:%d "
					"BC_FREE_BUFFER u%p matched "
					"unreturned buffer\n",
					proc->pid, thread->pid, data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				/* Freeing an async buffer releases the next
				 * queued async transaction for that node. */
				BUG_ON(!buffer->target_node->has_async_transaction);
				if (list_empty(&buffer->target_node->async_todo))
					buffer->target_node->has_async_transaction = 0;
				else
					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
			}
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_free_buf(proc, buffer);
			break;
		}

		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "binder: %d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("binder: %d:%d ERROR:"
					" BC_REGISTER_LOOPER called "
					"after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("binder: %d:%d ERROR:"
					" BC_REGISTER_LOOPER called "
					"without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "binder: %d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("binder: %d:%d ERROR:"
					" BC_ENTER_LOOPER called after "
					"BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "binder: %d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			void __user *cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (void __user * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			ref = binder_get_ref(proc, target);
			if (ref == NULL) {
				binder_user_error("binder: %d:%d %s "
					"invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     cookie, ref->debug_id, ref->desc,
				     ref->strong, ref->weak, ref->node->debug_id);

			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("binder: %d:%"
						"d BC_REQUEST_DEATH_NOTI"
						"FICATION death notific"
						"ation already set\n",
						proc->pid, thread->pid);
					break;
				}
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					thread->return_error = BR_ERROR;
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
						     "binder: %d:%d "
						     "BC_REQUEST_DEATH_NOTIFICATION failed\n",
						     proc->pid, thread->pid);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					/* Node owner is already dead:
					 * deliver the notification now. */
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&ref->death->work.entry, &thread->todo);
					} else {
						list_add_tail(&ref->death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("binder: %d:%"
						"d BC_CLEAR_DEATH_NOTIFI"
						"CATION death notificat"
						"ion not active\n",
						proc->pid, thread->pid);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("binder: %d:%"
						"d BC_CLEAR_DEATH_NOTIFI"
						"CATION death notificat"
						"ion cookie mismatch "
						"%p != %p\n",
						proc->pid, thread->pid,
						death->cookie, cookie);
					break;
				}
				ref->death = NULL;
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&death->work.entry, &thread->todo);
					} else {
						list_add_tail(&death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				} else {
					/* Death already queued: convert it
					 * to a combined dead+clear work. */
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
			}
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			void __user *cookie;
			struct binder_ref_death *death = NULL;
			if (get_user(cookie, (void __user * __user *)ptr))
				return -EFAULT;

			ptr += sizeof(void *);
			/* Match the cookie against delivered death works. */
			list_for_each_entry(w, &proc->delivered_death, entry) {
				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
				     proc->pid, thread->pid, cookie, death);
			if (death == NULL) {
				binder_user_error("binder: %d:%d BC_DEAD"
					"_BINDER_DONE %p not found\n",
					proc->pid, thread->pid, cookie);
				break;
			}

			list_del_init(&death->work.entry);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
					list_add_tail(&death->work.entry, &thread->todo);
				} else {
					list_add_tail(&death->work.entry, &proc->todo);
					wake_up_interruptible(&proc->wait);
				}
			}
		} break;

		default:
			binder_debug(BINDER_DEBUG_TOP_ERRORS,
				     "binder: %d:%d unknown command %d\n",
				     proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

/* Bump the BR_* return-command statistics counters. */
void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
		    uint32_t cmd)
{
	if
(_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { binder_stats.br[_IOC_NR(cmd)]++; proc->stats.br[_IOC_NR(cmd)]++; thread->stats.br[_IOC_NR(cmd)]++; } } static int binder_has_proc_work(struct binder_proc *proc, struct binder_thread *thread) { return !list_empty(&proc->todo) || (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); } static int binder_has_thread_work(struct binder_thread *thread) { return !list_empty(&thread->todo) || thread->return_error != BR_OK || (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); } static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread, void __user *buffer, int size, signed long *consumed, int non_block) { void __user *ptr = buffer + *consumed; void __user *end = buffer + size; int ret = 0; int wait_for_proc_work; if (*consumed == 0) { if (put_user(BR_NOOP, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); } retry: wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo); if (thread->return_error != BR_OK && ptr < end) { if (thread->return_error2 != BR_OK) { if (put_user(thread->return_error2, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (ptr == end) goto done; thread->return_error2 = BR_OK; } if (put_user(thread->return_error, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); thread->return_error = BR_OK; goto done; } thread->looper |= BINDER_LOOPER_STATE_WAITING; if (wait_for_proc_work) proc->ready_threads++; mutex_unlock(&binder_lock); if (wait_for_proc_work) { if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) { binder_user_error("binder: %d:%d ERROR: Thread waiting " "for process work before calling BC_REGISTER_" "LOOPER or BC_ENTER_LOOPER (state %x)\n", proc->pid, thread->pid, thread->looper); wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); } binder_set_nice(proc->default_priority); if (non_block) { if (!binder_has_proc_work(proc, thread)) ret = 
/* (cont.) non-blocking reads return -EAGAIN when there is no work; blocking readers sleep freezably on the shared proc->wait queue (exclusive wakeup) or on their private thread->wait. binder_lock is dropped around the sleep and re-taken afterwards. */
-EAGAIN; } else ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); } else { if (non_block) { if (!binder_has_thread_work(thread)) ret = -EAGAIN; } else ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); } mutex_lock(&binder_lock); if (wait_for_proc_work) proc->ready_threads--; thread->looper &= ~BINDER_LOOPER_STATE_WAITING; if (ret) return ret; while (1) { uint32_t cmd; struct binder_transaction_data tr; struct binder_work *w; struct binder_transaction *t = NULL; if (!list_empty(&thread->todo)) w = list_first_entry(&thread->todo, struct binder_work, entry); else if (!list_empty(&proc->todo) && wait_for_proc_work) w = list_first_entry(&proc->todo, struct binder_work, entry); else { if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */ goto retry; break; } if (end - ptr < sizeof(tr) + 4) break; switch (w->type) { case BINDER_WORK_TRANSACTION: { t = container_of(w, struct binder_transaction, work); } break; case BINDER_WORK_TRANSACTION_COMPLETE: { cmd = BR_TRANSACTION_COMPLETE; if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, "binder: %d:%d BR_TRANSACTION_COMPLETE\n", proc->pid, thread->pid); list_del(&w->entry); kfree(w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_NODE: { struct binder_node *node = container_of(w, struct binder_node, work); uint32_t cmd = BR_NOOP; const char *cmd_name; int strong = node->internal_strong_refs || node->local_strong_refs; int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; if (weak && !node->has_weak_ref) { cmd = BR_INCREFS; cmd_name = "BR_INCREFS"; node->has_weak_ref = 1; node->pending_weak_ref = 1; node->local_weak_refs++; } else if (strong && !node->has_strong_ref) { cmd = BR_ACQUIRE; cmd_name = "BR_ACQUIRE"; node->has_strong_ref = 1; node->pending_strong_ref 
/* BINDER_WORK_NODE (cont.): compare desired strong/weak state against has_strong_ref/has_weak_ref and emit BR_INCREFS / BR_ACQUIRE / BR_RELEASE / BR_DECREFS (node ptr + cookie follow the command); a node with no refs left is erased and freed here. */
= 1; node->local_strong_refs++; } else if (!strong && node->has_strong_ref) { cmd = BR_RELEASE; cmd_name = "BR_RELEASE"; node->has_strong_ref = 0; } else if (!weak && node->has_weak_ref) { cmd = BR_DECREFS; cmd_name = "BR_DECREFS"; node->has_weak_ref = 0; } if (cmd != BR_NOOP) { if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (put_user(node->ptr, (void * __user *)ptr)) return -EFAULT; ptr += sizeof(void *); if (put_user(node->cookie, (void * __user *)ptr)) return -EFAULT; ptr += sizeof(void *); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_USER_REFS, "binder: %d:%d %s %d u%p c%p\n", proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie); } else { list_del_init(&w->entry); if (!weak && !strong) { binder_debug(BINDER_DEBUG_INTERNAL_REFS, "binder: %d:%d node %d u%p c%p deleted\n", proc->pid, thread->pid, node->debug_id, node->ptr, node->cookie); rb_erase(&node->rb_node, &proc->nodes); kfree(node); binder_stats_deleted(BINDER_STAT_NODE); } else { binder_debug(BINDER_DEBUG_INTERNAL_REFS, "binder: %d:%d node %d u%p c%p state unchanged\n", proc->pid, thread->pid, node->debug_id, node->ptr, node->cookie); } } } break; case BINDER_WORK_DEAD_BINDER: case BINDER_WORK_DEAD_BINDER_AND_CLEAR: case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { struct binder_ref_death *death; uint32_t cmd; death = container_of(w, struct binder_ref_death, work); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; else cmd = BR_DEAD_BINDER; if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (put_user(death->cookie, (void * __user *)ptr)) return -EFAULT; ptr += sizeof(void *); binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, "binder: %d:%d %s %p\n", proc->pid, thread->pid, cmd == BR_DEAD_BINDER ? 
/* Death work: report BR_DEAD_BINDER (moved to delivered_death until BC_DEAD_BINDER_DONE) or BR_CLEAR_DEATH_NOTIFICATION_DONE (freed immediately), always with the registered cookie. */
"BR_DEAD_BINDER" : "BR_CLEAR_DEATH_NOTIFICATION_DONE", death->cookie); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { list_del(&w->entry); kfree(death); binder_stats_deleted(BINDER_STAT_DEATH); } else list_move(&w->entry, &proc->delivered_death); if (cmd == BR_DEAD_BINDER) goto done; /* DEAD_BINDER notifications can cause transactions */ } break; } if (!t) continue; BUG_ON(t->buffer == NULL); if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; tr.target.ptr = target_node->ptr; tr.cookie = target_node->cookie; t->saved_priority = task_nice(current); if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY)) binder_set_nice(t->priority); else if (!(t->flags & TF_ONE_WAY) || t->saved_priority > target_node->min_priority) binder_set_nice(target_node->min_priority); cmd = BR_TRANSACTION; } else { tr.target.ptr = NULL; tr.cookie = NULL; cmd = BR_REPLY; } tr.code = t->code; tr.flags = t->flags; tr.sender_euid = t->sender_euid; if (t->from) { struct task_struct *sender = t->from->proc->tsk; tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns); } else { tr.sender_pid = 0; } tr.data_size = t->buffer->data_size; tr.offsets_size = t->buffer->offsets_size; tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset; tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (copy_to_user(ptr, &tr, sizeof(tr))) return -EFAULT; ptr += sizeof(tr); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_TRANSACTION, "binder: %d:%d %s %d %d:%d, cmd %d" "size %zd-%zd ptr %p-%p\n", proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY", t->debug_id, t->from ? t->from->proc->pid : 0, t->from ? 
/* Tail of the read loop: after copying binder_transaction_data to userspace, a synchronous BR_TRANSACTION is pushed onto this thread's transaction stack; otherwise the transaction is freed. After "done:" a BR_SPAWN_LOOPER may be written when no ready/requested threads remain. binder_release_work() below drains and frees work items left on a dying list. */
t->from->pid : 0, cmd, t->buffer->data_size, t->buffer->offsets_size, tr.data.ptr.buffer, tr.data.ptr.offsets); list_del(&t->work.entry); t->buffer->allow_user_free = 1; if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { t->to_parent = thread->transaction_stack; t->to_thread = thread; thread->transaction_stack = t; } else { t->buffer->transaction = NULL; kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } break; } done: *consumed = ptr - buffer; if (proc->requested_threads + proc->ready_threads == 0 && proc->requested_threads_started < proc->max_threads && (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ /*spawn a new thread if we leave this out */) { proc->requested_threads++; binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d BR_SPAWN_LOOPER\n", proc->pid, thread->pid); if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) return -EFAULT; } return 0; } static void binder_release_work(struct list_head *list) { struct binder_work *w; while (!list_empty(list)) { w = list_first_entry(list, struct binder_work, entry); list_del_init(&w->entry); switch (w->type) { case BINDER_WORK_TRANSACTION: { struct binder_transaction *t; t = container_of(w, struct binder_transaction, work); if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { binder_send_failed_reply(t, BR_DEAD_REPLY); } else { binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "binder: undelivered transaction %d\n", t->debug_id); t->buffer->transaction = NULL; kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } } break; case BINDER_WORK_TRANSACTION_COMPLETE: { binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "binder: undelivered TRANSACTION_COMPLETE\n"); kfree(w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_DEAD_BINDER_AND_CLEAR: case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { struct binder_ref_death *death; death = container_of(w, struct binder_ref_death, work); 
/* (cont.) binder_release_work tail: undelivered death notifications are freed here. binder_get_thread(): rb-tree lookup of the calling task's binder_thread, allocating one with kzalloc on first use — NOTE it returns NULL on allocation failure, so every caller must check. binder_free_thread() walks the thread's transaction stack severing to/from links before freeing the thread. */
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "binder: undelivered death notification, %p\n", death->cookie); kfree(death); binder_stats_deleted(BINDER_STAT_DEATH); } break; default: pr_err("binder: unexpected work type, %d, not freed\n", w->type); break; } } } static struct binder_thread *binder_get_thread(struct binder_proc *proc) { struct binder_thread *thread = NULL; struct rb_node *parent = NULL; struct rb_node **p = &proc->threads.rb_node; while (*p) { parent = *p; thread = rb_entry(parent, struct binder_thread, rb_node); if (current->pid < thread->pid) p = &(*p)->rb_left; else if (current->pid > thread->pid) p = &(*p)->rb_right; else break; } if (*p == NULL) { thread = kzalloc(sizeof(*thread), GFP_KERNEL); if (thread == NULL) return NULL; binder_stats_created(BINDER_STAT_THREAD); thread->proc = proc; thread->pid = current->pid; init_waitqueue_head(&thread->wait); INIT_LIST_HEAD(&thread->todo); rb_link_node(&thread->rb_node, parent, p); rb_insert_color(&thread->rb_node, &proc->threads); thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; thread->return_error = BR_OK; thread->return_error2 = BR_OK; } return thread; } static int binder_free_thread(struct binder_proc *proc, struct binder_thread *thread) { struct binder_transaction *t; struct binder_transaction *send_reply = NULL; int active_transactions = 0; rb_erase(&thread->rb_node, &proc->threads); t = thread->transaction_stack; if (t && t->to_thread == thread) send_reply = t; while (t) { active_transactions++; binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "binder: release %d:%d transaction %d " "%s, still active\n", proc->pid, thread->pid, t->debug_id, (t->to_thread == thread) ? 
/* (cont.) binder_free_thread tail: sever each stacked transaction from the dying thread, then send BR_DEAD_REPLY for any reply still owed to it. FIX in binder_poll() below: binder_get_thread() returns NULL when its kzalloc fails (see its definition above), but the old code dereferenced the result unconditionally — a NULL pointer dereference reachable from userspace via poll(2). We now drop binder_lock and report POLLERR instead. Also covered here: binder_ioctl() head, including BINDER_WRITE_READ, BINDER_SET_MAX_THREADS and BINDER_SET_CONTEXT_MGR. */
"in" : "out"); if (t->to_thread == thread) { t->to_proc = NULL; t->to_thread = NULL; if (t->buffer) { t->buffer->transaction = NULL; t->buffer = NULL; } t = t->to_parent; } else if (t->from == thread) { t->from = NULL; t = t->from_parent; } else BUG(); } if (send_reply) binder_send_failed_reply(send_reply, BR_DEAD_REPLY); binder_release_work(&thread->todo); kfree(thread); binder_stats_deleted(BINDER_STAT_THREAD); return active_transactions; } static unsigned int binder_poll(struct file *filp, struct poll_table_struct *wait) { struct binder_proc *proc = filp->private_data; struct binder_thread *thread = NULL; int wait_for_proc_work; mutex_lock(&binder_lock); thread = binder_get_thread(proc); if (thread == NULL) { /* allocation failure in binder_get_thread(); without this check the dereferences below oops the kernel */ mutex_unlock(&binder_lock); return POLLERR; } wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo) && thread->return_error == BR_OK; mutex_unlock(&binder_lock); if (wait_for_proc_work) { if (binder_has_proc_work(proc, thread)) return POLLIN; poll_wait(filp, &proc->wait, wait); if (binder_has_proc_work(proc, thread)) return POLLIN; } else { if (binder_has_thread_work(thread)) return POLLIN; poll_wait(filp, &thread->wait, wait); if (binder_has_thread_work(thread)) return POLLIN; } return 0; } static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int ret; struct binder_proc *proc = filp->private_data; struct binder_thread *thread; unsigned int size = _IOC_SIZE(cmd); void __user *ubuf = (void __user *)arg; /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret) return ret; mutex_lock(&binder_lock); thread = binder_get_thread(proc); if (thread == NULL) { ret = -ENOMEM; goto err; } switch (cmd) { case BINDER_WRITE_READ: { struct binder_write_read bwr; if (size != sizeof(struct binder_write_read)) { ret = -EINVAL; goto err; } if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { ret = -EFAULT; goto err; } binder_debug(BINDER_DEBUG_READ_WRITE, "binder: 
%d:%d write %ld at %08lx, read %ld at %08lx\n", proc->pid, thread->pid, bwr.write_size, bwr.write_buffer, bwr.read_size, bwr.read_buffer); if (bwr.write_size > 0) { ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed); if (ret < 0) { bwr.read_consumed = 0; if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ret = -EFAULT; goto err; } } if (bwr.read_size > 0) { ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); if (!list_empty(&proc->todo)) wake_up_interruptible(&proc->wait); if (ret < 0) { if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ret = -EFAULT; goto err; } } binder_debug(BINDER_DEBUG_READ_WRITE, "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n", proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, bwr.read_consumed, bwr.read_size); if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { ret = -EFAULT; goto err; } break; } case BINDER_SET_MAX_THREADS: if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { ret = -EINVAL; /* NOTE(review): arguably should be -EFAULT for a copy_from_user failure; kept as-is for ABI compatibility */ goto err; } break; case BINDER_SET_CONTEXT_MGR: if (binder_context_mgr_node != NULL) { binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder: BINDER_SET_CONTEXT_MGR already set\n"); ret = -EBUSY; goto err; } ret = security_binder_set_context_mgr(proc->tsk); if (ret < 0) goto err; if (binder_context_mgr_uid != -1) { if (binder_context_mgr_uid != current->cred->euid) { binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder: BINDER_SET_" "CONTEXT_MGR bad uid %d != %d\n", current->cred->euid, binder_context_mgr_uid); ret = -EPERM; goto err; } } else binder_context_mgr_uid = current->cred->euid; binder_context_mgr_node = binder_new_node(proc, NULL, NULL); if (binder_context_mgr_node == NULL) { ret = -ENOMEM; goto err; } binder_context_mgr_node->local_weak_refs++; binder_context_mgr_node->local_strong_refs++; binder_context_mgr_node->has_strong_ref = 1; binder_context_mgr_node->has_weak_ref = 1; break; case 
/* (cont.) binder_ioctl tail: BINDER_THREAD_EXIT frees the calling thread's state, BINDER_VERSION reports BINDER_CURRENT_PROTOCOL_VERSION. NOTE(review): a put_user failure here yields -EINVAL rather than -EFAULT — confirm against userspace expectations before changing. Then the vma open/close hooks and binder_mmap (mapping is capped at SZ_4M below). */
BINDER_THREAD_EXIT: binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n", proc->pid, thread->pid); binder_free_thread(proc, thread); thread = NULL; break; case BINDER_VERSION: if (size != sizeof(struct binder_version)) { ret = -EINVAL; goto err; } if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) { ret = -EINVAL; goto err; } break; default: ret = -EINVAL; goto err; } ret = 0; err: if (thread) thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; mutex_unlock(&binder_lock); wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret && ret != -ERESTARTSYS) binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); return ret; } static void binder_vma_open(struct vm_area_struct *vma) { struct binder_proc *proc = vma->vm_private_data; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); } static void binder_vma_close(struct vm_area_struct *vma) { struct binder_proc *proc = vma->vm_private_data; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); proc->vma = NULL; proc->vma_vm_mm = NULL; binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); } static struct vm_operations_struct binder_vm_ops = { .open = binder_vma_open, .close = binder_vma_close, }; static int binder_mmap(struct file *filp, struct vm_area_struct *vma) { int ret; struct vm_struct *area; struct binder_proc *proc = filp->private_data; const char *failure_string; struct binder_buffer *buffer; if ((vma->vm_end - vma->vm_start) > SZ_4M) vma->vm_end = vma->vm_start + SZ_4M; 
/* binder_mmap: refuse forbidden vm_flags, force VM_DONTCOPY and drop VM_MAYWRITE, reserve a matching kernel vm area (get_vm_area), allocate the page-pointer array, map the first page, and seed the allocator with one big free buffer; half the space is reserved for async transactions. Error paths unwind via the labelled gotos at the end (next chunk line). */
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { ret = -EPERM; failure_string = "bad vm_flags"; goto err_bad_arg; } vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; mutex_lock(&binder_mmap_lock); if (proc->buffer) { ret = -EBUSY; failure_string = "already mapped"; goto err_already_mapped; } area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); if (area == NULL) { ret = -ENOMEM; failure_string = "get_vm_area"; goto err_get_vm_area_failed; } proc->buffer = area->addr; proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; mutex_unlock(&binder_mmap_lock); #ifdef CONFIG_CPU_CACHE_VIPT if (cache_is_vipt_aliasing()) { while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); vma->vm_start += PAGE_SIZE; } } #endif proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); if (proc->pages == NULL) { ret = -ENOMEM; failure_string = "alloc page array"; goto err_alloc_pages_failed; } proc->buffer_size = vma->vm_end - vma->vm_start; vma->vm_ops = &binder_vm_ops; vma->vm_private_data = proc; if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { ret = -ENOMEM; failure_string = "alloc small buf"; goto err_alloc_small_buf_failed; } buffer = proc->buffer; INIT_LIST_HEAD(&proc->buffers); list_add(&buffer->entry, &proc->buffers); buffer->free = 1; binder_insert_free_buffer(proc, buffer); proc->free_async_space = proc->buffer_size / 2; barrier(); proc->files = get_files_struct(proc->tsk); proc->vma = vma; proc->vma_vm_mm = vma->vm_mm; /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", proc->pid, 
vma->vm_start, vma->vm_end, proc->buffer);*/ return 0; err_alloc_small_buf_failed: kfree(proc->pages); proc->pages = NULL; err_alloc_pages_failed: mutex_lock(&binder_mmap_lock); vfree(proc->buffer); proc->buffer = NULL; err_get_vm_area_failed: err_already_mapped: mutex_unlock(&binder_mmap_lock); err_bad_arg: binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); return ret; } static int binder_open(struct inode *nodp, struct file *filp) { struct binder_proc *proc; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", current->group_leader->pid, current->pid); proc = kzalloc(sizeof(*proc), GFP_KERNEL); if (proc == NULL) return -ENOMEM; get_task_struct(current); proc->tsk = current; INIT_LIST_HEAD(&proc->todo); init_waitqueue_head(&proc->wait); proc->default_priority = task_nice(current); mutex_lock(&binder_lock); binder_stats_created(BINDER_STAT_PROC); hlist_add_head(&proc->proc_node, &binder_procs); proc->pid = current->group_leader->pid; INIT_LIST_HEAD(&proc->delivered_death); filp->private_data = proc; mutex_unlock(&binder_lock); if (binder_debugfs_dir_entry_proc) { char strbuf[11]; snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, binder_debugfs_dir_entry_proc, proc, &binder_proc_fops); } return 0; } static int binder_flush(struct file *filp, fl_owner_t id) { struct binder_proc *proc = filp->private_data; binder_defer_work(proc, BINDER_DEFERRED_FLUSH); return 0; } static void binder_deferred_flush(struct binder_proc *proc) { struct rb_node *n; int wake_count = 0; for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; if (thread->looper & BINDER_LOOPER_STATE_WAITING) { wake_up_interruptible(&thread->wait); wake_count++; } } wake_up_interruptible_all(&proc->wait); 
/* (cont.) binder_deferred_flush tail: log how many waiting threads were woken. binder_release() only defers the heavy teardown (BINDER_DEFERRED_RELEASE) to the workqueue. binder_deferred_release() tears down a dead process: free all threads (counting still-active transactions), then all nodes — a node that still has remote refs is moved onto binder_dead_nodes and each registered death notification is queued as BINDER_WORK_DEAD_BINDER on the ref holder's todo list. */
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_flush: %d woke %d threads\n", proc->pid, wake_count); } static int binder_release(struct inode *nodp, struct file *filp) { struct binder_proc *proc = filp->private_data; debugfs_remove(proc->debugfs_entry); binder_defer_work(proc, BINDER_DEFERRED_RELEASE); return 0; } static void binder_deferred_release(struct binder_proc *proc) { struct hlist_node *pos; struct binder_transaction *t; struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; BUG_ON(proc->vma); BUG_ON(proc->files); hlist_del(&proc->proc_node); if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "binder_release: %d context_mgr_node gone\n", proc->pid); binder_context_mgr_node = NULL; } threads = 0; active_transactions = 0; while ((n = rb_first(&proc->threads))) { struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); threads++; active_transactions += binder_free_thread(proc, thread); } nodes = 0; incoming_refs = 0; while ((n = rb_first(&proc->nodes))) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); nodes++; rb_erase(&node->rb_node, &proc->nodes); list_del_init(&node->work.entry); binder_release_work(&node->async_todo); if (hlist_empty(&node->refs)) { kfree(node); binder_stats_deleted(BINDER_STAT_NODE); } else { struct binder_ref *ref; int death = 0; node->proc = NULL; node->local_strong_refs = 0; node->local_weak_refs = 0; hlist_add_head(&node->dead_node, &binder_dead_nodes); hlist_for_each_entry(ref, pos, &node->refs, node_entry) { incoming_refs++; if (ref->death) { death++; if (list_empty(&ref->death->work.entry)) { ref->death->work.type = BINDER_WORK_DEAD_BINDER; list_add_tail(&ref->death->work.entry, &ref->proc->todo); wake_up_interruptible(&ref->proc->wait); } else BUG(); } } binder_debug(BINDER_DEBUG_DEAD_BINDER, "binder: node %d now dead, " "refs %d, death %d\n", node->debug_id, incoming_refs, 
/* (cont.) remaining teardown: delete outgoing refs, drain undelivered work lists, release leaked transaction buffers, free and unmap every remaining page, drop the page array, the kernel buffer area and the task reference, then free the proc itself. binder_deferred_func() below is the workqueue drain loop. */
death); } } outgoing_refs = 0; while ((n = rb_first(&proc->refs_by_desc))) { struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc); outgoing_refs++; binder_delete_ref(ref); } binder_release_work(&proc->todo); binder_release_work(&proc->delivered_death); buffers = 0; while ((n = rb_first(&proc->allocated_buffers))) { struct binder_buffer *buffer = rb_entry(n, struct binder_buffer, rb_node); t = buffer->transaction; if (t) { t->buffer = NULL; buffer->transaction = NULL; binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder: release proc %d, " "transaction %d, not freed\n", proc->pid, t->debug_id); /*BUG();*/ } binder_free_buf(proc, buffer); buffers++; } binder_stats_deleted(BINDER_STAT_PROC); page_count = 0; if (proc->pages) { int i; for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { if (proc->pages[i]) { void *page_addr = proc->buffer + i * PAGE_SIZE; binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder_release: %d: " "page %d at %p not freed\n", proc->pid, i, page_addr); unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); __free_page(proc->pages[i]); page_count++; } } kfree(proc->pages); vfree(proc->buffer); } put_task_struct(proc->tsk); binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_release: %d threads %d, nodes %d (ref %d), " "refs %d, active transactions %d, buffers %d, " "pages %d\n", proc->pid, threads, nodes, incoming_refs, outgoing_refs, active_transactions, buffers, page_count); kfree(proc); } static void binder_deferred_func(struct work_struct *work) { struct binder_proc *proc; struct files_struct *files; int defer; do { mutex_lock(&binder_lock); mutex_lock(&binder_deferred_lock); if (!hlist_empty(&binder_deferred_list)) { proc = hlist_entry(binder_deferred_list.first, struct binder_proc, deferred_work_node); hlist_del_init(&proc->deferred_work_node); defer = proc->deferred_work; proc->deferred_work = 0; } else { proc = NULL; defer = 0; } mutex_unlock(&binder_deferred_lock); files = NULL; if (defer & BINDER_DEFERRED_PUT_FILES) { files = 
/* (cont.) binder_deferred_func: pop one proc at a time from binder_deferred_list under binder_deferred_lock, handle the PUT_FILES / FLUSH / RELEASE bits under binder_lock, and call put_files_struct() only after the lock is dropped. binder_defer_work() sets the bits and queues the shared work item. The print_binder_* helpers below format driver state for the debugfs seq_file. */
proc->files; if (files) proc->files = NULL; } if (defer & BINDER_DEFERRED_FLUSH) binder_deferred_flush(proc); if (defer & BINDER_DEFERRED_RELEASE) binder_deferred_release(proc); /* frees proc */ mutex_unlock(&binder_lock); if (files) put_files_struct(files); } while (proc); } static DECLARE_WORK(binder_deferred_work, binder_deferred_func); static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) { mutex_lock(&binder_deferred_lock); proc->deferred_work |= defer; if (hlist_unhashed(&proc->deferred_work_node)) { hlist_add_head(&proc->deferred_work_node, &binder_deferred_list); queue_work(binder_deferred_workqueue, &binder_deferred_work); } mutex_unlock(&binder_deferred_lock); } static void print_binder_transaction(struct seq_file *m, const char *prefix, struct binder_transaction *t) { seq_printf(m, "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0, t->from ? t->from->pid : 0, t->to_proc ? t->to_proc->pid : 0, t->to_thread ? t->to_thread->pid : 0, t->code, t->flags, t->priority, t->need_reply); if (t->buffer == NULL) { seq_puts(m, " buffer free\n"); return; } if (t->buffer->target_node) seq_printf(m, " node %d", t->buffer->target_node->debug_id); seq_printf(m, " size %zd:%zd data %p\n", t->buffer->data_size, t->buffer->offsets_size, t->buffer->data); } static void print_binder_buffer(struct seq_file *m, const char *prefix, struct binder_buffer *buffer) { seq_printf(m, "%s %d: %p size %zd:%zd %s\n", prefix, buffer->debug_id, buffer->data, buffer->data_size, buffer->offsets_size, buffer->transaction ? 
/* print_binder_work dispatches on the work type; print_binder_thread dumps a thread's looper state, transaction stack and pending work, rolling back m->count when a non-verbose dump produced nothing beyond the header line. */
"active" : "delivered"); } static void print_binder_work(struct seq_file *m, const char *prefix, const char *transaction_prefix, struct binder_work *w) { struct binder_node *node; struct binder_transaction *t; switch (w->type) { case BINDER_WORK_TRANSACTION: t = container_of(w, struct binder_transaction, work); print_binder_transaction(m, transaction_prefix, t); break; case BINDER_WORK_TRANSACTION_COMPLETE: seq_printf(m, "%stransaction complete\n", prefix); break; case BINDER_WORK_NODE: node = container_of(w, struct binder_node, work); seq_printf(m, "%snode work %d: u%p c%p\n", prefix, node->debug_id, node->ptr, node->cookie); break; case BINDER_WORK_DEAD_BINDER: seq_printf(m, "%shas dead binder\n", prefix); break; case BINDER_WORK_DEAD_BINDER_AND_CLEAR: seq_printf(m, "%shas cleared dead binder\n", prefix); break; case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: seq_printf(m, "%shas cleared death notification\n", prefix); break; default: seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); break; } } static void print_binder_thread(struct seq_file *m, struct binder_thread *thread, int print_always) { struct binder_transaction *t; struct binder_work *w; size_t start_pos = m->count; size_t header_pos; seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); header_pos = m->count; t = thread->transaction_stack; while (t) { if (t->from == thread) { print_binder_transaction(m, " outgoing transaction", t); t = t->from_parent; } else if (t->to_thread == thread) { print_binder_transaction(m, " incoming transaction", t); t = t->to_parent; } else { print_binder_transaction(m, " bad transaction", t); t = NULL; } } list_for_each_entry(w, &thread->todo, entry) { print_binder_work(m, " ", " pending transaction", w); } if (!print_always && m->count == header_pos) m->count = start_pos; } static void print_binder_node(struct seq_file *m, struct binder_node *node) { struct binder_ref *ref; struct hlist_node *pos; struct binder_work *w; int count; count = 0; 
/* print_binder_node / print_binder_ref / print_binder_proc: dump a node's ref counts and holders, individual refs (flagging dead nodes), and per-proc threads/nodes/refs/buffers; like print_binder_thread, a non-verbose proc dump that added nothing past the header is rolled back. */
hlist_for_each_entry(ref, pos, &node->refs, node_entry) count++; seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d", node->debug_id, node->ptr, node->cookie, node->has_strong_ref, node->has_weak_ref, node->local_strong_refs, node->local_weak_refs, node->internal_strong_refs, count); if (count) { seq_puts(m, " proc"); hlist_for_each_entry(ref, pos, &node->refs, node_entry) seq_printf(m, " %d", ref->proc->pid); } seq_puts(m, "\n"); list_for_each_entry(w, &node->async_todo, entry) print_binder_work(m, " ", " pending async transaction", w); } static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) { seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", ref->node->debug_id, ref->strong, ref->weak, ref->death); } static void print_binder_proc(struct seq_file *m, struct binder_proc *proc, int print_all) { struct binder_work *w; struct rb_node *n; size_t start_pos = m->count; size_t header_pos; seq_printf(m, "proc %d\n", proc->pid); header_pos = m->count; for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) print_binder_thread(m, rb_entry(n, struct binder_thread, rb_node), print_all); for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); if (print_all || node->has_async_transaction) print_binder_node(m, node); } if (print_all) { for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) print_binder_ref(m, rb_entry(n, struct binder_ref, rb_node_desc)); } for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) print_binder_buffer(m, " buffer", rb_entry(n, struct binder_buffer, rb_node)); list_for_each_entry(w, &proc->todo, entry) print_binder_work(m, " ", " pending transaction", w); list_for_each_entry(w, &proc->delivered_death, entry) { seq_puts(m, " has delivered dead binder\n"); break; } if (!print_all && m->count == header_pos) m->count = start_pos; } static 
const char *binder_return_strings[] = { "BR_ERROR", "BR_OK", "BR_TRANSACTION", "BR_REPLY", "BR_ACQUIRE_RESULT", "BR_DEAD_REPLY", "BR_TRANSACTION_COMPLETE", "BR_INCREFS", "BR_ACQUIRE", "BR_RELEASE", "BR_DECREFS", "BR_ATTEMPT_ACQUIRE", "BR_NOOP", "BR_SPAWN_LOOPER", "BR_FINISHED", "BR_DEAD_BINDER", "BR_CLEAR_DEATH_NOTIFICATION_DONE", "BR_FAILED_REPLY" }; static const char *binder_command_strings[] = { "BC_TRANSACTION", "BC_REPLY", "BC_ACQUIRE_RESULT", "BC_FREE_BUFFER", "BC_INCREFS", "BC_ACQUIRE", "BC_RELEASE", "BC_DECREFS", "BC_INCREFS_DONE", "BC_ACQUIRE_DONE", "BC_ATTEMPT_ACQUIRE", "BC_REGISTER_LOOPER", "BC_ENTER_LOOPER", "BC_EXIT_LOOPER", "BC_REQUEST_DEATH_NOTIFICATION", "BC_CLEAR_DEATH_NOTIFICATION", "BC_DEAD_BINDER_DONE" }; static const char *binder_objstat_strings[] = { "proc", "thread", "node", "ref", "death", "transaction", "transaction_complete" }; static void print_binder_stats(struct seq_file *m, const char *prefix, struct binder_stats *stats) { int i; BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != ARRAY_SIZE(binder_command_strings)); for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { if (stats->bc[i]) seq_printf(m, "%s%s: %d\n", prefix, binder_command_strings[i], stats->bc[i]); } BUILD_BUG_ON(ARRAY_SIZE(stats->br) != ARRAY_SIZE(binder_return_strings)); for (i = 0; i < ARRAY_SIZE(stats->br); i++) { if (stats->br[i]) seq_printf(m, "%s%s: %d\n", prefix, binder_return_strings[i], stats->br[i]); } BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(binder_objstat_strings)); BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(stats->obj_deleted)); for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { if (stats->obj_created[i] || stats->obj_deleted[i]) seq_printf(m, "%s%s: active %d total %d\n", prefix, binder_objstat_strings[i], stats->obj_created[i] - stats->obj_deleted[i], stats->obj_created[i]); } } static void print_binder_proc_stats(struct seq_file *m, struct binder_proc *proc) { struct binder_work *w; struct rb_node *n; int count, strong, weak; seq_printf(m, 
"proc %d\n", proc->pid); count = 0; for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) count++; seq_printf(m, " threads: %d\n", count); seq_printf(m, " requested threads: %d+%d/%d\n" " ready threads %d\n" " free async space %zd\n", proc->requested_threads, proc->requested_threads_started, proc->max_threads, proc->ready_threads, proc->free_async_space); count = 0; for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) count++; seq_printf(m, " nodes: %d\n", count); count = 0; strong = 0; weak = 0; for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc); count++; strong += ref->strong; weak += ref->weak; } seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); count = 0; for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) count++; seq_printf(m, " buffers: %d\n", count); count = 0; list_for_each_entry(w, &proc->todo, entry) { switch (w->type) { case BINDER_WORK_TRANSACTION: count++; break; default: break; } } seq_printf(m, " pending transactions: %d\n", count); print_binder_stats(m, " ", &proc->stats); } static int binder_state_show(struct seq_file *m, void *unused) { struct binder_proc *proc; struct hlist_node *pos; struct binder_node *node; int do_lock = !binder_debug_no_lock; if (do_lock) mutex_lock(&binder_lock); seq_puts(m, "binder state:\n"); if (!hlist_empty(&binder_dead_nodes)) seq_puts(m, "dead nodes:\n"); hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) print_binder_node(m, node); hlist_for_each_entry(proc, pos, &binder_procs, proc_node) print_binder_proc(m, proc, 1); if (do_lock) mutex_unlock(&binder_lock); return 0; } static int binder_stats_show(struct seq_file *m, void *unused) { struct binder_proc *proc; struct hlist_node *pos; int do_lock = !binder_debug_no_lock; if (do_lock) mutex_lock(&binder_lock); seq_puts(m, "binder stats:\n"); print_binder_stats(m, "", &binder_stats); hlist_for_each_entry(proc, pos, 
&binder_procs, proc_node) print_binder_proc_stats(m, proc); if (do_lock) mutex_unlock(&binder_lock); return 0; } static int binder_transactions_show(struct seq_file *m, void *unused) { struct binder_proc *proc; struct hlist_node *pos; int do_lock = !binder_debug_no_lock; if (do_lock) mutex_lock(&binder_lock); seq_puts(m, "binder transactions:\n"); hlist_for_each_entry(proc, pos, &binder_procs, proc_node) print_binder_proc(m, proc, 0); if (do_lock) mutex_unlock(&binder_lock); return 0; } static int binder_proc_show(struct seq_file *m, void *unused) { struct binder_proc *itr; struct binder_proc *proc = m->private; struct hlist_node *pos; int do_lock = !binder_debug_no_lock; bool valid_proc = false; if (do_lock) mutex_lock(&binder_lock); hlist_for_each_entry(itr, pos, &binder_procs, proc_node) { if (itr == proc) { valid_proc = true; break; } } if (valid_proc) { seq_puts(m, "binder proc state:\n"); print_binder_proc(m, proc, 1); } if (do_lock) mutex_unlock(&binder_lock); return 0; } static void print_binder_transaction_log_entry(struct seq_file *m, struct binder_transaction_log_entry *e) { seq_printf(m, "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", e->debug_id, (e->call_type == 2) ? "reply" : ((e->call_type == 1) ? 
"async" : "call "), e->from_proc, e->from_thread, e->to_proc, e->to_thread, e->to_node, e->target_handle, e->data_size, e->offsets_size); } static int binder_transaction_log_show(struct seq_file *m, void *unused) { struct binder_transaction_log *log = m->private; int i; if (log->full) { for (i = log->next; i < ARRAY_SIZE(log->entry); i++) print_binder_transaction_log_entry(m, &log->entry[i]); } for (i = 0; i < log->next; i++) print_binder_transaction_log_entry(m, &log->entry[i]); return 0; } static const struct file_operations binder_fops = { .owner = THIS_MODULE, .poll = binder_poll, .unlocked_ioctl = binder_ioctl, .mmap = binder_mmap, .open = binder_open, .flush = binder_flush, .release = binder_release, }; static struct miscdevice binder_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "binder", .fops = &binder_fops }; BINDER_DEBUG_ENTRY(state); BINDER_DEBUG_ENTRY(stats); BINDER_DEBUG_ENTRY(transactions); BINDER_DEBUG_ENTRY(transaction_log); static int __init binder_init(void) { int ret; binder_deferred_workqueue = create_singlethread_workqueue("binder"); if (!binder_deferred_workqueue) return -ENOMEM; binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); if (binder_debugfs_dir_entry_root) binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", binder_debugfs_dir_entry_root); ret = misc_register(&binder_miscdev); if (binder_debugfs_dir_entry_root) { debugfs_create_file("state", S_IRUGO, binder_debugfs_dir_entry_root, NULL, &binder_state_fops); debugfs_create_file("stats", S_IRUGO, binder_debugfs_dir_entry_root, NULL, &binder_stats_fops); debugfs_create_file("transactions", S_IRUGO, binder_debugfs_dir_entry_root, NULL, &binder_transactions_fops); debugfs_create_file("transaction_log", S_IRUGO, binder_debugfs_dir_entry_root, &binder_transaction_log, &binder_transaction_log_fops); debugfs_create_file("failed_transaction_log", S_IRUGO, binder_debugfs_dir_entry_root, &binder_transaction_log_failed, &binder_transaction_log_fops); } return ret; } 
device_initcall(binder_init); MODULE_LICENSE("GPL v2");
gpl-2.0
lg-devs/android_kernel_lge_msm8952
drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
11
153693
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <net/ip.h> #include <linux/genalloc.h> /* gen_pool_alloc() */ #include <linux/io.h> #include <linux/ratelimit.h> #include <linux/msm-bus.h> #include <linux/msm-bus-board.h> #include "ipa_i.h" #define IPA_V1_CLK_RATE (92.31 * 1000 * 1000UL) #define IPA_V1_1_CLK_RATE (100 * 1000 * 1000UL) #define IPA_V2_0_CLK_RATE_SVS (75 * 1000 * 1000UL) #define IPA_V2_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL) #define IPA_V2_0_CLK_RATE_TURBO (200 * 1000 * 1000UL) #define IPA_V1_MAX_HOLB_TMR_VAL (512 - 1) #define IPA_V2_0_MAX_HOLB_TMR_VAL (65536 - 1) #define IPA_V2_5_MAX_HOLB_TMR_VAL (4294967296 - 1) #define IPA_V2_6L_MAX_HOLB_TMR_VAL IPA_V2_5_MAX_HOLB_TMR_VAL #define IPA_V2_0_BW_THRESHOLD_TURBO_MBPS (1000) #define IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS (600) /* Max pipes + ICs for TAG process */ #define IPA_TAG_MAX_DESC (IPA_MAX_NUM_PIPES + 6) #define IPA_TAG_SLEEP_MIN_USEC (1000) #define IPA_TAG_SLEEP_MAX_USEC (2000) #define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ) #define IPA_BCR_REG_VAL (0x001FFF7F) #define IPA_AGGR_GRAN_MIN (1) #define IPA_AGGR_GRAN_MAX (32) #define IPA_EOT_COAL_GRAN_MIN (1) #define IPA_EOT_COAL_GRAN_MAX (16) #define IPA_AGGR_BYTE_LIMIT (\ IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \ IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT) #define IPA_AGGR_PKT_LIMIT (\ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT) static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0, IPA_OFFSET_MEQ32_1, -1 }; static const 
int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0, IPA_OFFSET_MEQ128_1, -1 }; static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0, IPA_IHL_OFFSET_RANGE16_1, -1 }; static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0, IPA_IHL_OFFSET_MEQ32_1, -1 }; #define IPA_1_1 (0) #define IPA_2_0 (1) #define IPA_2_6L (2) #define INVALID_EP_MAPPING_INDEX (-1) static const int ep_mapping[3][IPA_CLIENT_MAX] = { [IPA_1_1][IPA_CLIENT_HSIC1_PROD] = 19, [IPA_1_1][IPA_CLIENT_WLAN1_PROD] = -1, [IPA_1_1][IPA_CLIENT_HSIC2_PROD] = 12, [IPA_1_1][IPA_CLIENT_USB2_PROD] = 12, [IPA_1_1][IPA_CLIENT_HSIC3_PROD] = 13, [IPA_1_1][IPA_CLIENT_USB3_PROD] = 13, [IPA_1_1][IPA_CLIENT_HSIC4_PROD] = 0, [IPA_1_1][IPA_CLIENT_USB4_PROD] = 0, [IPA_1_1][IPA_CLIENT_HSIC5_PROD] = -1, [IPA_1_1][IPA_CLIENT_USB_PROD] = 11, [IPA_1_1][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = 15, [IPA_1_1][IPA_CLIENT_A2_EMBEDDED_PROD] = 8, [IPA_1_1][IPA_CLIENT_A2_TETHERED_PROD] = 6, [IPA_1_1][IPA_CLIENT_APPS_LAN_WAN_PROD] = 2, [IPA_1_1][IPA_CLIENT_APPS_CMD_PROD] = 1, [IPA_1_1][IPA_CLIENT_ODU_PROD] = -1, [IPA_1_1][IPA_CLIENT_MHI_PROD] = -1, [IPA_1_1][IPA_CLIENT_Q6_LAN_PROD] = 5, [IPA_1_1][IPA_CLIENT_Q6_WAN_PROD] = -1, [IPA_1_1][IPA_CLIENT_Q6_CMD_PROD] = -1, [IPA_1_1][IPA_CLIENT_HSIC1_CONS] = 14, [IPA_1_1][IPA_CLIENT_WLAN1_CONS] = -1, [IPA_1_1][IPA_CLIENT_HSIC2_CONS] = 16, [IPA_1_1][IPA_CLIENT_USB2_CONS] = 16, [IPA_1_1][IPA_CLIENT_WLAN2_CONS] = -1, [IPA_1_1][IPA_CLIENT_HSIC3_CONS] = 17, [IPA_1_1][IPA_CLIENT_USB3_CONS] = 17, [IPA_1_1][IPA_CLIENT_WLAN3_CONS] = -1, [IPA_1_1][IPA_CLIENT_HSIC4_CONS] = 18, [IPA_1_1][IPA_CLIENT_USB4_CONS] = 18, [IPA_1_1][IPA_CLIENT_WLAN4_CONS] = -1, [IPA_1_1][IPA_CLIENT_HSIC5_CONS] = -1, [IPA_1_1][IPA_CLIENT_USB_CONS] = 10, [IPA_1_1][IPA_CLIENT_USB_DPL_CONS] = -1, [IPA_1_1][IPA_CLIENT_A2_EMBEDDED_CONS] = 9, [IPA_1_1][IPA_CLIENT_A2_TETHERED_CONS] = 7, [IPA_1_1][IPA_CLIENT_A5_LAN_WAN_CONS] = 3, [IPA_1_1][IPA_CLIENT_APPS_LAN_CONS] = -1, [IPA_1_1][IPA_CLIENT_APPS_WAN_CONS] = -1, 
[IPA_1_1][IPA_CLIENT_ODU_EMB_CONS] = -1, [IPA_1_1][IPA_CLIENT_ODU_TETH_CONS] = -1, [IPA_1_1][IPA_CLIENT_MHI_CONS] = -1, [IPA_1_1][IPA_CLIENT_Q6_LAN_CONS] = 4, [IPA_1_1][IPA_CLIENT_Q6_WAN_CONS] = -1, [IPA_2_0][IPA_CLIENT_HSIC1_PROD] = 12, [IPA_2_0][IPA_CLIENT_WLAN1_PROD] = 18, [IPA_2_0][IPA_CLIENT_HSIC2_PROD] = -1, [IPA_2_0][IPA_CLIENT_USB2_PROD] = 12, [IPA_2_0][IPA_CLIENT_HSIC3_PROD] = -1, [IPA_2_0][IPA_CLIENT_USB3_PROD] = 13, [IPA_2_0][IPA_CLIENT_HSIC4_PROD] = -1, [IPA_2_0][IPA_CLIENT_USB4_PROD] = 0, [IPA_2_0][IPA_CLIENT_HSIC5_PROD] = -1, [IPA_2_0][IPA_CLIENT_USB_PROD] = 11, [IPA_2_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = -1, [IPA_2_0][IPA_CLIENT_A2_EMBEDDED_PROD] = -1, [IPA_2_0][IPA_CLIENT_A2_TETHERED_PROD] = -1, [IPA_2_0][IPA_CLIENT_APPS_LAN_WAN_PROD] = 4, [IPA_2_0][IPA_CLIENT_APPS_CMD_PROD] = 3, [IPA_2_0][IPA_CLIENT_ODU_PROD] = 12, [IPA_2_0][IPA_CLIENT_MHI_PROD] = 18, [IPA_2_0][IPA_CLIENT_Q6_LAN_PROD] = 6, [IPA_2_0][IPA_CLIENT_Q6_WAN_PROD] = -1, [IPA_2_0][IPA_CLIENT_Q6_CMD_PROD] = 7, [IPA_2_0][IPA_CLIENT_Q6_DECOMP_PROD] = -1, [IPA_2_0][IPA_CLIENT_Q6_DECOMP2_PROD] = -1, [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = 12, [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = 19, /* Only for test purpose */ [IPA_2_0][IPA_CLIENT_TEST_PROD] = 19, [IPA_2_0][IPA_CLIENT_TEST1_PROD] = 19, [IPA_2_0][IPA_CLIENT_TEST2_PROD] = 12, [IPA_2_0][IPA_CLIENT_TEST3_PROD] = 11, [IPA_2_0][IPA_CLIENT_TEST4_PROD] = 0, [IPA_2_0][IPA_CLIENT_HSIC1_CONS] = 13, [IPA_2_0][IPA_CLIENT_WLAN1_CONS] = 17, [IPA_2_0][IPA_CLIENT_HSIC2_CONS] = -1, [IPA_2_0][IPA_CLIENT_USB2_CONS] = -1, [IPA_2_0][IPA_CLIENT_WLAN2_CONS] = 16, [IPA_2_0][IPA_CLIENT_HSIC3_CONS] = -1, [IPA_2_0][IPA_CLIENT_USB3_CONS] = -1, [IPA_2_0][IPA_CLIENT_WLAN3_CONS] = 14, [IPA_2_0][IPA_CLIENT_HSIC4_CONS] = -1, [IPA_2_0][IPA_CLIENT_USB4_CONS] = -1, [IPA_2_0][IPA_CLIENT_WLAN4_CONS] = 19, [IPA_2_0][IPA_CLIENT_HSIC5_CONS] = -1, [IPA_2_0][IPA_CLIENT_USB_CONS] = 15, [IPA_2_0][IPA_CLIENT_USB_DPL_CONS] = 0, [IPA_2_0][IPA_CLIENT_A2_EMBEDDED_CONS] = -1, 
[IPA_2_0][IPA_CLIENT_A2_TETHERED_CONS] = -1, [IPA_2_0][IPA_CLIENT_A5_LAN_WAN_CONS] = -1, [IPA_2_0][IPA_CLIENT_APPS_LAN_CONS] = 2, [IPA_2_0][IPA_CLIENT_APPS_WAN_CONS] = 5, [IPA_2_0][IPA_CLIENT_ODU_EMB_CONS] = 13, [IPA_2_0][IPA_CLIENT_ODU_TETH_CONS] = 1, [IPA_2_0][IPA_CLIENT_MHI_CONS] = 17, [IPA_2_0][IPA_CLIENT_Q6_LAN_CONS] = 8, [IPA_2_0][IPA_CLIENT_Q6_WAN_CONS] = 9, [IPA_2_0][IPA_CLIENT_Q6_DUN_CONS] = -1, [IPA_2_0][IPA_CLIENT_Q6_DECOMP_CONS] = -1, [IPA_2_0][IPA_CLIENT_Q6_DECOMP2_CONS] = -1, [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = 13, [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = 16, [IPA_2_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = 10, /* Only for test purpose */ [IPA_2_0][IPA_CLIENT_TEST_CONS] = 1, [IPA_2_0][IPA_CLIENT_TEST1_CONS] = 1, [IPA_2_0][IPA_CLIENT_TEST2_CONS] = 16, [IPA_2_0][IPA_CLIENT_TEST3_CONS] = 13, [IPA_2_0][IPA_CLIENT_TEST4_CONS] = 15, [IPA_2_6L][IPA_CLIENT_HSIC1_PROD] = -1, [IPA_2_6L][IPA_CLIENT_WLAN1_PROD] = -1, [IPA_2_6L][IPA_CLIENT_HSIC2_PROD] = -1, [IPA_2_6L][IPA_CLIENT_USB2_PROD] = -1, [IPA_2_6L][IPA_CLIENT_HSIC3_PROD] = -1, [IPA_2_6L][IPA_CLIENT_USB3_PROD] = -1, [IPA_2_6L][IPA_CLIENT_HSIC4_PROD] = -1, [IPA_2_6L][IPA_CLIENT_USB4_PROD] = -1, [IPA_2_6L][IPA_CLIENT_HSIC5_PROD] = -1, [IPA_2_6L][IPA_CLIENT_USB_PROD] = 1, [IPA_2_6L][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = -1, [IPA_2_6L][IPA_CLIENT_A2_EMBEDDED_PROD] = -1, [IPA_2_6L][IPA_CLIENT_A2_TETHERED_PROD] = -1, [IPA_2_6L][IPA_CLIENT_APPS_LAN_WAN_PROD] = 4, [IPA_2_6L][IPA_CLIENT_APPS_CMD_PROD] = 3, [IPA_2_6L][IPA_CLIENT_ODU_PROD] = -1, [IPA_2_6L][IPA_CLIENT_MHI_PROD] = -1, [IPA_2_6L][IPA_CLIENT_Q6_LAN_PROD] = 6, [IPA_2_6L][IPA_CLIENT_Q6_WAN_PROD] = -1, [IPA_2_6L][IPA_CLIENT_Q6_CMD_PROD] = 7, [IPA_2_6L][IPA_CLIENT_Q6_DECOMP_PROD] = 11, [IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_PROD] = 13, [IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = -1, [IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = -1, /* Only for test purpose */ [IPA_2_6L][IPA_CLIENT_TEST_PROD] = 11, [IPA_2_6L][IPA_CLIENT_TEST1_PROD] = 11, 
[IPA_2_6L][IPA_CLIENT_TEST2_PROD] = 12, [IPA_2_6L][IPA_CLIENT_TEST3_PROD] = 13, [IPA_2_6L][IPA_CLIENT_TEST4_PROD] = 14, [IPA_2_6L][IPA_CLIENT_HSIC1_CONS] = -1, [IPA_2_6L][IPA_CLIENT_WLAN1_CONS] = -1, [IPA_2_6L][IPA_CLIENT_HSIC2_CONS] = -1, [IPA_2_6L][IPA_CLIENT_USB2_CONS] = -1, [IPA_2_6L][IPA_CLIENT_WLAN2_CONS] = -1, [IPA_2_6L][IPA_CLIENT_HSIC3_CONS] = -1, [IPA_2_6L][IPA_CLIENT_USB3_CONS] = -1, [IPA_2_6L][IPA_CLIENT_WLAN3_CONS] = -1, [IPA_2_6L][IPA_CLIENT_HSIC4_CONS] = -1, [IPA_2_6L][IPA_CLIENT_USB4_CONS] = -1, [IPA_2_6L][IPA_CLIENT_WLAN4_CONS] = -1, [IPA_2_6L][IPA_CLIENT_HSIC5_CONS] = -1, [IPA_2_6L][IPA_CLIENT_USB_CONS] = 0, [IPA_2_6L][IPA_CLIENT_USB_DPL_CONS] = 10, [IPA_2_6L][IPA_CLIENT_A2_EMBEDDED_CONS] = -1, [IPA_2_6L][IPA_CLIENT_A2_TETHERED_CONS] = -1, [IPA_2_6L][IPA_CLIENT_A5_LAN_WAN_CONS] = -1, [IPA_2_6L][IPA_CLIENT_APPS_LAN_CONS] = 2, [IPA_2_6L][IPA_CLIENT_APPS_WAN_CONS] = 5, [IPA_2_6L][IPA_CLIENT_ODU_EMB_CONS] = -1, [IPA_2_6L][IPA_CLIENT_ODU_TETH_CONS] = -1, [IPA_2_6L][IPA_CLIENT_MHI_CONS] = -1, [IPA_2_6L][IPA_CLIENT_Q6_LAN_CONS] = 8, [IPA_2_6L][IPA_CLIENT_Q6_WAN_CONS] = 9, [IPA_2_6L][IPA_CLIENT_Q6_DUN_CONS] = -1, [IPA_2_6L][IPA_CLIENT_Q6_DECOMP_CONS] = 12, [IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_CONS] = 14, [IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = -1, [IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = -1, [IPA_2_6L][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = -1, /* Only for test purpose */ [IPA_2_6L][IPA_CLIENT_TEST_CONS] = 15, [IPA_2_6L][IPA_CLIENT_TEST1_CONS] = 15, [IPA_2_6L][IPA_CLIENT_TEST2_CONS] = 0, [IPA_2_6L][IPA_CLIENT_TEST3_CONS] = 1, [IPA_2_6L][IPA_CLIENT_TEST4_CONS] = 10, }; static struct msm_bus_vectors ipa_init_vectors_v1_1[] = { { .src = MSM_BUS_MASTER_IPA, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_BAM_DMA, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_BAM_DMA, .dst = MSM_BUS_SLAVE_OCIMEM, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors ipa_init_vectors_v2_0[] = { { .src = 
MSM_BUS_MASTER_IPA, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_IPA, .dst = MSM_BUS_SLAVE_OCIMEM, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors ipa_max_perf_vectors_v1_1[] = { { .src = MSM_BUS_MASTER_IPA, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 50000000, .ib = 960000000, }, { .src = MSM_BUS_MASTER_BAM_DMA, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 50000000, .ib = 960000000, }, { .src = MSM_BUS_MASTER_BAM_DMA, .dst = MSM_BUS_SLAVE_OCIMEM, .ab = 50000000, .ib = 960000000, }, }; static struct msm_bus_vectors ipa_nominal_perf_vectors_v2_0[] = { { .src = MSM_BUS_MASTER_IPA, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 100000000, .ib = 1300000000, }, { .src = MSM_BUS_MASTER_IPA, .dst = MSM_BUS_SLAVE_OCIMEM, .ab = 100000000, .ib = 1300000000, }, }; static struct msm_bus_paths ipa_usecases_v1_1[] = { { ARRAY_SIZE(ipa_init_vectors_v1_1), ipa_init_vectors_v1_1, }, { ARRAY_SIZE(ipa_max_perf_vectors_v1_1), ipa_max_perf_vectors_v1_1, }, }; static struct msm_bus_paths ipa_usecases_v2_0[] = { { ARRAY_SIZE(ipa_init_vectors_v2_0), ipa_init_vectors_v2_0, }, { ARRAY_SIZE(ipa_nominal_perf_vectors_v2_0), ipa_nominal_perf_vectors_v2_0, }, }; static struct msm_bus_scale_pdata ipa_bus_client_pdata_v1_1 = { ipa_usecases_v1_1, ARRAY_SIZE(ipa_usecases_v1_1), .name = "ipa", }; static struct msm_bus_scale_pdata ipa_bus_client_pdata_v2_0 = { ipa_usecases_v2_0, ARRAY_SIZE(ipa_usecases_v2_0), .name = "ipa", }; void ipa_active_clients_lock(void) { unsigned long flags; mutex_lock(&ipa_ctx->ipa_active_clients.mutex); spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags); ipa_ctx->ipa_active_clients.mutex_locked = true; spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags); } int ipa_active_clients_trylock(unsigned long *flags) { spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, *flags); if (ipa_ctx->ipa_active_clients.mutex_locked) { spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, *flags); return 0; } return 1; } void 
ipa_active_clients_trylock_unlock(unsigned long *flags) { spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, *flags); } void ipa_active_clients_unlock(void) { unsigned long flags; spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags); ipa_ctx->ipa_active_clients.mutex_locked = false; spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags); mutex_unlock(&ipa_ctx->ipa_active_clients.mutex); } /** * ipa_get_clients_from_rm_resource() - get IPA clients which are related to an * IPA_RM resource * * @resource: [IN] IPA Resource Manager resource * @clients: [OUT] Empty array which will contain the list of clients. The * caller must initialize this array. * * Return codes: 0 on success, negative on failure. */ int ipa_get_clients_from_rm_resource( enum ipa_rm_resource_name resource, struct ipa_client_names *clients) { int i = 0; if (resource < 0 || resource >= IPA_RM_RESOURCE_MAX || !clients) { IPAERR("Bad parameters\n"); return -EINVAL; } switch (resource) { case IPA_RM_RESOURCE_USB_CONS: clients->names[i++] = IPA_CLIENT_USB_CONS; break; case IPA_RM_RESOURCE_HSIC_CONS: clients->names[i++] = IPA_CLIENT_HSIC1_CONS; break; case IPA_RM_RESOURCE_WLAN_CONS: clients->names[i++] = IPA_CLIENT_WLAN1_CONS; clients->names[i++] = IPA_CLIENT_WLAN2_CONS; clients->names[i++] = IPA_CLIENT_WLAN3_CONS; clients->names[i++] = IPA_CLIENT_WLAN4_CONS; break; case IPA_RM_RESOURCE_MHI_CONS: clients->names[i++] = IPA_CLIENT_MHI_CONS; break; case IPA_RM_RESOURCE_USB_PROD: clients->names[i++] = IPA_CLIENT_USB_PROD; break; case IPA_RM_RESOURCE_HSIC_PROD: clients->names[i++] = IPA_CLIENT_HSIC1_PROD; break; case IPA_RM_RESOURCE_MHI_PROD: clients->names[i++] = IPA_CLIENT_MHI_PROD; break; default: break; } clients->length = i; return 0; } /** * ipa_should_pipe_be_suspended() - returns true when the client's pipe should * be suspended during a power save scenario. False otherwise. 
* * @client: [IN] IPA client */ bool ipa_should_pipe_be_suspended(enum ipa_client_type client) { struct ipa_ep_context *ep; int ipa_ep_idx; ipa_ep_idx = ipa2_get_ep_mapping(client); if (ipa_ep_idx == -1) { IPAERR("Invalid client.\n"); WARN_ON(1); return false; } ep = &ipa_ctx->ep[ipa_ep_idx]; if (ep->keep_ipa_awake) return false; if (client == IPA_CLIENT_USB_CONS || client == IPA_CLIENT_MHI_CONS || client == IPA_CLIENT_HSIC1_CONS || client == IPA_CLIENT_WLAN1_CONS || client == IPA_CLIENT_WLAN2_CONS || client == IPA_CLIENT_WLAN3_CONS || client == IPA_CLIENT_WLAN4_CONS) return true; return false; } /** * ipa_suspend_resource_sync() - suspend client endpoints related to the IPA_RM * resource and decrement active clients counter, which may result in clock * gating of IPA clocks. * * @resource: [IN] IPA Resource Manager resource * * Return codes: 0 on success, negative on failure. */ int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource) { struct ipa_client_names clients; int res; int index; struct ipa_ep_cfg_ctrl suspend; enum ipa_client_type client; int ipa_ep_idx; bool pipe_suspended = false; memset(&clients, 0, sizeof(clients)); res = ipa_get_clients_from_rm_resource(resource, &clients); if (res) { IPAERR("Bad params.\n"); return res; } for (index = 0; index < clients.length; index++) { client = clients.names[index]; ipa_ep_idx = ipa2_get_ep_mapping(client); if (ipa_ep_idx == -1) { IPAERR("Invalid client.\n"); res = -EINVAL; continue; } ipa_ctx->resume_on_connect[client] = false; if (ipa_ctx->ep[ipa_ep_idx].client == client && ipa_should_pipe_be_suspended(client)) { if (ipa_ctx->ep[ipa_ep_idx].valid) { /* suspend endpoint */ memset(&suspend, 0, sizeof(suspend)); suspend.ipa_ep_suspend = true; ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend); pipe_suspended = true; } } } /* Sleep ~1 msec */ if (pipe_suspended) usleep_range(1000, 2000); /* before gating IPA clocks do TAG process */ ipa_ctx->tag_process_before_gating = true; ipa_dec_client_disable_clks(); return 0; } 
/** * ipa_suspend_resource_no_block() - suspend client endpoints related to the * IPA_RM resource and decrement active clients counter. This function is * guaranteed to avoid sleeping. * * @resource: [IN] IPA Resource Manager resource * * Return codes: 0 on success, negative on failure. */ int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource) { int res; struct ipa_client_names clients; int index; enum ipa_client_type client; struct ipa_ep_cfg_ctrl suspend; int ipa_ep_idx; unsigned long flags; if (ipa_active_clients_trylock(&flags) == 0) return -EPERM; if (ipa_ctx->ipa_active_clients.cnt == 1) { res = -EPERM; goto bail; } memset(&clients, 0, sizeof(clients)); res = ipa_get_clients_from_rm_resource(resource, &clients); if (res) { IPAERR("ipa_get_clients_from_rm_resource() failed, name = %d.\n" , resource); goto bail; } for (index = 0; index < clients.length; index++) { client = clients.names[index]; ipa_ep_idx = ipa2_get_ep_mapping(client); if (ipa_ep_idx == -1) { IPAERR("Invalid client.\n"); res = -EINVAL; continue; } ipa_ctx->resume_on_connect[client] = false; if (ipa_ctx->ep[ipa_ep_idx].client == client && ipa_should_pipe_be_suspended(client)) { if (ipa_ctx->ep[ipa_ep_idx].valid) { /* suspend endpoint */ memset(&suspend, 0, sizeof(suspend)); suspend.ipa_ep_suspend = true; ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend); } } } if (res == 0) { ipa_ctx->ipa_active_clients.cnt--; IPADBG("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt); } bail: ipa_active_clients_trylock_unlock(&flags); return res; } /** * ipa_resume_resource() - resume client endpoints related to the IPA_RM * resource. * * @resource: [IN] IPA Resource Manager resource * * Return codes: 0 on success, negative on failure. 
*/ int ipa_resume_resource(enum ipa_rm_resource_name resource) { struct ipa_client_names clients; int res; int index; struct ipa_ep_cfg_ctrl suspend; enum ipa_client_type client; int ipa_ep_idx; memset(&clients, 0, sizeof(clients)); res = ipa_get_clients_from_rm_resource(resource, &clients); if (res) { IPAERR("ipa_get_clients_from_rm_resource() failed.\n"); return res; } for (index = 0; index < clients.length; index++) { client = clients.names[index]; ipa_ep_idx = ipa2_get_ep_mapping(client); if (ipa_ep_idx == -1) { IPAERR("Invalid client.\n"); res = -EINVAL; continue; } /* * The related ep, will be resumed on connect * while its resource is granted */ ipa_ctx->resume_on_connect[client] = true; IPADBG("%d will be resumed on connect.\n", client); if (ipa_ctx->ep[ipa_ep_idx].client == client && ipa_should_pipe_be_suspended(client)) { if (ipa_ctx->ep[ipa_ep_idx].valid) { memset(&suspend, 0, sizeof(suspend)); suspend.ipa_ep_suspend = false; ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend); } } } return res; } /* read how much SRAM is available for SW use * In case of IPAv2.0 this will also supply an offset from * which we can start write */ void _ipa_sram_settings_read_v1_1(void) { ipa_ctx->smem_restricted_bytes = 0; ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST_v1_1); ipa_ctx->smem_reqd_sz = IPA_MEM_v1_RAM_END_OFST; ipa_ctx->hdr_tbl_lcl = 1; ipa_ctx->ip4_rt_tbl_lcl = 0; ipa_ctx->ip6_rt_tbl_lcl = 0; ipa_ctx->ip4_flt_tbl_lcl = 1; ipa_ctx->ip6_flt_tbl_lcl = 1; } void _ipa_sram_settings_read_v2_0(void) { ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST_v2_0, IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0, IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0); ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST_v2_0, IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0, IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0); ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst); ipa_ctx->hdr_tbl_lcl = 0; 
ipa_ctx->ip4_rt_tbl_lcl = 0; ipa_ctx->ip6_rt_tbl_lcl = 0; ipa_ctx->ip4_flt_tbl_lcl = 0; ipa_ctx->ip6_flt_tbl_lcl = 0; } void _ipa_sram_settings_read_v2_5(void) { ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST_v2_0, IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0, IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0); ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST_v2_0, IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0, IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0); ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst); ipa_ctx->hdr_tbl_lcl = 0; ipa_ctx->hdr_proc_ctx_tbl_lcl = 1; /* * when proc ctx table is located in internal memory, * modem entries resides first. */ if (ipa_ctx->hdr_proc_ctx_tbl_lcl) { ipa_ctx->hdr_proc_ctx_tbl.start_offset = IPA_MEM_PART(modem_hdr_proc_ctx_size); } ipa_ctx->ip4_rt_tbl_lcl = 0; ipa_ctx->ip6_rt_tbl_lcl = 0; ipa_ctx->ip4_flt_tbl_lcl = 0; ipa_ctx->ip6_flt_tbl_lcl = 0; } void _ipa_sram_settings_read_v2_6L(void) { ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST_v2_0, IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0, IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0); ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST_v2_0, IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0, IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0); ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst); ipa_ctx->hdr_tbl_lcl = 0; ipa_ctx->ip4_rt_tbl_lcl = 0; ipa_ctx->ip6_rt_tbl_lcl = 0; ipa_ctx->ip4_flt_tbl_lcl = 0; ipa_ctx->ip6_flt_tbl_lcl = 0; } void _ipa_cfg_route_v1_1(struct ipa_route *route) { u32 reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, route->route_dis, IPA_ROUTE_ROUTE_DIS_SHFT, IPA_ROUTE_ROUTE_DIS_BMSK); IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe, IPA_ROUTE_ROUTE_DEF_PIPE_SHFT, IPA_ROUTE_ROUTE_DEF_PIPE_BMSK); IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table, IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT, IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK); 
IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst, IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT, IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val); } void _ipa_cfg_route_v2_0(struct ipa_route *route) { u32 reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, route->route_dis, IPA_ROUTE_ROUTE_DIS_SHFT, IPA_ROUTE_ROUTE_DIS_BMSK); IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe, IPA_ROUTE_ROUTE_DEF_PIPE_SHFT, IPA_ROUTE_ROUTE_DEF_PIPE_BMSK); IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table, IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT, IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK); IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst, IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT, IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK); IPA_SETFIELD_IN_REG(reg_val, route->route_frag_def_pipe, IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT, IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val); } /** * ipa_cfg_route() - configure IPA route * @route: IPA route * * Return codes: * 0: success */ int ipa_cfg_route(struct ipa_route *route) { IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n", route->route_dis, route->route_def_pipe, route->route_def_hdr_table); IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n", route->route_def_hdr_ofst, route->route_frag_def_pipe); ipa_inc_client_enable_clks(); ipa_ctx->ctrl->ipa_cfg_route(route); ipa_dec_client_disable_clks(); return 0; } /** * ipa_cfg_filter() - configure filter * @disable: disable value * * Return codes: * 0: success */ int ipa_cfg_filter(u32 disable) { u32 ipa_filter_ofst = IPA_FILTER_OFST_v1_1; ipa_inc_client_enable_clks(); ipa_write_reg(ipa_ctx->mmio, ipa_filter_ofst, IPA_SETFIELD(!disable, IPA_FILTER_FILTER_EN_SHFT, IPA_FILTER_FILTER_EN_BMSK)); ipa_dec_client_disable_clks(); return 0; } /** * ipa_init_hw() - initialize HW * * Return codes: * 0: success */ int ipa_init_hw(void) { u32 ipa_version = 0; /* do soft reset of IPA */ ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 
1);
	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0);

	/* enable IPA */
	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 1);

	/* Read IPA version and make sure we have access to the registers */
	ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST);
	if (ipa_version == 0)
		return -EFAULT;

	if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
		/* set ipa_bcr to 0xFFFFFFFF for using new IPA behavior */
		ipa_write_reg(ipa_ctx->mmio, IPA_BCR_OFST, IPA_BCR_REG_VAL);
	}

	return 0;
}

/**
 * ipa2_get_ep_mapping() - provide endpoint mapping
 * @client: client type
 *
 * Selects the mapping row for the detected HW generation and returns the
 * endpoint index for @client from the static ep_mapping table.
 *
 * Return value: endpoint mapping (INVALID_EP_MAPPING_INDEX on bad input
 * or uninitialized driver)
 */
int ipa2_get_ep_mapping(enum ipa_client_type client)
{
	u8 hw_type_index = IPA_1_1;

	if (unlikely(!ipa_ctx)) {
		IPAERR("IPA driver was not initialized\n");
		return INVALID_EP_MAPPING_INDEX;
	}

	if (client >= IPA_CLIENT_MAX || client < 0) {
		IPAERR("Bad client number! client =%d\n", client);
		return INVALID_EP_MAPPING_INDEX;
	}

	switch (ipa_ctx->ipa_hw_type) {
	case IPA_HW_v2_0:
	case IPA_HW_v2_5:
		/* v2.0 and v2.5 share one mapping table */
		hw_type_index = IPA_2_0;
		break;
	case IPA_HW_v2_6L:
		hw_type_index = IPA_2_6L;
		break;
	default:
		hw_type_index = IPA_1_1;
		break;
	}

	return ep_mapping[hw_type_index][client];
}

/* ipa2_set_client() - provide client mapping
 * @index: pipe index to record the mapping under
 * @client: client type
 * @uplink: true if this pipe carries uplink traffic
 *
 * Validates both arguments before touching the per-pipe table; on bad
 * input it only logs (no error is returned).
 *
 * Return value: none
 */
void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink)
{
	if (client >= IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
		IPAERR("Bad client number! client =%d\n", client);
	} else if (index >= IPA_MAX_NUM_PIPES || index < 0) {
		IPAERR("Bad pipe index! index =%d\n", index);
	} else {
		ipa_ctx->ipacm_client[index].client_enum = client;
		ipa_ctx->ipacm_client[index].uplink = uplink;
	}
}

/**
 * ipa2_get_client() - provide client mapping
 * @pipe_idx: pipe index to look up
 *
 * Return value: client enum recorded for the pipe, or IPACM_CLIENT_MAX
 * if the index is out of range
 */
enum ipacm_client_enum ipa2_get_client(int pipe_idx)
{
	if (pipe_idx >= IPA_MAX_NUM_PIPES || pipe_idx < 0) {
		IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx);
		return IPACM_CLIENT_MAX;
	} else {
		return ipa_ctx->ipacm_client[pipe_idx].client_enum;
	}
}

/**
 * ipa2_get_client_uplink() - provide client mapping
 * @pipe_idx: pipe index to look up
 *
 * NOTE(review): unlike ipa2_get_client(), no bounds check on @pipe_idx —
 * callers are presumably trusted; verify.
 *
 * Return value: uplink flag recorded for the pipe
 */
bool ipa2_get_client_uplink(int pipe_idx)
{
	return ipa_ctx->ipacm_client[pipe_idx].uplink;
}

/**
 * ipa2_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
 * the supplied pipe index.
 *
 * @pipe_idx:
 *
 * Scans every RM resource's client list for the client bound to the pipe.
 *
 * Return value: IPA_RM resource related to the pipe, -1 if a resource was not
 * found.
 */
enum ipa_rm_resource_name ipa2_get_rm_resource_from_ep(int pipe_idx)
{
	int i;
	int j;
	enum ipa_client_type client;
	struct ipa_client_names clients;
	bool found = false;

	if (unlikely(!ipa_ctx)) {
		IPAERR("IPA driver was not initialized\n");
		return -EINVAL;
	}

	if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) {
		IPAERR("Bad pipe index!\n");
		return -EINVAL;
	}

	client = ipa_ctx->ep[pipe_idx].client;

	for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
		memset(&clients, 0, sizeof(clients));
		ipa_get_clients_from_rm_resource(i, &clients);
		for (j = 0; j < clients.length; j++) {
			if (clients.names[j] == client) {
				found = true;
				break;
			}
		}
		if (found)
			break;
	}

	if (!found)
		return -EFAULT;

	return i;
}

/**
 * ipa2_get_client_mapping() - provide client mapping
 * @pipe_idx: IPA end-point number
 *
 * Return value: client mapping
 */
enum ipa_client_type ipa2_get_client_mapping(int pipe_idx)
{
	if (unlikely(!ipa_ctx)) {
		IPAERR("IPA driver was not initialized\n");
		return -EINVAL;
	}

	if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) {
		IPAERR("Bad pipe index!\n");
		return -EINVAL;
	}

	return ipa_ctx->ep[pipe_idx].client;
}

/**
 * ipa_write_32() - convert 32 bit value to byte array
 * @w: 32 bit integer
 * @dest: byte array
 *
 * Serializes @w little-endian into @dest.
 *
 * Return value: converted value (pointer past the written bytes)
 */
u8 *ipa_write_32(u32 w, u8 *dest)
{
	*dest++ = (u8)((w) & 0xFF);
	*dest++ = (u8)((w >> 8) & 0xFF);
	*dest++ = (u8)((w >> 16) & 0xFF);
	*dest++ = (u8)((w >> 24) & 0xFF);

	return dest;
}

/**
 * ipa_write_16() - convert 16 bit value
 * to byte array (little-endian)
 * @hw: 16 bit integer
 * @dest: byte array
 *
 * Return value: converted value (pointer past the written bytes)
 */
u8 *ipa_write_16(u16 hw, u8 *dest)
{
	*dest++ = (u8)((hw) & 0xFF);
	*dest++ = (u8)((hw >> 8) & 0xFF);

	return dest;
}

/**
 * ipa_write_8() - convert 8 bit value to byte array
 * @b: 8 bit integer
 * @dest: byte array
 *
 * Return value: converted value (pointer past the written byte)
 */
u8 *ipa_write_8(u8 b, u8 *dest)
{
	*dest++ = (b) & 0xFF;

	return dest;
}

/**
 * ipa_pad_to_32() - pad byte array to 32 bit value
 * @dest: byte array
 *
 * Writes zero bytes until @dest is 4-byte aligned.
 *
 * Return value: padded value (first aligned address at/after @dest)
 */
u8 *ipa_pad_to_32(u8 *dest)
{
	int i = (long)dest & 0x3;
	int j;

	if (i)
		for (j = 0; j < (4 - i); j++)
			*dest++ = 0;

	return dest;
}

/*
 * Emit one 128-bit offset/mask/value equation for a MAC address match:
 * one offset byte, then a 16-byte mask and a 16-byte value, each laid
 * out little-endian per 4-byte word with the upper 8 bytes zeroed.
 */
void ipa_generate_mac_addr_hw_rule(u8 **buf, u8 hdr_mac_addr_offset,
	const uint8_t mac_addr_mask[ETH_ALEN],
	const uint8_t mac_addr[ETH_ALEN])
{
	*buf = ipa_write_8(hdr_mac_addr_offset, *buf);

	/* MAC addr mask copied as little endian each 4 bytes */
	*buf = ipa_write_8(mac_addr_mask[3], *buf);
	*buf = ipa_write_8(mac_addr_mask[2], *buf);
	*buf = ipa_write_8(mac_addr_mask[1], *buf);
	*buf = ipa_write_8(mac_addr_mask[0], *buf);
	*buf = ipa_write_16(0, *buf);
	*buf = ipa_write_8(mac_addr_mask[5], *buf);
	*buf = ipa_write_8(mac_addr_mask[4], *buf);
	*buf = ipa_write_32(0, *buf);
	*buf = ipa_write_32(0, *buf);

	/* MAC addr copied as little endian each 4 bytes */
	*buf = ipa_write_8(mac_addr[3], *buf);
	*buf = ipa_write_8(mac_addr[2], *buf);
	*buf = ipa_write_8(mac_addr[1], *buf);
	*buf = ipa_write_8(mac_addr[0], *buf);
	*buf = ipa_write_16(0, *buf);
	*buf = ipa_write_8(mac_addr[5], *buf);
	*buf = ipa_write_8(mac_addr[4], *buf);
	*buf = ipa_write_32(0, *buf);
	*buf = ipa_write_32(0, *buf);
	*buf = ipa_pad_to_32(*buf);
}

/**
 * ipa_generate_hw_rule() - generate HW rule
 * @ip: IP address type
 * @attrib: IPA rule attribute
 * @buf: output buffer
 * @en_rule: rule
 *
 * Serializes the attribute set into the HW equation byte stream and ORs
 * the matching equation-enable bits into @en_rule. Each equation class
 * (meq32, meq128, ihl_meq32, ihl_rng16) has a fixed number of HW slots;
 * running out of a class is an error.
 *
 * Return codes:
 * 0: success
 * -EPERM: wrong input
 */
int ipa_generate_hw_rule(enum ipa_ip_type ip,
	const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
{
	u8 ofst_meq32 = 0;
	u8
ihl_ofst_rng16 = 0;
	u8 ihl_ofst_meq32 = 0;
	u8 ofst_meq128 = 0;

	if (ip == IPA_IP_v4) {
		/* error check */
		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
		    attrib->attrib_mask & IPA_FLT_TC ||
		    attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
			IPAERR("v6 attrib's specified for v4 rule\n");
			return -EPERM;
		}

		if (attrib->attrib_mask & IPA_FLT_TOS) {
			*en_rule |= IPA_TOS_EQ;
			*buf = ipa_write_8(attrib->u.v4.tos, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 0 => offset of TOS in v4 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32((attrib->tos_mask << 16), *buf);
			*buf = ipa_write_32((attrib->tos_value << 16), *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
			*en_rule |= IPA_PROTOCOL_EQ;
			*buf = ipa_write_8(attrib->u.v4.protocol, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 12 => offset of src ip in v4 header */
			*buf = ipa_write_8(12, *buf);
			*buf = ipa_write_32(attrib->u.v4.src_addr_mask, *buf);
			*buf = ipa_write_32(attrib->u.v4.src_addr, *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 16 => offset of dst ip in v4 header */
			*buf = ipa_write_8(16, *buf);
			*buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
			*buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* -2 => offset of ether type in L2 hdr */
			*buf = ipa_write_8((u8)-2, *buf);
			/* ether type written twice: mask word then value word */
			*buf = ipa_write_16(0, *buf);
			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
			*buf = ipa_write_16(0, *buf);
			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->src_port_hi < attrib->src_port_lo) {
				IPAERR("bad src port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v4 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port_hi, *buf);
			*buf = ipa_write_16(attrib->src_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->dst_port_hi < attrib->dst_port_lo) {
				IPAERR("bad dst port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v4 header */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port_hi, *buf);
			*buf = ipa_write_16(attrib->dst_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_TYPE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of type after v4 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->type, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_CODE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 1 => offset of code after v4 header */
			*buf = ipa_write_8(1, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->code, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SPI) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of SPI after v4 header FIXME */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFFFFFFFF, *buf);
			*buf = ipa_write_32(attrib->spi, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v4 header */
			/* exact-match port expressed as degenerate range */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v4 header */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -14 => offset of dst mac addr in Ethernet II hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-14,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -8 => offset of src mac addr in Ethernet II hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-8,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr);
ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -22 => offset of dst mac addr in 802.3 hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-22,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -16 => offset of src mac addr in 802.3 hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-16,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
			*en_rule |= IPA_METADATA_COMPARE;
			*buf = ipa_write_8(0, *buf);	/* offset, reserved */
			*buf = ipa_write_32(attrib->meta_data_mask, *buf);
			*buf = ipa_write_32(attrib->meta_data, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
			/* frag equation carries no operand bytes, pad only */
			*en_rule |= IPA_IS_FRAG;
			*buf = ipa_pad_to_32(*buf);
		}
	} else if (ip == IPA_IP_v6) {

		/* v6 code below assumes no extension headers TODO: fix this */

		/* error check */
		if (attrib->attrib_mask & IPA_FLT_TOS ||
		    attrib->attrib_mask & IPA_FLT_PROTOCOL) {
			IPAERR("v4 attrib's specified for v6 rule\n");
			return -EPERM;
		}

		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
			*en_rule |= IPA_PROTOCOL_EQ;
			*buf = ipa_write_8(attrib->u.v6.next_hdr, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* -2 => offset of ether type in L2 hdr */
			*buf = ipa_write_8((u8)-2, *buf);
			*buf = ipa_write_16(0, *buf);
			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
			*buf = ipa_write_16(0, *buf);
			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_TYPE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of type after v6 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->type, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_CODE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 1 => offset of code after v6 header */
			*buf = ipa_write_8(1, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->code, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SPI) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of SPI after v6 header FIXME */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFFFFFFFF, *buf);
			*buf = ipa_write_32(attrib->spi, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v6 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v6 header */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
			if
(ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->src_port_hi < attrib->src_port_lo) {
				IPAERR("bad src port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v6 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port_hi, *buf);
			*buf = ipa_write_16(attrib->src_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->dst_port_hi < attrib->dst_port_lo) {
				IPAERR("bad dst port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v6 header */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port_hi, *buf);
			*buf = ipa_write_16(attrib->dst_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* 8 => offset of src ip in v6 header */
			*buf = ipa_write_8(8, *buf);
			/* 128-bit mask then 128-bit value, 4 words each */
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[0],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[1],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[2],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[3],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[0], *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[1], *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[2], *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[3], *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* 24 => offset of dst ip in v6 header */
			*buf = ipa_write_8(24, *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[0],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[1],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[2],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[3],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[0], *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[1], *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[2], *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[3], *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_TC) {
			/* NOTE(review): attribute-mask bit reused as the
			 * equation-enable bit here — TODO confirm intended.
			 */
			*en_rule |= IPA_FLT_TC;
			*buf = ipa_write_8(attrib->u.v6.tc, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* 0 => offset of TOS in v6 header */
			*buf = ipa_write_8(0, *buf);
			/* traffic class sits at bits 20..27 of the first word */
			*buf = ipa_write_32((attrib->tos_mask << 20), *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_write_32((attrib->tos_value << 20), *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -14 => offset of dst mac addr in Ethernet II hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-14,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -8 => offset of src mac addr in Ethernet II hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-8,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -22 => offset of dst mac addr in 802.3 hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-22,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -16 => offset of src mac addr in 802.3 hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-16,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
			*en_rule |= IPA_FLT_FLOW_LABEL;
			 /* FIXME FL is only 20 bits */
			*buf = ipa_write_32(attrib->u.v6.flow_label, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
			*en_rule |= IPA_METADATA_COMPARE;
			*buf = ipa_write_8(0, *buf);	/* offset, reserved */
			*buf = ipa_write_32(attrib->meta_data_mask, *buf);
			*buf = ipa_write_32(attrib->meta_data, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
			*en_rule |= IPA_IS_FRAG;
			*buf = ipa_pad_to_32(*buf);
		}
	} else {
		IPAERR("unsupported ip %d\n", ip);
		return -EPERM;
	}

	/*
	 * default "rule" means no attributes set -> map to
	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
	 */
	if (attrib->attrib_mask == 0) {
		if (ipa_ofst_meq32[ofst_meq32] == -1) {
			IPAERR("ran out of meq32 eq\n");
			return -EPERM;
		}
		*en_rule |= ipa_ofst_meq32[ofst_meq32];
		*buf = ipa_write_8(0, *buf);    /* offset */
		*buf = ipa_write_32(0, *buf);   /* mask */
		*buf = ipa_write_32(0, *buf);   /* val */
		*buf = ipa_pad_to_32(*buf);
		ofst_meq32++;
	}

	return 0;
}

/*
 * Fill one 128-bit equation slot in @eq_atrb with a MAC address match.
 * Same byte layout as ipa_generate_mac_addr_hw_rule(): bytes 3..0 of the
 * address little-endian, two zero bytes, bytes 5..4, upper 8 bytes zero.
 */
void ipa_generate_flt_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
	u8 hdr_mac_addr_offset,	const uint8_t mac_addr_mask[ETH_ALEN],
	const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128)
{
	eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset;
eq_atrb->offset_meq_128[ofst_meq128].mask[0] = mac_addr_mask[3];
	eq_atrb->offset_meq_128[ofst_meq128].mask[1] = mac_addr_mask[2];
	eq_atrb->offset_meq_128[ofst_meq128].mask[2] = mac_addr_mask[1];
	eq_atrb->offset_meq_128[ofst_meq128].mask[3] = mac_addr_mask[0];
	eq_atrb->offset_meq_128[ofst_meq128].mask[4] = 0;
	eq_atrb->offset_meq_128[ofst_meq128].mask[5] = 0;
	eq_atrb->offset_meq_128[ofst_meq128].mask[6] = mac_addr_mask[5];
	eq_atrb->offset_meq_128[ofst_meq128].mask[7] = mac_addr_mask[4];
	memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 8);
	eq_atrb->offset_meq_128[ofst_meq128].value[0] = mac_addr[3];
	eq_atrb->offset_meq_128[ofst_meq128].value[1] = mac_addr[2];
	eq_atrb->offset_meq_128[ofst_meq128].value[2] = mac_addr[1];
	eq_atrb->offset_meq_128[ofst_meq128].value[3] = mac_addr[0];
	eq_atrb->offset_meq_128[ofst_meq128].value[4] = 0;
	eq_atrb->offset_meq_128[ofst_meq128].value[5] = 0;
	eq_atrb->offset_meq_128[ofst_meq128].value[6] = mac_addr[5];
	eq_atrb->offset_meq_128[ofst_meq128].value[7] = mac_addr[4];
	memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 8);
}

/*
 * Translate a rule attribute set into the structured equation form
 * (struct ipa_ipfltri_rule_eq) rather than the raw byte stream produced
 * by ipa_generate_hw_rule(); the attribute-to-equation mapping mirrors
 * that function.
 *
 * Return codes:
 * 0: success
 * -EPERM: wrong input
 */
int ipa_generate_flt_eq(enum ipa_ip_type ip,
		const struct ipa_rule_attrib *attrib,
		struct ipa_ipfltri_rule_eq *eq_atrb)
{
	u8 ofst_meq32 = 0;
	u8 ihl_ofst_rng16 = 0;
	u8 ihl_ofst_meq32 = 0;
	u8 ofst_meq128 = 0;
	u16 eq_bitmap = 0;
	u16 *en_rule = &eq_bitmap;

	if (ip == IPA_IP_v4) {
		/* error check */
		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
		    attrib->attrib_mask & IPA_FLT_TC ||
		    attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
			IPAERR("v6 attrib's specified for v4 rule\n");
			return -EPERM;
		}

		if (attrib->attrib_mask & IPA_FLT_TOS) {
			*en_rule |= IPA_TOS_EQ;
			eq_atrb->tos_eq_present = 1;
			eq_atrb->tos_eq = attrib->u.v4.tos;
		}

		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* TOS lives at byte offset 0 of the v4 header */
			eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
			eq_atrb->offset_meq_32[ofst_meq32].mask =
				attrib->tos_mask << 16;
			eq_atrb->offset_meq_32[ofst_meq32].value =
				attrib->tos_value << 16;
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
			*en_rule |= IPA_PROTOCOL_EQ;
			eq_atrb->protocol_eq_present = 1;
			eq_atrb->protocol_eq = attrib->u.v4.protocol;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 12 => offset of src ip in v4 header */
			eq_atrb->offset_meq_32[ofst_meq32].offset = 12;
			eq_atrb->offset_meq_32[ofst_meq32].mask =
				attrib->u.v4.src_addr_mask;
			eq_atrb->offset_meq_32[ofst_meq32].value =
				attrib->u.v4.src_addr;
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 16 => offset of dst ip in v4 header */
			eq_atrb->offset_meq_32[ofst_meq32].offset = 16;
			eq_atrb->offset_meq_32[ofst_meq32].mask =
				attrib->u.v4.dst_addr_mask;
			eq_atrb->offset_meq_32[ofst_meq32].value =
				attrib->u.v4.dst_addr;
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->src_port_hi < attrib->src_port_lo) {
				IPAERR("bad src port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset
				= 0;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
				= attrib->src_port_lo;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
				= attrib->src_port_hi;
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->dst_port_hi < attrib->dst_port_lo) {
				IPAERR("bad dst port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset
				= 2;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
				= attrib->dst_port_lo;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
				= attrib->dst_port_hi;
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_TYPE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
				attrib->type;
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_CODE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
				attrib->code;
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SPI) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
				0xFFFFFFFF;
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
				attrib->spi;
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* exact-match port as a degenerate range */
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset
				= 0;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
				= attrib->src_port;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
				= attrib->src_port;
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset
				= 2;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
= attrib->dst_port;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
				= attrib->dst_port;
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
			*en_rule |= IPA_METADATA_COMPARE;
			eq_atrb->metadata_meq32_present = 1;
			eq_atrb->metadata_meq32.offset = 0;
			eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
			eq_atrb->metadata_meq32.value = attrib->meta_data;
		}

		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
			*en_rule |= IPA_IS_FRAG;
			eq_atrb->ipv4_frag_eq_present = 1;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -14 => offset of dst mac addr in Ethernet II hdr */
			ipa_generate_flt_mac_addr_eq(eq_atrb, -14,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr,
				ofst_meq128);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -8 => offset of src mac addr in Ethernet II hdr */
			ipa_generate_flt_mac_addr_eq(eq_atrb, -8,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr,
				ofst_meq128);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -22 => offset of dst mac addr in 802.3 hdr */
			ipa_generate_flt_mac_addr_eq(eq_atrb, -22,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr,
				ofst_meq128);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -16 => offset of src mac addr in 802.3 hdr */
			ipa_generate_flt_mac_addr_eq(eq_atrb, -16,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr,
				ofst_meq128);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				/* NOTE(review): message says meq128 but the
				 * exhausted class here is meq32.
				 */
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* -2 => offset of ether type in L2 hdr */
			eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
			eq_atrb->offset_meq_32[ofst_meq32].mask =
				htons(attrib->ether_type);
			eq_atrb->offset_meq_32[ofst_meq32].value =
				htons(attrib->ether_type);
			ofst_meq32++;
		}
	} else if (ip == IPA_IP_v6) {

		/* v6 code below assumes no extension headers TODO: fix this */

		/* error check */
		if (attrib->attrib_mask & IPA_FLT_TOS ||
		    attrib->attrib_mask & IPA_FLT_PROTOCOL) {
			IPAERR("v4 attrib's specified for v6 rule\n");
			return -EPERM;
		}

		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
			*en_rule |= IPA_PROTOCOL_EQ;
			eq_atrb->protocol_eq_present = 1;
			eq_atrb->protocol_eq = attrib->u.v6.next_hdr;
		}

		if (attrib->attrib_mask & IPA_FLT_TYPE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
				attrib->type;
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_CODE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
				attrib->code;
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SPI) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
				0xFFFFFFFF;
			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
				attrib->spi;
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
			if
(ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* exact-match port as a degenerate range */
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset
				= 0;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
				= attrib->src_port;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
				= attrib->src_port;
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset
				= 2;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
				= attrib->dst_port;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
				= attrib->dst_port;
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->src_port_hi < attrib->src_port_lo) {
				IPAERR("bad src port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset
				= 0;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
				= attrib->src_port_lo;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
				= attrib->src_port_hi;
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->dst_port_hi < attrib->dst_port_lo) {
				IPAERR("bad dst port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset
				= 2;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
				= attrib->dst_port_lo;
			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
				= attrib->dst_port_hi;
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* 8 => offset of src ip in v6 header */
			eq_atrb->offset_meq_128[ofst_meq128].offset = 8;
			/* mask/value bytes filled 4 bytes at a time */
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
				= attrib->u.v6.src_addr_mask[0];
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
				= attrib->u.v6.src_addr_mask[1];
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
				= attrib->u.v6.src_addr_mask[2];
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
				= attrib->u.v6.src_addr_mask[3];
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
				= attrib->u.v6.src_addr[0];
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
				= attrib->u.v6.src_addr[1];
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
				= attrib->u.v6.src_addr[2];
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
					12) = attrib->u.v6.src_addr[3];
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* 24 => offset of dst ip in v6 header */
			eq_atrb->offset_meq_128[ofst_meq128].offset = 24;
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
				= attrib->u.v6.dst_addr_mask[0];
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
				= attrib->u.v6.dst_addr_mask[1];
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
				= attrib->u.v6.dst_addr_mask[2];
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
				= attrib->u.v6.dst_addr_mask[3];
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
				= attrib->u.v6.dst_addr[0];
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
				= attrib->u.v6.dst_addr[1];
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
				= attrib->u.v6.dst_addr[2];
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
					12) = attrib->u.v6.dst_addr[3];
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_TC) {
			/* NOTE(review): attribute-mask bit reused as the
			 * equation-enable bit here — TODO confirm intended.
			 */
			*en_rule |= IPA_FLT_TC;
			eq_atrb->tc_eq_present = 1;
			eq_atrb->tc_eq = attrib->u.v6.tc;
		}

		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			eq_atrb->offset_meq_128[ofst_meq128].offset = 0;
			/* traffic class at bits 20..27 of the first word */
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
				= attrib->tos_mask << 20;
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
				= 0;
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
				= 0;
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
				= 0;
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
				= attrib->tos_value << 20;
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
				= 0;
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
				= 0;
			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
					12) = 0;
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
			*en_rule |= IPA_FLT_FLOW_LABEL;
			eq_atrb->fl_eq_present = 1;
			eq_atrb->fl_eq = attrib->u.v6.flow_label;
		}

		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
			*en_rule |= IPA_METADATA_COMPARE;
			eq_atrb->metadata_meq32_present = 1;
			eq_atrb->metadata_meq32.offset = 0;
			eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
			eq_atrb->metadata_meq32.value = attrib->meta_data;
		}

		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
			*en_rule |= IPA_IS_FRAG;
			eq_atrb->ipv4_frag_eq_present = 1;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -14 => offset of dst mac addr in Ethernet II hdr */
			ipa_generate_flt_mac_addr_eq(eq_atrb, -14,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr,
				ofst_meq128);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -8 => offset of src mac addr in Ethernet II hdr */
			ipa_generate_flt_mac_addr_eq(eq_atrb, -8,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr,
				ofst_meq128);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -22 => offset of dst mac addr in 802.3 hdr */
			ipa_generate_flt_mac_addr_eq(eq_atrb, -22,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr,
				ofst_meq128);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* -16 => offset of src mac addr in 802.3 hdr */
			ipa_generate_flt_mac_addr_eq(eq_atrb, -16,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr,
				ofst_meq128);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				/* NOTE(review): message says meq128 but the
				 * exhausted class here is meq32.
				 */
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* -2 => offset of ether type in L2 hdr */
			eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
			eq_atrb->offset_meq_32[ofst_meq32].mask =
				htons(attrib->ether_type);
			eq_atrb->offset_meq_32[ofst_meq32].value =
				htons(attrib->ether_type);
			ofst_meq32++;
		}
	} else {
		IPAERR("unsupported ip %d\n", ip);
		return -EPERM;
	}

	/*
	 * default "rule" means no attributes set -> map to
	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
	 */
	if (attrib->attrib_mask == 0) {
		if (ipa_ofst_meq32[ofst_meq32] == -1) {
			IPAERR("ran out of meq32 eq\n");
			return -EPERM;
		}
		*en_rule |= ipa_ofst_meq32[ofst_meq32];
		eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
		eq_atrb->offset_meq_32[ofst_meq32].mask = 0;
		eq_atrb->offset_meq_32[ofst_meq32].value = 0;
		ofst_meq32++;
	}

	/* publish the accumulated bitmap and per-class equation counts */
	eq_atrb->rule_eq_bitmap = *en_rule;
	eq_atrb->num_offset_meq_32 = ofst_meq32;
	eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
	eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
	eq_atrb->num_offset_meq_128 = ofst_meq128;

	return 0;
}

/**
 * ipa2_cfg_ep - IPA end-point configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ipa_ep_cfg: [in] IPA end-point configuration params
 *
 * This includes
nat, header, mode, aggregation and route settings and is a one * shot API to configure the IPA end-point fully * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg) { int result = -EINVAL; if (unlikely(!ipa_ctx)) { IPAERR("IPA driver was not initialized\n"); return -EINVAL; } if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ipa_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) { IPAERR("bad parm.\n"); return -EINVAL; } result = ipa2_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr); if (result) return result; result = ipa2_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext); if (result) return result; result = ipa2_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr); if (result) return result; result = ipa2_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg); if (result) return result; if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) { result = ipa2_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat); if (result) return result; result = ipa2_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode); if (result) return result; result = ipa2_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route); if (result) return result; result = ipa2_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr); if (result) return result; } else { result = ipa2_cfg_ep_metadata_mask(clnt_hdl, &ipa_ep_cfg->metadata_mask); if (result) return result; } return 0; } const char *ipa_get_nat_en_str(enum ipa_nat_en_type nat_en) { switch (nat_en) { case (IPA_BYPASS_NAT): return "NAT disabled"; case (IPA_SRC_NAT): return "Source NAT"; case (IPA_DST_NAT): return "Dst NAT"; } return "undefined"; } void _ipa_cfg_ep_nat_v1_1(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat) { u32 reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en, IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT, IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_NAT_N_OFST_v1_1(clnt_hdl), reg_val); } void _ipa_cfg_ep_nat_v2_0(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat) { u32 reg_val = 0; 
IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en, IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT, IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_NAT_N_OFST_v2_0(clnt_hdl), reg_val); } /** * ipa2_cfg_ep_nat() - IPA end-point NAT configuration * @clnt_hdl: [in] opaque client handle assigned by IPA to client * @ipa_ep_cfg: [in] IPA end-point configuration params * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa2_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat) { if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ipa_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) { IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); return -EINVAL; } if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) { IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl); return -EINVAL; } IPADBG("pipe=%d, nat_en=%d(%s)\n", clnt_hdl, ep_nat->nat_en, ipa_get_nat_en_str(ep_nat->nat_en)); /* copy over EP cfg */ ipa_ctx->ep[clnt_hdl].cfg.nat = *ep_nat; ipa_inc_client_enable_clks(); ipa_ctx->ctrl->ipa_cfg_ep_nat(clnt_hdl, ep_nat); ipa_dec_client_disable_clks(); return 0; } static void _ipa_cfg_ep_status_v1_1(u32 clnt_hdl, const struct ipa_ep_cfg_status *ep_status) { IPADBG("Not supported for version 1.1\n"); } static void _ipa_cfg_ep_status_v2_0(u32 clnt_hdl, const struct ipa_ep_cfg_status *ep_status) { u32 reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, ep_status->status_en, IPA_ENDP_STATUS_n_STATUS_EN_SHFT, IPA_ENDP_STATUS_n_STATUS_EN_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_status->status_ep, IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT, IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_STATUS_n_OFST(clnt_hdl), reg_val); } /** * ipa2_cfg_ep_status() - IPA end-point status configuration * @clnt_hdl: [in] opaque client handle assigned by IPA to client * @ipa_ep_cfg: [in] IPA end-point configuration params * * Returns: 0 on success, negative on failure * * Note: Should not be 
called from atomic context */ int ipa2_cfg_ep_status(u32 clnt_hdl, const struct ipa_ep_cfg_status *ep_status) { if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ipa_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) { IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); return -EINVAL; } IPADBG("pipe=%d, status_en=%d status_ep=%d\n", clnt_hdl, ep_status->status_en, ep_status->status_ep); /* copy over EP cfg */ ipa_ctx->ep[clnt_hdl].status = *ep_status; ipa_inc_client_enable_clks(); ipa_ctx->ctrl->ipa_cfg_ep_status(clnt_hdl, ep_status); ipa_dec_client_disable_clks(); return 0; } static void _ipa_cfg_ep_cfg_v1_1(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg) { IPADBG("Not supported for version 1.1\n"); } static void _ipa_cfg_ep_cfg_v2_0(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg) { u32 reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, cfg->frag_offload_en, IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT, IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK); IPA_SETFIELD_IN_REG(reg_val, cfg->cs_offload_en, IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT, IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK); IPA_SETFIELD_IN_REG(reg_val, cfg->cs_metadata_hdr_offset, IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT, IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_CFG_n_OFST(clnt_hdl), reg_val); } /** * ipa2_cfg_ep_cfg() - IPA end-point cfg configuration * @clnt_hdl: [in] opaque client handle assigned by IPA to client * @ipa_ep_cfg: [in] IPA end-point configuration params * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa2_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg) { if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ipa_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) { IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); return -EINVAL; } IPADBG("pipe=%d, frag_ofld_en=%d cs_ofld_en=%d mdata_hdr_ofst=%d\n", clnt_hdl, cfg->frag_offload_en, 
cfg->cs_offload_en, cfg->cs_metadata_hdr_offset); /* copy over EP cfg */ ipa_ctx->ep[clnt_hdl].cfg.cfg = *cfg; ipa_inc_client_enable_clks(); ipa_ctx->ctrl->ipa_cfg_ep_cfg(clnt_hdl, cfg); ipa_dec_client_disable_clks(); return 0; } static void _ipa_cfg_ep_metadata_mask_v1_1(u32 clnt_hdl, const struct ipa_ep_cfg_metadata_mask *metadata_mask) { IPADBG("Not supported for version 1.1\n"); } static void _ipa_cfg_ep_metadata_mask_v2_0(u32 clnt_hdl, const struct ipa_ep_cfg_metadata_mask *metadata_mask) { u32 reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, metadata_mask->metadata_mask, IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT, IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(clnt_hdl), reg_val); } /** * ipa2_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration * @clnt_hdl: [in] opaque client handle assigned by IPA to client * @ipa_ep_cfg: [in] IPA end-point configuration params * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl, const struct ipa_ep_cfg_metadata_mask *metadata_mask) { if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ipa_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) { IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); return -EINVAL; } IPADBG("pipe=%d, metadata_mask=0x%x\n", clnt_hdl, metadata_mask->metadata_mask); /* copy over EP cfg */ ipa_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask; ipa_inc_client_enable_clks(); ipa_ctx->ctrl->ipa_cfg_ep_metadata_mask(clnt_hdl, metadata_mask); ipa_dec_client_disable_clks(); return 0; } void _ipa_cfg_ep_hdr_v1_1(u32 pipe_number, const struct ipa_ep_cfg_hdr *ep_hdr) { u32 val = 0; val = IPA_SETFIELD(ep_hdr->hdr_len, IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT, IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK) | IPA_SETFIELD(ep_hdr->hdr_ofst_metadata_valid, IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT, 
IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK) | IPA_SETFIELD(ep_hdr->hdr_ofst_metadata, IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT, IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK) | IPA_SETFIELD(ep_hdr->hdr_additional_const_len, IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT, IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK) | IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size_valid, IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT, IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK) | IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size, IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT, IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK) | IPA_SETFIELD(ep_hdr->hdr_a5_mux, IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT, IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HDR_N_OFST_v1_1(pipe_number), val); } void _ipa_cfg_ep_hdr_v2_0(u32 pipe_number, const struct ipa_ep_cfg_hdr *ep_hdr) { u32 reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_metadata_reg_valid, IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2, IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2); IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_remove_additional, IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2, IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2); IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_a5_mux, IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT, IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size, IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT, IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size_valid, IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT, IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_additional_const_len, IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT, IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata, IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT, IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK); IPA_SETFIELD_IN_REG(reg_val, 
ep_hdr->hdr_ofst_metadata_valid, IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT, IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_len, IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT, IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HDR_N_OFST_v2_0(pipe_number), reg_val); } /** * ipa2_cfg_ep_hdr() - IPA end-point header configuration * @clnt_hdl: [in] opaque client handle assigned by IPA to client * @ipa_ep_cfg: [in] IPA end-point configuration params * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa2_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr) { struct ipa_ep_context *ep; if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) { IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); return -EINVAL; } IPADBG("pipe=%d remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n", clnt_hdl, ep_hdr->hdr_remove_additional, ep_hdr->hdr_a5_mux, ep_hdr->hdr_ofst_pkt_size); IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n", ep_hdr->hdr_ofst_pkt_size_valid, ep_hdr->hdr_additional_const_len); IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x", ep_hdr->hdr_ofst_metadata, ep_hdr->hdr_ofst_metadata_valid, ep_hdr->hdr_len); ep = &ipa_ctx->ep[clnt_hdl]; /* copy over EP cfg */ ep->cfg.hdr = *ep_hdr; ipa_inc_client_enable_clks(); ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ep->cfg.hdr); ipa_dec_client_disable_clks(); return 0; } static int _ipa_cfg_ep_hdr_ext_v1_1(u32 clnt_hdl, const struct ipa_ep_cfg_hdr_ext *ep_hdr) { IPADBG("Not supported for version 1.1\n"); return 0; } static int _ipa_cfg_ep_hdr_ext(u32 clnt_hdl, const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext, u32 reg_val) { u8 hdr_endianness = ep_hdr_ext->hdr_little_endian ? 
0 : 1; IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_offset, IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT, IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_payload_len_inc_padding, IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT, IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad, IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT, IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_valid, IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT, IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK); IPA_SETFIELD_IN_REG(reg_val, hdr_endianness, IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT, IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(clnt_hdl), reg_val); return 0; } static int _ipa_cfg_ep_hdr_ext_v2_0(u32 clnt_hdl, const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) { u32 reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment, IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT, IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_0); return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val); } static int _ipa_cfg_ep_hdr_ext_v2_5(u32 clnt_hdl, const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) { u32 reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment, IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT, IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5); return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val); } static int _ipa_cfg_ep_hdr_ext_v2_6L(u32 clnt_hdl, const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) { u32 reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment, IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT, IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5); return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val); 
} /** * ipa2_cfg_ep_hdr_ext() - IPA end-point extended header configuration * @clnt_hdl: [in] opaque client handle assigned by IPA to client * @ep_hdr_ext: [in] IPA end-point configuration params * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa2_cfg_ep_hdr_ext(u32 clnt_hdl, const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) { struct ipa_ep_context *ep; if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) { IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); return -EINVAL; } IPADBG("pipe=%d hdr_pad_to_alignment=%d\n", clnt_hdl, ep_hdr_ext->hdr_pad_to_alignment); IPADBG("hdr_total_len_or_pad_offset=%d\n", ep_hdr_ext->hdr_total_len_or_pad_offset); IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n", ep_hdr_ext->hdr_payload_len_inc_padding, ep_hdr_ext->hdr_total_len_or_pad); IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n", ep_hdr_ext->hdr_total_len_or_pad_valid, ep_hdr_ext->hdr_little_endian); ep = &ipa_ctx->ep[clnt_hdl]; /* copy over EP cfg */ ep->cfg.hdr_ext = *ep_hdr_ext; ipa_inc_client_enable_clks(); ipa_ctx->ctrl->ipa_cfg_ep_hdr_ext(clnt_hdl, &ep->cfg.hdr_ext); ipa_dec_client_disable_clks(); return 0; } /** * ipa2_cfg_ep_hdr() - IPA end-point Control configuration * @clnt_hdl: [in] opaque client handle assigned by IPA to client * @ipa_ep_cfg_ctrl: [in] IPA end-point configuration params * * Returns: 0 on success, negative on failure */ int ipa2_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl) { u32 reg_val = 0; if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ep_ctrl == NULL) { IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl); return -EINVAL; } IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n", clnt_hdl, ep_ctrl->ipa_ep_suspend, ep_ctrl->ipa_ep_delay); IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_suspend, IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT, IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK); 
IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_delay, IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT, IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_CTRL_N_OFST(clnt_hdl), reg_val); return 0; } /** * ipa_cfg_aggr_cntr_granularity() - granularity of the AGGR timer configuration * @aggr_granularity: [in] defines the granularity of AGGR timers * number of units of 1/32msec * * Returns: 0 on success, negative on failure */ int ipa_cfg_aggr_cntr_granularity(u8 aggr_granularity) { u32 reg_val = 0; if (aggr_granularity <= IPA_AGGR_GRAN_MIN || aggr_granularity > IPA_AGGR_GRAN_MAX) { IPAERR("bad param, aggr_granularity = %d\n", aggr_granularity); return -EINVAL; } IPADBG("aggr_granularity=%d\n", aggr_granularity); reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST); reg_val = (reg_val & ~IPA_COUNTER_CFG_AGGR_GRAN_BMSK); IPA_SETFIELD_IN_REG(reg_val, aggr_granularity - 1, IPA_COUNTER_CFG_AGGR_GRAN_SHFT, IPA_COUNTER_CFG_AGGR_GRAN_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST, reg_val); return 0; } EXPORT_SYMBOL(ipa_cfg_aggr_cntr_granularity); /** * ipa_cfg_eot_coal_cntr_granularity() - granularity of EOT_COAL timer * configuration * @eot_coal_granularity: defines the granularity of EOT_COAL timers * number of units of 1/32msec * * Returns: 0 on success, negative on failure */ int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity) { u32 reg_val = 0; if (eot_coal_granularity <= IPA_EOT_COAL_GRAN_MIN || eot_coal_granularity > IPA_EOT_COAL_GRAN_MAX) { IPAERR("bad parm, eot_coal_granularity = %d\n", eot_coal_granularity); return -EINVAL; } IPADBG("eot_coal_granularity=%d\n", eot_coal_granularity); reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST); reg_val = (reg_val & ~IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK); IPA_SETFIELD_IN_REG(reg_val, eot_coal_granularity - 1, IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT, IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST, reg_val); return 0; } 
EXPORT_SYMBOL(ipa_cfg_eot_coal_cntr_granularity); const char *ipa_get_mode_type_str(enum ipa_mode_type mode) { switch (mode) { case (IPA_BASIC): return "Basic"; case (IPA_ENABLE_FRAMING_HDLC): return "HDLC framing"; case (IPA_ENABLE_DEFRAMING_HDLC): return "HDLC de-framing"; case (IPA_DMA): return "DMA"; } return "undefined"; } void _ipa_cfg_ep_mode_v1_1(u32 pipe_number, u32 dst_pipe_number, const struct ipa_ep_cfg_mode *ep_mode) { u32 reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode, IPA_ENDP_INIT_MODE_N_MODE_SHFT, IPA_ENDP_INIT_MODE_N_MODE_BMSK); IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number, IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v1_1, IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v1_1); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_MODE_N_OFST_v1_1(pipe_number), reg_val); } void _ipa_cfg_ep_mode_v2_0(u32 pipe_number, u32 dst_pipe_number, const struct ipa_ep_cfg_mode *ep_mode) { u32 reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode, IPA_ENDP_INIT_MODE_N_MODE_SHFT, IPA_ENDP_INIT_MODE_N_MODE_BMSK); IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number, IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v2_0, IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v2_0); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_MODE_N_OFST_v2_0(pipe_number), reg_val); } /** * ipa2_cfg_ep_mode() - IPA end-point mode configuration * @clnt_hdl: [in] opaque client handle assigned by IPA to client * @ipa_ep_cfg: [in] IPA end-point configuration params * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode) { int ep; if (unlikely(!ipa_ctx)) { IPAERR("IPA driver was not initialized\n"); return -EINVAL; } if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ipa_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) { IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); return -EINVAL; } if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) { IPAERR("MODE does not apply to 
IPA out EP %d\n", clnt_hdl); return -EINVAL; } ep = ipa2_get_ep_mapping(ep_mode->dst); if (ep == -1 && ep_mode->mode == IPA_DMA) { IPAERR("dst %d does not exist\n", ep_mode->dst); return -EINVAL; } WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst)); if (!IPA_CLIENT_IS_CONS(ep_mode->dst)) ep = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d", clnt_hdl, ep_mode->mode, ipa_get_mode_type_str(ep_mode->mode), ep_mode->dst); /* copy over EP cfg */ ipa_ctx->ep[clnt_hdl].cfg.mode = *ep_mode; ipa_ctx->ep[clnt_hdl].dst_pipe_index = ep; ipa_inc_client_enable_clks(); ipa_ctx->ctrl->ipa_cfg_ep_mode(clnt_hdl, ipa_ctx->ep[clnt_hdl].dst_pipe_index, ep_mode); ipa_dec_client_disable_clks(); return 0; } const char *get_aggr_enable_str(enum ipa_aggr_en_type aggr_en) { switch (aggr_en) { case (IPA_BYPASS_AGGR): return "no aggregation"; case (IPA_ENABLE_AGGR): return "aggregation enabled"; case (IPA_ENABLE_DEAGGR): return "de-aggregation enabled"; } return "undefined"; } const char *get_aggr_type_str(enum ipa_aggr_type aggr_type) { switch (aggr_type) { case (IPA_MBIM_16): return "MBIM_16"; case (IPA_HDLC): return "HDLC"; case (IPA_TLP): return "TLP"; case (IPA_RNDIS): return "RNDIS"; case (IPA_GENERIC): return "GENERIC"; case (IPA_QCMAP): return "QCMAP"; } return "undefined"; } void _ipa_cfg_ep_aggr_v1_1(u32 pipe_number, const struct ipa_ep_cfg_aggr *ep_aggr) { u32 reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en, IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT, IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr, IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT, IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit, IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT, IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit, IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT, IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK); ipa_write_reg(ipa_ctx->mmio, 
IPA_ENDP_INIT_AGGR_N_OFST_v1_1(pipe_number), reg_val); } void _ipa_cfg_ep_aggr_v2_0(u32 pipe_number, const struct ipa_ep_cfg_aggr *ep_aggr) { u32 reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en, IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT, IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr, IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT, IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit, IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT, IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit, IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT, IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK); IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_pkt_limit, IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT, IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_AGGR_N_OFST_v2_0(pipe_number), reg_val); } /** * ipa2_cfg_ep_aggr() - IPA end-point aggregation configuration * @clnt_hdl: [in] opaque client handle assigned by IPA to client * @ipa_ep_cfg: [in] IPA end-point configuration params * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr) { if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ipa_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) { IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); return -EINVAL; } IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n", clnt_hdl, ep_aggr->aggr_en, get_aggr_enable_str(ep_aggr->aggr_en), ep_aggr->aggr, get_aggr_type_str(ep_aggr->aggr), ep_aggr->aggr_byte_limit, ep_aggr->aggr_time_limit); /* copy over EP cfg */ ipa_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr; ipa_inc_client_enable_clks(); ipa_ctx->ctrl->ipa_cfg_ep_aggr(clnt_hdl, ep_aggr); ipa_dec_client_disable_clks(); return 0; } void _ipa_cfg_ep_route_v1_1(u32 pipe_index, u32 rt_tbl_index) { int reg_val = 0; 
IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index, IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT, IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(pipe_index), reg_val); } void _ipa_cfg_ep_route_v2_0(u32 pipe_index, u32 rt_tbl_index) { int reg_val = 0; IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index, IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT, IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(pipe_index), reg_val); } /** * ipa2_cfg_ep_route() - IPA end-point routing configuration * @clnt_hdl: [in] opaque client handle assigned by IPA to client * @ipa_ep_cfg: [in] IPA end-point configuration params * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa2_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route) { if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ipa_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) { IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); return -EINVAL; } if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) { IPAERR("ROUTE does not apply to IPA out EP %d\n", clnt_hdl); return -EINVAL; } /* * if DMA mode was configured previously for this EP, return with * success */ if (ipa_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) { IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n", clnt_hdl); return 0; } if (ep_route->rt_tbl_hdl) IPAERR("client specified non-zero RT TBL hdl - ignore it\n"); IPADBG("pipe=%d, rt_tbl_hdl=%d\n", clnt_hdl, ep_route->rt_tbl_hdl); /* always use "default" routing table when programming EP ROUTE reg */ if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) ipa_ctx->ep[clnt_hdl].rt_tbl_idx = IPA_MEM_PART(v4_apps_rt_index_lo); else ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0; ipa_inc_client_enable_clks(); ipa_ctx->ctrl->ipa_cfg_ep_route(clnt_hdl, ipa_ctx->ep[clnt_hdl].rt_tbl_idx); ipa_dec_client_disable_clks(); return 0; } void 
_ipa_cfg_ep_holb_v1_1(u32 pipe_number, const struct ipa_ep_cfg_holb *ep_holb) { ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(pipe_number), ep_holb->en); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(pipe_number), (u16)ep_holb->tmr_val); } void _ipa_cfg_ep_holb_v2_0(u32 pipe_number, const struct ipa_ep_cfg_holb *ep_holb) { ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number), ep_holb->en); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number), (u16)ep_holb->tmr_val); } void _ipa_cfg_ep_holb_v2_5(u32 pipe_number, const struct ipa_ep_cfg_holb *ep_holb) { ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number), ep_holb->en); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number), ep_holb->tmr_val); } void _ipa_cfg_ep_holb_v2_6L(u32 pipe_number, const struct ipa_ep_cfg_holb *ep_holb) { ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number), ep_holb->en); ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number), ep_holb->tmr_val); } /** * ipa2_cfg_ep_holb() - IPA end-point holb configuration * * If an IPA producer pipe is full, IPA HW by default will block * indefinitely till space opens up. During this time no packets * including those from unrelated pipes will be processed. Enabling * HOLB means IPA HW will be allowed to drop packets as/when needed * and indefinite blocking is avoided. 
* * @clnt_hdl: [in] opaque client handle assigned by IPA to client * @ipa_ep_cfg: [in] IPA end-point configuration params * * Returns: 0 on success, negative on failure */ int ipa2_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb) { if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ipa_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL || ep_holb->tmr_val > ipa_ctx->ctrl->max_holb_tmr_val || ep_holb->en > 1) { IPAERR("bad parm.\n"); return -EINVAL; } if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) { IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl); return -EINVAL; } if (!ipa_ctx->ctrl->ipa_cfg_ep_holb) { IPAERR("HOLB is not supported for this IPA core\n"); return -EINVAL; } ipa_ctx->ep[clnt_hdl].holb = *ep_holb; ipa_inc_client_enable_clks(); ipa_ctx->ctrl->ipa_cfg_ep_holb(clnt_hdl, ep_holb); ipa_dec_client_disable_clks(); IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl, ep_holb->tmr_val); return 0; } /** * ipa2_cfg_ep_holb_by_client() - IPA end-point holb configuration * * Wrapper function for ipa_cfg_ep_holb() with client name instead of * client handle. This function is used for clients that does not have * client handle. 
 *
 * @client: [in] client name
 * @ipa_ep_cfg: [in] IPA end-point configuration params
 *
 * Returns: 0 on success, negative on failure
 */
int ipa2_cfg_ep_holb_by_client(enum ipa_client_type client,
				const struct ipa_ep_cfg_holb *ep_holb)
{
	/* Resolve the client enum to a pipe handle and delegate; the
	 * delegate rejects an unmapped (-1) handle via its range check. */
	return ipa2_cfg_ep_holb(ipa2_get_ep_mapping(client), ep_holb);
}

/* Deaggregation is not available on IPA v1.1 HW; configuring it is a no-op
 * that still reports success so common callers need no version checks. */
static int _ipa_cfg_ep_deaggr_v1_1(u32 clnt_hdl,
				const struct ipa_ep_cfg_deaggr *ep_deaggr)
{
	IPADBG("Not supported for version 1.1\n");
	return 0;
}

/* Pack all deaggregation fields into the single ENDP_INIT_DEAGGR_n register
 * and write it (IPA v2.0+ layout). */
static int _ipa_cfg_ep_deaggr_v2_0(u32 clnt_hdl,
				   const struct ipa_ep_cfg_deaggr *ep_deaggr)
{
	u32 reg_val = 0;

	IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->deaggr_hdr_len,
		IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT,
		IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_valid,
		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT,
		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_location,
		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT,
		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->max_packet_len,
		IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT,
		IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK);

	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(clnt_hdl), reg_val);

	return 0;
}

/**
 * ipa2_cfg_ep_deaggr() - IPA end-point deaggregation configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ep_deaggr: [in] IPA end-point configuration params
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa2_cfg_ep_deaggr(u32 clnt_hdl,
			const struct ipa_ep_cfg_deaggr *ep_deaggr)
{
	struct ipa_ep_context *ep;

	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
	    ipa_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) {
		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
				clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
		return -EINVAL;
	}

	IPADBG("pipe=%d deaggr_hdr_len=%d\n",
		clnt_hdl,
		ep_deaggr->deaggr_hdr_len);

	IPADBG("packet_offset_valid=%d\n",
		ep_deaggr->packet_offset_valid);

	IPADBG("packet_offset_location=%d max_packet_len=%d\n",
		ep_deaggr->packet_offset_location,
		ep_deaggr->max_packet_len);

	ep = &ipa_ctx->ep[clnt_hdl];

	/* copy over EP cfg so the cached context matches what we program */
	ep->cfg.deaggr = *ep_deaggr;

	ipa_inc_client_enable_clks();

	ipa_ctx->ctrl->ipa_cfg_ep_deaggr(clnt_hdl, &ep->cfg.deaggr);

	ipa_dec_client_disable_clks();

	return 0;
}

/* Per-EP metadata (qmap mux id) is not supported on IPA v1.1 HW; no-op. */
static void _ipa_cfg_ep_metadata_v1_1(u32 pipe_number,
					const struct ipa_ep_cfg_metadata *meta)
{
	IPADBG("Not supported for version 1.1\n");
}

/* Write the qmap mux id into the HDR_METADATA_n MUX_ID field (IPA v2.0+). */
static void _ipa_cfg_ep_metadata_v2_0(u32 pipe_number,
					const struct ipa_ep_cfg_metadata *meta)
{
	u32 reg_val = 0;

	IPA_SETFIELD_IN_REG(reg_val, meta->qmap_id,
			IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT,
			IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK);

	ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_HDR_METADATA_n_OFST(pipe_number),
			reg_val);
}

/**
 * ipa2_cfg_ep_metadata() - IPA end-point metadata configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ipa_ep_cfg: [in] IPA end-point configuration params
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa2_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
{
	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
	    ipa_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) {
		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
					clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
		return -EINVAL;
	}

	IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id);

	/* copy over EP cfg */
	ipa_ctx->ep[clnt_hdl].cfg.meta = *ep_md;

	ipa_inc_client_enable_clks();

	ipa_ctx->ctrl->ipa_cfg_ep_metadata(clnt_hdl, ep_md);
	/* Metadata programming also requires re-writing the header register
	 * with the metadata-valid flag set. */
	ipa_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1;
	ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ipa_ctx->ep[clnt_hdl].cfg.hdr);

	ipa_dec_client_disable_clks();

	return 0;
}
EXPORT_SYMBOL(ipa2_cfg_ep_metadata);

int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
{
	/* NOTE(review): meta is never zero-initialized; only qmap_id is set
	 * below, so for the WLAN1_PROD path any other fields of the struct
	 * are copied into the EP cfg uninitialized — consider memset. */
	struct ipa_ep_cfg_metadata meta;
	struct ipa_ep_context *ep;
	int ipa_ep_idx;
	int result = -EINVAL;

	if
 (unlikely(!ipa_ctx)) {
		IPAERR("IPA driver was not initialized\n");
		return -EINVAL;
	}

	if (param_in->client >= IPA_CLIENT_MAX) {
		IPAERR("bad parm client:%d\n", param_in->client);
		goto fail;
	}

	ipa_ep_idx = ipa2_get_ep_mapping(param_in->client);
	if (ipa_ep_idx == -1) {
		IPAERR("Invalid client.\n");
		goto fail;
	}

	ep = &ipa_ctx->ep[ipa_ep_idx];
	if (!ep->valid) {
		IPAERR("EP not allocated.\n");
		goto fail;
	}

	meta.qmap_id = param_in->qmap_id;
	/* USB/HSIC/ODU producers take the generic metadata path; the WLAN
	 * producer goes through the WDI pipe writer and is treated as
	 * best-effort (failure is logged but result is forced to 0). */
	if (param_in->client == IPA_CLIENT_USB_PROD ||
	    param_in->client == IPA_CLIENT_HSIC1_PROD ||
	    param_in->client == IPA_CLIENT_ODU_PROD) {
		result = ipa2_cfg_ep_metadata(ipa_ep_idx, &meta);
	} else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
		ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta;
		result = ipa_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
		if (result)
			IPAERR("qmap_id %d write failed on ep=%d\n",
					meta.qmap_id, ipa_ep_idx);
		result = 0;
	}

fail:
	return result;
}

/**
 * ipa_dump_buff_internal() - dumps buffer for debug purposes
 * @base: buffer base address
 * @phy_base: buffer physical base address
 * @size: size of the buffer
 */
void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
{
	int i;
	u32 *cur = (u32 *)base;
	u8 *byt;

	IPADBG("system phys addr=%pa len=%u\n", &phy_base, size);
	/* Print each 32-bit word plus its four constituent bytes; any tail
	 * smaller than 4 bytes is not printed (size / 4 truncates). */
	for (i = 0; i < size / 4; i++) {
		byt = (u8 *)(cur + i);
		IPADBG("%2d %08x   %02x %02x %02x %02x\n", i, *(cur + i),
				byt[0], byt[1], byt[2], byt[3]);
	}
	IPADBG("END\n");
}

/**
 * ipa_pipe_mem_init() - initialize the pipe memory
 * @start_ofst: start offset
 * @size: size
 *
 * Return value:
 * 0: success
 * -ENOMEM: no memory
 */
int ipa_pipe_mem_init(u32 start_ofst, u32 size)
{
	int res;
	u32 aligned_start_ofst;
	u32 aligned_size;
	struct gen_pool *pool;

	if (!size) {
		IPAERR("no IPA pipe memory allocated\n");
		goto fail;
	}

	/* Align the start up to the HW table alignment and shrink the usable
	 * size by however much alignment consumed. */
	aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst);
	aligned_size = size - (aligned_start_ofst - start_ofst);

	IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
	       start_ofst, aligned_start_ofst, size, aligned_size);

	/*
 allocation order of 8 i.e. 128 bytes, global pool */
	pool = gen_pool_create(8, -1);
	if (!pool) {
		IPAERR("Failed to create a new memory pool.\n");
		goto fail;
	}

	res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
	if (res) {
		IPAERR("Failed to add memory to IPA pipe pool\n");
		goto err_pool_add;
	}

	ipa_ctx->pipe_mem_pool = pool;
	return 0;

err_pool_add:
	gen_pool_destroy(pool);
fail:
	return -ENOMEM;
}

/**
 * ipa_pipe_mem_alloc() - allocate pipe memory
 * @ofst: offset
 * @size: size
 *
 * Return value:
 * 0: success
 */
int ipa_pipe_mem_alloc(u32 *ofst, u32 size)
{
	u32 vaddr;
	int res = -1;

	/* Fail fast if the pool was never initialized or size is zero. */
	if (!ipa_ctx->pipe_mem_pool || !size) {
		IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
				ipa_ctx->pipe_mem_pool);
		return res;
	}

	vaddr = gen_pool_alloc(ipa_ctx->pipe_mem_pool, size);

	if (vaddr) {
		*ofst = vaddr;
		res = 0;
		IPADBG("size=%u ofst=%u\n", size, vaddr);
	} else {
		IPAERR("size=%u failed\n", size);
	}

	return res;
}

/**
 * ipa_pipe_mem_free() - free pipe memory
 * @ofst: offset
 * @size: size
 *
 * Return value:
 * 0: success
 */
int ipa_pipe_mem_free(u32 ofst, u32 size)
{
	IPADBG("size=%u ofst=%u\n", size, ofst);
	/* Silently ignore frees before pool init or with zero size. */
	if (ipa_ctx->pipe_mem_pool && size)
		gen_pool_free(ipa_ctx->pipe_mem_pool, ofst, size);
	return 0;
}

/**
 * ipa2_set_aggr_mode() - Set the aggregation mode which is a global setting
 * @mode:	[in] the desired aggregation mode for e.g. straight MBIM, QCNCM,
 * etc
 *
 * Returns:	0 on success
 */
int ipa2_set_aggr_mode(enum ipa_aggr_mode mode)
{
	u32 reg_val;

	ipa_inc_client_enable_clks();
	/* Read-modify-write only bit 0 of the QCNCM register. */
	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
	ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, (mode & 0x1) |
			(reg_val & 0xfffffffe));
	ipa_dec_client_disable_clks();
	return 0;
}

/**
 * ipa2_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
 * mode
 * @sig:	[in] the first 3 bytes of QCNCM NDP signature (expected to be
 * "QND")
 *
 * Set the NDP signature used for QCNCM aggregation mode.
The fourth byte * (expected to be 'P') needs to be set using the header addition mechanism * * Returns: 0 on success, negative on failure */ int ipa2_set_qcncm_ndp_sig(char sig[3]) { u32 reg_val; if (sig == NULL) { IPAERR("bad argument for ipa_set_qcncm_ndp_sig/n"); return -EINVAL; } ipa_inc_client_enable_clks(); reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST); ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, sig[0] << 20 | (sig[1] << 12) | (sig[2] << 4) | (reg_val & 0xf000000f)); ipa_dec_client_disable_clks(); return 0; } /** * ipa2_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame * configuration * @enable: [in] true for single NDP/MBIM; false otherwise * * Returns: 0 on success */ int ipa2_set_single_ndp_per_mbim(bool enable) { u32 reg_val; ipa_inc_client_enable_clks(); reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST); ipa_write_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST, (enable & 0x1) | (reg_val & 0xfffffffe)); ipa_dec_client_disable_clks(); return 0; } /** * ipa_set_hw_timer_fix_for_mbim_aggr() - Enable/disable HW timer fix * for MBIM aggregation. 
 * @enable: [in] true for enable HW fix; false otherwise
 *
 * Returns:	0 on success
 */
int ipa_set_hw_timer_fix_for_mbim_aggr(bool enable)
{
	u32 reg_val;

	ipa_inc_client_enable_clks();
	/* Read-modify-write only the HW-timer-fix bit of spare register 1. */
	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST);
	ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST,
		(enable << IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT) |
		(reg_val & ~IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK));
	ipa_dec_client_disable_clks();
	return 0;
}
EXPORT_SYMBOL(ipa_set_hw_timer_fix_for_mbim_aggr);

/**
 * ipa_straddle_boundary() - Checks whether a memory buffer straddles a boundary
 * @start: start address of the memory buffer
 * @end: end address of the memory buffer
 * @boundary: boundary
 *
 * Return value:
 * 1: if the interval [start, end] straddles boundary
 * 0: otherwise
 */
int ipa_straddle_boundary(u32 start, u32 end, u32 boundary)
{
	u32 next_start;
	u32 prev_end;

	IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);

	/* next_start: first boundary multiple >= start.
	 * prev_end:   last boundary multiple strictly below the rounded-up
	 *             end. The masks assume boundary is a power of two
	 *             (presumably guaranteed by callers — confirm). */
	next_start = (start + (boundary - 1)) & ~(boundary - 1);
	prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) -
		boundary;

	while (next_start < prev_end)
		next_start += boundary;

	if (next_start == prev_end)
		return 1;
	else
		return 0;
}

/**
 * ipa2_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM
 *
 * Function is rate limited to avoid flooding kernel log buffer
 */
void ipa2_bam_reg_dump(void)
{
	/* At most one dump per 500*HZ to keep the log readable. */
	static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);

	if (__ratelimit(&_rs)) {
		ipa_inc_client_enable_clks();
		pr_err("IPA BAM START\n");
		if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) {
			sps_get_bam_debug_info(ipa_ctx->bam_handle, 5, 511950,
					0, 0);
			sps_get_bam_debug_info(ipa_ctx->bam_handle, 93, 0,
					0, 0);
		} else {
			sps_get_bam_debug_info(ipa_ctx->bam_handle, 93,
			(SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_CONS))
			|
			SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_PROD))),
			0, 2);
		}
		ipa_dec_client_disable_clks();
	}
}

/* Fill the global SRAM partition table with the IPA v2.0 layout constants
 * and log every region (straight-line assignments; no HW access). */
static void ipa_init_mem_partition_v2(void)
{
	IPADBG("Memory partition IPA 2\n");
	IPA_MEM_PART(nat_ofst) =
 IPA_RAM_NAT_OFST;
	IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
		IPA_MEM_PART(nat_size));

	IPA_MEM_PART(ofst_start) = IPA_MEM_v2_RAM_OFST_START;
	IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));

	/* IPv4/IPv6 filter regions (SRAM offsets plus DDR fallback sizes). */
	IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_RAM_V4_FLT_OFST;
	IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_RAM_V4_FLT_SIZE;
	IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
	IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
		IPA_MEM_PART(v4_flt_size_ddr));

	IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_RAM_V6_FLT_OFST;
	IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_RAM_V6_FLT_SIZE;
	IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
	IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
		IPA_MEM_PART(v6_flt_size_ddr));

	/* IPv4 routing table: offset, index count, and modem/apps index
	 * ownership ranges. */
	IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_RAM_V4_RT_OFST;
	IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));

	IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_RAM_V4_NUM_INDEX;
	IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));

	IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_V4_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_V4_MODEM_RT_INDEX_HI;
	IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_modem_rt_index_lo),
		IPA_MEM_PART(v4_modem_rt_index_hi));

	IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_V4_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_V4_APPS_RT_INDEX_HI;
	IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_apps_rt_index_lo),
		IPA_MEM_PART(v4_apps_rt_index_hi));

	IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_RAM_V4_RT_SIZE;
	IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
	IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
		IPA_MEM_PART(v4_rt_size_ddr));

	/* IPv6 routing table: same structure as IPv4 above. */
	IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_RAM_V6_RT_OFST;
	IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));

	IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_RAM_V6_NUM_INDEX;
	IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));

	IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_V6_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_V6_MODEM_RT_INDEX_HI;
	IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_modem_rt_index_lo),
		IPA_MEM_PART(v6_modem_rt_index_hi));

	IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_V6_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_V6_APPS_RT_INDEX_HI;
	IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_apps_rt_index_lo),
		IPA_MEM_PART(v6_apps_rt_index_hi));

	IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_RAM_V6_RT_SIZE;
	IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
	IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
		IPA_MEM_PART(v6_rt_size_ddr));

	/* Header tables (modem-owned and apps-owned) and the modem region. */
	IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_RAM_MODEM_HDR_OFST;
	IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_RAM_MODEM_HDR_SIZE;
	IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));

	IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_RAM_APPS_HDR_OFST;
	IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_RAM_APPS_HDR_SIZE;
	IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_RAM_HDR_SIZE_DDR;
	IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
		IPA_MEM_PART(apps_hdr_size_ddr));

	IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_RAM_MODEM_OFST;
	IPA_MEM_PART(modem_size) = IPA_MEM_v2_RAM_MODEM_SIZE;
	IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
		IPA_MEM_PART(modem_size));

	/* Apps-owned filter regions. */
	IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_RAM_APPS_V4_FLT_OFST;
	IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE;
	IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v4_flt_ofst),
		IPA_MEM_PART(apps_v4_flt_size));

	IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_RAM_APPS_V6_FLT_OFST;
	IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE;
	IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v6_flt_ofst),
 IPA_MEM_PART(apps_v6_flt_size));

	IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_RAM_UC_INFO_OFST;
	IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_RAM_UC_INFO_SIZE;
	/* (debug label says "V6" but this is the generic uC info region) */
	IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(uc_info_ofst),
		IPA_MEM_PART(uc_info_size));

	IPA_MEM_PART(end_ofst) = IPA_MEM_v2_RAM_END_OFST;
	IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_RAM_APPS_V4_RT_OFST;
	IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_RAM_APPS_V4_RT_SIZE;
	IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_RAM_APPS_V6_RT_OFST;
	IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_RAM_APPS_V6_RT_SIZE;
}

/* Fill the global SRAM partition table with the IPA v2.5 layout constants
 * (adds header-proc-ctx regions relative to the v2.0 layout). */
static void ipa_init_mem_partition_v2_5(void)
{
	IPADBG("Memory partition IPA 2.5\n");
	IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
	IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
		IPA_MEM_PART(nat_size));

	IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_5_RAM_UC_INFO_OFST;
	IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_5_RAM_UC_INFO_SIZE;
	IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
		IPA_MEM_PART(uc_info_size));

	IPA_MEM_PART(ofst_start) = IPA_MEM_v2_5_RAM_OFST_START;
	IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));

	/* IPv4/IPv6 filter regions. */
	IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_5_RAM_V4_FLT_OFST;
	IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_5_RAM_V4_FLT_SIZE;
	IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
	IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
		IPA_MEM_PART(v4_flt_size_ddr));

	IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_5_RAM_V6_FLT_OFST;
	IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_5_RAM_V6_FLT_SIZE;
	IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
	IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
		IPA_MEM_PART(v6_flt_size_ddr));

	/* IPv4 routing table. */
	IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_5_RAM_V4_RT_OFST;
	IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));

	IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_5_RAM_V4_NUM_INDEX;
	IPADBG("V4 RT NUM INDEX 0x%x\n",
 IPA_MEM_PART(v4_num_index));

	IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI;
	IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_modem_rt_index_lo),
		IPA_MEM_PART(v4_modem_rt_index_hi));

	IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_HI;
	IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_apps_rt_index_lo),
		IPA_MEM_PART(v4_apps_rt_index_hi));

	IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_5_RAM_V4_RT_SIZE;
	IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
	IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
		IPA_MEM_PART(v4_rt_size_ddr));

	/* IPv6 routing table: same structure as IPv4 above. */
	IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_5_RAM_V6_RT_OFST;
	IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));

	IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_5_RAM_V6_NUM_INDEX;
	IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));

	IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI;
	IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_modem_rt_index_lo),
		IPA_MEM_PART(v6_modem_rt_index_hi));

	IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_HI;
	IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_apps_rt_index_lo),
		IPA_MEM_PART(v6_apps_rt_index_hi));

	IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_5_RAM_V6_RT_SIZE;
	IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
	IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
		IPA_MEM_PART(v6_rt_size_ddr));

	/* Header tables. */
	IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_5_RAM_MODEM_HDR_OFST;
	IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE;
	IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));

	IPA_MEM_PART(apps_hdr_ofst) =
 IPA_MEM_v2_5_RAM_APPS_HDR_OFST;
	IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_5_RAM_APPS_HDR_SIZE;
	IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_5_RAM_HDR_SIZE_DDR;
	IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
		IPA_MEM_PART(apps_hdr_size_ddr));

	/* Header processing-context regions — new on IPA v2.5. */
	IPA_MEM_PART(modem_hdr_proc_ctx_ofst) =
		IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST;
	IPA_MEM_PART(modem_hdr_proc_ctx_size) =
		IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE;
	IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst),
		IPA_MEM_PART(modem_hdr_proc_ctx_size));

	IPA_MEM_PART(apps_hdr_proc_ctx_ofst) =
		IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST;
	IPA_MEM_PART(apps_hdr_proc_ctx_size) =
		IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE;
	IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr) =
		IPA_MEM_RAM_HDR_PROC_CTX_SIZE_DDR;
	IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(apps_hdr_proc_ctx_ofst),
		IPA_MEM_PART(apps_hdr_proc_ctx_size),
		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr));

	IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_5_RAM_MODEM_OFST;
	IPA_MEM_PART(modem_size) = IPA_MEM_v2_5_RAM_MODEM_SIZE;
	IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
		IPA_MEM_PART(modem_size));

	/* Apps-owned filter regions. */
	IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST;
	IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE;
	IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v4_flt_ofst),
		IPA_MEM_PART(apps_v4_flt_size));

	IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST;
	IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE;
	IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v6_flt_ofst),
		IPA_MEM_PART(apps_v6_flt_size));

	IPA_MEM_PART(end_ofst) = IPA_MEM_v2_5_RAM_END_OFST;
	IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_RT_OFST;
	IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_5_RAM_APPS_V4_RT_SIZE;
	IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_RT_OFST;
	IPA_MEM_PART(apps_v6_rt_size) =
 IPA_MEM_v2_5_RAM_APPS_V6_RT_SIZE;
}

/* Fill the global SRAM partition table with the IPA v2.6-Lite layout
 * constants (adds a modem comp/decomp region; no header-proc-ctx). */
static void ipa_init_mem_partition_v2_6L(void)
{
	IPADBG("Memory partition IPA 2.6Lite\n");
	IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
	IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
		IPA_MEM_PART(nat_size));

	IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_6L_RAM_UC_INFO_OFST;
	IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_6L_RAM_UC_INFO_SIZE;
	/* (debug label says "V6" but this is the generic uC info region) */
	IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(uc_info_ofst),
		IPA_MEM_PART(uc_info_size));

	IPA_MEM_PART(ofst_start) = IPA_MEM_v2_6L_RAM_OFST_START;
	IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));

	/* IPv4/IPv6 filter regions. */
	IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_6L_RAM_V4_FLT_OFST;
	IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_6L_RAM_V4_FLT_SIZE;
	IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
	IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
		IPA_MEM_PART(v4_flt_size_ddr));

	IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_6L_RAM_V6_FLT_OFST;
	IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_6L_RAM_V6_FLT_SIZE;
	IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
	IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
		IPA_MEM_PART(v6_flt_size_ddr));

	/* IPv4 routing table. */
	IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_6L_RAM_V4_RT_OFST;
	IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));

	IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_6L_RAM_V4_NUM_INDEX;
	IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));

	IPA_MEM_PART(v4_modem_rt_index_lo) =
		IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v4_modem_rt_index_hi) =
		IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI;
	IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_modem_rt_index_lo),
		IPA_MEM_PART(v4_modem_rt_index_hi));

	IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_HI;
	IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_apps_rt_index_lo),
 IPA_MEM_PART(v4_apps_rt_index_hi));

	IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_6L_RAM_V4_RT_SIZE;
	IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
	IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
		IPA_MEM_PART(v4_rt_size_ddr));

	/* IPv6 routing table: same structure as IPv4 above. */
	IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_6L_RAM_V6_RT_OFST;
	IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));

	IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_6L_RAM_V6_NUM_INDEX;
	IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));

	IPA_MEM_PART(v6_modem_rt_index_lo) =
		IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v6_modem_rt_index_hi) =
		IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI;
	IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_modem_rt_index_lo),
		IPA_MEM_PART(v6_modem_rt_index_hi));

	IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_HI;
	IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_apps_rt_index_lo),
		IPA_MEM_PART(v6_apps_rt_index_hi));

	IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_6L_RAM_V6_RT_SIZE;
	IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
	IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
		IPA_MEM_PART(v6_rt_size_ddr));

	/* Header tables. */
	IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST;
	IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE;
	IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));

	IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_6L_RAM_APPS_HDR_OFST;
	IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE;
	IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_6L_RAM_HDR_SIZE_DDR;
	IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
		IPA_MEM_PART(apps_hdr_size_ddr));

	/* Modem compression/decompression region — v2.6L only. */
	IPA_MEM_PART(modem_comp_decomp_ofst) =
		IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST;
	IPA_MEM_PART(modem_comp_decomp_size) =
		IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE;
	IPADBG("MODEM COMP DECOMP OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(modem_comp_decomp_ofst),
		IPA_MEM_PART(modem_comp_decomp_size));

	IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_6L_RAM_MODEM_OFST;
	IPA_MEM_PART(modem_size) = IPA_MEM_v2_6L_RAM_MODEM_SIZE;
	IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
		IPA_MEM_PART(modem_size));

	/* Apps-owned filter regions. */
	IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST;
	IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE;
	IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v4_flt_ofst),
		IPA_MEM_PART(apps_v4_flt_size));

	IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST;
	IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE;
	IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v6_flt_ofst),
		IPA_MEM_PART(apps_v6_flt_size));

	IPA_MEM_PART(end_ofst) = IPA_MEM_v2_6L_RAM_END_OFST;
	IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_OFST;
	IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_SIZE;
	IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_OFST;
	IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_SIZE;
}

/**
 * ipa_controller_shared_static_bind() - set the appropriate shared methods for
 * for IPA HW version 2.0, 2.5, 2.6 and 2.6L
 *
 * @ctrl: data structure which holds the function pointers
 */
void ipa_controller_shared_static_bind(struct ipa_controller *ctrl)
{
	/* Bind the ops and rates common to all v2.x cores; per-version
	 * callers override the version-specific ops afterwards. */
	ctrl->ipa_init_rt4 = _ipa_init_rt4_v2;
	ctrl->ipa_init_rt6 = _ipa_init_rt6_v2;
	ctrl->ipa_init_flt4 = _ipa_init_flt4_v2;
	ctrl->ipa_init_flt6 = _ipa_init_flt6_v2;
	ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v2_0;
	ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v2_0;
	ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v2_0;
	ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v2_0;
	ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v2_0;
	ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v2_0;
	ctrl->ipa_cfg_route = _ipa_cfg_route_v2_0;
	ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v2_0;
	ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v2_0;
	ctrl->ipa_cfg_ep_metadata_mask =
 _ipa_cfg_ep_metadata_mask_v2_0;
	ctrl->ipa_clk_rate_turbo = IPA_V2_0_CLK_RATE_TURBO;
	ctrl->ipa_clk_rate_nominal = IPA_V2_0_CLK_RATE_NOMINAL;
	ctrl->ipa_clk_rate_svs = IPA_V2_0_CLK_RATE_SVS;
	ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v2_0;
	ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v2_0;
	ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v2_0;
	ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v2_0;
	ctrl->ipa_commit_flt = __ipa_commit_flt_v2;
	ctrl->ipa_commit_rt = __ipa_commit_rt_v2;
	ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2;
	ctrl->ipa_enable_clks = _ipa_enable_clks_v2_0;
	ctrl->ipa_disable_clks = _ipa_disable_clks_v2_0;
	ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v2_0;
	ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v2_0;
	/* Clock-scaling thresholds (Mbps) shared by all v2.x cores. */
	ctrl->clock_scaling_bw_threshold_nominal =
		IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS;
	ctrl->clock_scaling_bw_threshold_turbo =
		IPA_V2_0_BW_THRESHOLD_TURBO_MBPS;
}

/**
 * ipa_controller_static_bind() - set the appropriate methods for
 * IPA Driver based on the HW version
 *
 * @ctrl: data structure which holds the function pointers
 * @hw_type: the HW type in use
 *
 * This function can avoid the runtime assignment by using C99 special
 * struct initialization - hard decision...
 time.vs.mem
 */
int ipa_controller_static_bind(struct ipa_controller *ctrl,
		enum ipa_hw_type hw_type)
{
	switch (hw_type) {
	case (IPA_HW_v1_1):
		/* v1.1 does not use the shared v2 bind; every op is bound
		 * explicitly here. */
		ipa_init_mem_partition_v2();
		ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v1_1;
		ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v1_1;
		ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v1_1;
		ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v1_1;
		ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v1_1;
		ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v1_1;
		ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v1_1;
		ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v1_1;
		ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v1_1;
		ctrl->ipa_cfg_route = _ipa_cfg_route_v1_1;
		ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v1_1;
		ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v1_1;
		ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v1_1;
		ctrl->ipa_clk_rate_turbo = IPA_V1_1_CLK_RATE;
		ctrl->ipa_clk_rate_nominal = IPA_V1_1_CLK_RATE;
		ctrl->ipa_clk_rate_svs = IPA_V1_1_CLK_RATE;
		ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v1_1;
		ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v1_1;
		ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v1_1;
		ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v1_1;
		ctrl->ipa_commit_flt = __ipa_commit_flt_v1_1;
		ctrl->ipa_commit_rt = __ipa_commit_rt_v1_1;
		ctrl->ipa_commit_hdr = __ipa_commit_hdr_v1_1;
		ctrl->ipa_enable_clks = _ipa_enable_clks_v1_1;
		ctrl->ipa_disable_clks = _ipa_disable_clks_v1_1;
		ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v1_1;
		ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v1_1;
		/* NOTE(review): v1.1 deliberately reuses the v2.0 register
		 * base offset constant here — confirm against the HW memory
		 * map. */
		ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0;
		ctrl->max_holb_tmr_val = IPA_V1_MAX_HOLB_TMR_VAL;
		break;
	case (IPA_HW_v2_0):
		/* Shared v2 bind first, then v2.0-specific overrides. */
		ipa_init_mem_partition_v2();
		ipa_controller_shared_static_bind(ctrl);
		ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_0;
		ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0;
		ctrl->max_holb_tmr_val = IPA_V2_0_MAX_HOLB_TMR_VAL;
		ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_0;
		ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_0;
		ctrl->ipa_init_sram = _ipa_init_sram_v2;
		ctrl->ipa_init_hdr = _ipa_init_hdr_v2;
		ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2;
		ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2;
		break;
	case (IPA_HW_v2_5):
		/* Shared v2 bind first, then v2.5-specific overrides. */
		ipa_init_mem_partition_v2_5();
		ipa_controller_shared_static_bind(ctrl);
		ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_5;
		ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_5;
		ctrl->max_holb_tmr_val = IPA_V2_5_MAX_HOLB_TMR_VAL;
		ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_5;
		ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_5;
		ctrl->ipa_init_sram = _ipa_init_sram_v2_5;
		ctrl->ipa_init_hdr = _ipa_init_hdr_v2_5;
		ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_5;
		ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_5;
		break;
	case (IPA_HW_v2_6L):
		/* Shared v2 bind first, then v2.6L-specific overrides. */
		ipa_init_mem_partition_v2_6L();
		ipa_controller_shared_static_bind(ctrl);
		ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_6L;
		ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_6L;
		ctrl->max_holb_tmr_val = IPA_V2_6L_MAX_HOLB_TMR_VAL;
		ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_6L;
		ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_6L;
		ctrl->ipa_init_sram = _ipa_init_sram_v2_6L;
		ctrl->ipa_init_hdr = _ipa_init_hdr_v2_6L;
		ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_6L;
		ctrl->ipa_generate_rt_hw_rule =
			__ipa_generate_rt_hw_rule_v2_6L;
		break;
	default:
		return -EPERM;
	}

	return 0;
}

/* Reset an skb for reuse: clear shared info (keeping one dataref), wipe the
 * header area up to tail, and re-point data past the standard headroom. */
void ipa_skb_recycle(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);
}

/* Allocate an id for @ptr in the driver idr under the idr spinlock.
 * Returns the new id, or a negative errno from idr_alloc(). */
int ipa_id_alloc(void *ptr)
{
	int id;

	/* Preload outside the lock so idr_alloc() can use GFP_NOWAIT. */
	idr_preload(GFP_KERNEL);
	spin_lock(&ipa_ctx->idr_lock);
	id = idr_alloc(&ipa_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&ipa_ctx->idr_lock);
	idr_preload_end();

	return id;
}

/* Look up the pointer registered under @id; NULL if not present. */
void *ipa_id_find(u32 id)
{
	void *ptr;

	spin_lock(&ipa_ctx->idr_lock);
	ptr = idr_find(&ipa_ctx->ipa_idr, id);
	spin_unlock(&ipa_ctx->idr_lock);

	return ptr;
}
void ipa_id_remove(u32 id) { spin_lock(&ipa_ctx->idr_lock); idr_remove(&ipa_ctx->ipa_idr, id); spin_unlock(&ipa_ctx->idr_lock); } static void ipa_tag_free_buf(void *user1, int user2) { kfree(user1); } static void ipa_tag_free_skb(void *user1, int user2) { dev_kfree_skb_any((struct sk_buff *)user1); } #define REQUIRED_TAG_PROCESS_DESCRIPTORS 4 /* ipa_tag_process() - Initiates a tag process. Incorporates the input * descriptors * * @desc: descriptors with commands for IC * @desc_size: amount of descriptors in the above variable * * Note: The descriptors are copied (if there's room), the client needs to * free his descriptors afterwards * * Return: 0 or negative in case of failure */ int ipa_tag_process(struct ipa_desc desc[], int descs_num, unsigned long timeout) { struct ipa_sys_context *sys; struct ipa_desc *tag_desc; int desc_idx = 0; struct ipa_ip_packet_init *pkt_init; struct ipa_register_write *reg_write_nop; struct ipa_ip_packet_tag_status *status; int i; struct sk_buff *dummy_skb; int res; struct ipa_tag_completion *comp; int ep_idx; /* Not enough room for the required descriptors for the tag process */ if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) { IPAERR("up to %d descriptors are allowed (received %d)\n", IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS, descs_num); return -ENOMEM; } ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD); if (-1 == ep_idx) { IPAERR("Client %u is not mapped\n", IPA_CLIENT_APPS_CMD_PROD); return -EFAULT; } sys = ipa_ctx->ep[ep_idx].sys; tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, GFP_KERNEL); if (!tag_desc) { IPAERR("failed to allocate memory\n"); res = -ENOMEM; goto fail_alloc_desc; } /* IP_PACKET_INIT IC for tag status to be sent to apps */ pkt_init = kzalloc(sizeof(*pkt_init), GFP_KERNEL); if (!pkt_init) { IPAERR("failed to allocate memory\n"); res = -ENOMEM; goto fail_alloc_pkt_init; } pkt_init->destination_pipe_index = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); 
tag_desc[desc_idx].opcode = IPA_IP_PACKET_INIT; tag_desc[desc_idx].pyld = pkt_init; tag_desc[desc_idx].len = sizeof(*pkt_init); tag_desc[desc_idx].type = IPA_IMM_CMD_DESC; tag_desc[desc_idx].callback = ipa_tag_free_buf; tag_desc[desc_idx].user1 = pkt_init; desc_idx++; /* NO-OP IC for ensuring that IPA pipeline is empty */ reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL); if (!reg_write_nop) { IPAERR("no mem\n"); res = -ENOMEM; goto fail_free_desc; } reg_write_nop->skip_pipeline_clear = 0; reg_write_nop->value_mask = 0x0; tag_desc[desc_idx].opcode = IPA_REGISTER_WRITE; tag_desc[desc_idx].pyld = reg_write_nop; tag_desc[desc_idx].len = sizeof(*reg_write_nop); tag_desc[desc_idx].type = IPA_IMM_CMD_DESC; tag_desc[desc_idx].callback = ipa_tag_free_buf; tag_desc[desc_idx].user1 = reg_write_nop; desc_idx++; /* status IC */ status = kzalloc(sizeof(*status), GFP_KERNEL); if (!status) { IPAERR("no mem\n"); res = -ENOMEM; goto fail_free_desc; } status->tag_f_2 = IPA_COOKIE; tag_desc[desc_idx].opcode = IPA_IP_PACKET_TAG_STATUS; tag_desc[desc_idx].pyld = status; tag_desc[desc_idx].len = sizeof(*status); tag_desc[desc_idx].type = IPA_IMM_CMD_DESC; tag_desc[desc_idx].callback = ipa_tag_free_buf; tag_desc[desc_idx].user1 = status; desc_idx++; /* Copy the required descriptors from the client now */ if (desc) { memcpy(&(tag_desc[desc_idx]), desc, descs_num * sizeof(struct ipa_desc)); desc_idx += descs_num; } comp = kzalloc(sizeof(*comp), GFP_KERNEL); if (!comp) { IPAERR("no mem\n"); res = -ENOMEM; goto fail_free_desc; } init_completion(&comp->comp); /* completion needs to be released from both here and rx handler */ atomic_set(&comp->cnt, 2); /* dummy packet to send to IPA. 
packet payload is a completion object */ dummy_skb = alloc_skb(sizeof(comp), GFP_KERNEL); if (!dummy_skb) { IPAERR("failed to allocate memory\n"); res = -ENOMEM; goto fail_free_skb; } memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp)); tag_desc[desc_idx].pyld = dummy_skb->data; tag_desc[desc_idx].len = dummy_skb->len; tag_desc[desc_idx].type = IPA_DATA_DESC_SKB; tag_desc[desc_idx].callback = ipa_tag_free_skb; tag_desc[desc_idx].user1 = dummy_skb; desc_idx++; /* send all descriptors to IPA with single EOT */ res = ipa_send(sys, desc_idx, tag_desc, true); if (res) { IPAERR("failed to send TAG packets %d\n", res); res = -ENOMEM; goto fail_send; } kfree(tag_desc); tag_desc = NULL; IPADBG("waiting for TAG response\n"); res = wait_for_completion_timeout(&comp->comp, timeout); if (res == 0) { IPAERR("timeout (%lu msec) on waiting for TAG response\n", timeout); WARN_ON(1); if (atomic_dec_return(&comp->cnt) == 0) kfree(comp); return -ETIME; } IPADBG("TAG response arrived!\n"); if (atomic_dec_return(&comp->cnt) == 0) kfree(comp); /* sleep for short period to ensure IPA wrote all packets to BAM */ usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC); return 0; fail_send: dev_kfree_skb_any(dummy_skb); desc_idx--; fail_free_skb: kfree(comp); fail_free_desc: /* * Free only the first descriptors allocated here. * [pkt_init, status, nop] * The user is responsible to free his allocations * in case of failure. 
* The min is required because we may fail during * of the initial allocations above */ for (i = 0; i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS-1, desc_idx); i++) kfree(tag_desc[i].user1); fail_alloc_pkt_init: kfree(tag_desc); fail_alloc_desc: return res; } /** * ipa_tag_generate_force_close_desc() - generate descriptors for force close * immediate command * * @desc: descriptors for IC * @desc_size: desc array size * @start_pipe: first pipe to close aggregation * @end_pipe: last (non-inclusive) pipe to close aggregation * * Return: number of descriptors written or negative in case of failure */ static int ipa_tag_generate_force_close_desc(struct ipa_desc desc[], int desc_size, int start_pipe, int end_pipe) { int i; u32 aggr_init; int desc_idx = 0; int res; struct ipa_register_write *reg_write_agg_close; for (i = start_pipe; i < end_pipe; i++) { aggr_init = ipa_read_reg(ipa_ctx->mmio, IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i)); if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >> IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) != IPA_ENABLE_AGGR) continue; IPADBG("Force close ep: %d\n", i); if (desc_idx + 1 > desc_size) { IPAERR("Internal error - no descriptors\n"); res = -EFAULT; goto fail_no_desc; } reg_write_agg_close = kzalloc(sizeof(*reg_write_agg_close), GFP_KERNEL); if (!reg_write_agg_close) { IPAERR("no mem\n"); res = -ENOMEM; goto fail_alloc_reg_write_agg_close; } reg_write_agg_close->skip_pipeline_clear = 0; reg_write_agg_close->offset = IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i); reg_write_agg_close->value = (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) << IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT; reg_write_agg_close->value_mask = IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK << IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT; desc[desc_idx].opcode = IPA_REGISTER_WRITE; desc[desc_idx].pyld = reg_write_agg_close; desc[desc_idx].len = sizeof(*reg_write_agg_close); desc[desc_idx].type = IPA_IMM_CMD_DESC; desc[desc_idx].callback = ipa_tag_free_buf; desc[desc_idx].user1 = reg_write_agg_close; 
desc_idx++; } return desc_idx; fail_alloc_reg_write_agg_close: for (i = 0; i < desc_idx; i++) kfree(desc[desc_idx].user1); fail_no_desc: return res; } /** * ipa_tag_aggr_force_close() - Force close aggregation * * @pipe_num: pipe number or -1 for all pipes */ int ipa_tag_aggr_force_close(int pipe_num) { struct ipa_desc *desc; int res = -1; int start_pipe; int end_pipe; int num_descs; int num_aggr_descs; if (pipe_num < -1 || pipe_num >= (int)ipa_ctx->ipa_num_pipes) { IPAERR("Invalid pipe number %d\n", pipe_num); return -EINVAL; } if (pipe_num == -1) { start_pipe = 0; end_pipe = ipa_ctx->ipa_num_pipes; } else { start_pipe = pipe_num; end_pipe = pipe_num + 1; } num_descs = end_pipe - start_pipe; desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL); if (!desc) { IPAERR("no mem\n"); return -ENOMEM; } /* Force close aggregation on all valid pipes with aggregation */ num_aggr_descs = ipa_tag_generate_force_close_desc(desc, num_descs, start_pipe, end_pipe); if (num_aggr_descs < 0) { IPAERR("ipa_tag_generate_force_close_desc failed %d\n", num_aggr_descs); goto fail_free_desc; } res = ipa_tag_process(desc, num_aggr_descs, IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT); fail_free_desc: kfree(desc); return res; } /** * ipa2_is_ready() - check if IPA module was initialized * successfully * * Return value: true for yes; false for no */ bool ipa2_is_ready(void) { return (ipa_ctx != NULL) ? 
true : false; } /** * ipa2_is_client_handle_valid() - check if IPA client handle is valid handle * * Return value: true for yes; false for no */ bool ipa2_is_client_handle_valid(u32 clnt_hdl) { if (unlikely(!ipa_ctx)) { IPAERR("IPA driver was not initialized\n"); return false; } if (clnt_hdl >= 0 && clnt_hdl < ipa_ctx->ipa_num_pipes) return true; return false; } /** * ipa2_proxy_clk_unvote() - called to remove IPA clock proxy vote * * Return value: none */ void ipa2_proxy_clk_unvote(void) { if (ipa2_is_ready() && ipa_ctx->q6_proxy_clk_vote_valid) { ipa_dec_client_disable_clks(); ipa_ctx->q6_proxy_clk_vote_valid = false; } } /** * ipa2_proxy_clk_vote() - called to add IPA clock proxy vote * * Return value: none */ void ipa2_proxy_clk_vote(void) { if (ipa2_is_ready() && !ipa_ctx->q6_proxy_clk_vote_valid) { ipa_inc_client_enable_clks(); ipa_ctx->q6_proxy_clk_vote_valid = true; } } /** * ipa2_get_smem_restr_bytes()- Return IPA smem restricted bytes * * Return value: u16 - number of IPA smem restricted bytes */ u16 ipa2_get_smem_restr_bytes(void) { if (ipa_ctx) return ipa_ctx->smem_restricted_bytes; IPAERR("IPA Driver not initialized\n"); return 0; } /** * ipa2_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt * * Return value: true if modem configures embedded pipe flt, false otherwise */ bool ipa2_get_modem_cfg_emb_pipe_flt(void) { if (ipa_ctx) return ipa_ctx->modem_cfg_emb_pipe_flt; IPAERR("IPA driver has not been initialized\n"); return false; } /** * ipa2_get_transport_type()- Return IPA_TRANSPORT_TYPE_SPS * * Return value: enum ipa_transport_type */ enum ipa_transport_type ipa2_get_transport_type(void) { return IPA_TRANSPORT_TYPE_SPS; } u32 ipa_get_num_pipes(void) { if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) return ipa_read_reg(ipa_ctx->mmio, IPA_ENABLED_PIPES_OFST); else return IPA_MAX_NUM_PIPES; } EXPORT_SYMBOL(ipa_get_num_pipes); /** * ipa2_disable_apps_wan_cons_deaggr()- set ipa_ctx->ipa_client_apps_wan_cons_agg_gro * * Return value: 0 or 
negative in case of failure */ int ipa2_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count) { int res = -1; /* checking if IPA-HW can support */ if ((agg_size >> 10) > IPA_AGGR_BYTE_LIMIT) { IPAWANERR("IPA-AGG byte limit %d\n", IPA_AGGR_BYTE_LIMIT); IPAWANERR("exceed aggr_byte_limit\n"); return res; } if (agg_count > IPA_AGGR_PKT_LIMIT) { IPAWANERR("IPA-AGG pkt limit %d\n", IPA_AGGR_PKT_LIMIT); IPAWANERR("exceed aggr_pkt_limit\n"); return res; } if (ipa_ctx) { ipa_ctx->ipa_client_apps_wan_cons_agg_gro = true; return 0; } return res; } static struct ipa_gsi_ep_config *ipa2_get_gsi_ep_info(int ipa_ep_idx) { IPAERR("Not supported for IPA 2.x\n"); return NULL; } static int ipa2_stop_gsi_channel(u32 clnt_hdl) { IPAERR("Not supported for IPA 2.x\n"); return -EFAULT; } int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type, struct ipa_api_controller *api_ctrl) { if (ipa_hw_type < IPA_HW_v2_0 || ipa_hw_type >= IPA_HW_v3_0) { IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type); WARN_ON(1); return -EPERM; } api_ctrl->ipa_connect = ipa2_connect; api_ctrl->ipa_disconnect = ipa2_disconnect; api_ctrl->ipa_reset_endpoint = ipa2_reset_endpoint; api_ctrl->ipa_clear_endpoint_delay = ipa2_clear_endpoint_delay; api_ctrl->ipa_cfg_ep = ipa2_cfg_ep; api_ctrl->ipa_cfg_ep_nat = ipa2_cfg_ep_nat; api_ctrl->ipa_cfg_ep_hdr = ipa2_cfg_ep_hdr; api_ctrl->ipa_cfg_ep_hdr_ext = ipa2_cfg_ep_hdr_ext; api_ctrl->ipa_cfg_ep_mode = ipa2_cfg_ep_mode; api_ctrl->ipa_cfg_ep_aggr = ipa2_cfg_ep_aggr; api_ctrl->ipa_cfg_ep_deaggr = ipa2_cfg_ep_deaggr; api_ctrl->ipa_cfg_ep_route = ipa2_cfg_ep_route; api_ctrl->ipa_cfg_ep_holb = ipa2_cfg_ep_holb; api_ctrl->ipa_cfg_ep_cfg = ipa2_cfg_ep_cfg; api_ctrl->ipa_cfg_ep_metadata_mask = ipa2_cfg_ep_metadata_mask; api_ctrl->ipa_cfg_ep_holb_by_client = ipa2_cfg_ep_holb_by_client; api_ctrl->ipa_cfg_ep_ctrl = ipa2_cfg_ep_ctrl; api_ctrl->ipa_add_hdr = ipa2_add_hdr; api_ctrl->ipa_del_hdr = ipa2_del_hdr; api_ctrl->ipa_commit_hdr = ipa2_commit_hdr; 
api_ctrl->ipa_reset_hdr = ipa2_reset_hdr; api_ctrl->ipa_get_hdr = ipa2_get_hdr; api_ctrl->ipa_put_hdr = ipa2_put_hdr; api_ctrl->ipa_copy_hdr = ipa2_copy_hdr; api_ctrl->ipa_add_hdr_proc_ctx = ipa2_add_hdr_proc_ctx; api_ctrl->ipa_del_hdr_proc_ctx = ipa2_del_hdr_proc_ctx; api_ctrl->ipa_add_rt_rule = ipa2_add_rt_rule; api_ctrl->ipa_del_rt_rule = ipa2_del_rt_rule; api_ctrl->ipa_commit_rt = ipa2_commit_rt; api_ctrl->ipa_reset_rt = ipa2_reset_rt; api_ctrl->ipa_get_rt_tbl = ipa2_get_rt_tbl; api_ctrl->ipa_put_rt_tbl = ipa2_put_rt_tbl; api_ctrl->ipa_query_rt_index = ipa2_query_rt_index; api_ctrl->ipa_mdfy_rt_rule = ipa2_mdfy_rt_rule; api_ctrl->ipa_add_flt_rule = ipa2_add_flt_rule; api_ctrl->ipa_del_flt_rule = ipa2_del_flt_rule; api_ctrl->ipa_mdfy_flt_rule = ipa2_mdfy_flt_rule; api_ctrl->ipa_commit_flt = ipa2_commit_flt; api_ctrl->ipa_reset_flt = ipa2_reset_flt; api_ctrl->allocate_nat_device = ipa2_allocate_nat_device; api_ctrl->ipa_nat_init_cmd = ipa2_nat_init_cmd; api_ctrl->ipa_nat_dma_cmd = ipa2_nat_dma_cmd; api_ctrl->ipa_nat_del_cmd = ipa2_nat_del_cmd; api_ctrl->ipa_send_msg = ipa2_send_msg; api_ctrl->ipa_register_pull_msg = ipa2_register_pull_msg; api_ctrl->ipa_deregister_pull_msg = ipa2_deregister_pull_msg; api_ctrl->ipa_register_intf = ipa2_register_intf; api_ctrl->ipa_register_intf_ext = ipa2_register_intf_ext; api_ctrl->ipa_deregister_intf = ipa2_deregister_intf; api_ctrl->ipa_set_aggr_mode = ipa2_set_aggr_mode; api_ctrl->ipa_set_qcncm_ndp_sig = ipa2_set_qcncm_ndp_sig; api_ctrl->ipa_set_single_ndp_per_mbim = ipa2_set_single_ndp_per_mbim; api_ctrl->ipa_tx_dp = ipa2_tx_dp; api_ctrl->ipa_tx_dp_mul = ipa2_tx_dp_mul; api_ctrl->ipa_free_skb = ipa2_free_skb; api_ctrl->ipa_setup_sys_pipe = ipa2_setup_sys_pipe; api_ctrl->ipa_teardown_sys_pipe = ipa2_teardown_sys_pipe; api_ctrl->ipa_sys_update_gsi_hdls = ipa2_sys_update_gsi_hdls; api_ctrl->ipa_sys_setup = ipa2_sys_setup; api_ctrl->ipa_sys_teardown = ipa2_sys_teardown; api_ctrl->ipa_connect_wdi_pipe = ipa2_connect_wdi_pipe; 
api_ctrl->ipa_disconnect_wdi_pipe = ipa2_disconnect_wdi_pipe; api_ctrl->ipa_enable_wdi_pipe = ipa2_enable_wdi_pipe; api_ctrl->ipa_disable_wdi_pipe = ipa2_disable_wdi_pipe; api_ctrl->ipa_resume_wdi_pipe = ipa2_resume_wdi_pipe; api_ctrl->ipa_suspend_wdi_pipe = ipa2_suspend_wdi_pipe; api_ctrl->ipa_get_wdi_stats = ipa2_get_wdi_stats; api_ctrl->ipa_get_smem_restr_bytes = ipa2_get_smem_restr_bytes; api_ctrl->ipa_uc_wdi_get_dbpa = ipa2_uc_wdi_get_dbpa; api_ctrl->ipa_uc_reg_rdyCB = ipa2_uc_reg_rdyCB; api_ctrl->ipa_create_wdi_mapping = ipa2_create_wdi_mapping; api_ctrl->ipa_release_wdi_mapping = ipa2_release_wdi_mapping; api_ctrl->ipa_rm_create_resource = ipa2_rm_create_resource; api_ctrl->ipa_rm_delete_resource = ipa2_rm_delete_resource; api_ctrl->ipa_rm_register = ipa2_rm_register; api_ctrl->ipa_rm_deregister = ipa2_rm_deregister; api_ctrl->ipa_rm_set_perf_profile = ipa2_rm_set_perf_profile; api_ctrl->ipa_rm_add_dependency = ipa2_rm_add_dependency; api_ctrl->ipa_rm_delete_dependency = ipa2_rm_delete_dependency; api_ctrl->ipa_rm_request_resource = ipa2_rm_request_resource; api_ctrl->ipa_rm_release_resource = ipa2_rm_release_resource; api_ctrl->ipa_rm_notify_completion = ipa2_rm_notify_completion; api_ctrl->ipa_rm_inactivity_timer_init = ipa2_rm_inactivity_timer_init; api_ctrl->ipa_rm_inactivity_timer_destroy = ipa2_rm_inactivity_timer_destroy; api_ctrl->ipa_rm_inactivity_timer_request_resource = ipa2_rm_inactivity_timer_request_resource; api_ctrl->ipa_rm_inactivity_timer_release_resource = ipa2_rm_inactivity_timer_release_resource; api_ctrl->teth_bridge_init = ipa2_teth_bridge_init; api_ctrl->teth_bridge_disconnect = ipa2_teth_bridge_disconnect; api_ctrl->teth_bridge_connect = ipa2_teth_bridge_connect; api_ctrl->ipa_set_client = ipa2_set_client; api_ctrl->ipa_get_client = ipa2_get_client; api_ctrl->ipa_get_client_uplink = ipa2_get_client_uplink; api_ctrl->odu_bridge_init = ipa2_odu_bridge_init; api_ctrl->odu_bridge_connect = ipa2_odu_bridge_connect; 
api_ctrl->odu_bridge_disconnect = ipa2_odu_bridge_disconnect; api_ctrl->odu_bridge_tx_dp = ipa2_odu_bridge_tx_dp; api_ctrl->odu_bridge_cleanup = ipa2_odu_bridge_cleanup; api_ctrl->ipa_dma_init = ipa2_dma_init; api_ctrl->ipa_dma_enable = ipa2_dma_enable; api_ctrl->ipa_dma_disable = ipa2_dma_disable; api_ctrl->ipa_dma_sync_memcpy = ipa2_dma_sync_memcpy; api_ctrl->ipa_dma_async_memcpy = ipa2_dma_async_memcpy; api_ctrl->ipa_dma_uc_memcpy = ipa2_dma_uc_memcpy; api_ctrl->ipa_dma_destroy = ipa2_dma_destroy; api_ctrl->ipa_mhi_init = ipa2_mhi_init; api_ctrl->ipa_mhi_start = ipa2_mhi_start; api_ctrl->ipa_mhi_connect_pipe = ipa2_mhi_connect_pipe; api_ctrl->ipa_mhi_disconnect_pipe = ipa2_mhi_disconnect_pipe; api_ctrl->ipa_mhi_suspend = ipa2_mhi_suspend; api_ctrl->ipa_mhi_resume = ipa2_mhi_resume; api_ctrl->ipa_mhi_destroy = ipa2_mhi_destroy; api_ctrl->ipa_write_qmap_id = ipa2_write_qmap_id; api_ctrl->ipa_add_interrupt_handler = ipa2_add_interrupt_handler; api_ctrl->ipa_remove_interrupt_handler = ipa2_remove_interrupt_handler; api_ctrl->ipa_restore_suspend_handler = ipa2_restore_suspend_handler; api_ctrl->ipa_bam_reg_dump = ipa2_bam_reg_dump; api_ctrl->ipa_get_ep_mapping = ipa2_get_ep_mapping; api_ctrl->ipa_is_ready = ipa2_is_ready; api_ctrl->ipa_proxy_clk_vote = ipa2_proxy_clk_vote; api_ctrl->ipa_proxy_clk_unvote = ipa2_proxy_clk_unvote; api_ctrl->ipa_is_client_handle_valid = ipa2_is_client_handle_valid; api_ctrl->ipa_get_client_mapping = ipa2_get_client_mapping; api_ctrl->ipa_get_rm_resource_from_ep = ipa2_get_rm_resource_from_ep; api_ctrl->ipa_get_modem_cfg_emb_pipe_flt = ipa2_get_modem_cfg_emb_pipe_flt; api_ctrl->ipa_get_transport_type = ipa2_get_transport_type; api_ctrl->ipa_ap_suspend = ipa2_ap_suspend; api_ctrl->ipa_ap_resume = ipa2_ap_resume; api_ctrl->ipa_get_smmu_domain = ipa2_get_smmu_domain; api_ctrl->ipa_disable_apps_wan_cons_deaggr = ipa2_disable_apps_wan_cons_deaggr; api_ctrl->ipa_rm_add_dependency_sync = ipa2_rm_add_dependency_sync; api_ctrl->ipa_get_dma_dev = 
ipa2_get_dma_dev; api_ctrl->ipa_get_gsi_ep_info = ipa2_get_gsi_ep_info; api_ctrl->ipa_stop_gsi_channel = ipa2_stop_gsi_channel; return 0; } /** * ipa_get_sys_yellow_wm()- Return yellow WM value for IPA SYS pipes. * * Return value: IPA_YELLOW_MARKER_SYS_CFG_OFST register if IPA_HW_v2.6L, * 0 otherwise. */ u32 ipa_get_sys_yellow_wm(void) { if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) return ipa_read_reg(ipa_ctx->mmio, IPA_YELLOW_MARKER_SYS_CFG_OFST); else return 0; } EXPORT_SYMBOL(ipa_get_sys_yellow_wm); void ipa_suspend_apps_pipes(bool suspend) { struct ipa_ep_cfg_ctrl cfg; int ipa_ep_idx; u32 lan_empty = 0, wan_empty = 0; int ret; struct sps_event_notify notify; struct ipa_ep_context *ep; memset(&cfg, 0, sizeof(cfg)); cfg.ipa_ep_suspend = suspend; ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); ep = &ipa_ctx->ep[ipa_ep_idx]; if (ep->valid) { ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg); /* Check if the pipes are empty. */ ret = sps_is_pipe_empty(ep->ep_hdl, &lan_empty); if (ret) { IPAERR("%s: sps_is_pipe_empty failed with %d\n", __func__, ret); } if (!lan_empty) { IPADBG("LAN Cons is not-empty. Enter poll mode.\n"); notify.user = ep->sys; notify.event_id = SPS_EVENT_EOT; if (ep->sys->sps_callback) ep->sys->sps_callback(&notify); } } ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); /* Considering the case for SSR. */ if (ipa_ep_idx == -1) { IPADBG("Invalid client.\n"); return; } ep = &ipa_ctx->ep[ipa_ep_idx]; if (ep->valid) { ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg); /* Check if the pipes are empty. */ ret = sps_is_pipe_empty(ep->ep_hdl, &wan_empty); if (ret) { IPAERR("%s: sps_is_pipe_empty failed with %d\n", __func__, ret); } if (!wan_empty) { IPADBG("WAN Cons is not-empty. Enter poll mode.\n"); notify.user = ep->sys; notify.event_id = SPS_EVENT_EOT; if (ep->sys->sps_callback) ep->sys->sps_callback(&notify); } } }
gpl-2.0
nmacs/lm3s-uclinux
lib/libldap/libraries/liblber/etest.c
11
3083
/* test.c - lber encoding test program */ /* $OpenLDAP: pkg/ldap/libraries/liblber/etest.c,v 1.24.2.2 2003/03/03 17:10:04 kurt Exp $ */ /* * Copyright 1998-2003 The OpenLDAP Foundation, All Rights Reserved. * COPYING RESTRICTIONS APPLY, see COPYRIGHT file */ /* Portions * Copyright (c) 1990 Regents of the University of Michigan. * All rights reserved. */ #include "portable.h" #include <stdio.h> #include <ac/stdlib.h> #include <ac/socket.h> #include <ac/string.h> #include <ac/unistd.h> #ifdef HAVE_CONSOLE_H #include <console.h> #endif /* HAVE_CONSOLE_H */ #include "lber.h" static void usage( const char *name ) { fprintf( stderr, "usage: %s fmtstring\n", name ); } static char* getbuf( void ) { char *p; static char buf[1024]; if ( fgets( buf, sizeof(buf), stdin ) == NULL ) return NULL; if ( (p = strchr( buf, '\n' )) != NULL ) *p = '\0'; return buf; } int main( int argc, char **argv ) { char *s; int fd, rc; BerElement *ber; Sockbuf *sb; /* enable debugging */ int ival = -1; ber_set_option( NULL, LBER_OPT_DEBUG_LEVEL, &ival ); if ( argc < 2 ) { usage( argv[0] ); return( EXIT_FAILURE ); } #ifdef HAVE_CONSOLE_H ccommand( &argv ); cshow( stdout ); if (( fd = open( "lber-test", O_WRONLY|O_CREAT|O_TRUNC|O_BINARY )) < 0 ) { perror( "open" ); return( EXIT_FAILURE ); } #else fd = fileno(stdout); #endif sb = ber_sockbuf_alloc(); ber_sockbuf_add_io( sb, &ber_sockbuf_io_fd, LBER_SBIOD_LEVEL_PROVIDER, (void *)&fd ); if( sb == NULL ) { perror( "ber_sockbuf_alloc_fd" ); return( EXIT_FAILURE ); } if ( (ber = ber_alloc_t( LBER_USE_DER )) == NULL ) { perror( "ber_alloc" ); return( EXIT_FAILURE ); } fprintf(stderr, "encode: start\n" ); if( ber_printf( ber, "{" /*}*/ ) ) { perror( "ber_printf {" /*}*/ ); return( EXIT_FAILURE ); } for ( s = argv[1]; *s; s++ ) { char *buf; char fmt[2]; fmt[0] = *s; fmt[1] = '\0'; fprintf(stderr, "encode: %s\n", fmt ); switch ( *s ) { case 'i': /* int */ case 'b': /* boolean */ case 'e': /* enumeration */ buf = getbuf(); rc = ber_printf( ber, fmt, atoi(buf) 
); break; case 'n': /* null */ case '{': /* begin sequence */ case '}': /* end sequence */ case '[': /* begin set */ case ']': /* end set */ rc = ber_printf( ber, fmt ); break; case 'o': /* octet string (non-null terminated) */ case 'B': /* bit string */ buf = getbuf(); rc = ber_printf( ber, fmt, buf, strlen(buf) ); break; case 's': /* string */ case 't': /* tag for the next element */ buf = getbuf(); rc = ber_printf( ber, fmt, buf ); break; default: fprintf( stderr, "encode: unknown fmt %c\n", *fmt ); rc = -1; break; } if( rc == -1 ) { perror( "ber_printf" ); return( EXIT_FAILURE ); } } fprintf(stderr, "encode: end\n" ); if( ber_printf( ber, /*{*/ "N}" ) == -1 ) { perror( /*{*/ "ber_printf }" ); return( EXIT_FAILURE ); } if ( ber_flush( sb, ber, 1 ) == -1 ) { perror( "ber_flush" ); return( EXIT_FAILURE ); } ber_sockbuf_free( sb ); return( EXIT_SUCCESS ); }
gpl-2.0
ryo-on/gcc-4.2.4-SCO-OpenServer5
libgfortran/generated/_aint_r10.F90
11
1735
! Copyright 2002 Free Software Foundation, Inc. ! Contributed by Paul Brook <paul@nowt.org> ! !This file is part of the GNU Fortran 95 runtime library (libgfortran). ! !GNU libgfortran is free software; you can redistribute it and/or !modify it under the terms of the GNU General Public !License as published by the Free Software Foundation; either !version 2 of the License, or (at your option) any later version. !In addition to the permissions in the GNU General Public License, the !Free Software Foundation gives you unlimited permission to link the !compiled version of this file into combinations with other programs, !and to distribute those combinations without any restriction coming !from the use of this file. (The General Public License restrictions !do apply in other respects; for example, they cover modification of !the file, and distribution when not linked into a combine !executable.) ! !GNU libgfortran is distributed in the hope that it will be useful, !but WITHOUT ANY WARRANTY; without even the implied warranty of !MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the !GNU General Public License for more details. ! !You should have received a copy of the GNU General Public !License along with libgfortran; see the file COPYING. If not, !write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, !Boston, MA 02110-1301, USA. ! !This file is machine generated. #include "config.h" #include "kinds.inc" #include "c99_protos.inc" #if defined (HAVE_GFC_REAL_10) #ifdef HAVE_TRUNCL elemental function specific__aint_r10 (parm) real (kind=10), intent (in) :: parm real (kind=10) :: specific__aint_r10 specific__aint_r10 = aint (parm) end function #endif #endif
gpl-2.0
myri/lanai-gcc
boehm-gc/backgraph.c
11
14471
/* * Copyright (c) 2001 by Hewlett-Packard Company. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. * */ /* * This implements a full, though not well-tuned, representation of the * backwards points-to graph. This is used to test for non-GC-robust * data structures; the code is not used during normal garbage collection. * * One restriction is that we drop all back-edges from nodes with very * high in-degree, and simply add them add them to a list of such * nodes. They are then treated as permanent roots. Id this by itself * doesn't introduce a space leak, then such nodes can't contribute to * a growing space leak. */ #ifdef MAKE_BACK_GRAPH #define MAX_IN 10 /* Maximum in-degree we handle directly */ #include "private/dbg_mlc.h" #include <unistd.h> #if !defined(DBG_HDRS_ALL) || (ALIGNMENT != CPP_WORDSZ/8) || !defined(UNIX_LIKE) # error Configuration doesnt support MAKE_BACK_GRAPH #endif /* We store single back pointers directly in the object's oh_bg_ptr field. */ /* If there is more than one ptr to an object, we store q | FLAG_MANY, */ /* where q is a pointer to a back_edges object. */ /* Every once in a while we use a back_edges object even for a single */ /* pointer, since we need the other fields in the back_edges structure to */ /* be present in some fraction of the objects. Otherwise we get serious */ /* performance issues. */ #define FLAG_MANY 2 typedef struct back_edges_struct { word n_edges; /* Number of edges, including those in continuation */ /* structures. 
*/ unsigned short flags; # define RETAIN 1 /* Directly points to a reachable object; */ /* retain for next GC. */ unsigned short height_gc_no; /* If height > 0, then the GC_gc_no value when it */ /* was computed. If it was computed this cycle, then */ /* it is current. If it was computed during the */ /* last cycle, then it represents the old height, */ /* which is only saved for live objects referenced by */ /* dead ones. This may grow due to refs from newly */ /* dead objects. */ signed_word height; /* Longest path through unreachable nodes to this node */ /* that we found using depth first search. */ # define HEIGHT_UNKNOWN ((signed_word)(-2)) # define HEIGHT_IN_PROGRESS ((signed_word)(-1)) ptr_t edges[MAX_IN]; struct back_edges_struct *cont; /* Pointer to continuation structure; we use only the */ /* edges field in the continuation. */ /* also used as free list link. */ } back_edges; /* Allocate a new back edge structure. Should be more sophisticated */ /* if this were production code. */ #define MAX_BACK_EDGE_STRUCTS 100000 static back_edges *back_edge_space = 0; int GC_n_back_edge_structs = 0; /* Serves as pointer to never used */ /* back_edges space. */ static back_edges *avail_back_edges = 0; /* Pointer to free list of deallocated */ /* back_edges structures. */ static back_edges * new_back_edges(void) { if (0 == back_edge_space) { back_edge_space = (back_edges *) sbrk(MAX_BACK_EDGE_STRUCTS*sizeof(back_edges)); } if (0 != avail_back_edges) { back_edges * result = avail_back_edges; avail_back_edges = result -> cont; result -> cont = 0; return result; } if (GC_n_back_edge_structs >= MAX_BACK_EDGE_STRUCTS - 1) { ABORT("needed too much space for back edges: adjust " "MAX_BACK_EDGE_STRUCTS"); } return back_edge_space + (GC_n_back_edge_structs++); } /* Deallocate p and its associated continuation structures. 
*/ static void deallocate_back_edges(back_edges *p) { back_edges *last = p; while (0 != last -> cont) last = last -> cont; last -> cont = avail_back_edges; avail_back_edges = p; } /* Table of objects that are currently on the depth-first search */ /* stack. Only objects with in-degree one are in this table. */ /* Other objects are identified using HEIGHT_IN_PROGRESS. */ /* This data structure NEEDS IMPROVEMENT. */ #define MAX_IN_PROGRESS 10000 static ptr_t * in_progress_space = 0; static int n_in_progress = 0; static void push_in_progress(ptr_t p) { if (in_progress_space == 0) in_progress_space = sbrk(MAX_IN_PROGRESS * sizeof(ptr_t)); if (n_in_progress == MAX_IN_PROGRESS) ABORT("Exceeded MAX_IN_PROGRESS"); in_progress_space[n_in_progress++] = p; } static GC_bool is_in_progress(ptr_t p) { int i; for (i = 0; i < n_in_progress; ++i) { if (in_progress_space[i] == p) return TRUE; } return FALSE; } static void pop_in_progress(ptr_t p) { --n_in_progress; GC_ASSERT(in_progress_space[n_in_progress] == p); } #define GET_OH_BG_PTR(p) \ (ptr_t)REVEAL_POINTER(((oh *)(p)) -> oh_bg_ptr) #define SET_OH_BG_PTR(p,q) (((oh *)(p)) -> oh_bg_ptr) = HIDE_POINTER(q) /* Execute s once for each predecessor q of p in the points-to graph. */ /* s should be a bracketed statement. We declare q. */ #define FOR_EACH_PRED(q, p, s) \ { \ ptr_t q = GET_OH_BG_PTR(p); \ if (!((word)q & FLAG_MANY)) { \ if (q && !((word)q & 1)) s \ /* !((word)q & 1) checks for a misnterpreted freelist link */ \ } else { \ back_edges *orig_be_ = (back_edges *)((word)q & ~FLAG_MANY); \ back_edges *be_ = orig_be_; \ int total_, local_; \ int n_edges_ = be_ -> n_edges; \ for (total_ = 0, local_ = 0; total_ < n_edges_; ++local_, ++total_) { \ if (local_ == MAX_IN) { \ be_ = be_ -> cont; \ local_ = 0; \ } \ q = be_ -> edges[local_]; s \ } \ } \ } /* Ensure that p has a back_edges structure associated with it. 
*/ static void ensure_struct(ptr_t p) { ptr_t old_back_ptr = GET_OH_BG_PTR(p); if (!((word)old_back_ptr & FLAG_MANY)) { back_edges *be = new_back_edges(); be -> flags = 0; if (0 == old_back_ptr) { be -> n_edges = 0; } else { be -> n_edges = 1; be -> edges[0] = old_back_ptr; } be -> height = HEIGHT_UNKNOWN; be -> height_gc_no = GC_gc_no - 1; GC_ASSERT(be >= back_edge_space); SET_OH_BG_PTR(p, (word)be | FLAG_MANY); } } /* Add the (forward) edge from p to q to the backward graph. Both p */ /* q are pointers to the object base, i.e. pointers to an oh. */ static void add_edge(ptr_t p, ptr_t q) { ptr_t old_back_ptr = GET_OH_BG_PTR(q); back_edges * be, *be_cont; word i; static unsigned random_number = 13; # define GOT_LUCKY_NUMBER (((++random_number) & 0x7f) == 0) /* A not very random number we use to occasionally allocate a */ /* back_edges structure even for a single backward edge. This */ /* prevents us from repeatedly tracing back through very long */ /* chains, since we will have some place to store height and */ /* in_progress flags along the way. */ GC_ASSERT(p == GC_base(p) && q == GC_base(q)); if (!GC_HAS_DEBUG_INFO(q) || !GC_HAS_DEBUG_INFO(p)) { /* This is really a misinterpreted free list link, since we saw */ /* a pointer to a free list. Dont overwrite it! */ return; } if (0 == old_back_ptr) { SET_OH_BG_PTR(q, p); if (GOT_LUCKY_NUMBER) ensure_struct(q); return; } /* Check whether it was already in the list of predecessors. 
*/ FOR_EACH_PRED(pred, q, { if (p == pred) return; }); ensure_struct(q); old_back_ptr = GET_OH_BG_PTR(q); be = (back_edges *)((word)old_back_ptr & ~FLAG_MANY); for (i = be -> n_edges, be_cont = be; i > MAX_IN; be_cont = be_cont -> cont, i -= MAX_IN) {} if (i == MAX_IN) { be_cont -> cont = new_back_edges(); be_cont = be_cont -> cont; i = 0; } be_cont -> edges[i] = p; be -> n_edges++; if (be -> n_edges == 100) { # if 0 if (GC_print_stats) { GC_err_printf0("The following object has in-degree >= 100:\n"); GC_print_heap_obj(q); } # endif } } typedef void (*per_object_func)(ptr_t p, word n_words, word gc_descr); static void per_object_helper(struct hblk *h, word fn) { hdr * hhdr = HDR(h); word sz = hhdr -> hb_sz; word descr = hhdr -> hb_descr; per_object_func f = (per_object_func)fn; int i = 0; do { f((ptr_t)(h -> hb_body + i), sz, descr); i += sz; } while (i + sz <= BYTES_TO_WORDS(HBLKSIZE)); } void GC_apply_to_each_object(per_object_func f) { GC_apply_to_all_blocks(per_object_helper, (word)f); } static void reset_back_edge(ptr_t p, word n_words, word gc_descr) { /* Skip any free list links, or dropped blocks */ if (GC_HAS_DEBUG_INFO(p)) { ptr_t old_back_ptr = GET_OH_BG_PTR(p); if ((word)old_back_ptr & FLAG_MANY) { back_edges *be = (back_edges *)((word)old_back_ptr & ~FLAG_MANY); if (!(be -> flags & RETAIN)) { deallocate_back_edges(be); SET_OH_BG_PTR(p, 0); } else { word *currentp; GC_ASSERT(GC_is_marked(p)); /* Back edges may point to objects that will not be retained. */ /* Delete them for now, but remember the height. */ /* Some will be added back at next GC. */ be -> n_edges = 0; if (0 != be -> cont) { deallocate_back_edges(be -> cont); be -> cont = 0; } GC_ASSERT(GC_is_marked(p)); /* We only retain things for one GC cycle at a time. */ be -> flags &= ~RETAIN; } } else /* Simple back pointer */ { /* Clear to avoid dangling pointer. 
*/ SET_OH_BG_PTR(p, 0); } } } static void add_back_edges(ptr_t p, word n_words, word gc_descr) { word *currentp = (word *)(p + sizeof(oh)); /* For now, fix up non-length descriptors conservatively. */ if((gc_descr & GC_DS_TAGS) != GC_DS_LENGTH) { gc_descr = WORDS_TO_BYTES(n_words); } while (currentp < (word *)(p + gc_descr)) { word current = *currentp++; if (current >= (word)GC_least_plausible_heap_addr && current <= (word)GC_greatest_plausible_heap_addr) { ptr_t target = GC_base((GC_PTR)current); if (0 != target) { add_edge(p, target); } } } } /* Rebuild the reprentation of the backward reachability graph. */ /* Does not examine mark bits. Can be called before GC. */ void GC_build_back_graph(void) { GC_apply_to_each_object(add_back_edges); } /* Return an approximation to the length of the longest simple path */ /* through unreachable objects to p. We refer to this as the height */ /* of p. */ static word backwards_height(ptr_t p) { word result; ptr_t back_ptr = GET_OH_BG_PTR(p); back_edges *be; if (0 == back_ptr) return 1; if (!((word)back_ptr & FLAG_MANY)) { if (is_in_progress(p)) return 0; /* DFS back edge, i.e. we followed */ /* an edge to an object already */ /* on our stack: ignore */ push_in_progress(p); result = backwards_height(back_ptr)+1; pop_in_progress(p); return result; } be = (back_edges *)((word)back_ptr & ~FLAG_MANY); if (be -> height >= 0 && be -> height_gc_no == GC_gc_no) return be -> height; /* Ignore back edges in DFS */ if (be -> height == HEIGHT_IN_PROGRESS) return 0; result = (be -> height > 0? be -> height : 1); be -> height = HEIGHT_IN_PROGRESS; FOR_EACH_PRED(q, p, { word this_height; if (GC_is_marked(q) && !(FLAG_MANY & (word)GET_OH_BG_PTR(p))) { if (GC_print_stats) GC_printf2("Found bogus pointer from 0x%lx to 0x%lx\n", q, p); /* Reachable object "points to" unreachable one. */ /* Could be caused by our lax treatment of GC descriptors. 
*/ this_height = 1; } else { this_height = backwards_height(q); } if (this_height >= result) result = this_height + 1; }); be -> height = result; be -> height_gc_no = GC_gc_no; return result; } word GC_max_height; ptr_t GC_deepest_obj; /* Compute the maximum height of every unreachable predecessor p of a */ /* reachable object. Arrange to save the heights of all such objects p */ /* so that they can be used in calculating the height of objects in the */ /* next GC. */ /* Set GC_max_height to be the maximum height we encounter, and */ /* GC_deepest_obj to be the corresponding object. */ static void update_max_height(ptr_t p, word n_words, word gc_descr) { if (GC_is_marked(p) && GC_HAS_DEBUG_INFO(p)) { int i; word p_height = 0; ptr_t p_deepest_obj = 0; ptr_t back_ptr; back_edges *be = 0; /* If we remembered a height last time, use it as a minimum. */ /* It may have increased due to newly unreachable chains pointing */ /* to p, but it can't have decreased. */ back_ptr = GET_OH_BG_PTR(p); if (0 != back_ptr && ((word)back_ptr & FLAG_MANY)) { be = (back_edges *)((word)back_ptr & ~FLAG_MANY); if (be -> height != HEIGHT_UNKNOWN) p_height = be -> height; } FOR_EACH_PRED(q, p, { if (!GC_is_marked(q) && GC_HAS_DEBUG_INFO(q)) { word q_height; q_height = backwards_height(q); if (q_height > p_height) { p_height = q_height; p_deepest_obj = q; } } }); if (p_height > 0) { /* Remember the height for next time. 
*/ if (be == 0) { ensure_struct(p); back_ptr = GET_OH_BG_PTR(p); be = (back_edges *)((word)back_ptr & ~FLAG_MANY); } be -> flags |= RETAIN; be -> height = p_height; be -> height_gc_no = GC_gc_no; } if (p_height > GC_max_height) { GC_max_height = p_height; GC_deepest_obj = p_deepest_obj; } } } void GC_traverse_back_graph(void) { static word max_max_height = 0; GC_max_height = 0; GC_apply_to_each_object(update_max_height); GC_printf2("Maximum backwards height of reachable objects at GC %lu is %ld\n", (unsigned long) GC_gc_no, GC_max_height); if (GC_max_height > max_max_height) { max_max_height = GC_max_height; GC_printf0("The following unreachable object is last in a longest chain " "of unreachable objects:\n"); GC_print_heap_obj(GC_deepest_obj); } if (GC_print_stats) { GC_printf1("Needed max total of %ld back-edge structs\n", GC_n_back_edge_structs); } GC_apply_to_each_object(reset_back_edge); GC_deepest_obj = 0; } #endif /* MAKE_BACK_GRAPH */
gpl-2.0
bq/aquaris-E4
drivers/misc/mediatek/sensorHub/SCP_sensorHub/SCP_sensorHub.c
11
79350
/* SCP sensor hub driver * * * This software program is licensed subject to the GNU General Public License * (GPL).Version 2,June 1991, available at http://www.fsf.org/copyleft/gpl.html * (C) Copyright 2011 Bosch Sensortec GmbH * All Rights Reserved */ #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/miscdevice.h> #include <asm/uaccess.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/workqueue.h> #include <linux/kobject.h> #include <linux/earlysuspend.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <asm/atomic.h> //#include <mach/mt_devs.h> #include <mach/mt_typedefs.h> #include <mach/mt_gpio.h> #include <mach/mt_pm_ldo.h> #include "step_counter.h" #include "pedometer.h" #include "activity.h" #include "in_pocket.h" #include "face_down.h" #include "pick_up.h" #include "shake.h" #include "heart_rate.h" #include "tilt_detector.h" #include "wake_gesture.h" #include "glance_gesture.h" #include <linux/batch.h> #include <mach/md32_ipi.h> #define POWER_NONE_MACRO MT65XX_POWER_NONE #include <cust_sensorHub.h> #include <linux/hwmsensor.h> #include <linux/hwmsen_dev.h> #include <linux/sensors_io.h> #include "SCP_sensorHub.h" #include "cust_sensorHub.h" #include <linux/hwmsen_helper.h> #include <mach/mt_clkmgr.h> /*----------------------------------------------------------------------------*/ #define DEBUG 1 //#define SENSORHUB_UT /*----------------------------------------------------------------------------*/ //#define CONFIG_SCP_sensorHub_LOWPASS /*apply low pass filter on output*/ #define SW_CALIBRATION /*----------------------------------------------------------------------------*/ #define SCP_sensorHub_AXIS_X 0 #define SCP_sensorHub_AXIS_Y 1 #define SCP_sensorHub_AXIS_Z 2 #define SCP_sensorHub_AXES_NUM 3 #define SCP_sensorHub_DATA_LEN 6 #define SCP_sensorHub_DEV_NAME "SCP_sensorHub" /*----------------------------------------------------------------------------*/ static int 
SCP_sensorHub_probe(void); static int SCP_sensorHub_remove(void); //static int SCP_sensorHub_suspend(struct platform_device *dev, pm_message_t state); //static int SCP_sensorHub_resume(struct platform_device *dev); static int SCP_sensorHub_local_init(void); #ifdef CONFIG_CUSTOM_KERNEL_STEP_COUNTER static void SCP_sd_work(struct work_struct *work); static void SCP_sig_work(struct work_struct *work); static int SCP_sensorHub_step_counter_init(void); static int SCP_sensorHub_step_counter_uninit(void); #endif //#ifdef CONFIG_CUSTOM_KERNEL_STEP_COUNTER #ifdef CONFIG_CUSTOM_KERNEL_IN_POCKET_SENSOR static void SCP_inpk_work(struct work_struct *work); static int SCP_sensorHub_in_pocket_init(void); static int SCP_sensorHub_in_pocket_uninit(void); #endif //CONFIG_CUSTOM_KERNEL_IN_POCKET_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_PEDOMETER //static void SCP_pdr_work(struct work_struct *work); static int SCP_sensorHub_pedometer_init(void); static int SCP_sensorHub_pedometer_uninit(void); #endif //CONFIG_CUSTOM_KERNEL_PEDOMETER #ifdef CONFIG_CUSTOM_KERNEL_ACTIVITY_SENSOR //static void SCP_act_work(struct work_struct *work); static int SCP_sensorHub_activity_init(void); static int SCP_sensorHub_activity_uninit(void); #endif //CONFIG_CUSTOM_KERNEL_ACTIVITY_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_SHAKE_SENSOR static void SCP_shk_work(struct work_struct *work); static int SCP_sensorHub_shake_init(void); static int SCP_sensorHub_shake_uninit(void); #endif //CONFIG_CUSTOM_KERNEL_SHAKE_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_PICK_UP_SENSOR static void SCP_pkup_work(struct work_struct *work); static int SCP_sensorHub_pick_up_init(void); static int SCP_sensorHub_pick_up_uninit(void); #endif //CONFIG_CUSTOM_KERNEL_PICK_UP_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_FACE_DOWN_SENSOR static void SCP_fdn_work(struct work_struct *work); static int SCP_sensorHub_face_down_init(void); static int SCP_sensorHub_face_down_uninit(void); #endif //CONFIG_CUSTOM_KERNEL_FACE_DOWN_SENSOR #ifdef 
CONFIG_CUSTOM_KERNEL_HEART_RATE_SENSOR //static void SCP_fdn_work(struct work_struct *work); static int SCP_sensorHub_heart_rate_init(void); static int SCP_sensorHub_heart_rate_uninit(void); #endif //CONFIG_CUSTOM_KERNEL_HEART_RATE_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_TILT_DETECTOR_SENSOR static void SCP_tilt_work(struct work_struct *work); static int SCP_sensorHub_tilt_detector_init(void); static int SCP_sensorHub_tilt_detector_uninit(void); #endif //CONFIG_CUSTOM_KERNEL_TILT_DETECTOR_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_WAKE_GESTURE_SENSOR static void SCP_wag_work(struct work_struct *work); static int SCP_sensorHub_wake_gesture_init(void); static int SCP_sensorHub_wake_gesture_uninit(void); #endif //CONFIG_CUSTOM_KERNEL_WAKE_GESTURE_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_GLANCE_GESTURE_SENSOR static void SCP_glg_work(struct work_struct *work); static int SCP_sensorHub_glance_gesture_init(void); static int SCP_sensorHub_glance_gesture_uninit(void); #endif //CONFIG_CUSTOM_KERNEL_GLANCE_GESTURE_SENSOR /*----------------------------------------------------------------------------*/ typedef enum { SCP_TRC_FUN = 0x01, SCP_TRC_IPI = 0x02, SCP_TRC_BATCH = 0x04, } SCP_TRC; /*----------------------------------------------------------------------------*/ SCP_sensorHub_handler sensor_handler[ID_SENSOR_MAX_HANDLE+1]; /*----------------------------------------------------------------------------*/ #define C_MAX_FIR_LENGTH (32) //#define USE_EARLY_SUSPEND static DEFINE_MUTEX(SCP_sensorHub_op_mutex); static DEFINE_MUTEX(SCP_sensorHub_req_mutex); static DECLARE_WAIT_QUEUE_HEAD(SCP_sensorHub_req_wq); static int SCP_sensorHub_init_flag =-1; // 0<==>OK -1 <==> fail static struct batch_init_info SCP_sensorHub_init_info = { .name = "SCP_sensorHub", .init = SCP_sensorHub_local_init, .uninit = SCP_sensorHub_remove, .platform_diver_addr = NULL, }; #ifdef CONFIG_CUSTOM_KERNEL_STEP_COUNTER static struct step_c_init_info SCP_step_counter_init_info = { .name = "SCP_step_counter", .init = 
SCP_sensorHub_step_counter_init, .uninit = SCP_sensorHub_step_counter_uninit, }; #endif //#ifdef CONFIG_CUSTOM_KERNEL_STEP_COUNTER #ifdef CONFIG_CUSTOM_KERNEL_IN_POCKET_SENSOR static struct inpk_init_info SCP_in_pocket_init_info = { .name = "SCP_in_pocket", .init = SCP_sensorHub_in_pocket_init, .uninit = SCP_sensorHub_in_pocket_uninit, }; #endif //#ifdef CONFIG_CUSTOM_KERNEL_IN_POCKET_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_PEDOMETER static struct pdr_init_info SCP_pedometer_init_info = { .name = "SCP_pedometer", .init = SCP_sensorHub_pedometer_init, .uninit = SCP_sensorHub_pedometer_uninit, }; #endif //#ifdef CONFIG_CUSTOM_KERNEL_PEDOMETER #ifdef CONFIG_CUSTOM_KERNEL_ACTIVITY_SENSOR static struct act_init_info SCP_activity_init_info = { .name = "SCP_activity", .init = SCP_sensorHub_activity_init, .uninit = SCP_sensorHub_activity_uninit, }; #endif //#ifdef CONFIG_CUSTOM_KERNEL_ACTIVITY_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_SHAKE_SENSOR static struct shk_init_info SCP_shake_init_info = { .name = "SCP_shake", .init = SCP_sensorHub_shake_init, .uninit = SCP_sensorHub_shake_uninit, }; #endif //#ifdef CONFIG_CUSTOM_KERNEL_SHAKE_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_PICK_UP_SENSOR static struct pkup_init_info SCP_pick_up_init_info = { .name = "SCP_pick_up", .init = SCP_sensorHub_pick_up_init, .uninit = SCP_sensorHub_pick_up_uninit, }; #endif //#ifdef CONFIG_CUSTOM_KERNEL_PICK_UP_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_FACE_DOWN_SENSOR static struct fdn_init_info SCP_face_down_init_info = { .name = "SCP_face_down", .init = SCP_sensorHub_face_down_init, .uninit = SCP_sensorHub_face_down_uninit, }; #endif //#ifdef CONFIG_CUSTOM_KERNEL_FACE_DOWN_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_HEART_RATE_SENSOR static struct hrm_init_info SCP_heart_rate_init_info = { .name = "SCP_heart_rate", .init = SCP_sensorHub_heart_rate_init, .uninit = SCP_sensorHub_heart_rate_uninit, }; #endif //#ifdef CONFIG_CUSTOM_KERNEL_HEART_RATE_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_TILT_DETECTOR_SENSOR static struct 
tilt_init_info SCP_tilt_detector_init_info = { .name = "SCP_tilt_detector", .init = SCP_sensorHub_tilt_detector_init, .uninit = SCP_sensorHub_tilt_detector_uninit, }; #endif //#ifdef CONFIG_CUSTOM_KERNEL_TILT_DETECTOR_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_WAKE_GESTURE_SENSOR static struct wag_init_info SCP_wake_gesture_init_info = { .name = "SCP_wake_gesture", .init = SCP_sensorHub_wake_gesture_init, .uninit = SCP_sensorHub_wake_gesture_uninit, }; #endif //#ifdef CONFIG_CUSTOM_KERNEL_WAKE_GESTURE_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_GLANCE_GESTURE_SENSOR static struct glg_init_info SCP_glance_gesture_init_info = { .name = "SCP_glance_gesture", .init = SCP_sensorHub_glance_gesture_init, .uninit = SCP_sensorHub_glance_gesture_uninit, }; #endif //#ifdef CONFIG_CUSTOM_KERNEL_GLANCE_GESTURE_SENSOR /*----------------------------------------------------------------------------*/ struct data_filter { s16 raw[C_MAX_FIR_LENGTH][SCP_sensorHub_AXES_NUM]; int sum[SCP_sensorHub_AXES_NUM]; int num; int idx; }; /*----------------------------------------------------------------------------*/ struct SCP_sensorHub_data { struct sensorHub_hw *hw; struct work_struct ipi_work; struct work_struct fifo_full_work; struct work_struct sd_work; //step detect work struct work_struct sig_work; //significant motion work //struct work_struct pdr_work; //pedometer work //struct work_struct act_work; //activity work struct work_struct inpk_work;//in pocket work struct work_struct pkup_work;//pick up work struct work_struct fdn_work; //face down work struct work_struct shk_work; //shake work struct work_struct tilt_work; //tilt detector work struct work_struct wag_work; //wake gesture work struct work_struct glg_work; //glance gesture work struct timer_list timer; /*misc*/ atomic_t trace; atomic_t suspend; atomic_t filter; s16 cali_sw[SCP_sensorHub_AXES_NUM+1]; atomic_t wait_rsp; atomic_t ipi_handler_running; /*data*/ s8 offset[SCP_sensorHub_AXES_NUM+1]; /*+1: for 4-byte alignment*/ s16 
data[SCP_sensorHub_AXES_NUM+1]; volatile struct sensorFIFO * volatile SCP_sensorFIFO; dma_addr_t mapping; #if defined(CONFIG_SCP_sensorHub_LOWPASS) atomic_t firlen; atomic_t fir_en; struct data_filter fir; #endif /*early suspend*/ #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(USE_EARLY_SUSPEND) struct early_suspend early_drv; #endif }; /*----------------------------------------------------------------------------*/ static struct SCP_sensorHub_data *obj_data = NULL; static SCP_SENSOR_HUB_DATA_P userData = NULL; static uint *userDataLen = NULL; /*----------------------------------------------------------------------------*/ #define SCP_TAG "[sensorHub] " #define SCP_FUN(f) printk(KERN_ERR SCP_TAG"%s\n", __FUNCTION__) #define SCP_ERR(fmt, args...) printk(KERN_ERR SCP_TAG"%s %d : "fmt, __FUNCTION__, __LINE__, ##args) #define SCP_LOG(fmt, args...) printk(KERN_ERR SCP_TAG fmt, ##args) /*--------------------SCP_sensorHub power control function----------------------------------*/ static void SCP_sensorHub_power(struct sensorHub_hw *hw, unsigned int on) { } /*----------------------------------------------------------------------------*/ static int SCP_sensorHub_init_client(void) //call by init done workqueue { struct SCP_sensorHub_data *obj = obj_data; SCP_SENSOR_HUB_DATA data; unsigned int len = 0; SCP_FUN(); //enable_clock(MT_CG_INFRA_APDMA, "sensorHub"); //SCP_ERR("obj=%lld\n", obj); // obj->mapping = dma_map_single(NULL, (void *)obj->SCP_sensorFIFO, obj->SCP_sensorFIFO->FIFOSize, DMA_BIDIRECTIONAL);//(virt_to_phys(obj->SCP_sensorFIFO)); // dma_sync_single_for_device(NULL, obj->mapping, obj->SCP_sensorFIFO->FIFOSize, DMA_TO_DEVICE); data.set_config_req.sensorType = 0; data.set_config_req.action = SENSOR_HUB_SET_CONFIG; // data.set_config_req.bufferBase = (struct sensorFIFO *)obj->mapping; SCP_ERR("data.set_config_req.bufferBase = %p\n", data.set_config_req.bufferBase); // SCP_ERR("obj->SCP_sensorFIFO = %p, wp = %p, rp = %p, size = %d\n", obj->SCP_sensorFIFO, // 
obj->SCP_sensorFIFO->wp, obj->SCP_sensorFIFO->rp, obj->SCP_sensorFIFO->FIFOSize); // data.set_config_req.bufferSize = obj->SCP_sensorFIFO->FIFOSize; len = sizeof(data.set_config_req); SCP_sensorHub_req_send(&data, &len, 1); SCP_ERR("SCP_sensorHub_init_client done\n"); return SCP_SENSOR_HUB_SUCCESS; } /*----------------------------------------------------------------------------*/ static int SCP_sensorHub_ReadChipInfo(char *buf, int bufsize) { if((NULL == buf)||(bufsize<=30)) { return -1; } sprintf(buf, "SCP_sensorHub Chip"); return 0; } /*----------------------------------------------------------------------------*/ static int SCP_sensorHub_ReadSensorData(int handle, hwm_sensor_data *sensorData) { // struct SCP_sensorHub_data *obj = obj_data; // char *pStart, *pEnd, *pNext; // struct SCP_sensorData *curData; // char *rp, *wp; // //#if 1//def SENSORHUB_UT // //SCP_FUN(); //#endif // // if(NULL == sensorData) // { // return -1; // } // // //data.get_data_req.sensorType = handle; // //data.get_data_req.action = SENSOR_HUB_GET_DATA; // //len = sizeof(data.get_data_req); // // //if (0 != (err = SCP_sensorHub_req_send(&data, &len, 1))) // //{ // // SCP_ERR("SCP_sensorHub_req_send error: ret value=%d\n", err); // // return -3; // //} // //else // //{ // // dma_sync_single_for_cpu(NULL, obj->mapping, obj->SCP_sensorFIFO->FIFOSize, DMA_FROM_DEVICE); // pStart = (char *)obj->SCP_sensorFIFO + offsetof(struct sensorFIFO, data);//sizeof(obj->SCP_sensorFIFO); // pEnd = (char *)pStart + obj->SCP_sensorFIFO->FIFOSize; // rp = pStart + (int)obj->SCP_sensorFIFO->rp; // wp = pStart + (int)obj->SCP_sensorFIFO->wp; // // //SCP_LOG("FIFO virt_to_phys(obj->SCP_sensorFIFO) = %p\n", (void *)virt_to_phys(obj->SCP_sensorFIFO)); // //SCP_LOG("FIFO obj->SCP_sensorFIFO = %p, pStart = %p, pEnd = %p\n", obj->SCP_sensorFIFO, pStart, pEnd); // //SCP_LOG("FIFO rp = %p, wp = %p, size = %d\n", rp, wp, obj->SCP_sensorFIFO->FIFOSize); // //SCP_LOG("FIFO sensorType = %d, dataLength = %d\n", 
obj->SCP_sensorFIFO->data[0].sensorType, // // obj->SCP_sensorFIFO->data[0].dataLength); // // if (rp < pStart || pEnd <= rp) // { // SCP_ERR("FIFO rp invalid : %p, %p, %p\n", pStart, pEnd, rp); // return -4; // } // // if (wp < pStart || pEnd <= wp) // { // SCP_ERR("FIFO wp invalid : %p, %p, %p\n", pStart, pEnd, wp); // return -5; // } // // if (rp == wp) // { // //obj->SCP_sensorFIFO->rp += 1; // //obj->SCP_sensorFIFO->wp += 1; // SCP_ERR("FIFO empty\n"); // return -6; // } // // //while(rp != wp) // if (rp != wp) // { // pNext = rp + offsetof(struct SCP_sensorData, data) + ((struct SCP_sensorData*)rp)->dataLength; // pNext = (char *)((((unsigned long)pNext + 3) >> 2 ) << 2); // SCP_LOG("dataLength = %d, pNext = %p, rp = %p, wp = %p\n", ((struct SCP_sensorData*)rp)->dataLength, pNext, rp, wp); // //SCP_LOG("[0] = %d, [1] = %d, [2] = %d\n", ((struct SCP_sensorData*)rp)->data[0], ((struct SCP_sensorData*)rp)->data[1], ((struct SCP_sensorData*)rp)->data[2]); // // if(!(curData = kzalloc(pNext - rp, GFP_KERNEL))) // { // SCP_ERR("Allocate curData fail\n"); // return -7; // } // // if (pNext < pEnd) // { // memcpy(curData, rp, pNext - rp); // //SCP_LOG("pNext < pEnd : pNext - rp = %d\n", pNext - rp); // rp = pNext; // } // else // { // memcpy(curData, rp, pEnd - rp); // memcpy((char *)curData + (int)pEnd - (int)rp, pStart, (int)pNext - (int)pEnd); // //SCP_LOG("!pNext < pEnd : pEnd - rp = %d\n", pEnd - rp); // //SCP_LOG("!pNext < pEnd : curData = %p, (char *)&curData + (int)pEnd - (int)rp = %p\n", curData, (char *)curData + (int)pEnd - (int)rp); // //SCP_LOG("!pNext < pEnd : (int)pNext - (int)pEnd = %d\n", (int)pNext - (int)pEnd); // // rp = pStart + (int)pNext - (int)pEnd; // } // // //SCP_LOG("rp = %p, curData.sensorType = %d\n", rp, curData.sensorType); // // sensorData->sensor = curData->sensorType; // sensorData->value_divide = 1000; //need to check // sensorData->status = SENSOR_STATUS_ACCURACY_MEDIUM; // sensorData->time = (((int64_t)curData->timeStampH) << 32) 
| curData->timeStampL; // //for (i=0;i<curData.dataLength;i++) // { // sensorData->values[0] = curData->data[0]; // sensorData->values[1] = curData->data[1]; // sensorData->values[2] = curData->data[2]; // } // //SCP_LOG("sensorData = %p, curData = %p\n", sensorData, curData); // //SCP_LOG("curData.data[0] = %d, curData.data[1] = %d, curData.data[2] = %d\n", sensorData->values[0], sensorData->values[1], sensorData->values[2]); // // obj->SCP_sensorFIFO->rp = (struct SCP_sensorData*)(rp - pStart); // // kfree(curData); // // dma_sync_single_for_device(NULL, obj->mapping, obj->SCP_sensorFIFO->FIFOSize, DMA_TO_DEVICE); // } // // // //} // return 0; } /*----------------------------------------------------------------------------*/ static ssize_t show_chipinfo_value(struct device_driver *ddri, char *buf) { char strbuf[SCP_SENSOR_HUB_TEMP_BUFSIZE]; SCP_sensorHub_ReadChipInfo(strbuf, SCP_SENSOR_HUB_TEMP_BUFSIZE); return snprintf(buf, PAGE_SIZE, "%s\n", strbuf); } /*----------------------------------------------------------------------------*/ static ssize_t show_trace_value(struct device_driver *ddri, char *buf) { ssize_t res; struct SCP_sensorHub_data *obj = obj_data; if (obj == NULL) { SCP_ERR("SCP_sensorHub_data obj is null!!\n"); return 0; } res = snprintf(buf, PAGE_SIZE, "0x%04X\n", atomic_read(&obj->trace)); return res; } /*----------------------------------------------------------------------------*/ static ssize_t store_trace_value(struct device_driver *ddri, const char *buf, size_t count) { struct SCP_sensorHub_data *obj = obj_data; int trace; if (obj == NULL) { SCP_ERR("SCP_sensorHub_data obj is null!!\n"); return 0; } if(1 == sscanf(buf, "0x%x", &trace)) { atomic_set(&obj->trace, trace); } else { SCP_ERR("invalid content: '%s', length = %d\n", buf, (int)count); } return count; } /*----------------------------------------------------------------------------*/ static DRIVER_ATTR(chipinfo, S_IWUSR | S_IRUGO, show_chipinfo_value, NULL); static DRIVER_ATTR(trace, 
S_IWUSR | S_IRUGO, show_trace_value, store_trace_value); /*----------------------------------------------------------------------------*/ static struct driver_attribute *SCP_sensorHub_attr_list[] = { &driver_attr_chipinfo, /*chip information*/ &driver_attr_trace, /*trace log*/ }; /*----------------------------------------------------------------------------*/ static int SCP_sensorHub_create_attr(struct device_driver *driver) { int idx, err = 0; int num = (int)(sizeof(SCP_sensorHub_attr_list)/sizeof(SCP_sensorHub_attr_list[0])); if (driver == NULL) { return -EINVAL; } for(idx = 0; idx < num; idx++) { if((err = driver_create_file(driver, SCP_sensorHub_attr_list[idx]))) { SCP_ERR("driver_create_file (%s) = %d\n", SCP_sensorHub_attr_list[idx]->attr.name, err); break; } } return err; } /*----------------------------------------------------------------------------*/ static int SCP_sensorHub_delete_attr(struct device_driver *driver) { int idx ,err = 0; int num = (int)(sizeof(SCP_sensorHub_attr_list)/sizeof(SCP_sensorHub_attr_list[0])); if(driver == NULL) { return -EINVAL; } for(idx = 0; idx < num; idx++) { driver_remove_file(driver, SCP_sensorHub_attr_list[idx]); } return err; } /****************************************************************************** * Function Configuration ******************************************************************************/ static int SCP_sensorHub_open(struct inode *inode, struct file *file) { file->private_data = obj_data; if(file->private_data == NULL) { SCP_ERR("null pointer!!\n"); return -EINVAL; } return nonseekable_open(inode, file); } /*----------------------------------------------------------------------------*/ static int SCP_sensorHub_release(struct inode *inode, struct file *file) { file->private_data = NULL; return 0; } /*----------------------------------------------------------------------------*/ static long SCP_sensorHub_unlocked_ioctl(struct file *file, unsigned int cmd,unsigned long arg) { char 
strbuf[SCP_SENSOR_HUB_TEMP_BUFSIZE]; void __user *data; long err = 0; #ifdef SENSORHUB_UT SCP_FUN(); #endif if(_IOC_DIR(cmd) & _IOC_READ) { err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)); } else if(_IOC_DIR(cmd) & _IOC_WRITE) { err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)); } if(err) { SCP_ERR("access error: %08X, (%2d, %2d)\n", cmd, _IOC_DIR(cmd), _IOC_SIZE(cmd)); return -EFAULT; } switch(cmd) { case GSENSOR_IOCTL_INIT: SCP_sensorHub_init_client(); break; case GSENSOR_IOCTL_READ_CHIPINFO: data = (void __user *) arg; if(data == NULL) { err = -EINVAL; break; } SCP_sensorHub_ReadChipInfo(strbuf, SCP_SENSOR_HUB_TEMP_BUFSIZE); if(copy_to_user(data, strbuf, strlen(strbuf)+1)) { err = -EFAULT; break; } break; case GSENSOR_IOCTL_READ_SENSORDATA: err = -EINVAL; break; case GSENSOR_IOCTL_READ_GAIN: err = -EINVAL; break; case GSENSOR_IOCTL_READ_RAW_DATA: err = -EFAULT; break; case GSENSOR_IOCTL_SET_CALI: err = -EINVAL; break; case GSENSOR_IOCTL_CLR_CALI: err = -EINVAL; break; case GSENSOR_IOCTL_GET_CALI: err = -EINVAL; break; default: SCP_ERR("unknown IOCTL: 0x%08x\n", cmd); err = -ENOIOCTLCMD; break; } return err; } /*----------------------------------------------------------------------------*/ static struct file_operations SCP_sensorHub_fops = { //.owner = THIS_MODULE, .open = SCP_sensorHub_open, .release = SCP_sensorHub_release, .unlocked_ioctl = SCP_sensorHub_unlocked_ioctl, }; /*----------------------------------------------------------------------------*/ static struct miscdevice SCP_sensorHub_device = { .minor = MISC_DYNAMIC_MINOR, .name = "SCP_sensorHub", .fops = &SCP_sensorHub_fops, }; /*----------------------------------------------------------------------------*/ #if !defined(CONFIG_HAS_EARLYSUSPEND) || !defined(USE_EARLY_SUSPEND) /*----------------------------------------------------------------------------*/ #if 0 static int SCP_sensorHub_suspend(struct platform_device *dev, pm_message_t state) { return 0; } 
/*----------------------------------------------------------------------------*/ static int SCP_sensorHub_resume(struct platform_device *dev) { return 0; } /*----------------------------------------------------------------------------*/ #endif #else //#if !defined(CONFIG_HAS_EARLYSUSPEND) || !defined(USE_EARLY_SUSPEND) /*----------------------------------------------------------------------------*/ static void SCP_sensorHub_early_suspend(struct early_suspend *h) { } /*----------------------------------------------------------------------------*/ static void SCP_sensorHub_late_resume(struct early_suspend *h) { } /*----------------------------------------------------------------------------*/ #endif //#if !defined(CONFIG_HAS_EARLYSUSPEND) || !defined(USE_EARLY_SUSPEND) /*----------------------------------------------------------------------------*/ int SCP_sensorHub_req_send(SCP_SENSOR_HUB_DATA_P data, uint *len, unsigned int wait) { ipi_status status; int err = 0; if (SCP_TRC_IPI & atomic_read(&(obj_data->trace))) SCP_ERR("len = %d, type = %d, action = %d\n", *len, data->req.sensorType, data->req.action); if (*len > 48) { SCP_ERR("!!\n"); return -1; } if (in_interrupt()) { SCP_ERR("Can't do %s in interrupt context!!\n", __FUNCTION__); return -1; } if (ID_SENSOR_MAX_HANDLE < data->rsp.sensorType) { SCP_ERR("SCP_sensorHub_IPI_handler invalid sensor type %d\n", data->rsp.sensorType); return -1; } else { mutex_lock(&SCP_sensorHub_req_mutex); userData = data; userDataLen = len; switch(data->req.action) { case SENSOR_HUB_ACTIVATE: break; case SENSOR_HUB_SET_DELAY: break; case SENSOR_HUB_GET_DATA: break; case SENSOR_HUB_BATCH: break; case SENSOR_HUB_SET_CONFIG: break; case SENSOR_HUB_SET_CUST: break; default: break; } if (1 == wait) { if(atomic_read(&(obj_data->wait_rsp)) == 1) { SCP_ERR("SCP_sensorHub_req_send reentry\n"); } atomic_set(&(obj_data->wait_rsp), 1); } do { status = md32_ipi_send(IPI_SENSOR, data, *len, wait); if (ERROR == status) { SCP_ERR("md32_ipi_send 
ERROR\n"); mutex_unlock(&SCP_sensorHub_req_mutex); return -1; } } while (BUSY == status); if (SCP_TRC_IPI & atomic_read(&(obj_data->trace))) SCP_ERR("md32_ipi_send DONE\n"); mod_timer(&obj_data->timer, jiffies + 3*HZ); wait_event_interruptible(SCP_sensorHub_req_wq, (atomic_read(&(obj_data->wait_rsp)) == 0)); del_timer_sync(&obj_data->timer); err = userData->rsp.errCode; mutex_unlock(&SCP_sensorHub_req_mutex); } if (SCP_TRC_IPI & atomic_read(&(obj_data->trace))) SCP_ERR("SCP_sensorHub_req_send end\n"); return err; } /*----------------------------------------------------------------------------*/ int SCP_sensorHub_rsp_registration(int sensor, SCP_sensorHub_handler handler) { #ifdef SENSORHUB_UT SCP_FUN(); #endif if (ID_SENSOR_MAX_HANDLE < sensor) { SCP_ERR("SCP_sensorHub_rsp_registration invalid sensor %d\n", sensor); } if (NULL == handler) { SCP_ERR("SCP_sensorHub_rsp_registration null handler\n"); } sensor_handler[sensor] = handler; return 0; } /*----------------------------------------------------------------------------*/ static void SCP_ipi_work(struct work_struct *work) { #ifdef SENSORHUB_UT SCP_FUN(); #endif SCP_sensorHub_init_client(); } /*----------------------------------------------------------------------------*/ static void SCP_fifo_full_work(struct work_struct *work) { if (SCP_TRC_FUN & atomic_read(&(obj_data->trace))) SCP_FUN(); batch_notify(TYPE_BATCHFULL); } /*----------------------------------------------------------------------------*/ static void SCP_sensorHub_req_send_timeout(unsigned long data) { if(atomic_read(&(obj_data->wait_rsp)) == 1) { SCP_FUN(); if (NULL != userData && NULL != userDataLen) { userData->rsp.errCode = -1; *userDataLen = sizeof(userData->rsp); } atomic_set(&(obj_data->wait_rsp), 0); wake_up(&SCP_sensorHub_req_wq); } } /*----------------------------------------------------------------------------*/ static void SCP_sensorHub_IPI_handler(int id, void *data, unsigned int len) { struct SCP_sensorHub_data *obj = obj_data; 
SCP_SENSOR_HUB_DATA_P rsp = (SCP_SENSOR_HUB_DATA_P)data; bool wake_up_req = false; bool do_registed_handler = false; static int first_init_done = 0; #ifdef SENSORHUB_UT SCP_FUN(); #endif if (SCP_TRC_IPI & atomic_read(&(obj_data->trace))) SCP_ERR("len = %d, type = %d, action = %d, errCode = %d\n", len, rsp->rsp.sensorType, rsp->rsp.action, rsp->rsp.errCode); if (len > 48) { SCP_ERR("SCP_sensorHub_IPI_handler len=%d error\n", len); return; } else { switch(rsp->rsp.action) { case SENSOR_HUB_ACTIVATE: case SENSOR_HUB_SET_DELAY: case SENSOR_HUB_GET_DATA: case SENSOR_HUB_BATCH: case SENSOR_HUB_SET_CONFIG: case SENSOR_HUB_SET_CUST: wake_up_req = true; break; case SENSOR_HUB_NOTIFY: switch(rsp->notify_rsp.event) { case SCP_INIT_DONE: if (0 == first_init_done) { schedule_work(&obj->ipi_work); first_init_done = 1; } do_registed_handler = true; break; case SCP_FIFO_FULL: schedule_work(&obj->fifo_full_work); break; case SCP_NOTIFY: do_registed_handler = true; break; default: break; } break; default: SCP_ERR("SCP_sensorHub_IPI_handler unknow action=%d error\n", rsp->rsp.action); return; } if (ID_SENSOR_MAX_HANDLE < rsp->rsp.sensorType) { SCP_ERR("SCP_sensorHub_IPI_handler invalid sensor type %d\n", rsp->rsp.sensorType); return; } else if (true == do_registed_handler) { if (NULL != sensor_handler[rsp->rsp.sensorType]) { sensor_handler[rsp->rsp.sensorType](data, len); } } if(atomic_read(&(obj_data->wait_rsp)) == 1 && true == wake_up_req) { if (NULL == userData || NULL == userDataLen) { SCP_ERR("SCP_sensorHub_IPI_handler null pointer\n"); } else { if (userData->req.sensorType != rsp->rsp.sensorType) SCP_ERR("SCP_sensorHub_IPI_handler sensor type %d != %d\n", userData->req.sensorType, rsp->rsp.sensorType); if (userData->req.action != rsp->rsp.action) SCP_ERR("SCP_sensorHub_IPI_handler action %d != %d\n", userData->req.action, rsp->rsp.action); memcpy(userData, rsp, len); *userDataLen = len; } atomic_set(&(obj_data->wait_rsp), 0); wake_up(&SCP_sensorHub_req_wq); } } } 
/*----------------------------------------------------------------------------*/ int SCP_sensorHub_enable_hw_batch(int handle, int enable, long long samplingPeriodNs,long long maxBatchReportLatencyNs) { SCP_SENSOR_HUB_DATA req; int len; int err = 0; if (samplingPeriodNs==0) return 0; if (SCP_TRC_FUN & atomic_read(&(obj_data->trace))) SCP_FUN(); req.batch_req.sensorType = handle; req.batch_req.action = SENSOR_HUB_BATCH; req.batch_req.flag = 0; req.batch_req.period_ms = (unsigned int)samplingPeriodNs; req.batch_req.timeout_ms = (unsigned int)maxBatchReportLatencyNs; len = sizeof(req.batch_req); err = SCP_sensorHub_req_send(&req, &len, 1); if (err) { SCP_ERR("SCP_sensorHub_req_send fail!\n"); } return err; } /*----------------------------------------------------------------------------*/ static int SCP_sensorHub_flush(int handle) { return 0; } /*----------------------------------------------------------------------------*/ static int SCP_sensorHub_get_data(int handle, hwm_sensor_data *sensorData) { #ifdef SENSORHUB_UT SCP_FUN(); #endif SCP_sensorHub_ReadSensorData(handle, sensorData); return 0; } /*----------------------------------------------------------------------------*/ static int SCP_sensorHub_get_fifo_status(int *dataLen, int *status, char *reserved, struct batch_timestamp_info *p_batch_timestampe_info) { // struct SCP_sensorHub_data *obj = obj_data; // int err = 0; // SCP_SENSOR_HUB_DATA data; // char *pStart, *pEnd, *pNext; // unsigned int len = 0; // char *rp, *wp; // // *dataLen = 0; // *status = 1; // // data.get_data_req.sensorType = 0; // data.get_data_req.action = SENSOR_HUB_GET_DATA; // len = sizeof(data.get_data_req); // // if (0 != (err = SCP_sensorHub_req_send(&data, &len, 1))) // { // SCP_ERR("SCP_sensorHub_req_send error: ret value=%d\n", err); // return -3; // } // else // { // // dma_sync_single_for_cpu(NULL, obj->mapping, obj->SCP_sensorFIFO->FIFOSize, DMA_FROM_DEVICE); // pStart = (char *)obj->SCP_sensorFIFO + offsetof(struct sensorFIFO, 
data);//sizeof(obj->SCP_sensorFIFO); // pEnd = (char *)pStart + obj->SCP_sensorFIFO->FIFOSize; // rp = pStart + (int)obj->SCP_sensorFIFO->rp; // wp = pStart + (int)obj->SCP_sensorFIFO->wp; // // if (SCP_TRC_BATCH & atomic_read(&(obj_data->trace))) // { // //SCP_LOG("FIFO virt_to_phys(obj->SCP_sensorFIFO) = %p\n", (void *)virt_to_phys(obj->SCP_sensorFIFO)); // //SCP_LOG("FIFO obj->SCP_sensorFIFO = %p, pStart = %p, pEnd = %p\n", obj->SCP_sensorFIFO, pStart, pEnd); // SCP_LOG("FIFO rp = %p, wp = %p\n", rp, wp); // //SCP_LOG("FIFO sensorType = %d, dataLength = %d\n", obj->SCP_sensorFIFO->data[0].sensorType, // // obj->SCP_sensorFIFO->data[0].dataLength); // } // // if (rp < pStart || pEnd <= rp) // { // SCP_ERR("FIFO rp invalid : %p, %p, %p\n", pStart, pEnd, rp); // return -4; // } // // if (wp < pStart || pEnd <= wp) // { // SCP_ERR("FIFO wp invalid : %p, %p, %p\n", pStart, pEnd, wp); // return -5; // } // // if (rp == wp) // { // SCP_ERR("FIFO empty\n"); // return -6; // } // // while(rp != wp) // { // pNext = rp + offsetof(struct SCP_sensorData, data) + ((struct SCP_sensorData*)rp)->dataLength; // pNext = (char *)((((unsigned long)pNext + 3) >> 2 ) << 2); // //SCP_LOG("((struct SCP_sensorData*)rp)->dataLength = %d, pNext = %p, rp = %p\n", ((struct SCP_sensorData*)rp)->dataLength, pNext, rp); // // if (pNext < pEnd) // { // rp = pNext; // } // else // { // rp = pStart + (int)pNext - (int)pEnd; // } // (*dataLen)++; // } // // //obj->SCP_sensorFIFO->rp = (struct SCP_sensorData*)(rp - pStart); // dma_sync_single_for_device(NULL, obj->mapping, obj->SCP_sensorFIFO->FIFOSize, DMA_TO_DEVICE); // } // // if (SCP_TRC_BATCH & atomic_read(&(obj_data->trace))) // SCP_ERR("SCP_sensorHub_get_fifo_status dataLen = %d, status = %d\n", *dataLen, *status); // // //*len = 1; // //*status = 1; return 0; } /*----------------------------------------------------------------------------*/ static int SCP_sensorHub_probe(/*struct platform_device *pdev*/) { struct SCP_sensorHub_data *obj; int 
err = 0; struct batch_control_path ctl={0}; struct batch_data_path data={0}; SCP_FUN(); if(!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) { SCP_ERR("Allocate SCP_sensorHub_data fail\n"); err = -ENOMEM; goto exit; } memset(obj, 0, sizeof(struct SCP_sensorHub_data)); if(!(obj->SCP_sensorFIFO = kzalloc(SCP_SENSOR_HUB_FIFO_SIZE, GFP_KERNEL))) { SCP_ERR("Allocate SCP_sensorFIFO fail\n"); err = -ENOMEM; goto exit; } obj->SCP_sensorFIFO->wp = (struct SCP_sensorData*)0;//(struct SCP_sensorData *)((char *)obj->SCP_sensorFIFO + offsetof(struct sensorFIFO, data)); obj->SCP_sensorFIFO->rp = (struct SCP_sensorData*)0;//(struct SCP_sensorData *)((char *)obj->SCP_sensorFIFO + offsetof(struct sensorFIFO, data)); obj->SCP_sensorFIFO->FIFOSize = SCP_SENSOR_HUB_FIFO_SIZE - offsetof(struct sensorFIFO, data); obj->hw = get_cust_sensorHub_hw(); SCP_ERR("obj->SCP_sensorFIFO = %p, wp = %p, rp = %p, size = %d\n", obj->SCP_sensorFIFO, obj->SCP_sensorFIFO->wp, obj->SCP_sensorFIFO->rp, obj->SCP_sensorFIFO->FIFOSize); obj_data = obj; atomic_set(&obj->trace, 0xFF); atomic_set(&obj->suspend, 0); atomic_set(&obj->wait_rsp, 0); atomic_set(&obj->ipi_handler_running, 0); INIT_WORK(&obj->ipi_work, SCP_ipi_work); INIT_WORK(&obj->fifo_full_work, SCP_fifo_full_work); #ifdef CONFIG_CUSTOM_KERNEL_STEP_COUNTER INIT_WORK(&obj->sd_work, SCP_sd_work); INIT_WORK(&obj->sig_work, SCP_sig_work); #endif //#ifdef CONFIG_CUSTOM_KERNEL_STEP_COUNTER #ifdef CONFIG_CUSTOM_KERNEL_IN_POCKET_SENSOR INIT_WORK(&obj->inpk_work,SCP_inpk_work); #endif //#ifdef CONFIG_CUSTOM_KERNEL_IN_POCKET_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_PEDOMETER // INIT_WORK(&obj->pdr_work, SCP_pdr_work); #endif //#ifdef CONFIG_CUSTOM_KERNEL_PEDOMETER #ifdef CONFIG_CUSTOM_KERNEL_ACTIVITY_SENSOR // INIT_WORK(&obj->act_work, SCP_act_work); #endif //#ifdef CONFIG_CUSTOM_KERNEL_ACTIVITY_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_SHAKE_SENSOR INIT_WORK(&obj->shk_work, SCP_shk_work); #endif //#ifdef CONFIG_CUSTOM_KERNEL_SHAKE_SENSOR #ifdef 
CONFIG_CUSTOM_KERNEL_PICK_UP_SENSOR INIT_WORK(&obj->pkup_work,SCP_pkup_work); #endif //#ifdef CONFIG_CUSTOM_KERNEL_PICK_UP_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_FACE_DOWN_SENSOR INIT_WORK(&obj->fdn_work, SCP_fdn_work); #endif //#ifdef CONFIG_CUSTOM_KERNEL_FACE_DOWN_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_HEART_RATE_SENSOR // INIT_WORK(&obj->hrm_work, SCP_hrm_work); #endif //#ifdef CONFIG_CUSTOM_KERNEL_HEART_RATE_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_TILT_DETECTOR_SENSOR INIT_WORK(&obj->tilt_work, SCP_tilt_work); #endif //#ifdef CONFIG_CUSTOM_KERNEL_TILT_DETECTOR_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_WAKE_GESTURE_SENSOR INIT_WORK(&obj->wag_work, SCP_wag_work); #endif //#ifdef CONFIG_CUSTOM_KERNEL_WAKE_GESTURE_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_GLANCE_GESTURE_SENSOR INIT_WORK(&obj->glg_work, SCP_glg_work); #endif //#ifdef CONFIG_CUSTOM_KERNEL_GLANCE_GESTURE_SENSOR init_waitqueue_head(&SCP_sensorHub_req_wq); init_timer(&obj->timer); obj->timer.expires = 3*HZ; obj->timer.function = SCP_sensorHub_req_send_timeout; obj->timer.data = (unsigned long)obj; md32_ipi_registration(IPI_SENSOR, SCP_sensorHub_IPI_handler, "SCP_sensorHub"); if((err = misc_register(&SCP_sensorHub_device))) { SCP_ERR("SCP_sensorHub_device register failed\n"); goto exit_misc_device_register_failed; } if((err = SCP_sensorHub_create_attr(&(SCP_sensorHub_init_info.platform_diver_addr->driver)))) { SCP_ERR("create attribute err = %d\n", err); goto exit_create_attr_failed; } ctl.enable_hw_batch = SCP_sensorHub_enable_hw_batch; ctl.flush = SCP_sensorHub_flush; err = batch_register_control_path(MAX_ANDROID_SENSOR_NUM, &ctl); if(err) { SCP_ERR("register SCP sensor hub control path err\n"); goto exit_kfree; } data.get_data = SCP_sensorHub_get_data; data.get_fifo_status = SCP_sensorHub_get_fifo_status; data.is_batch_supported = 1; err = batch_register_data_path(MAX_ANDROID_SENSOR_NUM, &data); if(err) { SCP_ERR("register SCP sensor hub control data path err\n"); goto exit_kfree; } #if defined(CONFIG_HAS_EARLYSUSPEND) && 
defined(USE_EARLY_SUSPEND) obj->early_drv.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1, obj->early_drv.suspend = SCP_sensorHub_early_suspend, obj->early_drv.resume = SCP_sensorHub_late_resume, register_early_suspend(&obj->early_drv); #endif SCP_sensorHub_init_flag = 0; printk("%s: OK new\n", __func__); return 0; exit_create_attr_failed: misc_deregister(&SCP_sensorHub_device); exit_misc_device_register_failed: exit_kfree: kfree(obj); exit: SCP_ERR("%s: err = %d\n", __func__, err); SCP_sensorHub_init_flag = -1; return err; } /*----------------------------------------------------------------------------*/ static int SCP_sensorHub_remove() { struct sensorHub_hw *hw = get_cust_sensorHub_hw(); int err = 0; SCP_FUN(); SCP_sensorHub_power(hw, 0); if((err = SCP_sensorHub_delete_attr(&(SCP_sensorHub_init_info.platform_diver_addr->driver)))) { SCP_ERR("SCP_sensorHub_delete_attr fail: %d\n", err); } if((err = misc_deregister(&SCP_sensorHub_device))) { SCP_ERR("misc_deregister fail: %d\n", err); } return 0; } /*----------------------------------------------------------------------------*/ static int SCP_sensor_enable(int sensorType, int en) { SCP_SENSOR_HUB_DATA req; int len; int err = 0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); req.activate_req.sensorType = sensorType; req.activate_req.action = SENSOR_HUB_ACTIVATE; req.activate_req.enable = en; len = sizeof(req.activate_req); err = SCP_sensorHub_req_send(&req, &len, 1); if (err) { SCP_ERR("SCP_sensorHub_req_send fail!\n"); } return err; } static int SCP_sensor_set_delay(int sensorType, int delay) { SCP_SENSOR_HUB_DATA req; int len; int err = 0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); req.set_delay_req.sensorType = sensorType; req.set_delay_req.action = SENSOR_HUB_SET_DELAY; req.set_delay_req.delay = delay; len = sizeof(req.set_delay_req); err = SCP_sensorHub_req_send(&req, &len, 1); if (err) { SCP_ERR("SCP_sensorHub_req_send fail!\n"); } return err; } 
/*----------------------------------------------------------------------------*/ static int SCP_sensor_get_data16(int sensorType, void *value, int *status) { SCP_SENSOR_HUB_DATA req; int len; int err = 0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); req.get_data_req.sensorType = sensorType; req.get_data_req.action = SENSOR_HUB_GET_DATA; len = sizeof(req.get_data_req); err = SCP_sensorHub_req_send(&req, &len, 1); if (err) { SCP_ERR("SCP_sensorHub_req_send fail!\n"); } switch (sensorType) { case ID_ACTIVITY://there are 6 values in activity *(u16*)value = *req.get_data_rsp.int16_Data; *((u16*)value+1) = *(req.get_data_rsp.int16_Data+1); *((u16*)value+2) = *(req.get_data_rsp.int16_Data+2); *((u16*)value+3) = *(req.get_data_rsp.int16_Data+3); *((u16*)value+4) = *(req.get_data_rsp.int16_Data+4); *((u16*)value+5) = *(req.get_data_rsp.int16_Data+5); SCP_LOG("ID_ACTIVITY , value=%d value1=%d value2=%d value3=%d value4=%d value5=%d\n", *((u16*)value), *((u16*)value+1), *((u16*)value+2), *((u16*)value+3), *((u16*)value+4), *((u16*)value+5)); break; case ID_IN_POCKET: *((u16*)value) = *(req.get_data_rsp.int16_Data); break; case ID_PICK_UP_GESTURE: *((u16*)value) = *(req.get_data_rsp.int16_Data); break; case ID_FACE_DOWN: *((u16*)value) = *(req.get_data_rsp.int16_Data); break; case ID_SHAKE: *((u16*)value) = *(req.get_data_rsp.int16_Data); break; case ID_TILT_DETECTOR: *((u16*)value) = *(req.get_data_rsp.int16_Data); break; case ID_WAKE_GESTURE: *((u16*)value) = *(req.get_data_rsp.int16_Data); break; case ID_GLANCE_GESTURE: *((u16*)value) = *(req.get_data_rsp.int16_Data); break; default: err = -1; break; } SCP_LOG("sensorType = %d, value = %d\n", sensorType, *((u16 *)value)); return err; } static int SCP_sensor_get_data32(int sensorType, void *value, int *status) { SCP_SENSOR_HUB_DATA req; int len; int err = 0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); req.get_data_req.sensorType = sensorType; req.get_data_req.action = SENSOR_HUB_GET_DATA; 
len = sizeof(req.get_data_req); err = SCP_sensorHub_req_send(&req, &len, 1); if (err) { SCP_ERR("SCP_sensorHub_req_send fail!\n"); } switch (sensorType) { case ID_STEP_COUNTER: *((u32*)value) = *(req.get_data_rsp.int32_Data); break; case ID_STEP_DETECTOR: *((u32*)value) = *(req.get_data_rsp.int32_Data); break; case ID_SIGNIFICANT_MOTION: *((u32*)value) = *(req.get_data_rsp.int32_Data); break; case ID_PEDOMETER://there are 4 values in pedometer *(u32*)value = *req.get_data_rsp.int32_Data; *((u32*)value+1) = *(req.get_data_rsp.int32_Data+1); *((u32*)value+2) = *(req.get_data_rsp.int32_Data+2); *((u32*)value+3) = *(req.get_data_rsp.int32_Data+3); SCP_LOG("ID_PEDOMETER, value=%d value1=%d value2=%d value3=%d\n", *((u32*)value), *((u32*)value+1), *((u32*)value+2), *((u32*)value+3)); break; case ID_HEART_RATE://there are 4 values in pedometer *(u32*)value = *req.get_data_rsp.int32_Data; *((u32*)value+1) = *(req.get_data_rsp.int32_Data+1); SCP_LOG("ID_HEART_RATE, value=%d value1=%d \n", *((u32*)value), *((u32*)value+1)); break; default: err = -1; break; } SCP_LOG("sensorType = %d, value = %d\n", sensorType, *((u32 *)value)); return err; } static int SCP_sensor_get_data(int sensorType, void *value, int *status) { SCP_SENSOR_HUB_DATA req; int len; int err = 0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); req.get_data_req.sensorType = sensorType; req.get_data_req.action = SENSOR_HUB_GET_DATA; len = sizeof(req.get_data_req); err = SCP_sensorHub_req_send(&req, &len, 1); if (err) { SCP_ERR("SCP_sensorHub_req_send fail!\n"); } switch (sensorType) { case ID_STEP_COUNTER: *((u64 *)value) = *(req.get_data_rsp.int32_Data); break; case ID_STEP_DETECTOR: *((u64 *)value) = *(req.get_data_rsp.int32_Data); break; case ID_SIGNIFICANT_MOTION: *((u64 *)value) = *(req.get_data_rsp.int32_Data); break; case ID_HEART_RATE://there are 2 values in heart rate *(u64 *)value = *req.get_data_rsp.int32_Data; *((u64 *)value+1) = *(req.get_data_rsp.int32_Data+1); SCP_LOG("ID_PEDOMETER, 
value=%lld value1=%lld\n", *((u64 *)value), *((u64 *)value+1)); break; case ID_PEDOMETER://there are 4 values in pedometer *(u64 *)value = *req.get_data_rsp.int32_Data; *((u64 *)value+1) = *(req.get_data_rsp.int32_Data+1); *((u64 *)value+2) = *(req.get_data_rsp.int32_Data+2); *((u64 *)value+3) = *(req.get_data_rsp.int32_Data+3); SCP_LOG("ID_PEDOMETER, value=%lld value1=%lld value2=%lld value3=%lld\n", *((u64 *)value), *((u64 *)value+1), *((u64 *)value+2), *((u64 *)value+3)); break; case ID_ACTIVITY://there are 6 values in activity *(u64 *)value = *req.get_data_rsp.int16_Data; *((u64 *)value+1) = *(req.get_data_rsp.int16_Data+1); *((u64 *)value+2) = *(req.get_data_rsp.int16_Data+2); *((u64 *)value+3) = *(req.get_data_rsp.int16_Data+3); *((u64 *)value+4) = *(req.get_data_rsp.int16_Data+4); *((u64 *)value+5) = *(req.get_data_rsp.int16_Data+5); *(u64 *)value &= 0xFFFF; *((u64 *)value+1) &= 0xFFFF; *((u64 *)value+2) &= 0xFFFF; *((u64 *)value+3) &= 0xFFFF; *((u64 *)value+4) &= 0xFFFF; *((u64 *)value+5) &= 0xFFFF; SCP_LOG("ID_ACTIVITY 16, Data=%d Data1=%d Data2=%d Data3=%d Data4=%d Data5=%d\n", *req.get_data_rsp.int16_Data, *(req.get_data_rsp.int16_Data+1), *(req.get_data_rsp.int16_Data+2), *(req.get_data_rsp.int16_Data+3), *(req.get_data_rsp.int16_Data+4), *(req.get_data_rsp.int16_Data+5)); SCP_LOG("ID_ACTIVITY 64, value=%lld value1=%lld value2=%lld value3=%lld value4=%lld value5=%lld\n", *((u64 *)value), *((u64 *)value+1), *((u64 *)value+2), *((u64 *)value+3), *((u64 *)value+4), *((u64 *)value+5)); break; case ID_IN_POCKET: *((u64 *)value) = *(req.get_data_rsp.int16_Data); break; case ID_PICK_UP_GESTURE: *((u64 *)value) = *(req.get_data_rsp.int16_Data); break; case ID_FACE_DOWN: *((u64 *)value) = *(req.get_data_rsp.int16_Data); break; case ID_SHAKE: *((u64 *)value) = *(req.get_data_rsp.int16_Data); break; case ID_TILT_DETECTOR: *((u64 *)value) = *(req.get_data_rsp.int16_Data); break; case ID_WAKE_GESTURE: *((u64 *)value) = *(req.get_data_rsp.int16_Data); break; case 
ID_GLANCE_GESTURE: *((u64 *)value) = *(req.get_data_rsp.int16_Data); break; default: err = -1; break; } SCP_LOG("sensorType = %d, value = %lld\n", sensorType, *((u64 *)value)); return err; } static int SCP_sensorHub_notify_handler(void* data, uint len) { SCP_SENSOR_HUB_DATA_P rsp = (SCP_SENSOR_HUB_DATA_P)data; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); if (SCP_TRC_IPI == atomic_read(&(obj_data->trace))) SCP_LOG("len = %d, type = %d, action = %d, errCode = %d\n", len, rsp->rsp.sensorType, rsp->rsp.action, rsp->rsp.errCode); if(!obj_data) { return -1; } switch(rsp->rsp.action) { case SENSOR_HUB_NOTIFY: SCP_LOG("SENSOR_HUB_NOTIFY sensorId = %d\n", rsp->notify_rsp.sensorType); switch(rsp->notify_rsp.event) { case SCP_NOTIFY: if (ID_STEP_DETECTOR == rsp->notify_rsp.sensorType) { schedule_work(&(obj_data->sd_work)); } else if (ID_SIGNIFICANT_MOTION == rsp->notify_rsp.sensorType) { schedule_work(&(obj_data->sig_work)); } else if (ID_IN_POCKET == rsp->notify_rsp.sensorType) { schedule_work(&(obj_data->inpk_work)); } else if (ID_PICK_UP_GESTURE == rsp->notify_rsp.sensorType) { schedule_work(&(obj_data->pkup_work)); } else if (ID_FACE_DOWN == rsp->notify_rsp.sensorType) { schedule_work(&(obj_data->fdn_work)); } else if (ID_SHAKE == rsp->notify_rsp.sensorType) { schedule_work(&(obj_data->shk_work)); } else if (ID_TILT_DETECTOR == rsp->notify_rsp.sensorType) { schedule_work(&(obj_data->tilt_work)); } else if (ID_WAKE_GESTURE == rsp->notify_rsp.sensorType) { schedule_work(&(obj_data->wag_work)); } else if (ID_GLANCE_GESTURE == rsp->notify_rsp.sensorType) { schedule_work(&(obj_data->glg_work)); } else { SCP_ERR("Unknow notify"); } break; default: SCP_ERR("Error sensor hub notify"); break; } break; default: SCP_ERR("Error sensor hub action"); break; } return 0; } /*----------------------------------------------------------------------------*/ #ifdef CONFIG_CUSTOM_KERNEL_HEART_RATE_SENSOR //static void SCP_hrm_work(struct work_struct *work) //{ // if 
(SCP_TRC_FUN == atomic_read(&(obj_data->trace))) // SCP_FUN(); // // hrm_notify(); //} //static int hrm_enable(int en) //{ // if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) // SCP_FUN(); // // return SCP_sensor_enable(ID_HEART_RATE, en); //} static int hrm_get_data(u32 *value, int *status) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_get_data32(ID_HEART_RATE, value, status); } static int hrm_open_report_data(int open)//open data rerport to HAL { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } /*----------------------------------------------------------------------------*/ static int hrm_enable_nodata(int en)//only enable not report event to HAL { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_enable(ID_HEART_RATE, en); } static int hrm_set_delay(u64 delay) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_set_delay(ID_HEART_RATE, delay); } static int SCP_sensorHub_heart_rate_init() { struct hrm_control_path ctl={0}; struct hrm_data_path data={0}; int err=0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); ctl.open_report_data= hrm_open_report_data; ctl.enable_nodata = hrm_enable_nodata; ctl.set_delay = hrm_set_delay; ctl.is_report_input_direct = false; ctl.is_support_batch = true; err = hrm_register_control_path(&ctl); if(err) { printk("register heart_rate control path err\n"); return -1; } data.get_data = hrm_get_data; //data.vender_div = 1; err = hrm_register_data_path(&data); if(err) { printk("register heart_rate data path err\n"); return -1; } return 0; } static int SCP_sensorHub_heart_rate_uninit() { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } #endif //#ifdef CONFIG_CUSTOM_KERNEL_HEART_RATE_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_PEDOMETER //static void SCP_hrm_work(struct work_struct *work) //{ // if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) // SCP_FUN(); // // pdr_notify(); //} 
//static int pdr_enable(int en) //{ // if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) // SCP_FUN(); // // return SCP_sensor_enable(ID_PEDOMETER, en); //} static int pdr_get_data(u32 *value, int *status) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_get_data32(ID_PEDOMETER, value, status); } static int pdr_open_report_data(int open)//open data rerport to HAL { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } /*----------------------------------------------------------------------------*/ static int pdr_enable_nodata(int en)//only enable not report event to HAL { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_enable(ID_PEDOMETER, en); } static int pdr_set_delay(u64 delay) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_set_delay(ID_PEDOMETER, delay); } static int SCP_sensorHub_pedometer_init() { struct pdr_control_path ctl={0}; struct pdr_data_path data={0}; int err=0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); ctl.open_report_data= pdr_open_report_data; ctl.enable_nodata = pdr_enable_nodata; ctl.set_delay = pdr_set_delay; ctl.is_report_input_direct = false; ctl.is_support_batch = true; err = pdr_register_control_path(&ctl); if(err) { printk("register pedometer control path err\n"); return -1; } data.get_data = pdr_get_data; //data.vender_div = 1; err = pdr_register_data_path(&data); if(err) { printk("register pedometer data path err\n"); return -1; } return 0; } static int SCP_sensorHub_pedometer_uninit() { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } #endif //#ifdef CONFIG_CUSTOM_KERNEL_PEDOMETER /*----------------------------------------------------------------------------*/ #ifdef CONFIG_CUSTOM_KERNEL_ACTIVITY_SENSOR //static void SCP_act_work(struct work_struct *work) //{ // if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) // SCP_FUN(); // // act_notify(); //} //static int 
act_enable(int en) //{ // if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) // SCP_FUN(); // // return SCP_sensor_enable(ID_ACTIVITY, en); //} static int act_get_data(u16 *value ,int *status) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_get_data16(ID_ACTIVITY, value, status); } static int act_open_report_data(int open)//open data rerport to HAL { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } /*----------------------------------------------------------------------------*/ static int act_enable_nodata(int en)//only enable not report event to HAL { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_enable(ID_ACTIVITY, en); } static int act_set_delay(u64 delay) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_set_delay(ID_ACTIVITY, delay); } static int SCP_sensorHub_activity_init() { struct act_control_path ctl={0}; struct act_data_path data={0}; int err=0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); ctl.open_report_data= act_open_report_data; ctl.enable_nodata = act_enable_nodata; ctl.set_delay = act_set_delay; ctl.is_report_input_direct = false; ctl.is_support_batch = true; err = act_register_control_path(&ctl); if(err) { printk("register pedometer control path err\n"); return -1; } data.get_data = act_get_data; //data.vender_div = 1; err = act_register_data_path(&data); if(err) { printk("register pedometer data path err\n"); return -1; } return 0; } static int SCP_sensorHub_activity_uninit() { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } #endif //#ifdef CONFIG_CUSTOM_KERNEL_ACTIVITY_SENSOR /*----------------------------------------------------------------------------*/ #ifdef CONFIG_CUSTOM_KERNEL_IN_POCKET_SENSOR static void SCP_inpk_work(struct work_struct *work) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); inpk_notify(); } static int inpk_open_report_data(int open) 
{ if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_enable(ID_IN_POCKET, open); } static int inpk_get_data(u16 *value, int *status ) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_get_data16(ID_IN_POCKET, value, status); } static int SCP_sensorHub_in_pocket_init() { struct inpk_control_path ctl={0}; struct inpk_data_path data={0}; int err=0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); ctl.open_report_data = inpk_open_report_data; err = inpk_register_control_path(&ctl); if(err) { printk("register in pocket control path err\n"); return -1; } data.get_data = inpk_get_data; err = inpk_register_data_path(&data); if(err) { printk("register in pocket data path err\n"); return -1; } SCP_sensorHub_rsp_registration(ID_IN_POCKET, SCP_sensorHub_notify_handler); return 0; } static int SCP_sensorHub_in_pocket_uninit() { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } #endif //#ifdef CONFIG_CUSTOM_KERNEL_IN_POCKET_SENSOR /*----------------------------------------------------------------------------*/ #ifdef CONFIG_CUSTOM_KERNEL_SHAKE_SENSOR static void SCP_shk_work(struct work_struct *work) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); shk_notify(); } static int shk_open_report_data(int open) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_enable(ID_SHAKE, open); } static int shk_get_data(u16 *value, int *status ) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_get_data16(ID_SHAKE, value, status); } static int SCP_sensorHub_shake_init() { struct shk_control_path ctl={0}; struct shk_data_path data={0}; int err=0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); ctl.open_report_data = shk_open_report_data; err = shk_register_control_path(&ctl); if(err) { printk("register shake control path err\n"); return -1; } data.get_data = shk_get_data; err = 
shk_register_data_path(&data); if(err) { printk("register shake data path err\n"); return -1; } SCP_sensorHub_rsp_registration(ID_SHAKE, SCP_sensorHub_notify_handler); return 0; } static int SCP_sensorHub_shake_uninit() { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } #endif //#ifdef CONFIG_CUSTOM_KERNEL_SHAKE_SENSOR /*----------------------------------------------------------------------------*/ #ifdef CONFIG_CUSTOM_KERNEL_PICK_UP_SENSOR static void SCP_pkup_work(struct work_struct *work) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); pkup_notify(); } static int pkup_open_report_data(int open) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_enable(ID_PICK_UP_GESTURE, open); } static int pkup_get_data(u16 *value , int *status) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_get_data16(ID_PICK_UP_GESTURE, value, status); } static int SCP_sensorHub_pick_up_init() { struct pkup_control_path ctl={0}; struct pkup_data_path data={0}; int err=0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); ctl.open_report_data = pkup_open_report_data; err = pkup_register_control_path(&ctl); if(err) { printk("register pick up control path err\n"); return -1; } data.get_data = pkup_get_data; err = pkup_register_data_path(&data); if(err) { printk("register pick up data path err\n"); return -1; } SCP_sensorHub_rsp_registration(ID_PICK_UP_GESTURE, SCP_sensorHub_notify_handler); return 0; } static int SCP_sensorHub_pick_up_uninit() { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } #endif //#ifdef CONFIG_CUSTOM_KERNEL_PICK_UP_SENSOR /*----------------------------------------------------------------------------*/ #ifdef CONFIG_CUSTOM_KERNEL_FACE_DOWN_SENSOR static void SCP_fdn_work(struct work_struct *work) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); fdn_notify(); } static int fdn_open_report_data(int open) { if 
(SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_enable(ID_FACE_DOWN, open); } static int fdn_get_data(u16 *value , int *status) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_get_data16(ID_FACE_DOWN, value, status); } static int SCP_sensorHub_face_down_init() { struct fdn_control_path ctl={0}; struct fdn_data_path data={0}; int err=0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); ctl.open_report_data = fdn_open_report_data; err = fdn_register_control_path(&ctl); if(err) { printk("register face down control path err\n"); return -1; } data.get_data = fdn_get_data; err = fdn_register_data_path(&data); if(err) { printk("register face down data path err\n"); return -1; } SCP_sensorHub_rsp_registration(ID_FACE_DOWN, SCP_sensorHub_notify_handler); return 0; } static int SCP_sensorHub_face_down_uninit() { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } #endif //#ifdef CONFIG_CUSTOM_KERNEL_FACE_DOWN_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_TILT_DETECTOR_SENSOR static void SCP_tilt_work(struct work_struct *work) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); tilt_notify(); } static int tilt_open_report_data(int open) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_enable(ID_TILT_DETECTOR, open); } static int tilt_get_data(u16 *value, int *status ) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_get_data16(ID_TILT_DETECTOR, value, status); } static int SCP_sensorHub_tilt_detector_init() { struct tilt_control_path ctl={0}; struct tilt_data_path data={0}; int err=0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); ctl.open_report_data = tilt_open_report_data; err = tilt_register_control_path(&ctl); if(err) { printk("register tilt_detector control path err\n"); return -1; } data.get_data = tilt_get_data; err = tilt_register_data_path(&data); if(err) { printk("register 
tilt_detector data path err\n"); return -1; } SCP_sensorHub_rsp_registration(ID_TILT_DETECTOR, SCP_sensorHub_notify_handler); return 0; } static int SCP_sensorHub_tilt_detector_uninit() { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } #endif //#ifdef CONFIG_CUSTOM_KERNEL_TILT_DETECTOR_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_WAKE_GESTURE_SENSOR static void SCP_wag_work(struct work_struct *work) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); wag_notify(); } static int wag_open_report_data(int open) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_enable(ID_WAKE_GESTURE, open); } static int wag_get_data(u16 *value, int *status ) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_get_data16(ID_WAKE_GESTURE, value, status); } static int SCP_sensorHub_wake_gesture_init() { struct wag_control_path ctl={0}; struct wag_data_path data={0}; int err=0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); ctl.open_report_data = wag_open_report_data; err = wag_register_control_path(&ctl); if(err) { printk("register wake_gesture control path err\n"); return -1; } data.get_data = wag_get_data; err = wag_register_data_path(&data); if(err) { printk("register wake_gesture data path err\n"); return -1; } SCP_sensorHub_rsp_registration(ID_WAKE_GESTURE, SCP_sensorHub_notify_handler); return 0; } static int SCP_sensorHub_wake_gesture_uninit() { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } #endif //#ifdef CONFIG_CUSTOM_KERNEL_WAKE_GESTURE_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_GLANCE_GESTURE_SENSOR static void SCP_glg_work(struct work_struct *work) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); glg_notify(); } static int glg_open_report_data(int open) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_enable(ID_GLANCE_GESTURE, open); } static int glg_get_data(u16 *value, int *status ) { if 
(SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_get_data16(ID_GLANCE_GESTURE, value, status); } static int SCP_sensorHub_glance_gesture_init() { struct glg_control_path ctl={0}; struct glg_data_path data={0}; int err=0; if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); ctl.open_report_data = glg_open_report_data; err = glg_register_control_path(&ctl); if(err) { printk("register glance_gesture control path err\n"); return -1; } data.get_data = glg_get_data; err = glg_register_data_path(&data); if(err) { printk("register glance_gesture data path err\n"); return -1; } SCP_sensorHub_rsp_registration(ID_GLANCE_GESTURE, SCP_sensorHub_notify_handler); return 0; } static int SCP_sensorHub_glance_gesture_uninit() { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } #endif //#ifdef CONFIG_CUSTOM_KERNEL_GLANCE_GESTURE_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_STEP_COUNTER static void SCP_sd_work(struct work_struct *work) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); step_notify(TYPE_STEP_DETECTOR); } /*----------------------------------------------------------------------------*/ static void SCP_sig_work(struct work_struct *work) { if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) SCP_FUN(); step_notify(TYPE_SIGNIFICANT); } /*----------------------------------------------------------------------------*/ //static int SCP_sensorHub_sd_handler(void* data, uint len) //{ // SCP_SENSOR_HUB_DATA_P rsp = (SCP_SENSOR_HUB_DATA_P)data; // // if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) // SCP_FUN(); // // if (SCP_TRC_IPI == atomic_read(&(obj_data->trace))) // SCP_LOG("len = %d, type = %d, action = %d, errCode = %d\n", len, rsp->rsp.sensorType, rsp->rsp.action, rsp->rsp.errCode); // // if(!obj_data) // { // return -1; // } // // switch(rsp->rsp.action) // { // case SENSOR_HUB_NOTIFY: // switch(rsp->notify_rsp.event) // { // case SCP_NOTIFY: // if (ID_STEP_DETECTOR == rsp->notify_rsp.sensorType) // { 
// schedule_work(&(obj_data->sd_work)); // } // else // { // SCP_ERR("Unknow notify"); // } // break; // default: // SCP_ERR("Error sensor hub notify"); // break; // } // break; // default: // SCP_ERR("Error sensor hub action"); // break; // } // // return 0; //} ///*----------------------------------------------------------------------------*/ //static int SCP_sensorHub_sig_handler(void* data, uint len) //{ // SCP_SENSOR_HUB_DATA_P rsp = (SCP_SENSOR_HUB_DATA_P)data; // // if (SCP_TRC_FUN == atomic_read(&(obj_data->trace))) // SCP_FUN(); // // if (SCP_TRC_IPI == atomic_read(&(obj_data->trace))) // SCP_LOG("len = %d, type = %d, action = %d, errCode = %d\n", len, rsp->rsp.sensorType, rsp->rsp.action, rsp->rsp.errCode); // // if(!obj_data) // { // return -1; // } // // switch(rsp->rsp.action) // { // case SENSOR_HUB_NOTIFY: // switch(rsp->notify_rsp.event) // { // case SCP_NOTIFY: // if (ID_SIGNIFICANT_MOTION == rsp->notify_rsp.sensorType) // { // schedule_work(&(obj_data->sig_work)); // } // else // { // SCP_ERR("Unknow notify"); // } // break; // default: // SCP_ERR("Error sensor hub notify"); // break; // } // break; // default: // SCP_ERR("Error sensor hub action"); // break; // } // // return 0; //} /*----------------------------------------------------------------------------*/ static int step_counter_open_report_data(int open)//open data rerport to HAL { if (SCP_TRC_FUN & atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } /*----------------------------------------------------------------------------*/ static int step_counter_enable_nodata(int en)//only enable not report event to HAL { if (SCP_TRC_FUN & atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_enable(ID_STEP_COUNTER, en); } /*----------------------------------------------------------------------------*/ static int step_detect_enable(int en) { if (SCP_TRC_FUN & atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_enable(ID_STEP_DETECTOR, en); } 
/*----------------------------------------------------------------------------*/ static int significant_motion_enable(int en) { if (SCP_TRC_FUN & atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_enable(ID_SIGNIFICANT_MOTION, en); } /*----------------------------------------------------------------------------*/ static int step_counter_set_delay(u64 delay) { if (SCP_TRC_FUN & atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } /*----------------------------------------------------------------------------*/ static int step_counter_get_data(u32 *value, int *status) { if (SCP_TRC_FUN & atomic_read(&(obj_data->trace))) SCP_FUN(); *status = 3; return SCP_sensor_get_data32(ID_STEP_COUNTER, value, status); } /*----------------------------------------------------------------------------*/ static int step_detect_get_data(u32 *value , int *status) { if (SCP_TRC_FUN & atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_get_data32(ID_STEP_DETECTOR, value, status); } /*----------------------------------------------------------------------------*/ static int significant_motion_get_data(u32 *value , int *status) { if (SCP_TRC_FUN & atomic_read(&(obj_data->trace))) SCP_FUN(); return SCP_sensor_get_data32(ID_SIGNIFICANT_MOTION, value, status); } /*----------------------------------------------------------------------------*/ static int SCP_sensorHub_step_counter_init() { struct step_c_control_path ctl={0}; struct step_c_data_path data={0}; int err=0; if (SCP_TRC_FUN & atomic_read(&(obj_data->trace))) SCP_FUN(); //register step ctl.open_report_data= step_counter_open_report_data; ctl.enable_nodata = step_counter_enable_nodata; ctl.set_delay = step_counter_set_delay; ctl.is_report_input_direct = false; ctl.is_support_batch = true; ctl.enable_significant = significant_motion_enable; ctl.enable_step_detect = step_detect_enable; err = step_c_register_control_path(&ctl); if(err) { printk("register step_counter control path err\n"); return -1; } data.get_data = 
step_counter_get_data; data.get_data_significant = significant_motion_get_data; data.get_data_step_d = step_detect_get_data; data.vender_div = 1; err = step_c_register_data_path(&data); if(err) { printk("register step counter data path err\n"); return -1; } SCP_sensorHub_rsp_registration(ID_SIGNIFICANT_MOTION, SCP_sensorHub_notify_handler); SCP_sensorHub_rsp_registration(ID_STEP_DETECTOR, SCP_sensorHub_notify_handler); return 0; } /*----------------------------------------------------------------------------*/ static int SCP_sensorHub_step_counter_uninit() { if (SCP_TRC_FUN & atomic_read(&(obj_data->trace))) SCP_FUN(); return 0; } #endif //#ifdef CONFIG_CUSTOM_KERNEL_STEP_COUNTER /*----------------------------------------------------------------------------*/ static int SCP_sensorHub_local_init(void) { SCP_sensorHub_probe(); if(-1 == SCP_sensorHub_init_flag) { return -1; } return 0; } /*----------------------------------------------------------------------------*/ static int __init SCP_sensorHub_init(void) { SCP_FUN(); batch_driver_add(&SCP_sensorHub_init_info); #ifdef CONFIG_CUSTOM_KERNEL_STEP_COUNTER step_c_driver_add(&SCP_step_counter_init_info); #endif //#ifdef CONFIG_CUSTOM_KERNEL_STEP_COUNTER #ifdef CONFIG_CUSTOM_KERNEL_IN_POCKET_SENSOR inpk_driver_add(&SCP_in_pocket_init_info); #endif //#ifdef CONFIG_CUSTOM_KERNEL_IN_POCKET_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_PEDOMETER pdr_driver_add(&SCP_pedometer_init_info); #endif //#ifdef CONFIG_CUSTOM_KERNEL_PEDOMETER #ifdef CONFIG_CUSTOM_KERNEL_ACTIVITY_SENSOR act_driver_add(&SCP_activity_init_info); #endif //#ifdef CONFIG_CUSTOM_KERNEL_ACTIVITY_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_SHAKE_SENSOR shk_driver_add(&SCP_shake_init_info); #endif //#ifdef CONFIG_CUSTOM_KERNEL_SHAKE_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_PICK_UP_SENSOR pkup_driver_add(&SCP_pick_up_init_info); #endif //#ifdef CONFIG_CUSTOM_KERNEL_PICK_UP_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_FACE_DOWN_SENSOR fdn_driver_add(&SCP_face_down_init_info); #endif //#ifdef 
CONFIG_CUSTOM_KERNEL_FACE_DOWN_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_HEART_RATE_SENSOR hrm_driver_add(&SCP_heart_rate_init_info); #endif //#ifdef CONFIG_CUSTOM_KERNEL_HEART_RATE_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_TILT_DETECTOR_SENSOR tilt_driver_add(&SCP_tilt_detector_init_info); #endif //#ifdef CONFIG_CUSTOM_KERNEL_TILT_DETECTOR_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_WAKE_GESTURE_SENSOR wag_driver_add(&SCP_wake_gesture_init_info); #endif //#ifdef CONFIG_CUSTOM_KERNEL_WAKE_GESTURE_SENSOR #ifdef CONFIG_CUSTOM_KERNEL_GLANCE_GESTURE_SENSOR glg_driver_add(&SCP_glance_gesture_init_info); #endif //#ifdef CONFIG_CUSTOM_KERNEL_GLANCE_GESTURE_SENSOR return 0; } /*----------------------------------------------------------------------------*/ static void __exit SCP_sensorHub_exit(void) { SCP_FUN(); } /*----------------------------------------------------------------------------*/ //late_initcall(SCP_sensorHub_init); module_init(SCP_sensorHub_init); module_exit(SCP_sensorHub_exit); /*----------------------------------------------------------------------------*/ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SCP sensor hub driver"); MODULE_AUTHOR("andrew.yang@mediatek.com");
gpl-2.0
CoreSecurity/pycodin
qemu-0.12.3/qemu-malloc.c
11
2445
/*
 * malloc-like functions for system emulation.
 *
 * Copyright (c) 2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include <stdlib.h>
#include <string.h>             /* memcpy/memchr/strlen used below */

/* Return ptr unchanged, aborting the process if it is NULL (out of memory).
 * Every allocator below funnels its result through this check. */
static void *oom_check(void *ptr)
{
    if (ptr == NULL) {
        abort();
    }
    return ptr;
}

/* Legacy hook kept for interface compatibility; always reports "no hint". */
void *get_mmap_addr(unsigned long size)
{
    (void)size;                 /* unused; silences -Wunused-parameter */
    return NULL;
}

/* Counterpart of qemu_malloc/qemu_mallocz; NULL is accepted (free semantics). */
void qemu_free(void *ptr)
{
    free(ptr);
}

/* Whether zero-byte allocations are tolerated (build-time option). */
static int allow_zero_malloc(void)
{
#if defined(CONFIG_ZERO_MALLOC)
    return 1;
#else
    return 0;
#endif
}

/* malloc() that never returns NULL: aborts on OOM, and aborts on size == 0
 * unless CONFIG_ZERO_MALLOC is set (then a 1-byte block is returned). */
void *qemu_malloc(size_t size)
{
    if (!size && !allow_zero_malloc()) {
        abort();
    }
    return oom_check(malloc(size ? size : 1));
}

/* realloc() with the same OOM/zero-size policy as qemu_malloc(). */
void *qemu_realloc(void *ptr, size_t size)
{
    if (size) {
        return oom_check(realloc(ptr, size));
    } else if (allow_zero_malloc()) {
        /* size == 0 is allowed: shrink to a minimal 1-byte allocation. */
        return oom_check(realloc(ptr, 1));
    }
    abort();
}

/* Zero-initialized allocation with qemu_malloc() semantics.
 * Uses calloc() so the memory is zeroed in a single pass (previously
 * malloc() followed by memset()); zero-size policy is preserved. */
void *qemu_mallocz(size_t size)
{
    if (!size && !allow_zero_malloc()) {
        abort();
    }
    return oom_check(calloc(1, size ? size : 1));
}

/* strdup() that aborts on OOM instead of returning NULL. */
char *qemu_strdup(const char *str)
{
    size_t len = strlen(str);
    char *ptr = (char *)qemu_malloc(len + 1);

    memcpy(ptr, str, len + 1);
    return ptr;
}

/* Copy at most size bytes of str (stopping early at a NUL) into a fresh
 * NUL-terminated buffer.  Local renamed from 'new' (a C++ keyword) so the
 * file also compiles when pulled into a C++ translation unit. */
char *qemu_strndup(const char *str, size_t size)
{
    const char *end = (const char *)memchr(str, 0, size);
    char *dup;

    if (end) {
        size = end - str;       /* str is shorter than size: copy up to NUL */
    }

    dup = (char *)qemu_malloc(size + 1);
    dup[size] = 0;
    return (char *)memcpy(dup, str, size);
}
gpl-2.0
kito-cheng/riscv-gcc
gcc/testsuite/gcc.dg/gomp/declare-simd-5.c
11
1197
/* Test parsing of #pragma omp declare simd */
/* { dg-do compile } */
/* Each extern declaration below is deliberately ill-formed in its context;
   the dg-error comments state the exact diagnostics GCC must emit.
   NOTE(review): line layout was reconstructed from a flattened copy — the
   relative dg-error reference (.-1) assumes the comment sits on the line
   directly after the statement it checks; confirm against upstream.  */

int f1 (int x)
{
  if (x)
#pragma omp declare simd simdlen (8) aligned (b : 8 * sizeof (int))
    extern int f3 (int a, int *b, int c); /* { dg-error "must be followed by function declaration or definition" } */
  while (x < 10)
#pragma omp declare simd simdlen (8) aligned (b : 8 * sizeof (int))
    extern int f4 (int a, int *b, int c); /* { dg-error "must be followed by function declaration or definition" } */
  {
   lab:
#pragma omp declare simd simdlen (8) aligned (b : 8 * sizeof (int))
    extern int f5 (int a, int *b, int c); /* { dg-error "must be followed by function declaration or definition" } */
    x++;
    /* { dg-error "expected expression before" "" { target *-*-* } .-1 } */
  }
  return x;
}

int f2 (int x)
{
  if (x)
    extern int f6 (int a, int *b, int c); /* { dg-error "expected expression before" } */
  while (x < 10)
    extern int f7 (int a, int *b, int c); /* { dg-error "expected expression before" } */
  {
   lab:
    extern int f8 (int a, int *b, int c); /* { dg-error "a label can only be part of a statement and a declaration is not a statement" } */
    x++;
  }
  return x;
}
gpl-2.0
romansavrulin/nixduino
board/micronas/vct/ehci.c
267
3445
/*
 * (C) Copyright 2009 Stefan Roese <sr@denx.de>, DENX Software Engineering
 *
 * Original Author Guenter Gebhardt
 * Copyright (C) 2006 Micronas GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include "vct.h"

/*
 * Bring up the VCT EHCI host controller: power/clock/reset sequencing of
 * the USB blocks, busy-wait for the USB PHY, SCC DMA channel setup, and
 * finally programming of the EHCI host registers.
 *
 * @hccr: filled with the address of the EHCI capability registers
 * @hcor: filled with the address of the EHCI operational registers
 *
 * Returns 0 on success, or the non-zero scc_reset() error code on failure.
 *
 * The order of the dcgu_*/scc_* calls below is a hardware bring-up
 * sequence; do not reorder.
 */
int vct_ehci_hcd_init(u32 *hccr, u32 *hcor)
{
	int retval;
	u32 val;
	u32 addr;

	/* Assert resets, enable clocks, then release the 24MHz-domain reset */
	dcgu_set_reset_switch(DCGU_HW_MODULE_USB_24, DCGU_SWITCH_ON);
	dcgu_set_reset_switch(DCGU_HW_MODULE_USB_60, DCGU_SWITCH_ON);
	dcgu_set_clk_switch(DCGU_HW_MODULE_USB_24, DCGU_SWITCH_ON);
	dcgu_set_clk_switch(DCGU_HW_MODULE_USB_PLL, DCGU_SWITCH_ON);
	dcgu_set_reset_switch(DCGU_HW_MODULE_USB_24, DCGU_SWITCH_OFF);

	/*
	 * Wait until (DCGU_USBPHY_STAT == 7).
	 * NOTE(review): unbounded busy-wait — hangs forever if the PHY
	 * never reports ready; no timeout in the original code.
	 */
	addr = DCGU_USBPHY_STAT(DCGU_BASE);
	val = reg_read(addr);
	while (val != 7)
		val = reg_read(addr);

	/* PHY is up: enable the 60MHz clock and release its reset */
	dcgu_set_clk_switch(DCGU_HW_MODULE_USB_60, DCGU_SWITCH_ON);
	dcgu_set_reset_switch(DCGU_HW_MODULE_USB_60, DCGU_SWITCH_OFF);

	/* Reset both SCC channels used by USB (USB itself and CPU1 SPDMA) */
	retval = scc_reset(SCC_USB_RW, 0);
	if (retval) {
		printf("scc_reset(SCC_USB_RW, 0) returned: 0x%x\n", retval);
		return retval;
	} else {
		retval = scc_reset(SCC_CPU1_SPDMA_RW, 0);
		if (retval) {
			printf("scc_reset(SCC_CPU1_SPDMA_RW, 0) returned: 0x%x\n",
			       retval);
			return retval;
		}
	}

	/* retval == 0 here on every path that did not already return */
	if (!retval) {
		/*
		 * For the AGU bypass, where the SCC client provides full
		 * physical address
		 */
		scc_set_usb_address_generation_mode(1);

		/* Cross-wire the two buffers: USB-read pairs with SPDMA-write
		 * on BUFFER_1, USB-write with SPDMA-read on BUFFER_0 */
		scc_setup_dma(SCC_USB_RW, BCU_USB_BUFFER_1,
			      DMA_LINEAR, USE_NO_FH, DMA_READ, 0);
		scc_setup_dma(SCC_CPU1_SPDMA_RW, BCU_USB_BUFFER_1,
			      DMA_LINEAR, USE_NO_FH, DMA_WRITE, 0);
		scc_setup_dma(SCC_USB_RW, BCU_USB_BUFFER_0,
			      DMA_LINEAR, USE_NO_FH, DMA_WRITE, 0);
		scc_setup_dma(SCC_CPU1_SPDMA_RW, BCU_USB_BUFFER_0,
			      DMA_LINEAR, USE_NO_FH, DMA_READ, 0);

		/* Enable memory interface */
		scc_enable(SCC_USB_RW, 1);

		/* Start (start_cmd=0) DMAs */
		scc_dma_cmd(SCC_USB_RW, DMA_START, 0, DMA_READ);
		scc_dma_cmd(SCC_USB_RW, DMA_START, 0, DMA_WRITE);
	} else {
		printf("Cannot configure USB memory channel.\n");
		printf("USB can not access RAM. SCC configuration failed.\n");
		return retval;
	}

	/* Wait a short while (300 ms settle time before touching the host) */
	udelay(300000);

	reg_write(USBH_BURSTSIZE(USBH_BASE), 0x00001c1c);

	/* Set EHCI structures and DATA in RAM */
	reg_write(USBH_USBHMISC(USBH_BASE), 0x00840003);

	/* Set USBMODE to bigendian and set host mode */
	reg_write(USBH_USBMODE(USBH_BASE), 0x00000007);

	/*
	 * USBH_BURSTSIZE MUST EQUAL 0x00001c1c in order for
	 * 512 byte USB transfers on the bulk pipe to work properly.
	 * Set USBH_BURSTSIZE to 0x00001c1c
	 */
	reg_write(USBH_BURSTSIZE(USBH_BASE), 0x00001c1c);

	/* Insert access register addresses */
	*hccr = REG_GLOBAL_START_ADDR + USBH_CAPLENGTH(USBH_BASE);
	*hcor = REG_GLOBAL_START_ADDR + USBH_USBCMD(USBH_BASE);

	return 0;
}
gpl-2.0
TangCheng/hisi351x_uboot
board/micronas/vct/scc.c
267
24755
/* * (C) Copyright 2009 Stefan Roese <sr@denx.de>, DENX Software Engineering * * Copyright (C) 2006 Micronas GmbH * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ #include <common.h> #include <asm/errno.h> #include "vct.h" /* * List of statically defined buffers per SCC. * The first entry in the table is the number of fixed buffers * followed by the list of buffer IDs */ static u32 buffer_list_0[] = { 6, 120, 121, 122, 123, 139, 140 }; static u32 buffer_list_1[] = { 6, 120, 121, 122, 123, 139, 140 }; static u32 buffer_list_2[] = { 5, 124, 125, 126, 139, 140 }; static u32 buffer_list_3[] = { 5, 124, 125, 126, 139, 140 }; static u32 buffer_list_4[] = { 5, 124, 125, 126, 139, 140 }; static u32 buffer_list_5[] = { 3, 127, 139, 140 }; static u32 buffer_list_6[] = { 3, 127, 139, 140 }; static u32 buffer_list_7[] = { 6, 128, 129, 130, 131, 139, 140 }; static u32 buffer_list_8[] = { 6, 128, 129, 130, 131, 139, 140 }; static u32 buffer_list_9[] = { 5, 124, 125, 126, 139, 140 }; static u32 buffer_list_10[] = { 5, 124, 125, 126, 139, 140 }; static u32 buffer_list_11[] = { 5, 124, 125, 126, 139, 140 }; static u32 buffer_list_12[] = { 6, 132, 133, 134, 135, 139, 140 }; static u32 buffer_list_13[] = { 6, 132, 133, 134, 135, 139, 140 }; static u32 buffer_list_14[] = { 4, 137, 138, 139, 140 }; static u32 buffer_list_15[] = { 6, 136, 136, 
137, 138, 139, 140 }; /** Issue#7674 (new) - DP/DVP buffer assignment */ static u32 buffer_list_16[] = { 6, 106, 108, 109, 107, 139, 140 }; static u32 buffer_list_17[] = { 6, 106, 110, 107, 111, 139, 140 }; static u32 buffer_list_18[] = { 6, 106, 113, 107, 114, 139, 140 }; static u32 buffer_list_19[] = { 3, 112, 139, 140 }; static u32 buffer_list_20[] = { 35, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 79, 80, 81, 82, 83, 84, 85, 86, 139, 140 }; static u32 buffer_list_21[] = { 27, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 139, 140 }; static u32 buffer_list_22[] = { 81, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 139, 140 }; static u32 buffer_list_23[] = { 29, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 88, 89, 139, 140 }; static u32 buffer_list_24[] = { 6, 90, 91, 92, 93, 139, 140 }; static u32 buffer_list_25[] = { 18, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 139, 140 }; static u32 buffer_list_26[] = { 5, 94, 95, 96, 139, 140 }; static u32 buffer_list_27[] = { 5, 97, 98, 99, 139, 140 }; static u32 buffer_list_28[] = { 5, 100, 101, 102, 139, 140 }; static u32 buffer_list_29[] = { 5, 103, 104, 105, 139, 140 }; static u32 buffer_list_30[] = { 10, 108, 109, 110, 111, 113, 114, 116, 117, 139, 140 }; static u32 buffer_list_31[] = { 13, 106, 107, 108, 109, 110, 111, 113, 114, 115, 116, 117, 139, 140 }; static u32 buffer_list_32[] = { 13, 106, 107, 108, 109, 110, 111, 113, 114, 115, 116, 117, 139, 140 }; static u32 buffer_list_33[] = { 27, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 139, 140 }; static u32 
buffer_list_34[] = { 27, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 139, 140 }; static u32 buffer_list_35[] = { 28, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 87, 139, 140 }; static u32 buffer_list_36[] = { 28, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 87, 139, 140 }; static u32 buffer_list_37[] = { 27, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 139, 140 }; static u32 buffer_list_38[] = { 29, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 118, 119, 139, 140 }; static u32 buffer_list_39[] = { 91, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 118, 119, 139, 140 }; static u32 buffer_list_40[] = { 0 }; /* * List of statically defined vcid.csize values. * The first entry in the table is the number of possible csize values * followed by the list of data path values in bits. 
*/ static u32 csize_list_0[] = { 2, 0, 1 }; static u32 csize_list_1[] = { 2, 0, 1 }; static u32 csize_list_2[] = { 1, 1 }; static u32 csize_list_3[] = { 1, 1 }; static u32 csize_list_4[] = { 1, 1 }; static u32 csize_list_5[] = { 1, 0 }; static u32 csize_list_6[] = { 1, 0 }; static u32 csize_list_7[] = { 1, 1 }; static u32 csize_list_8[] = { 1, 1 }; static u32 csize_list_9[] = { 1, 1 }; static u32 csize_list_10[] = { 1, 1 }; static u32 csize_list_11[] = { 1, 1 }; static u32 csize_list_12[] = { 1, 1 }; static u32 csize_list_13[] = { 1, 1 }; static u32 csize_list_14[] = { 1, 2 }; static u32 csize_list_15[] = { 1, 4 }; static u32 csize_list_16[] = { 3, 0, 1, 2 }; static u32 csize_list_17[] = { 3, 0, 1, 2 }; static u32 csize_list_18[] = { 3, 0, 1, 2 }; static u32 csize_list_19[] = { 1, 2 }; static u32 csize_list_20[] = { 1, 0 }; static u32 csize_list_21[] = { 1, 0 }; static u32 csize_list_22[] = { 1, 2 }; static u32 csize_list_23[] = { 1, 3 }; static u32 csize_list_24[] = { 1, 3 }; static u32 csize_list_25[] = { 1, 3 }; static u32 csize_list_26[] = { 1, 0 }; static u32 csize_list_27[] = { 1, 0 }; static u32 csize_list_28[] = { 1, 0 }; static u32 csize_list_29[] = { 1, 0 }; static u32 csize_list_30[] = { 1, 2 }; static u32 csize_list_31[] = { 1, 2 }; static u32 csize_list_32[] = { 1, 2 }; static u32 csize_list_33[] = { 1, 2 }; static u32 csize_list_34[] = { 1, 2 }; static u32 csize_list_35[] = { 1, 2 }; static u32 csize_list_36[] = { 1, 2 }; static u32 csize_list_37[] = { 2, 0, 1 }; static u32 csize_list_38[] = { 1, 2 }; static u32 csize_list_39[] = { 1, 3 }; static u32 csize_list_40[] = { 1, 3 }; /* * SCC_Configuration table */ static const struct scc_descriptor scc_descriptor_table[] = { /* scn scc_name profile SCC scc_id mci_id rd wr m p fh si cfg sta */ {"fe_", "fe_3dcomb_wr", STRM_P, SCC0_BASE, 0, 0, 0, 4, 1, 1, 0, 0, 0, 1, buffer_list_0, csize_list_0}, {"fe_", "fe_3dcomb_rd", STRM_P, SCC1_BASE, 1, 18, 4, 0, 1, 1, 0, 1, 0, 1, buffer_list_1, csize_list_1}, {"di_", 
"di_tnr_wr", STRM_P, SCC2_BASE, 2, 1, 0, 3, 1, 1, 0, 2, 0, 1, buffer_list_2, csize_list_2}, {"di_", "di_tnr_field_rd", STRM_P, SCC3_BASE, 3, 19, 3, 0, 1, 1, 0, 3, 0, 1, buffer_list_3, csize_list_3}, {"di_", "di_tnr_frame_rd", STRM_P, SCC4_BASE, 4, 20, 3, 0, 1, 1, 0, 4, 0, 1, buffer_list_4, csize_list_4}, {"di_", "di_mval_wr", STRM_P, SCC5_BASE, 5, 2, 0, 1, 1, 1, 0, 5, 0, 1, buffer_list_5, csize_list_5}, {"di_", "di_mval_rd", STRM_P, SCC6_BASE, 6, 21, 1, 0, 1, 1, 0, 6, 0, 1, buffer_list_6, csize_list_6}, {"rc_", "rc_frame_wr", STRM_P, SCC7_BASE, 7, 3, 0, 4, 1, 1, 0, 7, 0, 1, buffer_list_7, csize_list_7}, {"rc_", "rc_frame0_rd", STRM_P, SCC8_BASE, 8, 22, 4, 0, 1, 1, 0, 8, 0, 1, buffer_list_8, csize_list_8}, {"opt", "opt_field0_rd", STRM_P, SCC9_BASE, 9, 23, 3, 0, 1, 1, 0, 9, 0, 1, buffer_list_9, csize_list_9}, {"opt", "opt_field1_rd", STRM_P, SCC10_BASE, 10, 24, 3, 0, 1, 1, 0, 10, 0, 1, buffer_list_10, csize_list_10}, {"opt", "opt_field2_rd", STRM_P, SCC11_BASE, 11, 25, 3, 0, 1, 1, 0, 11, 0, 1, buffer_list_11, csize_list_11}, {"pip", "pip_frame_wr", STRM_P, SCC12_BASE, 12, 4, 0, 4, 1, 1, 0, 12, 0, 1, buffer_list_12, csize_list_12}, {"pip", "pip_frame_rd", STRM_P, SCC13_BASE, 13, 26, 4, 0, 1, 1, 0, 13, 0, 1, buffer_list_13, csize_list_13}, {"dp_", "dp_agpu_rd", STRM_P, SCC14_BASE, 14, 27, 2, 0, 2, 1, 0, 14, 0, 1, buffer_list_14, csize_list_14}, {"ewa", "ewarp_rw", SRMD, SCC15_BASE, 15, 11, 1, 1, 0, 0, 0, -1, 0, 0, buffer_list_15, csize_list_15}, {"dp_", "dp_osd_rd", STRM_P, SCC16_BASE, 16, 28, 3, 0, 2, 1, 0, 15, 0, 1, buffer_list_16, csize_list_16}, {"dp_", "dp_graphic_rd", STRM_P, SCC17_BASE, 17, 29, 3, 0, 2, 1, 0, 16, 0, 1, buffer_list_17, csize_list_17}, {"dvp", "dvp_osd_rd", STRM_P, SCC18_BASE, 18, 30, 2, 0, 2, 1, 0, 17, 0, 1, buffer_list_18, csize_list_18}, {"dvp", "dvp_vbi_rd", STRM_D, SCC19_BASE, 19, 31, 1, 0, 0, 1, 0, -1, 0, 0, buffer_list_19, csize_list_19}, {"tsi", "tsio_wr", STRM_P, SCC20_BASE, 20, 5, 0, 8, 2, 1, 1, -1, 0, 0, buffer_list_20, csize_list_20}, 
{"tsi", "tsio_rd", STRM_P, SCC21_BASE, 21, 32, 4, 0, 2, 1, 1, -1, 0, 0, buffer_list_21, csize_list_21}, {"tsd", "tsd_wr", SRMD, SCC22_BASE, 22, 6, 0, 64, 0, 0, 1, -1, 0, 0, buffer_list_22, csize_list_22}, {"vd_", "vd_ud_st_rw", SRMD, SCC23_BASE, 23, 12, 2, 2, 0, 0, 1, -1, 0, 0, buffer_list_23, csize_list_23}, {"vd_", "vd_frr_rd", SRMD, SCC24_BASE, 24, 33, 4, 0, 0, 0, 0, -1, 0, 0, buffer_list_24, csize_list_24}, {"vd_", "vd_frw_disp_wr", SRMD, SCC25_BASE, 25, 7, 0, 16, 0, 0, 0, -1, 0, 0, buffer_list_25, csize_list_25}, {"mr_", "mr_vd_m_y_rd", STRM_P, SCC26_BASE, 26, 34, 3, 0, 2, 1, 0, 18, 0, 1, buffer_list_26, csize_list_26}, {"mr_", "mr_vd_m_c_rd", STRM_P, SCC27_BASE, 27, 35, 3, 0, 2, 1, 0, 19, 0, 1, buffer_list_27, csize_list_27}, {"mr_", "mr_vd_s_y_rd", STRM_P, SCC28_BASE, 28, 36, 3, 0, 2, 1, 0, 20, 0, 1, buffer_list_28, csize_list_28}, {"mr_", "mr_vd_s_c_rd", STRM_P, SCC29_BASE, 29, 37, 3, 0, 2, 1, 0, 21, 0, 1, buffer_list_29, csize_list_29}, {"ga_", "ga_wr", STRM_P, SCC30_BASE, 30, 8, 0, 1, 1, 1, 0, -1, 1, 1, buffer_list_30, csize_list_30}, {"ga_", "ga_src1_rd", STRM_P, SCC31_BASE, 31, 38, 1, 0, 1, 1, 0, -1, 1, 1, buffer_list_31, csize_list_31}, {"ga_", "ga_src2_rd", STRM_P, SCC32_BASE, 32, 39, 1, 0, 1, 1, 0, -1, 1, 1, buffer_list_32, csize_list_32}, {"ad_", "ad_rd", STRM_D, SCC33_BASE, 33, 40, 2, 0, 0, 1, 1, -1, 0, 0, buffer_list_33, csize_list_33}, {"ad_", "ad_wr", STRM_D, SCC34_BASE, 34, 9, 0, 3, 0, 1, 1, -1, 0, 0, buffer_list_34, csize_list_34}, {"abp", "abp_rd", STRM_D, SCC35_BASE, 35, 41, 5, 0, 0, 1, 1, -1, 0, 0, buffer_list_35, csize_list_35}, {"abp", "abp_wr", STRM_D, SCC36_BASE, 36, 10, 0, 3, 0, 1, 1, -1, 0, 0, buffer_list_36, csize_list_36}, {"ebi", "ebi_rw", STRM_P, SCC37_BASE, 37, 13, 4, 4, 2, 1, 1, -1, 0, 0, buffer_list_37, csize_list_37}, {"usb", "usb_rw", SRMD, SCC38_BASE, 38, 14, 1, 1, 0, 0, 1, -1, 0, 0, buffer_list_38, csize_list_38}, {"cpu", "cpu1_spdma_rw", SRMD, SCC39_BASE, 39, 15, 1, 1, 0, 0, 1, -1, 0, 0, buffer_list_39, csize_list_39}, 
{"cpu", "cpu1_bridge_rw", SRMD, SCC40_BASE, 40, 16, 0, 0, 0, 0, 0, -1, 0, 0, buffer_list_40, csize_list_40}, }; /* DMA state structures for read and write channels for each SCC */ static struct scc_dma_state scc_state_rd_0[] = { {-1} }; static struct scc_dma_state scc_state_wr_0[] = { {0}, {0}, {0}, {0} }; static struct scc_dma_state scc_state_rd_1[] = { {0}, {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_1[] = { {-1} }; static struct scc_dma_state scc_state_rd_2[] = { {-1} }; static struct scc_dma_state scc_state_wr_2[] = { {0}, {0}, {0} }; static struct scc_dma_state scc_state_rd_3[] = { {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_3[] = { {-1} }; static struct scc_dma_state scc_state_rd_4[] = { {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_4[] = { {-1} }; static struct scc_dma_state scc_state_rd_5[] = { {-1} }; static struct scc_dma_state scc_state_wr_5[] = { {0} }; static struct scc_dma_state scc_state_rd_6[] = { {0} }; static struct scc_dma_state scc_state_wr_6[] = { {-1} }; static struct scc_dma_state scc_state_rd_7[] = { {-1} }; static struct scc_dma_state scc_state_wr_7[] = { {0}, {0}, {0}, {0} }; static struct scc_dma_state scc_state_rd_8[] = { {0}, {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_8[] = { {-1} }; static struct scc_dma_state scc_state_rd_9[] = { {0}, {0}, {0}, }; static struct scc_dma_state scc_state_wr_9[] = { {-1} }; static struct scc_dma_state scc_state_rd_10[] = { {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_10[] = { {-1} }; static struct scc_dma_state scc_state_rd_11[] = { {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_11[] = { {-1} }; static struct scc_dma_state scc_state_rd_12[] = { {-1} }; static struct scc_dma_state scc_state_wr_12[] = { {0}, {0}, {0}, {0} }; static struct scc_dma_state scc_state_rd_13[] = { {0}, {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_13[] = { {-1} }; static struct scc_dma_state scc_state_rd_14[] = { {0}, {0} }; static struct 
scc_dma_state scc_state_wr_14[] = { {-1} }; static struct scc_dma_state scc_state_rd_15[] = { {0} }; static struct scc_dma_state scc_state_wr_15[] = { {0} }; static struct scc_dma_state scc_state_rd_16[] = { {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_16[] = { {-1} }; static struct scc_dma_state scc_state_rd_17[] = { {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_17[] = { {-1} }; static struct scc_dma_state scc_state_rd_18[] = { {0}, {0} }; static struct scc_dma_state scc_state_wr_18[] = { {-1} }; static struct scc_dma_state scc_state_rd_19[] = { {0} }; static struct scc_dma_state scc_state_wr_19[] = { {-1} }; static struct scc_dma_state scc_state_rd_20[] = { {-1} }; static struct scc_dma_state scc_state_wr_20[] = { {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0} }; static struct scc_dma_state scc_state_rd_21[] = { {0}, {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_21[] = { {-1} }; static struct scc_dma_state scc_state_rd_22[] = { {-1} }; static struct scc_dma_state scc_state_wr_22[] = { {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0} }; static struct scc_dma_state scc_state_rd_23[] = { {0}, {0} }; static struct scc_dma_state scc_state_wr_23[] = { {0}, {0} }; static struct scc_dma_state scc_state_rd_24[] = { {0}, {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_24[] = { {-1} }; static struct scc_dma_state scc_state_rd_25[] = { {-1} }; static struct scc_dma_state scc_state_wr_25[] = { {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0} }; static struct scc_dma_state scc_state_rd_26[] = { {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_26[] = { {-1} }; static struct scc_dma_state scc_state_rd_27[] = { {0}, {0}, {0} }; static 
struct scc_dma_state scc_state_wr_27[] = { {-1} }; static struct scc_dma_state scc_state_rd_28[] = { {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_28[] = { {-1} }; static struct scc_dma_state scc_state_rd_29[] = { {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_29[] = { {-1} }; static struct scc_dma_state scc_state_rd_30[] = { {-1} }; static struct scc_dma_state scc_state_wr_30[] = { {0} }; static struct scc_dma_state scc_state_rd_31[] = { {0} }; static struct scc_dma_state scc_state_wr_31[] = { {-1} }; static struct scc_dma_state scc_state_rd_32[] = { {0} }; static struct scc_dma_state scc_state_wr_32[] = { {-1} }; static struct scc_dma_state scc_state_rd_33[] = { {0}, {0} }; static struct scc_dma_state scc_state_wr_33[] = { {-1} }; static struct scc_dma_state scc_state_rd_34[] = { {-1} }; static struct scc_dma_state scc_state_wr_34[] = { {0}, {0}, {0} }; static struct scc_dma_state scc_state_rd_35[] = { {0}, {0}, {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_35[] = { {-1} }; static struct scc_dma_state scc_state_rd_36[] = { {-1} }; static struct scc_dma_state scc_state_wr_36[] = { {0}, {0}, {0} }; static struct scc_dma_state scc_state_rd_37[] = { {0}, {0}, {0}, {0} }; static struct scc_dma_state scc_state_wr_37[] = { {0}, {0}, {0}, {0} }; static struct scc_dma_state scc_state_rd_38[] = { {0} }; static struct scc_dma_state scc_state_wr_38[] = { {0} }; static struct scc_dma_state scc_state_rd_39[] = { {0} }; static struct scc_dma_state scc_state_wr_39[] = { {0} }; static struct scc_dma_state scc_state_rd_40[] = { {-1} }; static struct scc_dma_state scc_state_wr_40[] = { {-1} }; /* DMA state references to access from the driver */ static struct scc_dma_state *scc_state_rd[] = { scc_state_rd_0, scc_state_rd_1, scc_state_rd_2, scc_state_rd_3, scc_state_rd_4, scc_state_rd_5, scc_state_rd_6, scc_state_rd_7, scc_state_rd_8, scc_state_rd_9, scc_state_rd_10, scc_state_rd_11, scc_state_rd_12, scc_state_rd_13, scc_state_rd_14, 
scc_state_rd_15, scc_state_rd_16, scc_state_rd_17, scc_state_rd_18, scc_state_rd_19, scc_state_rd_20, scc_state_rd_21, scc_state_rd_22, scc_state_rd_23, scc_state_rd_24, scc_state_rd_25, scc_state_rd_26, scc_state_rd_27, scc_state_rd_28, scc_state_rd_29, scc_state_rd_30, scc_state_rd_31, scc_state_rd_32, scc_state_rd_33, scc_state_rd_34, scc_state_rd_35, scc_state_rd_36, scc_state_rd_37, scc_state_rd_38, scc_state_rd_39, scc_state_rd_40, }; static struct scc_dma_state *scc_state_wr[] = { scc_state_wr_0, scc_state_wr_1, scc_state_wr_2, scc_state_wr_3, scc_state_wr_4, scc_state_wr_5, scc_state_wr_6, scc_state_wr_7, scc_state_wr_8, scc_state_wr_9, scc_state_wr_10, scc_state_wr_11, scc_state_wr_12, scc_state_wr_13, scc_state_wr_14, scc_state_wr_15, scc_state_wr_16, scc_state_wr_17, scc_state_wr_18, scc_state_wr_19, scc_state_wr_20, scc_state_wr_21, scc_state_wr_22, scc_state_wr_23, scc_state_wr_24, scc_state_wr_25, scc_state_wr_26, scc_state_wr_27, scc_state_wr_28, scc_state_wr_29, scc_state_wr_30, scc_state_wr_31, scc_state_wr_32, scc_state_wr_33, scc_state_wr_34, scc_state_wr_35, scc_state_wr_36, scc_state_wr_37, scc_state_wr_38, scc_state_wr_39, scc_state_wr_40, }; static u32 scc_takeover_mode = SCC_TO_IMMEDIATE; /* Change mode of the SPDMA for given direction */ static u32 scc_agu_mode_sp = AGU_BYPASS; /* Change mode of the USB for given direction */ static u32 scc_agu_mode_usb = AGU_BYPASS; static union scc_softwareconfiguration scc_software_configuration[SCC_MAX]; static u32 dma_fsm[4][4] = { /* DMA_CMD_RESET DMA_CMD_SETUP DMA_CMD_START DMA_CMD_STOP */ /* DMA_STATE_RESET */ {DMA_STATE_RESET, DMA_STATE_SETUP, DMA_STATE_ERROR, DMA_STATE_ERROR}, /* DMA_STATE_SETUP */ {DMA_STATE_RESET, DMA_STATE_SETUP, DMA_STATE_START, DMA_STATE_SETUP}, /* DMA_STATE_START */ {DMA_STATE_RESET, DMA_STATE_ERROR, DMA_STATE_START, DMA_STATE_SETUP}, /* DMA_STATE_ERROR */ {DMA_STATE_RESET, DMA_STATE_ERROR, DMA_STATE_ERROR, DMA_STATE_ERROR}, }; static void dma_state_process(struct 
scc_dma_state *dma_state, u32 cmd) { dma_state->dma_status = dma_fsm[dma_state->dma_status][cmd]; dma_state->dma_cmd = cmd; } static void dma_state_process_dma_command(struct scc_dma_state *dma_state, u32 dma_cmd) { dma_state->dma_cmd = dma_cmd; switch (dma_cmd) { case DMA_START: case DMA_START_FH_RESET: dma_state_process(dma_state, DMA_CMD_START); break; case DMA_STOP: dma_state_process(dma_state, DMA_CMD_STOP); break; default: break; } } static void scc_takeover_dma(enum scc_id id, u32 dma_id, u32 drs) { union scc_cmd dma_cmd; dma_cmd.reg = 0; /* Prepare the takeover for the DMA channel */ dma_cmd.bits.action = DMA_TAKEOVER; dma_cmd.bits.id = dma_id; dma_cmd.bits.rid = TO_DMA_CFG; /* this is DMA_CFG register takeover */ if (drs == DMA_WRITE) dma_cmd.bits.drs = DMA_WRITE; reg_write(SCC_CMD(scc_descriptor_table[id].base_address), dma_cmd.reg); } int scc_dma_cmd(enum scc_id id, u32 cmd, u32 dma_id, u32 drs) { union scc_cmd dma_cmd; struct scc_dma_state *dma_state; if ((id >= SCC_MAX) || (id < 0)) return -EINVAL; dma_cmd.reg = 0; /* Prepare the takeover for the DMA channel */ dma_cmd.bits.action = cmd; dma_cmd.bits.id = dma_id; if (drs == DMA_WRITE) { dma_cmd.bits.drs = DMA_WRITE; dma_state = &scc_state_wr[id][dma_id]; } else { dma_state = &scc_state_rd[id][dma_id]; } dma_state->scc_id = id; dma_state->dma_id = dma_id; dma_state_process_dma_command(dma_state, cmd); reg_write(SCC_CMD(scc_descriptor_table[id].base_address), dma_cmd.reg); return 0; } int scc_set_usb_address_generation_mode(u32 agu_mode) { if (AGU_ACTIVE == agu_mode) { /* Ensure both DMAs are stopped */ scc_dma_cmd(SCC_USB_RW, DMA_STOP, 0, DMA_WRITE); scc_dma_cmd(SCC_USB_RW, DMA_STOP, 0, DMA_READ); } else { agu_mode = AGU_BYPASS; } scc_agu_mode_usb = agu_mode; return 0; } int scc_setup_dma(enum scc_id id, u32 buffer_tag, u32 type, u32 fh_mode, u32 drs, u32 dma_id) { struct scc_dma_state *dma_state; int return_value = 0; union scc_dma_cfg dma_cfg; u32 *buffer_tag_list = 
scc_descriptor_table[id].buffer_tag_list; u32 tag_count, t, t_valid; if ((id >= SCC_MAX) || (id < 0)) return -EINVAL; /* if the register is only configured by hw, cannot write! */ if (1 == scc_descriptor_table[id].hw_dma_cfg) return -EACCES; if (DMA_WRITE == drs) { if (dma_id >= scc_descriptor_table[id].p_dma_channels_wr) return -EINVAL; dma_state = &scc_state_wr[id][dma_id]; } else { if (dma_id >= scc_descriptor_table[id].p_dma_channels_rd) return -EINVAL; dma_state = &scc_state_rd[id][dma_id]; } /* Compose the DMA configuration register */ tag_count = buffer_tag_list[0]; t_valid = 0; for (t = 1; t <= tag_count; t++) { if (buffer_tag == buffer_tag_list[t]) { /* Tag found - validate */ t_valid = 1; break; } } if (!t_valid) return -EACCES; /* * Read the register first -- two functions write into the register * it does not make sense to read the DMA config back, because there * are two register configuration sets (drs) */ dma_cfg.reg = 0; dma_cfg.bits.buffer_id = buffer_tag; dma_state_process(dma_state, DMA_CMD_SETUP); /* * This is Packet CFG set select - usable for TSIO, EBI and those SCCs * which habe 2 packet configs */ dma_cfg.bits.packet_cfg_id = scc_software_configuration[id].bits.packet_select; if (type == DMA_CYCLIC) dma_cfg.bits.buffer_type = 1; else dma_cfg.bits.buffer_type = 0; if (fh_mode == USE_FH) dma_cfg.bits.fh_mode = 1; else dma_cfg.bits.fh_mode = 0; if (id == SCC_CPU1_SPDMA_RW) dma_cfg.bits.agu_mode = scc_agu_mode_sp; if (id == SCC_USB_RW) dma_cfg.bits.agu_mode = scc_agu_mode_usb; reg_write(SCC_DMA_CFG(scc_descriptor_table[id].base_address), dma_cfg.reg); /* The DMA_CFG needs a takeover! 
*/ if (SCC_TO_IMMEDIATE == scc_takeover_mode) scc_takeover_dma(id, dma_id, drs); /* if (buffer_tag is not used) */ dma_state->buffer_tag = buffer_tag; dma_state->scc_id = id; dma_state->dma_id = dma_id; return return_value; } int scc_enable(enum scc_id id, u32 value) { if ((id >= SCC_MAX) || (id < 0)) return -EINVAL; if (value == 0) { scc_software_configuration[id].bits.enable_status = 0; } else { value = 1; scc_software_configuration[id].bits.enable_status = 1; } reg_write(SCC_ENABLE(scc_descriptor_table[id].base_address), value); return 0; } static inline void ehb(void) { __asm__ __volatile__( " .set mips32r2 \n" " ehb \n" " .set mips0 \n"); } int scc_reset(enum scc_id id, u32 value) { if ((id >= SCC_MAX) || (id < 0)) return -EINVAL; /* Invert value to the strait logic from the negative hardware logic */ if (value == 0) value = 1; else value = 0; /* Write the value to the register */ reg_write(SCC_RESET(scc_descriptor_table[id].base_address), value); /* sync flush */ asm("sync"); /* request bus write queue flush */ ehb(); /* wait until previous bus commit instr has finished */ asm("nop"); /* wait for flush to occur */ asm("nop"); /* wait for flush to occur */ udelay(100); return 0; }
gpl-2.0
chadoe/xbmc
lib/libUPnP/Neptune/Source/Data/TLS/Base/NptTlsTrustAnchor_Base_0084.cpp
267
7686
/***************************************************************** | | Neptune - Trust Anchors | | This file is automatically generated by a script, do not edit! | | Copyright (c) 2002-2010, Axiomatic Systems, LLC. | All rights reserved. | | Redistribution and use in source and binary forms, with or without | modification, are permitted provided that the following conditions are met: | * Redistributions of source code must retain the above copyright | notice, this list of conditions and the following disclaimer. | * Redistributions in binary form must reproduce the above copyright | notice, this list of conditions and the following disclaimer in the | documentation and/or other materials provided with the distribution. | * Neither the name of Axiomatic Systems nor the | names of its contributors may be used to endorse or promote products | derived from this software without specific prior written permission. | | THIS SOFTWARE IS PROVIDED BY AXIOMATIC SYSTEMS ''AS IS'' AND ANY | EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | DISCLAIMED. IN NO EVENT SHALL AXIOMATIC SYSTEMS BE LIABLE FOR ANY | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
| ****************************************************************/ /* UTN DATACorp SGC Root CA */ const unsigned char NptTlsTrustAnchor_Base_0084_Data[1122] = { 0x30,0x82,0x04,0x5e,0x30,0x82,0x03,0x46 ,0xa0,0x03,0x02,0x01,0x02,0x02,0x10,0x44 ,0xbe,0x0c,0x8b,0x50,0x00,0x21,0xb4,0x11 ,0xd3,0x2a,0x68,0x06,0xa9,0xad,0x69,0x30 ,0x0d,0x06,0x09,0x2a,0x86,0x48,0x86,0xf7 ,0x0d,0x01,0x01,0x05,0x05,0x00,0x30,0x81 ,0x93,0x31,0x0b,0x30,0x09,0x06,0x03,0x55 ,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x0b ,0x30,0x09,0x06,0x03,0x55,0x04,0x08,0x13 ,0x02,0x55,0x54,0x31,0x17,0x30,0x15,0x06 ,0x03,0x55,0x04,0x07,0x13,0x0e,0x53,0x61 ,0x6c,0x74,0x20,0x4c,0x61,0x6b,0x65,0x20 ,0x43,0x69,0x74,0x79,0x31,0x1e,0x30,0x1c ,0x06,0x03,0x55,0x04,0x0a,0x13,0x15,0x54 ,0x68,0x65,0x20,0x55,0x53,0x45,0x52,0x54 ,0x52,0x55,0x53,0x54,0x20,0x4e,0x65,0x74 ,0x77,0x6f,0x72,0x6b,0x31,0x21,0x30,0x1f ,0x06,0x03,0x55,0x04,0x0b,0x13,0x18,0x68 ,0x74,0x74,0x70,0x3a,0x2f,0x2f,0x77,0x77 ,0x77,0x2e,0x75,0x73,0x65,0x72,0x74,0x72 ,0x75,0x73,0x74,0x2e,0x63,0x6f,0x6d,0x31 ,0x1b,0x30,0x19,0x06,0x03,0x55,0x04,0x03 ,0x13,0x12,0x55,0x54,0x4e,0x20,0x2d,0x20 ,0x44,0x41,0x54,0x41,0x43,0x6f,0x72,0x70 ,0x20,0x53,0x47,0x43,0x30,0x1e,0x17,0x0d ,0x39,0x39,0x30,0x36,0x32,0x34,0x31,0x38 ,0x35,0x37,0x32,0x31,0x5a,0x17,0x0d,0x31 ,0x39,0x30,0x36,0x32,0x34,0x31,0x39,0x30 ,0x36,0x33,0x30,0x5a,0x30,0x81,0x93,0x31 ,0x0b,0x30,0x09,0x06,0x03,0x55,0x04,0x06 ,0x13,0x02,0x55,0x53,0x31,0x0b,0x30,0x09 ,0x06,0x03,0x55,0x04,0x08,0x13,0x02,0x55 ,0x54,0x31,0x17,0x30,0x15,0x06,0x03,0x55 ,0x04,0x07,0x13,0x0e,0x53,0x61,0x6c,0x74 ,0x20,0x4c,0x61,0x6b,0x65,0x20,0x43,0x69 ,0x74,0x79,0x31,0x1e,0x30,0x1c,0x06,0x03 ,0x55,0x04,0x0a,0x13,0x15,0x54,0x68,0x65 ,0x20,0x55,0x53,0x45,0x52,0x54,0x52,0x55 ,0x53,0x54,0x20,0x4e,0x65,0x74,0x77,0x6f ,0x72,0x6b,0x31,0x21,0x30,0x1f,0x06,0x03 ,0x55,0x04,0x0b,0x13,0x18,0x68,0x74,0x74 ,0x70,0x3a,0x2f,0x2f,0x77,0x77,0x77,0x2e ,0x75,0x73,0x65,0x72,0x74,0x72,0x75,0x73 ,0x74,0x2e,0x63,0x6f,0x6d,0x31,0x1b,0x30 
,0x19,0x06,0x03,0x55,0x04,0x03,0x13,0x12 ,0x55,0x54,0x4e,0x20,0x2d,0x20,0x44,0x41 ,0x54,0x41,0x43,0x6f,0x72,0x70,0x20,0x53 ,0x47,0x43,0x30,0x82,0x01,0x22,0x30,0x0d ,0x06,0x09,0x2a,0x86,0x48,0x86,0xf7,0x0d ,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01 ,0x0f,0x00,0x30,0x82,0x01,0x0a,0x02,0x82 ,0x01,0x01,0x00,0xdf,0xee,0x58,0x10,0xa2 ,0x2b,0x6e,0x55,0xc4,0x8e,0xbf,0x2e,0x46 ,0x09,0xe7,0xe0,0x08,0x0f,0x2e,0x2b,0x7a ,0x13,0x94,0x1b,0xbd,0xf6,0xb6,0x80,0x8e ,0x65,0x05,0x93,0x00,0x1e,0xbc,0xaf,0xe2 ,0x0f,0x8e,0x19,0x0d,0x12,0x47,0xec,0xac ,0xad,0xa3,0xfa,0x2e,0x70,0xf8,0xde,0x6e ,0xfb,0x56,0x42,0x15,0x9e,0x2e,0x5c,0xef ,0x23,0xde,0x21,0xb9,0x05,0x76,0x27,0x19 ,0x0f,0x4f,0xd6,0xc3,0x9c,0xb4,0xbe,0x94 ,0x19,0x63,0xf2,0xa6,0x11,0x0a,0xeb,0x53 ,0x48,0x9c,0xbe,0xf2,0x29,0x3b,0x16,0xe8 ,0x1a,0xa0,0x4c,0xa6,0xc9,0xf4,0x18,0x59 ,0x68,0xc0,0x70,0xf2,0x53,0x00,0xc0,0x5e ,0x50,0x82,0xa5,0x56,0x6f,0x36,0xf9,0x4a ,0xe0,0x44,0x86,0xa0,0x4d,0x4e,0xd6,0x47 ,0x6e,0x49,0x4a,0xcb,0x67,0xd7,0xa6,0xc4 ,0x05,0xb9,0x8e,0x1e,0xf4,0xfc,0xff,0xcd ,0xe7,0x36,0xe0,0x9c,0x05,0x6c,0xb2,0x33 ,0x22,0x15,0xd0,0xb4,0xe0,0xcc,0x17,0xc0 ,0xb2,0xc0,0xf4,0xfe,0x32,0x3f,0x29,0x2a ,0x95,0x7b,0xd8,0xf2,0xa7,0x4e,0x0f,0x54 ,0x7c,0xa1,0x0d,0x80,0xb3,0x09,0x03,0xc1 ,0xff,0x5c,0xdd,0x5e,0x9a,0x3e,0xbc,0xae ,0xbc,0x47,0x8a,0x6a,0xae,0x71,0xca,0x1f ,0xb1,0x2a,0xb8,0x5f,0x42,0x05,0x0b,0xec ,0x46,0x30,0xd1,0x72,0x0b,0xca,0xe9,0x56 ,0x6d,0xf5,0xef,0xdf,0x78,0xbe,0x61,0xba ,0xb2,0xa5,0xae,0x04,0x4c,0xbc,0xa8,0xac ,0x69,0x15,0x97,0xbd,0xef,0xeb,0xb4,0x8c ,0xbf,0x35,0xf8,0xd4,0xc3,0xd1,0x28,0x0e ,0x5c,0x3a,0x9f,0x70,0x18,0x33,0x20,0x77 ,0xc4,0xa2,0xaf,0x02,0x03,0x01,0x00,0x01 ,0xa3,0x81,0xab,0x30,0x81,0xa8,0x30,0x0b ,0x06,0x03,0x55,0x1d,0x0f,0x04,0x04,0x03 ,0x02,0x01,0xc6,0x30,0x0f,0x06,0x03,0x55 ,0x1d,0x13,0x01,0x01,0xff,0x04,0x05,0x30 ,0x03,0x01,0x01,0xff,0x30,0x1d,0x06,0x03 ,0x55,0x1d,0x0e,0x04,0x16,0x04,0x14,0x53 ,0x32,0xd1,0xb3,0xcf,0x7f,0xfa,0xe0,0xf1 ,0xa0,0x5d,0x85,0x4e,0x92,0xd2,0x9e,0x45 
,0x1d,0xb4,0x4f,0x30,0x3d,0x06,0x03,0x55 ,0x1d,0x1f,0x04,0x36,0x30,0x34,0x30,0x32 ,0xa0,0x30,0xa0,0x2e,0x86,0x2c,0x68,0x74 ,0x74,0x70,0x3a,0x2f,0x2f,0x63,0x72,0x6c ,0x2e,0x75,0x73,0x65,0x72,0x74,0x72,0x75 ,0x73,0x74,0x2e,0x63,0x6f,0x6d,0x2f,0x55 ,0x54,0x4e,0x2d,0x44,0x41,0x54,0x41,0x43 ,0x6f,0x72,0x70,0x53,0x47,0x43,0x2e,0x63 ,0x72,0x6c,0x30,0x2a,0x06,0x03,0x55,0x1d ,0x25,0x04,0x23,0x30,0x21,0x06,0x08,0x2b ,0x06,0x01,0x05,0x05,0x07,0x03,0x01,0x06 ,0x0a,0x2b,0x06,0x01,0x04,0x01,0x82,0x37 ,0x0a,0x03,0x03,0x06,0x09,0x60,0x86,0x48 ,0x01,0x86,0xf8,0x42,0x04,0x01,0x30,0x0d ,0x06,0x09,0x2a,0x86,0x48,0x86,0xf7,0x0d ,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01 ,0x01,0x00,0x27,0x35,0x97,0x00,0x8a,0x8b ,0x28,0xbd,0xc6,0x33,0x30,0x1e,0x29,0xfc ,0xe2,0xf7,0xd5,0x98,0xd4,0x40,0xbb,0x60 ,0xca,0xbf,0xab,0x17,0x2c,0x09,0x36,0x7f ,0x50,0xfa,0x41,0xdc,0xae,0x96,0x3a,0x0a ,0x23,0x3e,0x89,0x59,0xc9,0xa3,0x07,0xed ,0x1b,0x37,0xad,0xfc,0x7c,0xbe,0x51,0x49 ,0x5a,0xde,0x3a,0x0a,0x54,0x08,0x16,0x45 ,0xc2,0x99,0xb1,0x87,0xcd,0x8c,0x68,0xe0 ,0x69,0x03,0xe9,0xc4,0x4e,0x98,0xb2,0x3b ,0x8c,0x16,0xb3,0x0e,0xa0,0x0c,0x98,0x50 ,0x9b,0x93,0xa9,0x70,0x09,0xc8,0x2c,0xa3 ,0x8f,0xdf,0x02,0xe4,0xe0,0x71,0x3a,0xf1 ,0xb4,0x23,0x72,0xa0,0xaa,0x01,0xdf,0xdf ,0x98,0x3e,0x14,0x50,0xa0,0x31,0x26,0xbd ,0x28,0xe9,0x5a,0x30,0x26,0x75,0xf9,0x7b ,0x60,0x1c,0x8d,0xf3,0xcd,0x50,0x26,0x6d ,0x04,0x27,0x9a,0xdf,0xd5,0x0d,0x45,0x47 ,0x29,0x6b,0x2c,0xe6,0x76,0xd9,0xa9,0x29 ,0x7d,0x32,0xdd,0xc9,0x36,0x3c,0xbd,0xae ,0x35,0xf1,0x11,0x9e,0x1d,0xbb,0x90,0x3f ,0x12,0x47,0x4e,0x8e,0xd7,0x7e,0x0f,0x62 ,0x73,0x1d,0x52,0x26,0x38,0x1c,0x18,0x49 ,0xfd,0x30,0x74,0x9a,0xc4,0xe5,0x22,0x2f ,0xd8,0xc0,0x8d,0xed,0x91,0x7a,0x4c,0x00 ,0x8f,0x72,0x7f,0x5d,0xda,0xdd,0x1b,0x8b ,0x45,0x6b,0xe7,0xdd,0x69,0x97,0xa8,0xc5 ,0x56,0x4c,0x0f,0x0c,0xf6,0x9f,0x7a,0x91 ,0x37,0xf6,0x97,0x82,0xe0,0xdd,0x71,0x69 ,0xff,0x76,0x3f,0x60,0x4d,0x3c,0xcf,0xf7 ,0x99,0xf9,0xc6,0x57,0xf4,0xc9,0x55,0x39 ,0x78,0xba,0x2c,0x79,0xc9,0xa6,0x88,0x2b ,0xf4,0x08}; const unsigned int 
NptTlsTrustAnchor_Base_0084_Size = 1122;
gpl-2.0
n-aizu/linux-linaro-stable-mx6
arch/arm/mach-s5pv210/pm.c
523
3964
/* linux/arch/arm/mach-s5pv210/pm.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * S5PV210 - Power Management support * * Based on arch/arm/mach-s3c2410/pm.c * Copyright (c) 2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/suspend.h> #include <linux/syscore_ops.h> #include <linux/io.h> #include <plat/cpu.h> #include <plat/pm.h> #include <mach/regs-irq.h> #include <mach/regs-clock.h> static struct sleep_save s5pv210_core_save[] = { /* Clock source */ SAVE_ITEM(S5P_CLK_SRC0), SAVE_ITEM(S5P_CLK_SRC1), SAVE_ITEM(S5P_CLK_SRC2), SAVE_ITEM(S5P_CLK_SRC3), SAVE_ITEM(S5P_CLK_SRC4), SAVE_ITEM(S5P_CLK_SRC5), SAVE_ITEM(S5P_CLK_SRC6), /* Clock source Mask */ SAVE_ITEM(S5P_CLK_SRC_MASK0), SAVE_ITEM(S5P_CLK_SRC_MASK1), /* Clock Divider */ SAVE_ITEM(S5P_CLK_DIV0), SAVE_ITEM(S5P_CLK_DIV1), SAVE_ITEM(S5P_CLK_DIV2), SAVE_ITEM(S5P_CLK_DIV3), SAVE_ITEM(S5P_CLK_DIV4), SAVE_ITEM(S5P_CLK_DIV5), SAVE_ITEM(S5P_CLK_DIV6), SAVE_ITEM(S5P_CLK_DIV7), /* Clock Main Gate */ SAVE_ITEM(S5P_CLKGATE_MAIN0), SAVE_ITEM(S5P_CLKGATE_MAIN1), SAVE_ITEM(S5P_CLKGATE_MAIN2), /* Clock source Peri Gate */ SAVE_ITEM(S5P_CLKGATE_PERI0), SAVE_ITEM(S5P_CLKGATE_PERI1), /* Clock source SCLK Gate */ SAVE_ITEM(S5P_CLKGATE_SCLK0), SAVE_ITEM(S5P_CLKGATE_SCLK1), /* Clock IP Clock gate */ SAVE_ITEM(S5P_CLKGATE_IP0), SAVE_ITEM(S5P_CLKGATE_IP1), SAVE_ITEM(S5P_CLKGATE_IP2), SAVE_ITEM(S5P_CLKGATE_IP3), SAVE_ITEM(S5P_CLKGATE_IP4), /* Clock Blcok and Bus gate */ SAVE_ITEM(S5P_CLKGATE_BLOCK), SAVE_ITEM(S5P_CLKGATE_BUS0), /* Clock ETC */ SAVE_ITEM(S5P_CLK_OUT), SAVE_ITEM(S5P_MDNIE_SEL), }; static int s5pv210_cpu_suspend(unsigned long arg) { unsigned long tmp; /* issue the standby signal into the pm unit. 
Note, we * issue a write-buffer drain just in case */ tmp = 0; asm("b 1f\n\t" ".align 5\n\t" "1:\n\t" "mcr p15, 0, %0, c7, c10, 5\n\t" "mcr p15, 0, %0, c7, c10, 4\n\t" "wfi" : : "r" (tmp)); pr_info("Failed to suspend the system\n"); return 1; /* Aborting suspend */ } static void s5pv210_pm_prepare(void) { unsigned int tmp; /* ensure at least INFORM0 has the resume address */ __raw_writel(virt_to_phys(s3c_cpu_resume), S5P_INFORM0); tmp = __raw_readl(S5P_SLEEP_CFG); tmp &= ~(S5P_SLEEP_CFG_OSC_EN | S5P_SLEEP_CFG_USBOSC_EN); __raw_writel(tmp, S5P_SLEEP_CFG); /* WFI for SLEEP mode configuration by SYSCON */ tmp = __raw_readl(S5P_PWR_CFG); tmp &= S5P_CFG_WFI_CLEAN; tmp |= S5P_CFG_WFI_SLEEP; __raw_writel(tmp, S5P_PWR_CFG); /* SYSCON interrupt handling disable */ tmp = __raw_readl(S5P_OTHERS); tmp |= S5P_OTHER_SYSC_INTOFF; __raw_writel(tmp, S5P_OTHERS); s3c_pm_do_save(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save)); } static int s5pv210_pm_add(struct device *dev, struct subsys_interface *sif) { pm_cpu_prep = s5pv210_pm_prepare; pm_cpu_sleep = s5pv210_cpu_suspend; return 0; } static struct subsys_interface s5pv210_pm_interface = { .name = "s5pv210_pm", .subsys = &s5pv210_subsys, .add_dev = s5pv210_pm_add, }; static __init int s5pv210_pm_drvinit(void) { return subsys_interface_register(&s5pv210_pm_interface); } arch_initcall(s5pv210_pm_drvinit); static void s5pv210_pm_resume(void) { u32 tmp; tmp = __raw_readl(S5P_OTHERS); tmp |= (S5P_OTHERS_RET_IO | S5P_OTHERS_RET_CF |\ S5P_OTHERS_RET_MMC | S5P_OTHERS_RET_UART); __raw_writel(tmp , S5P_OTHERS); s3c_pm_do_restore_core(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save)); } static struct syscore_ops s5pv210_pm_syscore_ops = { .resume = s5pv210_pm_resume, }; static __init int s5pv210_pm_syscore_init(void) { register_syscore_ops(&s5pv210_pm_syscore_ops); return 0; } arch_initcall(s5pv210_pm_syscore_init);
gpl-2.0
qnhoang81/Moment_kernel
security/tomoyo/realpath.c
523
12985
/* * security/tomoyo/realpath.c * * Get the canonicalized absolute pathnames. The basis for TOMOYO. * * Copyright (C) 2005-2009 NTT DATA CORPORATION * * Version: 2.2.0 2009/04/01 * */ #include <linux/types.h> #include <linux/mount.h> #include <linux/mnt_namespace.h> #include <linux/fs_struct.h> #include "common.h" #include "realpath.h" /** * tomoyo_encode: Convert binary string to ascii string. * * @buffer: Buffer for ASCII string. * @buflen: Size of @buffer. * @str: Binary string. * * Returns 0 on success, -ENOMEM otherwise. */ int tomoyo_encode(char *buffer, int buflen, const char *str) { while (1) { const unsigned char c = *(unsigned char *) str++; if (tomoyo_is_valid(c)) { if (--buflen <= 0) break; *buffer++ = (char) c; if (c != '\\') continue; if (--buflen <= 0) break; *buffer++ = (char) c; continue; } if (!c) { if (--buflen <= 0) break; *buffer = '\0'; return 0; } buflen -= 4; if (buflen <= 0) break; *buffer++ = '\\'; *buffer++ = (c >> 6) + '0'; *buffer++ = ((c >> 3) & 7) + '0'; *buffer++ = (c & 7) + '0'; } return -ENOMEM; } /** * tomoyo_realpath_from_path2 - Returns realpath(3) of the given dentry but ignores chroot'ed root. * * @path: Pointer to "struct path". * @newname: Pointer to buffer to return value in. * @newname_len: Size of @newname. * * Returns 0 on success, negative value otherwise. * * If dentry is a directory, trailing '/' is appended. * Characters out of 0x20 < c < 0x7F range are converted to * \ooo style octal string. * Character \ is converted to \\ string. */ int tomoyo_realpath_from_path2(struct path *path, char *newname, int newname_len) { int error = -ENOMEM; struct dentry *dentry = path->dentry; char *sp; if (!dentry || !path->mnt || !newname || newname_len <= 2048) return -EINVAL; if (dentry->d_op && dentry->d_op->d_dname) { /* For "socket:[\$]" and "pipe:[\$]". */ static const int offset = 1536; sp = dentry->d_op->d_dname(dentry, newname + offset, newname_len - offset); } else { /* Taken from d_namespace_path(). 
*/ struct path root; struct path ns_root = { }; struct path tmp; read_lock(&current->fs->lock); root = current->fs->root; path_get(&root); read_unlock(&current->fs->lock); spin_lock(&vfsmount_lock); if (root.mnt && root.mnt->mnt_ns) ns_root.mnt = mntget(root.mnt->mnt_ns->root); if (ns_root.mnt) ns_root.dentry = dget(ns_root.mnt->mnt_root); spin_unlock(&vfsmount_lock); spin_lock(&dcache_lock); tmp = ns_root; sp = __d_path(path, &tmp, newname, newname_len); spin_unlock(&dcache_lock); path_put(&root); path_put(&ns_root); } if (IS_ERR(sp)) error = PTR_ERR(sp); else error = tomoyo_encode(newname, sp - newname, sp); /* Append trailing '/' if dentry is a directory. */ if (!error && dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode) && *newname) { sp = newname + strlen(newname); if (*(sp - 1) != '/') { if (sp < newname + newname_len - 4) { *sp++ = '/'; *sp = '\0'; } else { error = -ENOMEM; } } } if (error) printk(KERN_WARNING "tomoyo_realpath: Pathname too long.\n"); return error; } /** * tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root. * * @path: Pointer to "struct path". * * Returns the realpath of the given @path on success, NULL otherwise. * * These functions use tomoyo_alloc(), so the caller must call tomoyo_free() * if these functions didn't return NULL. */ char *tomoyo_realpath_from_path(struct path *path) { char *buf = tomoyo_alloc(sizeof(struct tomoyo_page_buffer)); BUILD_BUG_ON(sizeof(struct tomoyo_page_buffer) <= TOMOYO_MAX_PATHNAME_LEN - 1); if (!buf) return NULL; if (tomoyo_realpath_from_path2(path, buf, TOMOYO_MAX_PATHNAME_LEN - 1) == 0) return buf; tomoyo_free(buf); return NULL; } /** * tomoyo_realpath - Get realpath of a pathname. * * @pathname: The pathname to solve. * * Returns the realpath of @pathname on success, NULL otherwise. 
*/ char *tomoyo_realpath(const char *pathname) { struct path path; if (pathname && kern_path(pathname, LOOKUP_FOLLOW, &path) == 0) { char *buf = tomoyo_realpath_from_path(&path); path_put(&path); return buf; } return NULL; } /** * tomoyo_realpath_nofollow - Get realpath of a pathname. * * @pathname: The pathname to solve. * * Returns the realpath of @pathname on success, NULL otherwise. */ char *tomoyo_realpath_nofollow(const char *pathname) { struct path path; if (pathname && kern_path(pathname, 0, &path) == 0) { char *buf = tomoyo_realpath_from_path(&path); path_put(&path); return buf; } return NULL; } /* Memory allocated for non-string data. */ static unsigned int tomoyo_allocated_memory_for_elements; /* Quota for holding non-string data. */ static unsigned int tomoyo_quota_for_elements; /** * tomoyo_alloc_element - Allocate permanent memory for structures. * * @size: Size in bytes. * * Returns pointer to allocated memory on success, NULL otherwise. * * Memory has to be zeroed. * The RAM is chunked, so NEVER try to kfree() the returned pointer. */ void *tomoyo_alloc_element(const unsigned int size) { static char *buf; static DEFINE_MUTEX(lock); static unsigned int buf_used_len = PATH_MAX; char *ptr = NULL; /*Assumes sizeof(void *) >= sizeof(long) is true. 
*/ const unsigned int word_aligned_size = roundup(size, max(sizeof(void *), sizeof(long))); if (word_aligned_size > PATH_MAX) return NULL; mutex_lock(&lock); if (buf_used_len + word_aligned_size > PATH_MAX) { if (!tomoyo_quota_for_elements || tomoyo_allocated_memory_for_elements + PATH_MAX <= tomoyo_quota_for_elements) ptr = kzalloc(PATH_MAX, GFP_KERNEL); if (!ptr) { printk(KERN_WARNING "ERROR: Out of memory " "for tomoyo_alloc_element().\n"); if (!tomoyo_policy_loaded) panic("MAC Initialization failed.\n"); } else { buf = ptr; tomoyo_allocated_memory_for_elements += PATH_MAX; buf_used_len = word_aligned_size; ptr = buf; } } else if (word_aligned_size) { int i; ptr = buf + buf_used_len; buf_used_len += word_aligned_size; for (i = 0; i < word_aligned_size; i++) { if (!ptr[i]) continue; printk(KERN_ERR "WARNING: Reserved memory was tainted! " "The system might go wrong.\n"); ptr[i] = '\0'; } } mutex_unlock(&lock); return ptr; } /* Memory allocated for string data in bytes. */ static unsigned int tomoyo_allocated_memory_for_savename; /* Quota for holding string data in bytes. */ static unsigned int tomoyo_quota_for_savename; /* * TOMOYO uses this hash only when appending a string into the string * table. Frequency of appending strings is very low. So we don't need * large (e.g. 64k) hash size. 256 will be sufficient. */ #define TOMOYO_MAX_HASH 256 /* * tomoyo_name_entry is a structure which is used for linking * "struct tomoyo_path_info" into tomoyo_name_list . * * Since tomoyo_name_list manages a list of strings which are shared by * multiple processes (whereas "struct tomoyo_path_info" inside * "struct tomoyo_path_info_with_data" is not shared), a reference counter will * be added to "struct tomoyo_name_entry" rather than "struct tomoyo_path_info" * when TOMOYO starts supporting garbage collector. */ struct tomoyo_name_entry { struct list_head list; struct tomoyo_path_info entry; }; /* Structure for available memory region. 
*/ struct tomoyo_free_memory_block_list { struct list_head list; char *ptr; /* Pointer to a free area. */ int len; /* Length of the area. */ }; /* * tomoyo_name_list is used for holding string data used by TOMOYO. * Since same string data is likely used for multiple times (e.g. * "/lib/libc-2.5.so"), TOMOYO shares string data in the form of * "const struct tomoyo_path_info *". */ static struct list_head tomoyo_name_list[TOMOYO_MAX_HASH]; /** * tomoyo_save_name - Allocate permanent memory for string data. * * @name: The string to store into the permernent memory. * * Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise. * * The RAM is shared, so NEVER try to modify or kfree() the returned name. */ const struct tomoyo_path_info *tomoyo_save_name(const char *name) { static LIST_HEAD(fmb_list); static DEFINE_MUTEX(lock); struct tomoyo_name_entry *ptr; unsigned int hash; /* fmb contains available size in bytes. fmb is removed from the fmb_list when fmb->len becomes 0. */ struct tomoyo_free_memory_block_list *fmb; int len; char *cp; if (!name) return NULL; len = strlen(name) + 1; if (len > TOMOYO_MAX_PATHNAME_LEN) { printk(KERN_WARNING "ERROR: Name too long " "for tomoyo_save_name().\n"); return NULL; } hash = full_name_hash((const unsigned char *) name, len - 1); mutex_lock(&lock); list_for_each_entry(ptr, &tomoyo_name_list[hash % TOMOYO_MAX_HASH], list) { if (hash == ptr->entry.hash && !strcmp(name, ptr->entry.name)) goto out; } list_for_each_entry(fmb, &fmb_list, list) { if (len <= fmb->len) goto ready; } if (!tomoyo_quota_for_savename || tomoyo_allocated_memory_for_savename + PATH_MAX <= tomoyo_quota_for_savename) cp = kzalloc(PATH_MAX, GFP_KERNEL); else cp = NULL; fmb = kzalloc(sizeof(*fmb), GFP_KERNEL); if (!cp || !fmb) { kfree(cp); kfree(fmb); printk(KERN_WARNING "ERROR: Out of memory " "for tomoyo_save_name().\n"); if (!tomoyo_policy_loaded) panic("MAC Initialization failed.\n"); ptr = NULL; goto out; } tomoyo_allocated_memory_for_savename += 
PATH_MAX; list_add(&fmb->list, &fmb_list); fmb->ptr = cp; fmb->len = PATH_MAX; ready: ptr = tomoyo_alloc_element(sizeof(*ptr)); if (!ptr) goto out; ptr->entry.name = fmb->ptr; memmove(fmb->ptr, name, len); tomoyo_fill_path_info(&ptr->entry); fmb->ptr += len; fmb->len -= len; list_add_tail(&ptr->list, &tomoyo_name_list[hash % TOMOYO_MAX_HASH]); if (fmb->len == 0) { list_del(&fmb->list); kfree(fmb); } out: mutex_unlock(&lock); return ptr ? &ptr->entry : NULL; } /** * tomoyo_realpath_init - Initialize realpath related code. */ void __init tomoyo_realpath_init(void) { int i; BUILD_BUG_ON(TOMOYO_MAX_PATHNAME_LEN > PATH_MAX); for (i = 0; i < TOMOYO_MAX_HASH; i++) INIT_LIST_HEAD(&tomoyo_name_list[i]); INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list); tomoyo_kernel_domain.domainname = tomoyo_save_name(TOMOYO_ROOT_NAME); list_add_tail(&tomoyo_kernel_domain.list, &tomoyo_domain_list); down_read(&tomoyo_domain_list_lock); if (tomoyo_find_domain(TOMOYO_ROOT_NAME) != &tomoyo_kernel_domain) panic("Can't register tomoyo_kernel_domain"); up_read(&tomoyo_domain_list_lock); } /* Memory allocated for temporary purpose. */ static atomic_t tomoyo_dynamic_memory_size; /** * tomoyo_alloc - Allocate memory for temporary purpose. * * @size: Size in bytes. * * Returns pointer to allocated memory on success, NULL otherwise. */ void *tomoyo_alloc(const size_t size) { void *p = kzalloc(size, GFP_KERNEL); if (p) atomic_add(ksize(p), &tomoyo_dynamic_memory_size); return p; } /** * tomoyo_free - Release memory allocated by tomoyo_alloc(). * * @p: Pointer returned by tomoyo_alloc(). May be NULL. * * Returns nothing. */ void tomoyo_free(const void *p) { if (p) { atomic_sub(ksize(p), &tomoyo_dynamic_memory_size); kfree(p); } } /** * tomoyo_read_memory_counter - Check for memory usage in bytes. * * @head: Pointer to "struct tomoyo_io_buffer". * * Returns memory usage. 
*/ int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head) { if (!head->read_eof) { const unsigned int shared = tomoyo_allocated_memory_for_savename; const unsigned int private = tomoyo_allocated_memory_for_elements; const unsigned int dynamic = atomic_read(&tomoyo_dynamic_memory_size); char buffer[64]; memset(buffer, 0, sizeof(buffer)); if (tomoyo_quota_for_savename) snprintf(buffer, sizeof(buffer) - 1, " (Quota: %10u)", tomoyo_quota_for_savename); else buffer[0] = '\0'; tomoyo_io_printf(head, "Shared: %10u%s\n", shared, buffer); if (tomoyo_quota_for_elements) snprintf(buffer, sizeof(buffer) - 1, " (Quota: %10u)", tomoyo_quota_for_elements); else buffer[0] = '\0'; tomoyo_io_printf(head, "Private: %10u%s\n", private, buffer); tomoyo_io_printf(head, "Dynamic: %10u\n", dynamic); tomoyo_io_printf(head, "Total: %10u\n", shared + private + dynamic); head->read_eof = true; } return 0; } /** * tomoyo_write_memory_quota - Set memory quota. * * @head: Pointer to "struct tomoyo_io_buffer". * * Returns 0. */ int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head) { char *data = head->write_buf; unsigned int size; if (sscanf(data, "Shared: %u", &size) == 1) tomoyo_quota_for_savename = size; else if (sscanf(data, "Private: %u", &size) == 1) tomoyo_quota_for_elements = size; return 0; }
gpl-2.0
tsh-xx/folio100
arch/cris/kernel/profile.c
779
1786
#include <linux/init.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/types.h> #include <asm/ptrace.h> #include <asm/uaccess.h> #define SAMPLE_BUFFER_SIZE 8192 static char* sample_buffer; static char* sample_buffer_pos; static int prof_running = 0; void cris_profile_sample(struct pt_regs* regs) { if (!prof_running) return; if (user_mode(regs)) *(unsigned int*)sample_buffer_pos = current->pid; else *(unsigned int*)sample_buffer_pos = 0; *(unsigned int*)(sample_buffer_pos + 4) = instruction_pointer(regs); sample_buffer_pos += 8; if (sample_buffer_pos == sample_buffer + SAMPLE_BUFFER_SIZE) sample_buffer_pos = sample_buffer; } static ssize_t read_cris_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; ssize_t ret; ret = simple_read_from_buffer(buf, count, ppos, sample_buffer, SAMPLE_BUFFER_SIZE); if (ret < 0) return ret; memset(sample_buffer + p, 0, ret); return ret; } static ssize_t write_cris_profile(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { sample_buffer_pos = sample_buffer; memset(sample_buffer, 0, SAMPLE_BUFFER_SIZE); } static const struct file_operations cris_proc_profile_operations = { .read = read_cris_profile, .write = write_cris_profile, }; static int __init init_cris_profile(void) { struct proc_dir_entry *entry; sample_buffer = kmalloc(SAMPLE_BUFFER_SIZE, GFP_KERNEL); if (!sample_buffer) { return -ENOMEM; } sample_buffer_pos = sample_buffer; entry = proc_create("system_profile", S_IWUSR | S_IRUGO, NULL, &cris_proc_profile_operations); if (entry) { entry->size = SAMPLE_BUFFER_SIZE; } prof_running = 1; return 0; } __initcall(init_cris_profile);
gpl-2.0
dragonzkiller/daytona-kernel
arch/cris/kernel/profile.c
779
1786
#include <linux/init.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/types.h> #include <asm/ptrace.h> #include <asm/uaccess.h> #define SAMPLE_BUFFER_SIZE 8192 static char* sample_buffer; static char* sample_buffer_pos; static int prof_running = 0; void cris_profile_sample(struct pt_regs* regs) { if (!prof_running) return; if (user_mode(regs)) *(unsigned int*)sample_buffer_pos = current->pid; else *(unsigned int*)sample_buffer_pos = 0; *(unsigned int*)(sample_buffer_pos + 4) = instruction_pointer(regs); sample_buffer_pos += 8; if (sample_buffer_pos == sample_buffer + SAMPLE_BUFFER_SIZE) sample_buffer_pos = sample_buffer; } static ssize_t read_cris_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; ssize_t ret; ret = simple_read_from_buffer(buf, count, ppos, sample_buffer, SAMPLE_BUFFER_SIZE); if (ret < 0) return ret; memset(sample_buffer + p, 0, ret); return ret; } static ssize_t write_cris_profile(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { sample_buffer_pos = sample_buffer; memset(sample_buffer, 0, SAMPLE_BUFFER_SIZE); } static const struct file_operations cris_proc_profile_operations = { .read = read_cris_profile, .write = write_cris_profile, }; static int __init init_cris_profile(void) { struct proc_dir_entry *entry; sample_buffer = kmalloc(SAMPLE_BUFFER_SIZE, GFP_KERNEL); if (!sample_buffer) { return -ENOMEM; } sample_buffer_pos = sample_buffer; entry = proc_create("system_profile", S_IWUSR | S_IRUGO, NULL, &cris_proc_profile_operations); if (entry) { entry->size = SAMPLE_BUFFER_SIZE; } prof_running = 1; return 0; } __initcall(init_cris_profile);
gpl-2.0
jrior001/android_kernel_asus_moorefield
drivers/net/ethernet/ti/davinci_cpdma.c
2059
27322
/* * Texas Instruments CPDMA Driver * * Copyright (C) 2010 Texas Instruments * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/delay.h> #include "davinci_cpdma.h" /* DMA Registers */ #define CPDMA_TXIDVER 0x00 #define CPDMA_TXCONTROL 0x04 #define CPDMA_TXTEARDOWN 0x08 #define CPDMA_RXIDVER 0x10 #define CPDMA_RXCONTROL 0x14 #define CPDMA_SOFTRESET 0x1c #define CPDMA_RXTEARDOWN 0x18 #define CPDMA_TXINTSTATRAW 0x80 #define CPDMA_TXINTSTATMASKED 0x84 #define CPDMA_TXINTMASKSET 0x88 #define CPDMA_TXINTMASKCLEAR 0x8c #define CPDMA_MACINVECTOR 0x90 #define CPDMA_MACEOIVECTOR 0x94 #define CPDMA_RXINTSTATRAW 0xa0 #define CPDMA_RXINTSTATMASKED 0xa4 #define CPDMA_RXINTMASKSET 0xa8 #define CPDMA_RXINTMASKCLEAR 0xac #define CPDMA_DMAINTSTATRAW 0xb0 #define CPDMA_DMAINTSTATMASKED 0xb4 #define CPDMA_DMAINTMASKSET 0xb8 #define CPDMA_DMAINTMASKCLEAR 0xbc #define CPDMA_DMAINT_HOSTERR BIT(1) /* the following exist only if has_ext_regs is set */ #define CPDMA_DMACONTROL 0x20 #define CPDMA_DMASTATUS 0x24 #define CPDMA_RXBUFFOFS 0x28 #define CPDMA_EM_CONTROL 0x2c /* Descriptor mode bits */ #define CPDMA_DESC_SOP BIT(31) #define CPDMA_DESC_EOP BIT(30) #define CPDMA_DESC_OWNER BIT(29) #define CPDMA_DESC_EOQ BIT(28) #define CPDMA_DESC_TD_COMPLETE BIT(27) #define CPDMA_DESC_PASS_CRC BIT(26) #define CPDMA_DESC_TO_PORT_EN BIT(20) #define CPDMA_TO_PORT_SHIFT 16 #define CPDMA_DESC_PORT_MASK 
(BIT(18) | BIT(17) | BIT(16)) #define CPDMA_TEARDOWN_VALUE 0xfffffffc struct cpdma_desc { /* hardware fields */ u32 hw_next; u32 hw_buffer; u32 hw_len; u32 hw_mode; /* software fields */ void *sw_token; u32 sw_buffer; u32 sw_len; }; struct cpdma_desc_pool { u32 phys; u32 hw_addr; void __iomem *iomap; /* ioremap map */ void *cpumap; /* dma_alloc map */ int desc_size, mem_size; int num_desc, used_desc; unsigned long *bitmap; struct device *dev; spinlock_t lock; }; enum cpdma_state { CPDMA_STATE_IDLE, CPDMA_STATE_ACTIVE, CPDMA_STATE_TEARDOWN, }; static const char *cpdma_state_str[] = { "idle", "active", "teardown" }; struct cpdma_ctlr { enum cpdma_state state; struct cpdma_params params; struct device *dev; struct cpdma_desc_pool *pool; spinlock_t lock; struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS]; }; struct cpdma_chan { struct cpdma_desc __iomem *head, *tail; void __iomem *hdp, *cp, *rxfree; enum cpdma_state state; struct cpdma_ctlr *ctlr; int chan_num; spinlock_t lock; int count; u32 mask; cpdma_handler_fn handler; enum dma_data_direction dir; struct cpdma_chan_stats stats; /* offsets into dmaregs */ int int_set, int_clear, td; }; /* The following make access to common cpdma_ctlr params more readable */ #define dmaregs params.dmaregs #define num_chan params.num_chan /* various accessors */ #define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs)) #define chan_read(chan, fld) __raw_readl((chan)->fld) #define desc_read(desc, fld) __raw_readl(&(desc)->fld) #define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs)) #define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld) #define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld) #define cpdma_desc_to_port(chan, mode, directed) \ do { \ if (!is_rx_chan(chan) && ((directed == 1) || \ (directed == 2))) \ mode |= (CPDMA_DESC_TO_PORT_EN | \ (directed << CPDMA_TO_PORT_SHIFT)); \ } while (0) /* * Utility constructs for a cpdma descriptor pool. Some devices (e.g. 
davinci * emac) have dedicated on-chip memory for these descriptors. Some other * devices (e.g. cpsw switches) use plain old memory. Descriptor pools * abstract out these details */ static struct cpdma_desc_pool * cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, int size, int align) { int bitmap_size; struct cpdma_desc_pool *pool; pool = kzalloc(sizeof(*pool), GFP_KERNEL); if (!pool) return NULL; spin_lock_init(&pool->lock); pool->dev = dev; pool->mem_size = size; pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align); pool->num_desc = size / pool->desc_size; bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long); pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL); if (!pool->bitmap) goto fail; if (phys) { pool->phys = phys; pool->iomap = ioremap(phys, size); pool->hw_addr = hw_addr; } else { pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, GFP_KERNEL); pool->iomap = pool->cpumap; pool->hw_addr = pool->phys; } if (pool->iomap) return pool; fail: kfree(pool->bitmap); kfree(pool); return NULL; } static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) { unsigned long flags; if (!pool) return; spin_lock_irqsave(&pool->lock, flags); WARN_ON(pool->used_desc); kfree(pool->bitmap); if (pool->cpumap) { dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, pool->phys); } else { iounmap(pool->iomap); } spin_unlock_irqrestore(&pool->lock, flags); kfree(pool); } static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, struct cpdma_desc __iomem *desc) { if (!desc) return 0; return pool->hw_addr + (__force dma_addr_t)desc - (__force dma_addr_t)pool->iomap; } static inline struct cpdma_desc __iomem * desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) { return dma ? 
pool->iomap + dma - pool->hw_addr : NULL; } static struct cpdma_desc __iomem * cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx) { unsigned long flags; int index; int desc_start; int desc_end; struct cpdma_desc __iomem *desc = NULL; spin_lock_irqsave(&pool->lock, flags); if (is_rx) { desc_start = 0; desc_end = pool->num_desc/2; } else { desc_start = pool->num_desc/2; desc_end = pool->num_desc; } index = bitmap_find_next_zero_area(pool->bitmap, desc_end, desc_start, num_desc, 0); if (index < desc_end) { bitmap_set(pool->bitmap, index, num_desc); desc = pool->iomap + pool->desc_size * index; pool->used_desc++; } spin_unlock_irqrestore(&pool->lock, flags); return desc; } static void cpdma_desc_free(struct cpdma_desc_pool *pool, struct cpdma_desc __iomem *desc, int num_desc) { unsigned long flags, index; index = ((unsigned long)desc - (unsigned long)pool->iomap) / pool->desc_size; spin_lock_irqsave(&pool->lock, flags); bitmap_clear(pool->bitmap, index, num_desc); pool->used_desc--; spin_unlock_irqrestore(&pool->lock, flags); } struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) { struct cpdma_ctlr *ctlr; ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL); if (!ctlr) return NULL; ctlr->state = CPDMA_STATE_IDLE; ctlr->params = *params; ctlr->dev = params->dev; spin_lock_init(&ctlr->lock); ctlr->pool = cpdma_desc_pool_create(ctlr->dev, ctlr->params.desc_mem_phys, ctlr->params.desc_hw_addr, ctlr->params.desc_mem_size, ctlr->params.desc_align); if (!ctlr->pool) { kfree(ctlr); return NULL; } if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS)) ctlr->num_chan = CPDMA_MAX_CHANNELS; return ctlr; } EXPORT_SYMBOL_GPL(cpdma_ctlr_create); int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) { unsigned long flags; int i; spin_lock_irqsave(&ctlr->lock, flags); if (ctlr->state != CPDMA_STATE_IDLE) { spin_unlock_irqrestore(&ctlr->lock, flags); return -EBUSY; } if (ctlr->params.has_soft_reset) { unsigned timeout = 10 * 100; dma_reg_write(ctlr, CPDMA_SOFTRESET, 1); 
while (timeout) { if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0) break; udelay(10); timeout--; } WARN_ON(!timeout); } for (i = 0; i < ctlr->num_chan; i++) { __raw_writel(0, ctlr->params.txhdp + 4 * i); __raw_writel(0, ctlr->params.rxhdp + 4 * i); __raw_writel(0, ctlr->params.txcp + 4 * i); __raw_writel(0, ctlr->params.rxcp + 4 * i); } dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); dma_reg_write(ctlr, CPDMA_TXCONTROL, 1); dma_reg_write(ctlr, CPDMA_RXCONTROL, 1); ctlr->state = CPDMA_STATE_ACTIVE; for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { if (ctlr->channels[i]) cpdma_chan_start(ctlr->channels[i]); } spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } EXPORT_SYMBOL_GPL(cpdma_ctlr_start); int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) { unsigned long flags; int i; spin_lock_irqsave(&ctlr->lock, flags); if (ctlr->state != CPDMA_STATE_ACTIVE) { spin_unlock_irqrestore(&ctlr->lock, flags); return -EINVAL; } ctlr->state = CPDMA_STATE_TEARDOWN; for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { if (ctlr->channels[i]) cpdma_chan_stop(ctlr->channels[i]); } dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); dma_reg_write(ctlr, CPDMA_TXCONTROL, 0); dma_reg_write(ctlr, CPDMA_RXCONTROL, 0); ctlr->state = CPDMA_STATE_IDLE; spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } EXPORT_SYMBOL_GPL(cpdma_ctlr_stop); int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr) { struct device *dev = ctlr->dev; unsigned long flags; int i; spin_lock_irqsave(&ctlr->lock, flags); dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]); dev_info(dev, "CPDMA: txidver: %x", dma_reg_read(ctlr, CPDMA_TXIDVER)); dev_info(dev, "CPDMA: txcontrol: %x", dma_reg_read(ctlr, CPDMA_TXCONTROL)); dev_info(dev, "CPDMA: txteardown: %x", dma_reg_read(ctlr, CPDMA_TXTEARDOWN)); dev_info(dev, "CPDMA: rxidver: %x", dma_reg_read(ctlr, CPDMA_RXIDVER)); dev_info(dev, "CPDMA: rxcontrol: %x", 
dma_reg_read(ctlr, CPDMA_RXCONTROL)); dev_info(dev, "CPDMA: softreset: %x", dma_reg_read(ctlr, CPDMA_SOFTRESET)); dev_info(dev, "CPDMA: rxteardown: %x", dma_reg_read(ctlr, CPDMA_RXTEARDOWN)); dev_info(dev, "CPDMA: txintstatraw: %x", dma_reg_read(ctlr, CPDMA_TXINTSTATRAW)); dev_info(dev, "CPDMA: txintstatmasked: %x", dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED)); dev_info(dev, "CPDMA: txintmaskset: %x", dma_reg_read(ctlr, CPDMA_TXINTMASKSET)); dev_info(dev, "CPDMA: txintmaskclear: %x", dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR)); dev_info(dev, "CPDMA: macinvector: %x", dma_reg_read(ctlr, CPDMA_MACINVECTOR)); dev_info(dev, "CPDMA: maceoivector: %x", dma_reg_read(ctlr, CPDMA_MACEOIVECTOR)); dev_info(dev, "CPDMA: rxintstatraw: %x", dma_reg_read(ctlr, CPDMA_RXINTSTATRAW)); dev_info(dev, "CPDMA: rxintstatmasked: %x", dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED)); dev_info(dev, "CPDMA: rxintmaskset: %x", dma_reg_read(ctlr, CPDMA_RXINTMASKSET)); dev_info(dev, "CPDMA: rxintmaskclear: %x", dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR)); dev_info(dev, "CPDMA: dmaintstatraw: %x", dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW)); dev_info(dev, "CPDMA: dmaintstatmasked: %x", dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED)); dev_info(dev, "CPDMA: dmaintmaskset: %x", dma_reg_read(ctlr, CPDMA_DMAINTMASKSET)); dev_info(dev, "CPDMA: dmaintmaskclear: %x", dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR)); if (!ctlr->params.has_ext_regs) { dev_info(dev, "CPDMA: dmacontrol: %x", dma_reg_read(ctlr, CPDMA_DMACONTROL)); dev_info(dev, "CPDMA: dmastatus: %x", dma_reg_read(ctlr, CPDMA_DMASTATUS)); dev_info(dev, "CPDMA: rxbuffofs: %x", dma_reg_read(ctlr, CPDMA_RXBUFFOFS)); } for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) if (ctlr->channels[i]) cpdma_chan_dump(ctlr->channels[i]); spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } EXPORT_SYMBOL_GPL(cpdma_ctlr_dump); int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) { unsigned long flags; int ret = 0, i; if (!ctlr) return -EINVAL; spin_lock_irqsave(&ctlr->lock, flags); if 
(ctlr->state != CPDMA_STATE_IDLE) cpdma_ctlr_stop(ctlr); for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) cpdma_chan_destroy(ctlr->channels[i]); cpdma_desc_pool_destroy(ctlr->pool); spin_unlock_irqrestore(&ctlr->lock, flags); kfree(ctlr); return ret; } EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy); int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) { unsigned long flags; int i, reg; spin_lock_irqsave(&ctlr->lock, flags); if (ctlr->state != CPDMA_STATE_ACTIVE) { spin_unlock_irqrestore(&ctlr->lock, flags); return -EINVAL; } reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR; dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR); for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { if (ctlr->channels[i]) cpdma_chan_int_ctrl(ctlr->channels[i], enable); } spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl); void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value) { dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value); } EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi); struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, cpdma_handler_fn handler) { struct cpdma_chan *chan; int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4; unsigned long flags; if (__chan_linear(chan_num) >= ctlr->num_chan) return NULL; ret = -ENOMEM; chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) goto err_chan_alloc; spin_lock_irqsave(&ctlr->lock, flags); ret = -EBUSY; if (ctlr->channels[chan_num]) goto err_chan_busy; chan->ctlr = ctlr; chan->state = CPDMA_STATE_IDLE; chan->chan_num = chan_num; chan->handler = handler; if (is_rx_chan(chan)) { chan->hdp = ctlr->params.rxhdp + offset; chan->cp = ctlr->params.rxcp + offset; chan->rxfree = ctlr->params.rxfree + offset; chan->int_set = CPDMA_RXINTMASKSET; chan->int_clear = CPDMA_RXINTMASKCLEAR; chan->td = CPDMA_RXTEARDOWN; chan->dir = DMA_FROM_DEVICE; } else { chan->hdp = ctlr->params.txhdp + offset; chan->cp = ctlr->params.txcp + offset; chan->int_set = CPDMA_TXINTMASKSET; chan->int_clear = 
CPDMA_TXINTMASKCLEAR; chan->td = CPDMA_TXTEARDOWN; chan->dir = DMA_TO_DEVICE; } chan->mask = BIT(chan_linear(chan)); spin_lock_init(&chan->lock); ctlr->channels[chan_num] = chan; spin_unlock_irqrestore(&ctlr->lock, flags); return chan; err_chan_busy: spin_unlock_irqrestore(&ctlr->lock, flags); kfree(chan); err_chan_alloc: return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(cpdma_chan_create); int cpdma_chan_destroy(struct cpdma_chan *chan) { struct cpdma_ctlr *ctlr; unsigned long flags; if (!chan) return -EINVAL; ctlr = chan->ctlr; spin_lock_irqsave(&ctlr->lock, flags); if (chan->state != CPDMA_STATE_IDLE) cpdma_chan_stop(chan); ctlr->channels[chan->chan_num] = NULL; spin_unlock_irqrestore(&ctlr->lock, flags); kfree(chan); return 0; } EXPORT_SYMBOL_GPL(cpdma_chan_destroy); int cpdma_chan_get_stats(struct cpdma_chan *chan, struct cpdma_chan_stats *stats) { unsigned long flags; if (!chan) return -EINVAL; spin_lock_irqsave(&chan->lock, flags); memcpy(stats, &chan->stats, sizeof(*stats)); spin_unlock_irqrestore(&chan->lock, flags); return 0; } int cpdma_chan_dump(struct cpdma_chan *chan) { unsigned long flags; struct device *dev = chan->ctlr->dev; spin_lock_irqsave(&chan->lock, flags); dev_info(dev, "channel %d (%s %d) state %s", chan->chan_num, is_rx_chan(chan) ? 
"rx" : "tx", chan_linear(chan), cpdma_state_str[chan->state]); dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp)); dev_info(dev, "\tcp: %x\n", chan_read(chan, cp)); if (chan->rxfree) { dev_info(dev, "\trxfree: %x\n", chan_read(chan, rxfree)); } dev_info(dev, "\tstats head_enqueue: %d\n", chan->stats.head_enqueue); dev_info(dev, "\tstats tail_enqueue: %d\n", chan->stats.tail_enqueue); dev_info(dev, "\tstats pad_enqueue: %d\n", chan->stats.pad_enqueue); dev_info(dev, "\tstats misqueued: %d\n", chan->stats.misqueued); dev_info(dev, "\tstats desc_alloc_fail: %d\n", chan->stats.desc_alloc_fail); dev_info(dev, "\tstats pad_alloc_fail: %d\n", chan->stats.pad_alloc_fail); dev_info(dev, "\tstats runt_receive_buff: %d\n", chan->stats.runt_receive_buff); dev_info(dev, "\tstats runt_transmit_buff: %d\n", chan->stats.runt_transmit_buff); dev_info(dev, "\tstats empty_dequeue: %d\n", chan->stats.empty_dequeue); dev_info(dev, "\tstats busy_dequeue: %d\n", chan->stats.busy_dequeue); dev_info(dev, "\tstats good_dequeue: %d\n", chan->stats.good_dequeue); dev_info(dev, "\tstats requeue: %d\n", chan->stats.requeue); dev_info(dev, "\tstats teardown_dequeue: %d\n", chan->stats.teardown_dequeue); spin_unlock_irqrestore(&chan->lock, flags); return 0; } static void __cpdma_chan_submit(struct cpdma_chan *chan, struct cpdma_desc __iomem *desc) { struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc __iomem *prev = chan->tail; struct cpdma_desc_pool *pool = ctlr->pool; dma_addr_t desc_dma; u32 mode; desc_dma = desc_phys(pool, desc); /* simple case - idle channel */ if (!chan->head) { chan->stats.head_enqueue++; chan->head = desc; chan->tail = desc; if (chan->state == CPDMA_STATE_ACTIVE) chan_write(chan, hdp, desc_dma); return; } /* first chain the descriptor at the tail of the list */ desc_write(prev, hw_next, desc_dma); chan->tail = desc; chan->stats.tail_enqueue++; /* next check if EOQ has been triggered already */ mode = desc_read(prev, hw_mode); if (((mode & (CPDMA_DESC_EOQ | 
CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) && (chan->state == CPDMA_STATE_ACTIVE)) { desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ); chan_write(chan, hdp, desc_dma); chan->stats.misqueued++; } } int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, int len, int directed) { struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc __iomem *desc; dma_addr_t buffer; unsigned long flags; u32 mode; int ret = 0; spin_lock_irqsave(&chan->lock, flags); if (chan->state == CPDMA_STATE_TEARDOWN) { ret = -EINVAL; goto unlock_ret; } desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan)); if (!desc) { chan->stats.desc_alloc_fail++; ret = -ENOMEM; goto unlock_ret; } if (len < ctlr->params.min_packet_size) { len = ctlr->params.min_packet_size; chan->stats.runt_transmit_buff++; } buffer = dma_map_single(ctlr->dev, data, len, chan->dir); ret = dma_mapping_error(ctlr->dev, buffer); if (ret) { cpdma_desc_free(ctlr->pool, desc, 1); ret = -EINVAL; goto unlock_ret; } mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; cpdma_desc_to_port(chan, mode, directed); desc_write(desc, hw_next, 0); desc_write(desc, hw_buffer, buffer); desc_write(desc, hw_len, len); desc_write(desc, hw_mode, mode | len); desc_write(desc, sw_token, token); desc_write(desc, sw_buffer, buffer); desc_write(desc, sw_len, len); __cpdma_chan_submit(chan, desc); if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree) chan_write(chan, rxfree, 1); chan->count++; unlock_ret: spin_unlock_irqrestore(&chan->lock, flags); return ret; } EXPORT_SYMBOL_GPL(cpdma_chan_submit); bool cpdma_check_free_tx_desc(struct cpdma_chan *chan) { unsigned long flags; int index; bool ret; struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc_pool *pool = ctlr->pool; spin_lock_irqsave(&pool->lock, flags); index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, pool->num_desc/2, 1, 0); if (index < pool->num_desc) ret = true; else ret = false; spin_unlock_irqrestore(&pool->lock, flags); return ret; } 
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc); static void __cpdma_chan_free(struct cpdma_chan *chan, struct cpdma_desc __iomem *desc, int outlen, int status) { struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc_pool *pool = ctlr->pool; dma_addr_t buff_dma; int origlen; void *token; token = (void *)desc_read(desc, sw_token); buff_dma = desc_read(desc, sw_buffer); origlen = desc_read(desc, sw_len); dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir); cpdma_desc_free(pool, desc, 1); (*chan->handler)(token, outlen, status); } static int __cpdma_chan_process(struct cpdma_chan *chan) { struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc __iomem *desc; int status, outlen; int cb_status = 0; struct cpdma_desc_pool *pool = ctlr->pool; dma_addr_t desc_dma; unsigned long flags; spin_lock_irqsave(&chan->lock, flags); desc = chan->head; if (!desc) { chan->stats.empty_dequeue++; status = -ENOENT; goto unlock_ret; } desc_dma = desc_phys(pool, desc); status = __raw_readl(&desc->hw_mode); outlen = status & 0x7ff; if (status & CPDMA_DESC_OWNER) { chan->stats.busy_dequeue++; status = -EBUSY; goto unlock_ret; } status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE | CPDMA_DESC_PORT_MASK); chan->head = desc_from_phys(pool, desc_read(desc, hw_next)); chan_write(chan, cp, desc_dma); chan->count--; chan->stats.good_dequeue++; if (status & CPDMA_DESC_EOQ) { chan->stats.requeue++; chan_write(chan, hdp, desc_phys(pool, chan->head)); } spin_unlock_irqrestore(&chan->lock, flags); if (unlikely(status & CPDMA_DESC_TD_COMPLETE)) cb_status = -ENOSYS; else cb_status = status; __cpdma_chan_free(chan, desc, outlen, cb_status); return status; unlock_ret: spin_unlock_irqrestore(&chan->lock, flags); return status; } int cpdma_chan_process(struct cpdma_chan *chan, int quota) { int used = 0, ret = 0; if (chan->state != CPDMA_STATE_ACTIVE) return -EINVAL; while (used < quota) { ret = __cpdma_chan_process(chan); if (ret < 0) break; used++; } return used; } 
EXPORT_SYMBOL_GPL(cpdma_chan_process); int cpdma_chan_start(struct cpdma_chan *chan) { struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc_pool *pool = ctlr->pool; unsigned long flags; spin_lock_irqsave(&chan->lock, flags); if (chan->state != CPDMA_STATE_IDLE) { spin_unlock_irqrestore(&chan->lock, flags); return -EBUSY; } if (ctlr->state != CPDMA_STATE_ACTIVE) { spin_unlock_irqrestore(&chan->lock, flags); return -EINVAL; } dma_reg_write(ctlr, chan->int_set, chan->mask); chan->state = CPDMA_STATE_ACTIVE; if (chan->head) { chan_write(chan, hdp, desc_phys(pool, chan->head)); if (chan->rxfree) chan_write(chan, rxfree, chan->count); } spin_unlock_irqrestore(&chan->lock, flags); return 0; } EXPORT_SYMBOL_GPL(cpdma_chan_start); int cpdma_chan_stop(struct cpdma_chan *chan) { struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc_pool *pool = ctlr->pool; unsigned long flags; int ret; unsigned timeout; spin_lock_irqsave(&chan->lock, flags); if (chan->state != CPDMA_STATE_ACTIVE) { spin_unlock_irqrestore(&chan->lock, flags); return -EINVAL; } chan->state = CPDMA_STATE_TEARDOWN; dma_reg_write(ctlr, chan->int_clear, chan->mask); /* trigger teardown */ dma_reg_write(ctlr, chan->td, chan_linear(chan)); /* wait for teardown complete */ timeout = 100 * 100; /* 100 ms */ while (timeout) { u32 cp = chan_read(chan, cp); if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE) break; udelay(10); timeout--; } WARN_ON(!timeout); chan_write(chan, cp, CPDMA_TEARDOWN_VALUE); /* handle completed packets */ spin_unlock_irqrestore(&chan->lock, flags); do { ret = __cpdma_chan_process(chan); if (ret < 0) break; } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0); spin_lock_irqsave(&chan->lock, flags); /* remaining packets haven't been tx/rx'ed, clean them up */ while (chan->head) { struct cpdma_desc __iomem *desc = chan->head; dma_addr_t next_dma; next_dma = desc_read(desc, hw_next); chan->head = desc_from_phys(pool, next_dma); chan->count--; chan->stats.teardown_dequeue++; /* issue callback 
without locks held */ spin_unlock_irqrestore(&chan->lock, flags); __cpdma_chan_free(chan, desc, 0, -ENOSYS); spin_lock_irqsave(&chan->lock, flags); } chan->state = CPDMA_STATE_IDLE; spin_unlock_irqrestore(&chan->lock, flags); return 0; } EXPORT_SYMBOL_GPL(cpdma_chan_stop); int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable) { unsigned long flags; spin_lock_irqsave(&chan->lock, flags); if (chan->state != CPDMA_STATE_ACTIVE) { spin_unlock_irqrestore(&chan->lock, flags); return -EINVAL; } dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear, chan->mask); spin_unlock_irqrestore(&chan->lock, flags); return 0; } struct cpdma_control_info { u32 reg; u32 shift, mask; int access; #define ACCESS_RO BIT(0) #define ACCESS_WO BIT(1) #define ACCESS_RW (ACCESS_RO | ACCESS_WO) }; struct cpdma_control_info controls[] = { [CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO}, [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW}, [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW}, [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW}, [CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW}, [CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO}, [CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW}, [CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW}, [CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW}, [CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW}, [CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW}, }; int cpdma_control_get(struct cpdma_ctlr *ctlr, int control) { unsigned long flags; struct cpdma_control_info *info = &controls[control]; int ret; spin_lock_irqsave(&ctlr->lock, flags); ret = -ENOTSUPP; if (!ctlr->params.has_ext_regs) goto unlock_ret; ret = -EINVAL; if (ctlr->state != CPDMA_STATE_ACTIVE) goto unlock_ret; ret = -ENOENT; if (control < 0 || control >= ARRAY_SIZE(controls)) goto unlock_ret; ret = -EPERM; if ((info->access & ACCESS_RO) != 
ACCESS_RO) goto unlock_ret; ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask; unlock_ret: spin_unlock_irqrestore(&ctlr->lock, flags); return ret; } int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value) { unsigned long flags; struct cpdma_control_info *info = &controls[control]; int ret; u32 val; spin_lock_irqsave(&ctlr->lock, flags); ret = -ENOTSUPP; if (!ctlr->params.has_ext_regs) goto unlock_ret; ret = -EINVAL; if (ctlr->state != CPDMA_STATE_ACTIVE) goto unlock_ret; ret = -ENOENT; if (control < 0 || control >= ARRAY_SIZE(controls)) goto unlock_ret; ret = -EPERM; if ((info->access & ACCESS_WO) != ACCESS_WO) goto unlock_ret; val = dma_reg_read(ctlr, info->reg); val &= ~(info->mask << info->shift); val |= (value & info->mask) << info->shift; dma_reg_write(ctlr, info->reg, val); ret = 0; unlock_ret: spin_unlock_irqrestore(&ctlr->lock, flags); return ret; } EXPORT_SYMBOL_GPL(cpdma_control_set); MODULE_LICENSE("GPL");
gpl-2.0
HRTKernel/test
drivers/net/ethernet/ti/davinci_cpdma.c
2059
27322
/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>

#include "davinci_cpdma.h"

/* DMA Registers (offsets from the CPDMA register base) */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits (top bits of the hw_mode word) */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

/* One hardware DMA descriptor plus driver bookkeeping. The first four
 * fields are read by the DMA engine; the sw_* fields are software-only
 * copies used at completion time. */
struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

/* A pool of descriptors carved out of either dedicated on-chip memory
 * (iomap of 'phys') or coherent DMA memory (cpumap). */
struct cpdma_desc_pool {
	u32			phys;
	u32			hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc, used_desc;
	unsigned long		*bitmap;	/* allocation map, 1 bit per descriptor */
	struct device		*dev;
	spinlock_t		lock;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

static const char *cpdma_state_str[] = { "idle", "active", "teardown" };

/* One CPDMA instance: register base (in params), descriptor pool and
 * up to CPDMA_MAX_CHANNELS each of rx and tx channels. */
struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
};

/* Per-channel state: head/tail of the in-flight descriptor chain plus
 * the channel's head-pointer / completion-pointer register addresses. */
struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int	int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

/* On tx channels, encode the directed-packet destination port into the
 * descriptor mode word (ports 1 and 2 only). */
#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
				int size, int align)
{
	int bitmap_size;
	struct cpdma_desc_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	spin_lock_init(&pool->lock);

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	/* NOTE(review): this rounds DOWN, so up to BITS_PER_LONG-1
	 * descriptors are unaddressable when num_desc is not a multiple
	 * of BITS_PER_LONG; BITS_TO_LONGS() would be the usual idiom —
	 * confirm before changing. */
	bitmap_size  = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
	pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!pool->bitmap)
		goto fail;

	if (phys) {
		/* dedicated on-chip descriptor memory */
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size);
		pool->hw_addr = hw_addr;
	} else {
		/* plain DDR; hw sees the same address the CPU maps */
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
						  GFP_KERNEL);
		pool->iomap = pool->cpumap;
		pool->hw_addr = pool->phys;
	}

	if (pool->iomap)
		return pool;

fail:
	kfree(pool->bitmap);
	kfree(pool);
	return NULL;
}

/* Release a descriptor pool; warns if descriptors are still in use. */
static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	unsigned long flags;

	if (!pool)
		return;

	spin_lock_irqsave(&pool->lock, flags);
	WARN_ON(pool->used_desc);
	kfree(pool->bitmap);
	if (pool->cpumap) {
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	} else {
		iounmap(pool->iomap);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
	kfree(pool);
}

/* Convert a CPU-visible descriptor pointer to the address the DMA
 * engine uses, and back.  NULL/0 map to each other. */
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
		  struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force dma_addr_t)desc -
			    (__force dma_addr_t)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

/* Allocate num_desc contiguous descriptors.  The pool is split in two:
 * rx allocations come from the lower half, tx from the upper half. */
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
{
	unsigned long flags;
	int index;
	int desc_start;
	int desc_end;
	struct cpdma_desc __iomem *desc = NULL;

	spin_lock_irqsave(&pool->lock, flags);

	if (is_rx) {
		desc_start = 0;
		desc_end = pool->num_desc/2;
	 } else {
		desc_start = pool->num_desc/2;
		desc_end = pool->num_desc;
	}

	index = bitmap_find_next_zero_area(pool->bitmap,
				desc_end, desc_start, num_desc, 0);
	if (index < desc_end) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}

/* Return num_desc descriptors (starting at desc) to the pool. */
static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	unsigned long flags, index;

	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
		pool->desc_size;
	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, index, num_desc);
	pool->used_desc--;
	spin_unlock_irqrestore(&pool->lock, flags);
}

/* Allocate and initialize a controller (state IDLE); returns NULL on
 * allocation failure. */
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool) {
		kfree(ctlr);
		return NULL;
	}

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

/* Soft-reset (if supported), zero all head/completion pointers, enable
 * rx/tx DMA and start every registered channel.  -EBUSY if not idle. */
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

/* Stop all channels, mask interrupts and disable rx/tx DMA.
 * -EINVAL unless currently active. */
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

/* Dump all controller registers and every channel's state via dev_info. */
int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
	struct device *dev = ctlr->dev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);

	dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);

	dev_info(dev, "CPDMA: txidver: %x",
		 dma_reg_read(ctlr, CPDMA_TXIDVER));
	dev_info(dev, "CPDMA: txcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
	dev_info(dev, "CPDMA: txteardown: %x",
		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
	dev_info(dev, "CPDMA: rxidver: %x",
		 dma_reg_read(ctlr, CPDMA_RXIDVER));
	dev_info(dev, "CPDMA: rxcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
	dev_info(dev, "CPDMA: softreset: %x",
		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
	dev_info(dev, "CPDMA: rxteardown: %x",
		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
	dev_info(dev, "CPDMA: txintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
	dev_info(dev, "CPDMA: txintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
	dev_info(dev, "CPDMA: txintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
	dev_info(dev, "CPDMA: txintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
	dev_info(dev, "CPDMA: macinvector: %x",
		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
	dev_info(dev, "CPDMA: maceoivector: %x",
		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
	dev_info(dev, "CPDMA: rxintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
	dev_info(dev, "CPDMA: rxintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
	dev_info(dev, "CPDMA: rxintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
	dev_info(dev, "CPDMA: rxintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
	dev_info(dev, "CPDMA: dmaintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
	dev_info(dev, "CPDMA: dmaintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
	dev_info(dev, "CPDMA: dmaintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
	dev_info(dev, "CPDMA: dmaintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));

	/* NOTE(review): the sense of this test looks inverted — the ext
	 * registers are documented above as existing only *if*
	 * has_ext_regs is set, and cpdma_control_get/set below require
	 * has_ext_regs.  Confirm against the hardware docs before use. */
	if (!ctlr->params.has_ext_regs) {
		dev_info(dev, "CPDMA: dmacontrol: %x",
			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
		dev_info(dev, "CPDMA: dmastatus: %x",
			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
		dev_info(dev, "CPDMA: rxbuffofs: %x",
			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		if (ctlr->channels[i])
			cpdma_chan_dump(ctlr->channels[i]);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);

/* Stop (if needed) and free the controller, its channels and pool. */
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	/* NOTE(review): cpdma_ctlr_stop() and cpdma_chan_destroy() both
	 * take ctlr->lock themselves, but are called here with ctlr->lock
	 * already held — this self-deadlocks if the stop path is taken.
	 * Confirm; upstream later dropped the lock around these calls. */
	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(ctlr);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

/* Enable/disable the host-error interrupt and every channel's interrupt. */
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

/* Write the end-of-interrupt vector (acks the interrupt at the MAC). */
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

/* Create a channel.  chan_num encodes direction (rx/tx halves of the
 * channels[] array); handler is invoked on packet completion.
 * Returns ERR_PTR(-EBUSY) if the slot is taken, ERR_PTR(-ENOMEM) on
 * allocation failure, NULL if chan_num is out of range. */
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	ret = -ENOMEM;
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		goto err_chan_alloc;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = -EBUSY;
	if (ctlr->channels[chan_num])
		goto err_chan_busy;

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}

	chan->mask = BIT(chan_linear(chan));
	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;

err_chan_busy:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
err_chan_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

/* Stop (if active) and free a channel; NULL is tolerated. */
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

/* Snapshot the channel's statistics into *stats. */
int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;
	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

/* Dump channel registers and statistics via dev_info. */
int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}

	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

/* Chain a new descriptor onto the channel.  Caller holds chan->lock.
 * Handles the empty-queue case, and re-kicks the hardware head pointer
 * when the engine already hit end-of-queue on the previous tail
 * (the "misqueued" race). */
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

/* Map a buffer and queue it on the channel.  'token' is handed back to
 * the completion handler; 'directed' selects a destination port on tx.
 * Short tx buffers are padded up to min_packet_size. */
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

/* True if at least one tx descriptor (upper half of the pool) is free. */
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	unsigned long flags;
	int index;
	bool ret;
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;

	spin_lock_irqsave(&pool->lock, flags);
	index = bitmap_find_next_zero_area(pool->bitmap,
				pool->num_desc, pool->num_desc/2, 1, 0);
	if (index < pool->num_desc)
		ret = true;
	else
		ret = false;
	spin_unlock_irqrestore(&pool->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc); static void __cpdma_chan_free(struct cpdma_chan *chan, struct cpdma_desc __iomem *desc, int outlen, int status) { struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc_pool *pool = ctlr->pool; dma_addr_t buff_dma; int origlen; void *token; token = (void *)desc_read(desc, sw_token); buff_dma = desc_read(desc, sw_buffer); origlen = desc_read(desc, sw_len); dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir); cpdma_desc_free(pool, desc, 1); (*chan->handler)(token, outlen, status); } static int __cpdma_chan_process(struct cpdma_chan *chan) { struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc __iomem *desc; int status, outlen; int cb_status = 0; struct cpdma_desc_pool *pool = ctlr->pool; dma_addr_t desc_dma; unsigned long flags; spin_lock_irqsave(&chan->lock, flags); desc = chan->head; if (!desc) { chan->stats.empty_dequeue++; status = -ENOENT; goto unlock_ret; } desc_dma = desc_phys(pool, desc); status = __raw_readl(&desc->hw_mode); outlen = status & 0x7ff; if (status & CPDMA_DESC_OWNER) { chan->stats.busy_dequeue++; status = -EBUSY; goto unlock_ret; } status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE | CPDMA_DESC_PORT_MASK); chan->head = desc_from_phys(pool, desc_read(desc, hw_next)); chan_write(chan, cp, desc_dma); chan->count--; chan->stats.good_dequeue++; if (status & CPDMA_DESC_EOQ) { chan->stats.requeue++; chan_write(chan, hdp, desc_phys(pool, chan->head)); } spin_unlock_irqrestore(&chan->lock, flags); if (unlikely(status & CPDMA_DESC_TD_COMPLETE)) cb_status = -ENOSYS; else cb_status = status; __cpdma_chan_free(chan, desc, outlen, cb_status); return status; unlock_ret: spin_unlock_irqrestore(&chan->lock, flags); return status; } int cpdma_chan_process(struct cpdma_chan *chan, int quota) { int used = 0, ret = 0; if (chan->state != CPDMA_STATE_ACTIVE) return -EINVAL; while (used < quota) { ret = __cpdma_chan_process(chan); if (ret < 0) break; used++; } return used; } 
EXPORT_SYMBOL_GPL(cpdma_chan_process);

/* Activate a channel: unmask its interrupt and, if descriptors are
 * already queued, hand the head to the hardware.  -EBUSY if already
 * running, -EINVAL if the controller isn't active. */
int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

/* Tear a channel down: mask its interrupt, trigger hardware teardown,
 * wait (up to ~100ms) for completion, drain completed packets, then
 * free any still-queued descriptors via the handler with -ENOSYS. */
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	spin_lock_irqsave(&chan->lock, flags);
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

/* Unmask (enable) or mask the channel's completion interrupt. */
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/* Description of one software-visible control knob: which ext register
 * it lives in, its bit position/width and whether it is readable and/or
 * writable. */
struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

/* Table indexed by the CPDMA_* control ids from the public header. */
struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

/* Read a control field.  Returns the field value, or a negative errno:
 * -ENOTSUPP (no ext regs), -EINVAL (controller not active), -ENOENT
 * (bad control id), -EPERM (control not readable). */
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	/* NOTE(review): 'info' is computed from 'control' before this
	 * bounds check — the read of controls[control] above happens
	 * even for out-of-range ids.  Confirm callers never pass one. */
	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

/* Write a control field (read-modify-write of the backing register).
 * Same error returns as cpdma_control_get, with -EPERM for a control
 * that is not writable. */
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");
gpl-2.0
ArtisteHsu/jetson-tk1-r21.3-kernel
drivers/edac/tile_edac.c
2315
6597
/* * Copyright 2011 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. * Tilera-specific EDAC driver. * * This source code is derived from the following driver: * * Cell MIC driver for ECC counting * * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/edac.h> #include <hv/hypervisor.h> #include <hv/drv_mshim_intf.h> #include "edac_core.h" #define DRV_NAME "tile-edac" /* Number of cs_rows needed per memory controller on TILEPro. */ #define TILE_EDAC_NR_CSROWS 1 /* Number of channels per memory controller on TILEPro. */ #define TILE_EDAC_NR_CHANS 1 /* Granularity of reported error in bytes on TILEPro. */ #define TILE_EDAC_ERROR_GRAIN 8 /* TILE processor has multiple independent memory controllers. */ struct platform_device *mshim_pdev[TILE_MAX_MSHIMS]; struct tile_edac_priv { int hv_devhdl; /* Hypervisor device handle. */ int node; /* Memory controller instance #. */ unsigned int ce_count; /* * Correctable-error counter * kept by the driver. 
*/ }; static void tile_edac_check(struct mem_ctl_info *mci) { struct tile_edac_priv *priv = mci->pvt_info; struct mshim_mem_error mem_error; if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_error, sizeof(struct mshim_mem_error), MSHIM_MEM_ERROR_OFF) != sizeof(struct mshim_mem_error)) { pr_err(DRV_NAME ": MSHIM_MEM_ERROR_OFF pread failure.\n"); return; } /* Check if the current error count is different from the saved one. */ if (mem_error.sbe_count != priv->ce_count) { dev_dbg(mci->pdev, "ECC CE err on node %d\n", priv->node); priv->ce_count = mem_error.sbe_count; edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, 0, 0, -1, mci->ctl_name, ""); } } /* * Initialize the 'csrows' table within the mci control structure with the * addressing of memory. */ static int tile_edac_init_csrows(struct mem_ctl_info *mci) { struct csrow_info *csrow = mci->csrows[0]; struct tile_edac_priv *priv = mci->pvt_info; struct mshim_mem_info mem_info; struct dimm_info *dimm = csrow->channels[0]->dimm; if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info, sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) != sizeof(struct mshim_mem_info)) { pr_err(DRV_NAME ": MSHIM_MEM_INFO_OFF pread failure.\n"); return -1; } if (mem_info.mem_ecc) dimm->edac_mode = EDAC_SECDED; else dimm->edac_mode = EDAC_NONE; switch (mem_info.mem_type) { case DDR2: dimm->mtype = MEM_DDR2; break; case DDR3: dimm->mtype = MEM_DDR3; break; default: return -1; } dimm->nr_pages = mem_info.mem_size >> PAGE_SHIFT; dimm->grain = TILE_EDAC_ERROR_GRAIN; dimm->dtype = DEV_UNKNOWN; return 0; } static int tile_edac_mc_probe(struct platform_device *pdev) { char hv_file[32]; int hv_devhdl; struct mem_ctl_info *mci; struct edac_mc_layer layers[2]; struct tile_edac_priv *priv; int rc; sprintf(hv_file, "mshim/%d", pdev->id); hv_devhdl = hv_dev_open((HV_VirtAddr)hv_file, 0); if (hv_devhdl < 0) return -EINVAL; /* A TILE MC has a single channel and one chip-select row. 
*/ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; layers[0].size = TILE_EDAC_NR_CSROWS; layers[0].is_virt_csrow = true; layers[1].type = EDAC_MC_LAYER_CHANNEL; layers[1].size = TILE_EDAC_NR_CHANS; layers[1].is_virt_csrow = false; mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers, sizeof(struct tile_edac_priv)); if (mci == NULL) return -ENOMEM; priv = mci->pvt_info; priv->node = pdev->id; priv->hv_devhdl = hv_devhdl; mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR2; mci->edac_ctl_cap = EDAC_FLAG_SECDED; mci->mod_name = DRV_NAME; #ifdef __tilegx__ mci->ctl_name = "TILEGx_Memory_Controller"; #else mci->ctl_name = "TILEPro_Memory_Controller"; #endif mci->dev_name = dev_name(&pdev->dev); mci->edac_check = tile_edac_check; /* * Initialize the MC control structure 'csrows' table * with the mapping and control information. */ if (tile_edac_init_csrows(mci)) { /* No csrows found. */ mci->edac_cap = EDAC_FLAG_NONE; } else { mci->edac_cap = EDAC_FLAG_SECDED; } platform_set_drvdata(pdev, mci); /* Register with EDAC core */ rc = edac_mc_add_mc(mci); if (rc) { dev_err(&pdev->dev, "failed to register with EDAC core\n"); edac_mc_free(mci); return rc; } return 0; } static int tile_edac_mc_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); edac_mc_del_mc(&pdev->dev); if (mci) edac_mc_free(mci); return 0; } static struct platform_driver tile_edac_mc_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = tile_edac_mc_probe, .remove = tile_edac_mc_remove, }; /* * Driver init routine. */ static int __init tile_edac_init(void) { char hv_file[32]; struct platform_device *pdev; int i, err, num = 0; /* Only support POLL mode. */ edac_op_state = EDAC_OPSTATE_POLL; err = platform_driver_register(&tile_edac_mc_driver); if (err) return err; for (i = 0; i < TILE_MAX_MSHIMS; i++) { /* * Not all memory controllers are configured such as in the * case of a simulator. 
So we register only those mshims * that are configured by the hypervisor. */ sprintf(hv_file, "mshim/%d", i); if (hv_dev_open((HV_VirtAddr)hv_file, 0) < 0) continue; pdev = platform_device_register_simple(DRV_NAME, i, NULL, 0); if (IS_ERR(pdev)) continue; mshim_pdev[i] = pdev; num++; } if (num == 0) { platform_driver_unregister(&tile_edac_mc_driver); return -ENODEV; } return 0; } /* * Driver cleanup routine. */ static void __exit tile_edac_exit(void) { int i; for (i = 0; i < TILE_MAX_MSHIMS; i++) { struct platform_device *pdev = mshim_pdev[i]; if (!pdev) continue; platform_set_drvdata(pdev, NULL); platform_device_unregister(pdev); } platform_driver_unregister(&tile_edac_mc_driver); } module_init(tile_edac_init); module_exit(tile_edac_exit);
gpl-2.0
Radium-Devices/Radium_yu
arch/x86/kernel/apic/ipi.c
2315
3807
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/proto.h>
#include <asm/ipi.h>

/* Send 'vector' as a unicast IPI to each CPU in 'mask', addressed by
 * its physical APIC id.  IRQs are disabled across the whole sequence
 * so the destination-field writes are not interleaved. */
void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/* Same as above, but skips the calling CPU. */
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				 query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32

/* Unicast 'vector' to each CPU in 'mask' using logical APIC ids. */
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicasts to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

/* Logical-mode unicast to every CPU in 'mask' except the caller. */
void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
		}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	/* Flat logical mode: the first word of the cpumask IS the
	 * destination bitmask, so one shot reaches every CPU in it. */
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

/* Broadcast 'vector' to all CPUs but the caller (no-op if UP). */
void default_send_IPI_allbutself(int vector)
{
	/*
	 * if there are no other CPUs in the system then we get an APIC send
	 * error if we try to broadcast, thus avoid sending IPIs in this case.
	 */
	if (!(num_online_cpus() > 1))
		return;

	__default_local_send_IPI_allbutself(vector);
}

/* Broadcast 'vector' to all CPUs including the caller. */
void default_send_IPI_all(int vector)
{
	__default_local_send_IPI_all(vector);
}

/* Send 'vector' to the calling CPU only. */
void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
}

/* must come after the send_IPI functions above for inlining */

/* Linear search of the apicid->cpu mapping; -1 if no CPU matches. */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

/* Like smp_processor_id() but safe very early / in bad contexts:
 * falls back to CPU 0 when no APIC or no mapping is available. */
int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!cpu_has_apic)
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif
gpl-2.0
BenefitA3/android_kernel_ark_msm8916
arch/arm/mach-prima2/l2x0.c
2571
1135
/* * l2 cache initialization for CSR SiRFprimaII * * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. * * Licensed under GPLv2 or later. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/of.h> #include <asm/hardware/cache-l2x0.h> struct l2x0_aux { u32 val; u32 mask; }; static struct l2x0_aux prima2_l2x0_aux __initconst = { .val = 2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT, .mask = 0, }; static struct l2x0_aux marco_l2x0_aux __initconst = { .val = (2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) | (1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT), .mask = L2X0_AUX_CTRL_MASK, }; static struct of_device_id sirf_l2x0_ids[] __initconst = { { .compatible = "sirf,prima2-pl310-cache", .data = &prima2_l2x0_aux, }, { .compatible = "sirf,marco-pl310-cache", .data = &marco_l2x0_aux, }, {}, }; static int __init sirfsoc_l2x0_init(void) { struct device_node *np; const struct l2x0_aux *aux; np = of_find_matching_node(NULL, sirf_l2x0_ids); if (np) { aux = of_match_node(sirf_l2x0_ids, np)->data; return l2x0_of_init(aux->val, aux->mask); } return 0; } early_initcall(sirfsoc_l2x0_init);
gpl-2.0
hiikezoe/android_kernel_kyocera_dm015k
drivers/firmware/dmi_scan.c
3083
17888
#include <linux/types.h> #include <linux/string.h> #include <linux/init.h> #include <linux/module.h> #include <linux/ctype.h> #include <linux/dmi.h> #include <linux/efi.h> #include <linux/bootmem.h> #include <asm/dmi.h> /* * DMI stands for "Desktop Management Interface". It is part * of and an antecedent to, SMBIOS, which stands for System * Management BIOS. See further: http://www.dmtf.org/standards */ static char dmi_empty_string[] = " "; /* * Catch too early calls to dmi_check_system(): */ static int dmi_initialized; static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s) { const u8 *bp = ((u8 *) dm) + dm->length; if (s) { s--; while (s > 0 && *bp) { bp += strlen(bp) + 1; s--; } if (*bp != 0) { size_t len = strlen(bp)+1; size_t cmp_len = len > 8 ? 8 : len; if (!memcmp(bp, dmi_empty_string, cmp_len)) return dmi_empty_string; return bp; } } return ""; } static char * __init dmi_string(const struct dmi_header *dm, u8 s) { const char *bp = dmi_string_nosave(dm, s); char *str; size_t len; if (bp == dmi_empty_string) return dmi_empty_string; len = strlen(bp) + 1; str = dmi_alloc(len); if (str != NULL) strcpy(str, bp); else printk(KERN_ERR "dmi_string: cannot allocate %Zu bytes.\n", len); return str; } /* * We have to be cautious here. 
We have seen BIOSes with DMI pointers * pointing to completely the wrong place for example */ static void dmi_table(u8 *buf, int len, int num, void (*decode)(const struct dmi_header *, void *), void *private_data) { u8 *data = buf; int i = 0; /* * Stop when we see all the items the table claimed to have * OR we run off the end of the table (also happens) */ while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) { const struct dmi_header *dm = (const struct dmi_header *)data; /* * We want to know the total length (formatted area and * strings) before decoding to make sure we won't run off the * table in dmi_decode or dmi_string */ data += dm->length; while ((data - buf < len - 1) && (data[0] || data[1])) data++; if (data - buf < len - 1) decode(dm, private_data); data += 2; i++; } } static u32 dmi_base; static u16 dmi_len; static u16 dmi_num; static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, void *)) { u8 *buf; buf = dmi_ioremap(dmi_base, dmi_len); if (buf == NULL) return -1; dmi_table(buf, dmi_len, dmi_num, decode, NULL); dmi_iounmap(buf, dmi_len); return 0; } static int __init dmi_checksum(const u8 *buf) { u8 sum = 0; int a; for (a = 0; a < 15; a++) sum += buf[a]; return sum == 0; } static char *dmi_ident[DMI_STRING_MAX]; static LIST_HEAD(dmi_devices); int dmi_available; /* * Save a DMI string */ static void __init dmi_save_ident(const struct dmi_header *dm, int slot, int string) { const char *d = (const char*) dm; char *p; if (dmi_ident[slot]) return; p = dmi_string(dm, d[string]); if (p == NULL) return; dmi_ident[slot] = p; } static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int index) { const u8 *d = (u8*) dm + index; char *s; int is_ff = 1, is_00 = 1, i; if (dmi_ident[slot]) return; for (i = 0; i < 16 && (is_ff || is_00); i++) { if(d[i] != 0x00) is_ff = 0; if(d[i] != 0xFF) is_00 = 0; } if (is_ff || is_00) return; s = dmi_alloc(16*2+4+1); if (!s) return; sprintf(s, "%pUB", d); dmi_ident[slot] = s; } 
static void __init dmi_save_type(const struct dmi_header *dm, int slot, int index) { const u8 *d = (u8*) dm + index; char *s; if (dmi_ident[slot]) return; s = dmi_alloc(4); if (!s) return; sprintf(s, "%u", *d & 0x7F); dmi_ident[slot] = s; } static void __init dmi_save_one_device(int type, const char *name) { struct dmi_device *dev; /* No duplicate device */ if (dmi_find_device(type, name, NULL)) return; dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1); if (!dev) { printk(KERN_ERR "dmi_save_one_device: out of memory.\n"); return; } dev->type = type; strcpy((char *)(dev + 1), name); dev->name = (char *)(dev + 1); dev->device_data = NULL; list_add(&dev->list, &dmi_devices); } static void __init dmi_save_devices(const struct dmi_header *dm) { int i, count = (dm->length - sizeof(struct dmi_header)) / 2; for (i = 0; i < count; i++) { const char *d = (char *)(dm + 1) + (i * 2); /* Skip disabled device */ if ((*d & 0x80) == 0) continue; dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d + 1))); } } static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) { int i, count = *(u8 *)(dm + 1); struct dmi_device *dev; for (i = 1; i <= count; i++) { char *devname = dmi_string(dm, i); if (devname == dmi_empty_string) continue; dev = dmi_alloc(sizeof(*dev)); if (!dev) { printk(KERN_ERR "dmi_save_oem_strings_devices: out of memory.\n"); break; } dev->type = DMI_DEV_TYPE_OEM_STRING; dev->name = devname; dev->device_data = NULL; list_add(&dev->list, &dmi_devices); } } static void __init dmi_save_ipmi_device(const struct dmi_header *dm) { struct dmi_device *dev; void * data; data = dmi_alloc(dm->length); if (data == NULL) { printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n"); return; } memcpy(data, dm, dm->length); dev = dmi_alloc(sizeof(*dev)); if (!dev) { printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n"); return; } dev->type = DMI_DEV_TYPE_IPMI; dev->name = "IPMI controller"; dev->device_data = data; list_add_tail(&dev->list, &dmi_devices); } 
static void __init dmi_save_dev_onboard(int instance, int segment, int bus, int devfn, const char *name) { struct dmi_dev_onboard *onboard_dev; onboard_dev = dmi_alloc(sizeof(*onboard_dev) + strlen(name) + 1); if (!onboard_dev) { printk(KERN_ERR "dmi_save_dev_onboard: out of memory.\n"); return; } onboard_dev->instance = instance; onboard_dev->segment = segment; onboard_dev->bus = bus; onboard_dev->devfn = devfn; strcpy((char *)&onboard_dev[1], name); onboard_dev->dev.type = DMI_DEV_TYPE_DEV_ONBOARD; onboard_dev->dev.name = (char *)&onboard_dev[1]; onboard_dev->dev.device_data = onboard_dev; list_add(&onboard_dev->dev.list, &dmi_devices); } static void __init dmi_save_extended_devices(const struct dmi_header *dm) { const u8 *d = (u8*) dm + 5; /* Skip disabled device */ if ((*d & 0x80) == 0) return; dmi_save_dev_onboard(*(d+1), *(u16 *)(d+2), *(d+4), *(d+5), dmi_string_nosave(dm, *(d-1))); dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d - 1))); } /* * Process a DMI table entry. Right now all we care about are the BIOS * and machine entries. For 2.5 we should pull the smbus controller info * out of here. 
*/ static void __init dmi_decode(const struct dmi_header *dm, void *dummy) { switch(dm->type) { case 0: /* BIOS Information */ dmi_save_ident(dm, DMI_BIOS_VENDOR, 4); dmi_save_ident(dm, DMI_BIOS_VERSION, 5); dmi_save_ident(dm, DMI_BIOS_DATE, 8); break; case 1: /* System Information */ dmi_save_ident(dm, DMI_SYS_VENDOR, 4); dmi_save_ident(dm, DMI_PRODUCT_NAME, 5); dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6); dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7); dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8); break; case 2: /* Base Board Information */ dmi_save_ident(dm, DMI_BOARD_VENDOR, 4); dmi_save_ident(dm, DMI_BOARD_NAME, 5); dmi_save_ident(dm, DMI_BOARD_VERSION, 6); dmi_save_ident(dm, DMI_BOARD_SERIAL, 7); dmi_save_ident(dm, DMI_BOARD_ASSET_TAG, 8); break; case 3: /* Chassis Information */ dmi_save_ident(dm, DMI_CHASSIS_VENDOR, 4); dmi_save_type(dm, DMI_CHASSIS_TYPE, 5); dmi_save_ident(dm, DMI_CHASSIS_VERSION, 6); dmi_save_ident(dm, DMI_CHASSIS_SERIAL, 7); dmi_save_ident(dm, DMI_CHASSIS_ASSET_TAG, 8); break; case 10: /* Onboard Devices Information */ dmi_save_devices(dm); break; case 11: /* OEM Strings */ dmi_save_oem_strings_devices(dm); break; case 38: /* IPMI Device Information */ dmi_save_ipmi_device(dm); break; case 41: /* Onboard Devices Extended Information */ dmi_save_extended_devices(dm); } } static void __init print_filtered(const char *info) { const char *p; if (!info) return; for (p = info; *p; p++) if (isprint(*p)) printk(KERN_CONT "%c", *p); else printk(KERN_CONT "\\x%02x", *p & 0xff); } static void __init dmi_dump_ids(void) { const char *board; /* Board Name is optional */ printk(KERN_DEBUG "DMI: "); print_filtered(dmi_get_system_info(DMI_SYS_VENDOR)); printk(KERN_CONT " "); print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME)); board = dmi_get_system_info(DMI_BOARD_NAME); if (board) { printk(KERN_CONT "/"); print_filtered(board); } printk(KERN_CONT ", BIOS "); print_filtered(dmi_get_system_info(DMI_BIOS_VERSION)); printk(KERN_CONT " "); 
print_filtered(dmi_get_system_info(DMI_BIOS_DATE)); printk(KERN_CONT "\n"); } static int __init dmi_present(const char __iomem *p) { u8 buf[15]; memcpy_fromio(buf, p, 15); if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) { dmi_num = (buf[13] << 8) | buf[12]; dmi_len = (buf[7] << 8) | buf[6]; dmi_base = (buf[11] << 24) | (buf[10] << 16) | (buf[9] << 8) | buf[8]; /* * DMI version 0.0 means that the real version is taken from * the SMBIOS version, which we don't know at this point. */ if (buf[14] != 0) printk(KERN_INFO "DMI %d.%d present.\n", buf[14] >> 4, buf[14] & 0xF); else printk(KERN_INFO "DMI present.\n"); if (dmi_walk_early(dmi_decode) == 0) { dmi_dump_ids(); return 0; } } return 1; } void __init dmi_scan_machine(void) { char __iomem *p, *q; int rc; if (efi_enabled) { if (efi.smbios == EFI_INVALID_TABLE_ADDR) goto error; /* This is called as a core_initcall() because it isn't * needed during early boot. This also means we can * iounmap the space when we're done with it. */ p = dmi_ioremap(efi.smbios, 32); if (p == NULL) goto error; rc = dmi_present(p + 0x10); /* offset of _DMI_ string */ dmi_iounmap(p, 32); if (!rc) { dmi_available = 1; goto out; } } else { /* * no iounmap() for that ioremap(); it would be a no-op, but * it's so early in setup that sucker gets confused into doing * what it shouldn't if we actually call it. 
*/ p = dmi_ioremap(0xF0000, 0x10000); if (p == NULL) goto error; for (q = p; q < p + 0x10000; q += 16) { rc = dmi_present(q); if (!rc) { dmi_available = 1; dmi_iounmap(p, 0x10000); goto out; } } dmi_iounmap(p, 0x10000); } error: printk(KERN_INFO "DMI not present or invalid.\n"); out: dmi_initialized = 1; } /** * dmi_matches - check if dmi_system_id structure matches system DMI data * @dmi: pointer to the dmi_system_id structure to check */ static bool dmi_matches(const struct dmi_system_id *dmi) { int i; WARN(!dmi_initialized, KERN_ERR "dmi check: not initialized yet.\n"); for (i = 0; i < ARRAY_SIZE(dmi->matches); i++) { int s = dmi->matches[i].slot; if (s == DMI_NONE) break; if (dmi_ident[s] && strstr(dmi_ident[s], dmi->matches[i].substr)) continue; /* No match */ return false; } return true; } /** * dmi_is_end_of_table - check for end-of-table marker * @dmi: pointer to the dmi_system_id structure to check */ static bool dmi_is_end_of_table(const struct dmi_system_id *dmi) { return dmi->matches[0].slot == DMI_NONE; } /** * dmi_check_system - check system DMI data * @list: array of dmi_system_id structures to match against * All non-null elements of the list must match * their slot's (field index's) data (i.e., each * list string must be a substring of the specified * DMI slot's string data) to be considered a * successful match. * * Walk the blacklist table running matching functions until someone * returns non zero or we hit the end. Callback function is called for * each successful match. Returns the number of matches. 
*/ int dmi_check_system(const struct dmi_system_id *list) { int count = 0; const struct dmi_system_id *d; for (d = list; !dmi_is_end_of_table(d); d++) if (dmi_matches(d)) { count++; if (d->callback && d->callback(d)) break; } return count; } EXPORT_SYMBOL(dmi_check_system); /** * dmi_first_match - find dmi_system_id structure matching system DMI data * @list: array of dmi_system_id structures to match against * All non-null elements of the list must match * their slot's (field index's) data (i.e., each * list string must be a substring of the specified * DMI slot's string data) to be considered a * successful match. * * Walk the blacklist table until the first match is found. Return the * pointer to the matching entry or NULL if there's no match. */ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list) { const struct dmi_system_id *d; for (d = list; !dmi_is_end_of_table(d); d++) if (dmi_matches(d)) return d; return NULL; } EXPORT_SYMBOL(dmi_first_match); /** * dmi_get_system_info - return DMI data value * @field: data index (see enum dmi_field) * * Returns one DMI data value, can be used to perform * complex DMI data checks. 
*/ const char *dmi_get_system_info(int field) { return dmi_ident[field]; } EXPORT_SYMBOL(dmi_get_system_info); /** * dmi_name_in_serial - Check if string is in the DMI product serial information * @str: string to check for */ int dmi_name_in_serial(const char *str) { int f = DMI_PRODUCT_SERIAL; if (dmi_ident[f] && strstr(dmi_ident[f], str)) return 1; return 0; } /** * dmi_name_in_vendors - Check if string is in the DMI system or board vendor name * @str: Case sensitive Name */ int dmi_name_in_vendors(const char *str) { static int fields[] = { DMI_SYS_VENDOR, DMI_BOARD_VENDOR, DMI_NONE }; int i; for (i = 0; fields[i] != DMI_NONE; i++) { int f = fields[i]; if (dmi_ident[f] && strstr(dmi_ident[f], str)) return 1; } return 0; } EXPORT_SYMBOL(dmi_name_in_vendors); /** * dmi_find_device - find onboard device by type/name * @type: device type or %DMI_DEV_TYPE_ANY to match all device types * @name: device name string or %NULL to match all * @from: previous device found in search, or %NULL for new search. * * Iterates through the list of known onboard devices. If a device is * found with a matching @vendor and @device, a pointer to its device * structure is returned. Otherwise, %NULL is returned. * A new search is initiated by passing %NULL as the @from argument. * If @from is not %NULL, searches continue from next device. */ const struct dmi_device * dmi_find_device(int type, const char *name, const struct dmi_device *from) { const struct list_head *head = from ? 
&from->list : &dmi_devices; struct list_head *d; for(d = head->next; d != &dmi_devices; d = d->next) { const struct dmi_device *dev = list_entry(d, struct dmi_device, list); if (((type == DMI_DEV_TYPE_ANY) || (dev->type == type)) && ((name == NULL) || (strcmp(dev->name, name) == 0))) return dev; } return NULL; } EXPORT_SYMBOL(dmi_find_device); /** * dmi_get_date - parse a DMI date * @field: data index (see enum dmi_field) * @yearp: optional out parameter for the year * @monthp: optional out parameter for the month * @dayp: optional out parameter for the day * * The date field is assumed to be in the form resembling * [mm[/dd]]/yy[yy] and the result is stored in the out * parameters any or all of which can be omitted. * * If the field doesn't exist, all out parameters are set to zero * and false is returned. Otherwise, true is returned with any * invalid part of date set to zero. * * On return, year, month and day are guaranteed to be in the * range of [0,9999], [0,12] and [0,31] respectively. */ bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp) { int year = 0, month = 0, day = 0; bool exists; const char *s, *y; char *e; s = dmi_get_system_info(field); exists = s; if (!exists) goto out; /* * Determine year first. We assume the date string resembles * mm/dd/yy[yy] but the original code extracted only the year * from the end. Keep the behavior in the spirit of no * surprises. 
*/ y = strrchr(s, '/'); if (!y) goto out; y++; year = simple_strtoul(y, &e, 10); if (y != e && year < 100) { /* 2-digit year */ year += 1900; if (year < 1996) /* no dates < spec 1.0 */ year += 100; } if (year > 9999) /* year should fit in %04d */ year = 0; /* parse the mm and dd */ month = simple_strtoul(s, &e, 10); if (s == e || *e != '/' || !month || month > 12) { month = 0; goto out; } s = e + 1; day = simple_strtoul(s, &e, 10); if (s == y || s == e || *e != '/' || day > 31) day = 0; out: if (yearp) *yearp = year; if (monthp) *monthp = month; if (dayp) *dayp = day; return exists; } EXPORT_SYMBOL(dmi_get_date); /** * dmi_walk - Walk the DMI table and get called back for every record * @decode: Callback function * @private_data: Private data to be passed to the callback function * * Returns -1 when the DMI table can't be reached, 0 on success. */ int dmi_walk(void (*decode)(const struct dmi_header *, void *), void *private_data) { u8 *buf; if (!dmi_available) return -1; buf = ioremap(dmi_base, dmi_len); if (buf == NULL) return -1; dmi_table(buf, dmi_len, dmi_num, decode, private_data); iounmap(buf); return 0; } EXPORT_SYMBOL_GPL(dmi_walk); /** * dmi_match - compare a string to the dmi field (if exists) * @f: DMI field identifier * @str: string to compare the DMI field to * * Returns true if the requested field equals to the str (including NULL). */ bool dmi_match(enum dmi_field f, const char *str) { const char *info = dmi_get_system_info(f); if (info == NULL || str == NULL) return info == str; return !strcmp(info, str); } EXPORT_SYMBOL_GPL(dmi_match);
gpl-2.0
maxwen/android_kernel_oppo_n1
sound/soc/pxa/pxa-ssp.c
4107
20043
/* * pxa-ssp.c -- ALSA Soc Audio Layer * * Copyright 2005,2008 Wolfson Microelectronics PLC. * Author: Liam Girdwood * Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * TODO: * o Test network mode for > 16bit sample size */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/pxa2xx_ssp.h> #include <asm/irq.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/pxa2xx-lib.h> #include <mach/hardware.h> #include <mach/dma.h> #include <mach/audio.h> #include "../../arm/pxa2xx-pcm.h" #include "pxa-ssp.h" /* * SSP audio private data */ struct ssp_priv { struct ssp_device *ssp; unsigned int sysclk; int dai_fmt; #ifdef CONFIG_PM uint32_t cr0; uint32_t cr1; uint32_t to; uint32_t psp; #endif }; static void dump_registers(struct ssp_device *ssp) { dev_dbg(&ssp->pdev->dev, "SSCR0 0x%08x SSCR1 0x%08x SSTO 0x%08x\n", pxa_ssp_read_reg(ssp, SSCR0), pxa_ssp_read_reg(ssp, SSCR1), pxa_ssp_read_reg(ssp, SSTO)); dev_dbg(&ssp->pdev->dev, "SSPSP 0x%08x SSSR 0x%08x SSACD 0x%08x\n", pxa_ssp_read_reg(ssp, SSPSP), pxa_ssp_read_reg(ssp, SSSR), pxa_ssp_read_reg(ssp, SSACD)); } static void pxa_ssp_enable(struct ssp_device *ssp) { uint32_t sscr0; sscr0 = __raw_readl(ssp->mmio_base + SSCR0) | SSCR0_SSE; __raw_writel(sscr0, ssp->mmio_base + SSCR0); } static void pxa_ssp_disable(struct ssp_device *ssp) { uint32_t sscr0; sscr0 = __raw_readl(ssp->mmio_base + SSCR0) & ~SSCR0_SSE; __raw_writel(sscr0, ssp->mmio_base + SSCR0); } struct pxa2xx_pcm_dma_data { struct pxa2xx_pcm_dma_params params; char name[20]; }; static struct pxa2xx_pcm_dma_params * 
pxa_ssp_get_dma_params(struct ssp_device *ssp, int width4, int out) { struct pxa2xx_pcm_dma_data *dma; dma = kzalloc(sizeof(struct pxa2xx_pcm_dma_data), GFP_KERNEL); if (dma == NULL) return NULL; snprintf(dma->name, 20, "SSP%d PCM %s %s", ssp->port_id, width4 ? "32-bit" : "16-bit", out ? "out" : "in"); dma->params.name = dma->name; dma->params.drcmr = &DRCMR(out ? ssp->drcmr_tx : ssp->drcmr_rx); dma->params.dcmd = (out ? (DCMD_INCSRCADDR | DCMD_FLOWTRG) : (DCMD_INCTRGADDR | DCMD_FLOWSRC)) | (width4 ? DCMD_WIDTH4 : DCMD_WIDTH2) | DCMD_BURST16; dma->params.dev_addr = ssp->phys_base + SSDR; return &dma->params; } static int pxa_ssp_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; int ret = 0; if (!cpu_dai->active) { clk_enable(ssp->clk); pxa_ssp_disable(ssp); } kfree(snd_soc_dai_get_dma_data(cpu_dai, substream)); snd_soc_dai_set_dma_data(cpu_dai, substream, NULL); return ret; } static void pxa_ssp_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; if (!cpu_dai->active) { pxa_ssp_disable(ssp); clk_disable(ssp->clk); } kfree(snd_soc_dai_get_dma_data(cpu_dai, substream)); snd_soc_dai_set_dma_data(cpu_dai, substream, NULL); } #ifdef CONFIG_PM static int pxa_ssp_suspend(struct snd_soc_dai *cpu_dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; if (!cpu_dai->active) clk_enable(ssp->clk); priv->cr0 = __raw_readl(ssp->mmio_base + SSCR0); priv->cr1 = __raw_readl(ssp->mmio_base + SSCR1); priv->to = __raw_readl(ssp->mmio_base + SSTO); priv->psp = __raw_readl(ssp->mmio_base + SSPSP); pxa_ssp_disable(ssp); clk_disable(ssp->clk); return 0; } static int pxa_ssp_resume(struct snd_soc_dai *cpu_dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; 
uint32_t sssr = SSSR_ROR | SSSR_TUR | SSSR_BCE; clk_enable(ssp->clk); __raw_writel(sssr, ssp->mmio_base + SSSR); __raw_writel(priv->cr0 & ~SSCR0_SSE, ssp->mmio_base + SSCR0); __raw_writel(priv->cr1, ssp->mmio_base + SSCR1); __raw_writel(priv->to, ssp->mmio_base + SSTO); __raw_writel(priv->psp, ssp->mmio_base + SSPSP); if (cpu_dai->active) pxa_ssp_enable(ssp); else clk_disable(ssp->clk); return 0; } #else #define pxa_ssp_suspend NULL #define pxa_ssp_resume NULL #endif /** * ssp_set_clkdiv - set SSP clock divider * @div: serial clock rate divider */ static void pxa_ssp_set_scr(struct ssp_device *ssp, u32 div) { u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0); if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP) { sscr0 &= ~0x0000ff00; sscr0 |= ((div - 2)/2) << 8; /* 2..512 */ } else { sscr0 &= ~0x000fff00; sscr0 |= (div - 1) << 8; /* 1..4096 */ } pxa_ssp_write_reg(ssp, SSCR0, sscr0); } /** * pxa_ssp_get_clkdiv - get SSP clock divider */ static u32 pxa_ssp_get_scr(struct ssp_device *ssp) { u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0); u32 div; if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP) div = ((sscr0 >> 8) & 0xff) * 2 + 2; else div = ((sscr0 >> 8) & 0xfff) + 1; return div; } /* * Set the SSP ports SYSCLK. 
*/ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai, int clk_id, unsigned int freq, int dir) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; int val; u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~(SSCR0_ECS | SSCR0_NCS | SSCR0_MOD | SSCR0_ACS); dev_dbg(&ssp->pdev->dev, "pxa_ssp_set_dai_sysclk id: %d, clk_id %d, freq %u\n", cpu_dai->id, clk_id, freq); switch (clk_id) { case PXA_SSP_CLK_NET_PLL: sscr0 |= SSCR0_MOD; break; case PXA_SSP_CLK_PLL: /* Internal PLL is fixed */ if (cpu_is_pxa25x()) priv->sysclk = 1843200; else priv->sysclk = 13000000; break; case PXA_SSP_CLK_EXT: priv->sysclk = freq; sscr0 |= SSCR0_ECS; break; case PXA_SSP_CLK_NET: priv->sysclk = freq; sscr0 |= SSCR0_NCS | SSCR0_MOD; break; case PXA_SSP_CLK_AUDIO: priv->sysclk = 0; pxa_ssp_set_scr(ssp, 1); sscr0 |= SSCR0_ACS; break; default: return -ENODEV; } /* The SSP clock must be disabled when changing SSP clock mode * on PXA2xx. On PXA3xx it must be enabled when doing so. */ if (!cpu_is_pxa3xx()) clk_disable(ssp->clk); val = pxa_ssp_read_reg(ssp, SSCR0) | sscr0; pxa_ssp_write_reg(ssp, SSCR0, val); if (!cpu_is_pxa3xx()) clk_enable(ssp->clk); return 0; } /* * Set the SSP clock dividers. 
*/ static int pxa_ssp_set_dai_clkdiv(struct snd_soc_dai *cpu_dai, int div_id, int div) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; int val; switch (div_id) { case PXA_SSP_AUDIO_DIV_ACDS: val = (pxa_ssp_read_reg(ssp, SSACD) & ~0x7) | SSACD_ACDS(div); pxa_ssp_write_reg(ssp, SSACD, val); break; case PXA_SSP_AUDIO_DIV_SCDB: val = pxa_ssp_read_reg(ssp, SSACD); val &= ~SSACD_SCDB; #if defined(CONFIG_PXA3xx) if (cpu_is_pxa3xx()) val &= ~SSACD_SCDX8; #endif switch (div) { case PXA_SSP_CLK_SCDB_1: val |= SSACD_SCDB; break; case PXA_SSP_CLK_SCDB_4: break; #if defined(CONFIG_PXA3xx) case PXA_SSP_CLK_SCDB_8: if (cpu_is_pxa3xx()) val |= SSACD_SCDX8; else return -EINVAL; break; #endif default: return -EINVAL; } pxa_ssp_write_reg(ssp, SSACD, val); break; case PXA_SSP_DIV_SCR: pxa_ssp_set_scr(ssp, div); break; default: return -ENODEV; } return 0; } /* * Configure the PLL frequency pxa27x and (afaik - pxa320 only) */ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id, int source, unsigned int freq_in, unsigned int freq_out) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; u32 ssacd = pxa_ssp_read_reg(ssp, SSACD) & ~0x70; #if defined(CONFIG_PXA3xx) if (cpu_is_pxa3xx()) pxa_ssp_write_reg(ssp, SSACDD, 0); #endif switch (freq_out) { case 5622000: break; case 11345000: ssacd |= (0x1 << 4); break; case 12235000: ssacd |= (0x2 << 4); break; case 14857000: ssacd |= (0x3 << 4); break; case 32842000: ssacd |= (0x4 << 4); break; case 48000000: ssacd |= (0x5 << 4); break; case 0: /* Disable */ break; default: #ifdef CONFIG_PXA3xx /* PXA3xx has a clock ditherer which can be used to generate * a wider range of frequencies - calculate a value for it. 
*/ if (cpu_is_pxa3xx()) { u32 val; u64 tmp = 19968; tmp *= 1000000; do_div(tmp, freq_out); val = tmp; val = (val << 16) | 64; pxa_ssp_write_reg(ssp, SSACDD, val); ssacd |= (0x6 << 4); dev_dbg(&ssp->pdev->dev, "Using SSACDD %x to supply %uHz\n", val, freq_out); break; } #endif return -EINVAL; } pxa_ssp_write_reg(ssp, SSACD, ssacd); return 0; } /* * Set the active slots in TDM/Network mode */ static int pxa_ssp_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; u32 sscr0; sscr0 = pxa_ssp_read_reg(ssp, SSCR0); sscr0 &= ~(SSCR0_MOD | SSCR0_SlotsPerFrm(8) | SSCR0_EDSS | SSCR0_DSS); /* set slot width */ if (slot_width > 16) sscr0 |= SSCR0_EDSS | SSCR0_DataSize(slot_width - 16); else sscr0 |= SSCR0_DataSize(slot_width); if (slots > 1) { /* enable network mode */ sscr0 |= SSCR0_MOD; /* set number of active slots */ sscr0 |= SSCR0_SlotsPerFrm(slots); /* set active slot mask */ pxa_ssp_write_reg(ssp, SSTSA, tx_mask); pxa_ssp_write_reg(ssp, SSRSA, rx_mask); } pxa_ssp_write_reg(ssp, SSCR0, sscr0); return 0; } /* * Tristate the SSP DAI lines */ static int pxa_ssp_set_dai_tristate(struct snd_soc_dai *cpu_dai, int tristate) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; u32 sscr1; sscr1 = pxa_ssp_read_reg(ssp, SSCR1); if (tristate) sscr1 &= ~SSCR1_TTE; else sscr1 |= SSCR1_TTE; pxa_ssp_write_reg(ssp, SSCR1, sscr1); return 0; } /* * Set up the SSP DAI format. * The SSP Port must be inactive before calling this function as the * physical interface format is changed. 
*/ static int pxa_ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; u32 sscr0, sscr1, sspsp, scfr; /* check if we need to change anything at all */ if (priv->dai_fmt == fmt) return 0; /* we can only change the settings if the port is not in use */ if (pxa_ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) { dev_err(&ssp->pdev->dev, "can't change hardware dai format: stream is in use"); return -EINVAL; } /* reset port settings */ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~(SSCR0_ECS | SSCR0_NCS | SSCR0_MOD | SSCR0_ACS); sscr1 = SSCR1_RxTresh(8) | SSCR1_TxTresh(7); sspsp = 0; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: sscr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR | SSCR1_SCFR; break; case SND_SOC_DAIFMT_CBM_CFS: sscr1 |= SSCR1_SCLKDIR | SSCR1_SCFR; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: sspsp |= SSPSP_SFRMP; break; case SND_SOC_DAIFMT_NB_IF: break; case SND_SOC_DAIFMT_IB_IF: sspsp |= SSPSP_SCMODE(2); break; case SND_SOC_DAIFMT_IB_NF: sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: sscr0 |= SSCR0_PSP; sscr1 |= SSCR1_RWOT | SSCR1_TRAIL; /* See hw_params() */ break; case SND_SOC_DAIFMT_DSP_A: sspsp |= SSPSP_FSRT; case SND_SOC_DAIFMT_DSP_B: sscr0 |= SSCR0_MOD | SSCR0_PSP; sscr1 |= SSCR1_TRAIL | SSCR1_RWOT; break; default: return -EINVAL; } pxa_ssp_write_reg(ssp, SSCR0, sscr0); pxa_ssp_write_reg(ssp, SSCR1, sscr1); pxa_ssp_write_reg(ssp, SSPSP, sspsp); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: case SND_SOC_DAIFMT_CBM_CFS: scfr = pxa_ssp_read_reg(ssp, SSCR1) | SSCR1_SCFR; pxa_ssp_write_reg(ssp, SSCR1, scfr); while (pxa_ssp_read_reg(ssp, SSSR) & SSSR_BSY) cpu_relax(); break; } dump_registers(ssp); /* Since we are configuring the timings 
for the format by hand * we have to defer some things until hw_params() where we * know parameters like the sample size. */ priv->dai_fmt = fmt; return 0; } /* * Set the SSP audio DMA parameters and sample size. * Can be called multiple times by oss emulation. */ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *cpu_dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; int chn = params_channels(params); u32 sscr0; u32 sspsp; int width = snd_pcm_format_physical_width(params_format(params)); int ttsa = pxa_ssp_read_reg(ssp, SSTSA) & 0xf; struct pxa2xx_pcm_dma_params *dma_data; dma_data = snd_soc_dai_get_dma_data(cpu_dai, substream); /* generate correct DMA params */ kfree(dma_data); /* Network mode with one active slot (ttsa == 1) can be used * to force 16-bit frame width on the wire (for S16_LE), even * with two channels. Use 16-bit DMA transfers for this case. */ dma_data = pxa_ssp_get_dma_params(ssp, ((chn == 2) && (ttsa != 1)) || (width == 32), substream->stream == SNDRV_PCM_STREAM_PLAYBACK); snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); /* we can only change the settings if the port is not in use */ if (pxa_ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) return 0; /* clear selected SSP bits */ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~(SSCR0_DSS | SSCR0_EDSS); /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: #ifdef CONFIG_PXA3xx if (cpu_is_pxa3xx()) sscr0 |= SSCR0_FPCKE; #endif sscr0 |= SSCR0_DataSize(16); break; case SNDRV_PCM_FORMAT_S24_LE: sscr0 |= (SSCR0_EDSS | SSCR0_DataSize(8)); break; case SNDRV_PCM_FORMAT_S32_LE: sscr0 |= (SSCR0_EDSS | SSCR0_DataSize(16)); break; } pxa_ssp_write_reg(ssp, SSCR0, sscr0); switch (priv->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: sspsp = pxa_ssp_read_reg(ssp, SSPSP); if ((pxa_ssp_get_scr(ssp) == 4) && (width == 16)) { /* This is a special case where the bitclk is 
64fs * and we're not dealing with 2*32 bits of audio * samples. * * The SSP values used for that are all found out by * trying and failing a lot; some of the registers * needed for that mode are only available on PXA3xx. */ #ifdef CONFIG_PXA3xx if (!cpu_is_pxa3xx()) return -EINVAL; sspsp |= SSPSP_SFRMWDTH(width * 2); sspsp |= SSPSP_SFRMDLY(width * 4); sspsp |= SSPSP_EDMYSTOP(3); sspsp |= SSPSP_DMYSTOP(3); sspsp |= SSPSP_DMYSTRT(1); #else return -EINVAL; #endif } else { /* The frame width is the width the LRCLK is * asserted for; the delay is expressed in * half cycle units. We need the extra cycle * because the data starts clocking out one BCLK * after LRCLK changes polarity. */ sspsp |= SSPSP_SFRMWDTH(width + 1); sspsp |= SSPSP_SFRMDLY((width + 1) * 2); sspsp |= SSPSP_DMYSTRT(1); } pxa_ssp_write_reg(ssp, SSPSP, sspsp); break; default: break; } /* When we use a network mode, we always require TDM slots * - complain loudly and fail if they've not been set up yet. */ if ((sscr0 & SSCR0_MOD) && !ttsa) { dev_err(&ssp->pdev->dev, "No TDM timeslot configured\n"); return -EINVAL; } dump_registers(ssp); return 0; } static void pxa_ssp_set_running_bit(struct snd_pcm_substream *substream, struct ssp_device *ssp, int value) { uint32_t sscr0 = pxa_ssp_read_reg(ssp, SSCR0); uint32_t sscr1 = pxa_ssp_read_reg(ssp, SSCR1); uint32_t sspsp = pxa_ssp_read_reg(ssp, SSPSP); uint32_t sssr = pxa_ssp_read_reg(ssp, SSSR); if (value && (sscr0 & SSCR0_SSE)) pxa_ssp_write_reg(ssp, SSCR0, sscr0 & ~SSCR0_SSE); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { if (value) sscr1 |= SSCR1_TSRE; else sscr1 &= ~SSCR1_TSRE; } else { if (value) sscr1 |= SSCR1_RSRE; else sscr1 &= ~SSCR1_RSRE; } pxa_ssp_write_reg(ssp, SSCR1, sscr1); if (value) { pxa_ssp_write_reg(ssp, SSSR, sssr); pxa_ssp_write_reg(ssp, SSPSP, sspsp); pxa_ssp_write_reg(ssp, SSCR0, sscr0 | SSCR0_SSE); } } static int pxa_ssp_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *cpu_dai) { int ret = 0; struct 
ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai); struct ssp_device *ssp = priv->ssp; int val; switch (cmd) { case SNDRV_PCM_TRIGGER_RESUME: pxa_ssp_enable(ssp); break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: pxa_ssp_set_running_bit(substream, ssp, 1); val = pxa_ssp_read_reg(ssp, SSSR); pxa_ssp_write_reg(ssp, SSSR, val); break; case SNDRV_PCM_TRIGGER_START: pxa_ssp_set_running_bit(substream, ssp, 1); break; case SNDRV_PCM_TRIGGER_STOP: pxa_ssp_set_running_bit(substream, ssp, 0); break; case SNDRV_PCM_TRIGGER_SUSPEND: pxa_ssp_disable(ssp); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: pxa_ssp_set_running_bit(substream, ssp, 0); break; default: ret = -EINVAL; } dump_registers(ssp); return ret; } static int pxa_ssp_probe(struct snd_soc_dai *dai) { struct ssp_priv *priv; int ret; priv = kzalloc(sizeof(struct ssp_priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->ssp = pxa_ssp_request(dai->id + 1, "SoC audio"); if (priv->ssp == NULL) { ret = -ENODEV; goto err_priv; } priv->dai_fmt = (unsigned int) -1; snd_soc_dai_set_drvdata(dai, priv); return 0; err_priv: kfree(priv); return ret; } static int pxa_ssp_remove(struct snd_soc_dai *dai) { struct ssp_priv *priv = snd_soc_dai_get_drvdata(dai); pxa_ssp_free(priv->ssp); kfree(priv); return 0; } #define PXA_SSP_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) #define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ SNDRV_PCM_FMTBIT_S24_LE | \ SNDRV_PCM_FMTBIT_S32_LE) static const struct snd_soc_dai_ops pxa_ssp_dai_ops = { .startup = pxa_ssp_startup, .shutdown = pxa_ssp_shutdown, .trigger = pxa_ssp_trigger, .hw_params = pxa_ssp_hw_params, .set_sysclk = pxa_ssp_set_dai_sysclk, .set_clkdiv = pxa_ssp_set_dai_clkdiv, .set_pll = pxa_ssp_set_dai_pll, .set_fmt = pxa_ssp_set_dai_fmt, .set_tdm_slot = pxa_ssp_set_dai_tdm_slot, .set_tristate = 
pxa_ssp_set_dai_tristate, }; static struct snd_soc_dai_driver pxa_ssp_dai = { .probe = pxa_ssp_probe, .remove = pxa_ssp_remove, .suspend = pxa_ssp_suspend, .resume = pxa_ssp_resume, .playback = { .channels_min = 1, .channels_max = 8, .rates = PXA_SSP_RATES, .formats = PXA_SSP_FORMATS, }, .capture = { .channels_min = 1, .channels_max = 8, .rates = PXA_SSP_RATES, .formats = PXA_SSP_FORMATS, }, .ops = &pxa_ssp_dai_ops, }; static __devinit int asoc_ssp_probe(struct platform_device *pdev) { return snd_soc_register_dai(&pdev->dev, &pxa_ssp_dai); } static int __devexit asoc_ssp_remove(struct platform_device *pdev) { snd_soc_unregister_dai(&pdev->dev); return 0; } static struct platform_driver asoc_ssp_driver = { .driver = { .name = "pxa-ssp-dai", .owner = THIS_MODULE, }, .probe = asoc_ssp_probe, .remove = __devexit_p(asoc_ssp_remove), }; module_platform_driver(asoc_ssp_driver); /* Module information */ MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_DESCRIPTION("PXA SSP/PCM SoC Interface"); MODULE_LICENSE("GPL");
gpl-2.0
jawad6233/android_kernel_samsung_t110
sound/ppc/pmac.c
5643
38204
/* * PMac DBDMA lowlevel functions * * Copyright (c) by Takashi Iwai <tiwai@suse.de> * code based on dmasound.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <asm/io.h> #include <asm/irq.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include "pmac.h" #include <sound/pcm_params.h> #include <asm/pmac_feature.h> #include <asm/pci-bridge.h> /* fixed frequency table for awacs, screamer, burgundy, DACA (44100 max) */ static int awacs_freqs[8] = { 44100, 29400, 22050, 17640, 14700, 11025, 8820, 7350 }; /* fixed frequency table for tumbler */ static int tumbler_freqs[1] = { 44100 }; /* * we will allocate a single 'emergency' dbdma cmd block to use if the * tx status comes up "DEAD". This happens on some PowerComputing Pmac * clones, either owing to a bug in dbdma or some interaction between * IDE and sound. However, this measure would deal with DEAD status if * it appeared elsewhere. 
*/ static struct pmac_dbdma emergency_dbdma; static int emergency_in_use; /* * allocate DBDMA command arrays */ static int snd_pmac_dbdma_alloc(struct snd_pmac *chip, struct pmac_dbdma *rec, int size) { unsigned int rsize = sizeof(struct dbdma_cmd) * (size + 1); rec->space = dma_alloc_coherent(&chip->pdev->dev, rsize, &rec->dma_base, GFP_KERNEL); if (rec->space == NULL) return -ENOMEM; rec->size = size; memset(rec->space, 0, rsize); rec->cmds = (void __iomem *)DBDMA_ALIGN(rec->space); rec->addr = rec->dma_base + (unsigned long)((char *)rec->cmds - (char *)rec->space); return 0; } static void snd_pmac_dbdma_free(struct snd_pmac *chip, struct pmac_dbdma *rec) { if (rec->space) { unsigned int rsize = sizeof(struct dbdma_cmd) * (rec->size + 1); dma_free_coherent(&chip->pdev->dev, rsize, rec->space, rec->dma_base); } } /* * pcm stuff */ /* * look up frequency table */ unsigned int snd_pmac_rate_index(struct snd_pmac *chip, struct pmac_stream *rec, unsigned int rate) { int i, ok, found; ok = rec->cur_freqs; if (rate > chip->freq_table[0]) return 0; found = 0; for (i = 0; i < chip->num_freqs; i++, ok >>= 1) { if (! (ok & 1)) continue; found = i; if (rate >= chip->freq_table[i]) break; } return found; } /* * check whether another stream is active */ static inline int another_stream(int stream) { return (stream == SNDRV_PCM_STREAM_PLAYBACK) ? 
SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK; } /* * allocate buffers */ static int snd_pmac_pcm_hw_params(struct snd_pcm_substream *subs, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(subs, params_buffer_bytes(hw_params)); } /* * release buffers */ static int snd_pmac_pcm_hw_free(struct snd_pcm_substream *subs) { snd_pcm_lib_free_pages(subs); return 0; } /* * get a stream of the opposite direction */ static struct pmac_stream *snd_pmac_get_stream(struct snd_pmac *chip, int stream) { switch (stream) { case SNDRV_PCM_STREAM_PLAYBACK: return &chip->playback; case SNDRV_PCM_STREAM_CAPTURE: return &chip->capture; default: snd_BUG(); return NULL; } } /* * wait while run status is on */ static inline void snd_pmac_wait_ack(struct pmac_stream *rec) { int timeout = 50000; while ((in_le32(&rec->dma->status) & RUN) && timeout-- > 0) udelay(1); } /* * set the format and rate to the chip. * call the lowlevel function if defined (e.g. for AWACS). */ static void snd_pmac_pcm_set_format(struct snd_pmac *chip) { /* set up frequency and format */ out_le32(&chip->awacs->control, chip->control_mask | (chip->rate_index << 8)); out_le32(&chip->awacs->byteswap, chip->format == SNDRV_PCM_FORMAT_S16_LE ? 
1 : 0); if (chip->set_format) chip->set_format(chip); } /* * stop the DMA transfer */ static inline void snd_pmac_dma_stop(struct pmac_stream *rec) { out_le32(&rec->dma->control, (RUN|WAKE|FLUSH|PAUSE) << 16); snd_pmac_wait_ack(rec); } /* * set the command pointer address */ static inline void snd_pmac_dma_set_command(struct pmac_stream *rec, struct pmac_dbdma *cmd) { out_le32(&rec->dma->cmdptr, cmd->addr); } /* * start the DMA */ static inline void snd_pmac_dma_run(struct pmac_stream *rec, int status) { out_le32(&rec->dma->control, status | (status << 16)); } /* * prepare playback/capture stream */ static int snd_pmac_pcm_prepare(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs) { int i; volatile struct dbdma_cmd __iomem *cp; struct snd_pcm_runtime *runtime = subs->runtime; int rate_index; long offset; struct pmac_stream *astr; rec->dma_size = snd_pcm_lib_buffer_bytes(subs); rec->period_size = snd_pcm_lib_period_bytes(subs); rec->nperiods = rec->dma_size / rec->period_size; rec->cur_period = 0; rate_index = snd_pmac_rate_index(chip, rec, runtime->rate); /* set up constraints */ astr = snd_pmac_get_stream(chip, another_stream(rec->stream)); if (! astr) return -EINVAL; astr->cur_freqs = 1 << rate_index; astr->cur_formats = 1 << runtime->format; chip->rate_index = rate_index; chip->format = runtime->format; /* We really want to execute a DMA stop command, after the AWACS * is initialized. * For reasons I don't understand, it stops the hissing noise * common to many PowerBook G3 systems and random noise otherwise * captured on iBook2's about every third time. -ReneR */ spin_lock_irq(&chip->reg_lock); snd_pmac_dma_stop(rec); st_le16(&chip->extra_dma.cmds->command, DBDMA_STOP); snd_pmac_dma_set_command(rec, &chip->extra_dma); snd_pmac_dma_run(rec, RUN); spin_unlock_irq(&chip->reg_lock); mdelay(5); spin_lock_irq(&chip->reg_lock); /* continuous DMA memory type doesn't provide the physical address, * so we need to resolve the address here... 
*/ offset = runtime->dma_addr; for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++) { st_le32(&cp->phy_addr, offset); st_le16(&cp->req_count, rec->period_size); /*st_le16(&cp->res_count, 0);*/ st_le16(&cp->xfer_status, 0); offset += rec->period_size; } /* make loop */ st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS); st_le32(&cp->cmd_dep, rec->cmd.addr); snd_pmac_dma_stop(rec); snd_pmac_dma_set_command(rec, &rec->cmd); spin_unlock_irq(&chip->reg_lock); return 0; } /* * PCM trigger/stop */ static int snd_pmac_pcm_trigger(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs, int cmd) { volatile struct dbdma_cmd __iomem *cp; int i, command; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: if (rec->running) return -EBUSY; command = (subs->stream == SNDRV_PCM_STREAM_PLAYBACK ? OUTPUT_MORE : INPUT_MORE) + INTR_ALWAYS; spin_lock(&chip->reg_lock); snd_pmac_beep_stop(chip); snd_pmac_pcm_set_format(chip); for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++) out_le16(&cp->command, command); snd_pmac_dma_set_command(rec, &rec->cmd); (void)in_le32(&rec->dma->status); snd_pmac_dma_run(rec, RUN|WAKE); rec->running = 1; spin_unlock(&chip->reg_lock); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: spin_lock(&chip->reg_lock); rec->running = 0; /*printk(KERN_DEBUG "stopped!!\n");*/ snd_pmac_dma_stop(rec); for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++) out_le16(&cp->command, DBDMA_STOP); spin_unlock(&chip->reg_lock); break; default: return -EINVAL; } return 0; } /* * return the current pointer */ inline static snd_pcm_uframes_t snd_pmac_pcm_pointer(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs) { int count = 0; #if 1 /* hmm.. how can we get the current dma pointer?? 
*/ int stat; volatile struct dbdma_cmd __iomem *cp = &rec->cmd.cmds[rec->cur_period]; stat = ld_le16(&cp->xfer_status); if (stat & (ACTIVE|DEAD)) { count = in_le16(&cp->res_count); if (count) count = rec->period_size - count; } #endif count += rec->cur_period * rec->period_size; /*printk(KERN_DEBUG "pointer=%d\n", count);*/ return bytes_to_frames(subs->runtime, count); } /* * playback */ static int snd_pmac_playback_prepare(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_prepare(chip, &chip->playback, subs); } static int snd_pmac_playback_trigger(struct snd_pcm_substream *subs, int cmd) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_trigger(chip, &chip->playback, subs, cmd); } static snd_pcm_uframes_t snd_pmac_playback_pointer(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_pointer(chip, &chip->playback, subs); } /* * capture */ static int snd_pmac_capture_prepare(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_prepare(chip, &chip->capture, subs); } static int snd_pmac_capture_trigger(struct snd_pcm_substream *subs, int cmd) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_trigger(chip, &chip->capture, subs, cmd); } static snd_pcm_uframes_t snd_pmac_capture_pointer(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_pointer(chip, &chip->capture, subs); } /* * Handle DEAD DMA transfers: * if the TX status comes up "DEAD" - reported on some Power Computing machines * we need to re-start the dbdma - but from a different physical start address * and with a different transfer length. It would get very messy to do this * with the normal dbdma_cmd blocks - we would have to re-write the buffer start * addresses each time. 
So, we will keep a single dbdma_cmd block which can be * fiddled with. * When DEAD status is first reported the content of the faulted dbdma block is * copied into the emergency buffer and we note that the buffer is in use. * we then bump the start physical address by the amount that was successfully * output before it died. * On any subsequent DEAD result we just do the bump-ups (we know that we are * already using the emergency dbdma_cmd). * CHECK: this just tries to "do it". It is possible that we should abandon * xfers when the number of residual bytes gets below a certain value - I can * see that this might cause a loop-forever if a too small transfer causes * DEAD status. However this is a TODO for now - we'll see what gets reported. * When we get a successful transfer result with the emergency buffer we just * pretend that it completed using the original dmdma_cmd and carry on. The * 'next_cmd' field will already point back to the original loop of blocks. */ static inline void snd_pmac_pcm_dead_xfer(struct pmac_stream *rec, volatile struct dbdma_cmd __iomem *cp) { unsigned short req, res ; unsigned int phy ; /* printk(KERN_WARNING "snd-powermac: DMA died - patching it up!\n"); */ /* to clear DEAD status we must first clear RUN set it to quiescent to be on the safe side */ (void)in_le32(&rec->dma->status); out_le32(&rec->dma->control, (RUN|PAUSE|FLUSH|WAKE) << 16); if (!emergency_in_use) { /* new problem */ memcpy((void *)emergency_dbdma.cmds, (void *)cp, sizeof(struct dbdma_cmd)); emergency_in_use = 1; st_le16(&cp->xfer_status, 0); st_le16(&cp->req_count, rec->period_size); cp = emergency_dbdma.cmds; } /* now bump the values to reflect the amount we haven't yet shifted */ req = ld_le16(&cp->req_count); res = ld_le16(&cp->res_count); phy = ld_le32(&cp->phy_addr); phy += (req - res); st_le16(&cp->req_count, res); st_le16(&cp->res_count, 0); st_le16(&cp->xfer_status, 0); st_le32(&cp->phy_addr, phy); st_le32(&cp->cmd_dep, rec->cmd.addr + sizeof(struct 
dbdma_cmd)*((rec->cur_period+1)%rec->nperiods)); st_le16(&cp->command, OUTPUT_MORE | BR_ALWAYS | INTR_ALWAYS); /* point at our patched up command block */ out_le32(&rec->dma->cmdptr, emergency_dbdma.addr); /* we must re-start the controller */ (void)in_le32(&rec->dma->status); /* should complete clearing the DEAD status */ out_le32(&rec->dma->control, ((RUN|WAKE) << 16) + (RUN|WAKE)); } /* * update playback/capture pointer from interrupts */ static void snd_pmac_pcm_update(struct snd_pmac *chip, struct pmac_stream *rec) { volatile struct dbdma_cmd __iomem *cp; int c; int stat; spin_lock(&chip->reg_lock); if (rec->running) { for (c = 0; c < rec->nperiods; c++) { /* at most all fragments */ if (emergency_in_use) /* already using DEAD xfer? */ cp = emergency_dbdma.cmds; else cp = &rec->cmd.cmds[rec->cur_period]; stat = ld_le16(&cp->xfer_status); if (stat & DEAD) { snd_pmac_pcm_dead_xfer(rec, cp); break; /* this block is still going */ } if (emergency_in_use) emergency_in_use = 0 ; /* done that */ if (! 
(stat & ACTIVE)) break; /*printk(KERN_DEBUG "update frag %d\n", rec->cur_period);*/ st_le16(&cp->xfer_status, 0); st_le16(&cp->req_count, rec->period_size); /*st_le16(&cp->res_count, 0);*/ rec->cur_period++; if (rec->cur_period >= rec->nperiods) { rec->cur_period = 0; } spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(rec->substream); spin_lock(&chip->reg_lock); } } spin_unlock(&chip->reg_lock); } /* * hw info */ static struct snd_pcm_hardware snd_pmac_playback = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000_44100, .rate_min = 7350, .rate_max = 44100, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 131072, .period_bytes_min = 256, .period_bytes_max = 16384, .periods_min = 3, .periods_max = PMAC_MAX_FRAGS, }; static struct snd_pcm_hardware snd_pmac_capture = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000_44100, .rate_min = 7350, .rate_max = 44100, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 131072, .period_bytes_min = 256, .period_bytes_max = 16384, .periods_min = 3, .periods_max = PMAC_MAX_FRAGS, }; #if 0 // NYI static int snd_pmac_hw_rule_rate(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_pmac *chip = rule->private; struct pmac_stream *rec = snd_pmac_get_stream(chip, rule->deps[0]); int i, freq_table[8], num_freqs; if (! 
rec) return -EINVAL; num_freqs = 0; for (i = chip->num_freqs - 1; i >= 0; i--) { if (rec->cur_freqs & (1 << i)) freq_table[num_freqs++] = chip->freq_table[i]; } return snd_interval_list(hw_param_interval(params, rule->var), num_freqs, freq_table, 0); } static int snd_pmac_hw_rule_format(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_pmac *chip = rule->private; struct pmac_stream *rec = snd_pmac_get_stream(chip, rule->deps[0]); if (! rec) return -EINVAL; return snd_mask_refine_set(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), rec->cur_formats); } #endif // NYI static int snd_pmac_pcm_open(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs) { struct snd_pcm_runtime *runtime = subs->runtime; int i; /* look up frequency table and fill bit mask */ runtime->hw.rates = 0; for (i = 0; i < chip->num_freqs; i++) if (chip->freqs_ok & (1 << i)) runtime->hw.rates |= snd_pcm_rate_to_rate_bit(chip->freq_table[i]); /* check for minimum and maximum rates */ for (i = 0; i < chip->num_freqs; i++) { if (chip->freqs_ok & (1 << i)) { runtime->hw.rate_max = chip->freq_table[i]; break; } } for (i = chip->num_freqs - 1; i >= 0; i--) { if (chip->freqs_ok & (1 << i)) { runtime->hw.rate_min = chip->freq_table[i]; break; } } runtime->hw.formats = chip->formats_ok; if (chip->can_capture) { if (! chip->can_duplex) runtime->hw.info |= SNDRV_PCM_INFO_HALF_DUPLEX; runtime->hw.info |= SNDRV_PCM_INFO_JOINT_DUPLEX; } runtime->private_data = rec; rec->substream = subs; #if 0 /* FIXME: still under development.. 
*/ snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_pmac_hw_rule_rate, chip, rec->stream, -1); snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT, snd_pmac_hw_rule_format, chip, rec->stream, -1); #endif runtime->hw.periods_max = rec->cmd.size - 1; /* constraints to fix choppy sound */ snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); return 0; } static int snd_pmac_pcm_close(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs) { struct pmac_stream *astr; snd_pmac_dma_stop(rec); astr = snd_pmac_get_stream(chip, another_stream(rec->stream)); if (! astr) return -EINVAL; /* reset constraints */ astr->cur_freqs = chip->freqs_ok; astr->cur_formats = chip->formats_ok; return 0; } static int snd_pmac_playback_open(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); subs->runtime->hw = snd_pmac_playback; return snd_pmac_pcm_open(chip, &chip->playback, subs); } static int snd_pmac_capture_open(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); subs->runtime->hw = snd_pmac_capture; return snd_pmac_pcm_open(chip, &chip->capture, subs); } static int snd_pmac_playback_close(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_close(chip, &chip->playback, subs); } static int snd_pmac_capture_close(struct snd_pcm_substream *subs) { struct snd_pmac *chip = snd_pcm_substream_chip(subs); return snd_pmac_pcm_close(chip, &chip->capture, subs); } /* */ static struct snd_pcm_ops snd_pmac_playback_ops = { .open = snd_pmac_playback_open, .close = snd_pmac_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_pmac_pcm_hw_params, .hw_free = snd_pmac_pcm_hw_free, .prepare = snd_pmac_playback_prepare, .trigger = snd_pmac_playback_trigger, .pointer = snd_pmac_playback_pointer, }; static struct snd_pcm_ops snd_pmac_capture_ops = { .open = snd_pmac_capture_open, .close = snd_pmac_capture_close, .ioctl 
= snd_pcm_lib_ioctl, .hw_params = snd_pmac_pcm_hw_params, .hw_free = snd_pmac_pcm_hw_free, .prepare = snd_pmac_capture_prepare, .trigger = snd_pmac_capture_trigger, .pointer = snd_pmac_capture_pointer, }; int __devinit snd_pmac_pcm_new(struct snd_pmac *chip) { struct snd_pcm *pcm; int err; int num_captures = 1; if (! chip->can_capture) num_captures = 0; err = snd_pcm_new(chip->card, chip->card->driver, 0, 1, num_captures, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_pmac_playback_ops); if (chip->can_capture) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_pmac_capture_ops); pcm->private_data = chip; pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX; strcpy(pcm->name, chip->card->shortname); chip->pcm = pcm; chip->formats_ok = SNDRV_PCM_FMTBIT_S16_BE; if (chip->can_byte_swap) chip->formats_ok |= SNDRV_PCM_FMTBIT_S16_LE; chip->playback.cur_formats = chip->formats_ok; chip->capture.cur_formats = chip->formats_ok; chip->playback.cur_freqs = chip->freqs_ok; chip->capture.cur_freqs = chip->freqs_ok; /* preallocate 64k buffer */ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, &chip->pdev->dev, 64 * 1024, 64 * 1024); return 0; } static void snd_pmac_dbdma_reset(struct snd_pmac *chip) { out_le32(&chip->playback.dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16); snd_pmac_wait_ack(&chip->playback); out_le32(&chip->capture.dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16); snd_pmac_wait_ack(&chip->capture); } /* * handling beep */ void snd_pmac_beep_dma_start(struct snd_pmac *chip, int bytes, unsigned long addr, int speed) { struct pmac_stream *rec = &chip->playback; snd_pmac_dma_stop(rec); st_le16(&chip->extra_dma.cmds->req_count, bytes); st_le16(&chip->extra_dma.cmds->xfer_status, 0); st_le32(&chip->extra_dma.cmds->cmd_dep, chip->extra_dma.addr); st_le32(&chip->extra_dma.cmds->phy_addr, addr); st_le16(&chip->extra_dma.cmds->command, OUTPUT_MORE + BR_ALWAYS); out_le32(&chip->awacs->control, (in_le32(&chip->awacs->control) & 
~0x1f00) | (speed << 8)); out_le32(&chip->awacs->byteswap, 0); snd_pmac_dma_set_command(rec, &chip->extra_dma); snd_pmac_dma_run(rec, RUN); } void snd_pmac_beep_dma_stop(struct snd_pmac *chip) { snd_pmac_dma_stop(&chip->playback); st_le16(&chip->extra_dma.cmds->command, DBDMA_STOP); snd_pmac_pcm_set_format(chip); /* reset format */ } /* * interrupt handlers */ static irqreturn_t snd_pmac_tx_intr(int irq, void *devid) { struct snd_pmac *chip = devid; snd_pmac_pcm_update(chip, &chip->playback); return IRQ_HANDLED; } static irqreturn_t snd_pmac_rx_intr(int irq, void *devid) { struct snd_pmac *chip = devid; snd_pmac_pcm_update(chip, &chip->capture); return IRQ_HANDLED; } static irqreturn_t snd_pmac_ctrl_intr(int irq, void *devid) { struct snd_pmac *chip = devid; int ctrl = in_le32(&chip->awacs->control); /*printk(KERN_DEBUG "pmac: control interrupt.. 0x%x\n", ctrl);*/ if (ctrl & MASK_PORTCHG) { /* do something when headphone is plugged/unplugged? */ if (chip->update_automute) chip->update_automute(chip, 1); } if (ctrl & MASK_CNTLERR) { int err = (in_le32(&chip->awacs->codec_stat) & MASK_ERRCODE) >> 16; if (err && chip->model <= PMAC_SCREAMER) snd_printk(KERN_DEBUG "error %x\n", err); } /* Writing 1s to the CNTLERR and PORTCHG bits clears them... 
*/ out_le32(&chip->awacs->control, ctrl); return IRQ_HANDLED; } /* * a wrapper to feature call for compatibility */ static void snd_pmac_sound_feature(struct snd_pmac *chip, int enable) { if (ppc_md.feature_call) ppc_md.feature_call(PMAC_FTR_SOUND_CHIP_ENABLE, chip->node, 0, enable); } /* * release resources */ static int snd_pmac_free(struct snd_pmac *chip) { /* stop sounds */ if (chip->initialized) { snd_pmac_dbdma_reset(chip); /* disable interrupts from awacs interface */ out_le32(&chip->awacs->control, in_le32(&chip->awacs->control) & 0xfff); } if (chip->node) snd_pmac_sound_feature(chip, 0); /* clean up mixer if any */ if (chip->mixer_free) chip->mixer_free(chip); snd_pmac_detach_beep(chip); /* release resources */ if (chip->irq >= 0) free_irq(chip->irq, (void*)chip); if (chip->tx_irq >= 0) free_irq(chip->tx_irq, (void*)chip); if (chip->rx_irq >= 0) free_irq(chip->rx_irq, (void*)chip); snd_pmac_dbdma_free(chip, &chip->playback.cmd); snd_pmac_dbdma_free(chip, &chip->capture.cmd); snd_pmac_dbdma_free(chip, &chip->extra_dma); snd_pmac_dbdma_free(chip, &emergency_dbdma); if (chip->macio_base) iounmap(chip->macio_base); if (chip->latch_base) iounmap(chip->latch_base); if (chip->awacs) iounmap(chip->awacs); if (chip->playback.dma) iounmap(chip->playback.dma); if (chip->capture.dma) iounmap(chip->capture.dma); if (chip->node) { int i; for (i = 0; i < 3; i++) { if (chip->requested & (1 << i)) release_mem_region(chip->rsrc[i].start, resource_size(&chip->rsrc[i])); } } if (chip->pdev) pci_dev_put(chip->pdev); of_node_put(chip->node); kfree(chip); return 0; } /* * free the device */ static int snd_pmac_dev_free(struct snd_device *device) { struct snd_pmac *chip = device->device_data; return snd_pmac_free(chip); } /* * check the machine support byteswap (little-endian) */ static void __devinit detect_byte_swap(struct snd_pmac *chip) { struct device_node *mio; /* if seems that Keylargo can't byte-swap */ for (mio = chip->node->parent; mio; mio = mio->parent) { if 
(strcmp(mio->name, "mac-io") == 0) { if (of_device_is_compatible(mio, "Keylargo")) chip->can_byte_swap = 0; break; } } /* it seems the Pismo & iBook can't byte-swap in hardware. */ if (of_machine_is_compatible("PowerBook3,1") || of_machine_is_compatible("PowerBook2,1")) chip->can_byte_swap = 0 ; if (of_machine_is_compatible("PowerBook2,1")) chip->can_duplex = 0; } /* * detect a sound chip */ static int __devinit snd_pmac_detect(struct snd_pmac *chip) { struct device_node *sound; struct device_node *dn; const unsigned int *prop; unsigned int l; struct macio_chip* macio; if (!machine_is(powermac)) return -ENODEV; chip->subframe = 0; chip->revision = 0; chip->freqs_ok = 0xff; /* all ok */ chip->model = PMAC_AWACS; chip->can_byte_swap = 1; chip->can_duplex = 1; chip->can_capture = 1; chip->num_freqs = ARRAY_SIZE(awacs_freqs); chip->freq_table = awacs_freqs; chip->pdev = NULL; chip->control_mask = MASK_IEPC | MASK_IEE | 0x11; /* default */ /* check machine type */ if (of_machine_is_compatible("AAPL,3400/2400") || of_machine_is_compatible("AAPL,3500")) chip->is_pbook_3400 = 1; else if (of_machine_is_compatible("PowerBook1,1") || of_machine_is_compatible("AAPL,PowerBook1998")) chip->is_pbook_G3 = 1; chip->node = of_find_node_by_name(NULL, "awacs"); sound = of_node_get(chip->node); /* * powermac G3 models have a node called "davbus" * with a child called "sound". */ if (!chip->node) chip->node = of_find_node_by_name(NULL, "davbus"); /* * if we didn't find a davbus device, try 'i2s-a' since * this seems to be what iBooks have */ if (! chip->node) { chip->node = of_find_node_by_name(NULL, "i2s-a"); if (chip->node && chip->node->parent && chip->node->parent->parent) { if (of_device_is_compatible(chip->node->parent->parent, "K2-Keylargo")) chip->is_k2 = 1; } } if (! chip->node) return -ENODEV; if (!sound) { sound = of_find_node_by_name(NULL, "sound"); while (sound && sound->parent != chip->node) sound = of_find_node_by_name(sound, "sound"); } if (! 
sound) { of_node_put(chip->node); chip->node = NULL; return -ENODEV; } prop = of_get_property(sound, "sub-frame", NULL); if (prop && *prop < 16) chip->subframe = *prop; prop = of_get_property(sound, "layout-id", NULL); if (prop) { /* partly deprecate snd-powermac, for those machines * that have a layout-id property for now */ printk(KERN_INFO "snd-powermac no longer handles any " "machines with a layout-id property " "in the device-tree, use snd-aoa.\n"); of_node_put(sound); of_node_put(chip->node); chip->node = NULL; return -ENODEV; } /* This should be verified on older screamers */ if (of_device_is_compatible(sound, "screamer")) { chip->model = PMAC_SCREAMER; // chip->can_byte_swap = 0; /* FIXME: check this */ } if (of_device_is_compatible(sound, "burgundy")) { chip->model = PMAC_BURGUNDY; chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */ } if (of_device_is_compatible(sound, "daca")) { chip->model = PMAC_DACA; chip->can_capture = 0; /* no capture */ chip->can_duplex = 0; // chip->can_byte_swap = 0; /* FIXME: check this */ chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */ } if (of_device_is_compatible(sound, "tumbler")) { chip->model = PMAC_TUMBLER; chip->can_capture = of_machine_is_compatible("PowerMac4,2") || of_machine_is_compatible("PowerBook3,2") || of_machine_is_compatible("PowerBook3,3") || of_machine_is_compatible("PowerBook4,1") || of_machine_is_compatible("PowerBook4,2") || of_machine_is_compatible("PowerBook4,3"); chip->can_duplex = 0; // chip->can_byte_swap = 0; /* FIXME: check this */ chip->num_freqs = ARRAY_SIZE(tumbler_freqs); chip->freq_table = tumbler_freqs; chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */ } if (of_device_is_compatible(sound, "snapper")) { chip->model = PMAC_SNAPPER; // chip->can_byte_swap = 0; /* FIXME: check this */ chip->num_freqs = ARRAY_SIZE(tumbler_freqs); chip->freq_table = tumbler_freqs; chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */ } prop = of_get_property(sound, "device-id", NULL); if 
(prop) chip->device_id = *prop; dn = of_find_node_by_name(NULL, "perch"); chip->has_iic = (dn != NULL); of_node_put(dn); /* We need the PCI device for DMA allocations, let's use a crude method * for now ... */ macio = macio_find(chip->node, macio_unknown); if (macio == NULL) printk(KERN_WARNING "snd-powermac: can't locate macio !\n"); else { struct pci_dev *pdev = NULL; for_each_pci_dev(pdev) { struct device_node *np = pci_device_to_OF_node(pdev); if (np && np == macio->of_node) { chip->pdev = pdev; break; } } } if (chip->pdev == NULL) printk(KERN_WARNING "snd-powermac: can't locate macio PCI" " device !\n"); detect_byte_swap(chip); /* look for a property saying what sample rates are available */ prop = of_get_property(sound, "sample-rates", &l); if (! prop) prop = of_get_property(sound, "output-frame-rates", &l); if (prop) { int i; chip->freqs_ok = 0; for (l /= sizeof(int); l > 0; --l) { unsigned int r = *prop++; /* Apple 'Fixed' format */ if (r >= 0x10000) r >>= 16; for (i = 0; i < chip->num_freqs; ++i) { if (r == chip->freq_table[i]) { chip->freqs_ok |= (1 << i); break; } } } } else { /* assume only 44.1khz */ chip->freqs_ok = 1; } of_node_put(sound); return 0; } #ifdef PMAC_SUPPORT_AUTOMUTE /* * auto-mute */ static int pmac_auto_mute_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = chip->auto_mute; return 0; } static int pmac_auto_mute_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); if (ucontrol->value.integer.value[0] != chip->auto_mute) { chip->auto_mute = !!ucontrol->value.integer.value[0]; if (chip->update_automute) chip->update_automute(chip, 1); return 1; } return 0; } static int pmac_hp_detect_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); if (chip->detect_headphone) 
ucontrol->value.integer.value[0] = chip->detect_headphone(chip); else ucontrol->value.integer.value[0] = 0; return 0; } static struct snd_kcontrol_new auto_mute_controls[] __devinitdata = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Auto Mute Switch", .info = snd_pmac_boolean_mono_info, .get = pmac_auto_mute_get, .put = pmac_auto_mute_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Headphone Detection", .access = SNDRV_CTL_ELEM_ACCESS_READ, .info = snd_pmac_boolean_mono_info, .get = pmac_hp_detect_get, }, }; int __devinit snd_pmac_add_automute(struct snd_pmac *chip) { int err; chip->auto_mute = 1; err = snd_ctl_add(chip->card, snd_ctl_new1(&auto_mute_controls[0], chip)); if (err < 0) { printk(KERN_ERR "snd-powermac: Failed to add automute control\n"); return err; } chip->hp_detect_ctl = snd_ctl_new1(&auto_mute_controls[1], chip); return snd_ctl_add(chip->card, chip->hp_detect_ctl); } #endif /* PMAC_SUPPORT_AUTOMUTE */ /* * create and detect a pmac chip record */ int __devinit snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return) { struct snd_pmac *chip; struct device_node *np; int i, err; unsigned int irq; unsigned long ctrl_addr, txdma_addr, rxdma_addr; static struct snd_device_ops ops = { .dev_free = snd_pmac_dev_free, }; *chip_return = NULL; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) return -ENOMEM; chip->card = card; spin_lock_init(&chip->reg_lock); chip->irq = chip->tx_irq = chip->rx_irq = -1; chip->playback.stream = SNDRV_PCM_STREAM_PLAYBACK; chip->capture.stream = SNDRV_PCM_STREAM_CAPTURE; if ((err = snd_pmac_detect(chip)) < 0) goto __error; if (snd_pmac_dbdma_alloc(chip, &chip->playback.cmd, PMAC_MAX_FRAGS + 1) < 0 || snd_pmac_dbdma_alloc(chip, &chip->capture.cmd, PMAC_MAX_FRAGS + 1) < 0 || snd_pmac_dbdma_alloc(chip, &chip->extra_dma, 2) < 0 || snd_pmac_dbdma_alloc(chip, &emergency_dbdma, 2) < 0) { err = -ENOMEM; goto __error; } np = chip->node; chip->requested = 0; if (chip->is_k2) { static char *rnames[] = { 
"Sound Control", "Sound DMA" }; for (i = 0; i < 2; i ++) { if (of_address_to_resource(np->parent, i, &chip->rsrc[i])) { printk(KERN_ERR "snd: can't translate rsrc " " %d (%s)\n", i, rnames[i]); err = -ENODEV; goto __error; } if (request_mem_region(chip->rsrc[i].start, resource_size(&chip->rsrc[i]), rnames[i]) == NULL) { printk(KERN_ERR "snd: can't request rsrc " " %d (%s: %pR)\n", i, rnames[i], &chip->rsrc[i]); err = -ENODEV; goto __error; } chip->requested |= (1 << i); } ctrl_addr = chip->rsrc[0].start; txdma_addr = chip->rsrc[1].start; rxdma_addr = txdma_addr + 0x100; } else { static char *rnames[] = { "Sound Control", "Sound Tx DMA", "Sound Rx DMA" }; for (i = 0; i < 3; i ++) { if (of_address_to_resource(np, i, &chip->rsrc[i])) { printk(KERN_ERR "snd: can't translate rsrc " " %d (%s)\n", i, rnames[i]); err = -ENODEV; goto __error; } if (request_mem_region(chip->rsrc[i].start, resource_size(&chip->rsrc[i]), rnames[i]) == NULL) { printk(KERN_ERR "snd: can't request rsrc " " %d (%s: %pR)\n", i, rnames[i], &chip->rsrc[i]); err = -ENODEV; goto __error; } chip->requested |= (1 << i); } ctrl_addr = chip->rsrc[0].start; txdma_addr = chip->rsrc[1].start; rxdma_addr = chip->rsrc[2].start; } chip->awacs = ioremap(ctrl_addr, 0x1000); chip->playback.dma = ioremap(txdma_addr, 0x100); chip->capture.dma = ioremap(rxdma_addr, 0x100); if (chip->model <= PMAC_BURGUNDY) { irq = irq_of_parse_and_map(np, 0); if (request_irq(irq, snd_pmac_ctrl_intr, 0, "PMac", (void*)chip)) { snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq); err = -EBUSY; goto __error; } chip->irq = irq; } irq = irq_of_parse_and_map(np, 1); if (request_irq(irq, snd_pmac_tx_intr, 0, "PMac Output", (void*)chip)){ snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq); err = -EBUSY; goto __error; } chip->tx_irq = irq; irq = irq_of_parse_and_map(np, 2); if (request_irq(irq, snd_pmac_rx_intr, 0, "PMac Input", (void*)chip)) { snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq); err = -EBUSY; goto __error; 
} chip->rx_irq = irq; snd_pmac_sound_feature(chip, 1); /* reset & enable interrupts */ if (chip->model <= PMAC_BURGUNDY) out_le32(&chip->awacs->control, chip->control_mask); /* Powerbooks have odd ways of enabling inputs such as an expansion-bay CD or sound from an internal modem or a PC-card modem. */ if (chip->is_pbook_3400) { /* Enable CD and PC-card sound inputs. */ /* This is done by reading from address * f301a000, + 0x10 to enable the expansion-bay * CD sound input, + 0x80 to enable the PC-card * sound input. The 0x100 enables the SCSI bus * terminator power. */ chip->latch_base = ioremap (0xf301a000, 0x1000); in_8(chip->latch_base + 0x190); } else if (chip->is_pbook_G3) { struct device_node* mio; for (mio = chip->node->parent; mio; mio = mio->parent) { if (strcmp(mio->name, "mac-io") == 0) { struct resource r; if (of_address_to_resource(mio, 0, &r) == 0) chip->macio_base = ioremap(r.start, 0x40); break; } } /* Enable CD sound input. */ /* The relevant bits for writing to this byte are 0x8f. * I haven't found out what the 0x80 bit does. * For the 0xf bits, writing 3 or 7 enables the CD * input, any other value disables it. Values * 1, 3, 5, 7 enable the microphone. Values 0, 2, * 4, 6, 8 - f enable the input from the modem. */ if (chip->macio_base) out_8(chip->macio_base + 0x37, 3); } /* Reset dbdma channels */ snd_pmac_dbdma_reset(chip); if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) goto __error; *chip_return = chip; return 0; __error: snd_pmac_free(chip); return err; } /* * sleep notify for powerbook */ #ifdef CONFIG_PM /* * Save state when going to sleep, restore it afterwards. 
*/

/*
 * snd_pmac_suspend - quiesce the chip before system sleep.
 *
 * Sequence: announce D3hot to ALSA, run the chip-specific suspend hook
 * (if one is installed), suspend all PCM streams, stop the beep under
 * reg_lock, disable the three interrupt lines, and finally power the
 * sound hardware off.
 */
void snd_pmac_suspend(struct snd_pmac *chip)
{
	unsigned long flags;

	snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
	/* chip->suspend is the codec-specific save hook; optional */
	if (chip->suspend)
		chip->suspend(chip);
	snd_pcm_suspend_all(chip->pcm);
	/* stop any beep in progress while holding the register lock */
	spin_lock_irqsave(&chip->reg_lock, flags);
	snd_pmac_beep_stop(chip);
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	/* an irq field of -1 means that IRQ was never requested */
	if (chip->irq >= 0)
		disable_irq(chip->irq);
	if (chip->tx_irq >= 0)
		disable_irq(chip->tx_irq);
	if (chip->rx_irq >= 0)
		disable_irq(chip->rx_irq);
	snd_pmac_sound_feature(chip, 0);	/* power the hardware off */
}

/*
 * snd_pmac_resume - undo snd_pmac_suspend after system wakeup.
 *
 * Powers the hardware back on, runs the chip-specific resume hook,
 * re-applies the PowerBook CD-input magic (same accesses as done at
 * probe time), restores the PCM format, and re-enables the IRQs.
 */
void snd_pmac_resume(struct snd_pmac *chip)
{
	snd_pmac_sound_feature(chip, 1);	/* power the hardware on */
	/* chip->resume is the codec-specific restore hook; optional */
	if (chip->resume)
		chip->resume(chip);
	/* enable CD sound input */
	if (chip->macio_base && chip->is_pbook_G3)
		out_8(chip->macio_base + 0x37, 3);
	else if (chip->is_pbook_3400)
		in_8(chip->latch_base + 0x190);	/* magic latch read, see probe */

	snd_pmac_pcm_set_format(chip);

	/* re-enable whatever IRQs were actually requested (-1 = none) */
	if (chip->irq >= 0)
		enable_irq(chip->irq);
	if (chip->tx_irq >= 0)
		enable_irq(chip->tx_irq);
	if (chip->rx_irq >= 0)
		enable_irq(chip->rx_irq);

	snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0);
}

#endif /* CONFIG_PM */
gpl-2.0
drowningchild/lgog_old
drivers/infiniband/hw/ipath/ipath_srq.c
9483
9398
/* * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/err.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include "ipath_verbs.h" /** * ipath_post_srq_receive - post a receive on a shared receive queue * @ibsrq: the SRQ to post the receive on * @wr: the list of work requests to post * @bad_wr: the first WR to cause a problem is put here * * This may be called from interrupt context. 
*/
int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_rwq *wq;
	unsigned long flags;
	int ret;

	/* post each work request in the chain; stop on the first failure
	 * and report the offending WR through *bad_wr */
	for (; wr; wr = wr->next) {
		struct ipath_rwqe *wqe;
		u32 next;
		int i;

		/* reject WRs with more SGEs than the SRQ was created with */
		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		/* head+1 == tail means full: one slot is always left empty
		 * so that a full ring can be told apart from an empty one */
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}
		wqe = get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libipathverbs when creating a user SRQ
 *
 * Returns the new SRQ on success, or an ERR_PTR() on failure.
 */
struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
				struct ib_srq_init_attr *srq_init_attr,
				struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibpd->device);
	struct ipath_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	/* only basic SRQs are supported (no XRC) */
	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
		ret = ERR_PTR(-ENOSYS);
		goto done;
	}

	if (srq_init_attr->attr.max_wr == 0) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* enforce the module-wide SRQ size/SGE limits */
	if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
	    (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
*/ srq->rq.size = srq_init_attr->attr.max_wr + 1; srq->rq.max_sge = srq_init_attr->attr.max_sge; sz = sizeof(struct ib_sge) * srq->rq.max_sge + sizeof(struct ipath_rwqe); srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz); if (!srq->rq.wq) { ret = ERR_PTR(-ENOMEM); goto bail_srq; } /* * Return the address of the RWQ as the offset to mmap. * See ipath_mmap() for details. */ if (udata && udata->outlen >= sizeof(__u64)) { int err; u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz; srq->ip = ipath_create_mmap_info(dev, s, ibpd->uobject->context, srq->rq.wq); if (!srq->ip) { ret = ERR_PTR(-ENOMEM); goto bail_wq; } err = ib_copy_to_udata(udata, &srq->ip->offset, sizeof(srq->ip->offset)); if (err) { ret = ERR_PTR(err); goto bail_ip; } } else srq->ip = NULL; /* * ib_create_srq() will initialize srq->ibsrq. */ spin_lock_init(&srq->rq.lock); srq->rq.wq->head = 0; srq->rq.wq->tail = 0; srq->limit = srq_init_attr->attr.srq_limit; spin_lock(&dev->n_srqs_lock); if (dev->n_srqs_allocated == ib_ipath_max_srqs) { spin_unlock(&dev->n_srqs_lock); ret = ERR_PTR(-ENOMEM); goto bail_ip; } dev->n_srqs_allocated++; spin_unlock(&dev->n_srqs_lock); if (srq->ip) { spin_lock_irq(&dev->pending_lock); list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps); spin_unlock_irq(&dev->pending_lock); } ret = &srq->ibsrq; goto done; bail_ip: kfree(srq->ip); bail_wq: vfree(srq->rq.wq); bail_srq: kfree(srq); done: return ret; } /** * ipath_modify_srq - modify a shared receive queue * @ibsrq: the SRQ to modify * @attr: the new attributes of the SRQ * @attr_mask: indicates which attributes to modify * @udata: user data for ipathverbs.so */ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) { struct ipath_srq *srq = to_isrq(ibsrq); struct ipath_rwq *wq; int ret = 0; if (attr_mask & IB_SRQ_MAX_WR) { struct ipath_rwq *owq; struct ipath_rwqe *p; u32 sz, size, n, head, tail; /* Check that the requested sizes are 
below the limits. */ if ((attr->max_wr > ib_ipath_max_srq_wrs) || ((attr_mask & IB_SRQ_LIMIT) ? attr->srq_limit : srq->limit) > attr->max_wr) { ret = -EINVAL; goto bail; } sz = sizeof(struct ipath_rwqe) + srq->rq.max_sge * sizeof(struct ib_sge); size = attr->max_wr + 1; wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz); if (!wq) { ret = -ENOMEM; goto bail; } /* Check that we can write the offset to mmap. */ if (udata && udata->inlen >= sizeof(__u64)) { __u64 offset_addr; __u64 offset = 0; ret = ib_copy_from_udata(&offset_addr, udata, sizeof(offset_addr)); if (ret) goto bail_free; udata->outbuf = (void __user *) (unsigned long) offset_addr; ret = ib_copy_to_udata(udata, &offset, sizeof(offset)); if (ret) goto bail_free; } spin_lock_irq(&srq->rq.lock); /* * validate head pointer value and compute * the number of remaining WQEs. */ owq = srq->rq.wq; head = owq->head; if (head >= srq->rq.size) head = 0; tail = owq->tail; if (tail >= srq->rq.size) tail = 0; n = head; if (n < tail) n += srq->rq.size - tail; else n -= tail; if (size <= n) { ret = -EINVAL; goto bail_unlock; } n = 0; p = wq->wq; while (tail != head) { struct ipath_rwqe *wqe; int i; wqe = get_rwqe_ptr(&srq->rq, tail); p->wr_id = wqe->wr_id; p->num_sge = wqe->num_sge; for (i = 0; i < wqe->num_sge; i++) p->sg_list[i] = wqe->sg_list[i]; n++; p = (struct ipath_rwqe *)((char *) p + sz); if (++tail >= srq->rq.size) tail = 0; } srq->rq.wq = wq; srq->rq.size = size; wq->head = n; wq->tail = 0; if (attr_mask & IB_SRQ_LIMIT) srq->limit = attr->srq_limit; spin_unlock_irq(&srq->rq.lock); vfree(owq); if (srq->ip) { struct ipath_mmap_info *ip = srq->ip; struct ipath_ibdev *dev = to_idev(srq->ibsrq.device); u32 s = sizeof(struct ipath_rwq) + size * sz; ipath_update_mmap_info(dev, ip, s, wq); /* * Return the offset to mmap. * See ipath_mmap() for details. 
*/
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					goto bail;
			}

			/* republish the mmap info so user space can re-mmap
			 * the resized queue */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		/* the limit must fit inside the current queue size */
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	goto bail;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
bail:
	return ret;
}

/**
 * ipath_query_srq - report the current attributes of an SRQ
 * @ibsrq: the SRQ to query
 * @attr: filled in with max_wr, max_sge and srq_limit
 *
 * max_wr is size - 1 because one ring slot is always kept empty
 * (see the full-queue test in ipath_post_srq_receive).
 */
int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct ipath_srq *srq = to_isrq(ibsrq);

	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

/**
 * ipath_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int ipath_destroy_srq(struct ib_srq *ibsrq)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_ibdev *dev = to_idev(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
	/* user SRQs: the wq is freed when the mmap info refcount drops;
	 * kernel SRQs: free the wq directly */
	if (srq->ip)
		kref_put(&srq->ip->ref, ipath_release_mmap_info);
	else
		vfree(srq->rq.wq);
	kfree(srq);

	return 0;
}
gpl-2.0
srisurya95/android_kernel_motorola_msm8226
drivers/net/ppp/bsd_comp.c
9995
29584
/* * Update: The Berkeley copyright was changed, and the change * is retroactive to all "true" BSD software (ie everything * from UCB as opposed to other peoples code that just carried * the same license). The new copyright doesn't clash with the * GPL, so the module-only restriction has been removed.. */ /* Because this code is derived from the 4.3BSD compress source: * * Copyright (c) 1985, 1986 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * James A. Woods, derived from original work by Spencer Thomas * and Joseph Orost. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This version is for use with contiguous buffers on Linux-derived systems. * * ==FILEVERSION 20000226== * * NOTE TO MAINTAINERS: * If you modify this file at all, please set the number above to the * date of the modification as YYMMDD (year month day). * bsd_comp.c is shipped with a PPP distribution as well as with * the kernel; if everyone increases the FILEVERSION number above, * then scripts can do the right thing when deciding whether to * install a new bsd_comp.c file. Don't change the format of that * line otherwise, so the installation script can recognize it. * * From: bsd_comp.c,v 1.3 1994/12/08 01:59:58 paulus Exp */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/string.h> #include <linux/ppp_defs.h> #undef PACKETPTR #define PACKETPTR 1 #include <linux/ppp-comp.h> #undef PACKETPTR #include <asm/byteorder.h> /* * PPP "BSD compress" compression * The differences between this compression and the classic BSD LZW * source are obvious from the requirement that the classic code worked * with files while this handles arbitrarily long streams that * are broken into packets. They are: * * When the code size expands, a block of junk is not emitted by * the compressor and not expected by the decompressor. * * New codes are not necessarily assigned every time an old * code is output by the compressor. 
This is because a packet * end forces a code to be emitted, but does not imply that a * new sequence has been seen. * * The compression ratio is checked at the first end of a packet * after the appropriate gap. Besides simplifying and speeding * things up, this makes it more likely that the transmitter * and receiver will agree when the dictionary is cleared when * compression is not going well. */ /* * Macros to extract protocol version and number of bits * from the third byte of the BSD Compress CCP configuration option. */ #define BSD_VERSION(x) ((x) >> 5) #define BSD_NBITS(x) ((x) & 0x1F) #define BSD_CURRENT_VERSION 1 /* * A dictionary for doing BSD compress. */ struct bsd_dict { union { /* hash value */ unsigned long fcode; struct { #if defined(__LITTLE_ENDIAN) /* Little endian order */ unsigned short prefix; /* preceding code */ unsigned char suffix; /* last character of new code */ unsigned char pad; #elif defined(__BIG_ENDIAN) /* Big endian order */ unsigned char pad; unsigned char suffix; /* last character of new code */ unsigned short prefix; /* preceding code */ #else #error Endianness not defined... 
#endif } hs; } f; unsigned short codem1; /* output of hash table -1 */ unsigned short cptr; /* map code to hash table entry */ }; struct bsd_db { int totlen; /* length of this structure */ unsigned int hsize; /* size of the hash table */ unsigned char hshift; /* used in hash function */ unsigned char n_bits; /* current bits/code */ unsigned char maxbits; /* maximum bits/code */ unsigned char debug; /* non-zero if debug desired */ unsigned char unit; /* ppp unit number */ unsigned short seqno; /* sequence # of next packet */ unsigned int mru; /* size of receive (decompress) bufr */ unsigned int maxmaxcode; /* largest valid code */ unsigned int max_ent; /* largest code in use */ unsigned int in_count; /* uncompressed bytes, aged */ unsigned int bytes_out; /* compressed bytes, aged */ unsigned int ratio; /* recent compression ratio */ unsigned int checkpoint; /* when to next check the ratio */ unsigned int clear_count; /* times dictionary cleared */ unsigned int incomp_count; /* incompressible packets */ unsigned int incomp_bytes; /* incompressible bytes */ unsigned int uncomp_count; /* uncompressed packets */ unsigned int uncomp_bytes; /* uncompressed bytes */ unsigned int comp_count; /* compressed packets */ unsigned int comp_bytes; /* compressed bytes */ unsigned short *lens; /* array of lengths of codes */ struct bsd_dict *dict; /* dictionary */ }; #define BSD_OVHD 2 /* BSD compress overhead/packet */ #define MIN_BSD_BITS 9 #define BSD_INIT_BITS MIN_BSD_BITS #define MAX_BSD_BITS 15 static void bsd_free (void *state); static void *bsd_alloc(unsigned char *options, int opt_len, int decomp); static void *bsd_comp_alloc (unsigned char *options, int opt_len); static void *bsd_decomp_alloc (unsigned char *options, int opt_len); static int bsd_init (void *db, unsigned char *options, int opt_len, int unit, int debug, int decomp); static int bsd_comp_init (void *state, unsigned char *options, int opt_len, int unit, int opthdr, int debug); static int bsd_decomp_init (void 
*state, unsigned char *options, int opt_len, int unit, int opthdr, int mru, int debug); static void bsd_reset (void *state); static void bsd_comp_stats (void *state, struct compstat *stats); static int bsd_compress (void *state, unsigned char *rptr, unsigned char *obuf, int isize, int osize); static void bsd_incomp (void *state, unsigned char *ibuf, int icnt); static int bsd_decompress (void *state, unsigned char *ibuf, int isize, unsigned char *obuf, int osize); /* These are in ppp_generic.c */ extern int ppp_register_compressor (struct compressor *cp); extern void ppp_unregister_compressor (struct compressor *cp); /* * the next two codes should not be changed lightly, as they must not * lie within the contiguous general code space. */ #define CLEAR 256 /* table clear output code */ #define FIRST 257 /* first free entry */ #define LAST 255 #define MAXCODE(b) ((1 << (b)) - 1) #define BADCODEM1 MAXCODE(MAX_BSD_BITS) #define BSD_HASH(prefix,suffix,hshift) ((((unsigned long)(suffix))<<(hshift)) \ ^ (unsigned long)(prefix)) #define BSD_KEY(prefix,suffix) ((((unsigned long)(suffix)) << 16) \ + (unsigned long)(prefix)) #define CHECK_GAP 10000 /* Ratio check interval */ #define RATIO_SCALE_LOG 8 #define RATIO_SCALE (1<<RATIO_SCALE_LOG) #define RATIO_MAX (0x7fffffff>>RATIO_SCALE_LOG) /* * clear the dictionary */ static void bsd_clear(struct bsd_db *db) { db->clear_count++; db->max_ent = FIRST-1; db->n_bits = BSD_INIT_BITS; db->bytes_out = 0; db->in_count = 0; db->ratio = 0; db->checkpoint = CHECK_GAP; } /* * If the dictionary is full, then see if it is time to reset it. * * Compute the compression ratio using fixed-point arithmetic * with 8 fractional bits. * * Since we have an infinite stream instead of a single file, * watch only the local compression ratio. * * Since both peers must reset the dictionary at the same time even in * the absence of CLEAR codes (while packets are incompressible), they * must compute the same ratio. 
*/
static int bsd_check (struct bsd_db *db)	/* 1=output CLEAR */
{
	unsigned int new_ratio;

	if (db->in_count >= db->checkpoint)
	{
		/* age the ratio by limiting the size of the counts */
		if (db->in_count >= RATIO_MAX || db->bytes_out >= RATIO_MAX)
		{
			db->in_count -= (db->in_count >> 2);
			db->bytes_out -= (db->bytes_out >> 2);
		}

		db->checkpoint = db->in_count + CHECK_GAP;

		/* only consider clearing once the dictionary is full */
		if (db->max_ent >= db->maxmaxcode)
		{
			/* Reset the dictionary only if the ratio is worse,
			 * or if it looks as if it has been poisoned
			 * by incompressible data.
			 *
			 * This does not overflow, because
			 *	db->in_count <= RATIO_MAX.
			 */
			new_ratio = db->in_count << RATIO_SCALE_LOG;
			if (db->bytes_out != 0)
			{
				new_ratio /= db->bytes_out;
			}

			if (new_ratio < db->ratio || new_ratio < 1 * RATIO_SCALE)
			{
				bsd_clear (db);
				return 1;	/* caller must emit a CLEAR code */
			}
			db->ratio = new_ratio;
		}
	}
	return 0;
}

/*
 * Return statistics.
 */
static void bsd_comp_stats (void *state, struct compstat *stats)
{
	struct bsd_db *db = (struct bsd_db *) state;

	stats->unc_bytes = db->uncomp_bytes;
	stats->unc_packets = db->uncomp_count;
	stats->comp_bytes = db->comp_bytes;
	stats->comp_packets = db->comp_count;
	stats->inc_bytes = db->incomp_bytes;
	stats->inc_packets = db->incomp_count;
	stats->in_count = db->in_count;
	stats->bytes_out = db->bytes_out;
}

/*
 * Reset state, as on a CCP ResetReq.
 */
static void bsd_reset (void *state)
{
	struct bsd_db *db = (struct bsd_db *) state;

	bsd_clear(db);

	db->seqno = 0;
	db->clear_count = 0;
}

/*
 * Release the compression structure.
 *
 * Also used by the bsd_alloc() error paths, so it must cope with a
 * partially-initialised db (NULL dict/lens) and a NULL state.
 */
static void bsd_free (void *state)
{
	struct bsd_db *db = state;

	if (!db)
		return;

	/*
	 * Release the dictionary
	 */
	vfree(db->dict);
	db->dict = NULL;
	/*
	 * Release the string buffer
	 */
	vfree(db->lens);
	db->lens = NULL;
	/*
	 * Finally release the structure itself.
	 */
	kfree(db);
}

/*
 * Allocate space for a (de) compressor.
*/
/*
 * options must be the 3-byte CCP BSD-Compress configuration option
 * (CI_BSD_COMPRESS, length 3, version+bits byte); decomp selects
 * whether the per-code length table needed for decompression is
 * allocated.  Returns the new state, or NULL on bad options / OOM.
 */
static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
{
	int bits;
	unsigned int hsize, hshift, maxmaxcode;
	struct bsd_db *db;

	if (opt_len != 3 || options[0] != CI_BSD_COMPRESS || options[1] != 3
	    || BSD_VERSION(options[2]) != BSD_CURRENT_VERSION)
	{
		return NULL;
	}

	bits = BSD_NBITS(options[2]);

	/* pick a hash table size/shift suited to the code width */
	switch (bits)
	{
	case 9:			/* needs 82152 for both directions */
	case 10:		/* needs 84144 */
	case 11:		/* needs 88240 */
	case 12:		/* needs 96432 */
		hsize = 5003;
		hshift = 4;
		break;
	case 13:		/* needs 176784 */
		hsize = 9001;
		hshift = 5;
		break;
	case 14:		/* needs 353744 */
		hsize = 18013;
		hshift = 6;
		break;
	case 15:		/* needs 691440 */
		hsize = 35023;
		hshift = 7;
		break;
	case 16:		/* needs 1366160--far too much, */
		/* hsize = 69001; */	/* and 69001 is too big for cptr */
		/* hshift = 8; */	/* in struct bsd_db */
		/* break; */
	default:
		/* anything outside 9..15 bits is rejected */
		return NULL;
	}
	/*
	 * Allocate the main control structure for this instance.
	 */
	maxmaxcode = MAXCODE(bits);
	db = kzalloc(sizeof (struct bsd_db), GFP_KERNEL);
	if (!db)
	{
		return NULL;
	}
	/*
	 * Allocate space for the dictionary. This may be more than one page in
	 * length.
	 */
	db->dict = vmalloc(hsize * sizeof(struct bsd_dict));
	if (!db->dict)
	{
		bsd_free (db);
		return NULL;
	}
	/*
	 * If this is the compression buffer then there is no length data.
	 */
	if (!decomp)
	{
		db->lens = NULL;
	}
	/*
	 * For decompression, the length information is needed as well.
*/
	else
	{
		db->lens = vmalloc((maxmaxcode + 1) * sizeof(db->lens[0]));
		if (!db->lens)
		{
			bsd_free (db);
			return NULL;
		}
	}
	/*
	 * Initialize the data information for the compression code
	 */
	db->totlen = sizeof (struct bsd_db) +
		(sizeof (struct bsd_dict) * hsize);

	db->hsize = hsize;
	db->hshift = hshift;
	db->maxmaxcode = maxmaxcode;
	db->maxbits = bits;

	return (void *) db;
}

/* thin wrappers selecting compress (0) vs decompress (1) allocation */
static void *bsd_comp_alloc (unsigned char *options, int opt_len)
{
	return bsd_alloc (options, opt_len, 0);
}

static void *bsd_decomp_alloc (unsigned char *options, int opt_len)
{
	return bsd_alloc (options, opt_len, 1);
}

/*
 * Initialize the database.
 *
 * Re-validates the negotiated option against the allocated state and
 * resets the length table, hash table and counters.  Returns 1 on
 * success, 0 if the options do not match this db.
 */
static int bsd_init (void *state, unsigned char *options,
		     int opt_len, int unit, int debug, int decomp)
{
	struct bsd_db *db = state;
	int indx;

	if ((opt_len != 3) || (options[0] != CI_BSD_COMPRESS) || (options[1] != 3)
	    || (BSD_VERSION(options[2]) != BSD_CURRENT_VERSION)
	    || (BSD_NBITS(options[2]) != db->maxbits)
	    || (decomp && db->lens == NULL))
	{
		return 0;
	}

	/* all single-byte codes (0..LAST) have length 1 */
	if (decomp)
	{
		indx = LAST;
		do
		{
			db->lens[indx] = 1;
		}
		while (indx-- > 0);
	}

	/* mark every hash slot empty */
	indx = db->hsize;
	while (indx-- != 0)
	{
		db->dict[indx].codem1 = BADCODEM1;
		db->dict[indx].cptr = 0;
	}

	db->unit = unit;
	db->mru = 0;
#ifndef DEBUG
	if (debug)
#endif
		db->debug = 1;

	bsd_reset(db);

	return 1;
}

/* opthdr (and mru, for decompression) are accepted for the compressor
 * interface but not used by bsd_init */
static int bsd_comp_init (void *state, unsigned char *options,
			  int opt_len, int unit, int opthdr, int debug)
{
	return bsd_init (state, options, opt_len, unit, debug, 0);
}

static int bsd_decomp_init (void *state, unsigned char *options,
			    int opt_len, int unit, int opthdr, int mru,
			    int debug)
{
	return bsd_init (state, options, opt_len, unit, debug, 1);
}

/*
 * Obtain pointers to the various structures in the compression tables
 */

#define dict_ptrx(p,idx) &(p->dict[idx])
#define lens_ptrx(p,idx) &(p->lens[idx])

#ifdef DEBUG
/* bounds-checked accessor for the code-length table (debug builds) */
static unsigned short *lens_ptr(struct bsd_db *db, int idx)
{
	if ((unsigned int) idx > (unsigned int) db->maxmaxcode)
	{
		printk ("<9>ppp: lens_ptr(%d) > max\n", idx);
		idx = 0;
	}
	return lens_ptrx (db, idx);
}
static struct bsd_dict *dict_ptr(struct bsd_db *db, int idx) { if ((unsigned int) idx >= (unsigned int) db->hsize) { printk ("<9>ppp: dict_ptr(%d) > max\n", idx); idx = 0; } return dict_ptrx (db, idx); } #else #define lens_ptr(db,idx) lens_ptrx(db,idx) #define dict_ptr(db,idx) dict_ptrx(db,idx) #endif /* * compress a packet * * The result of this function is the size of the compressed * packet. A zero is returned if the packet was not compressed * for some reason, such as the size being larger than uncompressed. * * One change from the BSD compress command is that when the * code size expands, we do not output a bunch of padding. */ static int bsd_compress (void *state, unsigned char *rptr, unsigned char *obuf, int isize, int osize) { struct bsd_db *db; int hshift; unsigned int max_ent; unsigned int n_bits; unsigned int bitno; unsigned long accm; int ent; unsigned long fcode; struct bsd_dict *dictp; unsigned char c; int hval; int disp; int ilen; int mxcode; unsigned char *wptr; int olen; #define PUTBYTE(v) \ { \ ++olen; \ if (wptr) \ { \ *wptr++ = (unsigned char) (v); \ if (olen >= osize) \ { \ wptr = NULL; \ } \ } \ } #define OUTPUT(ent) \ { \ bitno -= n_bits; \ accm |= ((ent) << bitno); \ do \ { \ PUTBYTE(accm >> 24); \ accm <<= 8; \ bitno += 8; \ } \ while (bitno <= 24); \ } /* * If the protocol is not in the range we're interested in, * just return without compressing the packet. If it is, * the protocol becomes the first byte to compress. 
*/ ent = PPP_PROTOCOL(rptr); if (ent < 0x21 || ent > 0xf9) { return 0; } db = (struct bsd_db *) state; hshift = db->hshift; max_ent = db->max_ent; n_bits = db->n_bits; bitno = 32; accm = 0; mxcode = MAXCODE (n_bits); /* Initialize the output pointers */ wptr = obuf; olen = PPP_HDRLEN + BSD_OVHD; if (osize > isize) { osize = isize; } /* This is the PPP header information */ if (wptr) { *wptr++ = PPP_ADDRESS(rptr); *wptr++ = PPP_CONTROL(rptr); *wptr++ = 0; *wptr++ = PPP_COMP; *wptr++ = db->seqno >> 8; *wptr++ = db->seqno; } /* Skip the input header */ rptr += PPP_HDRLEN; isize -= PPP_HDRLEN; ilen = ++isize; /* Low byte of protocol is counted as input */ while (--ilen > 0) { c = *rptr++; fcode = BSD_KEY (ent, c); hval = BSD_HASH (ent, c, hshift); dictp = dict_ptr (db, hval); /* Validate and then check the entry. */ if (dictp->codem1 >= max_ent) { goto nomatch; } if (dictp->f.fcode == fcode) { ent = dictp->codem1 + 1; continue; /* found (prefix,suffix) */ } /* continue probing until a match or invalid entry */ disp = (hval == 0) ? 1 : hval; do { hval += disp; if (hval >= db->hsize) { hval -= db->hsize; } dictp = dict_ptr (db, hval); if (dictp->codem1 >= max_ent) { goto nomatch; } } while (dictp->f.fcode != fcode); ent = dictp->codem1 + 1; /* finally found (prefix,suffix) */ continue; nomatch: OUTPUT(ent); /* output the prefix */ /* code -> hashtable */ if (max_ent < db->maxmaxcode) { struct bsd_dict *dictp2; struct bsd_dict *dictp3; int indx; /* expand code size if needed */ if (max_ent >= mxcode) { db->n_bits = ++n_bits; mxcode = MAXCODE (n_bits); } /* Invalidate old hash table entry using * this code, and then take it over. 
*/ dictp2 = dict_ptr (db, max_ent + 1); indx = dictp2->cptr; dictp3 = dict_ptr (db, indx); if (dictp3->codem1 == max_ent) { dictp3->codem1 = BADCODEM1; } dictp2->cptr = hval; dictp->codem1 = max_ent; dictp->f.fcode = fcode; db->max_ent = ++max_ent; if (db->lens) { unsigned short *len1 = lens_ptr (db, max_ent); unsigned short *len2 = lens_ptr (db, ent); *len1 = *len2 + 1; } } ent = c; } OUTPUT(ent); /* output the last code */ db->bytes_out += olen - PPP_HDRLEN - BSD_OVHD; db->uncomp_bytes += isize; db->in_count += isize; ++db->uncomp_count; ++db->seqno; if (bitno < 32) { ++db->bytes_out; /* must be set before calling bsd_check */ } /* * Generate the clear command if needed */ if (bsd_check(db)) { OUTPUT (CLEAR); } /* * Pad dribble bits of last code with ones. * Do not emit a completely useless byte of ones. */ if (bitno != 32) { PUTBYTE((accm | (0xff << (bitno-8))) >> 24); } /* * Increase code size if we would have without the packet * boundary because the decompressor will do so. */ if (max_ent >= mxcode && max_ent < db->maxmaxcode) { db->n_bits++; } /* If output length is too large then this is an incomplete frame. */ if (wptr == NULL) { ++db->incomp_count; db->incomp_bytes += isize; olen = 0; } else /* Count the number of compressed frames */ { ++db->comp_count; db->comp_bytes += olen; } /* Return the resulting output length */ return olen; #undef OUTPUT #undef PUTBYTE } /* * Update the "BSD Compress" dictionary on the receiver for * incompressible data by pretending to compress the incoming data. */ static void bsd_incomp (void *state, unsigned char *ibuf, int icnt) { (void) bsd_compress (state, ibuf, (char *) 0, icnt, 0); } /* * Decompress "BSD Compress". * * Because of patent problems, we return DECOMP_ERROR for errors * found by inspecting the input data and for system problems, but * DECOMP_FATALERROR for any errors which could possibly be said to * be being detected "after" decompression. 
For DECOMP_ERROR, * we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be * infringing a patent of Motorola's if we do, so we take CCP down * instead. * * Given that the frame has the correct sequence number and a good FCS, * errors such as invalid codes in the input most likely indicate a * bug, so we return DECOMP_FATALERROR for them in order to turn off * compression, even though they are detected by inspecting the input. */ static int bsd_decompress (void *state, unsigned char *ibuf, int isize, unsigned char *obuf, int osize) { struct bsd_db *db; unsigned int max_ent; unsigned long accm; unsigned int bitno; /* 1st valid bit in accm */ unsigned int n_bits; unsigned int tgtbitno; /* bitno when we have a code */ struct bsd_dict *dictp; int explen; int seq; unsigned int incode; unsigned int oldcode; unsigned int finchar; unsigned char *p; unsigned char *wptr; int adrs; int ctrl; int ilen; int codelen; int extra; db = (struct bsd_db *) state; max_ent = db->max_ent; accm = 0; bitno = 32; /* 1st valid bit in accm */ n_bits = db->n_bits; tgtbitno = 32 - n_bits; /* bitno when we have a code */ /* * Save the address/control from the PPP header * and then get the sequence number. */ adrs = PPP_ADDRESS (ibuf); ctrl = PPP_CONTROL (ibuf); seq = (ibuf[4] << 8) + ibuf[5]; ibuf += (PPP_HDRLEN + 2); ilen = isize - (PPP_HDRLEN + 2); /* * Check the sequence number and give up if it differs from * the value we're expecting. */ if (seq != db->seqno) { if (db->debug) { printk("bsd_decomp%d: bad sequence # %d, expected %d\n", db->unit, seq, db->seqno - 1); } return DECOMP_ERROR; } ++db->seqno; db->bytes_out += ilen; /* * Fill in the ppp header, but not the last byte of the protocol * (that comes from the decompressed data). */ wptr = obuf; *wptr++ = adrs; *wptr++ = ctrl; *wptr++ = 0; oldcode = CLEAR; explen = 3; /* * Keep the checkpoint correctly so that incompressible packets * clear the dictionary at the proper times. 
*/ for (;;) { if (ilen-- <= 0) { db->in_count += (explen - 3); /* don't count the header */ break; } /* * Accumulate bytes until we have a complete code. * Then get the next code, relying on the 32-bit, * unsigned accm to mask the result. */ bitno -= 8; accm |= *ibuf++ << bitno; if (tgtbitno < bitno) { continue; } incode = accm >> tgtbitno; accm <<= n_bits; bitno += n_bits; /* * The dictionary must only be cleared at the end of a packet. */ if (incode == CLEAR) { if (ilen > 0) { if (db->debug) { printk("bsd_decomp%d: bad CLEAR\n", db->unit); } return DECOMP_FATALERROR; /* probably a bug */ } bsd_clear(db); break; } if ((incode > max_ent + 2) || (incode > db->maxmaxcode) || (incode > max_ent && oldcode == CLEAR)) { if (db->debug) { printk("bsd_decomp%d: bad code 0x%x oldcode=0x%x ", db->unit, incode, oldcode); printk("max_ent=0x%x explen=%d seqno=%d\n", max_ent, explen, db->seqno); } return DECOMP_FATALERROR; /* probably a bug */ } /* Special case for KwKwK string. */ if (incode > max_ent) { finchar = oldcode; extra = 1; } else { finchar = incode; extra = 0; } codelen = *(lens_ptr (db, finchar)); explen += codelen + extra; if (explen > osize) { if (db->debug) { printk("bsd_decomp%d: ran out of mru\n", db->unit); #ifdef DEBUG printk(" len=%d, finchar=0x%x, codelen=%d, explen=%d\n", ilen, finchar, codelen, explen); #endif } return DECOMP_FATALERROR; } /* * Decode this code and install it in the decompressed buffer. 
*/ wptr += codelen; p = wptr; while (finchar > LAST) { struct bsd_dict *dictp2 = dict_ptr (db, finchar); dictp = dict_ptr (db, dictp2->cptr); #ifdef DEBUG if (--codelen <= 0 || dictp->codem1 != finchar-1) { if (codelen <= 0) { printk("bsd_decomp%d: fell off end of chain ", db->unit); printk("0x%x at 0x%x by 0x%x, max_ent=0x%x\n", incode, finchar, dictp2->cptr, max_ent); } else { if (dictp->codem1 != finchar-1) { printk("bsd_decomp%d: bad code chain 0x%x " "finchar=0x%x ", db->unit, incode, finchar); printk("oldcode=0x%x cptr=0x%x codem1=0x%x\n", oldcode, dictp2->cptr, dictp->codem1); } } return DECOMP_FATALERROR; } #endif *--p = dictp->f.hs.suffix; finchar = dictp->f.hs.prefix; } *--p = finchar; #ifdef DEBUG if (--codelen != 0) { printk("bsd_decomp%d: short by %d after code 0x%x, max_ent=0x%x\n", db->unit, codelen, incode, max_ent); } #endif if (extra) /* the KwKwK case again */ { *wptr++ = finchar; } /* * If not first code in a packet, and * if not out of code space, then allocate a new code. * * Keep the hash table correct so it can be used * with uncompressed packets. */ if (oldcode != CLEAR && max_ent < db->maxmaxcode) { struct bsd_dict *dictp2, *dictp3; unsigned short *lens1, *lens2; unsigned long fcode; int hval, disp, indx; fcode = BSD_KEY(oldcode,finchar); hval = BSD_HASH(oldcode,finchar,db->hshift); dictp = dict_ptr (db, hval); /* look for a free hash table entry */ if (dictp->codem1 < max_ent) { disp = (hval == 0) ? 1 : hval; do { hval += disp; if (hval >= db->hsize) { hval -= db->hsize; } dictp = dict_ptr (db, hval); } while (dictp->codem1 < max_ent); } /* * Invalidate previous hash table entry * assigned this code, and then take it over */ dictp2 = dict_ptr (db, max_ent + 1); indx = dictp2->cptr; dictp3 = dict_ptr (db, indx); if (dictp3->codem1 == max_ent) { dictp3->codem1 = BADCODEM1; } dictp2->cptr = hval; dictp->codem1 = max_ent; dictp->f.fcode = fcode; db->max_ent = ++max_ent; /* Update the length of this string. 
*/ lens1 = lens_ptr (db, max_ent); lens2 = lens_ptr (db, oldcode); *lens1 = *lens2 + 1; /* Expand code size if needed. */ if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode) { db->n_bits = ++n_bits; tgtbitno = 32-n_bits; } } oldcode = incode; } ++db->comp_count; ++db->uncomp_count; db->comp_bytes += isize - BSD_OVHD - PPP_HDRLEN; db->uncomp_bytes += explen; if (bsd_check(db)) { if (db->debug) { printk("bsd_decomp%d: peer should have cleared dictionary on %d\n", db->unit, db->seqno - 1); } } return explen; } /************************************************************* * Table of addresses for the BSD compression module *************************************************************/ static struct compressor ppp_bsd_compress = { .compress_proto = CI_BSD_COMPRESS, .comp_alloc = bsd_comp_alloc, .comp_free = bsd_free, .comp_init = bsd_comp_init, .comp_reset = bsd_reset, .compress = bsd_compress, .comp_stat = bsd_comp_stats, .decomp_alloc = bsd_decomp_alloc, .decomp_free = bsd_free, .decomp_init = bsd_decomp_init, .decomp_reset = bsd_reset, .decompress = bsd_decompress, .incomp = bsd_incomp, .decomp_stat = bsd_comp_stats, .owner = THIS_MODULE }; /************************************************************* * Module support routines *************************************************************/ static int __init bsdcomp_init(void) { int answer = ppp_register_compressor(&ppp_bsd_compress); if (answer == 0) printk(KERN_INFO "PPP BSD Compression module registered\n"); return answer; } static void __exit bsdcomp_cleanup(void) { ppp_unregister_compressor(&ppp_bsd_compress); } module_init(bsdcomp_init); module_exit(bsdcomp_cleanup); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("ppp-compress-" __stringify(CI_BSD_COMPRESS));
gpl-2.0
hgl888/linux
arch/sparc/mm/init_64.c
12
75254
/* * arch/sparc64/mm/init.c * * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/extable.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/initrd.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/poison.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/kprobes.h> #include <linux/cache.h> #include <linux/sort.h> #include <linux/ioport.h> #include <linux/percpu.h> #include <linux/memblock.h> #include <linux/mmzone.h> #include <linux/gfp.h> #include <asm/head.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/oplib.h> #include <asm/iommu.h> #include <asm/io.h> #include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include <asm/dma.h> #include <asm/starfire.h> #include <asm/tlb.h> #include <asm/spitfire.h> #include <asm/sections.h> #include <asm/tsb.h> #include <asm/hypervisor.h> #include <asm/prom.h> #include <asm/mdesc.h> #include <asm/cpudata.h> #include <asm/setup.h> #include <asm/irq.h> #include "init_64.h" unsigned long kern_linear_pte_xor[4] __read_mostly; static unsigned long page_cache4v_flag; /* A bitmap, two bits for every 256MB of physical memory. These two * bits determine what page size we use for kernel linear * translations. They form an index into kern_linear_pte_xor[]. The * value in the indexed slot is XOR'd with the TLB miss virtual * address to form the resulting TTE. The mapping is: * * 0 ==> 4MB * 1 ==> 256MB * 2 ==> 2GB * 3 ==> 16GB * * All sun4v chips support 256MB pages. Only SPARC-T4 and later * support 2GB pages, and hopefully future cpus will support the 16GB * pages as well. For slots 2 and 3, we encode a 256MB TTE xor there * if these larger page sizes are not supported by the cpu. 
* * It would be nice to determine this from the machine description * 'cpu' properties, but we need to have this table setup before the * MDESC is initialized. */ #ifndef CONFIG_DEBUG_PAGEALLOC /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings. * Space is allocated for this right after the trap table in * arch/sparc64/kernel/head.S */ extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; #endif extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; static unsigned long cpu_pgsz_mask; #define MAX_BANKS 1024 static struct linux_prom64_registers pavail[MAX_BANKS]; static int pavail_ents; u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES]; static int cmp_p64(const void *a, const void *b) { const struct linux_prom64_registers *x = a, *y = b; if (x->phys_addr > y->phys_addr) return 1; if (x->phys_addr < y->phys_addr) return -1; return 0; } static void __init read_obp_memory(const char *property, struct linux_prom64_registers *regs, int *num_ents) { phandle node = prom_finddevice("/memory"); int prop_size = prom_getproplen(node, property); int ents, ret, i; ents = prop_size / sizeof(struct linux_prom64_registers); if (ents > MAX_BANKS) { prom_printf("The machine has more %s property entries than " "this kernel can support (%d).\n", property, MAX_BANKS); prom_halt(); } ret = prom_getproperty(node, property, (char *) regs, prop_size); if (ret == -1) { prom_printf("Couldn't get %s property from /memory.\n", property); prom_halt(); } /* Sanitize what we got from the firmware, by page aligning * everything. */ for (i = 0; i < ents; i++) { unsigned long base, size; base = regs[i].phys_addr; size = regs[i].reg_size; size &= PAGE_MASK; if (base & ~PAGE_MASK) { unsigned long new_base = PAGE_ALIGN(base); size -= new_base - base; if ((long) size < 0L) size = 0UL; base = new_base; } if (size == 0UL) { /* If it is empty, simply get rid of it. * This simplifies the logic of the other * functions that process these arrays. 
*/ memmove(&regs[i], &regs[i + 1], (ents - i - 1) * sizeof(regs[0])); i--; ents--; continue; } regs[i].phys_addr = base; regs[i].reg_size = size; } *num_ents = ents; sort(regs, ents, sizeof(struct linux_prom64_registers), cmp_p64, NULL); } /* Kernel physical address base and size in bytes. */ unsigned long kern_base __read_mostly; unsigned long kern_size __read_mostly; /* Initial ramdisk setup */ extern unsigned long sparc_ramdisk_image64; extern unsigned int sparc_ramdisk_image; extern unsigned int sparc_ramdisk_size; struct page *mem_map_zero __read_mostly; EXPORT_SYMBOL(mem_map_zero); unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly; unsigned long sparc64_kern_pri_context __read_mostly; unsigned long sparc64_kern_pri_nuc_bits __read_mostly; unsigned long sparc64_kern_sec_context __read_mostly; int num_kernel_image_mappings; #ifdef CONFIG_DEBUG_DCFLUSH atomic_t dcpage_flushes = ATOMIC_INIT(0); #ifdef CONFIG_SMP atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); #endif #endif inline void flush_dcache_page_impl(struct page *page) { BUG_ON(tlb_type == hypervisor); #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes); #endif #ifdef DCACHE_ALIASING_POSSIBLE __flush_dcache_page(page_address(page), ((tlb_type == spitfire) && page_mapping(page) != NULL)); #else if (page_mapping(page) != NULL && tlb_type == spitfire) __flush_icache_page(__pa(page_address(page))); #endif } #define PG_dcache_dirty PG_arch_1 #define PG_dcache_cpu_shift 32UL #define PG_dcache_cpu_mask \ ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL) #define dcache_dirty_cpu(page) \ (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) static inline void set_dcache_dirty(struct page *page, int this_cpu) { unsigned long mask = this_cpu; unsigned long non_cpu_bits; non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift); mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty); __asm__ __volatile__("1:\n\t" "ldx [%2], %%g7\n\t" "and %%g7, %1, %%g1\n\t" "or %%g1, %0, %%g1\n\t" "casx 
[%2], %%g7, %%g1\n\t" "cmp %%g7, %%g1\n\t" "bne,pn %%xcc, 1b\n\t" " nop" : /* no outputs */ : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags) : "g1", "g7"); } static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu) { unsigned long mask = (1UL << PG_dcache_dirty); __asm__ __volatile__("! test_and_clear_dcache_dirty\n" "1:\n\t" "ldx [%2], %%g7\n\t" "srlx %%g7, %4, %%g1\n\t" "and %%g1, %3, %%g1\n\t" "cmp %%g1, %0\n\t" "bne,pn %%icc, 2f\n\t" " andn %%g7, %1, %%g1\n\t" "casx [%2], %%g7, %%g1\n\t" "cmp %%g7, %%g1\n\t" "bne,pn %%xcc, 1b\n\t" " nop\n" "2:" : /* no outputs */ : "r" (cpu), "r" (mask), "r" (&page->flags), "i" (PG_dcache_cpu_mask), "i" (PG_dcache_cpu_shift) : "g1", "g7"); } static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte) { unsigned long tsb_addr = (unsigned long) ent; if (tlb_type == cheetah_plus || tlb_type == hypervisor) tsb_addr = __pa(tsb_addr); __tsb_insert(tsb_addr, tag, pte); } unsigned long _PAGE_ALL_SZ_BITS __read_mostly; static void flush_dcache(unsigned long pfn) { struct page *page; page = pfn_to_page(pfn); if (page) { unsigned long pg_flags; pg_flags = page->flags; if (pg_flags & (1UL << PG_dcache_dirty)) { int cpu = ((pg_flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask); int this_cpu = get_cpu(); /* This is just to optimize away some function calls * in the SMP case. 
*/ if (cpu == this_cpu) flush_dcache_page_impl(page); else smp_flush_dcache_page_impl(page, cpu); clear_dcache_dirty_cpu(page, cpu); put_cpu(); } } } /* mm->context.lock must be held */ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index, unsigned long tsb_hash_shift, unsigned long address, unsigned long tte) { struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb; unsigned long tag; if (unlikely(!tsb)) return; tsb += ((address >> tsb_hash_shift) & (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); tag = (address >> 22UL); tsb_insert(tsb, tag, tte); } #ifdef CONFIG_HUGETLB_PAGE static int __init setup_hugepagesz(char *string) { unsigned long long hugepage_size; unsigned int hugepage_shift; unsigned short hv_pgsz_idx; unsigned int hv_pgsz_mask; int rc = 0; hugepage_size = memparse(string, &string); hugepage_shift = ilog2(hugepage_size); switch (hugepage_shift) { case HPAGE_256MB_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_256MB; hv_pgsz_idx = HV_PGSZ_IDX_256MB; break; case HPAGE_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_4MB; hv_pgsz_idx = HV_PGSZ_IDX_4MB; break; case HPAGE_64K_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_64K; hv_pgsz_idx = HV_PGSZ_IDX_64K; break; default: hv_pgsz_mask = 0; } if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) { pr_warn("hugepagesz=%llu not supported by MMU.\n", hugepage_size); goto out; } hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT); rc = 1; out: return rc; } __setup("hugepagesz=", setup_hugepagesz); #endif /* CONFIG_HUGETLB_PAGE */ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { struct mm_struct *mm; unsigned long flags; pte_t pte = *ptep; if (tlb_type != hypervisor) { unsigned long pfn = pte_pfn(pte); if (pfn_valid(pfn)) flush_dcache(pfn); } mm = vma->vm_mm; /* Don't insert a non-valid PTE into the TSB, we'll deadlock. 
*/ if (!pte_accessible(mm, pte)) return; spin_lock_irqsave(&mm->context.lock, flags); #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) && is_hugetlb_pmd(__pmd(pte_val(pte)))) { /* We are fabricating 8MB pages using 4MB real hw pages. */ pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT)); __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, address, pte_val(pte)); } else #endif __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT, address, pte_val(pte)); spin_unlock_irqrestore(&mm->context.lock, flags); } void flush_dcache_page(struct page *page) { struct address_space *mapping; int this_cpu; if (tlb_type == hypervisor) return; /* Do not bother with the expensive D-cache flush if it * is merely the zero page. The 'bigcore' testcase in GDB * causes this case to run millions of times. */ if (page == ZERO_PAGE(0)) return; this_cpu = get_cpu(); mapping = page_mapping(page); if (mapping && !mapping_mapped(mapping)) { int dirty = test_bit(PG_dcache_dirty, &page->flags); if (dirty) { int dirty_cpu = dcache_dirty_cpu(page); if (dirty_cpu == this_cpu) goto out; smp_flush_dcache_page_impl(page, dirty_cpu); } set_dcache_dirty(page, this_cpu); } else { /* We could delay the flush for the !page_mapping * case too. But that case is for exec env/arg * pages and those are %99 certainly going to get * faulted into the tlb (and thus flushed) anyways. */ flush_dcache_page_impl(page); } out: put_cpu(); } EXPORT_SYMBOL(flush_dcache_page); void __kprobes flush_icache_range(unsigned long start, unsigned long end) { /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ if (tlb_type == spitfire) { unsigned long kaddr; /* This code only runs on Spitfire cpus so this is * why we can assume _PAGE_PADDR_4U. 
*/ for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) { unsigned long paddr, mask = _PAGE_PADDR_4U; if (kaddr >= PAGE_OFFSET) paddr = kaddr & mask; else { pgd_t *pgdp = pgd_offset_k(kaddr); pud_t *pudp = pud_offset(pgdp, kaddr); pmd_t *pmdp = pmd_offset(pudp, kaddr); pte_t *ptep = pte_offset_kernel(pmdp, kaddr); paddr = pte_val(*ptep) & mask; } __flush_icache_page(paddr); } } } EXPORT_SYMBOL(flush_icache_range); void mmu_info(struct seq_file *m) { static const char *pgsz_strings[] = { "8K", "64K", "512K", "4MB", "32MB", "256MB", "2GB", "16GB", }; int i, printed; if (tlb_type == cheetah) seq_printf(m, "MMU Type\t: Cheetah\n"); else if (tlb_type == cheetah_plus) seq_printf(m, "MMU Type\t: Cheetah+\n"); else if (tlb_type == spitfire) seq_printf(m, "MMU Type\t: Spitfire\n"); else if (tlb_type == hypervisor) seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n"); else seq_printf(m, "MMU Type\t: ???\n"); seq_printf(m, "MMU PGSZs\t: "); printed = 0; for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) { if (cpu_pgsz_mask & (1UL << i)) { seq_printf(m, "%s%s", printed ? "," : "", pgsz_strings[i]); printed++; } } seq_putc(m, '\n'); #ifdef CONFIG_DEBUG_DCFLUSH seq_printf(m, "DCPageFlushes\t: %d\n", atomic_read(&dcpage_flushes)); #ifdef CONFIG_SMP seq_printf(m, "DCPageFlushesXC\t: %d\n", atomic_read(&dcpage_flushes_xcall)); #endif /* CONFIG_SMP */ #endif /* CONFIG_DEBUG_DCFLUSH */ } struct linux_prom_translation prom_trans[512] __read_mostly; unsigned int prom_trans_ents __read_mostly; unsigned long kern_locked_tte_data; /* The obp translations are saved based on 8k pagesize, since obp can * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> * HI_OBP_ADDRESS range are handled in ktlb.S. 
*/ static inline int in_obp_range(unsigned long vaddr) { return (vaddr >= LOW_OBP_ADDRESS && vaddr < HI_OBP_ADDRESS); } static int cmp_ptrans(const void *a, const void *b) { const struct linux_prom_translation *x = a, *y = b; if (x->virt > y->virt) return 1; if (x->virt < y->virt) return -1; return 0; } /* Read OBP translations property into 'prom_trans[]'. */ static void __init read_obp_translations(void) { int n, node, ents, first, last, i; node = prom_finddevice("/virtual-memory"); n = prom_getproplen(node, "translations"); if (unlikely(n == 0 || n == -1)) { prom_printf("prom_mappings: Couldn't get size.\n"); prom_halt(); } if (unlikely(n > sizeof(prom_trans))) { prom_printf("prom_mappings: Size %d is too big.\n", n); prom_halt(); } if ((n = prom_getproperty(node, "translations", (char *)&prom_trans[0], sizeof(prom_trans))) == -1) { prom_printf("prom_mappings: Couldn't get property.\n"); prom_halt(); } n = n / sizeof(struct linux_prom_translation); ents = n; sort(prom_trans, ents, sizeof(struct linux_prom_translation), cmp_ptrans, NULL); /* Now kick out all the non-OBP entries. */ for (i = 0; i < ents; i++) { if (in_obp_range(prom_trans[i].virt)) break; } first = i; for (; i < ents; i++) { if (!in_obp_range(prom_trans[i].virt)) break; } last = i; for (i = 0; i < (last - first); i++) { struct linux_prom_translation *src = &prom_trans[i + first]; struct linux_prom_translation *dest = &prom_trans[i]; *dest = *src; } for (; i < ents; i++) { struct linux_prom_translation *dest = &prom_trans[i]; dest->virt = dest->size = dest->data = 0x0UL; } prom_trans_ents = last - first; if (tlb_type == spitfire) { /* Clear diag TTE bits. */ for (i = 0; i < prom_trans_ents; i++) prom_trans[i].data &= ~0x0003fe0000000000UL; } /* Force execute bit on. */ for (i = 0; i < prom_trans_ents; i++) prom_trans[i].data |= (tlb_type == hypervisor ? 
_PAGE_EXEC_4V : _PAGE_EXEC_4U); } static void __init hypervisor_tlb_lock(unsigned long vaddr, unsigned long pte, unsigned long mmu) { unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu); if (ret != 0) { prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: " "errors with %lx\n", vaddr, 0, pte, mmu, ret); prom_halt(); } } static unsigned long kern_large_tte(unsigned long paddr); static void __init remap_kernel(void) { unsigned long phys_page, tte_vaddr, tte_data; int i, tlb_ent = sparc64_highest_locked_tlbent(); tte_vaddr = (unsigned long) KERNBASE; phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB; tte_data = kern_large_tte(phys_page); kern_locked_tte_data = tte_data; /* Now lock us into the TLBs via Hypervisor or OBP. */ if (tlb_type == hypervisor) { for (i = 0; i < num_kernel_image_mappings; i++) { hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); tte_vaddr += 0x400000; tte_data += 0x400000; } } else { for (i = 0; i < num_kernel_image_mappings; i++) { prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr); prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr); tte_vaddr += 0x400000; tte_data += 0x400000; } sparc64_highest_unlocked_tlb_ent = tlb_ent - i; } if (tlb_type == cheetah_plus) { sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | CTX_CHEETAH_PLUS_NUC); sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC; sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0; } } static void __init inherit_prom_mappings(void) { /* Now fixup OBP's idea about where we really are mapped. */ printk("Remapping the kernel... 
"); remap_kernel(); printk("done.\n"); } void prom_world(int enter) { if (!enter) set_fs(get_fs()); __asm__ __volatile__("flushw"); } void __flush_dcache_range(unsigned long start, unsigned long end) { unsigned long va; if (tlb_type == spitfire) { int n = 0; for (va = start; va < end; va += 32) { spitfire_put_dcache_tag(va & 0x3fe0, 0x0); if (++n >= 512) break; } } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { start = __pa(start); end = __pa(end); for (va = start; va < end; va += 32) __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (va), "i" (ASI_DCACHE_INVALIDATE)); } } EXPORT_SYMBOL(__flush_dcache_range); /* get_new_mmu_context() uses "cache + 1". */ DEFINE_SPINLOCK(ctx_alloc_lock); unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; #define MAX_CTX_NR (1UL << CTX_NR_BITS) #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); /* Caller does TLB context flushing on local CPU if necessary. * The caller also ensures that CTX_VALID(mm->context) is false. * * We must be careful about boundary cases so that we never * let the user have CTX 0 (nucleus) or we ever use a CTX * version of zero (and thus NO_CONTEXT would not be caught * by version mis-match tests in mmu_context.h). * * Always invoked with interrupts disabled. */ void get_new_mmu_context(struct mm_struct *mm) { unsigned long ctx, new_ctx; unsigned long orig_pgsz_bits; int new_version; spin_lock(&ctx_alloc_lock); orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); ctx = (tlb_context_cache + 1) & CTX_NR_MASK; new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); new_version = 0; if (new_ctx >= (1 << CTX_NR_BITS)) { new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); if (new_ctx >= ctx) { int i; new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION; if (new_ctx == 1) new_ctx = CTX_FIRST_VERSION; /* Don't call memset, for 16 entries that's just * plain silly... 
*/ mmu_context_bmap[0] = 3; mmu_context_bmap[1] = 0; mmu_context_bmap[2] = 0; mmu_context_bmap[3] = 0; for (i = 4; i < CTX_BMAP_SLOTS; i += 4) { mmu_context_bmap[i + 0] = 0; mmu_context_bmap[i + 1] = 0; mmu_context_bmap[i + 2] = 0; mmu_context_bmap[i + 3] = 0; } new_version = 1; goto out; } } mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); out: tlb_context_cache = new_ctx; mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; spin_unlock(&ctx_alloc_lock); if (unlikely(new_version)) smp_new_mmu_context_version(); } static int numa_enabled = 1; static int numa_debug; static int __init early_numa(char *p) { if (!p) return 0; if (strstr(p, "off")) numa_enabled = 0; if (strstr(p, "debug")) numa_debug = 1; return 0; } early_param("numa", early_numa); #define numadbg(f, a...) \ do { if (numa_debug) \ printk(KERN_INFO f, ## a); \ } while (0) static void __init find_ramdisk(unsigned long phys_base) { #ifdef CONFIG_BLK_DEV_INITRD if (sparc_ramdisk_image || sparc_ramdisk_image64) { unsigned long ramdisk_image; /* Older versions of the bootloader only supported a * 32-bit physical address for the ramdisk image * location, stored at sparc_ramdisk_image. Newer * SILO versions set sparc_ramdisk_image to zero and * provide a full 64-bit physical address at * sparc_ramdisk_image64. */ ramdisk_image = sparc_ramdisk_image; if (!ramdisk_image) ramdisk_image = sparc_ramdisk_image64; /* Another bootloader quirk. The bootloader normalizes * the physical address to KERNBASE, so we have to * factor that back out and add in the lowest valid * physical page address to get the true physical address. 
*/ ramdisk_image -= KERNBASE; ramdisk_image += phys_base; numadbg("Found ramdisk at physical address 0x%lx, size %u\n", ramdisk_image, sparc_ramdisk_size); initrd_start = ramdisk_image; initrd_end = ramdisk_image + sparc_ramdisk_size; memblock_reserve(initrd_start, sparc_ramdisk_size); initrd_start += PAGE_OFFSET; initrd_end += PAGE_OFFSET; } #endif } struct node_mem_mask { unsigned long mask; unsigned long match; }; static struct node_mem_mask node_masks[MAX_NUMNODES]; static int num_node_masks; #ifdef CONFIG_NEED_MULTIPLE_NODES struct mdesc_mlgroup { u64 node; u64 latency; u64 match; u64 mask; }; static struct mdesc_mlgroup *mlgroups; static int num_mlgroups; int numa_cpu_lookup_table[NR_CPUS]; cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; struct mdesc_mblock { u64 base; u64 size; u64 offset; /* RA-to-PA */ }; static struct mdesc_mblock *mblocks; static int num_mblocks; static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr) { struct mdesc_mblock *m = NULL; int i; for (i = 0; i < num_mblocks; i++) { m = &mblocks[i]; if (addr >= m->base && addr < (m->base + m->size)) { break; } } return m; } static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid) { int prev_nid, new_nid; prev_nid = -1; for ( ; start < end; start += PAGE_SIZE) { for (new_nid = 0; new_nid < num_node_masks; new_nid++) { struct node_mem_mask *p = &node_masks[new_nid]; if ((start & p->mask) == p->match) { if (prev_nid == -1) prev_nid = new_nid; break; } } if (new_nid == num_node_masks) { prev_nid = 0; WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.", start); break; } if (prev_nid != new_nid) break; } *nid = prev_nid; return start > end ? 
end : start; } static u64 __init memblock_nid_range(u64 start, u64 end, int *nid) { u64 ret_end, pa_start, m_mask, m_match, m_end; struct mdesc_mblock *mblock; int _nid, i; if (tlb_type != hypervisor) return memblock_nid_range_sun4u(start, end, nid); mblock = addr_to_mblock(start); if (!mblock) { WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]", start); _nid = 0; ret_end = end; goto done; } pa_start = start + mblock->offset; m_match = 0; m_mask = 0; for (_nid = 0; _nid < num_node_masks; _nid++) { struct node_mem_mask *const m = &node_masks[_nid]; if ((pa_start & m->mask) == m->match) { m_match = m->match; m_mask = m->mask; break; } } if (num_node_masks == _nid) { /* We could not find NUMA group, so default to 0, but lets * search for latency group, so we could calculate the correct * end address that we return */ _nid = 0; for (i = 0; i < num_mlgroups; i++) { struct mdesc_mlgroup *const m = &mlgroups[i]; if ((pa_start & m->mask) == m->match) { m_match = m->match; m_mask = m->mask; break; } } if (i == num_mlgroups) { WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]", start); ret_end = end; goto done; } } /* * Each latency group has match and mask, and each memory block has an * offset. An address belongs to a latency group if its address matches * the following formula: ((addr + offset) & mask) == match * It is, however, slow to check every single page if it matches a * particular latency group. As optimization we calculate end value by * using bit arithmetics. */ m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset; m_end += pa_start & ~((1ul << fls64(m_mask)) - 1); ret_end = m_end > end ? end : m_end; done: *nid = _nid; return ret_end; } #endif /* This must be invoked after performing all of the necessary * memblock_set_node() calls for 'nid'. We need to be able to get * correct data from get_pfn_range_for_nid(). 
*/ static void __init allocate_node_data(int nid) { struct pglist_data *p; unsigned long start_pfn, end_pfn; #ifdef CONFIG_NEED_MULTIPLE_NODES unsigned long paddr; paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid); if (!paddr) { prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); prom_halt(); } NODE_DATA(nid) = __va(paddr); memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); NODE_DATA(nid)->node_id = nid; #endif p = NODE_DATA(nid); get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); p->node_start_pfn = start_pfn; p->node_spanned_pages = end_pfn - start_pfn; } static void init_node_masks_nonnuma(void) { #ifdef CONFIG_NEED_MULTIPLE_NODES int i; #endif numadbg("Initializing tables for non-numa.\n"); node_masks[0].mask = 0; node_masks[0].match = 0; num_node_masks = 1; #ifdef CONFIG_NEED_MULTIPLE_NODES for (i = 0; i < NR_CPUS; i++) numa_cpu_lookup_table[i] = 0; cpumask_setall(&numa_cpumask_lookup_table[0]); #endif } #ifdef CONFIG_NEED_MULTIPLE_NODES struct pglist_data *node_data[MAX_NUMNODES]; EXPORT_SYMBOL(numa_cpu_lookup_table); EXPORT_SYMBOL(numa_cpumask_lookup_table); EXPORT_SYMBOL(node_data); static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio, u32 cfg_handle) { u64 arc; mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) { u64 target = mdesc_arc_target(md, arc); const u64 *val; val = mdesc_get_property(md, target, "cfg-handle", NULL); if (val && *val == cfg_handle) return 0; } return -ENODEV; } static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp, u32 cfg_handle) { u64 arc, candidate, best_latency = ~(u64)0; candidate = MDESC_NODE_NULL; mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { u64 target = mdesc_arc_target(md, arc); const char *name = mdesc_node_name(md, target); const u64 *val; if (strcmp(name, "pio-latency-group")) continue; val = mdesc_get_property(md, target, "latency", NULL); if (!val) continue; if (*val < best_latency) { candidate = target; best_latency = *val; } } if 
(candidate == MDESC_NODE_NULL) return -ENODEV; return scan_pio_for_cfg_handle(md, candidate, cfg_handle); } int of_node_to_nid(struct device_node *dp) { const struct linux_prom64_registers *regs; struct mdesc_handle *md; u32 cfg_handle; int count, nid; u64 grp; /* This is the right thing to do on currently supported * SUN4U NUMA platforms as well, as the PCI controller does * not sit behind any particular memory controller. */ if (!mlgroups) return -1; regs = of_get_property(dp, "reg", NULL); if (!regs) return -1; cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff; md = mdesc_grab(); count = 0; nid = -1; mdesc_for_each_node_by_name(md, grp, "group") { if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) { nid = count; break; } count++; } mdesc_release(md); return nid; } static void __init add_node_ranges(void) { struct memblock_region *reg; unsigned long prev_max; memblock_resized: prev_max = memblock.memory.max; for_each_memblock(memory, reg) { unsigned long size = reg->size; unsigned long start, end; start = reg->base; end = start + size; while (start < end) { unsigned long this_end; int nid; this_end = memblock_nid_range(start, end, &nid); numadbg("Setting memblock NUMA node nid[%d] " "start[%lx] end[%lx]\n", nid, start, this_end); memblock_set_node(start, this_end - start, &memblock.memory, nid); if (memblock.memory.max != prev_max) goto memblock_resized; start = this_end; } } } static int __init grab_mlgroups(struct mdesc_handle *md) { unsigned long paddr; int count = 0; u64 node; mdesc_for_each_node_by_name(md, node, "memory-latency-group") count++; if (!count) return -ENOENT; paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup), SMP_CACHE_BYTES); if (!paddr) return -ENOMEM; mlgroups = __va(paddr); num_mlgroups = count; count = 0; mdesc_for_each_node_by_name(md, node, "memory-latency-group") { struct mdesc_mlgroup *m = &mlgroups[count++]; const u64 *val; m->node = node; val = mdesc_get_property(md, node, "latency", NULL); m->latency = *val; val = 
mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}
	return 0;
}

/* Parse every "mblock" node of the machine description into the global
 * mblocks[]/num_mblocks table.  Each mblock describes one physical
 * memory block: base address, size, and RA-to-PA offset.
 * Returns 0 on success, -ENOENT when no mblock nodes exist, or
 * -ENOMEM when the table cannot be allocated.
 */
static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	/* First pass: count mblock nodes so we know how much to allocate. */
	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;
	mblocks = __va(paddr);
	num_mblocks = count;

	/* Second pass: fill in one table entry per mblock node. */
	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);

		/* The address-congruence-offset property is optional.
		 * Explicitly zero the offset to identify that case.
*/ if (val) m->offset = *val; else m->offset = 0UL; numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", count - 1, m->base, m->size, m->offset); } return 0; } static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md, u64 grp, cpumask_t *mask) { u64 arc; cpumask_clear(mask); mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) { u64 target = mdesc_arc_target(md, arc); const char *name = mdesc_node_name(md, target); const u64 *id; if (strcmp(name, "cpu")) continue; id = mdesc_get_property(md, target, "id", NULL); if (*id < nr_cpu_ids) cpumask_set_cpu(*id, mask); } } static struct mdesc_mlgroup * __init find_mlgroup(u64 node) { int i; for (i = 0; i < num_mlgroups; i++) { struct mdesc_mlgroup *m = &mlgroups[i]; if (m->node == node) return m; } return NULL; } int __node_distance(int from, int to) { if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) { pr_warn("Returning default NUMA distance value for %d->%d\n", from, to); return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE; } return numa_latency[from][to]; } static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) { int i; for (i = 0; i < MAX_NUMNODES; i++) { struct node_mem_mask *n = &node_masks[i]; if ((grp->mask == n->mask) && (grp->match == n->match)) break; } return i; } static void __init find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp, int index) { u64 arc; mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { int tnode; u64 target = mdesc_arc_target(md, arc); struct mdesc_mlgroup *m = find_mlgroup(target); if (!m) continue; tnode = find_best_numa_node_for_mlgroup(m); if (tnode == MAX_NUMNODES) continue; numa_latency[index][tnode] = m->latency; } } static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp, int index) { struct mdesc_mlgroup *candidate = NULL; u64 arc, best_latency = ~(u64)0; struct node_mem_mask *n; mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { u64 target = mdesc_arc_target(md, arc); struct mdesc_mlgroup *m = 
find_mlgroup(target); if (!m) continue; if (m->latency < best_latency) { candidate = m; best_latency = m->latency; } } if (!candidate) return -ENOENT; if (num_node_masks != index) { printk(KERN_ERR "Inconsistent NUMA state, " "index[%d] != num_node_masks[%d]\n", index, num_node_masks); return -EINVAL; } n = &node_masks[num_node_masks++]; n->mask = candidate->mask; n->match = candidate->match; numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n", index, n->mask, n->match, candidate->latency); return 0; } static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp, int index) { cpumask_t mask; int cpu; numa_parse_mdesc_group_cpus(md, grp, &mask); for_each_cpu(cpu, &mask) numa_cpu_lookup_table[cpu] = index; cpumask_copy(&numa_cpumask_lookup_table[index], &mask); if (numa_debug) { printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index); for_each_cpu(cpu, &mask) printk("%d ", cpu); printk("]\n"); } return numa_attach_mlgroup(md, grp, index); } static int __init numa_parse_mdesc(void) { struct mdesc_handle *md = mdesc_grab(); int i, j, err, count; u64 node; node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); if (node == MDESC_NODE_NULL) { mdesc_release(md); return -ENOENT; } err = grab_mblocks(md); if (err < 0) goto out; err = grab_mlgroups(md); if (err < 0) goto out; count = 0; mdesc_for_each_node_by_name(md, node, "group") { err = numa_parse_mdesc_group(md, node, count); if (err < 0) break; count++; } count = 0; mdesc_for_each_node_by_name(md, node, "group") { find_numa_latencies_for_group(md, node, count); count++; } /* Normalize numa latency matrix according to ACPI SLIT spec. 
*/
	for (i = 0; i < MAX_NUMNODES; i++) {
		u64 self_latency = numa_latency[i][i];

		/* Scale each row so the self-latency maps to LOCAL_DISTANCE.
		 * NOTE(review): self_latency is assumed non-zero here; the
		 * defaults seeded in bootmem_init_numa() guarantee that
		 * unless an mlgroup reported a zero latency -- confirm.
		 */
		for (j = 0; j < MAX_NUMNODES; j++) {
			numa_latency[i][j] =
				(numa_latency[i][j] * LOCAL_DISTANCE) /
				self_latency;
		}
	}

	add_node_ranges();

	/* Bring every discovered NUMA node online with its pglist_data. */
	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

/* JBUS (Jalapeno/Serrano) NUMA: the node id lives in physical address
 * bits 36 and up, and CPUs map one-to-one onto nodes.
 */
static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].match = cpu << 36UL;
		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}

/* SUN4U NUMA setup: only JBUS-based (Jalapeno/Serrano) chips are
 * handled; anything else reports failure so the caller falls back to
 * non-NUMA boot.
 */
static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

/* Top-level NUMA probe: seed the latency matrix with sane defaults,
 * then parse the machine description (sun4v) or probe the chip
 * (sun4u).  Returns negative when NUMA setup failed or was disabled.
 */
static int __init bootmem_init_numa(void)
{
	int i, j;
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	/* Some sane defaults for numa latency values */
	for (i = 0; i < MAX_NUMNODES; i++) {
		for (j = 0; j < MAX_NUMNODES; j++)
			numa_latency[i][j] = (i == j) ?
LOCAL_DISTANCE : REMOTE_DISTANCE; } if (numa_enabled) { if (tlb_type == hypervisor) err = numa_parse_mdesc(); else err = numa_parse_sun4u(); } return err; } #else static int bootmem_init_numa(void) { return -1; } #endif static void __init bootmem_init_nonnuma(void) { unsigned long top_of_ram = memblock_end_of_DRAM(); unsigned long total_ram = memblock_phys_mem_size(); numadbg("bootmem_init_nonnuma()\n"); printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram); printk(KERN_INFO "Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20); init_node_masks_nonnuma(); memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); allocate_node_data(0); node_set_online(0); } static unsigned long __init bootmem_init(unsigned long phys_base) { unsigned long end_pfn; end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; max_pfn = max_low_pfn = end_pfn; min_low_pfn = (phys_base >> PAGE_SHIFT); if (bootmem_init_numa() < 0) bootmem_init_nonnuma(); /* Dump memblock with node info. 
*/ memblock_dump_all(); /* XXX cpu notifier XXX */ sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); return end_pfn; } static struct linux_prom64_registers pall[MAX_BANKS] __initdata; static int pall_ents __initdata; static unsigned long max_phys_bits = 40; bool kern_addr_valid(unsigned long addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; if ((long)addr < 0L) { unsigned long pa = __pa(addr); if ((addr >> max_phys_bits) != 0UL) return false; return pfn_valid(pa >> PAGE_SHIFT); } if (addr >= (unsigned long) KERNBASE && addr < (unsigned long)&_end) return true; pgd = pgd_offset_k(addr); if (pgd_none(*pgd)) return 0; pud = pud_offset(pgd, addr); if (pud_none(*pud)) return 0; if (pud_large(*pud)) return pfn_valid(pud_pfn(*pud)); pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return 0; if (pmd_large(*pmd)) return pfn_valid(pmd_pfn(*pmd)); pte = pte_offset_kernel(pmd, addr); if (pte_none(*pte)) return 0; return pfn_valid(pte_pfn(*pte)); } EXPORT_SYMBOL(kern_addr_valid); static unsigned long __ref kernel_map_hugepud(unsigned long vstart, unsigned long vend, pud_t *pud) { const unsigned long mask16gb = (1UL << 34) - 1UL; u64 pte_val = vstart; /* Each PUD is 8GB */ if ((vstart & mask16gb) || (vend - vstart <= mask16gb)) { pte_val ^= kern_linear_pte_xor[2]; pud_val(*pud) = pte_val | _PAGE_PUD_HUGE; return vstart + PUD_SIZE; } pte_val ^= kern_linear_pte_xor[3]; pte_val |= _PAGE_PUD_HUGE; vend = vstart + mask16gb + 1UL; while (vstart < vend) { pud_val(*pud) = pte_val; pte_val += PUD_SIZE; vstart += PUD_SIZE; pud++; } return vstart; } static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend, bool guard) { if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE) return true; return false; } static unsigned long __ref kernel_map_hugepmd(unsigned long vstart, unsigned long vend, pmd_t *pmd) { const unsigned long mask256mb = (1UL << 28) - 1UL; const unsigned long mask2gb = (1UL << 31) - 1UL; u64 pte_val = vstart; /* Each 
PMD is 8MB */ if ((vstart & mask256mb) || (vend - vstart <= mask256mb)) { pte_val ^= kern_linear_pte_xor[0]; pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE; return vstart + PMD_SIZE; } if ((vstart & mask2gb) || (vend - vstart <= mask2gb)) { pte_val ^= kern_linear_pte_xor[1]; pte_val |= _PAGE_PMD_HUGE; vend = vstart + mask256mb + 1UL; } else { pte_val ^= kern_linear_pte_xor[2]; pte_val |= _PAGE_PMD_HUGE; vend = vstart + mask2gb + 1UL; } while (vstart < vend) { pmd_val(*pmd) = pte_val; pte_val += PMD_SIZE; vstart += PMD_SIZE; pmd++; } return vstart; } static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend, bool guard) { if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE) return true; return false; } static unsigned long __ref kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot, bool use_huge) { unsigned long vstart = PAGE_OFFSET + pstart; unsigned long vend = PAGE_OFFSET + pend; unsigned long alloc_bytes = 0UL; if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n", vstart, vend); prom_halt(); } while (vstart < vend) { unsigned long this_end, paddr = __pa(vstart); pgd_t *pgd = pgd_offset_k(vstart); pud_t *pud; pmd_t *pmd; pte_t *pte; if (pgd_none(*pgd)) { pud_t *new; new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); alloc_bytes += PAGE_SIZE; pgd_populate(&init_mm, pgd, new); } pud = pud_offset(pgd, vstart); if (pud_none(*pud)) { pmd_t *new; if (kernel_can_map_hugepud(vstart, vend, use_huge)) { vstart = kernel_map_hugepud(vstart, vend, pud); continue; } new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); alloc_bytes += PAGE_SIZE; pud_populate(&init_mm, pud, new); } pmd = pmd_offset(pud, vstart); if (pmd_none(*pmd)) { pte_t *new; if (kernel_can_map_hugepmd(vstart, vend, use_huge)) { vstart = kernel_map_hugepmd(vstart, vend, pmd); continue; } new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); alloc_bytes += PAGE_SIZE; pmd_populate_kernel(&init_mm, pmd, 
new); } pte = pte_offset_kernel(pmd, vstart); this_end = (vstart + PMD_SIZE) & PMD_MASK; if (this_end > vend) this_end = vend; while (vstart < this_end) { pte_val(*pte) = (paddr | pgprot_val(prot)); vstart += PAGE_SIZE; paddr += PAGE_SIZE; pte++; } } return alloc_bytes; } static void __init flush_all_kernel_tsbs(void) { int i; for (i = 0; i < KERNEL_TSB_NENTRIES; i++) { struct tsb *ent = &swapper_tsb[i]; ent->tag = (1UL << TSB_TAG_INVALID_BIT); } #ifndef CONFIG_DEBUG_PAGEALLOC for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) { struct tsb *ent = &swapper_4m_tsb[i]; ent->tag = (1UL << TSB_TAG_INVALID_BIT); } #endif } extern unsigned int kvmap_linear_patch[1]; static void __init kernel_physical_mapping_init(void) { unsigned long i, mem_alloced = 0UL; bool use_huge = true; #ifdef CONFIG_DEBUG_PAGEALLOC use_huge = false; #endif for (i = 0; i < pall_ents; i++) { unsigned long phys_start, phys_end; phys_start = pall[i].phys_addr; phys_end = phys_start + pall[i].reg_size; mem_alloced += kernel_map_range(phys_start, phys_end, PAGE_KERNEL, use_huge); } printk("Allocated %ld bytes for kernel page tables.\n", mem_alloced); kvmap_linear_patch[0] = 0x01000000; /* nop */ flushi(&kvmap_linear_patch[0]); flush_all_kernel_tsbs(); __flush_tlb_all(); } #ifdef CONFIG_DEBUG_PAGEALLOC void __kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); kernel_map_range(phys_start, phys_end, (enable ? PAGE_KERNEL : __pgprot(0)), false); flush_tsb_kernel_range(PAGE_OFFSET + phys_start, PAGE_OFFSET + phys_end); /* we should perform an IPI and flush all tlbs, * but that can deadlock->flush only current cpu. 
*/ __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, PAGE_OFFSET + phys_end); } #endif unsigned long __init find_ecache_flush_span(unsigned long size) { int i; for (i = 0; i < pavail_ents; i++) { if (pavail[i].reg_size >= size) return pavail[i].phys_addr; } return ~0UL; } unsigned long PAGE_OFFSET; EXPORT_SYMBOL(PAGE_OFFSET); unsigned long VMALLOC_END = 0x0000010000000000UL; EXPORT_SYMBOL(VMALLOC_END); unsigned long sparc64_va_hole_top = 0xfffff80000000000UL; unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL; static void __init setup_page_offset(void) { if (tlb_type == cheetah || tlb_type == cheetah_plus) { /* Cheetah/Panther support a full 64-bit virtual * address, so we can use all that our page tables * support. */ sparc64_va_hole_top = 0xfff0000000000000UL; sparc64_va_hole_bottom = 0x0010000000000000UL; max_phys_bits = 42; } else if (tlb_type == hypervisor) { switch (sun4v_chip_type) { case SUN4V_CHIP_NIAGARA1: case SUN4V_CHIP_NIAGARA2: /* T1 and T2 support 48-bit virtual addresses. */ sparc64_va_hole_top = 0xffff800000000000UL; sparc64_va_hole_bottom = 0x0000800000000000UL; max_phys_bits = 39; break; case SUN4V_CHIP_NIAGARA3: /* T3 supports 48-bit virtual addresses. */ sparc64_va_hole_top = 0xffff800000000000UL; sparc64_va_hole_bottom = 0x0000800000000000UL; max_phys_bits = 43; break; case SUN4V_CHIP_NIAGARA4: case SUN4V_CHIP_NIAGARA5: case SUN4V_CHIP_SPARC64X: case SUN4V_CHIP_SPARC_M6: /* T4 and later support 52-bit virtual addresses. */ sparc64_va_hole_top = 0xfff8000000000000UL; sparc64_va_hole_bottom = 0x0008000000000000UL; max_phys_bits = 47; break; case SUN4V_CHIP_SPARC_M7: case SUN4V_CHIP_SPARC_SN: default: /* M7 and later support 52-bit virtual addresses. 
*/ sparc64_va_hole_top = 0xfff8000000000000UL; sparc64_va_hole_bottom = 0x0008000000000000UL; max_phys_bits = 49; break; } } if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) { prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n", max_phys_bits); prom_halt(); } PAGE_OFFSET = sparc64_va_hole_top; VMALLOC_END = ((sparc64_va_hole_bottom >> 1) + (sparc64_va_hole_bottom >> 2)); pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n", PAGE_OFFSET, max_phys_bits); pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n", VMALLOC_START, VMALLOC_END); pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n", VMEMMAP_BASE, VMEMMAP_BASE << 1); } static void __init tsb_phys_patch(void) { struct tsb_ldquad_phys_patch_entry *pquad; struct tsb_phys_patch_entry *p; pquad = &__tsb_ldquad_phys_patch; while (pquad < &__tsb_ldquad_phys_patch_end) { unsigned long addr = pquad->addr; if (tlb_type == hypervisor) *(unsigned int *) addr = pquad->sun4v_insn; else *(unsigned int *) addr = pquad->sun4u_insn; wmb(); __asm__ __volatile__("flush %0" : /* no outputs */ : "r" (addr)); pquad++; } p = &__tsb_phys_patch; while (p < &__tsb_phys_patch_end) { unsigned long addr = p->addr; *(unsigned int *) addr = p->insn; wmb(); __asm__ __volatile__("flush %0" : /* no outputs */ : "r" (addr)); p++; } } /* Don't mark as init, we give this to the Hypervisor. */ #ifndef CONFIG_DEBUG_PAGEALLOC #define NUM_KTSB_DESCR 2 #else #define NUM_KTSB_DESCR 1 #endif static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR]; /* The swapper TSBs are loaded with a base sequence of: * * sethi %uhi(SYMBOL), REG1 * sethi %hi(SYMBOL), REG2 * or REG1, %ulo(SYMBOL), REG1 * or REG2, %lo(SYMBOL), REG2 * sllx REG1, 32, REG1 * or REG1, REG2, REG1 * * When we use physical addressing for the TSB accesses, we patch the * first four instructions in the above sequence. 
*/
/* Patch one list of sethi/sethi/or/or sequences so they load the
 * physical address 'pa' instead of the symbol's virtual address.
 * Each 32-bit word in [start, end) holds the address of the first of
 * the four instructions to rewrite; every store is followed by a flush
 * to keep the instruction cache coherent.
 */
static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
{
	unsigned long high_bits, low_bits;

	high_bits = (pa >> 32) & 0xffffffff;
	low_bits = (pa >> 0) & 0xffffffff;

	while (start < end) {
		unsigned int *ia = (unsigned int *)(unsigned long)*start;

		/* sethi %uhi(pa), REG1: rewrite the 22-bit immediate. */
		ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
		__asm__ __volatile__("flush %0" : : "r" (ia));

		/* sethi %hi(pa), REG2 */
		ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
		__asm__ __volatile__("flush %0" : : "r" (ia + 1));

		/* or REG1, %ulo(pa), REG1: low 10 bits of the high word. */
		ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
		__asm__ __volatile__("flush %0" : : "r" (ia + 2));

		/* or REG2, %lo(pa), REG2 */
		ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
		__asm__ __volatile__("flush %0" : : "r" (ia + 3));

		start++;
	}
}

/* Switch the kernel TSB access code over to physical addressing by
 * patching the swapper TSB references, and (when page allocator
 * debugging is off) the 4MB kernel TSB references as well.
 */
static void ktsb_phys_patch(void)
{
	extern unsigned int __swapper_tsb_phys_patch;
	extern unsigned int __swapper_tsb_phys_patch_end;
	unsigned long ktsb_pa;

	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
			    &__swapper_tsb_phys_patch_end,
			    ktsb_pa);
#ifndef CONFIG_DEBUG_PAGEALLOC
	{
	extern unsigned int __swapper_4m_tsb_phys_patch;
	extern unsigned int __swapper_4m_tsb_phys_patch_end;
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
			    &__swapper_4m_tsb_phys_patch_end,
			    ktsb_pa);
	}
#endif
}

/* Describe the kernel TSB(s) to the sun4v hypervisor. */
static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.
*/ ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); switch (PAGE_SIZE) { case 8 * 1024: default: ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K; ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K; break; case 64 * 1024: ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K; ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K; break; case 512 * 1024: ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K; ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K; break; case 4 * 1024 * 1024: ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; break; } ktsb_descr[0].assoc = 1; ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; ktsb_descr[0].ctx_idx = 0; ktsb_descr[0].tsb_base = ktsb_pa; ktsb_descr[0].resv = 0; #ifndef CONFIG_DEBUG_PAGEALLOC /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */ ktsb_pa = (kern_base + ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB | HV_PGSZ_MASK_256MB | HV_PGSZ_MASK_2GB | HV_PGSZ_MASK_16GB) & cpu_pgsz_mask); ktsb_descr[1].assoc = 1; ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; ktsb_descr[1].ctx_idx = 0; ktsb_descr[1].tsb_base = ktsb_pa; ktsb_descr[1].resv = 0; #endif } void sun4v_ktsb_register(void) { unsigned long pa, ret; pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa); if (ret != 0) { prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: " "errors with %lx\n", pa, ret); prom_halt(); } } static void __init sun4u_linear_pte_xor_finalize(void) { #ifndef CONFIG_DEBUG_PAGEALLOC /* This is where we would add Panther support for * 32MB and 256MB pages. */ #endif } static void __init sun4v_linear_pte_xor_finalize(void) { unsigned long pagecv_flag; /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead * enables MCD error. Do not set bit 9 on M7 processor. 
*/ switch (sun4v_chip_type) { case SUN4V_CHIP_SPARC_M7: case SUN4V_CHIP_SPARC_SN: pagecv_flag = 0x00; break; default: pagecv_flag = _PAGE_CV_4V; break; } #ifndef CONFIG_DEBUG_PAGEALLOC if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) { kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ PAGE_OFFSET; kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag | _PAGE_P_4V | _PAGE_W_4V); } else { kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; } if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) { kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^ PAGE_OFFSET; kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag | _PAGE_P_4V | _PAGE_W_4V); } else { kern_linear_pte_xor[2] = kern_linear_pte_xor[1]; } if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) { kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^ PAGE_OFFSET; kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag | _PAGE_P_4V | _PAGE_W_4V); } else { kern_linear_pte_xor[3] = kern_linear_pte_xor[2]; } #endif } /* paging_init() sets up the page tables */ static unsigned long last_valid_pfn; static void sun4u_pgprot_init(void); static void sun4v_pgprot_init(void); static phys_addr_t __init available_memory(void) { phys_addr_t available = 0ULL; phys_addr_t pa_start, pa_end; u64 i; for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start, &pa_end, NULL) available = available + (pa_end - pa_start); return available; } #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) /* We need to exclude reserved regions. This exclusion will include * vmlinux and initrd. To be more precise the initrd size could be used to * compute a new lower limit because it is freed later during initialization. 
*/ static void __init reduce_memory(phys_addr_t limit_ram) { phys_addr_t avail_ram = available_memory(); phys_addr_t pa_start, pa_end; u64 i; if (limit_ram >= avail_ram) return; for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start, &pa_end, NULL) { phys_addr_t region_size = pa_end - pa_start; phys_addr_t clip_start = pa_start; avail_ram = avail_ram - region_size; /* Are we consuming too much? */ if (avail_ram < limit_ram) { phys_addr_t give_back = limit_ram - avail_ram; region_size = region_size - give_back; clip_start = clip_start + give_back; } memblock_remove(clip_start, region_size); if (avail_ram <= limit_ram) break; i = 0UL; } } void __init paging_init(void) { unsigned long end_pfn, shift, phys_base; unsigned long real_end, i; setup_page_offset(); /* These build time checkes make sure that the dcache_dirty_cpu() * page->flags usage will work. * * When a page gets marked as dcache-dirty, we store the * cpu number starting at bit 32 in the page->flags. Also, * functions like clear_dcache_dirty_cpu use the cpu mask * in 13-bit signed-immediate instruction fields. */ /* * Page flags must not reach into upper 32 bits that are used * for the cpu number */ BUILD_BUG_ON(NR_PAGEFLAGS > 32); /* * The bit fields placed in the high range must not reach below * the 32 bit boundary. Otherwise we cannot place the cpu field * at the 32 bit boundary. */ BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + ilog2(roundup_pow_of_two(NR_CPUS)) > 32); BUILD_BUG_ON(NR_CPUS > 4096); kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB; kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; /* Invalidate both kernel TSBs. */ memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); #ifndef CONFIG_DEBUG_PAGEALLOC memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); #endif /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde * bit on M7 processor. This is a conflicting usage of the same * bit. 
Enabling TTE.cv on M7 would turn on Memory Corruption * Detection error on all pages and this will lead to problems * later. Kernel does not run with MCD enabled and hence rest * of the required steps to fully configure memory corruption * detection are not taken. We need to ensure TTE.mcde is not * set on M7 processor. Compute the value of cacheability * flag for use later taking this into consideration. */ switch (sun4v_chip_type) { case SUN4V_CHIP_SPARC_M7: case SUN4V_CHIP_SPARC_SN: page_cache4v_flag = _PAGE_CP_4V; break; default: page_cache4v_flag = _PAGE_CACHE_4V; break; } if (tlb_type == hypervisor) sun4v_pgprot_init(); else sun4u_pgprot_init(); if (tlb_type == cheetah_plus || tlb_type == hypervisor) { tsb_phys_patch(); ktsb_phys_patch(); } if (tlb_type == hypervisor) sun4v_patch_tlb_handlers(); /* Find available physical memory... * * Read it twice in order to work around a bug in openfirmware. * The call to grab this table itself can cause openfirmware to * allocate memory, which in turn can take away some space from * the list of available memory. Reading it twice makes sure * we really do get the final value. 
*/ read_obp_translations(); read_obp_memory("reg", &pall[0], &pall_ents); read_obp_memory("available", &pavail[0], &pavail_ents); read_obp_memory("available", &pavail[0], &pavail_ents); phys_base = 0xffffffffffffffffUL; for (i = 0; i < pavail_ents; i++) { phys_base = min(phys_base, pavail[i].phys_addr); memblock_add(pavail[i].phys_addr, pavail[i].reg_size); } memblock_reserve(kern_base, kern_size); find_ramdisk(phys_base); if (cmdline_memory_size) reduce_memory(cmdline_memory_size); memblock_allow_resize(); memblock_dump_all(); set_bit(0, mmu_context_bmap); shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); real_end = (unsigned long)_end; num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB); printk("Kernel: Using %d locked TLB entries for main kernel image.\n", num_kernel_image_mappings); /* Set kernel pgd to upper alias so physical page computations * work. */ init_mm.pgd += ((shift) / (sizeof(pgd_t))); memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); inherit_prom_mappings(); /* Ok, we can use our TLB miss and window trap handlers safely. */ setup_tba(); __flush_tlb_all(); prom_build_devicetree(); of_populate_present_mask(); #ifndef CONFIG_SMP of_fill_in_cpu_data(); #endif if (tlb_type == hypervisor) { sun4v_mdesc_init(); mdesc_populate_present_mask(cpu_all_mask); #ifndef CONFIG_SMP mdesc_fill_in_cpu_data(cpu_all_mask); #endif mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask); sun4v_linear_pte_xor_finalize(); sun4v_ktsb_init(); sun4v_ktsb_register(); } else { unsigned long impl, ver; cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K | HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB); __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); impl = ((ver >> 32) & 0xffff); if (impl == PANTHER_IMPL) cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB | HV_PGSZ_MASK_256MB); sun4u_linear_pte_xor_finalize(); } /* Flush the TLBs and the 4M TSB so that the updated linear * pte XOR settings are realized for all mappings. 
*/ __flush_tlb_all(); #ifndef CONFIG_DEBUG_PAGEALLOC memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); #endif __flush_tlb_all(); /* Setup bootmem... */ last_valid_pfn = end_pfn = bootmem_init(phys_base); kernel_physical_mapping_init(); { unsigned long max_zone_pfns[MAX_NR_ZONES]; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); max_zone_pfns[ZONE_NORMAL] = end_pfn; free_area_init_nodes(max_zone_pfns); } printk("Booting Linux...\n"); } int page_in_phys_avail(unsigned long paddr) { int i; paddr &= PAGE_MASK; for (i = 0; i < pavail_ents; i++) { unsigned long start, end; start = pavail[i].phys_addr; end = start + pavail[i].reg_size; if (paddr >= start && paddr < end) return 1; } if (paddr >= kern_base && paddr < (kern_base + kern_size)) return 1; #ifdef CONFIG_BLK_DEV_INITRD if (paddr >= __pa(initrd_start) && paddr < __pa(PAGE_ALIGN(initrd_end))) return 1; #endif return 0; } static void __init register_page_bootmem_info(void) { #ifdef CONFIG_NEED_MULTIPLE_NODES int i; for_each_online_node(i) if (NODE_DATA(i)->node_spanned_pages) register_page_bootmem_info_node(NODE_DATA(i)); #endif } void __init mem_init(void) { high_memory = __va(last_valid_pfn << PAGE_SHIFT); register_page_bootmem_info(); free_all_bootmem(); /* * Set up the zero page, mark it reserved, so that page count * is not manipulated when freeing the page from user ptes. */ mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0); if (mem_map_zero == NULL) { prom_printf("paging_init: Cannot alloc zero page.\n"); prom_halt(); } mark_page_reserved(mem_map_zero); mem_init_print_info(NULL); if (tlb_type == cheetah || tlb_type == cheetah_plus) cheetah_ecache_flush_init(); } void free_initmem(void) { unsigned long addr, initend; int do_free = 1; /* If the physical memory maps were trimmed by kernel command * line options, don't even try freeing this initmem stuff up. * The kernel image could have been in the trimmed out region * and if so the freeing below will free invalid page structs. 
*/ if (cmdline_memory_size) do_free = 0; /* * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes. */ addr = PAGE_ALIGN((unsigned long)(__init_begin)); initend = (unsigned long)(__init_end) & PAGE_MASK; for (; addr < initend; addr += PAGE_SIZE) { unsigned long page; page = (addr + ((unsigned long) __va(kern_base)) - ((unsigned long) KERNBASE)); memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); if (do_free) free_reserved_page(virt_to_page(page)); } } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, "initrd"); } #endif pgprot_t PAGE_KERNEL __read_mostly; EXPORT_SYMBOL(PAGE_KERNEL); pgprot_t PAGE_KERNEL_LOCKED __read_mostly; pgprot_t PAGE_COPY __read_mostly; pgprot_t PAGE_SHARED __read_mostly; EXPORT_SYMBOL(PAGE_SHARED); unsigned long pg_iobits __read_mostly; unsigned long _PAGE_IE __read_mostly; EXPORT_SYMBOL(_PAGE_IE); unsigned long _PAGE_E __read_mostly; EXPORT_SYMBOL(_PAGE_E); unsigned long _PAGE_CACHE __read_mostly; EXPORT_SYMBOL(_PAGE_CACHE); #ifdef CONFIG_SPARSEMEM_VMEMMAP int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend, int node) { unsigned long pte_base; pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | _PAGE_W_4U); if (tlb_type == hypervisor) pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V); pte_base |= _PAGE_PMD_HUGE; vstart = vstart & PMD_MASK; vend = ALIGN(vend, PMD_SIZE); for (; vstart < vend; vstart += PMD_SIZE) { pgd_t *pgd = pgd_offset_k(vstart); unsigned long pte; pud_t *pud; pmd_t *pmd; if (pgd_none(*pgd)) { pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node); if (!new) return -ENOMEM; pgd_populate(&init_mm, pgd, new); } pud = pud_offset(pgd, vstart); if (pud_none(*pud)) { pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node); if (!new) return -ENOMEM; pud_populate(&init_mm, pud, new); } pmd = pmd_offset(pud, vstart); 
pte = pmd_val(*pmd); if (!(pte & _PAGE_VALID)) { void *block = vmemmap_alloc_block(PMD_SIZE, node); if (!block) return -ENOMEM; pmd_val(*pmd) = pte_base | __pa(block); } } return 0; } void vmemmap_free(unsigned long start, unsigned long end) { } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ static void prot_init_common(unsigned long page_none, unsigned long page_shared, unsigned long page_copy, unsigned long page_readonly, unsigned long page_exec_bit) { PAGE_COPY = __pgprot(page_copy); PAGE_SHARED = __pgprot(page_shared); protection_map[0x0] = __pgprot(page_none); protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); protection_map[0x4] = __pgprot(page_readonly); protection_map[0x5] = __pgprot(page_readonly); protection_map[0x6] = __pgprot(page_copy); protection_map[0x7] = __pgprot(page_copy); protection_map[0x8] = __pgprot(page_none); protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); protection_map[0xc] = __pgprot(page_readonly); protection_map[0xd] = __pgprot(page_readonly); protection_map[0xe] = __pgprot(page_shared); protection_map[0xf] = __pgprot(page_shared); } static void __init sun4u_pgprot_init(void) { unsigned long page_none, page_shared, page_copy, page_readonly; unsigned long page_exec_bit; int i; PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | _PAGE_CACHE_4U | _PAGE_P_4U | __ACCESS_BITS_4U | __DIRTY_BITS_4U | _PAGE_EXEC_4U); PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | _PAGE_CACHE_4U | _PAGE_P_4U | __ACCESS_BITS_4U | __DIRTY_BITS_4U | _PAGE_EXEC_4U | _PAGE_L_4U); _PAGE_IE = _PAGE_IE_4U; _PAGE_E = _PAGE_E_4U; _PAGE_CACHE = _PAGE_CACHE_4U; pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | __ACCESS_BITS_4U | _PAGE_E_4U); #ifdef CONFIG_DEBUG_PAGEALLOC 
kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET; #else kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ PAGE_OFFSET; #endif kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | _PAGE_W_4U); for (i = 1; i < 4; i++) kern_linear_pte_xor[i] = kern_linear_pte_xor[0]; _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | __ACCESS_BITS_4U | _PAGE_EXEC_4U); page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | __ACCESS_BITS_4U | _PAGE_EXEC_4U); page_exec_bit = _PAGE_EXEC_4U; prot_init_common(page_none, page_shared, page_copy, page_readonly, page_exec_bit); } static void __init sun4v_pgprot_init(void) { unsigned long page_none, page_shared, page_copy, page_readonly; unsigned long page_exec_bit; int i; PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | page_cache4v_flag | _PAGE_P_4V | __ACCESS_BITS_4V | __DIRTY_BITS_4V | _PAGE_EXEC_4V); PAGE_KERNEL_LOCKED = PAGE_KERNEL; _PAGE_IE = _PAGE_IE_4V; _PAGE_E = _PAGE_E_4V; _PAGE_CACHE = page_cache4v_flag; #ifdef CONFIG_DEBUG_PAGEALLOC kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET; #else kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ PAGE_OFFSET; #endif kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V); for (i = 1; i < 4; i++) kern_linear_pte_xor[i] = kern_linear_pte_xor[0]; pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | __ACCESS_BITS_4V | _PAGE_E_4V); _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag; page_shared = (_PAGE_VALID | 
_PAGE_PRESENT_4V | page_cache4v_flag | __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | __ACCESS_BITS_4V | _PAGE_EXEC_4V); page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | __ACCESS_BITS_4V | _PAGE_EXEC_4V); page_exec_bit = _PAGE_EXEC_4V; prot_init_common(page_none, page_shared, page_copy, page_readonly, page_exec_bit); } unsigned long pte_sz_bits(unsigned long sz) { if (tlb_type == hypervisor) { switch (sz) { case 8 * 1024: default: return _PAGE_SZ8K_4V; case 64 * 1024: return _PAGE_SZ64K_4V; case 512 * 1024: return _PAGE_SZ512K_4V; case 4 * 1024 * 1024: return _PAGE_SZ4MB_4V; } } else { switch (sz) { case 8 * 1024: default: return _PAGE_SZ8K_4U; case 64 * 1024: return _PAGE_SZ64K_4U; case 512 * 1024: return _PAGE_SZ512K_4U; case 4 * 1024 * 1024: return _PAGE_SZ4MB_4U; } } } pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) { pte_t pte; pte_val(pte) = page | pgprot_val(pgprot_noncached(prot)); pte_val(pte) |= (((unsigned long)space) << 32); pte_val(pte) |= pte_sz_bits(page_size); return pte; } static unsigned long kern_large_tte(unsigned long paddr) { unsigned long val; val = (_PAGE_VALID | _PAGE_SZ4MB_4U | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); if (tlb_type == hypervisor) val = (_PAGE_VALID | _PAGE_SZ4MB_4V | page_cache4v_flag | _PAGE_P_4V | _PAGE_EXEC_4V | _PAGE_W_4V); return val | paddr; } /* If not locked, zap it. */ void __flush_tlb_all(void) { unsigned long pstate; int i; __asm__ __volatile__("flushw\n\t" "rdpr %%pstate, %0\n\t" "wrpr %0, %1, %%pstate" : "=r" (pstate) : "i" (PSTATE_IE)); if (tlb_type == hypervisor) { sun4v_mmu_demap_all(); } else if (tlb_type == spitfire) { for (i = 0; i < 64; i++) { /* Spitfire Errata #32 workaround */ /* NOTE: Always runs on spitfire, so no * cheetah+ page size encodings. 
*/ __asm__ __volatile__("stxa %0, [%1] %2\n\t" "flush %%g6" : /* No outputs */ : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); spitfire_put_dtlb_data(i, 0x0UL); } /* Spitfire Errata #32 workaround */ /* NOTE: Always runs on spitfire, so no * cheetah+ page size encodings. */ __asm__ __volatile__("stxa %0, [%1] %2\n\t" "flush %%g6" : /* No outputs */ : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); spitfire_put_itlb_data(i, 0x0UL); } } } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { cheetah_flush_dtlb_all(); cheetah_flush_itlb_all(); } __asm__ __volatile__("wrpr %0, 0, %%pstate" : : "r" (pstate)); } pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); pte_t *pte = NULL; if (page) pte = (pte_t *) page_address(page); return pte; } pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) { struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); if (!page) return NULL; if (!pgtable_page_ctor(page)) { free_hot_cold_page(page, 0); return NULL; } return (pte_t *) page_address(page); } void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { free_page((unsigned long)pte); } static void __pte_free(pgtable_t pte) { struct page *page = virt_to_page(pte); pgtable_page_dtor(page); __free_page(page); } void pte_free(struct mm_struct *mm, pgtable_t pte) { __pte_free(pte); } void pgtable_free(void *table, bool is_page) { if (is_page) __pte_free(table); else kmem_cache_free(pgtable_cache, table); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, 
pmd_t *pmd) { unsigned long pte, flags; struct mm_struct *mm; pmd_t entry = *pmd; if (!pmd_large(entry) || !pmd_young(entry)) return; pte = pmd_val(entry); /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */ if (!(pte & _PAGE_VALID)) return; /* We are fabricating 8MB pages using 4MB real hw pages. */ pte |= (addr & (1UL << REAL_HPAGE_SHIFT)); mm = vma->vm_mm; spin_lock_irqsave(&mm->context.lock, flags); if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, addr, pte); spin_unlock_irqrestore(&mm->context.lock, flags); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) static void context_reload(void *__data) { struct mm_struct *mm = __data; if (mm == current->mm) load_secondary_context(mm); } void hugetlb_setup(struct pt_regs *regs) { struct mm_struct *mm = current->mm; struct tsb_config *tp; if (faulthandler_disabled() || !mm) { const struct exception_table_entry *entry; entry = search_exception_tables(regs->tpc); if (entry) { regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; return; } pr_alert("Unexpected HugeTLB setup in atomic context.\n"); die_if_kernel("HugeTSB in atomic", regs); } tp = &mm->context.tsb_block[MM_TSB_HUGE]; if (likely(tp->tsb == NULL)) tsb_grow(mm, MM_TSB_HUGE, 0); tsb_context_switch(mm); smp_tsb_sync(mm); /* On UltraSPARC-III+ and later, configure the second half of * the Data-TLB for huge pages. */ if (tlb_type == cheetah_plus) { bool need_context_reload = false; unsigned long ctx; spin_lock_irq(&ctx_alloc_lock); ctx = mm->context.sparc64_ctx_val; ctx &= ~CTX_PGSZ_MASK; ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT; if (ctx != mm->context.sparc64_ctx_val) { /* When changing the page size fields, we * must perform a context flush so that no * stale entries match. This flush must * occur with the original context register * settings. 
*/ do_flush_tlb_mm(mm); /* Reload the context register of all processors * also executing in this address space. */ mm->context.sparc64_ctx_val = ctx; need_context_reload = true; } spin_unlock_irq(&ctx_alloc_lock); if (need_context_reload) on_each_cpu(context_reload, mm, 0); } } #endif static struct resource code_resource = { .name = "Kernel code", .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM }; static struct resource data_resource = { .name = "Kernel data", .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM }; static struct resource bss_resource = { .name = "Kernel bss", .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM }; static inline resource_size_t compute_kern_paddr(void *addr) { return (resource_size_t) (addr - KERNBASE + kern_base); } static void __init kernel_lds_init(void) { code_resource.start = compute_kern_paddr(_text); code_resource.end = compute_kern_paddr(_etext - 1); data_resource.start = compute_kern_paddr(_etext); data_resource.end = compute_kern_paddr(_edata - 1); bss_resource.start = compute_kern_paddr(__bss_start); bss_resource.end = compute_kern_paddr(_end - 1); } static int __init report_memory(void) { int i; struct resource *res; kernel_lds_init(); for (i = 0; i < pavail_ents; i++) { res = kzalloc(sizeof(struct resource), GFP_KERNEL); if (!res) { pr_warn("Failed to allocate source.\n"); break; } res->name = "System RAM"; res->start = pavail[i].phys_addr; res->end = pavail[i].phys_addr + pavail[i].reg_size - 1; res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM; if (insert_resource(&iomem_resource, res) < 0) { pr_warn("Resource insertion failed.\n"); break; } insert_resource(res, &code_resource); insert_resource(res, &data_resource); insert_resource(res, &bss_resource); } return 0; } arch_initcall(report_memory); #ifdef CONFIG_SMP #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range #else #define do_flush_tlb_kernel_range __flush_tlb_kernel_range #endif void flush_tlb_kernel_range(unsigned long start, unsigned long end) { if 
(start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) { if (start < LOW_OBP_ADDRESS) { flush_tsb_kernel_range(start, LOW_OBP_ADDRESS); do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS); } if (end > HI_OBP_ADDRESS) { flush_tsb_kernel_range(HI_OBP_ADDRESS, end); do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end); } } else { flush_tsb_kernel_range(start, end); do_flush_tlb_kernel_range(start, end); } }
gpl-2.0
mv0/kvm
net/bridge/br_if.c
12
9684
/* * Userspace interface * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/netpoll.h> #include <linux/ethtool.h> #include <linux/if_arp.h> #include <linux/module.h> #include <linux/init.h> #include <linux/rtnetlink.h> #include <linux/if_ether.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/if_vlan.h> #include "br_private.h" /* * Determine initial path cost based on speed. * using recommendations from 802.1d standard * * Since driver might sleep need to not be holding any locks. */ static int port_cost(struct net_device *dev) { struct ethtool_cmd ecmd; if (!__ethtool_get_settings(dev, &ecmd)) { switch (ethtool_cmd_speed(&ecmd)) { case SPEED_10000: return 2; case SPEED_1000: return 4; case SPEED_100: return 19; case SPEED_10: return 100; } } /* Old silly heuristics based on name */ if (!strncmp(dev->name, "lec", 3)) return 7; if (!strncmp(dev->name, "plip", 4)) return 2500; return 100; /* assume old 10Mbps */ } /* Check for port carrier transitions. 
*/ void br_port_carrier_check(struct net_bridge_port *p) { struct net_device *dev = p->dev; struct net_bridge *br = p->br; if (!(p->flags & BR_ADMIN_COST) && netif_running(dev) && netif_oper_up(dev)) p->path_cost = port_cost(dev); if (!netif_running(br->dev)) return; spin_lock_bh(&br->lock); if (netif_running(dev) && netif_oper_up(dev)) { if (p->state == BR_STATE_DISABLED) br_stp_enable_port(p); } else { if (p->state != BR_STATE_DISABLED) br_stp_disable_port(p); } spin_unlock_bh(&br->lock); } static void release_nbp(struct kobject *kobj) { struct net_bridge_port *p = container_of(kobj, struct net_bridge_port, kobj); kfree(p); } static struct kobj_type brport_ktype = { #ifdef CONFIG_SYSFS .sysfs_ops = &brport_sysfs_ops, #endif .release = release_nbp, }; static void destroy_nbp(struct net_bridge_port *p) { struct net_device *dev = p->dev; p->br = NULL; p->dev = NULL; dev_put(dev); kobject_put(&p->kobj); } static void destroy_nbp_rcu(struct rcu_head *head) { struct net_bridge_port *p = container_of(head, struct net_bridge_port, rcu); destroy_nbp(p); } /* Delete port(interface) from bridge is done in two steps. * via RCU. First step, marks device as down. That deletes * all the timers and stops new packets from flowing through. * * Final cleanup doesn't occur until after all CPU's finished * processing packets. 
* * Protected from multiple admin operations by RTNL mutex */ static void del_nbp(struct net_bridge_port *p) { struct net_bridge *br = p->br; struct net_device *dev = p->dev; sysfs_remove_link(br->ifobj, p->dev->name); dev_set_promiscuity(dev, -1); spin_lock_bh(&br->lock); br_stp_disable_port(p); spin_unlock_bh(&br->lock); br_ifinfo_notify(RTM_DELLINK, p); nbp_vlan_flush(p); br_fdb_delete_by_port(br, p, 1); list_del_rcu(&p->list); dev->priv_flags &= ~IFF_BRIDGE_PORT; netdev_rx_handler_unregister(dev); netdev_upper_dev_unlink(dev, br->dev); br_multicast_del_port(p); kobject_uevent(&p->kobj, KOBJ_REMOVE); kobject_del(&p->kobj); br_netpoll_disable(p); call_rcu(&p->rcu, destroy_nbp_rcu); } /* Delete bridge device */ void br_dev_delete(struct net_device *dev, struct list_head *head) { struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p, *n; list_for_each_entry_safe(p, n, &br->port_list, list) { del_nbp(p); } br_fdb_delete_by_port(br, NULL, 1); br_vlan_flush(br); del_timer_sync(&br->gc_timer); br_sysfs_delbr(br->dev); unregister_netdevice_queue(br->dev, head); } /* find an available port number */ static int find_portno(struct net_bridge *br) { int index; struct net_bridge_port *p; unsigned long *inuse; inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long), GFP_KERNEL); if (!inuse) return -ENOMEM; set_bit(0, inuse); /* zero is reserved */ list_for_each_entry(p, &br->port_list, list) { set_bit(p->port_no, inuse); } index = find_first_zero_bit(inuse, BR_MAX_PORTS); kfree(inuse); return (index >= BR_MAX_PORTS) ? 
-EXFULL : index; } /* called with RTNL but without bridge lock */ static struct net_bridge_port *new_nbp(struct net_bridge *br, struct net_device *dev) { int index; struct net_bridge_port *p; index = find_portno(br); if (index < 0) return ERR_PTR(index); p = kzalloc(sizeof(*p), GFP_KERNEL); if (p == NULL) return ERR_PTR(-ENOMEM); p->br = br; dev_hold(dev); p->dev = dev; p->path_cost = port_cost(dev); p->priority = 0x8000 >> BR_PORT_BITS; p->port_no = index; p->flags = BR_LEARNING | BR_FLOOD; br_init_port(p); p->state = BR_STATE_DISABLED; br_stp_port_timer_init(p); br_multicast_add_port(p); return p; } int br_add_bridge(struct net *net, const char *name) { struct net_device *dev; int res; dev = alloc_netdev(sizeof(struct net_bridge), name, br_dev_setup); if (!dev) return -ENOMEM; dev_net_set(dev, net); dev->rtnl_link_ops = &br_link_ops; res = register_netdev(dev); if (res) free_netdev(dev); return res; } int br_del_bridge(struct net *net, const char *name) { struct net_device *dev; int ret = 0; rtnl_lock(); dev = __dev_get_by_name(net, name); if (dev == NULL) ret = -ENXIO; /* Could not find device */ else if (!(dev->priv_flags & IFF_EBRIDGE)) { /* Attempt to delete non bridge device! */ ret = -EPERM; } else if (dev->flags & IFF_UP) { /* Not shutdown yet. 
*/ ret = -EBUSY; } else br_dev_delete(dev, NULL); rtnl_unlock(); return ret; } /* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */ int br_min_mtu(const struct net_bridge *br) { const struct net_bridge_port *p; int mtu = 0; ASSERT_RTNL(); if (list_empty(&br->port_list)) mtu = ETH_DATA_LEN; else { list_for_each_entry(p, &br->port_list, list) { if (!mtu || p->dev->mtu < mtu) mtu = p->dev->mtu; } } return mtu; } /* * Recomputes features using slave's features */ netdev_features_t br_features_recompute(struct net_bridge *br, netdev_features_t features) { struct net_bridge_port *p; netdev_features_t mask; if (list_empty(&br->port_list)) return features; mask = features; features &= ~NETIF_F_ONE_FOR_ALL; list_for_each_entry(p, &br->port_list, list) { features = netdev_increment_features(features, p->dev->features, mask); } return features; } /* called with RTNL */ int br_add_if(struct net_bridge *br, struct net_device *dev) { struct net_bridge_port *p; int err = 0; bool changed_addr; /* Don't allow bridging non-ethernet like devices */ if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr)) return -EINVAL; /* No bridging of bridges */ if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) return -ELOOP; /* Device is already being bridged */ if (br_port_exists(dev)) return -EBUSY; /* No bridging devices that dislike that (e.g. 
wireless) */ if (dev->priv_flags & IFF_DONT_BRIDGE) return -EOPNOTSUPP; p = new_nbp(br, dev); if (IS_ERR(p)) return PTR_ERR(p); call_netdevice_notifiers(NETDEV_JOIN, dev); err = dev_set_promiscuity(dev, 1); if (err) goto put_back; err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj), SYSFS_BRIDGE_PORT_ATTR); if (err) goto err1; err = br_sysfs_addif(p); if (err) goto err2; err = br_netpoll_enable(p, GFP_KERNEL); if (err) goto err3; err = netdev_master_upper_dev_link(dev, br->dev); if (err) goto err4; err = netdev_rx_handler_register(dev, br_handle_frame, p); if (err) goto err5; dev->priv_flags |= IFF_BRIDGE_PORT; dev_disable_lro(dev); list_add_rcu(&p->list, &br->port_list); netdev_update_features(br->dev); if (br->dev->needed_headroom < dev->needed_headroom) br->dev->needed_headroom = dev->needed_headroom; spin_lock_bh(&br->lock); changed_addr = br_stp_recalculate_bridge_id(br); if (netif_running(dev) && netif_oper_up(dev) && (br->dev->flags & IFF_UP)) br_stp_enable_port(p); spin_unlock_bh(&br->lock); br_ifinfo_notify(RTM_NEWLINK, p); if (changed_addr) call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev); dev_set_mtu(br->dev, br_min_mtu(br)); if (br_fdb_insert(br, p, dev->dev_addr, 0)) netdev_err(dev, "failed insert local address bridge forwarding table\n"); kobject_uevent(&p->kobj, KOBJ_ADD); return 0; err5: netdev_upper_dev_unlink(dev, br->dev); err4: br_netpoll_disable(p); err3: sysfs_remove_link(br->ifobj, p->dev->name); err2: kobject_put(&p->kobj); p = NULL; /* kobject_put frees */ err1: dev_set_promiscuity(dev, -1); put_back: dev_put(dev); kfree(p); return err; } /* called with RTNL */ int br_del_if(struct net_bridge *br, struct net_device *dev) { struct net_bridge_port *p; bool changed_addr; p = br_port_get_rtnl(dev); if (!p || p->br != br) return -EINVAL; /* Since more than one interface can be attached to a bridge, * there still maybe an alternate path for netconsole to use; * therefore there is no reason for a NETDEV_RELEASE event. 
*/ del_nbp(p); spin_lock_bh(&br->lock); changed_addr = br_stp_recalculate_bridge_id(br); spin_unlock_bh(&br->lock); if (changed_addr) call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev); netdev_update_features(br->dev); return 0; }
gpl-2.0
Eward5513/oceanbase
oceanbase_0.4/tools/getquerytest/ob_sql_client.cpp
12
1067
/* * (C) 2007-2012 Taobao Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * * Version: ob_sql_client.cpp, 02/01/2013 05:06:52 PM xiaochu Exp $ * * Author: * xiaochu.yh <xiaochu.yh@taobao.com> * Description: * MySQL Wrapper Imp * */ #include <stdlib.h> #include "tbsys.h" #include "ob_sql_client.h" extern char *g_mysql_ip; extern int g_mysql_port; int ObSqlClient::open() { int ret = 0; // don't allow open twice if (!opened_) { mysql_init(&mysql_); //if (!mysql_real_connect(&mysql_,"10.232.36.29", "admin", "admin", "testdb", 4797, NULL, 0)) if (!mysql_real_connect(&mysql_, g_mysql_ip, "admin", "admin", "testdb", g_mysql_port, NULL, 0)) { ret = -1; TBSYS_LOG(WARN, "fail to open db. ERR:%s", mysql_error(&mysql_)); } else { opened_ = true; } } return ret; } int ObSqlClient::close() { mysql_close(&mysql_); return 0; }
gpl-2.0
GalaxyTab4/android_kernel_samsung_degas
drivers/input/keyreset.c
12
7174
/* drivers/input/keyreset.c * * Copyright (C) 2008 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/input.h> #include <linux/keyreset.h> #include <linux/module.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/reboot.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/syscalls.h> #include <linux/of.h> struct keyreset_state { struct input_handler input_handler; unsigned long keybit[BITS_TO_LONGS(KEY_CNT)]; unsigned long upbit[BITS_TO_LONGS(KEY_CNT)]; unsigned long key[BITS_TO_LONGS(KEY_CNT)]; spinlock_t lock; int key_down_target; int key_down; int key_up; int restart_disabled; int need_panic; int (*reset_fn)(void); int (*dump_fn)(int); int dump_pressed; }; int restart_requested; static void deferred_restart(struct work_struct *dummy) { restart_requested = 2; sys_sync(); restart_requested = 3; kernel_restart(NULL); } static DECLARE_WORK(restart_work, deferred_restart); static void keyreset_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) { unsigned long flags; struct keyreset_state *state = handle->private; if (type != EV_KEY) return; if (code >= KEY_MAX) return; if (!test_bit(code, state->keybit)) return; spin_lock_irqsave(&state->lock, flags); if (!test_bit(code, state->key) == !value) goto done; __change_bit(code, state->key); if (test_bit(code, state->upbit)) { if (value) { state->restart_disabled = 1; state->key_up++; } else state->key_up--; } else { if (value) state->key_down++; else state->key_down--; } if (state->key_down == 0 && state->key_up == 0) 
state->restart_disabled = 0; pr_debug("reset key changed %d %d new state %d-%d-%d\n", code, value, state->key_down, state->key_up, state->restart_disabled); if (value && !state->restart_disabled && state->key_down == state->key_down_target) { state->restart_disabled = 1; if (state->need_panic) { panic("Kernel Panic trigger by keyboard!!!\n"); while(1); } if (restart_requested) panic("keyboard reset failed, %d", restart_requested); if (state->reset_fn) { restart_requested = state->reset_fn(); } else { pr_info("keyboard reset\n"); schedule_work(&restart_work); restart_requested = 1; } } if (state->key_down == state->key_down_target) { state->dump_pressed = 1; if (state->dump_fn) restart_requested = state->dump_fn(1); } else if (state->dump_pressed == 1) { state->dump_pressed = 0; if (state->dump_fn) restart_requested = state->dump_fn(0); } done: spin_unlock_irqrestore(&state->lock, flags); } static int keyreset_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) { int i; int ret; struct input_handle *handle; struct keyreset_state *state = container_of(handler, struct keyreset_state, input_handler); for (i = 0; i < KEY_MAX; i++) { if (test_bit(i, state->keybit) && test_bit(i, dev->keybit)) break; } if (i == KEY_MAX) return -ENODEV; handle = kzalloc(sizeof(*handle), GFP_KERNEL); if (!handle) return -ENOMEM; handle->dev = dev; handle->handler = handler; handle->name = "keyreset"; handle->private = state; ret = input_register_handle(handle); if (ret) goto err_input_register_handle; ret = input_open_device(handle); if (ret) goto err_input_open_device; pr_info("using input dev %s for key reset\n", dev->name); return 0; err_input_open_device: input_unregister_handle(handle); err_input_register_handle: kfree(handle); return ret; } static void keyreset_disconnect(struct input_handle *handle) { input_close_device(handle); input_unregister_handle(handle); kfree(handle); } static const struct input_device_id keyreset_ids[] = { { .flags 
= INPUT_DEVICE_ID_MATCH_EVBIT, .evbit = { BIT_MASK(EV_KEY) }, }, { }, }; MODULE_DEVICE_TABLE(input, keyreset_ids); static int keyreset_probe(struct platform_device *pdev) { int ret; int key, *keyp; struct keyreset_state *state; struct keyreset_platform_data *pdata = pdev->dev.platform_data; struct device_node *np = pdev->dev.of_node; const struct property *prop; const __be32 *val; int count, i; if (!pdata && !np) return -EINVAL; if (pdata) keyp = pdata->keys_down; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return -ENOMEM; spin_lock_init(&state->lock); #ifndef CONFIG_OF while ((key = *keyp++)) { if (key >= KEY_MAX) continue; state->key_down_target++; __set_bit(key, state->keybit); } if (pdata->keys_up) { keyp = pdata->keys_up; while ((key = *keyp++)) { if (key >= KEY_MAX) continue; __set_bit(key, state->keybit); __set_bit(key, state->upbit); } } if (pdata->reset_fn) state->reset_fn = pdata->reset_fn; if (pdata->panic_before_reset) state->need_panic = 1; else state->need_panic = 0; if (pdata->dump_fn) state->dump_fn = pdata->dump_fn; #else prop = of_find_property(np, "keys-down", NULL); if (!prop || !prop->value) { pr_err("Invalid keys-down"); kfree(state); return -ENOMEM; } count = prop->length / sizeof(u32); val = prop->value; if (count > BITS_TO_LONGS(KEY_CNT)) count = BITS_TO_LONGS(KEY_CNT); for (i = 0; i < count; i++) { key = (unsigned short)be32_to_cpup(val++); printk("key = %x\n", key); if (key == KEY_RESERVED || key > KEY_MAX) continue; state->key_down_target++; __set_bit(key, state->keybit); } state->need_panic = of_property_read_bool(np, "keyreset-need-panic"); #endif state->input_handler.event = keyreset_event; state->input_handler.connect = keyreset_connect; state->input_handler.disconnect = keyreset_disconnect; state->input_handler.name = KEYRESET_NAME; state->input_handler.id_table = keyreset_ids; ret = input_register_handler(&state->input_handler); if (ret) { kfree(state); return ret; } platform_set_drvdata(pdev, state); return 0; } int 
keyreset_remove(struct platform_device *pdev) { struct keyreset_state *state = platform_get_drvdata(pdev); input_unregister_handler(&state->input_handler); kfree(state); return 0; } #ifdef CONFIG_OF static const struct of_device_id keyreset_dt_match[] = { { .compatible = "marvell,keyreset" }, {}, }; MODULE_DEVICE_TABLE(of, keyreset_dt_match); #endif struct platform_driver keyreset_driver = { .probe = keyreset_probe, .remove = keyreset_remove, .driver = { .name = KEYRESET_NAME, .of_match_table = of_match_ptr(keyreset_dt_match), .owner = THIS_MODULE, }, }; static int __init keyreset_init(void) { return platform_driver_register(&keyreset_driver); } static void __exit keyreset_exit(void) { return platform_driver_unregister(&keyreset_driver); } module_init(keyreset_init); module_exit(keyreset_exit);
gpl-2.0
PhenomX1998/FRACTALX-OP3
drivers/usb/gadget/function/f_fs.c
12
84354
/* * f_fs.c -- user mode file system API for USB composite function controllers * * Copyright (C) 2010 Samsung Electronics * Author: Michal Nazarewicz <mina86@mina86.com> * * Based on inode.c (GadgetFS) which was: * Copyright (C) 2003-2004 David Brownell * Copyright (C) 2003 Agilent Technologies * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /* #define DEBUG */ /* #define VERBOSE_DEBUG */ #include <linux/blkdev.h> #include <linux/pagemap.h> #include <linux/export.h> #include <linux/hid.h> #include <linux/module.h> #include <asm/unaligned.h> #include <linux/usb/composite.h> #include <linux/usb/functionfs.h> #include <linux/aio.h> #include <linux/mmu_context.h> #include <linux/poll.h> #include "u_fs.h" #include "u_f.h" #include "u_os_desc.h" #include "configfs.h" #define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */ /* Reference counter handling */ static void ffs_data_get(struct ffs_data *ffs); static void ffs_data_put(struct ffs_data *ffs); /* Creates new ffs_data object. */ static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc)); /* Opened counter handling. */ static void ffs_data_opened(struct ffs_data *ffs); static void ffs_data_closed(struct ffs_data *ffs); /* Called with ffs->mutex held; take over ownership of data. 
*/ static int __must_check __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len); static int __must_check __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len); /* The function structure ***************************************************/ struct ffs_ep; struct ffs_function { struct usb_configuration *conf; struct usb_gadget *gadget; struct ffs_data *ffs; struct ffs_ep *eps; u8 eps_revmap[16]; short *interfaces_nums; struct usb_function function; }; static struct ffs_function *ffs_func_from_usb(struct usb_function *f) { return container_of(f, struct ffs_function, function); } static inline enum ffs_setup_state ffs_setup_state_clear_cancelled(struct ffs_data *ffs) { return (enum ffs_setup_state) cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP); } static void ffs_func_eps_disable(struct ffs_function *func); static int __must_check ffs_func_eps_enable(struct ffs_function *func); static int ffs_func_bind(struct usb_configuration *, struct usb_function *); static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned); static void ffs_func_disable(struct usb_function *); static int ffs_func_setup(struct usb_function *, const struct usb_ctrlrequest *); static void ffs_func_suspend(struct usb_function *); static void ffs_func_resume(struct usb_function *); static int ffs_func_revmap_ep(struct ffs_function *func, u8 num); static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf); /* The endpoints structures *************************************************/ struct ffs_ep { struct usb_ep *ep; /* P: ffs->eps_lock */ struct usb_request *req; /* P: epfile->mutex */ /* [0]: full speed, [1]: high speed, [2]: super speed */ struct usb_endpoint_descriptor *descs[3]; u8 num; int status; /* P: epfile->mutex */ bool is_busy; }; struct ffs_epfile { /* Protects ep->ep and ep->req. 
*/ struct mutex mutex; wait_queue_head_t wait; atomic_t error; struct ffs_data *ffs; struct ffs_ep *ep; /* P: ffs->eps_lock */ struct dentry *dentry; char name[5]; unsigned char in; /* P: ffs->eps_lock */ unsigned char isoc; /* P: ffs->eps_lock */ unsigned char _pad; atomic_t opened; }; /* ffs_io_data structure ***************************************************/ struct ffs_io_data { bool aio; bool read; struct kiocb *kiocb; const struct iovec *iovec; unsigned long nr_segs; char __user *buf; size_t len; struct mm_struct *mm; struct work_struct work; struct usb_ep *ep; struct usb_request *req; }; struct ffs_desc_helper { struct ffs_data *ffs; unsigned interfaces_count; unsigned eps_count; }; static int __must_check ffs_epfiles_create(struct ffs_data *ffs); static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count); static struct dentry * ffs_sb_create_file(struct super_block *sb, const char *name, void *data, const struct file_operations *fops); /* Devices management *******************************************************/ DEFINE_MUTEX(ffs_lock); EXPORT_SYMBOL_GPL(ffs_lock); static struct ffs_dev *_ffs_find_dev(const char *name); static struct ffs_dev *_ffs_alloc_dev(void); static int _ffs_name_dev(struct ffs_dev *dev, const char *name); static void _ffs_free_dev(struct ffs_dev *dev); static void *ffs_acquire_dev(const char *dev_name); static void ffs_release_dev(struct ffs_data *ffs_data); static int ffs_ready(struct ffs_data *ffs); static void ffs_closed(struct ffs_data *ffs); /* Misc helper functions ****************************************************/ static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock) __attribute__((warn_unused_result, nonnull)); static char *ffs_prepare_buffer(const char __user *buf, size_t len, size_t extra_buf_alloc) __attribute__((warn_unused_result, nonnull)); /* Control file aka ep0 *****************************************************/ static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request 
*req) { struct ffs_data *ffs = req->context; complete_all(&ffs->ep0req_completion); } static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len) { struct usb_request *req = ffs->ep0req; int ret; req->zero = len < le16_to_cpu(ffs->ev.setup.wLength); spin_unlock_irq(&ffs->ev.waitq.lock); req->buf = data; req->length = len; /* * UDC layer requires to provide a buffer even for ZLP, but should * not use it at all. Let's provide some poisoned pointer to catch * possible bug in the driver. */ if (req->buf == NULL) req->buf = (void *)0xDEADBABE; reinit_completion(&ffs->ep0req_completion); ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC); if (unlikely(ret < 0)) return ret; ret = wait_for_completion_interruptible(&ffs->ep0req_completion); if (unlikely(ret)) { usb_ep_dequeue(ffs->gadget->ep0, req); return -EINTR; } ffs->setup_state = FFS_NO_SETUP; return req->status ? req->status : req->actual; } static int __ffs_ep0_stall(struct ffs_data *ffs) { if (ffs->ev.can_stall) { pr_vdebug("ep0 stall\n"); usb_ep_set_halt(ffs->gadget->ep0); ffs->setup_state = FFS_NO_SETUP; return -EL2HLT; } else { pr_debug("bogus ep0 stall!\n"); return -ESRCH; } } static ssize_t ffs_ep0_write(struct file *file, const char __user *buf, size_t len, loff_t *ptr) { struct ffs_data *ffs = file->private_data; struct usb_gadget *gadget = ffs->gadget; ssize_t ret; char *data; ENTER(); /* Fast check if setup was canceled */ if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED) return -EIDRM; /* Acquire mutex */ ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); if (unlikely(ret < 0)) return ret; /* Check state */ switch (ffs->state) { case FFS_READ_DESCRIPTORS: case FFS_READ_STRINGS: /* Copy data */ if (unlikely(len < 16)) { ret = -EINVAL; break; } data = ffs_prepare_buffer(buf, len, 0); if (IS_ERR(data)) { ret = PTR_ERR(data); break; } /* Handle data */ if (ffs->state == FFS_READ_DESCRIPTORS) { pr_info("read descriptors\n"); ret = __ffs_data_got_descs(ffs, data, 
len); if (unlikely(ret < 0)) break; ffs->state = FFS_READ_STRINGS; ret = len; } else { pr_info("read strings\n"); ret = __ffs_data_got_strings(ffs, data, len); if (unlikely(ret < 0)) break; ret = ffs_epfiles_create(ffs); if (unlikely(ret)) { ffs->state = FFS_CLOSING; break; } ffs->state = FFS_ACTIVE; mutex_unlock(&ffs->mutex); ret = ffs_ready(ffs); if (unlikely(ret < 0)) { ffs->state = FFS_CLOSING; return ret; } return len; } break; case FFS_ACTIVE: data = NULL; /* * We're called from user space, we can use _irq * rather then _irqsave */ spin_lock_irq(&ffs->ev.waitq.lock); switch (ffs_setup_state_clear_cancelled(ffs)) { case FFS_SETUP_CANCELLED: ret = -EIDRM; goto done_spin; case FFS_NO_SETUP: ret = -ESRCH; goto done_spin; case FFS_SETUP_PENDING: break; } /* FFS_SETUP_PENDING */ if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) { spin_unlock_irq(&ffs->ev.waitq.lock); ret = __ffs_ep0_stall(ffs); break; } /* FFS_SETUP_PENDING and not stall */ len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength)); spin_unlock_irq(&ffs->ev.waitq.lock); data = ffs_prepare_buffer(buf, len, gadget->extra_buf_alloc); if (IS_ERR(data)) { ret = PTR_ERR(data); break; } spin_lock_irq(&ffs->ev.waitq.lock); /* * We are guaranteed to be still in FFS_ACTIVE state * but the state of setup could have changed from * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need * to check for that. If that happened we copied data * from user space in vain but it's unlikely. * * For sure we are not in FFS_NO_SETUP since this is * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP * transition can be performed and it's protected by * mutex. 
*/ if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED) { ret = -EIDRM; done_spin: spin_unlock_irq(&ffs->ev.waitq.lock); } else { /* unlocks spinlock */ ret = __ffs_ep0_queue_wait(ffs, data, len); } kfree(data); break; default: ret = -EBADFD; break; } mutex_unlock(&ffs->mutex); return ret; } static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf, size_t n) { /* * We are holding ffs->ev.waitq.lock and ffs->mutex and we need * to release them. */ struct usb_functionfs_event events[n]; unsigned i = 0; memset(events, 0, sizeof events); do { events[i].type = ffs->ev.types[i]; if (events[i].type == FUNCTIONFS_SETUP) { events[i].u.setup = ffs->ev.setup; ffs->setup_state = FFS_SETUP_PENDING; } } while (++i < n); if (n < ffs->ev.count) { ffs->ev.count -= n; memmove(ffs->ev.types, ffs->ev.types + n, ffs->ev.count * sizeof *ffs->ev.types); } else { ffs->ev.count = 0; } spin_unlock_irq(&ffs->ev.waitq.lock); mutex_unlock(&ffs->mutex); return unlikely(__copy_to_user(buf, events, sizeof events)) ? 
-EFAULT : sizeof events; } static ssize_t ffs_ep0_read(struct file *file, char __user *buf, size_t len, loff_t *ptr) { struct ffs_data *ffs = file->private_data; char *data = NULL; size_t n; int ret; ENTER(); /* Fast check if setup was canceled */ if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED) return -EIDRM; /* Acquire mutex */ ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); if (unlikely(ret < 0)) return ret; /* Check state */ if (ffs->state != FFS_ACTIVE) { ret = -EBADFD; goto done_mutex; } /* * We're called from user space, we can use _irq rather then * _irqsave */ spin_lock_irq(&ffs->ev.waitq.lock); switch (ffs_setup_state_clear_cancelled(ffs)) { case FFS_SETUP_CANCELLED: ret = -EIDRM; break; case FFS_NO_SETUP: n = len / sizeof(struct usb_functionfs_event); if (unlikely(!n)) { ret = -EINVAL; break; } if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) { ret = -EAGAIN; break; } if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq, ffs->ev.count)) { ret = -EINTR; break; } return __ffs_ep0_read_events(ffs, buf, min(n, (size_t)ffs->ev.count)); case FFS_SETUP_PENDING: if (ffs->ev.setup.bRequestType & USB_DIR_IN) { spin_unlock_irq(&ffs->ev.waitq.lock); ret = __ffs_ep0_stall(ffs); goto done_mutex; } len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength)); spin_unlock_irq(&ffs->ev.waitq.lock); if (likely(len)) { data = kmalloc(len, GFP_KERNEL); if (unlikely(!data)) { ret = -ENOMEM; goto done_mutex; } } spin_lock_irq(&ffs->ev.waitq.lock); /* See ffs_ep0_write() */ if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED) { ret = -EIDRM; break; } /* unlocks spinlock */ ret = __ffs_ep0_queue_wait(ffs, data, len); if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len))) ret = -EFAULT; goto done_mutex; default: ret = -EBADFD; break; } spin_unlock_irq(&ffs->ev.waitq.lock); done_mutex: mutex_unlock(&ffs->mutex); kfree(data); return ret; } static int ffs_ep0_open(struct inode *inode, struct file *file) { struct 
ffs_data *ffs = inode->i_private; ENTER(); if (unlikely(ffs->state == FFS_CLOSING)) return -EBUSY; smp_mb__before_atomic(); if (atomic_read(&ffs->opened)) return -EBUSY; file->private_data = ffs; ffs_data_opened(ffs); return 0; } static int ffs_ep0_release(struct inode *inode, struct file *file) { struct ffs_data *ffs = file->private_data; ENTER(); ffs_data_closed(ffs); return 0; } static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value) { struct ffs_data *ffs = file->private_data; struct usb_gadget *gadget = ffs->gadget; long ret; ENTER(); if (code == FUNCTIONFS_INTERFACE_REVMAP) { struct ffs_function *func = ffs->func; ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV; } else if (gadget && gadget->ops->ioctl) { ret = gadget->ops->ioctl(gadget, code, value); } else { ret = -ENOTTY; } return ret; } static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait) { struct ffs_data *ffs = file->private_data; unsigned int mask = POLLWRNORM; int ret; poll_wait(file, &ffs->ev.waitq, wait); ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); if (unlikely(ret < 0)) return mask; switch (ffs->state) { case FFS_READ_DESCRIPTORS: case FFS_READ_STRINGS: mask |= POLLOUT; break; case FFS_ACTIVE: switch (ffs->setup_state) { case FFS_NO_SETUP: if (ffs->ev.count) mask |= POLLIN; break; case FFS_SETUP_PENDING: case FFS_SETUP_CANCELLED: mask |= (POLLIN | POLLOUT); break; } case FFS_CLOSING: break; } mutex_unlock(&ffs->mutex); return mask; } static const struct file_operations ffs_ep0_operations = { .llseek = no_llseek, .open = ffs_ep0_open, .write = ffs_ep0_write, .read = ffs_ep0_read, .release = ffs_ep0_release, .unlocked_ioctl = ffs_ep0_ioctl, .poll = ffs_ep0_poll, }; /* "Normal" endpoints operations ********************************************/ static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req) { struct ffs_ep *ep = _ep->driver_data; ENTER(); /* req may be freed during unbind */ if (ep && ep->req && 
likely(req->context)) { struct ffs_ep *ep = _ep->driver_data; ep->status = req->status ? req->status : req->actual; /* Set is_busy false to indicate completion of last request */ ep->is_busy = false; complete(req->context); } } static void ffs_user_copy_worker(struct work_struct *work) { struct ffs_io_data *io_data = container_of(work, struct ffs_io_data, work); int ret = io_data->req->status ? io_data->req->status : io_data->req->actual; if (io_data->read && ret > 0) { int i; size_t pos = 0; /* * Since req->length may be bigger than io_data->len (after * being rounded up to maxpacketsize), we may end up with more * data then user space has space for. */ ret = min_t(int, ret, io_data->len); use_mm(io_data->mm); for (i = 0; i < io_data->nr_segs; i++) { size_t len = min_t(size_t, ret - pos, io_data->iovec[i].iov_len); if (!len) break; if (unlikely(copy_to_user(io_data->iovec[i].iov_base, &io_data->buf[pos], len))) { ret = -EFAULT; break; } pos += len; } unuse_mm(io_data->mm); } aio_complete(io_data->kiocb, ret, ret); usb_ep_free_request(io_data->ep, io_data->req); io_data->kiocb->private = NULL; if (io_data->read) kfree(io_data->iovec); kfree(io_data->buf); kfree(io_data); } static void ffs_epfile_async_io_complete(struct usb_ep *_ep, struct usb_request *req) { struct ffs_io_data *io_data = req->context; ENTER(); INIT_WORK(&io_data->work, ffs_user_copy_worker); schedule_work(&io_data->work); } #define MAX_BUF_LEN 4096 static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) { struct ffs_epfile *epfile = file->private_data; struct ffs_ep *ep; struct ffs_data *ffs = epfile->ffs; char *data = NULL; ssize_t ret, data_len = -EINVAL; int halt; size_t extra_buf_alloc = 0; pr_debug("%s: len %zu, read %d\n", __func__, io_data->len, io_data->read); if (atomic_read(&epfile->error)) return -ENODEV; /* Are we still active? 
*/ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) { ret = -ENODEV; goto error; } /* Wait for endpoint to be enabled */ ep = epfile->ep; if (!ep) { if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; goto error; } /* Don't wait on write if device is offline */ if (!io_data->read) { ret = -ENODEV; goto error; } /* * if ep is disabled, this fails all current IOs * and wait for next epfile open to happen */ if (!atomic_read(&epfile->error)) { ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep)); if (ret < 0) goto error; } if (!ep) { ret = -ENODEV; goto error; } } /* Do we halt? */ halt = (!io_data->read == !epfile->in); if (halt && epfile->isoc) { ret = -EINVAL; goto error; } /* Allocate & copy */ if (!halt) { /* * if we _do_ wait above, the epfile->ffs->gadget might be NULL * before the waiting completes, so do not assign to 'gadget' earlier */ struct usb_gadget *gadget = epfile->ffs->gadget; spin_lock_irq(&epfile->ffs->eps_lock); /* In the meantime, endpoint got disabled or changed. */ if (epfile->ep != ep) { spin_unlock_irq(&epfile->ffs->eps_lock); return -ESHUTDOWN; } /* * Controller may require buffer size to be aligned to * maxpacketsize of an out endpoint. */ data_len = io_data->read ? 
usb_ep_align_maybe(gadget, ep->ep, io_data->len) : io_data->len; spin_unlock_irq(&epfile->ffs->eps_lock); extra_buf_alloc = ffs->gadget->extra_buf_alloc; if (io_data->read) data = kmalloc(data_len + extra_buf_alloc, GFP_KERNEL); else data = kmalloc(data_len, GFP_KERNEL); if (unlikely(!data)) return -ENOMEM; if (io_data->aio && !io_data->read) { int i; size_t pos = 0; for (i = 0; i < io_data->nr_segs; i++) { if (unlikely(copy_from_user(&data[pos], io_data->iovec[i].iov_base, io_data->iovec[i].iov_len))) { ret = -EFAULT; goto error; } pos += io_data->iovec[i].iov_len; } } else { if (!io_data->read && unlikely(__copy_from_user(data, io_data->buf, io_data->len))) { ret = -EFAULT; goto error; } } } /* We will be using request */ ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK); if (unlikely(ret)) goto error; spin_lock_irq(&epfile->ffs->eps_lock); if (epfile->ep != ep) { /* In the meantime, endpoint got disabled or changed. */ ret = -ESHUTDOWN; spin_unlock_irq(&epfile->ffs->eps_lock); } else if (halt) { /* Halt */ if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep)) usb_ep_set_halt(ep->ep); spin_unlock_irq(&epfile->ffs->eps_lock); ret = -EBADMSG; } else { /* Fire the request */ struct usb_request *req; /* * Sanity Check: even though data_len can't be used * uninitialized at the time I write this comment, some * compilers complain about this situation. * In order to keep the code clean from warnings, data_len is * being initialized to -EINVAL during its declaration, which * means we can't rely on compiler anymore to warn no future * changes won't result in data_len being used uninitialized. * For such reason, we're adding this redundant sanity check * here. 
*/ if (unlikely(data_len == -EINVAL)) { WARN(1, "%s: data_len == -EINVAL\n", __func__); ret = -EINVAL; goto error_lock; } if (io_data->aio) { req = usb_ep_alloc_request(ep->ep, GFP_KERNEL); if (unlikely(!req)) goto error_lock; req->buf = data; req->length = data_len; io_data->buf = data; io_data->ep = ep->ep; io_data->req = req; req->context = io_data; req->complete = ffs_epfile_async_io_complete; ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); if (unlikely(ret)) { usb_ep_free_request(ep->ep, req); goto error_lock; } ret = -EIOCBQUEUED; spin_unlock_irq(&epfile->ffs->eps_lock); } else { struct completion *done; req = ep->req; req->buf = data; req->length = data_len; req->complete = ffs_epfile_io_complete; ret = 0; if (io_data->read) { reinit_completion( &epfile->ffs->epout_completion); done = &epfile->ffs->epout_completion; req->context = done; } else { reinit_completion( &epfile->ffs->epin_completion); done = &epfile->ffs->epin_completion; req->context = done; } /* Don't queue another read if previous is still busy */ if (!(io_data->read && ep->is_busy)) { ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); ep->is_busy = true; } spin_unlock_irq(&epfile->ffs->eps_lock); if (unlikely(ret < 0)) { ret = -EIO; } else if (unlikely( wait_for_completion_interruptible(done))) { spin_lock_irq(&epfile->ffs->eps_lock); /* * While we were acquiring lock endpoint got * disabled (disconnect) or changed * (composition switch) */ if (epfile->ep == ep) usb_ep_dequeue(ep->ep, req); spin_unlock_irq(&epfile->ffs->eps_lock); ret = -EINTR; } else { /* * XXX We may end up silently droping data * here. Since data_len (i.e. req->length) may * be bigger than len (after being rounded up * to maxpacketsize), we may end up with more * data then user space has space for. 
*/ spin_lock_irq(&epfile->ffs->eps_lock); /* * While we were acquiring lock endpoint got * disabled (disconnect) or changed * (composition switch) */ if (epfile->ep == ep) ret = ep->status; else ret = -ENODEV; spin_unlock_irq(&epfile->ffs->eps_lock); if (io_data->read && ret > 0) { if (io_data->len != MAX_BUF_LEN && ret < io_data->len) pr_err("less data(%zd) recieved than intended length(%zu)\n", ret, io_data->len); else if (ret > io_data->len) pr_err("More data(%zd) recieved than intended length(%zu)\n", ret, io_data->len); ret = min_t(size_t, ret, io_data->len); if (unlikely(copy_to_user(io_data->buf, data, ret))) { pr_err("Fail to copy to user len:%zd\n", ret); ret = -EFAULT; } } } kfree(data); } } mutex_unlock(&epfile->mutex); return ret; error_lock: spin_unlock_irq(&epfile->ffs->eps_lock); mutex_unlock(&epfile->mutex); error: kfree(data); if (ret < 0) pr_err_ratelimited("Error: returning %zd value\n", ret); return ret; } static ssize_t ffs_epfile_write(struct file *file, const char __user *buf, size_t len, loff_t *ptr) { struct ffs_io_data io_data; ENTER(); io_data.aio = false; io_data.read = false; io_data.buf = (char * __user)buf; io_data.len = len; return ffs_epfile_io(file, &io_data); } static ssize_t ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr) { struct ffs_io_data io_data; ENTER(); io_data.aio = false; io_data.read = true; io_data.buf = buf; io_data.len = len; return ffs_epfile_io(file, &io_data); } static int ffs_epfile_open(struct inode *inode, struct file *file) { struct ffs_epfile *epfile = inode->i_private; ENTER(); if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) return -ENODEV; if (atomic_read(&epfile->opened)) { pr_err("%s(): ep(%s) is already opened.\n", __func__, epfile->name); return -EBUSY; } atomic_set(&epfile->opened, 1); file->private_data = epfile; ffs_data_opened(epfile->ffs); atomic_set(&epfile->error, 0); return 0; } static int ffs_aio_cancel(struct kiocb *kiocb) { struct ffs_io_data *io_data = 
kiocb->private; struct ffs_epfile *epfile = kiocb->ki_filp->private_data; int value; ENTER(); spin_lock_irq(&epfile->ffs->eps_lock); if (likely(io_data && io_data->ep && io_data->req)) value = usb_ep_dequeue(io_data->ep, io_data->req); else value = -EINVAL; spin_unlock_irq(&epfile->ffs->eps_lock); return value; } static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb, const struct iovec *iovec, unsigned long nr_segs, loff_t loff) { struct ffs_io_data *io_data; ENTER(); io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); if (unlikely(!io_data)) return -ENOMEM; io_data->aio = true; io_data->read = false; io_data->kiocb = kiocb; io_data->iovec = iovec; io_data->nr_segs = nr_segs; io_data->len = kiocb->ki_nbytes; io_data->mm = current->mm; kiocb->private = io_data; kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); return ffs_epfile_io(kiocb->ki_filp, io_data); } static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb, const struct iovec *iovec, unsigned long nr_segs, loff_t loff) { struct ffs_io_data *io_data; struct iovec *iovec_copy; ENTER(); iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL); if (unlikely(!iovec_copy)) return -ENOMEM; memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs); io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); if (unlikely(!io_data)) { kfree(iovec_copy); return -ENOMEM; } io_data->aio = true; io_data->read = true; io_data->kiocb = kiocb; io_data->iovec = iovec_copy; io_data->nr_segs = nr_segs; io_data->len = kiocb->ki_nbytes; io_data->mm = current->mm; kiocb->private = io_data; kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); return ffs_epfile_io(kiocb->ki_filp, io_data); } static int ffs_epfile_release(struct inode *inode, struct file *file) { struct ffs_epfile *epfile = inode->i_private; ENTER(); atomic_set(&epfile->opened, 0); atomic_set(&epfile->error, 1); ffs_data_closed(epfile->ffs); file->private_data = NULL; return 0; } static long ffs_epfile_ioctl(struct file *file, unsigned code, unsigned long value) { struct ffs_epfile 
*epfile = file->private_data; int ret; ENTER(); if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) return -ENODEV; spin_lock_irq(&epfile->ffs->eps_lock); if (likely(epfile->ep)) { switch (code) { case FUNCTIONFS_FIFO_STATUS: ret = usb_ep_fifo_status(epfile->ep->ep); break; case FUNCTIONFS_FIFO_FLUSH: usb_ep_fifo_flush(epfile->ep->ep); ret = 0; break; case FUNCTIONFS_CLEAR_HALT: ret = usb_ep_clear_halt(epfile->ep->ep); break; case FUNCTIONFS_ENDPOINT_REVMAP: ret = epfile->ep->num; break; case FUNCTIONFS_ENDPOINT_DESC: { int desc_idx; struct usb_endpoint_descriptor *desc; switch (epfile->ffs->gadget->speed) { case USB_SPEED_SUPER: desc_idx = 2; break; case USB_SPEED_HIGH: desc_idx = 1; break; default: desc_idx = 0; } desc = epfile->ep->descs[desc_idx]; spin_unlock_irq(&epfile->ffs->eps_lock); ret = copy_to_user((void *)value, desc, sizeof(*desc)); if (ret) ret = -EFAULT; return ret; } default: ret = -ENOTTY; } } else { ret = -ENODEV; } spin_unlock_irq(&epfile->ffs->eps_lock); return ret; } static const struct file_operations ffs_epfile_operations = { .llseek = no_llseek, .open = ffs_epfile_open, .write = ffs_epfile_write, .read = ffs_epfile_read, .aio_write = ffs_epfile_aio_write, .aio_read = ffs_epfile_aio_read, .release = ffs_epfile_release, .unlocked_ioctl = ffs_epfile_ioctl, }; /* File system and super block operations ***********************************/ /* * Mounting the file system creates a controller file, used first for * function configuration then later for event monitoring. 
*/ static struct inode *__must_check ffs_sb_make_inode(struct super_block *sb, void *data, const struct file_operations *fops, const struct inode_operations *iops, struct ffs_file_perms *perms) { struct inode *inode; ENTER(); inode = new_inode(sb); if (likely(inode)) { struct timespec current_time = CURRENT_TIME; inode->i_ino = get_next_ino(); inode->i_mode = perms->mode; inode->i_uid = perms->uid; inode->i_gid = perms->gid; inode->i_atime = current_time; inode->i_mtime = current_time; inode->i_ctime = current_time; inode->i_private = data; if (fops) inode->i_fop = fops; if (iops) inode->i_op = iops; } return inode; } /* Create "regular" file */ static struct dentry *ffs_sb_create_file(struct super_block *sb, const char *name, void *data, const struct file_operations *fops) { struct ffs_data *ffs = sb->s_fs_info; struct dentry *dentry; struct inode *inode; ENTER(); dentry = d_alloc_name(sb->s_root, name); if (unlikely(!dentry)) return NULL; inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms); if (unlikely(!inode)) { dput(dentry); return NULL; } d_add(dentry, inode); return dentry; } /* Super block */ static const struct super_operations ffs_sb_operations = { .statfs = simple_statfs, .drop_inode = generic_delete_inode, }; struct ffs_sb_fill_data { struct ffs_file_perms perms; umode_t root_mode; const char *dev_name; struct ffs_data *ffs_data; }; static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) { struct ffs_sb_fill_data *data = _data; struct inode *inode; struct ffs_data *ffs = data->ffs_data; ENTER(); ffs->sb = sb; data->ffs_data = NULL; sb->s_fs_info = ffs; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = FUNCTIONFS_MAGIC; sb->s_op = &ffs_sb_operations; sb->s_time_gran = 1; /* Root inode */ data->perms.mode = data->root_mode; inode = ffs_sb_make_inode(sb, NULL, &simple_dir_operations, &simple_dir_inode_operations, &data->perms); sb->s_root = d_make_root(inode); if (unlikely(!sb->s_root)) 
return -ENOMEM; /* EP0 file */ if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs, &ffs_ep0_operations))) return -ENOMEM; return 0; } static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts) { ENTER(); if (!opts || !*opts) return 0; for (;;) { unsigned long value; char *eq, *comma; /* Option limit */ comma = strchr(opts, ','); if (comma) *comma = 0; /* Value limit */ eq = strchr(opts, '='); if (unlikely(!eq)) { pr_err("'=' missing in %s\n", opts); return -EINVAL; } *eq = 0; /* Parse value */ if (kstrtoul(eq + 1, 0, &value)) { pr_err("%s: invalid value: %s\n", opts, eq + 1); return -EINVAL; } /* Interpret option */ switch (eq - opts) { case 5: if (!memcmp(opts, "rmode", 5)) data->root_mode = (value & 0555) | S_IFDIR; else if (!memcmp(opts, "fmode", 5)) data->perms.mode = (value & 0666) | S_IFREG; else goto invalid; break; case 4: if (!memcmp(opts, "mode", 4)) { data->root_mode = (value & 0555) | S_IFDIR; data->perms.mode = (value & 0666) | S_IFREG; } else { goto invalid; } break; case 3: if (!memcmp(opts, "uid", 3)) { data->perms.uid = make_kuid(current_user_ns(), value); if (!uid_valid(data->perms.uid)) { pr_err("%s: unmapped value: %lu\n", opts, value); return -EINVAL; } } else if (!memcmp(opts, "gid", 3)) { data->perms.gid = make_kgid(current_user_ns(), value); if (!gid_valid(data->perms.gid)) { pr_err("%s: unmapped value: %lu\n", opts, value); return -EINVAL; } } else { goto invalid; } break; default: invalid: pr_err("%s: invalid option\n", opts); return -EINVAL; } /* Next iteration */ if (!comma) break; opts = comma + 1; } return 0; } /* "mount -t functionfs dev_name /dev/function" ends up here */ static struct dentry * ffs_fs_mount(struct file_system_type *t, int flags, const char *dev_name, void *opts) { struct ffs_sb_fill_data data = { .perms = { .mode = S_IFREG | 0600, .uid = GLOBAL_ROOT_UID, .gid = GLOBAL_ROOT_GID, }, .root_mode = S_IFDIR | 0500, }; struct dentry *rv; int ret; void *ffs_dev; struct ffs_data *ffs; ENTER(); ret = 
ffs_fs_parse_opts(&data, opts); if (unlikely(ret < 0)) return ERR_PTR(ret); ffs = ffs_data_new(); if (unlikely(!ffs)) return ERR_PTR(-ENOMEM); ffs->file_perms = data.perms; ffs->dev_name = kstrdup(dev_name, GFP_KERNEL); if (unlikely(!ffs->dev_name)) { ffs_data_put(ffs); return ERR_PTR(-ENOMEM); } ffs_dev = ffs_acquire_dev(dev_name); if (IS_ERR(ffs_dev)) { ffs_data_put(ffs); return ERR_CAST(ffs_dev); } ffs->private_data = ffs_dev; data.ffs_data = ffs; rv = mount_nodev(t, flags, &data, ffs_sb_fill); if (IS_ERR(rv) && data.ffs_data) { ffs_release_dev(data.ffs_data); ffs_data_put(data.ffs_data); } return rv; } static void ffs_fs_kill_sb(struct super_block *sb) { ENTER(); kill_litter_super(sb); if (sb->s_fs_info) { ffs_release_dev(sb->s_fs_info); ffs_data_put(sb->s_fs_info); } } static struct file_system_type ffs_fs_type = { .owner = THIS_MODULE, .name = "functionfs", .mount = ffs_fs_mount, .kill_sb = ffs_fs_kill_sb, }; MODULE_ALIAS_FS("functionfs"); /* Driver's main init/cleanup functions *************************************/ static int functionfs_init(void) { int ret; ENTER(); ret = register_filesystem(&ffs_fs_type); if (likely(!ret)) pr_info("file system registered\n"); else pr_err("failed registering file system (%d)\n", ret); return ret; } static void functionfs_cleanup(void) { ENTER(); pr_info("unloading\n"); unregister_filesystem(&ffs_fs_type); } /* ffs_data and ffs_function construction and destruction code **************/ static void ffs_data_clear(struct ffs_data *ffs); static void ffs_data_reset(struct ffs_data *ffs); static void ffs_data_get(struct ffs_data *ffs) { ENTER(); smp_mb__before_atomic(); atomic_inc(&ffs->ref); } static void ffs_data_opened(struct ffs_data *ffs) { ENTER(); smp_mb__before_atomic(); atomic_inc(&ffs->ref); atomic_inc(&ffs->opened); } static void ffs_data_put(struct ffs_data *ffs) { ENTER(); smp_mb__before_atomic(); if (unlikely(atomic_dec_and_test(&ffs->ref))) { pr_info("%s(): freeing\n", __func__); ffs_data_clear(ffs); 
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
		       waitqueue_active(&ffs->ep0req_completion.wait));
		kfree(ffs->dev_name);
		kfree(ffs);
	}
}

/*
 * Drop one "opened" reference.  When the last opener goes away the
 * instance is moved to FFS_CLOSING and all parsed state is reset, then
 * the matching refcount taken in ffs_data_opened() is released.
 */
static void ffs_data_closed(struct ffs_data *ffs)
{
	ENTER();

	smp_mb__before_atomic();
	if (atomic_dec_and_test(&ffs->opened)) {
		ffs->state = FFS_CLOSING;
		ffs_data_reset(ffs);
	}

	ffs_data_put(ffs);
}

/*
 * Allocate and initialize a fresh ffs_data instance.  The caller holds
 * the initial reference (ref == 1); no endpoint files exist yet and the
 * instance starts out waiting for descriptors from userspace.
 * Returns NULL on allocation failure.
 */
static struct ffs_data *ffs_data_new(void)
{
	struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
	if (unlikely(!ffs))
		return NULL;

	ENTER();

	atomic_set(&ffs->ref, 1);
	atomic_set(&ffs->opened, 0);
	ffs->state = FFS_READ_DESCRIPTORS;
	mutex_init(&ffs->mutex);
	spin_lock_init(&ffs->eps_lock);
	init_waitqueue_head(&ffs->ev.waitq);
	init_completion(&ffs->ep0req_completion);
	init_completion(&ffs->epout_completion);
	init_completion(&ffs->epin_completion);

	/* XXX REVISIT need to update it in some places, or do we? */
	ffs->ev.can_stall = 1;

	return ffs;
}

/*
 * Free everything the instance owns (ep files, raw descriptor and string
 * buffers).  Must only be called once the function is unbound — the
 * BUG_ON below enforces that gadget binding was already torn down.
 * Note: pointers are left dangling here; ffs_data_reset() NULLs them.
 */
static void ffs_data_clear(struct ffs_data *ffs)
{
	ENTER();

	pr_debug("%s: ffs->gadget= %p, ffs->flags= %lu\n",
				__func__, ffs->gadget, ffs->flags);
	ffs_closed(ffs);

	/* Dump ffs->gadget and ffs->flags */
	if (ffs->gadget)
		pr_err("%s: ffs:%p ffs->gadget= %p, ffs->flags= %lu\n",
					__func__, ffs, ffs->gadget, ffs->flags);
	BUG_ON(ffs->gadget);

	if (ffs->epfiles)
		ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);

	kfree(ffs->raw_descs_data);
	kfree(ffs->raw_strings);
	kfree(ffs->stringtabs);
}

/*
 * Clear the instance and return it to its just-allocated state so a new
 * set of descriptors/strings can be written from userspace.
 */
static void ffs_data_reset(struct ffs_data *ffs)
{
	ENTER();

	ffs_data_clear(ffs);

	ffs->epfiles = NULL;
	ffs->raw_descs_data = NULL;
	ffs->raw_descs = NULL;
	ffs->raw_strings = NULL;
	ffs->stringtabs = NULL;

	ffs->raw_descs_length = 0;
	ffs->fs_descs_count = 0;
	ffs->hs_descs_count = 0;
	ffs->ss_descs_count = 0;
	ffs->strings_count = 0;
	ffs->interfaces_count = 0;
	ffs->eps_count = 0;
	ffs->ev.count = 0;

	ffs->state = FFS_READ_DESCRIPTORS;
	ffs->setup_state = FFS_NO_SETUP;
	ffs->flags = 0;
}

/*
 * Bind the instance to a composite device: allocate USB string IDs and
 * the ep0 request.  Only valid in FFS_ACTIVE state and only once
 * (guarded by the FFS_FL_BOUND flag).
 */
static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
{
	struct usb_gadget_strings **lang;

	ENTER();

	if (WARN_ON(ffs->state !=
FFS_ACTIVE || test_and_set_bit(FFS_FL_BOUND, &ffs->flags))) return -EBADFD; if (!ffs->first_id || ffs->old_strings_count < ffs->strings_count) { int first_id = usb_string_ids_n(cdev, ffs->strings_count); if (unlikely(first_id < 0)) return first_id; ffs->first_id = first_id; ffs->old_strings_count = ffs->strings_count; } ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL); if (unlikely(!ffs->ep0req)) return -ENOMEM; ffs->ep0req->complete = ffs_ep0_complete; ffs->ep0req->context = ffs; lang = ffs->stringtabs; if (lang) { for (; *lang; ++lang) { struct usb_string *str = (*lang)->strings; int id = ffs->first_id; for (; str->s; ++id, ++str) str->id = id; } } ffs->gadget = cdev->gadget; ffs_data_get(ffs); return 0; } static void functionfs_unbind(struct ffs_data *ffs) { ENTER(); if (!WARN_ON(!ffs->gadget)) { usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req); ffs->ep0req = NULL; ffs->gadget = NULL; clear_bit(FFS_FL_BOUND, &ffs->flags); ffs_data_put(ffs); } } static int ffs_epfiles_create(struct ffs_data *ffs) { struct ffs_epfile *epfile, *epfiles; unsigned i, count; ENTER(); count = ffs->eps_count; epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL); if (!epfiles) return -ENOMEM; epfile = epfiles; for (i = 1; i <= count; ++i, ++epfile) { epfile->ffs = ffs; mutex_init(&epfile->mutex); init_waitqueue_head(&epfile->wait); atomic_set(&epfile->opened, 0); if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR) sprintf(epfiles->name, "ep%02x", ffs->eps_addrmap[i]); else sprintf(epfiles->name, "ep%u", i); epfile->dentry = ffs_sb_create_file(ffs->sb, epfiles->name, epfile, &ffs_epfile_operations); if (unlikely(!epfile->dentry)) { ffs_epfiles_destroy(epfiles, i - 1); return -ENOMEM; } } ffs->epfiles = epfiles; return 0; } static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count) { struct ffs_epfile *epfile = epfiles; ENTER(); for (; count; --count, ++epfile) { BUG_ON(mutex_is_locked(&epfile->mutex) || waitqueue_active(&epfile->wait)); if (epfile->dentry) 
{ d_delete(epfile->dentry); dput(epfile->dentry); epfile->dentry = NULL; } } kfree(epfiles); } static void ffs_func_eps_disable(struct ffs_function *func) { struct ffs_ep *ep = func->eps; struct ffs_epfile *epfile = func->ffs->epfiles; unsigned count = func->ffs->eps_count; unsigned long flags; spin_lock_irqsave(&func->ffs->eps_lock, flags); do { atomic_set(&epfile->error, 1); /* pending requests get nuked */ if (likely(ep->ep)) { usb_ep_disable(ep->ep); ep->ep->driver_data = NULL; } epfile->ep = NULL; ++ep; ++epfile; } while (--count); spin_unlock_irqrestore(&func->ffs->eps_lock, flags); } static int ffs_func_eps_enable(struct ffs_function *func) { struct ffs_data *ffs = func->ffs; struct ffs_ep *ep = func->eps; struct ffs_epfile *epfile = ffs->epfiles; unsigned count = ffs->eps_count; unsigned long flags; int ret = 0; spin_lock_irqsave(&func->ffs->eps_lock, flags); do { struct usb_endpoint_descriptor *ds; int desc_idx; if (ffs->gadget->speed == USB_SPEED_SUPER) desc_idx = 2; else if (ffs->gadget->speed == USB_SPEED_HIGH) desc_idx = 1; else desc_idx = 0; /* fall-back to lower speed if desc missing for current speed */ do { ds = ep->descs[desc_idx]; } while (!ds && --desc_idx >= 0); if (!ds) { ret = -EINVAL; break; } ep->ep->driver_data = ep; ep->ep->desc = ds; ret = config_ep_by_speed(func->gadget, &func->function, ep->ep); if (ret) { pr_err("%s(): config_ep_by_speed(%d) err for %s\n", __func__, ret, ep->ep->name); break; } ret = usb_ep_enable(ep->ep); if (likely(!ret)) { epfile->ep = ep; epfile->in = usb_endpoint_dir_in(ds); epfile->isoc = usb_endpoint_xfer_isoc(ds); } else { break; } wake_up(&epfile->wait); ++ep; ++epfile; } while (--count); spin_unlock_irqrestore(&func->ffs->eps_lock, flags); return ret; } /* Parsing and building descriptors and strings *****************************/ /* * This validates if data pointed by data is a valid USB descriptor as * well as record how many interfaces, endpoints and strings are * required by given configuration. 
Returns address after the * descriptor or NULL if data is invalid. */ enum ffs_entity_type { FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT }; enum ffs_os_desc_type { FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP }; typedef int (*ffs_entity_callback)(enum ffs_entity_type entity, u8 *valuep, struct usb_descriptor_header *desc, void *priv); typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity, struct usb_os_desc_header *h, void *data, unsigned len, void *priv); static int __must_check ffs_do_single_desc(char *data, unsigned len, ffs_entity_callback entity, void *priv) { struct usb_descriptor_header *_ds = (void *)data; u8 length; int ret; ENTER(); /* At least two bytes are required: length and type */ if (len < 2) { pr_vdebug("descriptor too short\n"); return -EINVAL; } /* If we have at least as many bytes as the descriptor takes? */ length = _ds->bLength; if (len < length) { pr_vdebug("descriptor longer then available data\n"); return -EINVAL; } #define __entity_check_INTERFACE(val) 1 #define __entity_check_STRING(val) (val) #define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK) #define __entity(type, val) do { \ pr_vdebug("entity " #type "(%02x)\n", (val)); \ if (unlikely(!__entity_check_ ##type(val))) { \ pr_vdebug("invalid entity's value\n"); \ return -EINVAL; \ } \ ret = entity(FFS_ ##type, &val, _ds, priv); \ if (unlikely(ret < 0)) { \ pr_debug("entity " #type "(%02x); ret = %d\n", \ (val), ret); \ return ret; \ } \ } while (0) /* Parse descriptor depending on type. 
*/ switch (_ds->bDescriptorType) { case USB_DT_DEVICE: case USB_DT_CONFIG: case USB_DT_STRING: case USB_DT_DEVICE_QUALIFIER: /* function can't have any of those */ pr_vdebug("descriptor reserved for gadget: %d\n", _ds->bDescriptorType); return -EINVAL; case USB_DT_INTERFACE: { struct usb_interface_descriptor *ds = (void *)_ds; pr_vdebug("interface descriptor\n"); if (length != sizeof *ds) goto inv_length; __entity(INTERFACE, ds->bInterfaceNumber); if (ds->iInterface) __entity(STRING, ds->iInterface); } break; case USB_DT_ENDPOINT: { struct usb_endpoint_descriptor *ds = (void *)_ds; pr_vdebug("endpoint descriptor\n"); if (length != USB_DT_ENDPOINT_SIZE && length != USB_DT_ENDPOINT_AUDIO_SIZE) goto inv_length; __entity(ENDPOINT, ds->bEndpointAddress); } break; case HID_DT_HID: pr_vdebug("hid descriptor\n"); if (length != sizeof(struct hid_descriptor)) goto inv_length; break; case USB_DT_OTG: if (length != sizeof(struct usb_otg_descriptor)) goto inv_length; break; case USB_DT_INTERFACE_ASSOCIATION: { struct usb_interface_assoc_descriptor *ds = (void *)_ds; pr_vdebug("interface association descriptor\n"); if (length != sizeof *ds) goto inv_length; if (ds->iFunction) __entity(STRING, ds->iFunction); } break; case USB_DT_SS_ENDPOINT_COMP: pr_vdebug("EP SS companion descriptor\n"); if (length != sizeof(struct usb_ss_ep_comp_descriptor)) goto inv_length; break; case USB_DT_OTHER_SPEED_CONFIG: case USB_DT_INTERFACE_POWER: case USB_DT_DEBUG: case USB_DT_SECURITY: case USB_DT_CS_RADIO_CONTROL: /* TODO */ pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType); return -EINVAL; default: /* We should never be here */ pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType); return -EINVAL; inv_length: pr_vdebug("invalid length: %d (descriptor %d)\n", _ds->bLength, _ds->bDescriptorType); return -EINVAL; } #undef __entity #undef __entity_check_DESCRIPTOR #undef __entity_check_INTERFACE #undef __entity_check_STRING #undef __entity_check_ENDPOINT return length; } static 
int __must_check ffs_do_descs(unsigned count, char *data, unsigned len, ffs_entity_callback entity, void *priv) { const unsigned _len = len; unsigned long num = 0; ENTER(); for (;;) { int ret; if (num == count) data = NULL; /* Record "descriptor" entity */ ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv); if (unlikely(ret < 0)) { pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n", num, ret); return ret; } if (!data) return _len - len; ret = ffs_do_single_desc(data, len, entity, priv); if (unlikely(ret < 0)) { pr_debug("%s returns %d\n", __func__, ret); return ret; } len -= ret; data += ret; ++num; } } static int __ffs_data_do_entity(enum ffs_entity_type type, u8 *valuep, struct usb_descriptor_header *desc, void *priv) { struct ffs_desc_helper *helper = priv; struct usb_endpoint_descriptor *d; ENTER(); switch (type) { case FFS_DESCRIPTOR: break; case FFS_INTERFACE: /* * Interfaces are indexed from zero so if we * encountered interface "n" then there are at least * "n+1" interfaces. 
*/ if (*valuep >= helper->interfaces_count) helper->interfaces_count = *valuep + 1; break; case FFS_STRING: /* * Strings are indexed from 1 (0 is magic ;) reserved * for languages list or some such) */ if (*valuep > helper->ffs->strings_count) helper->ffs->strings_count = *valuep; break; case FFS_ENDPOINT: d = (void *)desc; helper->eps_count++; if (helper->eps_count >= 15) return -EINVAL; /* Check if descriptors for any speed were already parsed */ if (!helper->ffs->eps_count && !helper->ffs->interfaces_count) helper->ffs->eps_addrmap[helper->eps_count] = d->bEndpointAddress; else if (helper->ffs->eps_addrmap[helper->eps_count] != d->bEndpointAddress) return -EINVAL; break; } return 0; } static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type, struct usb_os_desc_header *desc) { u16 bcd_version = le16_to_cpu(desc->bcdVersion); u16 w_index = le16_to_cpu(desc->wIndex); if (bcd_version != 1) { pr_vdebug("unsupported os descriptors version: %d", bcd_version); return -EINVAL; } switch (w_index) { case 0x4: *next_type = FFS_OS_DESC_EXT_COMPAT; break; case 0x5: *next_type = FFS_OS_DESC_EXT_PROP; break; default: pr_vdebug("unsupported os descriptor type: %d", w_index); return -EINVAL; } return sizeof(*desc); } /* * Process all extended compatibility/extended property descriptors * of a feature descriptor */ static int __must_check ffs_do_single_os_desc(char *data, unsigned len, enum ffs_os_desc_type type, u16 feature_count, ffs_os_desc_callback entity, void *priv, struct usb_os_desc_header *h) { int ret; const unsigned _len = len; ENTER(); /* loop over all ext compat/ext prop descriptors */ while (feature_count--) { ret = entity(type, h, data, len, priv); if (unlikely(ret < 0)) { pr_debug("bad OS descriptor, type: %d\n", type); return ret; } data += ret; len -= ret; } return _len - len; } /* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */ static int __must_check ffs_do_os_descs(unsigned count, char *data, unsigned len, 
ffs_os_desc_callback entity, void *priv) { const unsigned _len = len; unsigned long num = 0; ENTER(); for (num = 0; num < count; ++num) { int ret; enum ffs_os_desc_type type; u16 feature_count; struct usb_os_desc_header *desc = (void *)data; if (len < sizeof(*desc)) return -EINVAL; /* * Record "descriptor" entity. * Process dwLength, bcdVersion, wIndex, get b/wCount. * Move the data pointer to the beginning of extended * compatibilities proper or extended properties proper * portions of the data */ if (le32_to_cpu(desc->dwLength) > len) return -EINVAL; ret = __ffs_do_os_desc_header(&type, desc); if (unlikely(ret < 0)) { pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n", num, ret); return ret; } /* * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??" */ feature_count = le16_to_cpu(desc->wCount); if (type == FFS_OS_DESC_EXT_COMPAT && (feature_count > 255 || desc->Reserved)) return -EINVAL; len -= ret; data += ret; /* * Process all function/property descriptors * of this Feature Descriptor */ ret = ffs_do_single_os_desc(data, len, type, feature_count, entity, priv, desc); if (unlikely(ret < 0)) { pr_debug("%s returns %d\n", __func__, ret); return ret; } len -= ret; data += ret; } return _len - len; } /** * Validate contents of the buffer from userspace related to OS descriptors. 
*/ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, struct usb_os_desc_header *h, void *data, unsigned len, void *priv) { struct ffs_data *ffs = priv; u8 length; ENTER(); switch (type) { case FFS_OS_DESC_EXT_COMPAT: { struct usb_ext_compat_desc *d = data; int i; if (len < sizeof(*d) || d->bFirstInterfaceNumber >= ffs->interfaces_count || d->Reserved1) return -EINVAL; for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) if (d->Reserved2[i]) return -EINVAL; length = sizeof(struct usb_ext_compat_desc); } break; case FFS_OS_DESC_EXT_PROP: { struct usb_ext_prop_desc *d = data; u32 type, pdl; u16 pnl; if (len < sizeof(*d) || h->interface >= ffs->interfaces_count) return -EINVAL; length = le32_to_cpu(d->dwSize); type = le32_to_cpu(d->dwPropertyDataType); if (type < USB_EXT_PROP_UNICODE || type > USB_EXT_PROP_UNICODE_MULTI) { pr_vdebug("unsupported os descriptor property type: %d", type); return -EINVAL; } pnl = le16_to_cpu(d->wPropertyNameLength); pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl)); if (length != 14 + pnl + pdl) { pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n", length, pnl, pdl, type); return -EINVAL; } ++ffs->ms_os_descs_ext_prop_count; /* property name reported to the host as "WCHAR"s */ ffs->ms_os_descs_ext_prop_name_len += pnl * 2; ffs->ms_os_descs_ext_prop_data_len += pdl; } break; default: pr_vdebug("unknown descriptor: %d\n", type); return -EINVAL; } return length; } static int __ffs_data_got_descs(struct ffs_data *ffs, char *const _data, size_t len) { char *data = _data, *raw_descs; unsigned os_descs_count = 0, counts[3], flags; int ret = -EINVAL, i; struct ffs_desc_helper helper; ENTER(); if (get_unaligned_le32(data + 4) != len) goto error; switch (get_unaligned_le32(data)) { case FUNCTIONFS_DESCRIPTORS_MAGIC: flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC; data += 8; len -= 8; break; case FUNCTIONFS_DESCRIPTORS_MAGIC_V2: flags = get_unaligned_le32(data + 8); ffs->user_flags = flags; if (flags & 
~(FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC | FUNCTIONFS_HAS_SS_DESC | FUNCTIONFS_HAS_MS_OS_DESC | FUNCTIONFS_VIRTUAL_ADDR)) { ret = -ENOSYS; goto error; } data += 12; len -= 12; break; default: goto error; } /* Read fs_count, hs_count and ss_count (if present) */ for (i = 0; i < 3; ++i) { if (!(flags & (1 << i))) { counts[i] = 0; } else if (len < 4) { goto error; } else { counts[i] = get_unaligned_le32(data); data += 4; len -= 4; } } if (flags & (1 << i)) { os_descs_count = get_unaligned_le32(data); data += 4; len -= 4; }; /* Read descriptors */ raw_descs = data; helper.ffs = ffs; for (i = 0; i < 3; ++i) { if (!counts[i]) continue; helper.interfaces_count = 0; helper.eps_count = 0; ret = ffs_do_descs(counts[i], data, len, __ffs_data_do_entity, &helper); if (ret < 0) goto error; if (!ffs->eps_count && !ffs->interfaces_count) { ffs->eps_count = helper.eps_count; ffs->interfaces_count = helper.interfaces_count; } else { if (ffs->eps_count != helper.eps_count) { ret = -EINVAL; goto error; } if (ffs->interfaces_count != helper.interfaces_count) { ret = -EINVAL; goto error; } } data += ret; len -= ret; } if (os_descs_count) { ret = ffs_do_os_descs(os_descs_count, data, len, __ffs_data_do_os_desc, ffs); if (ret < 0) goto error; data += ret; len -= ret; } if (raw_descs == data || len) { ret = -EINVAL; goto error; } ffs->raw_descs_data = _data; ffs->raw_descs = raw_descs; ffs->raw_descs_length = data - raw_descs; ffs->fs_descs_count = counts[0]; ffs->hs_descs_count = counts[1]; ffs->ss_descs_count = counts[2]; ffs->ms_os_descs_count = os_descs_count; return 0; error: kfree(_data); return ret; } static int __ffs_data_got_strings(struct ffs_data *ffs, char *const _data, size_t len) { u32 str_count, needed_count, lang_count; struct usb_gadget_strings **stringtabs, *t; struct usb_string *strings, *s; const char *data = _data; ENTER(); if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC || get_unaligned_le32(data + 4) != len)) goto error; str_count = 
get_unaligned_le32(data + 8); lang_count = get_unaligned_le32(data + 12); /* if one is zero the other must be zero */ if (unlikely(!str_count != !lang_count)) goto error; /* Do we have at least as many strings as descriptors need? */ needed_count = ffs->strings_count; if (unlikely(str_count < needed_count)) goto error; /* * If we don't need any strings just return and free all * memory. */ if (!needed_count) { kfree(_data); return 0; } /* Allocate everything in one chunk so there's less maintenance. */ { unsigned i = 0; vla_group(d); vla_item(d, struct usb_gadget_strings *, stringtabs, lang_count + 1); vla_item(d, struct usb_gadget_strings, stringtab, lang_count); vla_item(d, struct usb_string, strings, lang_count*(needed_count+1)); char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL); if (unlikely(!vlabuf)) { kfree(_data); return -ENOMEM; } /* Initialize the VLA pointers */ stringtabs = vla_ptr(vlabuf, d, stringtabs); t = vla_ptr(vlabuf, d, stringtab); i = lang_count; do { *stringtabs++ = t++; } while (--i); *stringtabs = NULL; /* stringtabs = vlabuf = d_stringtabs for later kfree */ stringtabs = vla_ptr(vlabuf, d, stringtabs); t = vla_ptr(vlabuf, d, stringtab); s = vla_ptr(vlabuf, d, strings); strings = s; } /* For each language */ data += 16; len -= 16; do { /* lang_count > 0 so we can use do-while */ unsigned needed = needed_count; if (unlikely(len < 3)) goto error_free; t->language = get_unaligned_le16(data); t->strings = s; ++t; data += 2; len -= 2; /* For each string */ do { /* str_count > 0 so we can use do-while */ size_t length = strnlen(data, len); if (unlikely(length == len)) goto error_free; /* * User may provide more strings then we need, * if that's the case we simply ignore the * rest */ if (likely(needed)) { /* * s->id will be set while adding * function to configuration so for * now just leave garbage here. 
*/ s->s = data; --needed; ++s; } data += length + 1; len -= length + 1; } while (--str_count); s->id = 0; /* terminator */ s->s = NULL; ++s; } while (--lang_count); /* Some garbage left? */ if (unlikely(len)) goto error_free; /* Done! */ ffs->stringtabs = stringtabs; ffs->raw_strings = _data; return 0; error_free: kfree(stringtabs); error: kfree(_data); return -EINVAL; } /* Events handling and management *******************************************/ static void __ffs_event_add(struct ffs_data *ffs, enum usb_functionfs_event_type type) { enum usb_functionfs_event_type rem_type1, rem_type2 = type; int neg = 0; /* * Abort any unhandled setup * * We do not need to worry about some cmpxchg() changing value * of ffs->setup_state without holding the lock because when * state is FFS_SETUP_PENDING cmpxchg() in several places in * the source does nothing. */ if (ffs->setup_state == FFS_SETUP_PENDING) ffs->setup_state = FFS_SETUP_CANCELLED; switch (type) { case FUNCTIONFS_RESUME: rem_type2 = FUNCTIONFS_SUSPEND; /* FALL THROUGH */ case FUNCTIONFS_SUSPEND: case FUNCTIONFS_SETUP: rem_type1 = type; /* Discard all similar events */ break; case FUNCTIONFS_BIND: case FUNCTIONFS_UNBIND: case FUNCTIONFS_DISABLE: case FUNCTIONFS_ENABLE: /* Discard everything other then power management. 
*/ rem_type1 = FUNCTIONFS_SUSPEND; rem_type2 = FUNCTIONFS_RESUME; neg = 1; break; default: WARN(1, "%d: unknown event, this should not happen\n", type); return; } { u8 *ev = ffs->ev.types, *out = ev; unsigned n = ffs->ev.count; for (; n; --n, ++ev) if ((*ev == rem_type1 || *ev == rem_type2) == neg) *out++ = *ev; else pr_vdebug("purging event %d\n", *ev); ffs->ev.count = out - ffs->ev.types; } pr_vdebug("adding event %d\n", type); ffs->ev.types[ffs->ev.count++] = type; wake_up_locked(&ffs->ev.waitq); } static void ffs_event_add(struct ffs_data *ffs, enum usb_functionfs_event_type type) { unsigned long flags; spin_lock_irqsave(&ffs->ev.waitq.lock, flags); __ffs_event_add(ffs, type); spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); } /* Bind/unbind USB function hooks *******************************************/ static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address) { int i; for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i) if (ffs->eps_addrmap[i] == endpoint_address) return i; return -ENOENT; } static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep, struct usb_descriptor_header *desc, void *priv) { struct usb_endpoint_descriptor *ds = (void *)desc; struct ffs_function *func = priv; struct ffs_ep *ffs_ep; unsigned ep_desc_id; int idx; static const char *speed_names[] = { "full", "high", "super" }; if (type != FFS_DESCRIPTOR) return 0; /* * If ss_descriptors is not NULL, we are reading super speed * descriptors; if hs_descriptors is not NULL, we are reading high * speed descriptors; otherwise, we are reading full speed * descriptors. 
*/ if (func->function.ss_descriptors) { ep_desc_id = 2; func->function.ss_descriptors[(long)valuep] = desc; } else if (func->function.hs_descriptors) { ep_desc_id = 1; func->function.hs_descriptors[(long)valuep] = desc; } else { ep_desc_id = 0; func->function.fs_descriptors[(long)valuep] = desc; } if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) return 0; idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1; if (idx < 0) return idx; ffs_ep = func->eps + idx; if (unlikely(ffs_ep->descs[ep_desc_id])) { pr_err("two %sspeed descriptors for EP %d\n", speed_names[ep_desc_id], ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); return -EINVAL; } ffs_ep->descs[ep_desc_id] = ds; ffs_dump_mem(": Original ep desc", ds, ds->bLength); if (ffs_ep->ep) { ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress; if (!ds->wMaxPacketSize) ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize; } else { struct usb_request *req; struct usb_ep *ep; u8 bEndpointAddress; /* * We back up bEndpointAddress because autoconfig overwrites * it with physical endpoint address. */ bEndpointAddress = ds->bEndpointAddress; pr_vdebug("autoconfig\n"); ep = usb_ep_autoconfig(func->gadget, ds); if (unlikely(!ep)) return -ENOTSUPP; ep->driver_data = func->eps + idx; req = usb_ep_alloc_request(ep, GFP_KERNEL); if (unlikely(!req)) return -ENOMEM; ffs_ep->ep = ep; ffs_ep->req = req; func->eps_revmap[ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK] = idx + 1; /* * If we use virtual address mapping, we restore * original bEndpointAddress value. 
*/ if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR) ds->bEndpointAddress = bEndpointAddress; } ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength); return 0; } static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep, struct usb_descriptor_header *desc, void *priv) { struct ffs_function *func = priv; unsigned idx; u8 newValue; switch (type) { default: case FFS_DESCRIPTOR: /* Handled in previous pass by __ffs_func_bind_do_descs() */ return 0; case FFS_INTERFACE: idx = *valuep; if (func->interfaces_nums[idx] < 0) { int id = usb_interface_id(func->conf, &func->function); if (unlikely(id < 0)) return id; func->interfaces_nums[idx] = id; } newValue = func->interfaces_nums[idx]; break; case FFS_STRING: /* String' IDs are allocated when fsf_data is bound to cdev */ newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id; break; case FFS_ENDPOINT: /* * USB_DT_ENDPOINT are handled in * __ffs_func_bind_do_descs(). */ if (desc->bDescriptorType == USB_DT_ENDPOINT) return 0; idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1; if (unlikely(!func->eps[idx].ep)) return -EINVAL; { struct usb_endpoint_descriptor **descs; descs = func->eps[idx].descs; newValue = descs[descs[0] ? 
0 : 1]->bEndpointAddress; } break; } pr_vdebug("%02x -> %02x\n", *valuep, newValue); *valuep = newValue; return 0; } static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type, struct usb_os_desc_header *h, void *data, unsigned len, void *priv) { struct ffs_function *func = priv; u8 length = 0; switch (type) { case FFS_OS_DESC_EXT_COMPAT: { struct usb_ext_compat_desc *desc = data; struct usb_os_desc_table *t; t = &func->function.os_desc_table[desc->bFirstInterfaceNumber]; t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber]; memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID, ARRAY_SIZE(desc->CompatibleID) + ARRAY_SIZE(desc->SubCompatibleID)); length = sizeof(*desc); } break; case FFS_OS_DESC_EXT_PROP: { struct usb_ext_prop_desc *desc = data; struct usb_os_desc_table *t; struct usb_os_desc_ext_prop *ext_prop; char *ext_prop_name; char *ext_prop_data; t = &func->function.os_desc_table[h->interface]; t->if_id = func->interfaces_nums[h->interface]; ext_prop = func->ffs->ms_os_descs_ext_prop_avail; func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop); ext_prop->type = le32_to_cpu(desc->dwPropertyDataType); ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength); ext_prop->data_len = le32_to_cpu(*(u32 *) usb_ext_prop_data_len_ptr(data, ext_prop->name_len)); length = ext_prop->name_len + ext_prop->data_len + 14; ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail; func->ffs->ms_os_descs_ext_prop_name_avail += ext_prop->name_len; ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail; func->ffs->ms_os_descs_ext_prop_data_avail += ext_prop->data_len; memcpy(ext_prop_data, usb_ext_prop_data_ptr(data, ext_prop->name_len), ext_prop->data_len); /* unicode data reported to the host as "WCHAR"s */ switch (ext_prop->type) { case USB_EXT_PROP_UNICODE: case USB_EXT_PROP_UNICODE_ENV: case USB_EXT_PROP_UNICODE_LINK: case USB_EXT_PROP_UNICODE_MULTI: ext_prop->data_len *= 2; break; } ext_prop->data = ext_prop_data; memcpy(ext_prop_name, 
usb_ext_prop_name_ptr(data), ext_prop->name_len); /* property name reported to the host as "WCHAR"s */ ext_prop->name_len *= 2; ext_prop->name = ext_prop_name; t->os_desc->ext_prop_len += ext_prop->name_len + ext_prop->data_len + 14; ++t->os_desc->ext_prop_count; list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop); } break; default: pr_vdebug("unknown descriptor: %d\n", type); } return length; } static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f, struct usb_configuration *c) { struct ffs_function *func = ffs_func_from_usb(f); struct f_fs_opts *ffs_opts = container_of(f->fi, struct f_fs_opts, func_inst); int ret; ENTER(); /* * Legacy gadget triggers binding in functionfs_ready_callback, * which already uses locking; taking the same lock here would * cause a deadlock. * * Configfs-enabled gadgets however do need ffs_dev_lock. */ if (!ffs_opts->no_configfs) ffs_dev_lock(); ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV; func->ffs = ffs_opts->dev->ffs_data; if (!ffs_opts->no_configfs) ffs_dev_unlock(); if (ret) return ERR_PTR(ret); func->conf = c; func->gadget = c->cdev->gadget; /* * in drivers/usb/gadget/configfs.c:configfs_composite_bind() * configurations are bound in sequence with list_for_each_entry, * in each configuration its functions are bound in sequence * with list_for_each_entry, so we assume no race condition * with regard to ffs_opts->bound access */ if (!ffs_opts->refcnt) { ret = functionfs_bind(func->ffs, c->cdev); if (ret) return ERR_PTR(ret); } ffs_opts->refcnt++; func->function.strings = func->ffs->stringtabs; return ffs_opts; } static int _ffs_func_bind(struct usb_configuration *c, struct usb_function *f) { struct ffs_function *func = ffs_func_from_usb(f); struct ffs_data *ffs = func->ffs; const int full = !!func->ffs->fs_descs_count; const int high = gadget_is_dualspeed(func->gadget) && func->ffs->hs_descs_count; const int super = gadget_is_superspeed(func->gadget) && func->ffs->ss_descs_count; int fs_len, hs_len, 
ss_len, ret, i; /* Make it a single chunk, less management later on */ vla_group(d); vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count); vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs, full ? ffs->fs_descs_count + 1 : 0); vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs, high ? ffs->hs_descs_count + 1 : 0); vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs, super ? ffs->ss_descs_count + 1 : 0); vla_item_with_sz(d, short, inums, ffs->interfaces_count); vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table, c->cdev->use_os_string ? ffs->interfaces_count : 0); vla_item_with_sz(d, char[16], ext_compat, c->cdev->use_os_string ? ffs->interfaces_count : 0); vla_item_with_sz(d, struct usb_os_desc, os_desc, c->cdev->use_os_string ? ffs->interfaces_count : 0); vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop, ffs->ms_os_descs_ext_prop_count); vla_item_with_sz(d, char, ext_prop_name, ffs->ms_os_descs_ext_prop_name_len); vla_item_with_sz(d, char, ext_prop_data, ffs->ms_os_descs_ext_prop_data_len); vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length); char *vlabuf; ENTER(); /* Has descriptors only for speeds gadget does not support */ if (unlikely(!(full | high | super))) return -ENOTSUPP; /* Allocate a single chunk, less management later on */ vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL); if (unlikely(!vlabuf)) return -ENOMEM; ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop); ffs->ms_os_descs_ext_prop_name_avail = vla_ptr(vlabuf, d, ext_prop_name); ffs->ms_os_descs_ext_prop_data_avail = vla_ptr(vlabuf, d, ext_prop_data); /* Copy descriptors */ memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs, ffs->raw_descs_length); memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz); for (ret = ffs->eps_count; ret; --ret) { struct ffs_ep *ptr; ptr = vla_ptr(vlabuf, d, eps); ptr[ret].num = -1; } /* Save pointers * d_eps == vlabuf, func->eps used to kfree vlabuf later */ func->eps = vla_ptr(vlabuf, d, eps); 
func->interfaces_nums = vla_ptr(vlabuf, d, inums); /* * Go through all the endpoint descriptors and allocate * endpoints first, so that later we can rewrite the endpoint * numbers without worrying that it may be described later on. */ if (likely(full)) { func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs); fs_len = ffs_do_descs(ffs->fs_descs_count, vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz, __ffs_func_bind_do_descs, func); if (unlikely(fs_len < 0)) { ret = fs_len; goto error; } } else { fs_len = 0; } if (likely(high)) { func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs); hs_len = ffs_do_descs(ffs->hs_descs_count, vla_ptr(vlabuf, d, raw_descs) + fs_len, d_raw_descs__sz - fs_len, __ffs_func_bind_do_descs, func); if (unlikely(hs_len < 0)) { ret = hs_len; goto error; } } else { hs_len = 0; } if (likely(super)) { func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs); ss_len = ffs_do_descs(ffs->ss_descs_count, vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len, d_raw_descs__sz - fs_len - hs_len, __ffs_func_bind_do_descs, func); if (unlikely(ss_len < 0)) { ret = ss_len; goto error; } } else { ss_len = 0; } /* * Now handle interface numbers allocation and interface and * endpoint numbers rewriting. We can do that in one go * now. */ ret = ffs_do_descs(ffs->fs_descs_count + (high ? ffs->hs_descs_count : 0) + (super ? 
ffs->ss_descs_count : 0), vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz, __ffs_func_bind_do_nums, func); if (unlikely(ret < 0)) goto error; func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table); if (c->cdev->use_os_string) for (i = 0; i < ffs->interfaces_count; ++i) { struct usb_os_desc *desc; desc = func->function.os_desc_table[i].os_desc = vla_ptr(vlabuf, d, os_desc) + i * sizeof(struct usb_os_desc); desc->ext_compat_id = vla_ptr(vlabuf, d, ext_compat) + i * 16; INIT_LIST_HEAD(&desc->ext_prop); } ret = ffs_do_os_descs(ffs->ms_os_descs_count, vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len + ss_len, d_raw_descs__sz - fs_len - hs_len - ss_len, __ffs_func_bind_do_os_desc, func); if (unlikely(ret < 0)) goto error; func->function.os_desc_n = c->cdev->use_os_string ? ffs->interfaces_count : 0; /* And we're done */ ffs_event_add(ffs, FUNCTIONFS_BIND); return 0; error: /* XXX Do we need to release all claimed endpoints here? */ return ret; } static int ffs_func_bind(struct usb_configuration *c, struct usb_function *f) { struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c); if (IS_ERR(ffs_opts)) return PTR_ERR(ffs_opts); return _ffs_func_bind(c, f); } /* Other USB function hooks *************************************************/ static int ffs_func_set_alt(struct usb_function *f, unsigned interface, unsigned alt) { struct ffs_function *func = ffs_func_from_usb(f); struct ffs_data *ffs = func->ffs; int ret = 0, intf; if (alt != (unsigned)-1) { intf = ffs_func_revmap_intf(func, interface); if (unlikely(intf < 0)) return intf; } if (ffs->func) { ffs_func_eps_disable(ffs->func); ffs->func = NULL; } if (ffs->state != FFS_ACTIVE) return -ENODEV; if (alt == (unsigned)-1) { ffs->func = NULL; ffs_event_add(ffs, FUNCTIONFS_DISABLE); return 0; } ffs->func = func; ret = ffs_func_eps_enable(func); if (likely(ret >= 0)) { ffs_event_add(ffs, FUNCTIONFS_ENABLE); /* Disable USB LPM later on bus_suspend */ usb_gadget_autopm_get_async(ffs->gadget); } return ret; } static 
void ffs_func_disable(struct usb_function *f) { struct ffs_function *func = ffs_func_from_usb(f); struct ffs_data *ffs = func->ffs; ffs_func_set_alt(f, 0, (unsigned)-1); /* matching put to allow LPM on disconnect */ usb_gadget_autopm_put_async(ffs->gadget); } static int ffs_func_setup(struct usb_function *f, const struct usb_ctrlrequest *creq) { struct ffs_function *func = ffs_func_from_usb(f); struct ffs_data *ffs = func->ffs; unsigned long flags; int ret; ENTER(); pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType); pr_vdebug("creq->bRequest = %02x\n", creq->bRequest); pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue)); pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex)); pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength)); /* * Most requests directed to interface go through here * (notable exceptions are set/get interface) so we need to * handle them. All other either handled by composite or * passed to usb_configuration->setup() (if one is set). No * matter, we will handle requests directed to endpoint here * as well (as it's straightforward) but what to do with any * other request? 
*/ if (ffs->state != FFS_ACTIVE) return -ENODEV; switch (creq->bRequestType & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex)); if (unlikely(ret < 0)) return ret; break; case USB_RECIP_ENDPOINT: ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex)); if (unlikely(ret < 0)) return ret; if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR) ret = func->ffs->eps_addrmap[ret]; break; default: return -EOPNOTSUPP; } spin_lock_irqsave(&ffs->ev.waitq.lock, flags); ffs->ev.setup = *creq; ffs->ev.setup.wIndex = cpu_to_le16(ret); __ffs_event_add(ffs, FUNCTIONFS_SETUP); spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); return 0; } static void ffs_func_suspend(struct usb_function *f) { ENTER(); ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND); } static void ffs_func_resume(struct usb_function *f) { ENTER(); ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME); } /* Endpoint and interface numbers reverse mapping ***************************/ static int ffs_func_revmap_ep(struct ffs_function *func, u8 num) { num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK]; return num ? 
num : -EDOM; } static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf) { short *nums = func->interfaces_nums; unsigned count = func->ffs->interfaces_count; for (; count; --count, ++nums) { if (*nums >= 0 && *nums == intf) return nums - func->interfaces_nums; } return -EDOM; } /* Devices management *******************************************************/ static LIST_HEAD(ffs_devices); static struct ffs_dev *_ffs_do_find_dev(const char *name) { struct ffs_dev *dev; list_for_each_entry(dev, &ffs_devices, entry) { if (!dev->name || !name) continue; if (strcmp(dev->name, name) == 0) return dev; } return NULL; } /* * ffs_lock must be taken by the caller of this function */ static struct ffs_dev *_ffs_get_single_dev(void) { struct ffs_dev *dev; if (list_is_singular(&ffs_devices)) { dev = list_first_entry(&ffs_devices, struct ffs_dev, entry); if (dev->single) return dev; } return NULL; } /* * ffs_lock must be taken by the caller of this function */ static struct ffs_dev *_ffs_find_dev(const char *name) { struct ffs_dev *dev; dev = _ffs_get_single_dev(); if (dev) return dev; return _ffs_do_find_dev(name); } /* Configfs support *********************************************************/ static inline struct f_fs_opts *to_ffs_opts(struct config_item *item) { return container_of(to_config_group(item), struct f_fs_opts, func_inst.group); } static void ffs_attr_release(struct config_item *item) { struct f_fs_opts *opts = to_ffs_opts(item); usb_put_function_instance(&opts->func_inst); } static struct configfs_item_operations ffs_item_ops = { .release = ffs_attr_release, }; static struct config_item_type ffs_func_type = { .ct_item_ops = &ffs_item_ops, .ct_owner = THIS_MODULE, }; /* Function registration interface ******************************************/ static void ffs_free_inst(struct usb_function_instance *f) { struct f_fs_opts *opts; opts = to_f_fs_opts(f); ffs_dev_lock(); _ffs_free_dev(opts->dev); ffs_dev_unlock(); kfree(opts); } #define MAX_INST_NAME_LEN 40 
/*
 * Configfs "rename" hook for a FunctionFS instance: validate the new name,
 * duplicate it, and swap it into the backing ffs_dev under ffs_dev_lock.
 * The previously allocated name (if any) is freed only after the lock is
 * dropped, so no allocation/free happens while holding ffs_dev_lock longer
 * than necessary.
 */
static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
{
	struct f_fs_opts *opts;
	char *ptr;
	const char *tmp;
	int name_len, ret;

	/* +1 accounts for the terminating NUL in the length check below. */
	name_len = strlen(name) + 1;
	if (name_len > MAX_INST_NAME_LEN)
		return -ENAMETOOLONG;

	/* name_len > strlen(name), so the whole string is duplicated. */
	ptr = kstrndup(name, name_len, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	opts = to_f_fs_opts(fi);
	tmp = NULL;

	ffs_dev_lock();

	/* Remember the old name only if we own it, so it can be freed. */
	tmp = opts->dev->name_allocated ? opts->dev->name : NULL;
	ret = _ffs_name_dev(opts->dev, ptr);
	if (ret) {
		kfree(ptr);
		ffs_dev_unlock();
		return ret;
	}
	opts->dev->name_allocated = true;

	ffs_dev_unlock();

	/* Free outside the lock; tmp is NULL when the old name wasn't ours. */
	kfree(tmp);

	return 0;
}

/*
 * Allocate a usb_function_instance together with its backing ffs_dev.
 * On failure of the device allocation the opts structure is freed and the
 * error is propagated via ERR_CAST.
 */
static struct usb_function_instance *ffs_alloc_inst(void)
{
	struct f_fs_opts *opts;
	struct ffs_dev *dev;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	opts->func_inst.set_inst_name = ffs_set_inst_name;
	opts->func_inst.free_func_inst = ffs_free_inst;
	ffs_dev_lock();
	dev = _ffs_alloc_dev();
	ffs_dev_unlock();
	if (IS_ERR(dev)) {
		kfree(opts);
		return ERR_CAST(dev);
	}
	/* Link opts and dev both ways so either can find the other. */
	opts->dev = dev;
	dev->opts = opts;

	config_group_init_type_name(&opts->func_inst.group, "",
				    &ffs_func_type);
	return &opts->func_inst;
}

/* Free the ffs_function wrapper allocated in ffs_alloc(). */
static void ffs_free(struct usb_function *f)
{
	kfree(ffs_func_from_usb(f));
}

/*
 * Undo ffs_func_bind(): disable endpoints if this function is active, drop
 * the opts refcount (unbinding the ffs_data on the last reference), release
 * every endpoint request allocated during autoconfig, and signal UNBIND to
 * userspace.
 */
static void ffs_func_unbind(struct usb_configuration *c,
			    struct usb_function *f)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	struct f_fs_opts *opts = container_of(f->fi, struct f_fs_opts,
					      func_inst);
	struct ffs_ep *ep = func->eps;
	unsigned count = ffs->eps_count;
	unsigned long flags;

	ENTER();
	if (ffs->func == func) {
		ffs_func_eps_disable(func);
		ffs->func = NULL;
	}

	if (!--opts->refcnt)
		functionfs_unbind(ffs);

	/* cleanup after autoconfig */
	spin_lock_irqsave(&func->ffs->eps_lock, flags);
	do {
		if (ep->ep && ep->req)
			usb_ep_free_request(ep->ep, ep->req);
		ep->req = NULL;
		ep->ep = NULL;
		++ep;
	} while (--count);
	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
	kfree(func->eps);
	func->eps = NULL;
	/*
	 * eps, descriptors and interfaces_nums are allocated in the
	 * same chunk so only one free is required.
	 */
	func->function.fs_descriptors = NULL;
	func->function.hs_descriptors = NULL;
	func->function.ss_descriptors = NULL;
	func->interfaces_nums = NULL;

	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
}

/*
 * Allocate the ffs_function wrapper and wire up all usb_function
 * callbacks implemented in this file.
 */
static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
{
	struct ffs_function *func;

	ENTER();

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (unlikely(!func))
		return ERR_PTR(-ENOMEM);

	func->function.name = "Function FS Gadget";

	func->function.bind = ffs_func_bind;
	func->function.unbind = ffs_func_unbind;
	func->function.set_alt = ffs_func_set_alt;
	func->function.disable = ffs_func_disable;
	func->function.setup = ffs_func_setup;
	func->function.suspend = ffs_func_suspend;
	func->function.resume = ffs_func_resume;
	func->function.free_func = ffs_free;

	return &func->function;
}

/*
 * ffs_lock must be taken by the caller of this function
 */
static struct ffs_dev *_ffs_alloc_dev(void)
{
	struct ffs_dev *dev;
	int ret;

	/* Only one device may exist while a "single" device is registered. */
	if (_ffs_get_single_dev())
		return ERR_PTR(-EBUSY);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/* First device brings the whole functionfs core up. */
	if (list_empty(&ffs_devices)) {
		ret = functionfs_init();
		if (ret) {
			kfree(dev);
			return ERR_PTR(ret);
		}
	}

	list_add(&dev->entry, &ffs_devices);

	return dev;
}

/*
 * ffs_lock must be taken by the caller of this function
 * The caller is responsible for "name" being available whenever f_fs needs it
 */
static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
{
	struct ffs_dev *existing;

	/* Names must be unique across all registered devices. */
	existing = _ffs_do_find_dev(name);
	if (existing)
		return -EBUSY;

	dev->name = name;

	return 0;
}

/*
 * The caller is responsible for "name" being available whenever f_fs needs it
 */
int ffs_name_dev(struct ffs_dev *dev, const char *name)
{
	int ret;

	ffs_dev_lock();
	ret = _ffs_name_dev(dev, name);
	ffs_dev_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(ffs_name_dev);

/*
 * Mark @dev as the sole permitted device; fails with -EBUSY if more than
 * one device is already registered.
 */
int ffs_single_dev(struct ffs_dev *dev)
{
	int ret;

	ret = 0;
	ffs_dev_lock();

	if (!list_is_singular(&ffs_devices))
		ret = -EBUSY;
	else
		dev->single = true;

	ffs_dev_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(ffs_single_dev);

/*
 * ffs_lock must be taken by the caller of this function
 */
static void _ffs_free_dev(struct ffs_dev *dev)
{
	list_del(&dev->entry);
	/* Only free the name if we duplicated it in ffs_set_inst_name(). */
	if (dev->name_allocated)
		kfree(dev->name);
	kfree(dev);
	/* Last device tears the functionfs core down again. */
	if (list_empty(&ffs_devices))
		functionfs_cleanup();
}

/*
 * Look up a device by name and mark it mounted.  Returns the ffs_dev or an
 * ERR_PTR: -ENOENT if not found (or vetoed by the acquire callback),
 * -EBUSY if already mounted.
 */
static void *ffs_acquire_dev(const char *dev_name)
{
	struct ffs_dev *ffs_dev;

	ENTER();
	ffs_dev_lock();

	ffs_dev = _ffs_find_dev(dev_name);
	if (!ffs_dev)
		ffs_dev = ERR_PTR(-ENOENT);
	else if (ffs_dev->mounted)
		ffs_dev = ERR_PTR(-EBUSY);
	else if (ffs_dev->ffs_acquire_dev_callback &&
		 ffs_dev->ffs_acquire_dev_callback(ffs_dev))
		ffs_dev = ERR_PTR(-ENOENT);
	else
		ffs_dev->mounted = true;

	ffs_dev_unlock();
	return ffs_dev;
}

/* Counterpart of ffs_acquire_dev(): clear mounted and notify the owner. */
static void ffs_release_dev(struct ffs_data *ffs_data)
{
	struct ffs_dev *ffs_dev;

	ENTER();
	ffs_dev_lock();

	ffs_dev = ffs_data->private_data;
	if (ffs_dev) {
		ffs_dev->mounted = false;

		if (ffs_dev->ffs_release_dev_callback)
			ffs_dev->ffs_release_dev_callback(ffs_dev);
	}

	ffs_dev_unlock();
}

/*
 * Called when userspace has written all descriptors/strings: mark the
 * device ready and invoke the owner's ready callback.  Arms the
 * CALL_CLOSED_CALLBACK flag so ffs_closed() knows to notify on teardown.
 */
static int ffs_ready(struct ffs_data *ffs)
{
	struct ffs_dev *ffs_obj;
	int ret = 0;

	ENTER();
	ffs_dev_lock();

	ffs_obj = ffs->private_data;
	if (!ffs_obj) {
		ret = -EINVAL;
		goto done;
	}
	if (WARN_ON(ffs_obj->desc_ready)) {
		ret = -EBUSY;
		goto done;
	}

	ffs_obj->desc_ready = true;
	ffs_obj->ffs_data = ffs;

	if (ffs_obj->ffs_ready_callback) {
		ret = ffs_obj->ffs_ready_callback(ffs);
		if (ret)
			goto done;
	}

	set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
done:
	ffs_dev_unlock();
	return ret;
}

/*
 * Called when the userspace side goes away: invoke the closed callback (at
 * most once, guarded by test_and_clear_bit) and, if the instance is still
 * linked into a live configfs gadget, unregister that gadget item.
 */
static void ffs_closed(struct ffs_data *ffs)
{
	struct ffs_dev *ffs_obj;
	struct f_fs_opts *opts;

	ENTER();
	ffs_dev_lock();

	ffs_obj = ffs->private_data;
	if (!ffs_obj)
		goto done;

	ffs_obj->desc_ready = false;

	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
	    ffs_obj->ffs_closed_callback)
		ffs_obj->ffs_closed_callback(ffs);

	if (ffs_obj->opts)
		opts = ffs_obj->opts;
	else
		goto done;

	/*
	 * Skip the unregister when configfs is disabled, the item is not
	 * parented, or its refcount has already dropped to zero.
	 */
	if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent ||
	    !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
		goto done;

	unregister_gadget_item(ffs_obj->opts->
			       func_inst.group.cg_item.ci_parent->ci_parent);
done:
	ffs_dev_unlock();
}

/* Misc helper functions ****************************************************/

/*
 * Take @mutex, either non-blocking (trylock, -EAGAIN on contention) or
 * interruptibly depending on @nonblock.  Returns 0 on success.
 */
static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
{
	return nonblock
		? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
		: mutex_lock_interruptible(mutex);
}

/**
 * ffs_prepare_buffer() - copy userspace buffer into kernel.
 * @buf: userspace buffer
 * @len: length of the buffer
 * @extra_buf_alloc: Extra buffer allocation if required by UDC.
 *
 * This function returns pointer to the copied buffer (NULL when @len is 0,
 * or an ERR_PTR on allocation/copy failure).
 */
static char *ffs_prepare_buffer(const char __user *buf, size_t len,
				size_t extra_buf_alloc)
{
	char *data;

	if (unlikely(!len))
		return NULL;

	data = kmalloc(len + extra_buf_alloc, GFP_KERNEL);
	if (unlikely(!data))
		return ERR_PTR(-ENOMEM);

	if (unlikely(__copy_from_user(data, buf, len))) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	pr_vdebug("Buffer from user space:\n");
	ffs_dump_mem("", data, len);

	return data;
}

DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Nazarewicz");
gpl-2.0
Josemurillo/frokTinkel
src/server/scripts/EasternKingdoms/Karazhan/boss_moroes.cpp
12
22496
/*
 * Copyright (C) 2008-2012 TrinityCore <http://www.trinitycore.org/>
 * Copyright (C) 2006-2008 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/* ScriptData
SDName: Boss_Moroes
SD%Complete: 95
SDComment:
SDCategory: Karazhan
EndScriptData */

#include "ScriptMgr.h"
#include "ScriptedCreature.h"
#include "karazhan.h"

// Script-text entries (negative ids reference the script_texts table).
#define SAY_AGGRO -1532011
#define SAY_SPECIAL_1 -1532012
#define SAY_SPECIAL_2 -1532013
#define SAY_KILL_1 -1532014
#define SAY_KILL_2 -1532015
#define SAY_KILL_3 -1532016
#define SAY_DEATH -1532017

// Moroes' own spells.
#define SPELL_VANISH 29448
#define SPELL_GARROTE 37066
#define SPELL_BLIND 34694
#define SPELL_GOUGE 29425
#define SPELL_FRENZY 37023

// Shared Z coordinate for all four guest spawn points.
#define POS_Z 81.73f

// Guest spawn points: {x, y, orientation} — z comes from POS_Z.
float Locations[4][3]=
{
    {-10991.0f, -1884.33f, 0.614315f},
    {-10989.4f, -1885.88f, 0.904913f},
    {-10978.1f, -1887.07f, 2.035550f},
    {-10975.9f, -1885.81f, 2.253890f},
};

// Creature entries of the six possible dinner guests; four are picked at random.
const uint32 Adds[6]=
{
    17007,
    19872,
    19873,
    19874,
    19875,
    19876,
};

class boss_moroes : public CreatureScript
{
public:
    boss_moroes() : CreatureScript("boss_moroes") { }

    CreatureAI* GetAI(Creature* creature) const
    {
        return new boss_moroesAI (creature);
    }

    struct boss_moroesAI : public ScriptedAI
    {
        boss_moroesAI(Creature* creature) : ScriptedAI(creature)
        {
            // Zero the add bookkeeping before first Reset().
            memset(AddId, 0, sizeof(AddId));
            memset(AddGUID, 0, sizeof(AddGUID));
            instance = creature->GetInstanceScript();
        }

        InstanceScript* instance;

        uint64 AddGUID[4];          // GUIDs of the four active guest adds

        uint32 Vanish_Timer;
        uint32 Blind_Timer;
        uint32 Gouge_Timer;
        uint32 Wait_Timer;          // delay between Vanish and Garrote
        uint32 CheckAdds_Timer;
        uint32 AddId[4];            // creature entries chosen for this lockout

        bool InVanish;
        bool Enrage;

        // Reset timers/flags, respawn the guests, and mark the encounter
        // NOT_STARTED in the instance script.
        void Reset()
        {
            Vanish_Timer = 30000;
            Blind_Timer = 35000;
            Gouge_Timer = 23000;
            Wait_Timer = 0;
            CheckAdds_Timer = 5000;

            Enrage = false;
            InVanish = false;
            if (me->GetHealth())
                SpawnAdds();

            if (instance)
                instance->SetData(TYPE_MOROES, NOT_STARTED);
        }

        // Flag the encounter IN_PROGRESS and pull the whole zone into combat.
        void StartEvent()
        {
            if (instance)
                instance->SetData(TYPE_MOROES, IN_PROGRESS);

            DoZoneInCombat();
        }

        void EnterCombat(Unit* /*who*/)
        {
            StartEvent();
            DoScriptText(SAY_AGGRO, me);
            AddsAttack();
            DoZoneInCombat();
        }

        void KilledUnit(Unit* /*victim*/)
        {
            DoScriptText(RAND(SAY_KILL_1, SAY_KILL_2, SAY_KILL_3), me);
        }

        void JustDied(Unit* /*killer*/)
        {
            DoScriptText(SAY_DEATH, me);
            if (instance)
                instance->SetData(TYPE_MOROES, DONE);
            DeSpawnAdds();
            //remove aura from spell Garrote when Moroes dies
            if (instance)
                instance->DoRemoveAurasDueToSpellOnPlayers(SPELL_GARROTE);
        }

        // Despawn any live adds, then summon four guests.  On the first spawn
        // a random subset of the six entries is chosen and remembered in
        // AddId[], so later respawns reuse the same composition.
        void SpawnAdds()
        {
            DeSpawnAdds();
            if (isAddlistEmpty())
            {
                Creature* creature = NULL;
                std::vector<uint32> AddList;

                for (uint8 i = 0; i < 6; ++i)
                    AddList.push_back(Adds[i]);

                // Randomly drop entries until only four remain.
                while (AddList.size() > 4)
                    AddList.erase((AddList.begin())+(rand()%AddList.size()));

                uint8 i = 0;
                for (std::vector<uint32>::const_iterator itr = AddList.begin(); itr != AddList.end(); ++itr)
                {
                    uint32 entry = *itr;

                    creature = me->SummonCreature(entry, Locations[i][0], Locations[i][1], POS_Z, Locations[i][2], TEMPSUMMON_CORPSE_TIMED_DESPAWN, 10000);
                    if (creature)
                    {
                        AddGUID[i] = creature->GetGUID();
                        AddId[i] = entry;
                    }
                    ++i;
                }
            }
            else
            {
                // Composition already chosen: respawn the same four entries.
                for (uint8 i = 0; i < 4; ++i)
                {
                    Creature* creature = me->SummonCreature(AddId[i], Locations[i][0], Locations[i][1], POS_Z, Locations[i][2], TEMPSUMMON_CORPSE_TIMED_DESPAWN, 10000);
                    if (creature)
                    {
                        AddGUID[i] = creature->GetGUID();
                    }
                }
            }
        }

        // True if the guest composition has not been picked yet (any slot 0).
        bool isAddlistEmpty()
        {
            for (uint8 i = 0; i < 4; ++i)
                if (AddId[i] == 0)
                    return true;

            return false;
        }

        void DeSpawnAdds()
        {
            for (uint8 i = 0; i < 4; ++i)
            {
                if (AddGUID[i])
                {
                    Creature* temp = Creature::GetCreature((*me), AddGUID[i]);
                    if (temp && temp->isAlive())
                        temp->DisappearAndDie();
                }
            }
        }

        // Send every living add at Moroes' current target; if an add is
        // missing/dead the whole encounter evades.
        void AddsAttack()
        {
            for (uint8 i = 0; i < 4; ++i)
            {
                if (AddGUID[i])
                {
                    Creature* temp = Creature::GetCreature((*me), AddGUID[i]);
                    if (temp && temp->isAlive())
                    {
                        temp->AI()->AttackStart(me->getVictim());
                        DoZoneInCombat(temp);
                    } else
                        EnterEvadeMode();
                }
            }
        }

        void UpdateAI(const uint32 diff)
        {
            if (!UpdateVictim())
                return;

            // Evade if the instance script no longer reports the fight active.
            if (instance && !instance->GetData(TYPE_MOROES))
            {
                EnterEvadeMode();
                return;
            }

            // One-shot Frenzy below 30% health; stops Vanish/Gouge/Blind.
            if (!Enrage && HealthBelowPct(30))
            {
                DoCast(me, SPELL_FRENZY);
                Enrage = true;
            }

            // Every 5s make sure idle adds re-engage Moroes' target.
            if (CheckAdds_Timer <= diff)
            {
                for (uint8 i = 0; i < 4; ++i)
                {
                    if (AddGUID[i])
                    {
                        Creature* temp = Unit::GetCreature((*me), AddGUID[i]);
                        if (temp && temp->isAlive())
                            if (!temp->getVictim())
                                temp->AI()->AttackStart(me->getVictim());
                    }
                }
                CheckAdds_Timer = 5000;
            } else CheckAdds_Timer -= diff;

            if (!Enrage)
            {
                //Cast Vanish, then Garrote random victim
                if (Vanish_Timer <= diff)
                {
                    DoCast(me, SPELL_VANISH);
                    InVanish = true;
                    Vanish_Timer = 30000;
                    Wait_Timer = 5000;      // Garrote lands 5s after Vanish
                } else Vanish_Timer -= diff;

                if (Gouge_Timer <= diff)
                {
                    DoCastVictim(SPELL_GOUGE);
                    Gouge_Timer = 40000;
                } else Gouge_Timer -= diff;

                // Blind a random target that is outside melee range.
                if (Blind_Timer <= diff)
                {
                    std::list<Unit*> targets;
                    SelectTargetList(targets, 5, SELECT_TARGET_RANDOM, me->GetMeleeReach()*5, true);
                    for (std::list<Unit*>::const_iterator i = targets.begin(); i != targets.end(); ++i)
                        if (!me->IsWithinMeleeRange(*i))
                        {
                            DoCast(*i, SPELL_BLIND);
                            break;
                        }
                    Blind_Timer = 40000;
                } else Blind_Timer -= diff;
            }

            if (InVanish)
            {
                // After the vanish delay, Garrote a random player and reappear.
                if (Wait_Timer <= diff)
                {
                    DoScriptText(RAND(SAY_SPECIAL_1, SAY_SPECIAL_2), me);

                    if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true))
                        target->CastSpell(target, SPELL_GARROTE, true);

                    InVanish = false;
                } else Wait_Timer -= diff;
            }

            // No melee while vanished.
            if (!InVanish)
                DoMeleeAttackIfReady();
        }
    };
};

// Shared base AI for the four dinner guests: tracks the sibling adds'
// GUIDs (for friendly heals/buffs) and evades when the encounter resets.
struct boss_moroes_guestAI : public ScriptedAI
{
    InstanceScript* instance;

    uint64 GuestGUID[4];

    boss_moroes_guestAI(Creature* creature) : ScriptedAI(creature)
    {
        for (uint8 i = 0; i < 4; ++i)
            GuestGUID[i] = 0;

        instance = creature->GetInstanceScript();
    }

    void Reset()
    {
        if (instance)
            instance->SetData(TYPE_MOROES, NOT_STARTED);
    }

    // Copy the add GUIDs from Moroes' AI so guests can target each other.
    void AcquireGUID()
    {
        if (!instance)
            return;

        if (Creature* Moroes = Unit::GetCreature(*me, instance->GetData64(DATA_MOROES)))
            for (uint8 i = 0; i < 4; ++i)
                if (uint64 GUID = CAST_AI(boss_moroes::boss_moroesAI, Moroes->AI())->AddGUID[i])
                    GuestGUID[i] = GUID;
    }

    // Pick a random living guest as a friendly-spell target; falls back to
    // self when the chosen slot is empty or dead.
    Unit* SelectGuestTarget()
    {
        uint64 TempGUID = GuestGUID[rand()%4];
        if (TempGUID)
        {
            Unit* unit = Unit::GetUnit(*me, TempGUID);
            if (unit && unit->isAlive())
                return unit;
        }

        return me;
    }

    void UpdateAI(const uint32 /*diff*/)
    {
        if (instance && !instance->GetData(TYPE_MOROES))
            EnterEvadeMode();

        DoMeleeAttackIfReady();
    }
};

// Guest spell ids, grouped per guest.
#define SPELL_MANABURN 29405
#define SPELL_MINDFLY 29570
#define SPELL_SWPAIN 34441
#define SPELL_SHADOWFORM 29406

#define SPELL_HAMMEROFJUSTICE 13005
#define SPELL_JUDGEMENTOFCOMMAND 29386
#define SPELL_SEALOFCOMMAND 29385

#define SPELL_DISPELMAGIC 15090 //Self or other guest+Moroes
#define SPELL_GREATERHEAL 29564 //Self or other guest+Moroes
#define SPELL_HOLYFIRE 29563
#define SPELL_PWSHIELD 29408

#define SPELL_CLEANSE 29380 //Self or other guest+Moroes
#define SPELL_GREATERBLESSOFMIGHT 29381 //Self or other guest+Moroes
#define SPELL_HOLYLIGHT 29562 //Self or other guest+Moroes
#define SPELL_DIVINESHIELD 41367

#define SPELL_HAMSTRING 9080
#define SPELL_MORTALSTRIKE 29572
#define SPELL_WHIRLWIND 29573

#define SPELL_DISARM 8379
#define SPELL_HEROICSTRIKE 29567
#define SPELL_SHIELDBASH 11972
#define SPELL_SHIELDWALL 29390

class boss_baroness_dorothea_millstipe : public CreatureScript
{
public:
    boss_baroness_dorothea_millstipe() : CreatureScript("boss_baroness_dorothea_millstipe") { }

    CreatureAI* GetAI(Creature* creature) const
    {
        return new boss_baroness_dorothea_millstipeAI (creature);
    }

    struct boss_baroness_dorothea_millstipeAI : public boss_moroes_guestAI
    {
        //Shadow Priest
        boss_baroness_dorothea_millstipeAI(Creature* creature) : boss_moroes_guestAI(creature) {}

        uint32 ManaBurn_Timer;
        uint32 MindFlay_Timer;
        uint32 ShadowWordPain_Timer;

        void Reset()
        {
            ManaBurn_Timer = 7000;
            MindFlay_Timer = 1000;
            ShadowWordPain_Timer = 6000;

            DoCast(me, SPELL_SHADOWFORM, true);
            boss_moroes_guestAI::Reset();
        }

        void UpdateAI(const uint32 diff)
        {
            if (!UpdateVictim())
                return;

            boss_moroes_guestAI::UpdateAI(diff);

            if (MindFlay_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_MINDFLY);
                MindFlay_Timer = 12000;                 // 3 sec channeled
            } else MindFlay_Timer -= diff;

            // Mana Burn only against mana users.
            if (ManaBurn_Timer <= diff)
            {
                if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true))
                    if (target->getPowerType() == POWER_MANA)
                        DoCast(target, SPELL_MANABURN);
                ManaBurn_Timer = 5000;                  // 3 sec cast
            } else ManaBurn_Timer -= diff;

            if (ShadowWordPain_Timer <= diff)
            {
                if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true))
                {
                    DoCast(target, SPELL_SWPAIN);
                    ShadowWordPain_Timer = 7000;
                }
            } else ShadowWordPain_Timer -= diff;
        }
    };
};

class boss_baron_rafe_dreuger : public CreatureScript
{
public:
    boss_baron_rafe_dreuger() : CreatureScript("boss_baron_rafe_dreuger") { }

    CreatureAI* GetAI(Creature* creature) const
    {
        return new boss_baron_rafe_dreugerAI (creature);
    }

    struct boss_baron_rafe_dreugerAI : public boss_moroes_guestAI
    {
        //Retr Pally
        boss_baron_rafe_dreugerAI(Creature* creature) : boss_moroes_guestAI(creature){}

        uint32 HammerOfJustice_Timer;
        uint32 SealOfCommand_Timer;
        uint32 JudgementOfCommand_Timer;

        void Reset()
        {
            HammerOfJustice_Timer = 1000;
            SealOfCommand_Timer = 7000;
            // Judgement only after the seal is up.
            JudgementOfCommand_Timer = SealOfCommand_Timer + 29000;

            boss_moroes_guestAI::Reset();
        }

        void UpdateAI(const uint32 diff)
        {
            if (!UpdateVictim())
                return;

            boss_moroes_guestAI::UpdateAI(diff);

            if (SealOfCommand_Timer <= diff)
            {
                DoCast(me, SPELL_SEALOFCOMMAND);
                SealOfCommand_Timer = 32000;
                JudgementOfCommand_Timer = 29000;
            } else SealOfCommand_Timer -= diff;

            if (JudgementOfCommand_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_JUDGEMENTOFCOMMAND);
                JudgementOfCommand_Timer = SealOfCommand_Timer + 29000;
            } else JudgementOfCommand_Timer -= diff;

            if (HammerOfJustice_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_HAMMEROFJUSTICE);
                HammerOfJustice_Timer = 12000;
            } else HammerOfJustice_Timer -= diff;
        }
    };
};

class boss_lady_catriona_von_indi : public CreatureScript
{
public:
    boss_lady_catriona_von_indi() : CreatureScript("boss_lady_catriona_von_indi") { }

    CreatureAI* GetAI(Creature* creature) const
    {
        return new boss_lady_catriona_von_indiAI (creature);
    }

    struct boss_lady_catriona_von_indiAI : public boss_moroes_guestAI
    {
        //Holy Priest
        boss_lady_catriona_von_indiAI(Creature* creature) : boss_moroes_guestAI(creature) {}

        uint32 DispelMagic_Timer;
        uint32 GreaterHeal_Timer;
        uint32 HolyFire_Timer;
        uint32 PowerWordShield_Timer;

        void Reset()
        {
            DispelMagic_Timer = 11000;
            GreaterHeal_Timer = 1500;
            HolyFire_Timer = 5000;
            PowerWordShield_Timer = 1000;

            AcquireGUID();

            boss_moroes_guestAI::Reset();
        }

        void UpdateAI(const uint32 diff)
        {
            if (!UpdateVictim())
                return;

            boss_moroes_guestAI::UpdateAI(diff);

            if (PowerWordShield_Timer <= diff)
            {
                DoCast(me, SPELL_PWSHIELD);
                PowerWordShield_Timer = 15000;
            } else PowerWordShield_Timer -= diff;

            if (GreaterHeal_Timer <= diff)
            {
                Unit* target = SelectGuestTarget();

                DoCast(target, SPELL_GREATERHEAL);
                GreaterHeal_Timer = 17000;
            } else GreaterHeal_Timer -= diff;

            if (HolyFire_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_HOLYFIRE);
                HolyFire_Timer = 22000;
            } else HolyFire_Timer -= diff;

            // Dispel either a friendly guest or a random player.
            if (DispelMagic_Timer <= diff)
            {
                if (Unit* target = RAND(SelectGuestTarget(), SelectTarget(SELECT_TARGET_RANDOM, 0, 100, true)))
                    DoCast(target, SPELL_DISPELMAGIC);

                DispelMagic_Timer = 25000;
            } else DispelMagic_Timer -= diff;
        }
    };
};

class boss_lady_keira_berrybuck : public CreatureScript
{
public:
    boss_lady_keira_berrybuck() : CreatureScript("boss_lady_keira_berrybuck") { }

    CreatureAI* GetAI(Creature* creature) const
    {
        return new boss_lady_keira_berrybuckAI (creature);
    }

    struct boss_lady_keira_berrybuckAI : public boss_moroes_guestAI
    {
        //Holy Pally
        boss_lady_keira_berrybuckAI(Creature* creature) : boss_moroes_guestAI(creature)  {}

        uint32 Cleanse_Timer;
        uint32 GreaterBless_Timer;
        uint32 HolyLight_Timer;
        uint32 DivineShield_Timer;

        void Reset()
        {
            Cleanse_Timer = 13000;
            GreaterBless_Timer = 1000;
            HolyLight_Timer = 7000;
            DivineShield_Timer = 31000;

            AcquireGUID();

            boss_moroes_guestAI::Reset();
        }

        void UpdateAI(const uint32 diff)
        {
            if (!UpdateVictim())
                return;

            boss_moroes_guestAI::UpdateAI(diff);

            if (DivineShield_Timer <= diff)
            {
                DoCast(me, SPELL_DIVINESHIELD);
                DivineShield_Timer = 31000;
            } else DivineShield_Timer -= diff;

            if (HolyLight_Timer <= diff)
            {
                Unit* target = SelectGuestTarget();

                DoCast(target, SPELL_HOLYLIGHT);
                HolyLight_Timer = 10000;
            } else HolyLight_Timer -= diff;

            if (GreaterBless_Timer <= diff)
            {
                Unit* target = SelectGuestTarget();

                DoCast(target, SPELL_GREATERBLESSOFMIGHT);

                GreaterBless_Timer = 50000;
            } else GreaterBless_Timer -= diff;

            if (Cleanse_Timer <= diff)
            {
                Unit* target = SelectGuestTarget();

                DoCast(target, SPELL_CLEANSE);

                Cleanse_Timer = 10000;
            } else Cleanse_Timer -= diff;
        }
    };
};

class boss_lord_robin_daris : public CreatureScript
{
public:
    boss_lord_robin_daris() : CreatureScript("boss_lord_robin_daris") { }

    CreatureAI* GetAI(Creature* creature) const
    {
        return new boss_lord_robin_darisAI (creature);
    }

    struct boss_lord_robin_darisAI : public boss_moroes_guestAI
    {
        //Arms Warr
        boss_lord_robin_darisAI(Creature* creature) : boss_moroes_guestAI(creature) {}

        uint32 Hamstring_Timer;
        uint32 MortalStrike_Timer;
        uint32 WhirlWind_Timer;

        void Reset()
        {
            Hamstring_Timer = 7000;
            MortalStrike_Timer = 10000;
            WhirlWind_Timer = 21000;

            boss_moroes_guestAI::Reset();
        }

        void UpdateAI(const uint32 diff)
        {
            if (!UpdateVictim())
                return;

            boss_moroes_guestAI::UpdateAI(diff);

            if (Hamstring_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_HAMSTRING);
                Hamstring_Timer = 12000;
            } else Hamstring_Timer -= diff;

            if (MortalStrike_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_MORTALSTRIKE);
                MortalStrike_Timer = 18000;
            } else MortalStrike_Timer -= diff;

            if (WhirlWind_Timer <= diff)
            {
                DoCast(me, SPELL_WHIRLWIND);
                WhirlWind_Timer = 21000;
            } else WhirlWind_Timer -= diff;
        }
    };
};

class boss_lord_crispin_ference : public CreatureScript
{
public:
    boss_lord_crispin_ference() : CreatureScript("boss_lord_crispin_ference") { }

    CreatureAI* GetAI(Creature* creature) const
    {
        return new boss_lord_crispin_ferenceAI (creature);
    }

    struct boss_lord_crispin_ferenceAI : public boss_moroes_guestAI
    {
        // Protection-style warrior (Disarm/Shield Bash/Shield Wall kit);
        // original comment labelled this "Arms Warr".
        boss_lord_crispin_ferenceAI(Creature* creature) : boss_moroes_guestAI(creature) {}

        uint32 Disarm_Timer;
        uint32 HeroicStrike_Timer;
        uint32 ShieldBash_Timer;
        uint32 ShieldWall_Timer;

        void Reset()
        {
            Disarm_Timer = 6000;
            HeroicStrike_Timer = 10000;
            ShieldBash_Timer = 8000;
            ShieldWall_Timer = 4000;

            boss_moroes_guestAI::Reset();
        }

        void UpdateAI(const uint32 diff)
        {
            if (!UpdateVictim())
                return;

            boss_moroes_guestAI::UpdateAI(diff);

            if (Disarm_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_DISARM);
                Disarm_Timer = 12000;
            } else Disarm_Timer -= diff;

            if (HeroicStrike_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_HEROICSTRIKE);
                HeroicStrike_Timer = 10000;
            } else HeroicStrike_Timer -= diff;

            if (ShieldBash_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_SHIELDBASH);
                ShieldBash_Timer = 13000;
            } else ShieldBash_Timer -= diff;

            if (ShieldWall_Timer <= diff)
            {
                DoCast(me, SPELL_SHIELDWALL);
                ShieldWall_Timer = 21000;
            } else ShieldWall_Timer -= diff;
        }
    };
};

// Register all Moroes-related scripts with the script manager.
void AddSC_boss_moroes()
{
    new boss_moroes();
    new boss_baroness_dorothea_millstipe();
    new boss_baron_rafe_dreuger();
    new boss_lady_catriona_von_indi();
    new boss_lady_keira_berrybuck();
    new boss_lord_robin_daris();
    new boss_lord_crispin_ference();
}
gpl-2.0
drewis/android_kernel_grouper
fs/fat/namei_vfat.c
268
26425
/* * linux/fs/vfat/namei.c * * Written 1992,1993 by Werner Almesberger * * Windows95/Windows NT compatible extended MSDOS filesystem * by Gordon Chaffee Copyright (C) 1995. Send bug reports for the * VFAT filesystem to <chaffee@cs.berkeley.edu>. Specify * what file operation caused you trouble and if you can duplicate * the problem, send a script that demonstrates it. * * Short name translation 1999, 2001 by Wolfram Pienkoss <wp@bszh.de> * * Support Multibyte characters and cleanup by * OGAWA Hirofumi <hirofumi@mail.parknet.co.jp> */ #include <linux/module.h> #include <linux/jiffies.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/namei.h> #include "fat.h" /* * If new entry was created in the parent, it could create the 8.3 * alias (the shortname of logname). So, the parent may have the * negative-dentry which matches the created 8.3 alias. * * If it happened, the negative dentry isn't actually negative * anymore. So, drop it. */ static int vfat_revalidate_shortname(struct dentry *dentry) { int ret = 1; spin_lock(&dentry->d_lock); if (dentry->d_time != dentry->d_parent->d_inode->i_version) ret = 0; spin_unlock(&dentry->d_lock); return ret; } static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) { if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; /* This is not negative dentry. Always valid. */ if (dentry->d_inode) return 1; return vfat_revalidate_shortname(dentry); } static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) { if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; /* * This is not negative dentry. Always valid. * * Note, rename() to existing directory entry will have ->d_inode, * and will use existing name which isn't specified name by user. * * We may be able to drop this positive dentry here. But dropping * positive dentry isn't good idea. So it's unsupported like * rename("filename", "FILENAME") for now. 
*/ if (dentry->d_inode) return 1; /* * This may be nfsd (or something), anyway, we can't see the * intent of this. So, since this can be for creation, drop it. */ if (!nd) return 0; /* * Drop the negative dentry, in order to make sure to use the * case sensitive name which is specified by user if this is * for creation. */ if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; return vfat_revalidate_shortname(dentry); } /* returns the length of a struct qstr, ignoring trailing dots */ static unsigned int __vfat_striptail_len(unsigned int len, const char *name) { while (len && name[len - 1] == '.') len--; return len; } static unsigned int vfat_striptail_len(const struct qstr *qstr) { return __vfat_striptail_len(qstr->len, qstr->name); } /* * Compute the hash for the vfat name corresponding to the dentry. * Note: if the name is invalid, we leave the hash code unchanged so * that the existing dentry can be used. The vfat fs routines will * return ENOENT or EINVAL as appropriate. */ static int vfat_hash(const struct dentry *dentry, const struct inode *inode, struct qstr *qstr) { qstr->hash = full_name_hash(qstr->name, vfat_striptail_len(qstr)); return 0; } /* * Compute the hash for the vfat name corresponding to the dentry. * Note: if the name is invalid, we leave the hash code unchanged so * that the existing dentry can be used. The vfat fs routines will * return ENOENT or EINVAL as appropriate. */ static int vfat_hashi(const struct dentry *dentry, const struct inode *inode, struct qstr *qstr) { struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io; const unsigned char *name; unsigned int len; unsigned long hash; name = qstr->name; len = vfat_striptail_len(qstr); hash = init_name_hash(); while (len--) hash = partial_name_hash(nls_tolower(t, *name++), hash); qstr->hash = end_name_hash(hash); return 0; } /* * Case insensitive compare of two vfat names. 
*/ static int vfat_cmpi(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io; unsigned int alen, blen; /* A filename cannot end in '.' or we treat it like it has none */ alen = vfat_striptail_len(name); blen = __vfat_striptail_len(len, str); if (alen == blen) { if (nls_strnicmp(t, name->name, str, alen) == 0) return 0; } return 1; } /* * Case sensitive compare of two vfat names. */ static int vfat_cmp(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { unsigned int alen, blen; /* A filename cannot end in '.' or we treat it like it has none */ alen = vfat_striptail_len(name); blen = __vfat_striptail_len(len, str); if (alen == blen) { if (strncmp(name->name, str, alen) == 0) return 0; } return 1; } static const struct dentry_operations vfat_ci_dentry_ops = { .d_revalidate = vfat_revalidate_ci, .d_hash = vfat_hashi, .d_compare = vfat_cmpi, }; static const struct dentry_operations vfat_dentry_ops = { .d_revalidate = vfat_revalidate, .d_hash = vfat_hash, .d_compare = vfat_cmp, }; /* Characters that are undesirable in an MS-DOS file name */ static inline wchar_t vfat_bad_char(wchar_t w) { return (w < 0x0020) || (w == '*') || (w == '?') || (w == '<') || (w == '>') || (w == '|') || (w == '"') || (w == ':') || (w == '/') || (w == '\\'); } static inline wchar_t vfat_replace_char(wchar_t w) { return (w == '[') || (w == ']') || (w == ';') || (w == ',') || (w == '+') || (w == '='); } static wchar_t vfat_skip_char(wchar_t w) { return (w == '.') || (w == ' '); } static inline int vfat_is_used_badchars(const wchar_t *s, int len) { int i; for (i = 0; i < len; i++) if (vfat_bad_char(s[i])) return -EINVAL; if (s[i - 1] == ' ') /* last character cannot be space */ return -EINVAL; return 0; } 
/*
 * Check whether a short (8.3) directory entry @name already exists in
 * @dir.  Returns 0 if found, -ENOENT otherwise; the buffer_head that
 * fat_scan() pins on success is released immediately since only the
 * existence answer is needed.
 */
static int vfat_find_form(struct inode *dir, unsigned char *name)
{
	struct fat_slot_info sinfo;
	int err = fat_scan(dir, name, &sinfo);
	if (err)
		return -ENOENT;
	brelse(sinfo.bh);
	return 0;
}

/*
 * 1) Valid characters for the 8.3 format alias are any combination of
 * letters, uppercase alphabets, digits, any of the
 * following special characters:
 *     $ % ' ` - @ { } ~ ! # ( ) & _ ^
 * In this case Longfilename is not stored in disk.
 *
 * WinNT's Extension:
 *   File name and extension name contain uppercase/lowercase
 *   only. And it is expressed by CASE_LOWER_BASE and CASE_LOWER_EXT.
 *
 * 2) File name is 8.3 format, but it contains uppercase and
 * lowercase chars, multi-byte chars, etc. In this case numtail is not
 * added, but Longfilename is stored.
 *
 * 3) When anything other than the above, or the following special
 * characters are contained:
 *        . [ ] ; , + =
 * numtail is added, and Longfilename must be stored on disk.
 */
struct shortname_info {
	/* bit flags accumulated per-character while building an alias:
	 * lower/upper record whether the name could be all-lower/all-upper,
	 * valid records whether the 8.3 alias alone can represent the name */
	unsigned char lower:1,
		      upper:1,
		      valid:1;
};
#define INIT_SHORTNAME_INFO(x)	do {		\
	(x)->lower = 1;				\
	(x)->upper = 1;				\
	(x)->valid = 1;				\
} while (0)

/*
 * Convert one UTF-16 code unit at @src into its 8.3-alias byte form in
 * @buf (uppercased via the NLS table), updating @info's case/validity
 * flags.  Returns the number of bytes written (0 when the character is
 * skipped entirely, e.g. '.' or ' ').
 */
static inline int to_shortname_char(struct nls_table *nls,
		unsigned char *buf, int buf_size, wchar_t *src,
		struct shortname_info *info)
{
	if (vfat_skip_char(*src)) {
		info->valid = 0;
		return 0;
	}
	if (vfat_replace_char(*src)) {
		info->valid = 0;
		buf[0] = '_';	/* legal in LFN only; alias uses '_' */
		return 1;
	}

	{
	int len;
	len = nls->uni2char(*src, buf, buf_size);
	if (len <= 0) {
		/* not representable in the disk codepage */
		info->valid = 0;
		buf[0] = '_';
		len = 1;
	} else if (len == 1) {
		unsigned char prev = buf[0];

		if (buf[0] >= 0x7F) {
			info->lower = 0;
			info->upper = 0;
		}

		buf[0] = nls_toupper(nls, buf[0]);
		if (isalpha(buf[0])) {
			/* unchanged by toupper => was already upper */
			if (buf[0] == prev)
				info->lower = 0;
			else
				info->upper = 0;
		}
	} else {
		/* multi-byte output: neither pure-lower nor pure-upper */
		info->lower = 0;
		info->upper = 0;
	}

	return len;
	}
}

/*
 * Given a valid longname, create a unique shortname.
Make sure the
 * shortname does not exist
 * Returns negative number on error, 0 for a normal
 * return, and 1 for valid shortname
 */
static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
				 wchar_t *uname, int ulen,
				 unsigned char *name_res, unsigned char *lcase)
{
	struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
	wchar_t *ip, *ext_start, *end, *name_start;
	unsigned char base[9], ext[4], buf[5], *p;
	unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
	int chl, chi;
	int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
	int is_shortname;
	struct shortname_info base_info, ext_info;

	is_shortname = 1;
	INIT_SHORTNAME_INFO(&base_info);
	INIT_SHORTNAME_INFO(&ext_info);

	/* Now, we need to create a shortname from the long name.
	 * Scan backwards for the last '.' that separates the extension;
	 * a trailing '.' means the name has no extension at all. */
	ext_start = end = &uname[ulen];
	while (--ext_start >= uname) {
		if (*ext_start == 0x002E) {	/* is `.' */
			if (ext_start == end - 1) {
				sz = ulen;
				ext_start = NULL;
			}
			break;
		}
	}

	if (ext_start == uname - 1) {
		/* no '.' found anywhere: whole name is the base */
		sz = ulen;
		ext_start = NULL;
	} else if (ext_start) {
		/*
		 * Names which start with a dot could be just
		 * an extension eg. "...test".  In this case Win95
		 * uses the extension as the name and sets no extension.
		 */
		name_start = &uname[0];
		while (name_start < ext_start) {
			if (!vfat_skip_char(*name_start))
				break;
			name_start++;
		}
		if (name_start != ext_start) {
			sz = ext_start - uname;
			ext_start++;
		} else {
			sz = ulen;
			ext_start = NULL;
		}
	}

	/* Default truncation points used if a "~N" numeric tail is needed;
	 * they shrink when a multi-byte char would straddle the boundary. */
	numtail_baselen = 6;
	numtail2_baselen = 2;
	for (baselen = i = 0, p = base, ip = uname; i < sz; i++, ip++) {
		chl = to_shortname_char(nls, charbuf, sizeof(charbuf),
					ip, &base_info);
		if (chl == 0)
			continue;

		if (baselen < 2 && (baselen + chl) > 2)
			numtail2_baselen = baselen;
		if (baselen < 6 && (baselen + chl) > 6)
			numtail_baselen = baselen;
		for (chi = 0; chi < chl; chi++) {
			*p++ = charbuf[chi];
			baselen++;
			if (baselen >= 8)
				break;
		}
		if (baselen >= 8) {
			/* truncated mid-char or more input remains:
			 * the plain alias can no longer represent it */
			if ((chi < chl - 1) || (ip + 1) - uname < sz)
				is_shortname = 0;
			break;
		}
	}
	if (baselen == 0) {
		return -EINVAL;
	}

	extlen = 0;
	if (ext_start) {
		for (p = ext, ip = ext_start; extlen < 3 && ip < end; ip++) {
			chl = to_shortname_char(nls, charbuf, sizeof(charbuf),
						ip, &ext_info);
			if (chl == 0)
				continue;

			if ((extlen + chl) > 3) {
				is_shortname = 0;
				break;
			}
			for (chi = 0; chi < chl; chi++) {
				*p++ = charbuf[chi];
				extlen++;
			}
			if (extlen >= 3) {
				if (ip + 1 != end)
					is_shortname = 0;
				break;
			}
		}
	}
	ext[extlen] = '\0';
	base[baselen] = '\0';

	/* Yes, it can happen. ".\xe5" would do it. */
	if (base[0] == DELETED_FLAG)
		base[0] = 0x05;

	/* OK, at this point we know that base is not longer than 8 symbols,
	 * ext is not longer than 3, base is nonempty, both don't contain
	 * any bad symbols (lowercase transformed to uppercase).
	 */

	memset(name_res, ' ', MSDOS_NAME);
	memcpy(name_res, base, baselen);
	memcpy(name_res + 8, ext, extlen);
	*lcase = 0;
	if (is_shortname && base_info.valid && ext_info.valid) {
		if (vfat_find_form(dir, name_res) == 0)
			return -EEXIST;

		if (opts->shortname & VFAT_SFN_CREATE_WIN95) {
			/* 1 => alias alone suffices, no LFN stored */
			return (base_info.upper && ext_info.upper);
		} else if (opts->shortname & VFAT_SFN_CREATE_WINNT) {
			if ((base_info.upper || base_info.lower) &&
			    (ext_info.upper || ext_info.lower)) {
				if (!base_info.upper && base_info.lower)
					*lcase |= CASE_LOWER_BASE;
				if (!ext_info.upper && ext_info.lower)
					*lcase |= CASE_LOWER_EXT;
				return 1;
			}
			return 0;
		} else {
			BUG();
		}
	}

	if (opts->numtail == 0)
		if (vfat_find_form(dir, name_res) < 0)
			return 0;

	/*
	 * Try to find a unique extension.  This used to
	 * iterate through all possibilities sequentially,
	 * but that gave extremely bad performance.  Windows
	 * only tries a few cases before using random
	 * values for part of the base.
	 */

	if (baselen > 6) {
		baselen = numtail_baselen;
		name_res[7] = ' ';
	}
	name_res[baselen] = '~';
	for (i = 1; i < 10; i++) {
		name_res[baselen + 1] = i + '0';
		if (vfat_find_form(dir, name_res) < 0)
			return 0;
	}

	/* "~1".."~9" all taken: fall back to BASE+4-hex-digits + "~N" */
	i = jiffies;
	sz = (jiffies >> 16) & 0x7;
	if (baselen > 2) {
		baselen = numtail2_baselen;
		name_res[7] = ' ';
	}
	name_res[baselen + 4] = '~';
	name_res[baselen + 5] = '1' + sz;
	while (1) {
		snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
		memcpy(&name_res[baselen], buf, 4);
		if (vfat_find_form(dir, name_res) < 0)
			break;
		i -= 11;
	}
	return 0;
}

/* Translate a string, including coded sequences into Unicode */
static int
xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
	     int *longlen, int *outlen, int escape, int utf8,
	     struct nls_table *nls)
{
	const unsigned char *ip;
	unsigned char nc;
	unsigned char *op;
	unsigned int ec;
	int i, k, fill;
	int charlen;

	if (utf8) {
		*outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
		if (*outlen < 0)
			return *outlen;
		else if (*outlen > FAT_LFN_LEN)
			return -ENAMETOOLONG;

		op = &outname[*outlen * sizeof(wchar_t)];
	} else {
		if (nls) {
			for (i = 0, ip = name, op = outname, *outlen = 0;
			     i < len && *outlen <= FAT_LFN_LEN;
			     *outlen += 1) {
				if (escape && (*ip == ':')) {
					/* ":xxxx" escape encodes a raw
					 * 16-bit code unit in hex */
					if (i > len - 5)
						return -EINVAL;
					ec = 0;
					for (k = 1; k < 5; k++) {
						nc = ip[k];
						ec <<= 4;
						if (nc >= '0' && nc <= '9') {
							ec |= nc - '0';
							continue;
						}
						if (nc >= 'a' && nc <= 'f') {
							ec |= nc - ('a' - 10);
							continue;
						}
						if (nc >= 'A' && nc <= 'F') {
							ec |= nc - ('A' - 10);
							continue;
						}
						return -EINVAL;
					}
					/* little-endian UTF-16 output */
					*op++ = ec & 0xFF;
					*op++ = ec >> 8;
					ip += 5;
					i += 5;
				} else {
					if ((charlen = nls->char2uni(ip, len - i, (wchar_t *)op)) < 0)
						return -EINVAL;
					ip += charlen;
					i += charlen;
					op += 2;
				}
			}
			if (i < len)
				return -ENAMETOOLONG;
		} else {
			/* no NLS table: treat input as Latin-1 */
			for (i = 0, ip = name, op = outname, *outlen = 0;
			     i < len && *outlen <= FAT_LFN_LEN;
			     i++, *outlen += 1) {
				*op++ = *ip++;
				*op++ = 0;
			}
			if (i < len)
				return -ENAMETOOLONG;
		}
	}

	/* Pad to a multiple of 13 chars (one LFN slot holds 13):
	 * first a mandatory NUL terminator, then 0xFFFF filler. */
	*longlen = *outlen;
	if (*outlen % 13) {
		*op++ = 0;
		*op++ = 0;
		*outlen += 1;
		if (*outlen % 13) {
			fill = 13 - (*outlen % 13);
			for (i = 0; i < fill; i++) {
				*op++ = 0xff;
				*op++ = 0xff;
			}
			*outlen += fill;
		}
	}

	return 0;
}

/*
 * Build the on-disk directory slots for @name: zero or more LFN slots
 * followed by the 8.3 alias entry.  On success *nr_slots is the total
 * slot count (alias included).
 */
static int vfat_build_slots(struct inode *dir, const unsigned char *name,
			    int len, int is_dir, int cluster,
			    struct timespec *ts,
			    struct msdos_dir_slot *slots, int *nr_slots)
{
	struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb);
	struct fat_mount_options *opts = &sbi->options;
	struct msdos_dir_slot *ps;
	struct msdos_dir_entry *de;
	unsigned char cksum, lcase;
	unsigned char msdos_name[MSDOS_NAME];
	wchar_t *uname;
	__le16 time, date;
	u8 time_cs;
	int err, ulen, usize, i;
	loff_t offset;

	*nr_slots = 0;

	uname = __getname();	/* scratch page for the UTF-16 name */
	if (!uname)
		return -ENOMEM;

	err = xlate_to_uni(name, len, (unsigned char *)uname, &ulen, &usize,
			   opts->unicode_xlate, opts->utf8, sbi->nls_io);
	if (err)
		goto out_free;

	err = vfat_is_used_badchars(uname, ulen);
	if (err)
		goto out_free;

	err = vfat_create_shortname(dir, sbi->nls_disk, uname, ulen,
				    msdos_name, &lcase);
	if (err < 0)
		goto out_free;
	else if (err == 1) {
		/* alias alone represents the name: no LFN slots needed */
		de = (struct msdos_dir_entry *)slots;
		err = 0;
		goto shortname;
	}

	/* build the entry of long file name */
	cksum = fat_checksum(msdos_name);

	*nr_slots = usize / 13;
	for (ps = slots, i = *nr_slots; i > 0; i--, ps++) {
		/* slots are stored last-chunk-first; id counts down */
		ps->id = i;
		ps->attr = ATTR_EXT;
		ps->reserved = 0;
		ps->alias_checksum = cksum;
		ps->start = 0;
		offset = (i - 1) * 13;
		fatwchar_to16(ps->name0_4, uname + offset, 5);
		fatwchar_to16(ps->name5_10, uname + offset + 5, 6);
		fatwchar_to16(ps->name11_12, uname + offset + 11, 2);
	}
	slots[0].id |= 0x40;	/* mark first physical slot as last logical */
	de = (struct msdos_dir_entry *)ps;

shortname:
	/* build the entry of 8.3 alias name */
	(*nr_slots)++;
	memcpy(de->name, msdos_name, MSDOS_NAME);
	de->attr = is_dir ? ATTR_DIR : ATTR_ARCH;
	de->lcase = lcase;
	fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
	de->time = de->ctime = time;
	de->date = de->cdate = de->adate = date;
	de->ctime_cs = time_cs;
	de->start = cpu_to_le16(cluster);
	de->starthi = cpu_to_le16(cluster >> 16);
	de->size = 0;
out_free:
	__putname(uname);
	return err;
}

/*
 * Create a new directory entry for @qname in @dir and update the
 * directory timestamps.  @sinfo describes the created entry on success.
 */
static int vfat_add_entry(struct inode *dir, struct qstr *qname, int is_dir,
			  int cluster, struct timespec *ts,
			  struct fat_slot_info *sinfo)
{
	struct msdos_dir_slot *slots;
	unsigned int len;
	int err, nr_slots;

	len = vfat_striptail_len(qname);
	if (len == 0)
		return -ENOENT;

	slots = kmalloc(sizeof(*slots) * MSDOS_SLOTS, GFP_NOFS);
	if (slots == NULL)
		return -ENOMEM;

	err = vfat_build_slots(dir, qname->name, len, is_dir, cluster, ts,
			       slots, &nr_slots);
	if (err)
		goto cleanup;

	err = fat_add_entries(dir, slots, nr_slots, sinfo);
	if (err)
		goto cleanup;

	/* update timestamp */
	dir->i_ctime = dir->i_mtime = dir->i_atime = *ts;
	if (IS_DIRSYNC(dir))
		(void)fat_sync_inode(dir);
	else
		mark_inode_dirty(dir);
cleanup:
	kfree(slots);
	return err;
}

/*
 * Look up @qname (trailing dots/spaces stripped) in @dir by its long
 * name; fills @sinfo on success.
 */
static int vfat_find(struct inode *dir, struct qstr *qname,
		     struct fat_slot_info *sinfo)
{
	unsigned int len = vfat_striptail_len(qname);
	if (len == 0)
		return -ENOENT;
	return fat_search_long(dir, qname->name, len, sinfo);
}

/*
 * (nfsd's) anonymous disconnected dentry?
 * NOTE: !IS_ROOT() is not anonymous (I.e.
d_splice_alias() did the job). */
static int vfat_d_anon_disconn(struct dentry *dentry)
{
	return IS_ROOT(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED);
}

/*
 * ->lookup() for vfat directories.  If the inode already has a usable
 * (non-anonymous) alias dentry we reuse it instead of splicing a new
 * one, so longname/8.3-alias lookups converge on a single dentry.
 */
static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct fat_slot_info sinfo;
	struct inode *inode;
	struct dentry *alias;
	int err;

	lock_super(sb);

	err = vfat_find(dir, &dentry->d_name, &sinfo);
	if (err) {
		if (err == -ENOENT) {
			/* negative dentry: splice NULL inode below */
			inode = NULL;
			goto out;
		}
		goto error;
	}

	inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
	brelse(sinfo.bh);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto error;
	}

	alias = d_find_alias(inode);
	if (alias && !vfat_d_anon_disconn(alias)) {
		/*
		 * This inode has non anonymous-DCACHE_DISCONNECTED
		 * dentry. This means, the user did ->lookup() by an
		 * another name (longname vs 8.3 alias of it) in past.
		 *
		 * Switch to new one for reason of locality if possible.
		 */
		BUG_ON(d_unhashed(alias));
		if (!S_ISDIR(inode->i_mode))
			d_move(alias, dentry);
		iput(inode);
		unlock_super(sb);
		return alias;
	} else
		dput(alias);

out:
	unlock_super(sb);
	/* d_time caches the parent's i_version for revalidation */
	dentry->d_time = dentry->d_parent->d_inode->i_version;
	dentry = d_splice_alias(inode, dentry);
	if (dentry)
		dentry->d_time = dentry->d_parent->d_inode->i_version;
	return dentry;

error:
	unlock_super(sb);
	return ERR_PTR(err);
}

/* ->create(): add a regular-file entry and instantiate its inode */
static int vfat_create(struct inode *dir, struct dentry *dentry, int mode,
		       struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct inode *inode;
	struct fat_slot_info sinfo;
	struct timespec ts;
	int err;

	lock_super(sb);

	ts = CURRENT_TIME_SEC;
	err = vfat_add_entry(dir, &dentry->d_name, 0, 0, &ts, &sinfo);
	if (err)
		goto out;
	dir->i_version++;

	inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
	brelse(sinfo.bh);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out;
	}
	inode->i_version++;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
	/* timestamp is already written, so mark_inode_dirty() is unneeded. */

	dentry->d_time = dentry->d_parent->d_inode->i_version;
	d_instantiate(dentry, inode);
out:
	unlock_super(sb);
	return err;
}

/* ->rmdir(): remove an empty directory entry and detach its inode */
static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = dir->i_sb;
	struct fat_slot_info sinfo;
	int err;

	lock_super(sb);

	err = fat_dir_empty(inode);
	if (err)
		goto out;
	err = vfat_find(dir, &dentry->d_name, &sinfo);
	if (err)
		goto out;

	err = fat_remove_entries(dir, &sinfo);	/* and releases bh */
	if (err)
		goto out;
	drop_nlink(dir);

	clear_nlink(inode);
	inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
	fat_detach(inode);
out:
	unlock_super(sb);

	return err;
}

/* ->unlink(): remove a file's directory entry and detach its inode */
static int vfat_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = dir->i_sb;
	struct fat_slot_info sinfo;
	int err;

	lock_super(sb);

	err = vfat_find(dir, &dentry->d_name, &sinfo);
	if (err)
		goto out;

	err = fat_remove_entries(dir, &sinfo);	/* and releases bh */
	if (err)
		goto out;
	clear_nlink(inode);
	inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
	fat_detach(inode);
out:
	unlock_super(sb);

	return err;
}

/* ->mkdir(): allocate a directory cluster, then add the entry for it */
static int vfat_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct inode *inode;
	struct fat_slot_info sinfo;
	struct timespec ts;
	int err, cluster;

	lock_super(sb);

	ts = CURRENT_TIME_SEC;
	cluster = fat_alloc_new_dir(dir, &ts);
	if (cluster < 0) {
		err = cluster;
		goto out;
	}
	err = vfat_add_entry(dir, &dentry->d_name, 1, cluster, &ts, &sinfo);
	if (err)
		goto out_free;
	dir->i_version++;
	inc_nlink(dir);

	inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
	brelse(sinfo.bh);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		/* the directory was completed, just return a error */
		goto out;
	}
	inode->i_version++;
	inode->i_nlink = 2;	/* "." plus the entry in the parent */
	inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
	/* timestamp is already written, so mark_inode_dirty() is unneeded. */

	dentry->d_time = dentry->d_parent->d_inode->i_version;
	d_instantiate(dentry, inode);

	unlock_super(sb);
	return 0;

out_free:
	fat_free_clusters(dir, cluster);
out:
	unlock_super(sb);
	return err;
}

/*
 * ->rename(): attach the victim's (or a freshly added) entry position
 * to old_inode, fix up ".." on cross-directory moves, then remove the
 * old entry.  The error_dotdot/error_inode paths try to roll the
 * on-disk state back; if that itself fails the fs is marked corrupted.
 */
static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct buffer_head *dotdot_bh;
	struct msdos_dir_entry *dotdot_de;
	struct inode *old_inode, *new_inode;
	struct fat_slot_info old_sinfo, sinfo;
	struct timespec ts;
	loff_t dotdot_i_pos, new_i_pos;
	int err, is_dir, update_dotdot, corrupt = 0;
	struct super_block *sb = old_dir->i_sb;

	old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
	old_inode = old_dentry->d_inode;
	new_inode = new_dentry->d_inode;
	lock_super(sb);
	err = vfat_find(old_dir, &old_dentry->d_name, &old_sinfo);
	if (err)
		goto out;

	is_dir = S_ISDIR(old_inode->i_mode);
	update_dotdot = (is_dir && old_dir != new_dir);
	if (update_dotdot) {
		if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de,
					 &dotdot_i_pos) < 0) {
			err = -EIO;
			goto out;
		}
	}

	ts = CURRENT_TIME_SEC;
	if (new_inode) {
		if (is_dir) {
			err = fat_dir_empty(new_inode);
			if (err)
				goto out;
		}
		new_i_pos = MSDOS_I(new_inode)->i_pos;
		fat_detach(new_inode);
	} else {
		err = vfat_add_entry(new_dir, &new_dentry->d_name, is_dir, 0,
				     &ts, &sinfo);
		if (err)
			goto out;
		new_i_pos = sinfo.i_pos;
	}
	new_dir->i_version++;

	/* re-home old_inode onto the new directory-entry position */
	fat_detach(old_inode);
	fat_attach(old_inode, new_i_pos);
	if (IS_DIRSYNC(new_dir)) {
		err = fat_sync_inode(old_inode);
		if (err)
			goto error_inode;
	} else
		mark_inode_dirty(old_inode);

	if (update_dotdot) {
		int start = MSDOS_I(new_dir)->i_logstart;
		dotdot_de->start = cpu_to_le16(start);
		dotdot_de->starthi = cpu_to_le16(start >> 16);
		mark_buffer_dirty_inode(dotdot_bh, old_inode);
		if (IS_DIRSYNC(new_dir)) {
			err = sync_dirty_buffer(dotdot_bh);
			if (err)
				goto error_dotdot;
		}
		drop_nlink(old_dir);
		if (!new_inode)
			inc_nlink(new_dir);
	}

	err = fat_remove_entries(old_dir, &old_sinfo);	/* and releases bh */
	old_sinfo.bh = NULL;
	if (err)
		goto error_dotdot;
	old_dir->i_version++;
	old_dir->i_ctime = old_dir->i_mtime = ts;
	if (IS_DIRSYNC(old_dir))
		(void)fat_sync_inode(old_dir);
	else
		mark_inode_dirty(old_dir);

	if (new_inode) {
		drop_nlink(new_inode);
		if (is_dir)
			drop_nlink(new_inode);	/* its ".." link too */
		new_inode->i_ctime = ts;
	}
out:
	brelse(sinfo.bh);
	brelse(dotdot_bh);
	brelse(old_sinfo.bh);
	unlock_super(sb);

	return err;

error_dotdot:
	/* data cluster is shared, serious corruption */
	corrupt = 1;

	if (update_dotdot) {
		int start = MSDOS_I(old_dir)->i_logstart;
		dotdot_de->start = cpu_to_le16(start);
		dotdot_de->starthi = cpu_to_le16(start >> 16);
		mark_buffer_dirty_inode(dotdot_bh, old_inode);
		corrupt |= sync_dirty_buffer(dotdot_bh);
	}
error_inode:
	fat_detach(old_inode);
	fat_attach(old_inode, old_sinfo.i_pos);
	if (new_inode) {
		fat_attach(new_inode, new_i_pos);
		if (corrupt)
			corrupt |= fat_sync_inode(new_inode);
	} else {
		/*
		 * If new entry was not sharing the data cluster, it
		 * shouldn't be serious corruption.
		 */
		int err2 = fat_remove_entries(new_dir, &sinfo);
		if (corrupt)
			corrupt |= err2;
		sinfo.bh = NULL;
	}
	if (corrupt < 0) {
		fat_fs_error(new_dir->i_sb,
			     "%s: Filesystem corrupted (i_pos %lld)",
			     __func__, sinfo.i_pos);
	}
	goto out;
}

static const struct inode_operations vfat_dir_inode_operations = {
	.create		= vfat_create,
	.lookup		= vfat_lookup,
	.unlink		= vfat_unlink,
	.mkdir		= vfat_mkdir,
	.rmdir		= vfat_rmdir,
	.rename		= vfat_rename,
	.setattr	= fat_setattr,
	.getattr	= fat_getattr,
};

/* Per-superblock setup: pick dentry ops based on name_check mode */
static void setup(struct super_block *sb)
{
	MSDOS_SB(sb)->dir_ops = &vfat_dir_inode_operations;
	if (MSDOS_SB(sb)->options.name_check != 's')
		sb->s_d_op = &vfat_ci_dentry_ops;
	else
		sb->s_d_op = &vfat_dentry_ops;
}

static int vfat_fill_super(struct super_block *sb, void *data, int silent)
{
	return fat_fill_super(sb, data, silent, 1, setup);
}

static struct dentry *vfat_mount(struct file_system_type *fs_type,
		       int flags, const char *dev_name,
		       void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, vfat_fill_super);
}

static struct file_system_type vfat_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "vfat",
	.mount		= vfat_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};

static int __init init_vfat_fs(void)
{
	return register_filesystem(&vfat_fs_type);
}

static void __exit exit_vfat_fs(void)
{
	unregister_filesystem(&vfat_fs_type);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VFAT filesystem support");
MODULE_AUTHOR("Gordon Chaffee");

module_init(init_vfat_fs)
module_exit(exit_vfat_fs)
gpl-2.0
sunxi/linux-3.14
drivers/gpu/drm/omapdrm/omap_plane.c
268
11791
/*
 * drivers/gpu/drm/omapdrm/omap_plane.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "drm_flip_work.h"

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/* some hackery because omapdss has an 'enum omap_plane' (which would be
 * better named omap_plane_id).. and compiler seems unhappy about having
 * both a 'struct omap_plane' and 'enum omap_plane'
 */
#define omap_plane _omap_plane

/*
 * plane funcs
 */

/* deferred-completion callback, fired from post_apply */
struct callback {
	void (*fxn)(void *);
	void *arg;
};

#define to_omap_plane(x) container_of(x, struct omap_plane, base)

struct omap_plane {
	struct drm_plane base;
	int id;  /* TODO rename omap_plane -> omap_plane_id in omapdss so I can use the enum */
	const char *name;
	struct omap_overlay_info info;
	struct omap_drm_apply apply;

	/* position/orientation of scanout within the fb: */
	struct omap_drm_window win;
	bool enabled;

	/* last fb that we pinned: */
	struct drm_framebuffer *pinned_fb;

	uint32_t nformats;
	uint32_t formats[32];

	struct omap_drm_irq error_irq;

	/* for deferring bo unpin's until next post_apply(): */
	struct drm_flip_work unpin_work;

	// XXX maybe get rid of this and handle vblank in crtc too?
	struct callback apply_done_cb;
};

/* flip-work handler: unpin and drop the ref of a previously pinned fb */
static void unpin_worker(struct drm_flip_work *work, void *val)
{
	struct omap_plane *omap_plane =
			container_of(work, struct omap_plane, unpin_work);
	struct drm_device *dev = omap_plane->base.dev;

	omap_framebuffer_unpin(val);
	mutex_lock(&dev->mode_config.mutex);
	drm_framebuffer_unreference(val);
	mutex_unlock(&dev->mode_config.mutex);
}

/* update which fb (if any) is pinned for scanout */
static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
{
	struct omap_plane *omap_plane = to_omap_plane(plane);
	struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;

	if (pinned_fb != fb) {
		int ret = 0;

		DBG("%p -> %p", pinned_fb, fb);

		/* take a ref + pin the new fb before letting go of the old
		 * one; the old fb is unpinned asynchronously via flip-work */
		if (fb) {
			drm_framebuffer_reference(fb);
			ret = omap_framebuffer_pin(fb);
		}

		if (pinned_fb)
			drm_flip_work_queue(&omap_plane->unpin_work, pinned_fb);

		if (ret) {
			dev_err(plane->dev->dev, "could not swap %p -> %p\n",
					omap_plane->pinned_fb, fb);
			drm_framebuffer_unreference(fb);
			omap_plane->pinned_fb = NULL;
			return ret;
		}

		omap_plane->pinned_fb = fb;
	}

	return 0;
}

/* pre_apply: program the DSS overlay registers for the pending state */
static void omap_plane_pre_apply(struct omap_drm_apply *apply)
{
	struct omap_plane *omap_plane =
			container_of(apply, struct omap_plane, apply);
	struct omap_drm_window *win = &omap_plane->win;
	struct drm_plane *plane = &omap_plane->base;
	struct drm_device *dev = plane->dev;
	struct omap_overlay_info *info = &omap_plane->info;
	struct drm_crtc *crtc = plane->crtc;
	enum omap_channel channel;
	bool enabled = omap_plane->enabled && crtc;
	bool ilace, replication;
	int ret;

	DBG("%s, enabled=%d", omap_plane->name, enabled);

	/* if fb has changed, pin new fb: */
	update_pin(plane, enabled ? plane->fb : NULL);

	if (!enabled) {
		dispc_ovl_enable(omap_plane->id, false);
		return;
	}

	channel = omap_crtc_channel(crtc);

	/* update scanout: */
	omap_framebuffer_update_scanout(plane->fb, win, info);

	DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
			info->out_width, info->out_height,
			info->screen_width);
	DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
			info->paddr, info->p_uv_addr);

	/* TODO: */
	ilace = false;
	replication = false;

	/* and finally, update omapdss: */
	ret = dispc_ovl_setup(omap_plane->id, info, replication,
			omap_crtc_timings(crtc), false);
	if (ret) {
		dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret);
		return;
	}

	dispc_ovl_enable(omap_plane->id, true);
	dispc_ovl_set_channel_out(omap_plane->id, channel);
}

/* post_apply: fire the completion callback and flush deferred unpins */
static void omap_plane_post_apply(struct omap_drm_apply *apply)
{
	struct omap_plane *omap_plane =
			container_of(apply, struct omap_plane, apply);
	struct drm_plane *plane = &omap_plane->base;
	struct omap_drm_private *priv = plane->dev->dev_private;
	struct omap_overlay_info *info = &omap_plane->info;
	struct callback cb;

	/* clear the stored callback before invoking it, so a new flip
	 * can be queued from inside the callback */
	cb = omap_plane->apply_done_cb;
	omap_plane->apply_done_cb.fxn = NULL;

	drm_flip_work_commit(&omap_plane->unpin_work, priv->wq);

	if (cb.fxn)
		cb.fxn(cb.arg);

	if (omap_plane->enabled) {
		omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
				info->out_width, info->out_height);
	}
}

/* queue this plane's pre/post apply via its crtc (no-op without a crtc) */
static int apply(struct drm_plane *plane)
{
	if (plane->crtc) {
		struct omap_plane *omap_plane = to_omap_plane(plane);
		return omap_crtc_apply(plane->crtc, &omap_plane->apply);
	}
	return 0;
}

/*
 * Record the new window/src geometry (and optional page-flip completion
 * callback) and kick an apply.  src_* are Q16 fixed point.
 */
int omap_plane_mode_set(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		int crtc_x, int crtc_y,
		unsigned int crtc_w, unsigned int crtc_h,
		uint32_t src_x, uint32_t src_y,
		uint32_t src_w, uint32_t src_h,
		void (*fxn)(void *), void *arg)
{
	struct omap_plane *omap_plane = to_omap_plane(plane);
	struct omap_drm_window *win = &omap_plane->win;

	win->crtc_x = crtc_x;
	win->crtc_y = crtc_y;
	win->crtc_w = crtc_w;
	win->crtc_h = crtc_h;

	/* src values are in Q16 fixed point, convert to integer: */
	win->src_x = src_x >> 16;
	win->src_y = src_y >> 16;
	win->src_w = src_w >> 16;
	win->src_h = src_h >> 16;

	if (fxn) {
		/* omap_crtc should ensure that a new page flip
		 * isn't permitted while there is one pending:
		 */
		BUG_ON(omap_plane->apply_done_cb.fxn);

		omap_plane->apply_done_cb.fxn = fxn;
		omap_plane->apply_done_cb.arg = arg;
	}

	plane->fb = fb;
	plane->crtc = crtc;

	return apply(plane);
}

/* drm_plane_funcs.update_plane entry point */
static int omap_plane_update(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		int crtc_x, int crtc_y,
		unsigned int crtc_w, unsigned int crtc_h,
		uint32_t src_x, uint32_t src_y,
		uint32_t src_w, uint32_t src_h)
{
	struct omap_plane *omap_plane = to_omap_plane(plane);
	omap_plane->enabled = true;

	/* swap the plane->fb reference to the new fb */
	if (plane->fb)
		drm_framebuffer_unreference(plane->fb);

	drm_framebuffer_reference(fb);

	return omap_plane_mode_set(plane, crtc, fb,
			crtc_x, crtc_y, crtc_w, crtc_h,
			src_x, src_y, src_w, src_h,
			NULL, NULL);
}

/* drm_plane_funcs.disable_plane: reset rotation and power the plane off */
static int omap_plane_disable(struct drm_plane *plane)
{
	struct omap_plane *omap_plane = to_omap_plane(plane);
	omap_plane->win.rotation = BIT(DRM_ROTATE_0);
	return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
}

/* drm_plane_funcs.destroy: disable, unregister irq, free everything */
static void omap_plane_destroy(struct drm_plane *plane)
{
	struct omap_plane *omap_plane = to_omap_plane(plane);

	DBG("%s", omap_plane->name);

	omap_irq_unregister(plane->dev, &omap_plane->error_irq);

	omap_plane_disable(plane);
	drm_plane_cleanup(plane);

	drm_flip_work_cleanup(&omap_plane->unpin_work);

	kfree(omap_plane);
}

/* enable/disable the plane; only applies when the state changes */
int omap_plane_dpms(struct drm_plane *plane, int mode)
{
	struct omap_plane *omap_plane = to_omap_plane(plane);
	bool enabled = (mode == DRM_MODE_DPMS_ON);
	int ret = 0;

	if (enabled != omap_plane->enabled) {
		omap_plane->enabled = enabled;
		ret = apply(plane);
	}

	return ret;
}

/* helper to install properties which are common to planes and crtcs */
void omap_plane_install_properties(struct drm_plane *plane,
		struct drm_mode_object *obj)
{
	struct drm_device *dev = plane->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct drm_property *prop;

	/* rotation only makes sense with the DMM/tiler present */
	if (priv->has_dmm) {
		prop = priv->rotation_prop;
		if (!prop) {
			const struct drm_prop_enum_list props[] = {
					{ DRM_ROTATE_0,   "rotate-0" },
					{ DRM_ROTATE_90,  "rotate-90" },
					{ DRM_ROTATE_180, "rotate-180" },
					{ DRM_ROTATE_270, "rotate-270" },
					{ DRM_REFLECT_X,  "reflect-x" },
					{ DRM_REFLECT_Y,  "reflect-y" },
			};
			prop = drm_property_create_bitmask(dev, 0, "rotation",
					props, ARRAY_SIZE(props));
			if (prop == NULL)
				return;
			priv->rotation_prop = prop;
		}
		drm_object_attach_property(obj, prop, 0);
	}

	prop = priv->zorder_prop;
	if (!prop) {
		prop = drm_property_create_range(dev, 0, "zorder", 0, 3);
		if (prop == NULL)
			return;
		priv->zorder_prop = prop;
	}
	drm_object_attach_property(obj, prop, 0);
}

/* handle writes to the rotation/zorder properties */
int omap_plane_set_property(struct drm_plane *plane,
		struct drm_property *property, uint64_t val)
{
	struct omap_plane *omap_plane = to_omap_plane(plane);
	struct omap_drm_private *priv = plane->dev->dev_private;
	int ret = -EINVAL;

	if (property == priv->rotation_prop) {
		DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val);
		omap_plane->win.rotation = val;
		ret = apply(plane);
	} else if (property == priv->zorder_prop) {
		DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val);
		omap_plane->info.zorder = val;
		ret = apply(plane);
	}

	return ret;
}

static const struct drm_plane_funcs omap_plane_funcs = {
	.update_plane = omap_plane_update,
	.disable_plane = omap_plane_disable,
	.destroy = omap_plane_destroy,
	.set_property = omap_plane_set_property,
};

/* FIFO-underflow (etc.) error irq: log only */
static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
{
	struct omap_plane *omap_plane =
			container_of(irq, struct omap_plane, error_irq);
	DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus);
}

static const char *plane_names[] = {
	[OMAP_DSS_GFX] = "gfx",
	[OMAP_DSS_VIDEO1] = "vid1",
	[OMAP_DSS_VIDEO2] = "vid2",
	[OMAP_DSS_VIDEO3] = "vid3",
};

static const uint32_t error_irqs[] = {
	[OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
};

/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
		int id, bool private_plane)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct drm_plane *plane = NULL;
	struct omap_plane *omap_plane;
	struct omap_overlay_info *info;
	int ret;

	DBG("%s: priv=%d", plane_names[id], private_plane);

	omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
	if (!omap_plane)
		goto fail;

	ret = drm_flip_work_init(&omap_plane->unpin_work, 16,
			"unpin", unpin_worker);
	if (ret) {
		dev_err(dev->dev, "could not allocate unpin FIFO\n");
		/* NOTE(review): 'plane' is still NULL here, so the fail
		 * path never frees omap_plane — looks like a leak on this
		 * branch; confirm against upstream history before fixing */
		goto fail;
	}

	omap_plane->nformats = omap_framebuffer_get_formats(
			omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
			dss_feat_get_supported_color_modes(id));
	omap_plane->id = id;
	omap_plane->name = plane_names[id];

	plane = &omap_plane->base;

	omap_plane->apply.pre_apply  = omap_plane_pre_apply;
	omap_plane->apply.post_apply = omap_plane_post_apply;

	omap_plane->error_irq.irqmask = error_irqs[id];
	omap_plane->error_irq.irq = omap_plane_error_irq;
	omap_irq_register(dev, &omap_plane->error_irq);

	drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs,
			omap_plane->formats, omap_plane->nformats, private_plane);

	omap_plane_install_properties(plane, &plane->base);

	/* get our starting configuration, set defaults for parameters
	 * we don't currently use, etc:
	 */
	info = &omap_plane->info;
	info->rotation_type = OMAP_DSS_ROT_DMA;
	info->rotation = OMAP_DSS_ROT_0;
	info->global_alpha = 0xff;
	info->mirror = 0;

	/* Set defaults depending on whether we are a CRTC or overlay
	 * layer.
	 * TODO add ioctl to give userspace an API to change this.. this
	 * will come in a subsequent patch.
	 */
	if (private_plane)
		omap_plane->info.zorder = 0;
	else
		omap_plane->info.zorder = id;

	return plane;

fail:
	if (plane)
		omap_plane_destroy(plane);

	return NULL;
}
gpl-2.0
moonman/linux-stable
fs/lockd/host.c
524
17470
/*
 * linux/fs/lockd/host.c
 *
 * Management for NLM peer hosts. The nlm_host struct is shared
 * between client and server implementation. The only reason to
 * do so is to reduce code bloat.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/mutex.h>

#include <linux/sunrpc/svc_xprt.h>

#include <net/ipv6.h>

#include "netns.h"

#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
#define NLM_HOST_NRHASH		32
#define NLM_HOST_REBIND		(60 * HZ)
#define NLM_HOST_EXPIRE		(300 * HZ)
#define NLM_HOST_COLLECT	(120 * HZ)

/* separate hash tables for hosts we act toward as server vs client */
static struct hlist_head	nlm_server_hosts[NLM_HOST_NRHASH];
static struct hlist_head	nlm_client_hosts[NLM_HOST_NRHASH];

#define for_each_host(host, chain, table) \
	for ((chain) = (table); \
	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
		hlist_for_each_entry((host), (chain), h_hash)

#define for_each_host_safe(host, next, chain, table) \
	for ((chain) = (table); \
	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
		hlist_for_each_entry_safe((host), (next), \
						(chain), h_hash)

static unsigned long		nrhosts;	/* global host count */
static DEFINE_MUTEX(nlm_host_mutex);		/* guards both hash tables */

static void			nlm_gc_hosts(struct net *net);

/* parameter bundle for the host-cache lookup helpers */
struct nlm_lookup_host_info {
	const int		server;		/* search for server|client */
	const struct sockaddr	*sap;		/* address to search for */
	const size_t		salen;		/* it's length */
	const unsigned short	protocol;	/* transport to search for*/
	const u32		version;	/* NLM version to search for */
	const char		*hostname;	/* remote's hostname */
	const size_t		hostname_len;	/* it's length */
	const int		noresvport;	/* use non-priv port */
	struct net		*net;		/* network namespace to bind */
};

/*
 * Hash function must work well on big- and little-endian platforms
 */
static unsigned int __nlm_hash32(const __be32 n)
{
	/* fold high halfword then high byte into the low bits */
	unsigned int hash = (__force u32)n ^ ((__force u32)n >> 16);
	return hash ^ (hash >> 8);
}

static unsigned int __nlm_hash_addr4(const struct sockaddr *sap)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	return __nlm_hash32(sin->sin_addr.s_addr);
}

static unsigned int __nlm_hash_addr6(const struct sockaddr *sap)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr addr = sin6->sin6_addr;
	return __nlm_hash32(addr.s6_addr32[0]) ^
	       __nlm_hash32(addr.s6_addr32[1]) ^
	       __nlm_hash32(addr.s6_addr32[2]) ^
	       __nlm_hash32(addr.s6_addr32[3]);
}

/* bucket index for @sap; unknown families all land in bucket 0 */
static unsigned int nlm_hash_address(const struct sockaddr *sap)
{
	unsigned int hash;

	switch (sap->sa_family) {
	case AF_INET:
		hash = __nlm_hash_addr4(sap);
		break;
	case AF_INET6:
		hash = __nlm_hash_addr6(sap);
		break;
	default:
		hash = 0;
	}
	return hash & (NLM_HOST_NRHASH - 1);
}

/*
 * Allocate and initialize an nlm_host.  Common to both client and server.
 * Takes a reference on @nsm if supplied, otherwise obtains a (possibly
 * new) nsm_handle; returns NULL on allocation failure.  The new host
 * starts with h_count == 1.
 */
static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
				       struct nsm_handle *nsm)
{
	struct nlm_host *host = NULL;
	unsigned long now = jiffies;

	if (nsm != NULL)
		atomic_inc(&nsm->sm_count);
	else {
		host = NULL;
		nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
					ni->hostname, ni->hostname_len);
		if (unlikely(nsm == NULL)) {
			dprintk("lockd: %s failed; no nsm handle\n",
				__func__);
			goto out;
		}
	}

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (unlikely(host == NULL)) {
		dprintk("lockd: %s failed; no memory\n", __func__);
		nsm_release(nsm);	/* drop the ref taken above */
		goto out;
	}

	memcpy(nlm_addr(host), ni->sap, ni->salen);
	host->h_addrlen = ni->salen;
	rpc_set_port(nlm_addr(host), 0);
	host->h_srcaddrlen = 0;

	host->h_rpcclnt    = NULL;
	host->h_name	   = nsm->sm_name;
	host->h_version    = ni->version;
	host->h_proto      = ni->protocol;
	host->h_reclaiming = 0;
	host->h_server     = ni->server;
	host->h_noresvport = ni->noresvport;
	host->h_inuse      = 0;
	init_waitqueue_head(&host->h_gracewait);
	init_rwsem(&host->h_rwsem);
	host->h_state      = 0;
	host->h_nsmstate   = 0;
	host->h_pidcount   = 0;
	atomic_set(&host->h_count, 1);
	mutex_init(&host->h_mutex);
	host->h_nextrebind = now + NLM_HOST_REBIND;
	host->h_expires    = now + NLM_HOST_EXPIRE;
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
	INIT_LIST_HEAD(&host->h_reclaim);
	host->h_nsmhandle  = nsm;
	host->h_addrbuf    = nsm->sm_addrbuf;
	host->net	   = ni->net;
	strlcpy(host->nodename, utsname()->nodename, sizeof(host->nodename));

out:
	return host;
}

/*
 * Destroy an nlm_host and free associated resources
 *
 * Caller must hold nlm_host_mutex.
 */
static void nlm_destroy_host_locked(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;
	struct lockd_net *ln = net_generic(host->net, lockd_net_id);

	dprintk("lockd: destroy host %s\n", host->h_name);

	hlist_del_init(&host->h_hash);

	nsm_unmonitor(host);
	nsm_release(host->h_nsmhandle);

	clnt = host->h_rpcclnt;
	if (clnt != NULL)
		rpc_shutdown_client(clnt);
	kfree(host);

	/* maintain both the per-namespace and the global counters */
	ln->nrhosts--;
	nrhosts--;
}

/**
 * nlmclnt_lookup_host - Find an NLM host handle matching a remote server
 * @sap: network address of server
 * @salen: length of server address
 * @protocol: transport protocol to use
 * @version: NLM protocol version
 * @hostname: '\0'-terminated hostname of server
 * @noresvport: 1 if non-privileged port should be used
 *
 * Returns an nlm_host structure that matches the passed-in
 * [server address, transport protocol, NLM version, server hostname].
 * If one doesn't already exist in the host cache, a new handle is
 * created and returned.
*/ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, const size_t salen, const unsigned short protocol, const u32 version, const char *hostname, int noresvport, struct net *net) { struct nlm_lookup_host_info ni = { .server = 0, .sap = sap, .salen = salen, .protocol = protocol, .version = version, .hostname = hostname, .hostname_len = strlen(hostname), .noresvport = noresvport, .net = net, }; struct hlist_head *chain; struct nlm_host *host; struct nsm_handle *nsm = NULL; struct lockd_net *ln = net_generic(net, lockd_net_id); dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__, (hostname ? hostname : "<none>"), version, (protocol == IPPROTO_UDP ? "udp" : "tcp")); mutex_lock(&nlm_host_mutex); chain = &nlm_client_hosts[nlm_hash_address(sap)]; hlist_for_each_entry(host, chain, h_hash) { if (host->net != net) continue; if (!rpc_cmp_addr(nlm_addr(host), sap)) continue; /* Same address. Share an NSM handle if we already have one */ if (nsm == NULL) nsm = host->h_nsmhandle; if (host->h_proto != protocol) continue; if (host->h_version != version) continue; nlm_get_host(host); dprintk("lockd: %s found host %s (%s)\n", __func__, host->h_name, host->h_addrbuf); goto out; } host = nlm_alloc_host(&ni, nsm); if (unlikely(host == NULL)) goto out; hlist_add_head(&host->h_hash, chain); ln->nrhosts++; nrhosts++; dprintk("lockd: %s created host %s (%s)\n", __func__, host->h_name, host->h_addrbuf); out: mutex_unlock(&nlm_host_mutex); return host; } /** * nlmclnt_release_host - release client nlm_host * @host: nlm_host to release * */ void nlmclnt_release_host(struct nlm_host *host) { if (host == NULL) return; dprintk("lockd: release client host %s\n", host->h_name); WARN_ON_ONCE(host->h_server); if (atomic_dec_and_test(&host->h_count)) { WARN_ON_ONCE(!list_empty(&host->h_lockowners)); WARN_ON_ONCE(!list_empty(&host->h_granted)); WARN_ON_ONCE(!list_empty(&host->h_reclaim)); mutex_lock(&nlm_host_mutex); nlm_destroy_host_locked(host); mutex_unlock(&nlm_host_mutex); 
} } /** * nlmsvc_lookup_host - Find an NLM host handle matching a remote client * @rqstp: incoming NLM request * @hostname: name of client host * @hostname_len: length of client hostname * * Returns an nlm_host structure that matches the [client address, * transport protocol, NLM version, client hostname] of the passed-in * NLM request. If one doesn't already exist in the host cache, a * new handle is created and returned. * * Before possibly creating a new nlm_host, construct a sockaddr * for a specific source address in case the local system has * multiple network addresses. The family of the address in * rq_daddr is guaranteed to be the same as the family of the * address in rq_addr, so it's safe to use the same family for * the source address. */ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, const char *hostname, const size_t hostname_len) { struct hlist_head *chain; struct nlm_host *host = NULL; struct nsm_handle *nsm = NULL; struct sockaddr *src_sap = svc_daddr(rqstp); size_t src_len = rqstp->rq_daddrlen; struct net *net = SVC_NET(rqstp); struct nlm_lookup_host_info ni = { .server = 1, .sap = svc_addr(rqstp), .salen = rqstp->rq_addrlen, .protocol = rqstp->rq_prot, .version = rqstp->rq_vers, .hostname = hostname, .hostname_len = hostname_len, .net = net, }; struct lockd_net *ln = net_generic(net, lockd_net_id); dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__, (int)hostname_len, hostname, rqstp->rq_vers, (rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp")); mutex_lock(&nlm_host_mutex); if (time_after_eq(jiffies, ln->next_gc)) nlm_gc_hosts(net); chain = &nlm_server_hosts[nlm_hash_address(ni.sap)]; hlist_for_each_entry(host, chain, h_hash) { if (host->net != net) continue; if (!rpc_cmp_addr(nlm_addr(host), ni.sap)) continue; /* Same address. 
Share an NSM handle if we already have one */ if (nsm == NULL) nsm = host->h_nsmhandle; if (host->h_proto != ni.protocol) continue; if (host->h_version != ni.version) continue; if (!rpc_cmp_addr(nlm_srcaddr(host), src_sap)) continue; /* Move to head of hash chain. */ hlist_del(&host->h_hash); hlist_add_head(&host->h_hash, chain); nlm_get_host(host); dprintk("lockd: %s found host %s (%s)\n", __func__, host->h_name, host->h_addrbuf); goto out; } host = nlm_alloc_host(&ni, nsm); if (unlikely(host == NULL)) goto out; memcpy(nlm_srcaddr(host), src_sap, src_len); host->h_srcaddrlen = src_len; hlist_add_head(&host->h_hash, chain); ln->nrhosts++; nrhosts++; dprintk("lockd: %s created host %s (%s)\n", __func__, host->h_name, host->h_addrbuf); out: mutex_unlock(&nlm_host_mutex); return host; } /** * nlmsvc_release_host - release server nlm_host * @host: nlm_host to release * * Host is destroyed later in nlm_gc_host(). */ void nlmsvc_release_host(struct nlm_host *host) { if (host == NULL) return; dprintk("lockd: release server host %s\n", host->h_name); WARN_ON_ONCE(!host->h_server); atomic_dec(&host->h_count); } /* * Create the NLM RPC client for an NLM peer */ struct rpc_clnt * nlm_bind_host(struct nlm_host *host) { struct rpc_clnt *clnt; dprintk("lockd: nlm_bind_host %s (%s)\n", host->h_name, host->h_addrbuf); /* Lock host handle */ mutex_lock(&host->h_mutex); /* If we've already created an RPC client, check whether * RPC rebind is required */ if ((clnt = host->h_rpcclnt) != NULL) { if (time_after_eq(jiffies, host->h_nextrebind)) { rpc_force_rebind(clnt); host->h_nextrebind = jiffies + NLM_HOST_REBIND; dprintk("lockd: next rebind in %lu jiffies\n", host->h_nextrebind - jiffies); } } else { unsigned long increment = nlmsvc_timeout; struct rpc_timeout timeparms = { .to_initval = increment, .to_increment = increment, .to_maxval = increment * 6UL, .to_retries = 5U, }; struct rpc_create_args args = { .net = host->net, .protocol = host->h_proto, .address = nlm_addr(host), 
.addrsize = host->h_addrlen, .timeout = &timeparms, .servername = host->h_name, .program = &nlm_program, .version = host->h_version, .authflavor = RPC_AUTH_UNIX, .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_AUTOBIND), }; /* * lockd retries server side blocks automatically so we want * those to be soft RPC calls. Client side calls need to be * hard RPC tasks. */ if (!host->h_server) args.flags |= RPC_CLNT_CREATE_HARDRTRY; if (host->h_noresvport) args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; if (host->h_srcaddrlen) args.saddress = nlm_srcaddr(host); clnt = rpc_create(&args); if (!IS_ERR(clnt)) host->h_rpcclnt = clnt; else { printk("lockd: couldn't create RPC handle for %s\n", host->h_name); clnt = NULL; } } mutex_unlock(&host->h_mutex); return clnt; } /* * Force a portmap lookup of the remote lockd port */ void nlm_rebind_host(struct nlm_host *host) { dprintk("lockd: rebind host %s\n", host->h_name); if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) { rpc_force_rebind(host->h_rpcclnt); host->h_nextrebind = jiffies + NLM_HOST_REBIND; } } /* * Increment NLM host count */ struct nlm_host * nlm_get_host(struct nlm_host *host) { if (host) { dprintk("lockd: get host %s\n", host->h_name); atomic_inc(&host->h_count); host->h_expires = jiffies + NLM_HOST_EXPIRE; } return host; } static struct nlm_host *next_host_state(struct hlist_head *cache, struct nsm_handle *nsm, const struct nlm_reboot *info) { struct nlm_host *host; struct hlist_head *chain; mutex_lock(&nlm_host_mutex); for_each_host(host, chain, cache) { if (host->h_nsmhandle == nsm && host->h_nsmstate != info->state) { host->h_nsmstate = info->state; host->h_state++; nlm_get_host(host); mutex_unlock(&nlm_host_mutex); return host; } } mutex_unlock(&nlm_host_mutex); return NULL; } /** * nlm_host_rebooted - Release all resources held by rebooted host * @net: network namespace * @info: pointer to decoded results of NLM_SM_NOTIFY call * * We were notified that the specified host has rebooted. 
Release * all resources held by that peer. */ void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info) { struct nsm_handle *nsm; struct nlm_host *host; nsm = nsm_reboot_lookup(net, info); if (unlikely(nsm == NULL)) return; /* Mark all hosts tied to this NSM state as having rebooted. * We run the loop repeatedly, because we drop the host table * lock for this. * To avoid processing a host several times, we match the nsmstate. */ while ((host = next_host_state(nlm_server_hosts, nsm, info)) != NULL) { nlmsvc_free_host_resources(host); nlmsvc_release_host(host); } while ((host = next_host_state(nlm_client_hosts, nsm, info)) != NULL) { nlmclnt_recovery(host); nlmclnt_release_host(host); } nsm_release(nsm); } static void nlm_complain_hosts(struct net *net) { struct hlist_head *chain; struct nlm_host *host; if (net) { struct lockd_net *ln = net_generic(net, lockd_net_id); if (ln->nrhosts == 0) return; printk(KERN_WARNING "lockd: couldn't shutdown host module for net %p!\n", net); dprintk("lockd: %lu hosts left in net %p:\n", ln->nrhosts, net); } else { if (nrhosts == 0) return; printk(KERN_WARNING "lockd: couldn't shutdown host module!\n"); dprintk("lockd: %lu hosts left:\n", nrhosts); } for_each_host(host, chain, nlm_server_hosts) { if (net && host->net != net) continue; dprintk(" %s (cnt %d use %d exp %ld net %p)\n", host->h_name, atomic_read(&host->h_count), host->h_inuse, host->h_expires, host->net); } } void nlm_shutdown_hosts_net(struct net *net) { struct hlist_head *chain; struct nlm_host *host; mutex_lock(&nlm_host_mutex); /* First, make all hosts eligible for gc */ dprintk("lockd: nuking all hosts in net %p...\n", net); for_each_host(host, chain, nlm_server_hosts) { if (net && host->net != net) continue; host->h_expires = jiffies - 1; if (host->h_rpcclnt) { rpc_shutdown_client(host->h_rpcclnt); host->h_rpcclnt = NULL; } } /* Then, perform a garbage collection pass */ nlm_gc_hosts(net); mutex_unlock(&nlm_host_mutex); nlm_complain_hosts(net); } 
/* * Shut down the hosts module. * Note that this routine is called only at server shutdown time. */ void nlm_shutdown_hosts(void) { dprintk("lockd: shutting down host module\n"); nlm_shutdown_hosts_net(NULL); } /* * Garbage collect any unused NLM hosts. * This GC combines reference counting for async operations with * mark & sweep for resources held by remote clients. */ static void nlm_gc_hosts(struct net *net) { struct hlist_head *chain; struct hlist_node *next; struct nlm_host *host; dprintk("lockd: host garbage collection for net %p\n", net); for_each_host(host, chain, nlm_server_hosts) { if (net && host->net != net) continue; host->h_inuse = 0; } /* Mark all hosts that hold locks, blocks or shares */ nlmsvc_mark_resources(net); for_each_host_safe(host, next, chain, nlm_server_hosts) { if (net && host->net != net) continue; if (atomic_read(&host->h_count) || host->h_inuse || time_before(jiffies, host->h_expires)) { dprintk("nlm_gc_hosts skipping %s " "(cnt %d use %d exp %ld net %p)\n", host->h_name, atomic_read(&host->h_count), host->h_inuse, host->h_expires, host->net); continue; } nlm_destroy_host_locked(host); } if (net) { struct lockd_net *ln = net_generic(net, lockd_net_id); ln->next_gc = jiffies + NLM_HOST_COLLECT; } }
gpl-2.0
balister/linux-omap-philip
drivers/net/stmmac/enh_desc.c
3084
9650
/******************************************************************************* This contains the functions to handle the enhanced descriptors. Copyright (C) 2007-2009 STMicroelectronics Ltd This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ #include "common.h" static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, struct dma_desc *p, void __iomem *ioaddr) { int ret = 0; struct net_device_stats *stats = (struct net_device_stats *)data; if (unlikely(p->des01.etx.error_summary)) { CHIP_DBG(KERN_ERR "GMAC TX error... 
0x%08x\n", p->des01.etx); if (unlikely(p->des01.etx.jabber_timeout)) { CHIP_DBG(KERN_ERR "\tjabber_timeout error\n"); x->tx_jabber++; } if (unlikely(p->des01.etx.frame_flushed)) { CHIP_DBG(KERN_ERR "\tframe_flushed error\n"); x->tx_frame_flushed++; dwmac_dma_flush_tx_fifo(ioaddr); } if (unlikely(p->des01.etx.loss_carrier)) { CHIP_DBG(KERN_ERR "\tloss_carrier error\n"); x->tx_losscarrier++; stats->tx_carrier_errors++; } if (unlikely(p->des01.etx.no_carrier)) { CHIP_DBG(KERN_ERR "\tno_carrier error\n"); x->tx_carrier++; stats->tx_carrier_errors++; } if (unlikely(p->des01.etx.late_collision)) { CHIP_DBG(KERN_ERR "\tlate_collision error\n"); stats->collisions += p->des01.etx.collision_count; } if (unlikely(p->des01.etx.excessive_collisions)) { CHIP_DBG(KERN_ERR "\texcessive_collisions\n"); stats->collisions += p->des01.etx.collision_count; } if (unlikely(p->des01.etx.excessive_deferral)) { CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n"); x->tx_deferred++; } if (unlikely(p->des01.etx.underflow_error)) { CHIP_DBG(KERN_ERR "\tunderflow error\n"); dwmac_dma_flush_tx_fifo(ioaddr); x->tx_underflow++; } if (unlikely(p->des01.etx.ip_header_error)) { CHIP_DBG(KERN_ERR "\tTX IP header csum error\n"); x->tx_ip_header_error++; } if (unlikely(p->des01.etx.payload_error)) { CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n"); x->tx_payload_error++; dwmac_dma_flush_tx_fifo(ioaddr); } ret = -1; } if (unlikely(p->des01.etx.deferred)) { CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n"); x->tx_deferred++; } #ifdef STMMAC_VLAN_TAG_USED if (p->des01.etx.vlan_frame) { CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n"); x->tx_vlan++; } #endif return ret; } static int enh_desc_get_tx_len(struct dma_desc *p) { return p->des01.etx.buffer1_size; } static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) { int ret = good_frame; u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7; /* bits 5 7 0 | Frame status * ---------------------------------------------------------- * 0 0 0 
| IEEE 802.3 Type frame (length < 1536 octects) * 1 0 0 | IPv4/6 No CSUM errorS. * 1 0 1 | IPv4/6 CSUM PAYLOAD error * 1 1 0 | IPv4/6 CSUM IP HR error * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS * 0 0 1 | IPv4/6 unsupported IP PAYLOAD * 0 1 1 | COE bypassed.. no IPv4/6 frame * 0 1 0 | Reserved. */ if (status == 0x0) { CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n"); ret = llc_snap; } else if (status == 0x4) { CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n"); ret = good_frame; } else if (status == 0x5) { CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n"); ret = csum_none; } else if (status == 0x6) { CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n"); ret = csum_none; } else if (status == 0x7) { CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header and Payload Error.\n"); ret = csum_none; } else if (status == 0x1) { CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n"); ret = discard_frame; } else if (status == 0x3) { CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n"); ret = discard_frame; } return ret; } static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, struct dma_desc *p) { int ret = good_frame; struct net_device_stats *stats = (struct net_device_stats *)data; if (unlikely(p->des01.erx.error_summary)) { CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n", p->des01.erx); if (unlikely(p->des01.erx.descriptor_error)) { CHIP_DBG(KERN_ERR "\tdescriptor error\n"); x->rx_desc++; stats->rx_length_errors++; } if (unlikely(p->des01.erx.overflow_error)) { CHIP_DBG(KERN_ERR "\toverflow error\n"); x->rx_gmac_overflow++; } if (unlikely(p->des01.erx.ipc_csum_error)) CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n"); if (unlikely(p->des01.erx.late_collision)) { CHIP_DBG(KERN_ERR "\tlate_collision error\n"); stats->collisions++; stats->collisions++; } if (unlikely(p->des01.erx.receive_watchdog)) { CHIP_DBG(KERN_ERR "\treceive_watchdog error\n"); x->rx_watchdog++; } if 
(unlikely(p->des01.erx.error_gmii)) { CHIP_DBG(KERN_ERR "\tReceive Error\n"); x->rx_mii++; } if (unlikely(p->des01.erx.crc_error)) { CHIP_DBG(KERN_ERR "\tCRC error\n"); x->rx_crc++; stats->rx_crc_errors++; } ret = discard_frame; } /* After a payload csum error, the ES bit is set. * It doesn't match with the information reported into the databook. * At any rate, we need to understand if the CSUM hw computation is ok * and report this info to the upper layers. */ ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error, p->des01.erx.frame_type, p->des01.erx.payload_csum_error); if (unlikely(p->des01.erx.dribbling)) { CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n"); ret = discard_frame; } if (unlikely(p->des01.erx.sa_filter_fail)) { CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n"); x->sa_rx_filter_fail++; ret = discard_frame; } if (unlikely(p->des01.erx.da_filter_fail)) { CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n"); x->da_rx_filter_fail++; ret = discard_frame; } if (unlikely(p->des01.erx.length_error)) { CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n"); x->rx_length++; ret = discard_frame; } #ifdef STMMAC_VLAN_TAG_USED if (p->des01.erx.vlan_tag) { CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n"); x->rx_vlan++; } #endif return ret; } static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size, int disable_rx_ic) { int i; for (i = 0; i < ring_size; i++) { p->des01.erx.own = 1; p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1; /* To support jumbo frames */ p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1; if (i == ring_size - 1) p->des01.erx.end_ring = 1; if (disable_rx_ic) p->des01.erx.disable_ic = 1; p++; } } static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size) { int i; for (i = 0; i < ring_size; i++) { p->des01.etx.own = 0; if (i == ring_size - 1) p->des01.etx.end_ring = 1; p++; } } static int enh_desc_get_tx_owner(struct dma_desc *p) { return p->des01.etx.own; } static int enh_desc_get_rx_owner(struct 
dma_desc *p) { return p->des01.erx.own; } static void enh_desc_set_tx_owner(struct dma_desc *p) { p->des01.etx.own = 1; } static void enh_desc_set_rx_owner(struct dma_desc *p) { p->des01.erx.own = 1; } static int enh_desc_get_tx_ls(struct dma_desc *p) { return p->des01.etx.last_segment; } static void enh_desc_release_tx_desc(struct dma_desc *p) { int ter = p->des01.etx.end_ring; memset(p, 0, offsetof(struct dma_desc, des2)); p->des01.etx.end_ring = ter; } static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, int csum_flag) { p->des01.etx.first_segment = is_fs; if (unlikely(len > BUF_SIZE_4KiB)) { p->des01.etx.buffer1_size = BUF_SIZE_4KiB; p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB; } else { p->des01.etx.buffer1_size = len; } if (likely(csum_flag)) p->des01.etx.checksum_insertion = cic_full; } static void enh_desc_clear_tx_ic(struct dma_desc *p) { p->des01.etx.interrupt = 0; } static void enh_desc_close_tx_desc(struct dma_desc *p) { p->des01.etx.last_segment = 1; p->des01.etx.interrupt = 1; } static int enh_desc_get_rx_frame_len(struct dma_desc *p) { return p->des01.erx.frame_length; } const struct stmmac_desc_ops enh_desc_ops = { .tx_status = enh_desc_get_tx_status, .rx_status = enh_desc_get_rx_status, .get_tx_len = enh_desc_get_tx_len, .init_rx_desc = enh_desc_init_rx_desc, .init_tx_desc = enh_desc_init_tx_desc, .get_tx_owner = enh_desc_get_tx_owner, .get_rx_owner = enh_desc_get_rx_owner, .release_tx_desc = enh_desc_release_tx_desc, .prepare_tx_desc = enh_desc_prepare_tx_desc, .clear_tx_ic = enh_desc_clear_tx_ic, .close_tx_desc = enh_desc_close_tx_desc, .get_tx_ls = enh_desc_get_tx_ls, .set_tx_owner = enh_desc_set_tx_owner, .set_rx_owner = enh_desc_set_rx_owner, .get_rx_frame_len = enh_desc_get_rx_frame_len, };
gpl-2.0
szezso/android_kernel_htc_msm7x30
drivers/staging/rtl8187se/r8180_wx.c
3340
38826
/* This file contains wireless extension handlers. This is part of rtl8180 OpenSource driver. Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it> Released under the terms of GPL (General Public Licence) Parts of this driver are based on the GPL part of the official realtek driver. Parts of this driver are based on the rtl8180 driver skeleton from Patric Schenke & Andres Salomon. Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver. We want to tanks the Authors of those projects and the Ndiswrapper project Authors. */ #include "r8180.h" #include "r8180_hw.h" #include "ieee80211/dot11d.h" /* #define RATE_COUNT 4 */ u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000, 6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000}; #define RATE_COUNT ARRAY_SIZE(rtl8180_rates) static CHANNEL_LIST DefaultChannelPlan[] = { /* {{1,2,3,4,5,6,7,8,9,10,11,12,13,14},14}, */ /*Default channel plan */ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64}, 19}, /*FCC */ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /*IC */ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /*ETSI */ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /*Spain. Change to ETSI. */ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /*France. Change to ETSI. */ {{14, 36, 40, 44, 48, 52, 56, 60, 64}, 9}, /*MKK */ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52, 56, 60, 64}, 22},/*MKK1 */ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /*Israel. */ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 34, 38, 42, 46}, 17}, /*For 11a , TELEC */ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14} /*For Global Domain. 
1-11:active scan, 12-14 passive scan.*/ /* +YJ, 080626 */ }; static int r8180_wx_get_freq(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8180_priv *priv = ieee80211_priv(dev); return ieee80211_wx_get_freq(priv->ieee80211, a, wrqu, b); } int r8180_wx_set_key(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *key) { struct r8180_priv *priv = ieee80211_priv(dev); struct iw_point *erq = &(wrqu->encoding); if (priv->ieee80211->bHwRadioOff) return 0; if (erq->flags & IW_ENCODE_DISABLED) /* i = erq->flags & IW_ENCODE_INDEX; if (i < 1 || i > 4) */ if (erq->length > 0) { /*int len = erq->length <= 5 ? 5 : 13; */ u32* tkey = (u32*) key; priv->key0[0] = tkey[0]; priv->key0[1] = tkey[1]; priv->key0[2] = tkey[2]; priv->key0[3] = tkey[3] & 0xff; DMESG("Setting wep key to %x %x %x %x", tkey[0], tkey[1], tkey[2], tkey[3]); rtl8180_set_hw_wep(dev); } return 0; } static int r8180_wx_set_beaconinterval(struct net_device *dev, struct iw_request_info *aa, union iwreq_data *wrqu, char *b) { int *parms = (int *)b; int bi = parms[0]; struct r8180_priv *priv = ieee80211_priv(dev); if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); DMESG("setting beacon interval to %x", bi); priv->ieee80211->current_network.beacon_interval = bi; rtl8180_commit(dev); up(&priv->wx_sem); return 0; } static int r8180_wx_get_mode(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8180_priv *priv = ieee80211_priv(dev); return ieee80211_wx_get_mode(priv->ieee80211, a, wrqu, b); } static int r8180_wx_get_rate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); return ieee80211_wx_get_rate(priv->ieee80211, info, wrqu, extra); } static int r8180_wx_set_rate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret; struct r8180_priv *priv = 
ieee80211_priv(dev); if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); ret = ieee80211_wx_set_rate(priv->ieee80211, info, wrqu, extra); up(&priv->wx_sem); return ret; } static int r8180_wx_set_crcmon(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); int *parms = (int *)extra; int enable = (parms[0] > 0); short prev = priv->crcmon; if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); if (enable) priv->crcmon = 1; else priv->crcmon = 0; DMESG("bad CRC in monitor mode are %s", priv->crcmon ? "accepted" : "rejected"); if (prev != priv->crcmon && priv->up) { rtl8180_down(dev); rtl8180_up(dev); } up(&priv->wx_sem); return 0; } static int r8180_wx_set_mode(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8180_priv *priv = ieee80211_priv(dev); int ret; if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); /* printk("set mode ENABLE_IPS\n"); */ if (priv->bInactivePs) { if (wrqu->mode == IW_MODE_ADHOC) IPSLeave(dev); } ret = ieee80211_wx_set_mode(priv->ieee80211, a, wrqu, b); /* rtl8180_commit(dev); */ up(&priv->wx_sem); return ret; } /* YJ,add,080819,for hidden ap */ struct iw_range_with_scan_capa { /* Informative stuff (to choose between different interface) */ __u32 throughput; /* To give an idea... */ /* In theory this value should be the maximum benchmarked * TCP/IP throughput, because with most of these devices the * bit rate is meaningless (overhead an co) to estimate how * fast the connection will go and pick the fastest one. * I suggest people to play with Netperf or any benchmark... 
*/ /* NWID (or domain id) */ __u32 min_nwid; /* Minimal NWID we are able to set */ __u32 max_nwid; /* Maximal NWID we are able to set */ /* Old Frequency (backward compat - moved lower ) */ __u16 old_num_channels; __u8 old_num_frequency; /* Scan capabilities */ __u8 scan_capa; }; /* YJ,add,080819,for hidden ap */ static int rtl8180_wx_get_range(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_range *range = (struct iw_range *)extra; struct r8180_priv *priv = ieee80211_priv(dev); u16 val; int i; /*struct iw_range_with_scan_capa* tmp = (struct iw_range_with_scan_capa*)range; */ /*YJ,add,080819,for hidden ap */ wrqu->data.length = sizeof(*range); memset(range, 0, sizeof(*range)); /* Let's try to keep this struct in the same order as in * linux/include/wireless.h */ /* TODO: See what values we can set, and remove the ones we can't * set, or fill them with some default data. */ /* ~5 Mb/s real (802.11b) */ range->throughput = 5 * 1000 * 1000; /* TODO: Not used in 802.11b? */ /* range->min_nwid; */ /* Minimal NWID we are able to set */ /* TODO: Not used in 802.11b? 
*/ /* range->max_nwid; */ /* Maximal NWID we are able to set */ /* Old Frequency (backward compat - moved lower ) */ /* range->old_num_channels; */ /* range->old_num_frequency; */ /* range->old_freq[6]; */ /* Filler to keep "version" at the same offset */ if (priv->rf_set_sens != NULL) range->sensitivity = priv->max_sens; /* signal level threshold range */ range->max_qual.qual = 100; /* TODO: Find real max RSSI and stick here */ range->max_qual.level = 0; range->max_qual.noise = -98; range->max_qual.updated = 7; /* Updated all three */ range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */ /* TODO: Find real 'good' to 'bad' threshold value for RSSI */ range->avg_qual.level = 20 + -98; range->avg_qual.noise = 0; range->avg_qual.updated = 7; /* Updated all three */ range->num_bitrates = RATE_COUNT; for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++) range->bitrate[i] = rtl8180_rates[i]; range->min_frag = MIN_FRAG_THRESHOLD; range->max_frag = MAX_FRAG_THRESHOLD; range->pm_capa = 0; range->we_version_compiled = WIRELESS_EXT; range->we_version_source = 16; /* range->retry_capa; */ /* What retry options are supported */ /* range->retry_flags; */ /* How to decode max/min retry limit */ /* range->r_time_flags;*/ /* How to decode max/min retry life */ /* range->min_retry; */ /* Minimal number of retries */ /* range->max_retry; */ /* Maximal number of retries */ /* range->min_r_time; */ /* Minimal retry lifetime */ /* range->max_r_time; */ /* Maximal retry lifetime */ range->num_channels = 14; for (i = 0, val = 0; i < 14; i++) { /* Include only legal frequencies for some countries */ if ((GET_DOT11D_INFO(priv->ieee80211)->channel_map)[i+1]) { range->freq[val].i = i + 1; range->freq[val].m = ieee80211_wlan_frequencies[i] * 100000; range->freq[val].e = 1; val++; } else { /* FIXME: do we need to set anything for channels */ /* we don't use ? 
*/ } if (val == IW_MAX_FREQUENCIES) break; } range->num_frequency = val; range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; /*tmp->scan_capa = 0x01; */ /*YJ,add,080819,for hidden ap */ return 0; } static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8180_priv *priv = ieee80211_priv(dev); int ret; struct ieee80211_device* ieee = priv->ieee80211; if (priv->ieee80211->bHwRadioOff) return 0; /*YJ,add,080819, for hidden ap */ /*printk("==*&*&*&==>%s in\n", __func__); */ /*printk("=*&*&*&*===>flag:%x, %x\n", wrqu->data.flags, IW_SCAN_THIS_ESSID); */ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { struct iw_scan_req* req = (struct iw_scan_req*)b; if (req->essid_len) { /*printk("==**&*&*&**===>scan set ssid:%s\n", req->essid); */ ieee->current_network.ssid_len = req->essid_len; memcpy(ieee->current_network.ssid, req->essid, req->essid_len); /*printk("=====>network ssid:%s\n", ieee->current_network.ssid); */ } } /*YJ,add,080819, for hidden ap, end */ down(&priv->wx_sem); if (priv->up) { /* printk("set scan ENABLE_IPS\n"); */ priv->ieee80211->actscanning = true; if (priv->bInactivePs && (priv->ieee80211->state != IEEE80211_LINKED)) { IPSLeave(dev); /*down(&priv->ieee80211->wx_sem); */ /* if (priv->ieee80211->iw_mode == IW_MODE_MONITOR || !(priv->ieee80211->proto_started)){ ret = -1; up(&priv->ieee80211->wx_sem); up(&priv->wx_sem); return ret; } */ /* queue_work(priv->ieee80211->wq, &priv->ieee80211->wx_sync_scan_wq); */ /* printk("start scan============================>\n"); */ ieee80211_softmac_ips_scan_syncro(priv->ieee80211); /* ieee80211_rtl_start_scan(priv->ieee80211); */ /* intentionally forget to up sem */ /* up(&priv->ieee80211->wx_sem); */ ret = 0; } else { /* YJ,add,080828, prevent scan in BusyTraffic */ /* FIXME: Need to consider last scan time */ if ((priv->link_detect.bBusyTraffic) && (true)) { ret = 0; printk("Now traffic is busy, please try 
later!\n"); } else /* YJ,add,080828, prevent scan in BusyTraffic,end */ ret = ieee80211_wx_set_scan(priv->ieee80211, a, wrqu, b); } } else ret = -1; up(&priv->wx_sem); return ret; } static int r8180_wx_get_scan(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret; struct r8180_priv *priv = ieee80211_priv(dev); down(&priv->wx_sem); if (priv->up) ret = ieee80211_wx_get_scan(priv->ieee80211, a, wrqu, b); else ret = -1; up(&priv->wx_sem); return ret; } static int r8180_wx_set_essid(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8180_priv *priv = ieee80211_priv(dev); int ret; if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); /* printk("set essid ENABLE_IPS\n"); */ if (priv->bInactivePs) IPSLeave(dev); /* printk("haha:set essid %s essid_len = %d essid_flgs = %d\n",b, wrqu->essid.length, wrqu->essid.flags); */ ret = ieee80211_wx_set_essid(priv->ieee80211, a, wrqu, b); up(&priv->wx_sem); return ret; } static int r8180_wx_get_essid(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret; struct r8180_priv *priv = ieee80211_priv(dev); down(&priv->wx_sem); ret = ieee80211_wx_get_essid(priv->ieee80211, a, wrqu, b); up(&priv->wx_sem); return ret; } static int r8180_wx_set_freq(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret; struct r8180_priv *priv = ieee80211_priv(dev); if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); ret = ieee80211_wx_set_freq(priv->ieee80211, a, wrqu, b); up(&priv->wx_sem); return ret; } static int r8180_wx_get_name(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); return ieee80211_wx_get_name(priv->ieee80211, info, wrqu, extra); } static int r8180_wx_set_frag(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv 
*priv = ieee80211_priv(dev); if (priv->ieee80211->bHwRadioOff) return 0; if (wrqu->frag.disabled) priv->ieee80211->fts = DEFAULT_FRAG_THRESHOLD; else { if (wrqu->frag.value < MIN_FRAG_THRESHOLD || wrqu->frag.value > MAX_FRAG_THRESHOLD) return -EINVAL; priv->ieee80211->fts = wrqu->frag.value & ~0x1; } return 0; } static int r8180_wx_get_frag(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); wrqu->frag.value = priv->ieee80211->fts; wrqu->frag.fixed = 0; /* no auto select */ wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FRAG_THRESHOLD); return 0; } static int r8180_wx_set_wap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *awrq, char *extra) { int ret; struct r8180_priv *priv = ieee80211_priv(dev); if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); ret = ieee80211_wx_set_wap(priv->ieee80211, info, awrq, extra); up(&priv->wx_sem); return ret; } static int r8180_wx_get_wap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); return ieee80211_wx_get_wap(priv->ieee80211, info, wrqu, extra); } static int r8180_wx_set_enc(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *key) { struct r8180_priv *priv = ieee80211_priv(dev); int ret; if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); if (priv->hw_wep) ret = r8180_wx_set_key(dev, info, wrqu, key); else { DMESG("Setting SW wep key"); ret = ieee80211_wx_set_encode(priv->ieee80211, info, wrqu, key); } up(&priv->wx_sem); return ret; } static int r8180_wx_get_enc(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *key) { struct r8180_priv *priv = ieee80211_priv(dev); return ieee80211_wx_get_encode(priv->ieee80211, info, wrqu, key); } static int r8180_wx_set_scan_type(struct net_device *dev, struct iw_request_info *aa, union iwreq_data 
*wrqu, char *p) { struct r8180_priv *priv = ieee80211_priv(dev); int *parms = (int*)p; int mode = parms[0]; if (priv->ieee80211->bHwRadioOff) return 0; priv->ieee80211->active_scan = mode; return 1; } /* added by christian */ /* static int r8180_wx_set_monitor_type(struct net_device *dev, struct iw_request_info *aa, union iwreq_data *wrqu, char *p){ struct r8180_priv *priv = ieee80211_priv(dev); int *parms=(int*)p; int mode=parms[0]; if(priv->ieee80211->iw_mode != IW_MODE_MONITOR) return -1; priv->prism_hdr = mode; if(!mode)dev->type=ARPHRD_IEEE80211; else dev->type=ARPHRD_IEEE80211_PRISM; DMESG("using %s RX encap", mode ? "AVS":"80211"); return 0; } */ /*of r8180_wx_set_monitor_type */ /* end added christian */ static int r8180_wx_set_retry(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); int err = 0; if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled) { err = -EINVAL; goto exit; } if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) { err = -EINVAL; goto exit; } if (wrqu->retry.value > R8180_MAX_RETRY) { err = -EINVAL; goto exit; } if (wrqu->retry.flags & IW_RETRY_MAX) { priv->retry_rts = wrqu->retry.value; DMESG("Setting retry for RTS/CTS data to %d", wrqu->retry.value); } else { priv->retry_data = wrqu->retry.value; DMESG("Setting retry for non RTS/CTS data to %d", wrqu->retry.value); } /* FIXME ! * We might try to write directly the TX config register * or to restart just the (R)TX process. 
* I'm unsure if whole reset is really needed */ rtl8180_commit(dev); /* if(priv->up){ rtl8180_rtx_disable(dev); rtl8180_rx_enable(dev); rtl8180_tx_enable(dev); } */ exit: up(&priv->wx_sem); return err; } static int r8180_wx_get_retry(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); wrqu->retry.disabled = 0; /* can't be disabled */ if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) return -EINVAL; if (wrqu->retry.flags & IW_RETRY_MAX) { wrqu->retry.flags = IW_RETRY_LIMIT & IW_RETRY_MAX; wrqu->retry.value = priv->retry_rts; } else { wrqu->retry.flags = IW_RETRY_LIMIT & IW_RETRY_MIN; wrqu->retry.value = priv->retry_data; } /* DMESG("returning %d",wrqu->retry.value); */ return 0; } static int r8180_wx_get_sens(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); if (priv->rf_set_sens == NULL) return -1; /* we have not this support for this radio */ wrqu->sens.value = priv->sens; return 0; } static int r8180_wx_set_sens(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); short err = 0; if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); /* DMESG("attempt to set sensivity to %ddb",wrqu->sens.value); */ if (priv->rf_set_sens == NULL) { err = -1; /* we have not this support for this radio */ goto exit; } if (priv->rf_set_sens(dev, wrqu->sens.value) == 0) priv->sens = wrqu->sens.value; else err = -EINVAL; exit: up(&priv->wx_sem); return err; } static int r8180_wx_set_rawtx(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); int ret; if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); ret = ieee80211_wx_set_rawtx(priv->ieee80211, info, wrqu, extra); up(&priv->wx_sem); return ret; } static 
int r8180_wx_get_power(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret; struct r8180_priv *priv = ieee80211_priv(dev); down(&priv->wx_sem); ret = ieee80211_wx_get_power(priv->ieee80211, info, wrqu, extra); up(&priv->wx_sem); return ret; } static int r8180_wx_set_power(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret; struct r8180_priv *priv = ieee80211_priv(dev); if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); printk("=>>>>>>>>>>=============================>set power:%d, %d!\n", wrqu->power.disabled, wrqu->power.flags); if (wrqu->power.disabled == 0) { wrqu->power.flags |= IW_POWER_ALL_R; wrqu->power.flags |= IW_POWER_TIMEOUT; wrqu->power.value = 1000; } ret = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra); up(&priv->wx_sem); return ret; } static int r8180_wx_set_rts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); if (priv->ieee80211->bHwRadioOff) return 0; if (wrqu->rts.disabled) priv->rts = DEFAULT_RTS_THRESHOLD; else { if (wrqu->rts.value < MIN_RTS_THRESHOLD || wrqu->rts.value > MAX_RTS_THRESHOLD) return -EINVAL; priv->rts = wrqu->rts.value; } return 0; } static int r8180_wx_get_rts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); wrqu->rts.value = priv->rts; wrqu->rts.fixed = 0; /* no auto select */ wrqu->rts.disabled = (wrqu->rts.value == 0); return 0; } static int dummy(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { return -1; } /* static int r8180_wx_get_psmode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); struct ieee80211_device *ieee; int ret = 0; down(&priv->wx_sem); if(priv) { ieee = priv->ieee80211; if(ieee->ps 
== IEEE80211_PS_DISABLED) { *((unsigned int *)extra) = IEEE80211_PS_DISABLED; goto exit; } *((unsigned int *)extra) = IW_POWER_TIMEOUT; if (ieee->ps & IEEE80211_PS_MBCAST) *((unsigned int *)extra) |= IW_POWER_ALL_R; else *((unsigned int *)extra) |= IW_POWER_UNICAST_R; } else ret = -1; exit: up(&priv->wx_sem); return ret; } static int r8180_wx_set_psmode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); //struct ieee80211_device *ieee; int ret = 0; down(&priv->wx_sem); ret = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra); up(&priv->wx_sem); return ret; } */ static int r8180_wx_get_iwmode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); struct ieee80211_device *ieee; int ret = 0; down(&priv->wx_sem); ieee = priv->ieee80211; strcpy(extra, "802.11"); if (ieee->modulation & IEEE80211_CCK_MODULATION) { strcat(extra, "b"); if (ieee->modulation & IEEE80211_OFDM_MODULATION) strcat(extra, "/g"); } else if (ieee->modulation & IEEE80211_OFDM_MODULATION) strcat(extra, "g"); up(&priv->wx_sem); return ret; } static int r8180_wx_set_iwmode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); struct ieee80211_device *ieee = priv->ieee80211; int *param = (int *)extra; int ret = 0; int modulation = 0, mode = 0; if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); if (*param == 1) { modulation |= IEEE80211_CCK_MODULATION; mode = IEEE_B; printk(KERN_INFO "B mode!\n"); } else if (*param == 2) { modulation |= IEEE80211_OFDM_MODULATION; mode = IEEE_G; printk(KERN_INFO "G mode!\n"); } else if (*param == 3) { modulation |= IEEE80211_CCK_MODULATION; modulation |= IEEE80211_OFDM_MODULATION; mode = IEEE_B|IEEE_G; printk(KERN_INFO "B/G mode!\n"); } if (ieee->proto_started) { ieee80211_stop_protocol(ieee); 
ieee->mode = mode; ieee->modulation = modulation; ieee80211_start_protocol(ieee); } else { ieee->mode = mode; ieee->modulation = modulation; /* ieee80211_start_protocol(ieee); */ } up(&priv->wx_sem); return ret; } static int r8180_wx_get_preamble(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); down(&priv->wx_sem); *extra = (char) priv->plcp_preamble_mode; /* 0:auto 1:short 2:long */ up(&priv->wx_sem); return 0; } static int r8180_wx_set_preamble(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); int ret = 0; if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); if (*extra < 0 || *extra > 2) ret = -1; else priv->plcp_preamble_mode = *((short *)extra) ; up(&priv->wx_sem); return ret; } static int r8180_wx_get_siglevel(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); /* struct ieee80211_network *network = &(priv->ieee80211->current_network); */ int ret = 0; down(&priv->wx_sem); /* Modify by hikaru 6.5 */ *((int *)extra) = priv->wstats.qual.level;/*for interface test ,it should be the priv->wstats.qual.level; */ up(&priv->wx_sem); return ret; } static int r8180_wx_get_sigqual(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); /* struct ieee80211_network *network = &(priv->ieee80211->current_network); */ int ret = 0; down(&priv->wx_sem); /* Modify by hikaru 6.5 */ *((int *)extra) = priv->wstats.qual.qual;/* for interface test ,it should be the priv->wstats.qual.qual; */ up(&priv->wx_sem); return ret; } static int r8180_wx_reset_stats(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); down(&priv->wx_sem); 
priv->stats.txrdu = 0; priv->stats.rxrdu = 0; priv->stats.rxnolast = 0; priv->stats.rxnodata = 0; priv->stats.rxnopointer = 0; priv->stats.txnperr = 0; priv->stats.txresumed = 0; priv->stats.rxerr = 0; priv->stats.rxoverflow = 0; priv->stats.rxint = 0; priv->stats.txnpokint = 0; priv->stats.txhpokint = 0; priv->stats.txhperr = 0; priv->stats.ints = 0; priv->stats.shints = 0; priv->stats.txoverflow = 0; priv->stats.rxdmafail = 0; priv->stats.txbeacon = 0; priv->stats.txbeaconerr = 0; priv->stats.txlpokint = 0; priv->stats.txlperr = 0; priv->stats.txretry = 0;/* 20060601 */ priv->stats.rxcrcerrmin = 0 ; priv->stats.rxcrcerrmid = 0; priv->stats.rxcrcerrmax = 0; priv->stats.rxicverr = 0; up(&priv->wx_sem); return 0; } static int r8180_wx_radio_on(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); priv->rf_wakeup(dev); up(&priv->wx_sem); return 0; } static int r8180_wx_radio_off(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); priv->rf_sleep(dev); up(&priv->wx_sem); return 0; } static int r8180_wx_get_channelplan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); down(&priv->wx_sem); *extra = priv->channel_plan; up(&priv->wx_sem); return 0; } static int r8180_wx_set_channelplan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); /* struct ieee80211_device *ieee = netdev_priv(dev); */ int *val = (int *)extra; int i; printk("-----in fun %s\n", __func__); if (priv->ieee80211->bHwRadioOff) return 0; /* unsigned long flags; */ down(&priv->wx_sem); if (DefaultChannelPlan[*val].Len != 0) { 
priv->channel_plan = *val; /* Clear old channel map 8 */ for (i = 1; i <= MAX_CHANNEL_NUMBER; i++) GET_DOT11D_INFO(priv->ieee80211)->channel_map[i] = 0; /* Set new channel map */ for (i = 1; i <= DefaultChannelPlan[*val].Len; i++) GET_DOT11D_INFO(priv->ieee80211)->channel_map[DefaultChannelPlan[*val].Channel[i-1]] = 1; } up(&priv->wx_sem); return 0; } static int r8180_wx_get_version(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); /* struct ieee80211_device *ieee; */ down(&priv->wx_sem); strcpy(extra, "1020.0808"); up(&priv->wx_sem); return 0; } /* added by amy 080818 */ /*receive datarate from user typing valid rate is from 2 to 108 (1 - 54M), if input 0, return to normal rate adaptive. */ static int r8180_wx_set_forcerate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); u8 forcerate = *extra; down(&priv->wx_sem); printk("==============>%s(): forcerate is %d\n", __func__, forcerate); if ((forcerate == 2) || (forcerate == 4) || (forcerate == 11) || (forcerate == 22) || (forcerate == 12) || (forcerate == 18) || (forcerate == 24) || (forcerate == 36) || (forcerate == 48) || (forcerate == 72) || (forcerate == 96) || (forcerate == 108)) { priv->ForcedDataRate = 1; priv->ieee80211->rate = forcerate * 5; } else if (forcerate == 0) { priv->ForcedDataRate = 0; printk("OK! 
return rate adaptive\n"); } else printk("ERR: wrong rate\n"); up(&priv->wx_sem); return 0; } static int r8180_wx_set_enc_ext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); /* printk("===>%s()\n", __func__); */ int ret = 0; if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); ret = ieee80211_wx_set_encode_ext(priv->ieee80211, info, wrqu, extra); up(&priv->wx_sem); return ret; } static int r8180_wx_set_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* printk("====>%s()\n", __func__); */ struct r8180_priv *priv = ieee80211_priv(dev); int ret = 0; if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); ret = ieee80211_wx_set_auth(priv->ieee80211, info, &wrqu->param, extra); up(&priv->wx_sem); return ret; } static int r8180_wx_set_mlme(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* printk("====>%s()\n", __func__); */ int ret = 0; struct r8180_priv *priv = ieee80211_priv(dev); if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); #if 1 ret = ieee80211_wx_set_mlme(priv->ieee80211, info, wrqu, extra); #endif up(&priv->wx_sem); return ret; } static int r8180_wx_set_gen_ie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* printk("====>%s(), len:%d\n", __func__, data->length); */ int ret = 0; struct r8180_priv *priv = ieee80211_priv(dev); if (priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); #if 1 ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, wrqu->data.length); #endif up(&priv->wx_sem); /* printk("<======%s(), ret:%d\n", __func__, ret); */ return ret; } static iw_handler r8180_wx_handlers[] = { NULL, /* SIOCSIWCOMMIT */ r8180_wx_get_name, /* SIOCGIWNAME */ dummy, /* SIOCSIWNWID */ dummy, /* SIOCGIWNWID */ r8180_wx_set_freq, /* SIOCSIWFREQ */ r8180_wx_get_freq, /* SIOCGIWFREQ */ 
r8180_wx_set_mode, /* SIOCSIWMODE */ r8180_wx_get_mode, /* SIOCGIWMODE */ r8180_wx_set_sens, /* SIOCSIWSENS */ r8180_wx_get_sens, /* SIOCGIWSENS */ NULL, /* SIOCSIWRANGE */ rtl8180_wx_get_range, /* SIOCGIWRANGE */ NULL, /* SIOCSIWPRIV */ NULL, /* SIOCGIWPRIV */ NULL, /* SIOCSIWSTATS */ NULL, /* SIOCGIWSTATS */ dummy, /* SIOCSIWSPY */ dummy, /* SIOCGIWSPY */ NULL, /* SIOCGIWTHRSPY */ NULL, /* SIOCWIWTHRSPY */ r8180_wx_set_wap, /* SIOCSIWAP */ r8180_wx_get_wap, /* SIOCGIWAP */ r8180_wx_set_mlme, /* SIOCSIWMLME*/ dummy, /* SIOCGIWAPLIST -- depricated */ r8180_wx_set_scan, /* SIOCSIWSCAN */ r8180_wx_get_scan, /* SIOCGIWSCAN */ r8180_wx_set_essid, /* SIOCSIWESSID */ r8180_wx_get_essid, /* SIOCGIWESSID */ dummy, /* SIOCSIWNICKN */ dummy, /* SIOCGIWNICKN */ NULL, /* -- hole -- */ NULL, /* -- hole -- */ r8180_wx_set_rate, /* SIOCSIWRATE */ r8180_wx_get_rate, /* SIOCGIWRATE */ r8180_wx_set_rts, /* SIOCSIWRTS */ r8180_wx_get_rts, /* SIOCGIWRTS */ r8180_wx_set_frag, /* SIOCSIWFRAG */ r8180_wx_get_frag, /* SIOCGIWFRAG */ dummy, /* SIOCSIWTXPOW */ dummy, /* SIOCGIWTXPOW */ r8180_wx_set_retry, /* SIOCSIWRETRY */ r8180_wx_get_retry, /* SIOCGIWRETRY */ r8180_wx_set_enc, /* SIOCSIWENCODE */ r8180_wx_get_enc, /* SIOCGIWENCODE */ r8180_wx_set_power, /* SIOCSIWPOWER */ r8180_wx_get_power, /* SIOCGIWPOWER */ NULL, /*---hole---*/ NULL, /*---hole---*/ r8180_wx_set_gen_ie, /* SIOCSIWGENIE */ NULL, /* SIOCSIWGENIE */ r8180_wx_set_auth, /* SIOCSIWAUTH */ NULL, /* SIOCSIWAUTH */ r8180_wx_set_enc_ext, /* SIOCSIWENCODEEXT */ NULL, /* SIOCSIWENCODEEXT */ NULL, /* SIOCSIWPMKSA */ NULL, /*---hole---*/ }; static const struct iw_priv_args r8180_private_args[] = { { SIOCIWFIRSTPRIV + 0x0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "badcrc" }, { SIOCIWFIRSTPRIV + 0x1, 0, 0, "dummy" }, { SIOCIWFIRSTPRIV + 0x2, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "beaconint" }, { SIOCIWFIRSTPRIV + 0x3, 0, 0, "dummy" }, /* added by christian */ /* { SIOCIWFIRSTPRIV + 0x2, IW_PRIV_TYPE_INT | 
IW_PRIV_SIZE_FIXED | 1, 0, "prismhdr" }, */ /* end added by christian */ { SIOCIWFIRSTPRIV + 0x4, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan" }, { SIOCIWFIRSTPRIV + 0x5, 0, 0, "dummy" }, { SIOCIWFIRSTPRIV + 0x6, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rawtx" }, { SIOCIWFIRSTPRIV + 0x7, 0, 0, "dummy" }, /* { SIOCIWFIRSTPRIV + 0x5, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getpsmode" }, { SIOCIWFIRSTPRIV + 0x6, IW_PRIV_SIZE_FIXED, 0, "setpsmode" }, */ /* set/get mode have been realized in public handlers */ { SIOCIWFIRSTPRIV + 0x8, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setiwmode" }, { SIOCIWFIRSTPRIV + 0x9, 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 32, "getiwmode" }, { SIOCIWFIRSTPRIV + 0xA, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setpreamble" }, { SIOCIWFIRSTPRIV + 0xB, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getpreamble" }, { SIOCIWFIRSTPRIV + 0xC, 0, 0, "dummy" }, { SIOCIWFIRSTPRIV + 0xD, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getrssi" }, { SIOCIWFIRSTPRIV + 0xE, 0, 0, "dummy" }, { SIOCIWFIRSTPRIV + 0xF, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getlinkqual" }, { SIOCIWFIRSTPRIV + 0x10, 0, 0, "resetstats" }, { SIOCIWFIRSTPRIV + 0x11, 0, 0, "dummy" }, { SIOCIWFIRSTPRIV + 0x12, 0, 0, "radioon" }, { SIOCIWFIRSTPRIV + 0x13, 0, 0, "radiooff" }, { SIOCIWFIRSTPRIV + 0x14, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setchannel" }, { SIOCIWFIRSTPRIV + 0x15, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getchannel" }, { SIOCIWFIRSTPRIV + 0x16, 0, 0, "dummy" }, { SIOCIWFIRSTPRIV + 0x17, 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 32, "getversion" }, { SIOCIWFIRSTPRIV + 0x18, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setrate" }, }; static iw_handler r8180_private_handler[] = { r8180_wx_set_crcmon, /*SIOCIWSECONDPRIV*/ dummy, r8180_wx_set_beaconinterval, dummy, /* r8180_wx_set_monitor_type, */ r8180_wx_set_scan_type, dummy, r8180_wx_set_rawtx, dummy, r8180_wx_set_iwmode, r8180_wx_get_iwmode, 
r8180_wx_set_preamble, r8180_wx_get_preamble, dummy, r8180_wx_get_siglevel, dummy, r8180_wx_get_sigqual, r8180_wx_reset_stats, dummy,/* r8180_wx_get_stats */ r8180_wx_radio_on, r8180_wx_radio_off, r8180_wx_set_channelplan, r8180_wx_get_channelplan, dummy, r8180_wx_get_version, r8180_wx_set_forcerate, }; static inline int is_same_network(struct ieee80211_network *src, struct ieee80211_network *dst, struct ieee80211_device *ieee) { /* A network is only a duplicate if the channel, BSSID, ESSID * and the capability field (in particular IBSS and BSS) all match. * We treat all <hidden> with the same BSSID and channel * as one network */ return (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */ /* ((src->ssid_len == dst->ssid_len) && */ (src->channel == dst->channel) && !memcmp(src->bssid, dst->bssid, ETH_ALEN) && (!memcmp(src->ssid, dst->ssid, src->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && /* YJ,mod, 080819,for hidden ap */ /*!memcmp(src->ssid, dst->ssid, src->ssid_len) && */ ((src->capability & WLAN_CAPABILITY_IBSS) == (dst->capability & WLAN_CAPABILITY_IBSS)) && ((src->capability & WLAN_CAPABILITY_BSS) == (dst->capability & WLAN_CAPABILITY_BSS))); } /* WB modefied to show signal to GUI on 18-01-2008 */ static struct iw_statistics *r8180_get_wireless_stats(struct net_device *dev) { struct r8180_priv *priv = ieee80211_priv(dev); struct ieee80211_device* ieee = priv->ieee80211; struct iw_statistics* wstats = &priv->wstats; /* struct ieee80211_network* target = NULL; */ int tmp_level = 0; int tmp_qual = 0; int tmp_noise = 0; /* unsigned long flag; */ if (ieee->state < IEEE80211_LINKED) { wstats->qual.qual = 0; wstats->qual.level = 0; wstats->qual.noise = 0; wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; return wstats; } tmp_level = (&ieee->current_network)->stats.signal; tmp_qual = (&ieee->current_network)->stats.signalstrength; tmp_noise = (&ieee->current_network)->stats.noise; /* 
printk("level:%d, qual:%d, noise:%d\n", tmp_level, tmp_qual, tmp_noise); */ /* printk("level:%d\n", tmp_level); */ wstats->qual.level = tmp_level; wstats->qual.qual = tmp_qual; wstats->qual.noise = tmp_noise; wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; return wstats; } struct iw_handler_def r8180_wx_handlers_def = { .standard = r8180_wx_handlers, .num_standard = ARRAY_SIZE(r8180_wx_handlers), .private = r8180_private_handler, .num_private = ARRAY_SIZE(r8180_private_handler), .num_private_args = sizeof(r8180_private_args) / sizeof(struct iw_priv_args), .get_wireless_stats = r8180_get_wireless_stats, .private_args = (struct iw_priv_args *)r8180_private_args, };
gpl-2.0
divx118/nicktime2.6.37_archos_kernel
drivers/i2c/busses/i2c-au1550.c
4620
10477
/*
 * i2c-au1550.c: SMBus (i2c) adapter for Alchemy PSC interface
 * Copyright (C) 2004 Embedded Edge, LLC <dan@embeddededge.com>
 *
 * 2.6 port by Matt Porter <mporter@kernel.crashing.org>
 *
 * The documentation describes this as an SMBus controller, but it doesn't
 * understand any of the SMBus protocol in hardware.  It's really an I2C
 * controller that could emulate most of the SMBus in software.
 *
 * This is just a skeleton adapter to use with the Au1550 PSC
 * algorithm.  It was developed for the Pb1550, but will work with
 * any Au1550 board that has a similar PSC configuration.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/slab.h>

#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1xxx_psc.h>

/* Per-adapter state: PSC register base, busy-wait timeouts (in us
 * iterations) and the registered i2c adapter. */
struct i2c_au1550_data {
	u32	psc_base;
	int	xfer_timeout;
	int	ack_timeout;
	struct i2c_adapter adap;
	struct resource *ioarea;
};

/* Busy-wait until the PSC TX buffer is empty; -ETIMEDOUT on timeout. */
static int wait_xfer_done(struct i2c_au1550_data *adap)
{
	u32 stat;
	int i;
	volatile psc_smb_t *sp;

	sp = (volatile psc_smb_t *)(adap->psc_base);

	/* Wait for Tx Buffer Empty */
	for (i = 0; i < adap->xfer_timeout; i++) {
		stat = sp->psc_smbstat;
		au_sync();
		if ((stat & PSC_SMBSTAT_TE) != 0)
			return 0;

		udelay(1);
	}

	return -ETIMEDOUT;
}

/* Wait for the byte to go out and check the event register for
 * NAK/arbitration-loss; -ETIMEDOUT on any failure. */
static int wait_ack(struct i2c_au1550_data *adap)
{
	u32 stat;
	volatile psc_smb_t *sp;

	if (wait_xfer_done(adap))
		return -ETIMEDOUT;

	sp = (volatile psc_smb_t *)(adap->psc_base);

	stat = sp->psc_smbevnt;
	au_sync();

	if ((stat & (PSC_SMBEVNT_DN | PSC_SMBEVNT_AN | PSC_SMBEVNT_AL)) != 0)
		return -ETIMEDOUT;

	return 0;
}

/* Busy-wait for the Master Done event; -ETIMEDOUT on timeout. */
static int wait_master_done(struct i2c_au1550_data *adap)
{
	u32 stat;
	int i;
	volatile psc_smb_t *sp;

	sp = (volatile psc_smb_t *)(adap->psc_base);

	/* Wait for Master Done. */
	for (i = 0; i < adap->xfer_timeout; i++) {
		stat = sp->psc_smbevnt;
		au_sync();
		if ((stat & PSC_SMBEVNT_MD) != 0)
			return 0;
		udelay(1);
	}

	return -ETIMEDOUT;
}

/* Send the (7-bit) chip address with the R/W bit, after draining the
 * FIFOs and clearing stale events.  @q marks a zero-length message,
 * which must carry a STOP and complete immediately.  Returns 0 or a
 * negative error. */
static int
do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd, int q)
{
	volatile psc_smb_t *sp;
	u32 stat;

	sp = (volatile psc_smb_t *)(adap->psc_base);

	/* Reset the FIFOs, clear events. */
	stat = sp->psc_smbstat;
	sp->psc_smbevnt = PSC_SMBEVNT_ALLCLR;
	au_sync();

	if (!(stat & PSC_SMBSTAT_TE) || !(stat & PSC_SMBSTAT_RE)) {
		sp->psc_smbpcr = PSC_SMBPCR_DC;
		au_sync();
		do {
			stat = sp->psc_smbpcr;
			au_sync();
		} while ((stat & PSC_SMBPCR_DC) != 0);
		udelay(50);
	}

	/* Write out the i2c chip address and specify operation */
	addr <<= 1;
	if (rd)
		addr |= 1;

	/* zero-byte xfers stop immediately */
	if (q)
		addr |= PSC_SMBTXRX_STP;

	/* Put byte into fifo, start up master. */
	sp->psc_smbtxrx = addr;
	au_sync();
	sp->psc_smbpcr = PSC_SMBPCR_MS;
	au_sync();
	if (wait_ack(adap))
		return -EIO;
	return (q) ? wait_master_done(adap) : 0;
}

/* Pull one received byte out of the RX FIFO into *ret_data.
 * Fix: return type was u32, silently truncating the -EIO error path;
 * it now returns int (0 or -EIO) like the other wait helpers. */
static int wait_for_rx_byte(struct i2c_au1550_data *adap, u32 *ret_data)
{
	int j;
	u32 data, stat;
	volatile psc_smb_t *sp;

	if (wait_xfer_done(adap))
		return -EIO;

	sp = (volatile psc_smb_t *)(adap->psc_base);

	j = adap->xfer_timeout * 100;
	do {
		j--;
		if (j <= 0)
			return -EIO;

		stat = sp->psc_smbstat;
		au_sync();
		if ((stat & PSC_SMBSTAT_RE) == 0)
			j = 0;
		else
			udelay(1);
	} while (j > 0);

	data = sp->psc_smbtxrx;
	au_sync();
	*ret_data = data;

	return 0;
}

/* Read @len bytes into @buf.  A read is performed by stuffing the
 * transmit fifo with zero bytes for timing, waiting for bytes to
 * appear in the receive fifo, then reading the bytes.  Returns 0 or
 * -EIO. */
static int i2c_read(struct i2c_au1550_data *adap, unsigned char *buf,
		    unsigned int len)
{
	int i;
	u32 data;
	volatile psc_smb_t *sp;

	if (len == 0)
		return 0;

	sp = (volatile psc_smb_t *)(adap->psc_base);

	i = 0;
	while (i < (len-1)) {
		sp->psc_smbtxrx = 0;
		au_sync();
		if (wait_for_rx_byte(adap, &data))
			return -EIO;

		buf[i] = data;
		i++;
	}

	/* The last byte has to indicate transfer done. */
	sp->psc_smbtxrx = PSC_SMBTXRX_STP;
	au_sync();
	if (wait_master_done(adap))
		return -EIO;

	data = sp->psc_smbtxrx;
	au_sync();
	buf[i] = data;
	return 0;
}

/* Write @len bytes from @buf, tagging the final byte with STP so the
 * controller generates a stop condition.  Returns 0 or -EIO. */
static int i2c_write(struct i2c_au1550_data *adap, unsigned char *buf,
		     unsigned int len)
{
	int i;
	u32 data;
	volatile psc_smb_t *sp;

	if (len == 0)
		return 0;

	sp = (volatile psc_smb_t *)(adap->psc_base);

	i = 0;
	while (i < (len-1)) {
		data = buf[i];
		sp->psc_smbtxrx = data;
		au_sync();
		if (wait_ack(adap))
			return -EIO;
		i++;
	}

	/* The last byte has to indicate transfer done. */
	data = buf[i];
	data |= PSC_SMBTXRX_STP;
	sp->psc_smbtxrx = data;
	au_sync();
	if (wait_master_done(adap))
		return -EIO;
	return 0;
}

/* i2c_algorithm.master_xfer: run @num messages back to back, enabling
 * the PSC for the duration and suspending it afterwards. */
static int
au1550_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
{
	struct i2c_au1550_data *adap = i2c_adap->algo_data;
	volatile psc_smb_t *sp = (volatile psc_smb_t *)adap->psc_base;
	struct i2c_msg *p;
	int i, err = 0;

	sp->psc_ctrl = PSC_CTRL_ENABLE;
	au_sync();

	for (i = 0; !err && i < num; i++) {
		p = &msgs[i];
		err = do_address(adap, p->addr, p->flags & I2C_M_RD,
				 (p->len == 0));
		if (err || !p->len)
			continue;
		if (p->flags & I2C_M_RD)
			err = i2c_read(adap, p->buf, p->len);
		else
			err = i2c_write(adap, p->buf, p->len);
	}

	/* Return the number of messages processed, or the error code. */
	if (err == 0)
		err = num;

	sp->psc_ctrl = PSC_CTRL_SUSPEND;
	au_sync();

	return err;
}

/* i2c_algorithm.functionality: plain I2C plus emulated SMBus. */
static u32 au1550_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm au1550_algo = {
	.master_xfer	= au1550_xfer,
	.functionality	= au1550_func,
};

/* Program the PSC for SMBus mode: 8-deep FIFOs, /8 clock divider and
 * the standard protocol timings, then leave it suspended until a
 * transfer starts. */
static void i2c_au1550_setup(struct i2c_au1550_data *priv)
{
	volatile psc_smb_t *sp = (volatile psc_smb_t *)priv->psc_base;
	u32 stat;

	sp->psc_ctrl = PSC_CTRL_DISABLE;
	au_sync();
	sp->psc_sel = PSC_SEL_PS_SMBUSMODE;
	sp->psc_smbcfg = 0;
	au_sync();
	sp->psc_ctrl = PSC_CTRL_ENABLE;
	au_sync();
	do {
		stat = sp->psc_smbstat;
		au_sync();
	} while ((stat & PSC_SMBSTAT_SR) == 0);

	sp->psc_smbcfg = (PSC_SMBCFG_RT_FIFO8 | PSC_SMBCFG_TT_FIFO8 |
				PSC_SMBCFG_DD_DISABLE);

	/* Divide by 8 to get a 6.25 MHz clock.  The later protocol
	 * timings are based on this clock. */
	sp->psc_smbcfg |= PSC_SMBCFG_SET_DIV(PSC_SMBCFG_DIV8);
	sp->psc_smbmsk = PSC_SMBMSK_ALLMASK;
	au_sync();

	/* Set the protocol timer values.  See Table 71 in the
	 * Au1550 Data Book for standard timing values. */
	sp->psc_smbtmr = PSC_SMBTMR_SET_TH(0) | PSC_SMBTMR_SET_PS(15) | \
		PSC_SMBTMR_SET_PU(15) | PSC_SMBTMR_SET_SH(15) | \
		PSC_SMBTMR_SET_SU(15) | PSC_SMBTMR_SET_CL(15) | \
		PSC_SMBTMR_SET_CH(15);
	au_sync();

	sp->psc_smbcfg |= PSC_SMBCFG_DE_ENABLE;
	do {
		stat = sp->psc_smbstat;
		au_sync();
	} while ((stat & PSC_SMBSTAT_SR) == 0);

	sp->psc_ctrl = PSC_CTRL_SUSPEND;
	au_sync();
}

/* Disable the PSC entirely (used on remove/error paths). */
static void i2c_au1550_disable(struct i2c_au1550_data *priv)
{
	volatile psc_smb_t *sp = (volatile psc_smb_t *)priv->psc_base;

	sp->psc_smbcfg = 0;
	sp->psc_ctrl = PSC_CTRL_DISABLE;
	au_sync();
}

/*
 * registering functions to load algorithms at runtime
 * Prior to calling us, the 50MHz clock frequency and routing
 * must have been set up for the PSC indicated by the adapter.
*/ static int __devinit i2c_au1550_probe(struct platform_device *pdev) { struct i2c_au1550_data *priv; struct resource *r; int ret; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { ret = -ENODEV; goto out; } priv = kzalloc(sizeof(struct i2c_au1550_data), GFP_KERNEL); if (!priv) { ret = -ENOMEM; goto out; } priv->ioarea = request_mem_region(r->start, resource_size(r), pdev->name); if (!priv->ioarea) { ret = -EBUSY; goto out_mem; } priv->psc_base = CKSEG1ADDR(r->start); priv->xfer_timeout = 200; priv->ack_timeout = 200; priv->adap.nr = pdev->id; priv->adap.algo = &au1550_algo; priv->adap.algo_data = priv; priv->adap.dev.parent = &pdev->dev; strlcpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name)); /* Now, set up the PSC for SMBus PIO mode. */ i2c_au1550_setup(priv); ret = i2c_add_numbered_adapter(&priv->adap); if (ret == 0) { platform_set_drvdata(pdev, priv); return 0; } i2c_au1550_disable(priv); release_resource(priv->ioarea); kfree(priv->ioarea); out_mem: kfree(priv); out: return ret; } static int __devexit i2c_au1550_remove(struct platform_device *pdev) { struct i2c_au1550_data *priv = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); i2c_del_adapter(&priv->adap); i2c_au1550_disable(priv); release_resource(priv->ioarea); kfree(priv->ioarea); kfree(priv); return 0; } #ifdef CONFIG_PM static int i2c_au1550_suspend(struct platform_device *pdev, pm_message_t state) { struct i2c_au1550_data *priv = platform_get_drvdata(pdev); i2c_au1550_disable(priv); return 0; } static int i2c_au1550_resume(struct platform_device *pdev) { struct i2c_au1550_data *priv = platform_get_drvdata(pdev); i2c_au1550_setup(priv); return 0; } #else #define i2c_au1550_suspend NULL #define i2c_au1550_resume NULL #endif static struct platform_driver au1xpsc_smbus_driver = { .driver = { .name = "au1xpsc_smbus", .owner = THIS_MODULE, }, .probe = i2c_au1550_probe, .remove = __devexit_p(i2c_au1550_remove), .suspend = i2c_au1550_suspend, .resume = i2c_au1550_resume, 
}; static int __init i2c_au1550_init(void) { return platform_driver_register(&au1xpsc_smbus_driver); } static void __exit i2c_au1550_exit(void) { platform_driver_unregister(&au1xpsc_smbus_driver); } MODULE_AUTHOR("Dan Malek, Embedded Edge, LLC."); MODULE_DESCRIPTION("SMBus adapter Alchemy pb1550"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:au1xpsc_smbus"); module_init (i2c_au1550_init); module_exit (i2c_au1550_exit);
gpl-2.0
Split-Screen/android_kernel_xiaomi_cancro
arch/sparc/kernel/chmc.c
7436
20646
/* chmc.c: Driver for UltraSPARC-III memory controller. * * Copyright (C) 2001, 2007, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/spitfire.h> #include <asm/chmctrl.h> #include <asm/cpudata.h> #include <asm/oplib.h> #include <asm/prom.h> #include <asm/head.h> #include <asm/io.h> #include <asm/memctrl.h> #define DRV_MODULE_NAME "chmc" #define PFX DRV_MODULE_NAME ": " #define DRV_MODULE_VERSION "0.2" MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); MODULE_DESCRIPTION("UltraSPARC-III memory controller driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); static int mc_type; #define MC_TYPE_SAFARI 1 #define MC_TYPE_JBUS 2 static dimm_printer_t us3mc_dimm_printer; #define CHMCTRL_NDGRPS 2 #define CHMCTRL_NDIMMS 4 #define CHMC_DIMMS_PER_MC (CHMCTRL_NDGRPS * CHMCTRL_NDIMMS) /* OBP memory-layout property format. */ struct chmc_obp_map { unsigned char dimm_map[144]; unsigned char pin_map[576]; }; #define DIMM_LABEL_SZ 8 struct chmc_obp_mem_layout { /* One max 8-byte string label per DIMM. Usually * this matches the label on the motherboard where * that DIMM resides. */ char dimm_labels[CHMC_DIMMS_PER_MC][DIMM_LABEL_SZ]; /* If symmetric use map[0], else it is * asymmetric and map[1] should be used. 
*/ char symmetric; struct chmc_obp_map map[2]; }; #define CHMCTRL_NBANKS 4 struct chmc_bank_info { struct chmc *p; int bank_id; u64 raw_reg; int valid; int uk; int um; int lk; int lm; int interleave; unsigned long base; unsigned long size; }; struct chmc { struct list_head list; int portid; struct chmc_obp_mem_layout layout_prop; int layout_size; void __iomem *regs; u64 timing_control1; u64 timing_control2; u64 timing_control3; u64 timing_control4; u64 memaddr_control; struct chmc_bank_info logical_banks[CHMCTRL_NBANKS]; }; #define JBUSMC_REGS_SIZE 8 #define JB_MC_REG1_DIMM2_BANK3 0x8000000000000000UL #define JB_MC_REG1_DIMM1_BANK1 0x4000000000000000UL #define JB_MC_REG1_DIMM2_BANK2 0x2000000000000000UL #define JB_MC_REG1_DIMM1_BANK0 0x1000000000000000UL #define JB_MC_REG1_XOR 0x0000010000000000UL #define JB_MC_REG1_ADDR_GEN_2 0x000000e000000000UL #define JB_MC_REG1_ADDR_GEN_2_SHIFT 37 #define JB_MC_REG1_ADDR_GEN_1 0x0000001c00000000UL #define JB_MC_REG1_ADDR_GEN_1_SHIFT 34 #define JB_MC_REG1_INTERLEAVE 0x0000000001800000UL #define JB_MC_REG1_INTERLEAVE_SHIFT 23 #define JB_MC_REG1_DIMM2_PTYPE 0x0000000000200000UL #define JB_MC_REG1_DIMM2_PTYPE_SHIFT 21 #define JB_MC_REG1_DIMM1_PTYPE 0x0000000000100000UL #define JB_MC_REG1_DIMM1_PTYPE_SHIFT 20 #define PART_TYPE_X8 0 #define PART_TYPE_X4 1 #define INTERLEAVE_NONE 0 #define INTERLEAVE_SAME 1 #define INTERLEAVE_INTERNAL 2 #define INTERLEAVE_BOTH 3 #define ADDR_GEN_128MB 0 #define ADDR_GEN_256MB 1 #define ADDR_GEN_512MB 2 #define ADDR_GEN_1GB 3 #define JB_NUM_DIMM_GROUPS 2 #define JB_NUM_DIMMS_PER_GROUP 2 #define JB_NUM_DIMMS (JB_NUM_DIMM_GROUPS * JB_NUM_DIMMS_PER_GROUP) struct jbusmc_obp_map { unsigned char dimm_map[18]; unsigned char pin_map[144]; }; struct jbusmc_obp_mem_layout { /* One max 8-byte string label per DIMM. Usually * this matches the label on the motherboard where * that DIMM resides. 
*/ char dimm_labels[JB_NUM_DIMMS][DIMM_LABEL_SZ]; /* If symmetric use map[0], else it is * asymmetric and map[1] should be used. */ char symmetric; struct jbusmc_obp_map map; char _pad; }; struct jbusmc_dimm_group { struct jbusmc *controller; int index; u64 base_addr; u64 size; }; struct jbusmc { void __iomem *regs; u64 mc_reg_1; u32 portid; struct jbusmc_obp_mem_layout layout; int layout_len; int num_dimm_groups; struct jbusmc_dimm_group dimm_groups[JB_NUM_DIMM_GROUPS]; struct list_head list; }; static DEFINE_SPINLOCK(mctrl_list_lock); static LIST_HEAD(mctrl_list); static void mc_list_add(struct list_head *list) { spin_lock(&mctrl_list_lock); list_add(list, &mctrl_list); spin_unlock(&mctrl_list_lock); } static void mc_list_del(struct list_head *list) { spin_lock(&mctrl_list_lock); list_del_init(list); spin_unlock(&mctrl_list_lock); } #define SYNDROME_MIN -1 #define SYNDROME_MAX 144 /* Covert syndrome code into the way the bits are positioned * on the bus. */ static int syndrome_to_qword_code(int syndrome_code) { if (syndrome_code < 128) syndrome_code += 16; else if (syndrome_code < 128 + 9) syndrome_code -= (128 - 7); else if (syndrome_code < (128 + 9 + 3)) syndrome_code -= (128 + 9 - 4); else syndrome_code -= (128 + 9 + 3); return syndrome_code; } /* All this magic has to do with how a cache line comes over the wire * on Safari and JBUS. A 64-bit line comes over in 1 or more quadword * cycles, each of which transmit ECC/MTAG info as well as the actual * data. 
*/ #define L2_LINE_SIZE 64 #define L2_LINE_ADDR_MSK (L2_LINE_SIZE - 1) #define QW_PER_LINE 4 #define QW_BYTES (L2_LINE_SIZE / QW_PER_LINE) #define QW_BITS 144 #define SAFARI_LAST_BIT (576 - 1) #define JBUS_LAST_BIT (144 - 1) static void get_pin_and_dimm_str(int syndrome_code, unsigned long paddr, int *pin_p, char **dimm_str_p, void *_prop, int base_dimm_offset) { int qword_code = syndrome_to_qword_code(syndrome_code); int cache_line_offset; int offset_inverse; int dimm_map_index; int map_val; if (mc_type == MC_TYPE_JBUS) { struct jbusmc_obp_mem_layout *p = _prop; /* JBUS */ cache_line_offset = qword_code; offset_inverse = (JBUS_LAST_BIT - cache_line_offset); dimm_map_index = offset_inverse / 8; map_val = p->map.dimm_map[dimm_map_index]; map_val = ((map_val >> ((7 - (offset_inverse & 7)))) & 1); *dimm_str_p = p->dimm_labels[base_dimm_offset + map_val]; *pin_p = p->map.pin_map[cache_line_offset]; } else { struct chmc_obp_mem_layout *p = _prop; struct chmc_obp_map *mp; int qword; /* Safari */ if (p->symmetric) mp = &p->map[0]; else mp = &p->map[1]; qword = (paddr & L2_LINE_ADDR_MSK) / QW_BYTES; cache_line_offset = ((3 - qword) * QW_BITS) + qword_code; offset_inverse = (SAFARI_LAST_BIT - cache_line_offset); dimm_map_index = offset_inverse >> 2; map_val = mp->dimm_map[dimm_map_index]; map_val = ((map_val >> ((3 - (offset_inverse & 3)) << 1)) & 0x3); *dimm_str_p = p->dimm_labels[base_dimm_offset + map_val]; *pin_p = mp->pin_map[cache_line_offset]; } } static struct jbusmc_dimm_group *jbusmc_find_dimm_group(unsigned long phys_addr) { struct jbusmc *p; list_for_each_entry(p, &mctrl_list, list) { int i; for (i = 0; i < p->num_dimm_groups; i++) { struct jbusmc_dimm_group *dp = &p->dimm_groups[i]; if (phys_addr < dp->base_addr || (dp->base_addr + dp->size) <= phys_addr) continue; return dp; } } return NULL; } static int jbusmc_print_dimm(int syndrome_code, unsigned long phys_addr, char *buf, int buflen) { struct jbusmc_obp_mem_layout *prop; struct jbusmc_dimm_group *dp; 
struct jbusmc *p; int first_dimm; dp = jbusmc_find_dimm_group(phys_addr); if (dp == NULL || syndrome_code < SYNDROME_MIN || syndrome_code > SYNDROME_MAX) { buf[0] = '?'; buf[1] = '?'; buf[2] = '?'; buf[3] = '\0'; return 0; } p = dp->controller; prop = &p->layout; first_dimm = dp->index * JB_NUM_DIMMS_PER_GROUP; if (syndrome_code != SYNDROME_MIN) { char *dimm_str; int pin; get_pin_and_dimm_str(syndrome_code, phys_addr, &pin, &dimm_str, prop, first_dimm); sprintf(buf, "%s, pin %3d", dimm_str, pin); } else { int dimm; /* Multi-bit error, we just dump out all the * dimm labels associated with this dimm group. */ for (dimm = 0; dimm < JB_NUM_DIMMS_PER_GROUP; dimm++) { sprintf(buf, "%s ", prop->dimm_labels[first_dimm + dimm]); buf += strlen(buf); } } return 0; } static u64 __devinit jbusmc_dimm_group_size(u64 base, const struct linux_prom64_registers *mem_regs, int num_mem_regs) { u64 max = base + (8UL * 1024 * 1024 * 1024); u64 max_seen = base; int i; for (i = 0; i < num_mem_regs; i++) { const struct linux_prom64_registers *ent; u64 this_base; u64 this_end; ent = &mem_regs[i]; this_base = ent->phys_addr; this_end = this_base + ent->reg_size; if (base < this_base || base >= this_end) continue; if (this_end > max) this_end = max; if (this_end > max_seen) max_seen = this_end; } return max_seen - base; } static void __devinit jbusmc_construct_one_dimm_group(struct jbusmc *p, unsigned long index, const struct linux_prom64_registers *mem_regs, int num_mem_regs) { struct jbusmc_dimm_group *dp = &p->dimm_groups[index]; dp->controller = p; dp->index = index; dp->base_addr = (p->portid * (64UL * 1024 * 1024 * 1024)); dp->base_addr += (index * (8UL * 1024 * 1024 * 1024)); dp->size = jbusmc_dimm_group_size(dp->base_addr, mem_regs, num_mem_regs); } static void __devinit jbusmc_construct_dimm_groups(struct jbusmc *p, const struct linux_prom64_registers *mem_regs, int num_mem_regs) { if (p->mc_reg_1 & JB_MC_REG1_DIMM1_BANK0) { jbusmc_construct_one_dimm_group(p, 0, mem_regs, 
num_mem_regs); p->num_dimm_groups++; } if (p->mc_reg_1 & JB_MC_REG1_DIMM2_BANK2) { jbusmc_construct_one_dimm_group(p, 1, mem_regs, num_mem_regs); p->num_dimm_groups++; } } static int __devinit jbusmc_probe(struct platform_device *op) { const struct linux_prom64_registers *mem_regs; struct device_node *mem_node; int err, len, num_mem_regs; struct jbusmc *p; const u32 *prop; const void *ml; err = -ENODEV; mem_node = of_find_node_by_path("/memory"); if (!mem_node) { printk(KERN_ERR PFX "Cannot find /memory node.\n"); goto out; } mem_regs = of_get_property(mem_node, "reg", &len); if (!mem_regs) { printk(KERN_ERR PFX "Cannot get reg property of /memory node.\n"); goto out; } num_mem_regs = len / sizeof(*mem_regs); err = -ENOMEM; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { printk(KERN_ERR PFX "Cannot allocate struct jbusmc.\n"); goto out; } INIT_LIST_HEAD(&p->list); err = -ENODEV; prop = of_get_property(op->dev.of_node, "portid", &len); if (!prop || len != 4) { printk(KERN_ERR PFX "Cannot find portid.\n"); goto out_free; } p->portid = *prop; prop = of_get_property(op->dev.of_node, "memory-control-register-1", &len); if (!prop || len != 8) { printk(KERN_ERR PFX "Cannot get memory control register 1.\n"); goto out_free; } p->mc_reg_1 = ((u64)prop[0] << 32) | (u64) prop[1]; err = -ENOMEM; p->regs = of_ioremap(&op->resource[0], 0, JBUSMC_REGS_SIZE, "jbusmc"); if (!p->regs) { printk(KERN_ERR PFX "Cannot map jbusmc regs.\n"); goto out_free; } err = -ENODEV; ml = of_get_property(op->dev.of_node, "memory-layout", &p->layout_len); if (!ml) { printk(KERN_ERR PFX "Cannot get memory layout property.\n"); goto out_iounmap; } if (p->layout_len > sizeof(p->layout)) { printk(KERN_ERR PFX "Unexpected memory-layout size %d\n", p->layout_len); goto out_iounmap; } memcpy(&p->layout, ml, p->layout_len); jbusmc_construct_dimm_groups(p, mem_regs, num_mem_regs); mc_list_add(&p->list); printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %s\n", op->dev.of_node->full_name); 
dev_set_drvdata(&op->dev, p); err = 0; out: return err; out_iounmap: of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE); out_free: kfree(p); goto out; } /* Does BANK decode PHYS_ADDR? */ static int chmc_bank_match(struct chmc_bank_info *bp, unsigned long phys_addr) { unsigned long upper_bits = (phys_addr & PA_UPPER_BITS) >> PA_UPPER_BITS_SHIFT; unsigned long lower_bits = (phys_addr & PA_LOWER_BITS) >> PA_LOWER_BITS_SHIFT; /* Bank must be enabled to match. */ if (bp->valid == 0) return 0; /* Would BANK match upper bits? */ upper_bits ^= bp->um; /* What bits are different? */ upper_bits = ~upper_bits; /* Invert. */ upper_bits |= bp->uk; /* What bits don't matter for matching? */ upper_bits = ~upper_bits; /* Invert. */ if (upper_bits) return 0; /* Would BANK match lower bits? */ lower_bits ^= bp->lm; /* What bits are different? */ lower_bits = ~lower_bits; /* Invert. */ lower_bits |= bp->lk; /* What bits don't matter for matching? */ lower_bits = ~lower_bits; /* Invert. */ if (lower_bits) return 0; /* I always knew you'd be the one. */ return 1; } /* Given PHYS_ADDR, search memory controller banks for a match. */ static struct chmc_bank_info *chmc_find_bank(unsigned long phys_addr) { struct chmc *p; list_for_each_entry(p, &mctrl_list, list) { int bank_no; for (bank_no = 0; bank_no < CHMCTRL_NBANKS; bank_no++) { struct chmc_bank_info *bp; bp = &p->logical_banks[bank_no]; if (chmc_bank_match(bp, phys_addr)) return bp; } } return NULL; } /* This is the main purpose of this driver. 
*/ static int chmc_print_dimm(int syndrome_code, unsigned long phys_addr, char *buf, int buflen) { struct chmc_bank_info *bp; struct chmc_obp_mem_layout *prop; int bank_in_controller, first_dimm; bp = chmc_find_bank(phys_addr); if (bp == NULL || syndrome_code < SYNDROME_MIN || syndrome_code > SYNDROME_MAX) { buf[0] = '?'; buf[1] = '?'; buf[2] = '?'; buf[3] = '\0'; return 0; } prop = &bp->p->layout_prop; bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1); first_dimm = (bank_in_controller & (CHMCTRL_NDGRPS - 1)); first_dimm *= CHMCTRL_NDIMMS; if (syndrome_code != SYNDROME_MIN) { char *dimm_str; int pin; get_pin_and_dimm_str(syndrome_code, phys_addr, &pin, &dimm_str, prop, first_dimm); sprintf(buf, "%s, pin %3d", dimm_str, pin); } else { int dimm; /* Multi-bit error, we just dump out all the * dimm labels associated with this bank. */ for (dimm = 0; dimm < CHMCTRL_NDIMMS; dimm++) { sprintf(buf, "%s ", prop->dimm_labels[first_dimm + dimm]); buf += strlen(buf); } } return 0; } /* Accessing the registers is slightly complicated. If you want * to get at the memory controller which is on the same processor * the code is executing, you must use special ASI load/store else * you go through the global mapping. 
*/ static u64 chmc_read_mcreg(struct chmc *p, unsigned long offset) { unsigned long ret, this_cpu; preempt_disable(); this_cpu = real_hard_smp_processor_id(); if (p->portid == this_cpu) { __asm__ __volatile__("ldxa [%1] %2, %0" : "=r" (ret) : "r" (offset), "i" (ASI_MCU_CTRL_REG)); } else { __asm__ __volatile__("ldxa [%1] %2, %0" : "=r" (ret) : "r" (p->regs + offset), "i" (ASI_PHYS_BYPASS_EC_E)); } preempt_enable(); return ret; } #if 0 /* currently unused */ static void chmc_write_mcreg(struct chmc *p, unsigned long offset, u64 val) { if (p->portid == smp_processor_id()) { __asm__ __volatile__("stxa %0, [%1] %2" : : "r" (val), "r" (offset), "i" (ASI_MCU_CTRL_REG)); } else { __asm__ __volatile__("ldxa %0, [%1] %2" : : "r" (val), "r" (p->regs + offset), "i" (ASI_PHYS_BYPASS_EC_E)); } } #endif static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 val) { struct chmc_bank_info *bp = &p->logical_banks[which_bank]; bp->p = p; bp->bank_id = (CHMCTRL_NBANKS * p->portid) + which_bank; bp->raw_reg = val; bp->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT; bp->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT; bp->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT; bp->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT; bp->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT; bp->base = (bp->um); bp->base &= ~(bp->uk); bp->base <<= PA_UPPER_BITS_SHIFT; switch(bp->lk) { case 0xf: default: bp->interleave = 1; break; case 0xe: bp->interleave = 2; break; case 0xc: bp->interleave = 4; break; case 0x8: bp->interleave = 8; break; case 0x0: bp->interleave = 16; break; } /* UK[10] is reserved, and UK[11] is not set for the SDRAM * bank size definition. 
*/ bp->size = (((unsigned long)bp->uk & ((1UL << 10UL) - 1UL)) + 1UL) << PA_UPPER_BITS_SHIFT; bp->size /= bp->interleave; } static void chmc_fetch_decode_regs(struct chmc *p) { if (p->layout_size == 0) return; chmc_interpret_one_decode_reg(p, 0, chmc_read_mcreg(p, CHMCTRL_DECODE1)); chmc_interpret_one_decode_reg(p, 1, chmc_read_mcreg(p, CHMCTRL_DECODE2)); chmc_interpret_one_decode_reg(p, 2, chmc_read_mcreg(p, CHMCTRL_DECODE3)); chmc_interpret_one_decode_reg(p, 3, chmc_read_mcreg(p, CHMCTRL_DECODE4)); } static int __devinit chmc_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; unsigned long ver; const void *pval; int len, portid; struct chmc *p; int err; err = -ENODEV; __asm__ ("rdpr %%ver, %0" : "=r" (ver)); if ((ver >> 32UL) == __JALAPENO_ID || (ver >> 32UL) == __SERRANO_ID) goto out; portid = of_getintprop_default(dp, "portid", -1); if (portid == -1) goto out; pval = of_get_property(dp, "memory-layout", &len); if (pval && len > sizeof(p->layout_prop)) { printk(KERN_ERR PFX "Unexpected memory-layout property " "size %d.\n", len); goto out; } err = -ENOMEM; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { printk(KERN_ERR PFX "Could not allocate struct chmc.\n"); goto out; } p->portid = portid; p->layout_size = len; if (!pval) p->layout_size = 0; else memcpy(&p->layout_prop, pval, len); p->regs = of_ioremap(&op->resource[0], 0, 0x48, "chmc"); if (!p->regs) { printk(KERN_ERR PFX "Could not map registers.\n"); goto out_free; } if (p->layout_size != 0UL) { p->timing_control1 = chmc_read_mcreg(p, CHMCTRL_TCTRL1); p->timing_control2 = chmc_read_mcreg(p, CHMCTRL_TCTRL2); p->timing_control3 = chmc_read_mcreg(p, CHMCTRL_TCTRL3); p->timing_control4 = chmc_read_mcreg(p, CHMCTRL_TCTRL4); p->memaddr_control = chmc_read_mcreg(p, CHMCTRL_MACTRL); } chmc_fetch_decode_regs(p); mc_list_add(&p->list); printk(KERN_INFO PFX "UltraSPARC-III memory controller at %s [%s]\n", dp->full_name, (p->layout_size ? 
"ACTIVE" : "INACTIVE")); dev_set_drvdata(&op->dev, p); err = 0; out: return err; out_free: kfree(p); goto out; } static int __devinit us3mc_probe(struct platform_device *op) { if (mc_type == MC_TYPE_SAFARI) return chmc_probe(op); else if (mc_type == MC_TYPE_JBUS) return jbusmc_probe(op); return -ENODEV; } static void __devexit chmc_destroy(struct platform_device *op, struct chmc *p) { list_del(&p->list); of_iounmap(&op->resource[0], p->regs, 0x48); kfree(p); } static void __devexit jbusmc_destroy(struct platform_device *op, struct jbusmc *p) { mc_list_del(&p->list); of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE); kfree(p); } static int __devexit us3mc_remove(struct platform_device *op) { void *p = dev_get_drvdata(&op->dev); if (p) { if (mc_type == MC_TYPE_SAFARI) chmc_destroy(op, p); else if (mc_type == MC_TYPE_JBUS) jbusmc_destroy(op, p); } return 0; } static const struct of_device_id us3mc_match[] = { { .name = "memory-controller", }, {}, }; MODULE_DEVICE_TABLE(of, us3mc_match); static struct platform_driver us3mc_driver = { .driver = { .name = "us3mc", .owner = THIS_MODULE, .of_match_table = us3mc_match, }, .probe = us3mc_probe, .remove = __devexit_p(us3mc_remove), }; static inline bool us3mc_platform(void) { if (tlb_type == cheetah || tlb_type == cheetah_plus) return true; return false; } static int __init us3mc_init(void) { unsigned long ver; int ret; if (!us3mc_platform()) return -ENODEV; __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); if ((ver >> 32UL) == __JALAPENO_ID || (ver >> 32UL) == __SERRANO_ID) { mc_type = MC_TYPE_JBUS; us3mc_dimm_printer = jbusmc_print_dimm; } else { mc_type = MC_TYPE_SAFARI; us3mc_dimm_printer = chmc_print_dimm; } ret = register_dimm_printer(us3mc_dimm_printer); if (!ret) { ret = platform_driver_register(&us3mc_driver); if (ret) unregister_dimm_printer(us3mc_dimm_printer); } return ret; } static void __exit us3mc_cleanup(void) { if (us3mc_platform()) { unregister_dimm_printer(us3mc_dimm_printer); 
platform_driver_unregister(&us3mc_driver); } } module_init(us3mc_init); module_exit(us3mc_cleanup);
gpl-2.0
NinjahMeh/android_kernel_huawei_angler
arch/sh/kernel/cpu/sh3/clock-sh7712.c
9228
1616
/* * arch/sh/kernel/cpu/sh3/clock-sh7712.c * * SH7712 support for the clock framework * * Copyright (C) 2007 Andrew Murray <amurray@mpc-data.co.uk> * * Based on arch/sh/kernel/cpu/sh3/clock-sh3.c * Copyright (C) 2005 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <asm/clock.h> #include <asm/freq.h> #include <asm/io.h> static int multipliers[] = { 1, 2, 3 }; static int divisors[] = { 1, 2, 3, 4, 6 }; static void master_clk_init(struct clk *clk) { int frqcr = __raw_readw(FRQCR); int idx = (frqcr & 0x0300) >> 8; clk->rate *= multipliers[idx]; } static struct sh_clk_ops sh7712_master_clk_ops = { .init = master_clk_init, }; static unsigned long module_clk_recalc(struct clk *clk) { int frqcr = __raw_readw(FRQCR); int idx = frqcr & 0x0007; return clk->parent->rate / divisors[idx]; } static struct sh_clk_ops sh7712_module_clk_ops = { .recalc = module_clk_recalc, }; static unsigned long cpu_clk_recalc(struct clk *clk) { int frqcr = __raw_readw(FRQCR); int idx = (frqcr & 0x0030) >> 4; return clk->parent->rate / divisors[idx]; } static struct sh_clk_ops sh7712_cpu_clk_ops = { .recalc = cpu_clk_recalc, }; static struct sh_clk_ops *sh7712_clk_ops[] = { &sh7712_master_clk_ops, &sh7712_module_clk_ops, &sh7712_cpu_clk_ops, }; void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) { if (idx < ARRAY_SIZE(sh7712_clk_ops)) *ops = sh7712_clk_ops[idx]; }
gpl-2.0
TeamWin/android_kernel_samsung_crespo
arch/avr32/kernel/ptrace.c
12044
9469
/* * Copyright (C) 2004-2006 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/user.h> #include <linux/security.h> #include <linux/unistd.h> #include <linux/notifier.h> #include <asm/traps.h> #include <asm/uaccess.h> #include <asm/ocd.h> #include <asm/mmu_context.h> #include <linux/kdebug.h> static struct pt_regs *get_user_regs(struct task_struct *tsk) { return (struct pt_regs *)((unsigned long)task_stack_page(tsk) + THREAD_SIZE - sizeof(struct pt_regs)); } void user_enable_single_step(struct task_struct *tsk) { pr_debug("user_enable_single_step: pid=%u, PC=0x%08lx, SR=0x%08lx\n", tsk->pid, task_pt_regs(tsk)->pc, task_pt_regs(tsk)->sr); /* * We can't schedule in Debug mode, so when TIF_BREAKPOINT is * set, the system call or exception handler will do a * breakpoint to enter monitor mode before returning to * userspace. * * The monitor code will then notice that TIF_SINGLE_STEP is * set and return to userspace with single stepping enabled. * The CPU will then enter monitor mode again after exactly * one instruction has been executed, and the monitor code * will then send a SIGTRAP to the process. */ set_tsk_thread_flag(tsk, TIF_BREAKPOINT); set_tsk_thread_flag(tsk, TIF_SINGLE_STEP); } void user_disable_single_step(struct task_struct *child) { /* XXX(hch): a no-op here seems wrong.. */ } /* * Called by kernel/ptrace.c when detaching * * Make sure any single step bits, etc. are not set */ void ptrace_disable(struct task_struct *child) { clear_tsk_thread_flag(child, TIF_SINGLE_STEP); clear_tsk_thread_flag(child, TIF_BREAKPOINT); ocd_disable(child); } /* * Read the word at offset "offset" into the task's "struct user". 
We * actually access the pt_regs struct stored on the kernel stack. */ static int ptrace_read_user(struct task_struct *tsk, unsigned long offset, unsigned long __user *data) { unsigned long *regs; unsigned long value; if (offset & 3 || offset >= sizeof(struct user)) { printk("ptrace_read_user: invalid offset 0x%08lx\n", offset); return -EIO; } regs = (unsigned long *)get_user_regs(tsk); value = 0; if (offset < sizeof(struct pt_regs)) value = regs[offset / sizeof(regs[0])]; pr_debug("ptrace_read_user(%s[%u], %#lx, %p) -> %#lx\n", tsk->comm, tsk->pid, offset, data, value); return put_user(value, data); } /* * Write the word "value" to offset "offset" into the task's "struct * user". We actually access the pt_regs struct stored on the kernel * stack. */ static int ptrace_write_user(struct task_struct *tsk, unsigned long offset, unsigned long value) { unsigned long *regs; pr_debug("ptrace_write_user(%s[%u], %#lx, %#lx)\n", tsk->comm, tsk->pid, offset, value); if (offset & 3 || offset >= sizeof(struct user)) { pr_debug(" invalid offset 0x%08lx\n", offset); return -EIO; } if (offset >= sizeof(struct pt_regs)) return 0; regs = (unsigned long *)get_user_regs(tsk); regs[offset / sizeof(regs[0])] = value; return 0; } static int ptrace_getregs(struct task_struct *tsk, void __user *uregs) { struct pt_regs *regs = get_user_regs(tsk); return copy_to_user(uregs, regs, sizeof(*regs)) ? 
-EFAULT : 0; } static int ptrace_setregs(struct task_struct *tsk, const void __user *uregs) { struct pt_regs newregs; int ret; ret = -EFAULT; if (copy_from_user(&newregs, uregs, sizeof(newregs)) == 0) { struct pt_regs *regs = get_user_regs(tsk); ret = -EINVAL; if (valid_user_regs(&newregs)) { *regs = newregs; ret = 0; } } return ret; } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret; void __user *datap = (void __user *) data; switch (request) { /* Read the word at location addr in the child process */ case PTRACE_PEEKTEXT: case PTRACE_PEEKDATA: ret = generic_ptrace_peekdata(child, addr, data); break; case PTRACE_PEEKUSR: ret = ptrace_read_user(child, addr, datap); break; /* Write the word in data at location addr */ case PTRACE_POKETEXT: case PTRACE_POKEDATA: ret = generic_ptrace_pokedata(child, addr, data); break; case PTRACE_POKEUSR: ret = ptrace_write_user(child, addr, data); break; case PTRACE_GETREGS: ret = ptrace_getregs(child, datap); break; case PTRACE_SETREGS: ret = ptrace_setregs(child, datap); break; default: ret = ptrace_request(child, request, addr, data); break; } return ret; } asmlinkage void syscall_trace(void) { if (!test_thread_flag(TIF_SYSCALL_TRACE)) return; if (!(current->ptrace & PT_PTRACED)) return; /* The 0x80 provides a way for the tracing parent to * distinguish between a syscall stop and SIGTRAP delivery */ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); /* * this isn't the same as continuing with a signal, but it * will do for normal use. strace only continues with a * signal if the stopping signal is not SIGTRAP. -brl */ if (current->exit_code) { pr_debug("syscall_trace: sending signal %d to PID %u\n", current->exit_code, current->pid); send_sig(current->exit_code, current, 1); current->exit_code = 0; } } /* * debug_trampoline() is an assembly stub which will store all user * registers on the stack and execute a breakpoint instruction. 
* * If we single-step into an exception handler which runs with * interrupts disabled the whole time so it doesn't have to check for * pending work, its return address will be modified so that it ends * up returning to debug_trampoline. * * If the exception handler decides to store the user context and * enable interrupts after all, it will restore the original return * address and status register value. Before it returns, it will * notice that TIF_BREAKPOINT is set and execute a breakpoint * instruction. */ extern void debug_trampoline(void); asmlinkage struct pt_regs *do_debug(struct pt_regs *regs) { struct thread_info *ti; unsigned long trampoline_addr; u32 status; u32 ctrl; int code; status = ocd_read(DS); ti = current_thread_info(); code = TRAP_BRKPT; pr_debug("do_debug: status=0x%08x PC=0x%08lx SR=0x%08lx tif=0x%08lx\n", status, regs->pc, regs->sr, ti->flags); if (!user_mode(regs)) { unsigned long die_val = DIE_BREAKPOINT; if (status & (1 << OCD_DS_SSS_BIT)) die_val = DIE_SSTEP; if (notify_die(die_val, "ptrace", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) return regs; if ((status & (1 << OCD_DS_SWB_BIT)) && test_and_clear_ti_thread_flag( ti, TIF_BREAKPOINT)) { /* * Explicit breakpoint from trampoline or * exception/syscall/interrupt handler. * * The real saved regs are on the stack right * after the ones we saved on entry. */ regs++; pr_debug(" -> TIF_BREAKPOINT done, adjusted regs:" "PC=0x%08lx SR=0x%08lx\n", regs->pc, regs->sr); BUG_ON(!user_mode(regs)); if (test_thread_flag(TIF_SINGLE_STEP)) { pr_debug("Going to do single step...\n"); return regs; } /* * No TIF_SINGLE_STEP means we're done * stepping over a syscall. Do the trap now. */ code = TRAP_TRACE; } else if ((status & (1 << OCD_DS_SSS_BIT)) && test_ti_thread_flag(ti, TIF_SINGLE_STEP)) { pr_debug("Stepped into something, " "setting TIF_BREAKPOINT...\n"); set_ti_thread_flag(ti, TIF_BREAKPOINT); /* * We stepped into an exception, interrupt or * syscall handler. 
Some exception handlers * don't check for pending work, so we need to * set up a trampoline just in case. * * The exception entry code will undo the * trampoline stuff if it does a full context * save (which also means that it'll check for * pending work later.) */ if ((regs->sr & MODE_MASK) == MODE_EXCEPTION) { trampoline_addr = (unsigned long)&debug_trampoline; pr_debug("Setting up trampoline...\n"); ti->rar_saved = sysreg_read(RAR_EX); ti->rsr_saved = sysreg_read(RSR_EX); sysreg_write(RAR_EX, trampoline_addr); sysreg_write(RSR_EX, (MODE_EXCEPTION | SR_EM | SR_GM)); BUG_ON(ti->rsr_saved & MODE_MASK); } /* * If we stepped into a system call, we * shouldn't do a single step after we return * since the return address is right after the * "scall" instruction we were told to step * over. */ if ((regs->sr & MODE_MASK) == MODE_SUPERVISOR) { pr_debug("Supervisor; no single step\n"); clear_ti_thread_flag(ti, TIF_SINGLE_STEP); } ctrl = ocd_read(DC); ctrl &= ~(1 << OCD_DC_SS_BIT); ocd_write(DC, ctrl); return regs; } else { printk(KERN_ERR "Unexpected OCD_DS value: 0x%08x\n", status); printk(KERN_ERR "Thread flags: 0x%08lx\n", ti->flags); die("Unhandled debug trap in kernel mode", regs, SIGTRAP); } } else if (status & (1 << OCD_DS_SSS_BIT)) { /* Single step in user mode */ code = TRAP_TRACE; ctrl = ocd_read(DC); ctrl &= ~(1 << OCD_DC_SS_BIT); ocd_write(DC, ctrl); } pr_debug("Sending SIGTRAP: code=%d PC=0x%08lx SR=0x%08lx\n", code, regs->pc, regs->sr); clear_thread_flag(TIF_SINGLE_STEP); _exception(SIGTRAP, regs, code, instruction_pointer(regs)); return regs; }
gpl-2.0
ExorEmbedded/android-us01-kernel
sound/core/timer_compat.c
13836
3619
/* * 32bit -> 64bit ioctl wrapper for timer API * Copyright (c) by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* This file included from timer.c */ #include <linux/compat.h> struct snd_timer_info32 { u32 flags; s32 card; unsigned char id[64]; unsigned char name[80]; u32 reserved0; u32 resolution; unsigned char reserved[64]; }; static int snd_timer_user_info_compat(struct file *file, struct snd_timer_info32 __user *_info) { struct snd_timer_user *tu; struct snd_timer_info32 info; struct snd_timer *t; tu = file->private_data; if (snd_BUG_ON(!tu->timeri)) return -ENXIO; t = tu->timeri->timer; if (snd_BUG_ON(!t)) return -ENXIO; memset(&info, 0, sizeof(info)); info.card = t->card ? 
t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) info.flags |= SNDRV_TIMER_FLG_SLAVE; strlcpy(info.id, t->id, sizeof(info.id)); strlcpy(info.name, t->name, sizeof(info.name)); info.resolution = t->hw.resolution; if (copy_to_user(_info, &info, sizeof(*_info))) return -EFAULT; return 0; } struct snd_timer_status32 { struct compat_timespec tstamp; u32 resolution; u32 lost; u32 overrun; u32 queue; unsigned char reserved[64]; }; static int snd_timer_user_status_compat(struct file *file, struct snd_timer_status32 __user *_status) { struct snd_timer_user *tu; struct snd_timer_status status; tu = file->private_data; if (snd_BUG_ON(!tu->timeri)) return -ENXIO; memset(&status, 0, sizeof(status)); status.tstamp = tu->tstamp; status.resolution = snd_timer_resolution(tu->timeri); status.lost = tu->timeri->lost; status.overrun = tu->overrun; spin_lock_irq(&tu->qlock); status.queue = tu->qused; spin_unlock_irq(&tu->qlock); if (copy_to_user(_status, &status, sizeof(status))) return -EFAULT; return 0; } /* */ enum { SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32), SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32), }; static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); switch (cmd) { case SNDRV_TIMER_IOCTL_PVERSION: case SNDRV_TIMER_IOCTL_TREAD: case SNDRV_TIMER_IOCTL_GINFO: case SNDRV_TIMER_IOCTL_GPARAMS: case SNDRV_TIMER_IOCTL_GSTATUS: case SNDRV_TIMER_IOCTL_SELECT: case SNDRV_TIMER_IOCTL_PARAMS: case SNDRV_TIMER_IOCTL_START: case SNDRV_TIMER_IOCTL_START_OLD: case SNDRV_TIMER_IOCTL_STOP: case SNDRV_TIMER_IOCTL_STOP_OLD: case SNDRV_TIMER_IOCTL_CONTINUE: case SNDRV_TIMER_IOCTL_CONTINUE_OLD: case SNDRV_TIMER_IOCTL_PAUSE: case SNDRV_TIMER_IOCTL_PAUSE_OLD: case SNDRV_TIMER_IOCTL_NEXT_DEVICE: return snd_timer_user_ioctl(file, cmd, (unsigned long)argp); case SNDRV_TIMER_IOCTL_INFO32: return snd_timer_user_info_compat(file, argp); case 
SNDRV_TIMER_IOCTL_STATUS32: return snd_timer_user_status_compat(file, argp); } return -ENOIOCTLCMD; }
gpl-2.0
opposablebrain/iphonefrotz
tads/tads3/vmstack.cpp
13
1500
#ifdef RCSID static char RCSid[] = "$Header: d:/cvsroot/tads/tads3/VMSTACK.CPP,v 1.3 1999/07/11 00:46:58 MJRoberts Exp $"; #endif /* * Copyright (c) 1998, 2002 Michael J. Roberts. All Rights Reserved. * * Please see the accompanying license file, LICENSE.TXT, for information * on using and copying this software. */ /* Name vmstack.cpp - VM stack implementation Function Notes Modified 10/28/98 MJRoberts - Creation */ #include "t3std.h" #include "vmtype.h" #include "vmstack.h" #include "vmfile.h" /* * allocate the stack */ CVmStack::CVmStack(size_t max_depth, size_t reserve) { /* * Allocate the array of stack elements. Allocate the requested * maximum depth plus the requested reserve space. Overallocate by a * few elements to leave ourselves a little buffer against mild * overages - for the most part, we count on the compiler to check for * proper stack usage at entry to each function, but intrinsics * sometimes push a few elements without checking. */ arr_ = (vm_val_t *)t3malloc((max_depth + reserve + 25) * sizeof(arr_[0])); /* remember the maximum depth and the reserve depth */ max_depth_ = max_depth; reserve_depth_ = reserve; /* the reserve is not yet in use */ reserve_in_use_ = FALSE; /* initialize the stack pointer */ init(); } /* * delete the stack */ CVmStack::~CVmStack() { /* delete the stack element array */ t3free(arr_); }
gpl-2.0
tprrt/linux-stable
fs/btrfs/compression.c
13
46072
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2008 Oracle. All rights reserved. */ #include <linux/kernel.h> #include <linux/bio.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/time.h> #include <linux/init.h> #include <linux/string.h> #include <linux/backing-dev.h> #include <linux/writeback.h> #include <linux/slab.h> #include <linux/sched/mm.h> #include <linux/log2.h> #include <crypto/hash.h> #include "misc.h" #include "ctree.h" #include "disk-io.h" #include "transaction.h" #include "btrfs_inode.h" #include "volumes.h" #include "ordered-data.h" #include "compression.h" #include "extent_io.h" #include "extent_map.h" #include "zoned.h" static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" }; const char* btrfs_compress_type2str(enum btrfs_compression_type type) { switch (type) { case BTRFS_COMPRESS_ZLIB: case BTRFS_COMPRESS_LZO: case BTRFS_COMPRESS_ZSTD: case BTRFS_COMPRESS_NONE: return btrfs_compress_types[type]; default: break; } return NULL; } bool btrfs_compress_is_valid_type(const char *str, size_t len) { int i; for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) { size_t comp_len = strlen(btrfs_compress_types[i]); if (len < comp_len) continue; if (!strncmp(btrfs_compress_types[i], str, comp_len)) return true; } return false; } static int compression_compress_pages(int type, struct list_head *ws, struct address_space *mapping, u64 start, struct page **pages, unsigned long *out_pages, unsigned long *total_in, unsigned long *total_out) { switch (type) { case BTRFS_COMPRESS_ZLIB: return zlib_compress_pages(ws, mapping, start, pages, out_pages, total_in, total_out); case BTRFS_COMPRESS_LZO: return lzo_compress_pages(ws, mapping, start, pages, out_pages, total_in, total_out); case BTRFS_COMPRESS_ZSTD: return zstd_compress_pages(ws, mapping, start, pages, out_pages, total_in, total_out); case BTRFS_COMPRESS_NONE: default: /* * This can happen when compression 
races with remount setting * it to 'no compress', while caller doesn't call * inode_need_compress() to check if we really need to * compress. * * Not a big deal, just need to inform caller that we * haven't allocated any pages yet. */ *out_pages = 0; return -E2BIG; } } static int compression_decompress_bio(int type, struct list_head *ws, struct compressed_bio *cb) { switch (type) { case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb); case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb); case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb); case BTRFS_COMPRESS_NONE: default: /* * This can't happen, the type is validated several times * before we get here. */ BUG(); } } static int compression_decompress(int type, struct list_head *ws, unsigned char *data_in, struct page *dest_page, unsigned long start_byte, size_t srclen, size_t destlen) { switch (type) { case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page, start_byte, srclen, destlen); case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page, start_byte, srclen, destlen); case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page, start_byte, srclen, destlen); case BTRFS_COMPRESS_NONE: default: /* * This can't happen, the type is validated several times * before we get here. 
*/ BUG(); } } static int btrfs_decompress_bio(struct compressed_bio *cb); static inline int compressed_bio_size(struct btrfs_fs_info *fs_info, unsigned long disk_size) { return sizeof(struct compressed_bio) + (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size; } static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio, u64 disk_start) { struct btrfs_fs_info *fs_info = inode->root->fs_info; SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); const u32 csum_size = fs_info->csum_size; const u32 sectorsize = fs_info->sectorsize; struct page *page; unsigned int i; char *kaddr; u8 csum[BTRFS_CSUM_SIZE]; struct compressed_bio *cb = bio->bi_private; u8 *cb_sum = cb->sums; if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM)) return 0; shash->tfm = fs_info->csum_shash; for (i = 0; i < cb->nr_pages; i++) { u32 pg_offset; u32 bytes_left = PAGE_SIZE; page = cb->compressed_pages[i]; /* Determine the remaining bytes inside the page first */ if (i == cb->nr_pages - 1) bytes_left = cb->compressed_len - i * PAGE_SIZE; /* Hash through the page sector by sector */ for (pg_offset = 0; pg_offset < bytes_left; pg_offset += sectorsize) { kaddr = page_address(page); crypto_shash_digest(shash, kaddr + pg_offset, sectorsize, csum); if (memcmp(&csum, cb_sum, csum_size) != 0) { btrfs_print_data_csum_error(inode, disk_start, csum, cb_sum, cb->mirror_num); if (btrfs_io_bio(bio)->device) btrfs_dev_stat_inc_and_print( btrfs_io_bio(bio)->device, BTRFS_DEV_STAT_CORRUPTION_ERRS); return -EIO; } cb_sum += csum_size; disk_start += sectorsize; } } return 0; } /* when we finish reading compressed pages from the disk, we * decompress them and then run the bio end_io routines on the * decompressed pages (in the inode address space). 
* * This allows the checksumming and other IO error handling routines * to work normally * * The compressed pages are freed here, and it must be run * in process context */ static void end_compressed_bio_read(struct bio *bio) { struct compressed_bio *cb = bio->bi_private; struct inode *inode; struct page *page; unsigned int index; unsigned int mirror = btrfs_io_bio(bio)->mirror_num; int ret = 0; if (bio->bi_status) cb->errors = 1; /* if there are more bios still pending for this compressed * extent, just exit */ if (!refcount_dec_and_test(&cb->pending_bios)) goto out; /* * Record the correct mirror_num in cb->orig_bio so that * read-repair can work properly. */ btrfs_io_bio(cb->orig_bio)->mirror_num = mirror; cb->mirror_num = mirror; /* * Some IO in this cb have failed, just skip checksum as there * is no way it could be correct. */ if (cb->errors == 1) goto csum_failed; inode = cb->inode; ret = check_compressed_csum(BTRFS_I(inode), bio, bio->bi_iter.bi_sector << 9); if (ret) goto csum_failed; /* ok, we're the last bio for this extent, lets start * the decompression. 
*/ ret = btrfs_decompress_bio(cb); csum_failed: if (ret) cb->errors = 1; /* release the compressed pages */ index = 0; for (index = 0; index < cb->nr_pages; index++) { page = cb->compressed_pages[index]; page->mapping = NULL; put_page(page); } /* do io completion on the original bio */ if (cb->errors) { bio_io_error(cb->orig_bio); } else { struct bio_vec *bvec; struct bvec_iter_all iter_all; /* * we have verified the checksum already, set page * checked so the end_io handlers know about it */ ASSERT(!bio_flagged(bio, BIO_CLONED)); bio_for_each_segment_all(bvec, cb->orig_bio, iter_all) SetPageChecked(bvec->bv_page); bio_endio(cb->orig_bio); } /* finally free the cb struct */ kfree(cb->compressed_pages); kfree(cb); out: bio_put(bio); } /* * Clear the writeback bits on all of the file * pages for a compressed write */ static noinline void end_compressed_writeback(struct inode *inode, const struct compressed_bio *cb) { unsigned long index = cb->start >> PAGE_SHIFT; unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT; struct page *pages[16]; unsigned long nr_pages = end_index - index + 1; int i; int ret; if (cb->errors) mapping_set_error(inode->i_mapping, -EIO); while (nr_pages > 0) { ret = find_get_pages_contig(inode->i_mapping, index, min_t(unsigned long, nr_pages, ARRAY_SIZE(pages)), pages); if (ret == 0) { nr_pages -= 1; index += 1; continue; } for (i = 0; i < ret; i++) { if (cb->errors) SetPageError(pages[i]); end_page_writeback(pages[i]); put_page(pages[i]); } nr_pages -= ret; index += ret; } /* the inode may be gone now */ } /* * do the cleanup once all the compressed pages hit the disk. * This will clear writeback on the file pages and free the compressed * pages. * * This also calls the writeback end hooks for the file pages so that * metadata and checksums can be updated in the file. 
*/ static void end_compressed_bio_write(struct bio *bio) { struct compressed_bio *cb = bio->bi_private; struct inode *inode; struct page *page; unsigned int index; if (bio->bi_status) cb->errors = 1; /* if there are more bios still pending for this compressed * extent, just exit */ if (!refcount_dec_and_test(&cb->pending_bios)) goto out; /* ok, we're the last bio for this extent, step one is to * call back into the FS and do all the end_io operations */ inode = cb->inode; btrfs_record_physical_zoned(inode, cb->start, bio); btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL, cb->start, cb->start + cb->len - 1, !cb->errors); end_compressed_writeback(inode, cb); /* note, our inode could be gone now */ /* * release the compressed pages, these came from alloc_page and * are not attached to the inode at all */ index = 0; for (index = 0; index < cb->nr_pages; index++) { page = cb->compressed_pages[index]; page->mapping = NULL; put_page(page); } /* finally free the cb struct */ kfree(cb->compressed_pages); kfree(cb); out: bio_put(bio); } /* * worker function to build and submit bios for previously compressed pages. * The corresponding pages in the inode should be marked for writeback * and the compressed pages should have a reference on them for dropping * when the IO is complete. * * This also checksums the file bytes and gets things ready for * the end io hooks. 
*/ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, unsigned int len, u64 disk_start, unsigned int compressed_len, struct page **compressed_pages, unsigned int nr_pages, unsigned int write_flags, struct cgroup_subsys_state *blkcg_css) { struct btrfs_fs_info *fs_info = inode->root->fs_info; struct bio *bio = NULL; struct compressed_bio *cb; unsigned long bytes_left; int pg_index = 0; struct page *page; u64 first_byte = disk_start; blk_status_t ret; int skip_sum = inode->flags & BTRFS_INODE_NODATASUM; const bool use_append = btrfs_use_zone_append(inode, disk_start); const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE; WARN_ON(!PAGE_ALIGNED(start)); cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS); if (!cb) return BLK_STS_RESOURCE; refcount_set(&cb->pending_bios, 0); cb->errors = 0; cb->inode = &inode->vfs_inode; cb->start = start; cb->len = len; cb->mirror_num = 0; cb->compressed_pages = compressed_pages; cb->compressed_len = compressed_len; cb->orig_bio = NULL; cb->nr_pages = nr_pages; bio = btrfs_bio_alloc(first_byte); bio->bi_opf = bio_op | write_flags; bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; if (use_append) { struct btrfs_device *device; device = btrfs_zoned_get_device(fs_info, disk_start, PAGE_SIZE); if (IS_ERR(device)) { kfree(cb); bio_put(bio); return BLK_STS_NOTSUPP; } bio_set_dev(bio, device->bdev); } if (blkcg_css) { bio->bi_opf |= REQ_CGROUP_PUNT; kthread_associate_blkcg(blkcg_css); } refcount_set(&cb->pending_bios, 1); /* create and submit bios for the compressed pages */ bytes_left = compressed_len; for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { int submit = 0; int len = 0; page = compressed_pages[pg_index]; page->mapping = inode->vfs_inode.i_mapping; if (bio->bi_iter.bi_size) submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio, 0); /* * Page can only be added to bio if the current bio fits in * stripe. 
*/ if (!submit) { if (pg_index == 0 && use_append) len = bio_add_zone_append_page(bio, page, PAGE_SIZE, 0); else len = bio_add_page(bio, page, PAGE_SIZE, 0); } page->mapping = NULL; if (submit || len < PAGE_SIZE) { /* * inc the count before we submit the bio so * we know the end IO handler won't happen before * we inc the count. Otherwise, the cb might get * freed before we're done setting it up */ refcount_inc(&cb->pending_bios); ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA); BUG_ON(ret); /* -ENOMEM */ if (!skip_sum) { ret = btrfs_csum_one_bio(inode, bio, start, 1); BUG_ON(ret); /* -ENOMEM */ } ret = btrfs_map_bio(fs_info, bio, 0); if (ret) { bio->bi_status = ret; bio_endio(bio); } bio = btrfs_bio_alloc(first_byte); bio->bi_opf = bio_op | write_flags; bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; if (blkcg_css) bio->bi_opf |= REQ_CGROUP_PUNT; /* * Use bio_add_page() to ensure the bio has at least one * page. */ bio_add_page(bio, page, PAGE_SIZE, 0); } if (bytes_left < PAGE_SIZE) { btrfs_info(fs_info, "bytes left %lu compress len %u nr %u", bytes_left, cb->compressed_len, cb->nr_pages); } bytes_left -= PAGE_SIZE; first_byte += PAGE_SIZE; cond_resched(); } ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA); BUG_ON(ret); /* -ENOMEM */ if (!skip_sum) { ret = btrfs_csum_one_bio(inode, bio, start, 1); BUG_ON(ret); /* -ENOMEM */ } ret = btrfs_map_bio(fs_info, bio, 0); if (ret) { bio->bi_status = ret; bio_endio(bio); } if (blkcg_css) kthread_associate_blkcg(NULL); return 0; } static u64 bio_end_offset(struct bio *bio) { struct bio_vec *last = bio_last_bvec_all(bio); return page_offset(last->bv_page) + last->bv_len + last->bv_offset; } static noinline int add_ra_bio_pages(struct inode *inode, u64 compressed_end, struct compressed_bio *cb) { unsigned long end_index; unsigned long pg_index; u64 last_offset; u64 isize = i_size_read(inode); int ret; struct page *page; unsigned long nr_pages = 0; struct extent_map *em; struct 
address_space *mapping = inode->i_mapping; struct extent_map_tree *em_tree; struct extent_io_tree *tree; u64 end; int misses = 0; last_offset = bio_end_offset(cb->orig_bio); em_tree = &BTRFS_I(inode)->extent_tree; tree = &BTRFS_I(inode)->io_tree; if (isize == 0) return 0; /* * For current subpage support, we only support 64K page size, * which means maximum compressed extent size (128K) is just 2x page * size. * This makes readahead less effective, so here disable readahead for * subpage for now, until full compressed write is supported. */ if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE) return 0; end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; while (last_offset < compressed_end) { pg_index = last_offset >> PAGE_SHIFT; if (pg_index > end_index) break; page = xa_load(&mapping->i_pages, pg_index); if (page && !xa_is_value(page)) { misses++; if (misses > 4) break; goto next; } page = __page_cache_alloc(mapping_gfp_constraint(mapping, ~__GFP_FS)); if (!page) break; if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) { put_page(page); goto next; } /* * at this point, we have a locked page in the page cache * for these bytes in the file. But, we have to make * sure they map to this compressed extent on disk. 
*/ ret = set_page_extent_mapped(page); if (ret < 0) { unlock_page(page); put_page(page); break; } end = last_offset + PAGE_SIZE - 1; lock_extent(tree, last_offset, end); read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, last_offset, PAGE_SIZE); read_unlock(&em_tree->lock); if (!em || last_offset < em->start || (last_offset + PAGE_SIZE > extent_map_end(em)) || (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) { free_extent_map(em); unlock_extent(tree, last_offset, end); unlock_page(page); put_page(page); break; } free_extent_map(em); if (page->index == end_index) { size_t zero_offset = offset_in_page(isize); if (zero_offset) { int zeros; zeros = PAGE_SIZE - zero_offset; memzero_page(page, zero_offset, zeros); flush_dcache_page(page); } } ret = bio_add_page(cb->orig_bio, page, PAGE_SIZE, 0); if (ret == PAGE_SIZE) { nr_pages++; put_page(page); } else { unlock_extent(tree, last_offset, end); unlock_page(page); put_page(page); break; } next: last_offset += PAGE_SIZE; } return 0; } /* * for a compressed read, the bio we get passed has all the inode pages * in it. We don't actually do IO on those pages but allocate new ones * to hold the compressed pages on disk. 
* * bio->bi_iter.bi_sector points to the compressed extent on disk * bio->bi_io_vec points to all of the inode pages * * After the compressed pages are read, we copy the bytes into the * bio we were passed and then call the bio end_io calls */ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, int mirror_num, unsigned long bio_flags) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct extent_map_tree *em_tree; struct compressed_bio *cb; unsigned int compressed_len; unsigned int nr_pages; unsigned int pg_index; struct page *page; struct bio *comp_bio; u64 cur_disk_byte = bio->bi_iter.bi_sector << 9; u64 file_offset; u64 em_len; u64 em_start; struct extent_map *em; blk_status_t ret = BLK_STS_RESOURCE; int faili = 0; u8 *sums; em_tree = &BTRFS_I(inode)->extent_tree; file_offset = bio_first_bvec_all(bio)->bv_offset + page_offset(bio_first_page_all(bio)); /* we need the actual starting offset of this extent in the file */ read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize); read_unlock(&em_tree->lock); if (!em) return BLK_STS_IOERR; ASSERT(em->compress_type != BTRFS_COMPRESS_NONE); compressed_len = em->block_len; cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS); if (!cb) goto out; refcount_set(&cb->pending_bios, 0); cb->errors = 0; cb->inode = inode; cb->mirror_num = mirror_num; sums = cb->sums; cb->start = em->orig_start; em_len = em->len; em_start = em->start; free_extent_map(em); em = NULL; cb->len = bio->bi_iter.bi_size; cb->compressed_len = compressed_len; cb->compress_type = extent_compress_type(bio_flags); cb->orig_bio = bio; nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE); cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); if (!cb->compressed_pages) goto fail1; for (pg_index = 0; pg_index < nr_pages; pg_index++) { cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS); if (!cb->compressed_pages[pg_index]) { faili = pg_index - 1; ret 
= BLK_STS_RESOURCE; goto fail2; } } faili = nr_pages - 1; cb->nr_pages = nr_pages; add_ra_bio_pages(inode, em_start + em_len, cb); /* include any pages we added in add_ra-bio_pages */ cb->len = bio->bi_iter.bi_size; comp_bio = btrfs_bio_alloc(cur_disk_byte); comp_bio->bi_opf = REQ_OP_READ; comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; refcount_set(&cb->pending_bios, 1); for (pg_index = 0; pg_index < nr_pages; pg_index++) { u32 pg_len = PAGE_SIZE; int submit = 0; /* * To handle subpage case, we need to make sure the bio only * covers the range we need. * * If we're at the last page, truncate the length to only cover * the remaining part. */ if (pg_index == nr_pages - 1) pg_len = min_t(u32, PAGE_SIZE, compressed_len - pg_index * PAGE_SIZE); page = cb->compressed_pages[pg_index]; page->mapping = inode->i_mapping; page->index = em_start >> PAGE_SHIFT; if (comp_bio->bi_iter.bi_size) submit = btrfs_bio_fits_in_stripe(page, pg_len, comp_bio, 0); page->mapping = NULL; if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) { unsigned int nr_sectors; ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA); BUG_ON(ret); /* -ENOMEM */ /* * inc the count before we submit the bio so * we know the end IO handler won't happen before * we inc the count. 
Otherwise, the cb might get * freed before we're done setting it up */ refcount_inc(&cb->pending_bios); ret = btrfs_lookup_bio_sums(inode, comp_bio, sums); BUG_ON(ret); /* -ENOMEM */ nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size, fs_info->sectorsize); sums += fs_info->csum_size * nr_sectors; ret = btrfs_map_bio(fs_info, comp_bio, mirror_num); if (ret) { comp_bio->bi_status = ret; bio_endio(comp_bio); } comp_bio = btrfs_bio_alloc(cur_disk_byte); comp_bio->bi_opf = REQ_OP_READ; comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; bio_add_page(comp_bio, page, pg_len, 0); } cur_disk_byte += pg_len; } ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA); BUG_ON(ret); /* -ENOMEM */ ret = btrfs_lookup_bio_sums(inode, comp_bio, sums); BUG_ON(ret); /* -ENOMEM */ ret = btrfs_map_bio(fs_info, comp_bio, mirror_num); if (ret) { comp_bio->bi_status = ret; bio_endio(comp_bio); } return 0; fail2: while (faili >= 0) { __free_page(cb->compressed_pages[faili]); faili--; } kfree(cb->compressed_pages); fail1: kfree(cb); out: free_extent_map(em); return ret; } /* * Heuristic uses systematic sampling to collect data from the input data * range, the logic can be tuned by the following constants: * * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample * @SAMPLING_INTERVAL - range from which the sampled data can be collected */ #define SAMPLING_READ_SIZE (16) #define SAMPLING_INTERVAL (256) /* * For statistical analysis of the input data we consider bytes that form a * Galois Field of 256 objects. Each object has an attribute count, ie. how * many times the object appeared in the sample. */ #define BUCKET_SIZE (256) /* * The size of the sample is based on a statistical sampling rule of thumb. * The common way is to perform sampling tests as long as the number of * elements in each cell is at least 5. * * Instead of 5, we choose 32 to obtain more accurate results. 
* If the data contain the maximum number of symbols, which is 256, we obtain a * sample size bound by 8192. * * For a sample of at most 8KB of data per data range: 16 consecutive bytes * from up to 512 locations. */ #define MAX_SAMPLE_SIZE (BTRFS_MAX_UNCOMPRESSED * \ SAMPLING_READ_SIZE / SAMPLING_INTERVAL) struct bucket_item { u32 count; }; struct heuristic_ws { /* Partial copy of input data */ u8 *sample; u32 sample_size; /* Buckets store counters for each byte value */ struct bucket_item *bucket; /* Sorting buffer */ struct bucket_item *bucket_b; struct list_head list; }; static struct workspace_manager heuristic_wsm; static void free_heuristic_ws(struct list_head *ws) { struct heuristic_ws *workspace; workspace = list_entry(ws, struct heuristic_ws, list); kvfree(workspace->sample); kfree(workspace->bucket); kfree(workspace->bucket_b); kfree(workspace); } static struct list_head *alloc_heuristic_ws(unsigned int level) { struct heuristic_ws *ws; ws = kzalloc(sizeof(*ws), GFP_KERNEL); if (!ws) return ERR_PTR(-ENOMEM); ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL); if (!ws->sample) goto fail; ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL); if (!ws->bucket) goto fail; ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL); if (!ws->bucket_b) goto fail; INIT_LIST_HEAD(&ws->list); return &ws->list; fail: free_heuristic_ws(&ws->list); return ERR_PTR(-ENOMEM); } const struct btrfs_compress_op btrfs_heuristic_compress = { .workspace_manager = &heuristic_wsm, }; static const struct btrfs_compress_op * const btrfs_compress_op[] = { /* The heuristic is represented as compression type 0 */ &btrfs_heuristic_compress, &btrfs_zlib_compress, &btrfs_lzo_compress, &btrfs_zstd_compress, }; static struct list_head *alloc_workspace(int type, unsigned int level) { switch (type) { case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level); case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level); case BTRFS_COMPRESS_LZO: return 
lzo_alloc_workspace(level); case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level); default: /* * This can't happen, the type is validated several times * before we get here. */ BUG(); } } static void free_workspace(int type, struct list_head *ws) { switch (type) { case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws); case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws); case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws); case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws); default: /* * This can't happen, the type is validated several times * before we get here. */ BUG(); } } static void btrfs_init_workspace_manager(int type) { struct workspace_manager *wsm; struct list_head *workspace; wsm = btrfs_compress_op[type]->workspace_manager; INIT_LIST_HEAD(&wsm->idle_ws); spin_lock_init(&wsm->ws_lock); atomic_set(&wsm->total_ws, 0); init_waitqueue_head(&wsm->ws_wait); /* * Preallocate one workspace for each compression type so we can * guarantee forward progress in the worst case */ workspace = alloc_workspace(type, 0); if (IS_ERR(workspace)) { pr_warn( "BTRFS: cannot preallocate compression workspace, will try later\n"); } else { atomic_set(&wsm->total_ws, 1); wsm->free_ws = 1; list_add(workspace, &wsm->idle_ws); } } static void btrfs_cleanup_workspace_manager(int type) { struct workspace_manager *wsman; struct list_head *ws; wsman = btrfs_compress_op[type]->workspace_manager; while (!list_empty(&wsman->idle_ws)) { ws = wsman->idle_ws.next; list_del(ws); free_workspace(type, ws); atomic_dec(&wsman->total_ws); } } /* * This finds an available workspace or allocates a new one. * If it's not possible to allocate a new one, waits until there's one. * Preallocation makes a forward progress guarantees and we do not return * errors. 
 */
struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;
	int cpus = num_online_cpus();
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws = &wsm->idle_ws;
	ws_lock = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait = &wsm->ws_wait;
	free_ws = &wsm->free_ws;

again:
	spin_lock(ws_lock);
	/* Fast path: reuse an idle workspace if one is cached. */
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	/*
	 * Cap the number of workspaces at the number of online CPUs; beyond
	 * that, sleep until one is returned rather than allocating more.
	 */
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		/* Re-check under the wait entry to avoid a lost wakeup. */
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = alloc_workspace(type, level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

/*
 * Dispatch to the per-algorithm workspace getter; zlib and zstd have their
 * own level-aware getters, the other types use the generic manager.
 */
static struct list_head *get_workspace(int type, int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE:
		return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZLIB:
		return zlib_get_workspace(level);
	case BTRFS_COMPRESS_LZO:
		return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_get_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
void btrfs_put_workspace(int type, struct list_head *ws)
{
	struct workspace_manager *wsm;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws = &wsm->idle_ws;
	ws_lock = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait = &wsm->ws_wait;
	free_ws = &wsm->free_ws;

	spin_lock(ws_lock);
	/* Cache up to num_online_cpus() idle workspaces, free the rest. */
	if (*free_ws <= num_online_cpus()) {
		list_add(ws, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	free_workspace(type, ws);
	atomic_dec(total_ws);
wake:
	cond_wake_up(ws_wait);
}

/*
 * Return a workspace to the manager it was taken from; only zstd has a
 * dedicated put routine here.
 */
static void put_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE:
		return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZLIB:
		return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_LZO:
		return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_put_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Adjust @level according to the limits of the compression algorithm or
 * fallback to default
 */
static unsigned int btrfs_compress_set_level(int type, unsigned level)
{
	const struct btrfs_compress_op *ops = btrfs_compress_op[type];

	/* Level 0 means "use the algorithm's default", otherwise clamp. */
	if (level == 0)
		level = ops->default_level;
	else
		level = min(level, ops->max_level);

	return level;
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - compression algo are 0-3
 * - the level are bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	int type = btrfs_compress_type(type_level);
	int level = btrfs_compress_level(type_level);
	struct list_head *workspace;
	int ret;

	/* Clamp the requested level to what the algorithm supports. */
	level = btrfs_compress_set_level(type, level);
	workspace = get_workspace(type, level);
	ret = compression_compress_pages(type, workspace, mapping, start, pages,
					 out_pages, total_in, total_out);
	put_workspace(type, workspace);
	return ret;
}

/*
 * Decompress a whole compressed bio using a temporarily borrowed workspace
 * for the bio's compression type.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = get_workspace(type, 0);
	ret = compression_decompress_bio(type, workspace, cb);
	put_workspace(type, workspace);

	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	/* Level 0: decompression does not need a level-specific workspace. */
	workspace = get_workspace(type, 0);
	ret = compression_decompress(type, workspace, data_in, dest_page,
				     start_byte, srclen, destlen);
	put_workspace(type, workspace);

	return ret;
}

/*
 * Set up one workspace manager per generic compression type; zstd maintains
 * its own level-aware manager.
 */
void __init btrfs_init_compress(void)
{
	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_init_workspace_manager();
}

/* Tear down everything created by btrfs_init_compress(). */
void __cold btrfs_exit_compress(void)
{
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_cleanup_workspace_manager();
}

/*
 * Copy decompressed data from working buffer to pages.
 *
 * @buf:		The decompressed data buffer
 * @buf_len:		The decompressed data length
 * @decompressed:	Number of bytes that are already decompressed inside the
 *			compressed extent
 * @cb:			The compressed extent descriptor
 * @orig_bio:		The original bio that the caller wants to read for
 *
 * An easier to understand graph is like below:
 *
 *		|<- orig_bio ->|     |<- orig_bio->|
 *	|<------- full decompressed extent ----->|
 *	|<-----------    @cb range   ---->|
 *	|		|<-- @buf_len -->|
 *	|<--- @decompressed --->|
 *
 * Note that, @cb can be a subpage of the full decompressed extent, but
 * @cb->start always has the same as the orig_file_offset value of the full
 * decompressed extent.
 *
 * When reading compressed extent, we have to read the full compressed extent,
 * while @orig_bio may only want part of the range.
 * Thus this function will ensure only data covered by @orig_bio will be copied
 * to.
 *
 * Return 0 if we have copied all needed contents for @orig_bio.
 * Return >0 if we need continue decompress.
 */
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
			      struct compressed_bio *cb, u32 decompressed)
{
	struct bio *orig_bio = cb->orig_bio;
	/* Offset inside the full decompressed extent */
	u32 cur_offset;

	cur_offset = decompressed;
	/* The main loop to do the copy */
	while (cur_offset < decompressed + buf_len) {
		struct bio_vec bvec;
		size_t copy_len;
		u32 copy_start;
		/* Offset inside the full decompressed extent */
		u32 bvec_offset;

		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
		/*
		 * cb->start may underflow, but subtracting that value can still
		 * give us correct offset inside the full decompressed extent.
		 */
		bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;

		/* Haven't reached the bvec range, exit */
		if (decompressed + buf_len <= bvec_offset)
			return 1;

		/* Copy the intersection of the buffer and the current bvec. */
		copy_start = max(cur_offset, bvec_offset);
		copy_len = min(bvec_offset + bvec.bv_len,
			       decompressed + buf_len) - copy_start;
		ASSERT(copy_len);

		/*
		 * Extra range check to ensure we didn't go beyond
		 * @buf + @buf_len.
		 */
		ASSERT(copy_start - decompressed < buf_len);
		memcpy_to_page(bvec.bv_page, bvec.bv_offset,
			       buf + copy_start - decompressed, copy_len);
		flush_dcache_page(bvec.bv_page);
		cur_offset += copy_len;

		bio_advance(orig_bio, copy_len);
		/* Finished the bio */
		if (!orig_bio->bi_iter.bi_size)
			return 0;
	}
	return 1;
}

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after comma:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}

/*
 * Estimate the sample's Shannon entropy as a percentage of the maximum
 * (8 bits/byte), using the per-byte counts collected in ws->bucket.
 */
static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}

#define RADIX_BASE 4U
#define COUNTERS_SIZE (1U << RADIX_BASE)

/* Extract the radix digit at @shift, inverted so the sort is descending. */
static u8 get4bits(u64 num, int shift)
{
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try to avoid useless loop iterations for small numbers stored in big
	 * counters.  Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	/* Round up to a whole number of double passes (2 * 4 bits each). */
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		/* First pass: counting sort from @array into @array_buf. */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array, to
		 * the main one.  But that requires some CPU time. Avoid that
		 * by doing another sort iteration to original array instead of
		 * memcpy()
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i ++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *			 compression algo can easy fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *			 probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

/*
 * Count how many of the most frequent buckets are needed to cover 90% of the
 * sample.  Assumes ws->bucket was filled by the caller; sorts it in place.
 */
static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data byte set is restricted to limited number of
 * possible characters, and that restriction in most cases makes data easy to
 * compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

/* The sample is "repeated" if its two halves are byte-for-byte identical. */
static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}

/*
 * Fill ws->sample with SAMPLING_READ_SIZE chunks taken every
 * SAMPLING_INTERVAL bytes from the page cache pages covering [start, end).
 */
static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap_local_page(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
			       SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap_local(in_data);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic
 * to quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	/*
	 * Type 0 doubles as the heuristic workspace type — free_workspace()
	 * maps BTRFS_COMPRESS_NONE to free_heuristic_ws().
	 */
	struct list_head *ws_list = get_workspace(0, 0);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	/* Two identical halves: trivially compressible. */
	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	/* Build the per-byte-value histogram used by the checks below. */
	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 * values, every bucket has counter at level ~54. The heuristic would
	 * be confused. This can happen when data have some internal repeated
	 * patterns like "abbacbbc...". This can be detected by analyzing
	 * pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	put_workspace(0, ws_list);
	return ret;
}

/*
 * Convert the compression suffix (eg.
after "zlib" starting with ":") to * level, unrecognized string will set the default level */ unsigned int btrfs_compress_str2level(unsigned int type, const char *str) { unsigned int level = 0; int ret; if (!type) return 0; if (str[0] == ':') { ret = kstrtouint(str + 1, 10, &level); if (ret) level = 0; } level = btrfs_compress_set_level(type, level); return level; }
gpl-2.0
CyanogenMod/android_kernel_sony_msm8960t
drivers/staging/prima/CORE/VOSS/src/vos_types.c
13
5608
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /**========================================================================= \file vos_Types.c \brief virtual Operating System Servies (vOS) Basic type definitions Copyright 2008 (c) Qualcomm, Incorporated. All Rights Reserved. Qualcomm Confidential and Proprietary. 
 ========================================================================*/

/* $Header$ */

/*--------------------------------------------------------------------------
  Include Files
  ------------------------------------------------------------------------*/
#include "vos_types.h"
#include "vos_trace.h"
//#include "wlan_libra_config.h"

/*--------------------------------------------------------------------------
  Preprocessor definitions and constants
  ------------------------------------------------------------------------*/

/*--------------------------------------------------------------------------
  Type declarations
  ------------------------------------------------------------------------*/

/*----------------------------------------------------------------------------

  \brief vos_atomic_set_U32() - set a U32 variable atomically

  NOTE(review): "atomically" here is implemented with local_irq_save(), which
  only masks interrupts on the local CPU; this is not atomic across CPUs on
  SMP — confirm callers only need protection against local interrupt handlers.

  \param pTarget - pointer to the v_U32_t to set.

  \param value - the value to set in the v_U32_t variable.

  \return This function returns the value previously in the v_U32_t before
          the new value is set.  Returns 0 if pTarget is NULL (after logging
          an error).

  \sa vos_atomic_increment_U32(), vos_atomic_decrement_U32()

  --------------------------------------------------------------------------*/
v_U32_t vos_atomic_set_U32( v_U32_t *pTarget, v_U32_t value )
{
   v_U32_t oldval;
   unsigned long flags;

   if (pTarget == NULL)
   {
      VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
                "NULL ptr passed into %s",__func__);
      return 0;
   }

   // The old value is captured inside the interrupt-disabled region so the
   // exchange is observed consistently (on this CPU).
   local_irq_save(flags);
   oldval = *pTarget;
   *pTarget = value;
   local_irq_restore(flags);

//   v_U32_t prev = atomic_read(pTarget);
//   atomic_set(pTarget, value);
   return oldval;
}

/*----------------------------------------------------------------------------

  \brief vos_atomic_increment_U32() - Increment a U32 variable atomically

  \param pTarget - pointer to the v_U32_t to increment.

  \return This function returns the value of the variable after the
          increment occurs.
\sa vos_atomic_decrement_U32(), vos_atomic_set_U32() --------------------------------------------------------------------------*/ v_U32_t vos_atomic_increment_U32( v_U32_t *pTarget ) { unsigned long flags; if (pTarget == NULL) { VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "NULL ptr passed into %s",__func__); return 0; } local_irq_save(flags); ++*pTarget; local_irq_restore(flags); return *pTarget; // return atomic_inc_return(pTarget); } /*---------------------------------------------------------------------------- \brief vos_atomic_decrement_U32() - Decrement a U32 variable atomically \param pTarget - pointer to the v_U32_t to decrement. \return This function returns the value of the variable after the decrement occurs. \sa vos_atomic_increment_U32(), vos_atomic_set_U32() --------------------------------------------------------------------------*/ v_U32_t vos_atomic_decrement_U32( v_U32_t *pTarget ) { unsigned long flags; if (pTarget == NULL) { VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "NULL ptr passed into %s",__func__); return 0; } // return atomic_dec_return(pTarget); local_irq_save(flags); --*pTarget; local_irq_restore(flags); return (*pTarget); } v_U32_t vos_atomic_increment_U32_by_value( v_U32_t *pTarget, v_U32_t value ) { unsigned long flags; if (pTarget == NULL) { VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "NULL ptr passed into %s",__func__); return 0; } local_irq_save(flags); *pTarget += value ; local_irq_restore(flags); return (*pTarget); } v_U32_t vos_atomic_decrement_U32_by_value( v_U32_t *pTarget, v_U32_t value ) { unsigned long flags; if (pTarget == NULL) { VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR, "NULL ptr passed into %s",__func__); return 0; } local_irq_save(flags); *pTarget -= value ; local_irq_restore(flags); return (*pTarget); } v_U32_t vos_get_skip_ssid_check(void) { /**This is needed by only AMSS for interoperatability **/ return 1; } v_U32_t vos_get_skip_11e_check(void) { /* this is needed only for AMSS 
for interopratability **/ return 1; }
gpl-2.0