repo_name
string
path
string
copies
string
size
string
content
string
license
string
olafdietsche/linux-accessfs
crypto/anubis.c
2339
28430
/* * Cryptographic API. * * Anubis Algorithm * * The Anubis algorithm was developed by Paulo S. L. M. Barreto and * Vincent Rijmen. * * See * * P.S.L.M. Barreto, V. Rijmen, * ``The Anubis block cipher,'' * NESSIE submission, 2000. * * This software implements the "tweaked" version of Anubis. * Only the S-box and (consequently) the rounds constants have been * changed. * * The original authors have disclaimed all copyright interest in this * code and thus put it in the public domain. The subsequent authors * have put this under the GNU General Public License. * * By Aaron Grothe ajgrothe@yahoo.com, October 28, 2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/byteorder.h> #include <linux/crypto.h> #include <linux/types.h> #define ANUBIS_MIN_KEY_SIZE 16 #define ANUBIS_MAX_KEY_SIZE 40 #define ANUBIS_BLOCK_SIZE 16 #define ANUBIS_MAX_N 10 #define ANUBIS_MAX_ROUNDS (8 + ANUBIS_MAX_N) struct anubis_ctx { int key_len; // in bits int R; u32 E[ANUBIS_MAX_ROUNDS + 1][4]; u32 D[ANUBIS_MAX_ROUNDS + 1][4]; }; static const u32 T0[256] = { 0xba69d2bbU, 0x54a84de5U, 0x2f5ebce2U, 0x74e8cd25U, 0x53a651f7U, 0xd3bb6bd0U, 0xd2b96fd6U, 0x4d9a29b3U, 0x50a05dfdU, 0xac458acfU, 0x8d070e09U, 0xbf63c6a5U, 0x70e0dd3dU, 0x52a455f1U, 0x9a29527bU, 0x4c982db5U, 0xeac98f46U, 0xd5b773c4U, 0x97336655U, 0xd1bf63dcU, 0x3366ccaaU, 0x51a259fbU, 0x5bb671c7U, 0xa651a2f3U, 0xdea15ffeU, 0x48903dadU, 0xa84d9ad7U, 0x992f5e71U, 0xdbab4be0U, 0x3264c8acU, 0xb773e695U, 0xfce5d732U, 0xe3dbab70U, 0x9e214263U, 0x913f7e41U, 0x9b2b567dU, 0xe2d9af76U, 0xbb6bd6bdU, 0x4182199bU, 0x6edca579U, 0xa557aef9U, 0xcb8b0b80U, 0x6bd6b167U, 0x95376e59U, 0xa15fbee1U, 0xf3fbeb10U, 0xb17ffe81U, 0x0204080cU, 0xcc851792U, 0xc49537a2U, 0x1d3a744eU, 
0x14285078U, 0xc39b2bb0U, 0x63c69157U, 0xdaa94fe6U, 0x5dba69d3U, 0x5fbe61dfU, 0xdca557f2U, 0x7dfae913U, 0xcd871394U, 0x7ffee11fU, 0x5ab475c1U, 0x6cd8ad75U, 0x5cb86dd5U, 0xf7f3fb08U, 0x264c98d4U, 0xffe3db38U, 0xedc79354U, 0xe8cd874aU, 0x9d274e69U, 0x6fdea17fU, 0x8e010203U, 0x19326456U, 0xa05dbae7U, 0xf0fde71aU, 0x890f1e11U, 0x0f1e3c22U, 0x070e1c12U, 0xaf4386c5U, 0xfbebcb20U, 0x08102030U, 0x152a547eU, 0x0d1a342eU, 0x04081018U, 0x01020406U, 0x64c88d45U, 0xdfa35bf8U, 0x76ecc529U, 0x79f2f90bU, 0xdda753f4U, 0x3d7af48eU, 0x162c5874U, 0x3f7efc82U, 0x376edcb2U, 0x6ddaa973U, 0x3870e090U, 0xb96fdeb1U, 0x73e6d137U, 0xe9cf834cU, 0x356ad4beU, 0x55aa49e3U, 0x71e2d93bU, 0x7bf6f107U, 0x8c050a0fU, 0x72e4d531U, 0x880d1a17U, 0xf6f1ff0eU, 0x2a54a8fcU, 0x3e7cf884U, 0x5ebc65d9U, 0x274e9cd2U, 0x468c0589U, 0x0c183028U, 0x65ca8943U, 0x68d0bd6dU, 0x61c2995bU, 0x03060c0aU, 0xc19f23bcU, 0x57ae41efU, 0xd6b17fceU, 0xd9af43ecU, 0x58b07dcdU, 0xd8ad47eaU, 0x66cc8549U, 0xd7b37bc8U, 0x3a74e89cU, 0xc88d078aU, 0x3c78f088U, 0xfae9cf26U, 0x96316253U, 0xa753a6f5U, 0x982d5a77U, 0xecc59752U, 0xb86ddab7U, 0xc7933ba8U, 0xae4182c3U, 0x69d2b96bU, 0x4b9631a7U, 0xab4b96ddU, 0xa94f9ed1U, 0x67ce814fU, 0x0a14283cU, 0x478e018fU, 0xf2f9ef16U, 0xb577ee99U, 0x224488ccU, 0xe5d7b364U, 0xeec19f5eU, 0xbe61c2a3U, 0x2b56acfaU, 0x811f3e21U, 0x1224486cU, 0x831b362dU, 0x1b366c5aU, 0x0e1c3824U, 0x23468ccaU, 0xf5f7f304U, 0x458a0983U, 0x214284c6U, 0xce811f9eU, 0x499239abU, 0x2c58b0e8U, 0xf9efc32cU, 0xe6d1bf6eU, 0xb671e293U, 0x2850a0f0U, 0x172e5c72U, 0x8219322bU, 0x1a34685cU, 0x8b0b161dU, 0xfee1df3eU, 0x8a09121bU, 0x09122436U, 0xc98f038cU, 0x87132635U, 0x4e9c25b9U, 0xe1dfa37cU, 0x2e5cb8e4U, 0xe4d5b762U, 0xe0dda77aU, 0xebcb8b40U, 0x903d7a47U, 0xa455aaffU, 0x1e3c7844U, 0x85172e39U, 0x60c09d5dU, 0x00000000U, 0x254a94deU, 0xf4f5f702U, 0xf1ffe31cU, 0x94356a5fU, 0x0b162c3aU, 0xe7d3bb68U, 0x75eac923U, 0xefc39b58U, 0x3468d0b8U, 0x3162c4a6U, 0xd4b577c2U, 0xd0bd67daU, 0x86112233U, 0x7efce519U, 0xad478ec9U, 0xfde7d334U, 0x2952a4f6U, 
0x3060c0a0U, 0x3b76ec9aU, 0x9f234665U, 0xf8edc72aU, 0xc6913faeU, 0x13264c6aU, 0x060c1814U, 0x050a141eU, 0xc59733a4U, 0x11224466U, 0x77eec12fU, 0x7cf8ed15U, 0x7af4f501U, 0x78f0fd0dU, 0x366cd8b4U, 0x1c387048U, 0x3972e496U, 0x59b279cbU, 0x18306050U, 0x56ac45e9U, 0xb37bf68dU, 0xb07dfa87U, 0x244890d8U, 0x204080c0U, 0xb279f28bU, 0x9239724bU, 0xa35bb6edU, 0xc09d27baU, 0x44880d85U, 0x62c49551U, 0x10204060U, 0xb475ea9fU, 0x84152a3fU, 0x43861197U, 0x933b764dU, 0xc2992fb6U, 0x4a9435a1U, 0xbd67cea9U, 0x8f030605U, 0x2d5ab4eeU, 0xbc65caafU, 0x9c254a6fU, 0x6ad4b561U, 0x40801d9dU, 0xcf831b98U, 0xa259b2ebU, 0x801d3a27U, 0x4f9e21bfU, 0x1f3e7c42U, 0xca890f86U, 0xaa4992dbU, 0x42841591U, }; static const u32 T1[256] = { 0x69babbd2U, 0xa854e54dU, 0x5e2fe2bcU, 0xe87425cdU, 0xa653f751U, 0xbbd3d06bU, 0xb9d2d66fU, 0x9a4db329U, 0xa050fd5dU, 0x45accf8aU, 0x078d090eU, 0x63bfa5c6U, 0xe0703dddU, 0xa452f155U, 0x299a7b52U, 0x984cb52dU, 0xc9ea468fU, 0xb7d5c473U, 0x33975566U, 0xbfd1dc63U, 0x6633aaccU, 0xa251fb59U, 0xb65bc771U, 0x51a6f3a2U, 0xa1defe5fU, 0x9048ad3dU, 0x4da8d79aU, 0x2f99715eU, 0xabdbe04bU, 0x6432acc8U, 0x73b795e6U, 0xe5fc32d7U, 0xdbe370abU, 0x219e6342U, 0x3f91417eU, 0x2b9b7d56U, 0xd9e276afU, 0x6bbbbdd6U, 0x82419b19U, 0xdc6e79a5U, 0x57a5f9aeU, 0x8bcb800bU, 0xd66b67b1U, 0x3795596eU, 0x5fa1e1beU, 0xfbf310ebU, 0x7fb181feU, 0x04020c08U, 0x85cc9217U, 0x95c4a237U, 0x3a1d4e74U, 0x28147850U, 0x9bc3b02bU, 0xc6635791U, 0xa9dae64fU, 0xba5dd369U, 0xbe5fdf61U, 0xa5dcf257U, 0xfa7d13e9U, 0x87cd9413U, 0xfe7f1fe1U, 0xb45ac175U, 0xd86c75adU, 0xb85cd56dU, 0xf3f708fbU, 0x4c26d498U, 0xe3ff38dbU, 0xc7ed5493U, 0xcde84a87U, 0x279d694eU, 0xde6f7fa1U, 0x018e0302U, 0x32195664U, 0x5da0e7baU, 0xfdf01ae7U, 0x0f89111eU, 0x1e0f223cU, 0x0e07121cU, 0x43afc586U, 0xebfb20cbU, 0x10083020U, 0x2a157e54U, 0x1a0d2e34U, 0x08041810U, 0x02010604U, 0xc864458dU, 0xa3dff85bU, 0xec7629c5U, 0xf2790bf9U, 0xa7ddf453U, 0x7a3d8ef4U, 0x2c167458U, 0x7e3f82fcU, 0x6e37b2dcU, 0xda6d73a9U, 0x703890e0U, 0x6fb9b1deU, 0xe67337d1U, 0xcfe94c83U, 
0x6a35bed4U, 0xaa55e349U, 0xe2713bd9U, 0xf67b07f1U, 0x058c0f0aU, 0xe47231d5U, 0x0d88171aU, 0xf1f60effU, 0x542afca8U, 0x7c3e84f8U, 0xbc5ed965U, 0x4e27d29cU, 0x8c468905U, 0x180c2830U, 0xca654389U, 0xd0686dbdU, 0xc2615b99U, 0x06030a0cU, 0x9fc1bc23U, 0xae57ef41U, 0xb1d6ce7fU, 0xafd9ec43U, 0xb058cd7dU, 0xadd8ea47U, 0xcc664985U, 0xb3d7c87bU, 0x743a9ce8U, 0x8dc88a07U, 0x783c88f0U, 0xe9fa26cfU, 0x31965362U, 0x53a7f5a6U, 0x2d98775aU, 0xc5ec5297U, 0x6db8b7daU, 0x93c7a83bU, 0x41aec382U, 0xd2696bb9U, 0x964ba731U, 0x4babdd96U, 0x4fa9d19eU, 0xce674f81U, 0x140a3c28U, 0x8e478f01U, 0xf9f216efU, 0x77b599eeU, 0x4422cc88U, 0xd7e564b3U, 0xc1ee5e9fU, 0x61bea3c2U, 0x562bfaacU, 0x1f81213eU, 0x24126c48U, 0x1b832d36U, 0x361b5a6cU, 0x1c0e2438U, 0x4623ca8cU, 0xf7f504f3U, 0x8a458309U, 0x4221c684U, 0x81ce9e1fU, 0x9249ab39U, 0x582ce8b0U, 0xeff92cc3U, 0xd1e66ebfU, 0x71b693e2U, 0x5028f0a0U, 0x2e17725cU, 0x19822b32U, 0x341a5c68U, 0x0b8b1d16U, 0xe1fe3edfU, 0x098a1b12U, 0x12093624U, 0x8fc98c03U, 0x13873526U, 0x9c4eb925U, 0xdfe17ca3U, 0x5c2ee4b8U, 0xd5e462b7U, 0xdde07aa7U, 0xcbeb408bU, 0x3d90477aU, 0x55a4ffaaU, 0x3c1e4478U, 0x1785392eU, 0xc0605d9dU, 0x00000000U, 0x4a25de94U, 0xf5f402f7U, 0xfff11ce3U, 0x35945f6aU, 0x160b3a2cU, 0xd3e768bbU, 0xea7523c9U, 0xc3ef589bU, 0x6834b8d0U, 0x6231a6c4U, 0xb5d4c277U, 0xbdd0da67U, 0x11863322U, 0xfc7e19e5U, 0x47adc98eU, 0xe7fd34d3U, 0x5229f6a4U, 0x6030a0c0U, 0x763b9aecU, 0x239f6546U, 0xedf82ac7U, 0x91c6ae3fU, 0x26136a4cU, 0x0c061418U, 0x0a051e14U, 0x97c5a433U, 0x22116644U, 0xee772fc1U, 0xf87c15edU, 0xf47a01f5U, 0xf0780dfdU, 0x6c36b4d8U, 0x381c4870U, 0x723996e4U, 0xb259cb79U, 0x30185060U, 0xac56e945U, 0x7bb38df6U, 0x7db087faU, 0x4824d890U, 0x4020c080U, 0x79b28bf2U, 0x39924b72U, 0x5ba3edb6U, 0x9dc0ba27U, 0x8844850dU, 0xc4625195U, 0x20106040U, 0x75b49feaU, 0x15843f2aU, 0x86439711U, 0x3b934d76U, 0x99c2b62fU, 0x944aa135U, 0x67bda9ceU, 0x038f0506U, 0x5a2deeb4U, 0x65bcafcaU, 0x259c6f4aU, 0xd46a61b5U, 0x80409d1dU, 0x83cf981bU, 0x59a2ebb2U, 0x1d80273aU, 0x9e4fbf21U, 
0x3e1f427cU, 0x89ca860fU, 0x49aadb92U, 0x84429115U, }; static const u32 T2[256] = { 0xd2bbba69U, 0x4de554a8U, 0xbce22f5eU, 0xcd2574e8U, 0x51f753a6U, 0x6bd0d3bbU, 0x6fd6d2b9U, 0x29b34d9aU, 0x5dfd50a0U, 0x8acfac45U, 0x0e098d07U, 0xc6a5bf63U, 0xdd3d70e0U, 0x55f152a4U, 0x527b9a29U, 0x2db54c98U, 0x8f46eac9U, 0x73c4d5b7U, 0x66559733U, 0x63dcd1bfU, 0xccaa3366U, 0x59fb51a2U, 0x71c75bb6U, 0xa2f3a651U, 0x5ffedea1U, 0x3dad4890U, 0x9ad7a84dU, 0x5e71992fU, 0x4be0dbabU, 0xc8ac3264U, 0xe695b773U, 0xd732fce5U, 0xab70e3dbU, 0x42639e21U, 0x7e41913fU, 0x567d9b2bU, 0xaf76e2d9U, 0xd6bdbb6bU, 0x199b4182U, 0xa5796edcU, 0xaef9a557U, 0x0b80cb8bU, 0xb1676bd6U, 0x6e599537U, 0xbee1a15fU, 0xeb10f3fbU, 0xfe81b17fU, 0x080c0204U, 0x1792cc85U, 0x37a2c495U, 0x744e1d3aU, 0x50781428U, 0x2bb0c39bU, 0x915763c6U, 0x4fe6daa9U, 0x69d35dbaU, 0x61df5fbeU, 0x57f2dca5U, 0xe9137dfaU, 0x1394cd87U, 0xe11f7ffeU, 0x75c15ab4U, 0xad756cd8U, 0x6dd55cb8U, 0xfb08f7f3U, 0x98d4264cU, 0xdb38ffe3U, 0x9354edc7U, 0x874ae8cdU, 0x4e699d27U, 0xa17f6fdeU, 0x02038e01U, 0x64561932U, 0xbae7a05dU, 0xe71af0fdU, 0x1e11890fU, 0x3c220f1eU, 0x1c12070eU, 0x86c5af43U, 0xcb20fbebU, 0x20300810U, 0x547e152aU, 0x342e0d1aU, 0x10180408U, 0x04060102U, 0x8d4564c8U, 0x5bf8dfa3U, 0xc52976ecU, 0xf90b79f2U, 0x53f4dda7U, 0xf48e3d7aU, 0x5874162cU, 0xfc823f7eU, 0xdcb2376eU, 0xa9736ddaU, 0xe0903870U, 0xdeb1b96fU, 0xd13773e6U, 0x834ce9cfU, 0xd4be356aU, 0x49e355aaU, 0xd93b71e2U, 0xf1077bf6U, 0x0a0f8c05U, 0xd53172e4U, 0x1a17880dU, 0xff0ef6f1U, 0xa8fc2a54U, 0xf8843e7cU, 0x65d95ebcU, 0x9cd2274eU, 0x0589468cU, 0x30280c18U, 0x894365caU, 0xbd6d68d0U, 0x995b61c2U, 0x0c0a0306U, 0x23bcc19fU, 0x41ef57aeU, 0x7fced6b1U, 0x43ecd9afU, 0x7dcd58b0U, 0x47ead8adU, 0x854966ccU, 0x7bc8d7b3U, 0xe89c3a74U, 0x078ac88dU, 0xf0883c78U, 0xcf26fae9U, 0x62539631U, 0xa6f5a753U, 0x5a77982dU, 0x9752ecc5U, 0xdab7b86dU, 0x3ba8c793U, 0x82c3ae41U, 0xb96b69d2U, 0x31a74b96U, 0x96ddab4bU, 0x9ed1a94fU, 0x814f67ceU, 0x283c0a14U, 0x018f478eU, 0xef16f2f9U, 0xee99b577U, 0x88cc2244U, 0xb364e5d7U, 
0x9f5eeec1U, 0xc2a3be61U, 0xacfa2b56U, 0x3e21811fU, 0x486c1224U, 0x362d831bU, 0x6c5a1b36U, 0x38240e1cU, 0x8cca2346U, 0xf304f5f7U, 0x0983458aU, 0x84c62142U, 0x1f9ece81U, 0x39ab4992U, 0xb0e82c58U, 0xc32cf9efU, 0xbf6ee6d1U, 0xe293b671U, 0xa0f02850U, 0x5c72172eU, 0x322b8219U, 0x685c1a34U, 0x161d8b0bU, 0xdf3efee1U, 0x121b8a09U, 0x24360912U, 0x038cc98fU, 0x26358713U, 0x25b94e9cU, 0xa37ce1dfU, 0xb8e42e5cU, 0xb762e4d5U, 0xa77ae0ddU, 0x8b40ebcbU, 0x7a47903dU, 0xaaffa455U, 0x78441e3cU, 0x2e398517U, 0x9d5d60c0U, 0x00000000U, 0x94de254aU, 0xf702f4f5U, 0xe31cf1ffU, 0x6a5f9435U, 0x2c3a0b16U, 0xbb68e7d3U, 0xc92375eaU, 0x9b58efc3U, 0xd0b83468U, 0xc4a63162U, 0x77c2d4b5U, 0x67dad0bdU, 0x22338611U, 0xe5197efcU, 0x8ec9ad47U, 0xd334fde7U, 0xa4f62952U, 0xc0a03060U, 0xec9a3b76U, 0x46659f23U, 0xc72af8edU, 0x3faec691U, 0x4c6a1326U, 0x1814060cU, 0x141e050aU, 0x33a4c597U, 0x44661122U, 0xc12f77eeU, 0xed157cf8U, 0xf5017af4U, 0xfd0d78f0U, 0xd8b4366cU, 0x70481c38U, 0xe4963972U, 0x79cb59b2U, 0x60501830U, 0x45e956acU, 0xf68db37bU, 0xfa87b07dU, 0x90d82448U, 0x80c02040U, 0xf28bb279U, 0x724b9239U, 0xb6eda35bU, 0x27bac09dU, 0x0d854488U, 0x955162c4U, 0x40601020U, 0xea9fb475U, 0x2a3f8415U, 0x11974386U, 0x764d933bU, 0x2fb6c299U, 0x35a14a94U, 0xcea9bd67U, 0x06058f03U, 0xb4ee2d5aU, 0xcaafbc65U, 0x4a6f9c25U, 0xb5616ad4U, 0x1d9d4080U, 0x1b98cf83U, 0xb2eba259U, 0x3a27801dU, 0x21bf4f9eU, 0x7c421f3eU, 0x0f86ca89U, 0x92dbaa49U, 0x15914284U, }; static const u32 T3[256] = { 0xbbd269baU, 0xe54da854U, 0xe2bc5e2fU, 0x25cde874U, 0xf751a653U, 0xd06bbbd3U, 0xd66fb9d2U, 0xb3299a4dU, 0xfd5da050U, 0xcf8a45acU, 0x090e078dU, 0xa5c663bfU, 0x3ddde070U, 0xf155a452U, 0x7b52299aU, 0xb52d984cU, 0x468fc9eaU, 0xc473b7d5U, 0x55663397U, 0xdc63bfd1U, 0xaacc6633U, 0xfb59a251U, 0xc771b65bU, 0xf3a251a6U, 0xfe5fa1deU, 0xad3d9048U, 0xd79a4da8U, 0x715e2f99U, 0xe04babdbU, 0xacc86432U, 0x95e673b7U, 0x32d7e5fcU, 0x70abdbe3U, 0x6342219eU, 0x417e3f91U, 0x7d562b9bU, 0x76afd9e2U, 0xbdd66bbbU, 0x9b198241U, 0x79a5dc6eU, 0xf9ae57a5U, 0x800b8bcbU, 
0x67b1d66bU, 0x596e3795U, 0xe1be5fa1U, 0x10ebfbf3U, 0x81fe7fb1U, 0x0c080402U, 0x921785ccU, 0xa23795c4U, 0x4e743a1dU, 0x78502814U, 0xb02b9bc3U, 0x5791c663U, 0xe64fa9daU, 0xd369ba5dU, 0xdf61be5fU, 0xf257a5dcU, 0x13e9fa7dU, 0x941387cdU, 0x1fe1fe7fU, 0xc175b45aU, 0x75add86cU, 0xd56db85cU, 0x08fbf3f7U, 0xd4984c26U, 0x38dbe3ffU, 0x5493c7edU, 0x4a87cde8U, 0x694e279dU, 0x7fa1de6fU, 0x0302018eU, 0x56643219U, 0xe7ba5da0U, 0x1ae7fdf0U, 0x111e0f89U, 0x223c1e0fU, 0x121c0e07U, 0xc58643afU, 0x20cbebfbU, 0x30201008U, 0x7e542a15U, 0x2e341a0dU, 0x18100804U, 0x06040201U, 0x458dc864U, 0xf85ba3dfU, 0x29c5ec76U, 0x0bf9f279U, 0xf453a7ddU, 0x8ef47a3dU, 0x74582c16U, 0x82fc7e3fU, 0xb2dc6e37U, 0x73a9da6dU, 0x90e07038U, 0xb1de6fb9U, 0x37d1e673U, 0x4c83cfe9U, 0xbed46a35U, 0xe349aa55U, 0x3bd9e271U, 0x07f1f67bU, 0x0f0a058cU, 0x31d5e472U, 0x171a0d88U, 0x0efff1f6U, 0xfca8542aU, 0x84f87c3eU, 0xd965bc5eU, 0xd29c4e27U, 0x89058c46U, 0x2830180cU, 0x4389ca65U, 0x6dbdd068U, 0x5b99c261U, 0x0a0c0603U, 0xbc239fc1U, 0xef41ae57U, 0xce7fb1d6U, 0xec43afd9U, 0xcd7db058U, 0xea47add8U, 0x4985cc66U, 0xc87bb3d7U, 0x9ce8743aU, 0x8a078dc8U, 0x88f0783cU, 0x26cfe9faU, 0x53623196U, 0xf5a653a7U, 0x775a2d98U, 0x5297c5ecU, 0xb7da6db8U, 0xa83b93c7U, 0xc38241aeU, 0x6bb9d269U, 0xa731964bU, 0xdd964babU, 0xd19e4fa9U, 0x4f81ce67U, 0x3c28140aU, 0x8f018e47U, 0x16eff9f2U, 0x99ee77b5U, 0xcc884422U, 0x64b3d7e5U, 0x5e9fc1eeU, 0xa3c261beU, 0xfaac562bU, 0x213e1f81U, 0x6c482412U, 0x2d361b83U, 0x5a6c361bU, 0x24381c0eU, 0xca8c4623U, 0x04f3f7f5U, 0x83098a45U, 0xc6844221U, 0x9e1f81ceU, 0xab399249U, 0xe8b0582cU, 0x2cc3eff9U, 0x6ebfd1e6U, 0x93e271b6U, 0xf0a05028U, 0x725c2e17U, 0x2b321982U, 0x5c68341aU, 0x1d160b8bU, 0x3edfe1feU, 0x1b12098aU, 0x36241209U, 0x8c038fc9U, 0x35261387U, 0xb9259c4eU, 0x7ca3dfe1U, 0xe4b85c2eU, 0x62b7d5e4U, 0x7aa7dde0U, 0x408bcbebU, 0x477a3d90U, 0xffaa55a4U, 0x44783c1eU, 0x392e1785U, 0x5d9dc060U, 0x00000000U, 0xde944a25U, 0x02f7f5f4U, 0x1ce3fff1U, 0x5f6a3594U, 0x3a2c160bU, 0x68bbd3e7U, 0x23c9ea75U, 0x589bc3efU, 
0xb8d06834U, 0xa6c46231U, 0xc277b5d4U, 0xda67bdd0U, 0x33221186U, 0x19e5fc7eU, 0xc98e47adU, 0x34d3e7fdU, 0xf6a45229U, 0xa0c06030U, 0x9aec763bU, 0x6546239fU, 0x2ac7edf8U, 0xae3f91c6U, 0x6a4c2613U, 0x14180c06U, 0x1e140a05U, 0xa43397c5U, 0x66442211U, 0x2fc1ee77U, 0x15edf87cU, 0x01f5f47aU, 0x0dfdf078U, 0xb4d86c36U, 0x4870381cU, 0x96e47239U, 0xcb79b259U, 0x50603018U, 0xe945ac56U, 0x8df67bb3U, 0x87fa7db0U, 0xd8904824U, 0xc0804020U, 0x8bf279b2U, 0x4b723992U, 0xedb65ba3U, 0xba279dc0U, 0x850d8844U, 0x5195c462U, 0x60402010U, 0x9fea75b4U, 0x3f2a1584U, 0x97118643U, 0x4d763b93U, 0xb62f99c2U, 0xa135944aU, 0xa9ce67bdU, 0x0506038fU, 0xeeb45a2dU, 0xafca65bcU, 0x6f4a259cU, 0x61b5d46aU, 0x9d1d8040U, 0x981b83cfU, 0xebb259a2U, 0x273a1d80U, 0xbf219e4fU, 0x427c3e1fU, 0x860f89caU, 0xdb9249aaU, 0x91158442U, }; static const u32 T4[256] = { 0xbabababaU, 0x54545454U, 0x2f2f2f2fU, 0x74747474U, 0x53535353U, 0xd3d3d3d3U, 0xd2d2d2d2U, 0x4d4d4d4dU, 0x50505050U, 0xacacacacU, 0x8d8d8d8dU, 0xbfbfbfbfU, 0x70707070U, 0x52525252U, 0x9a9a9a9aU, 0x4c4c4c4cU, 0xeaeaeaeaU, 0xd5d5d5d5U, 0x97979797U, 0xd1d1d1d1U, 0x33333333U, 0x51515151U, 0x5b5b5b5bU, 0xa6a6a6a6U, 0xdedededeU, 0x48484848U, 0xa8a8a8a8U, 0x99999999U, 0xdbdbdbdbU, 0x32323232U, 0xb7b7b7b7U, 0xfcfcfcfcU, 0xe3e3e3e3U, 0x9e9e9e9eU, 0x91919191U, 0x9b9b9b9bU, 0xe2e2e2e2U, 0xbbbbbbbbU, 0x41414141U, 0x6e6e6e6eU, 0xa5a5a5a5U, 0xcbcbcbcbU, 0x6b6b6b6bU, 0x95959595U, 0xa1a1a1a1U, 0xf3f3f3f3U, 0xb1b1b1b1U, 0x02020202U, 0xccccccccU, 0xc4c4c4c4U, 0x1d1d1d1dU, 0x14141414U, 0xc3c3c3c3U, 0x63636363U, 0xdadadadaU, 0x5d5d5d5dU, 0x5f5f5f5fU, 0xdcdcdcdcU, 0x7d7d7d7dU, 0xcdcdcdcdU, 0x7f7f7f7fU, 0x5a5a5a5aU, 0x6c6c6c6cU, 0x5c5c5c5cU, 0xf7f7f7f7U, 0x26262626U, 0xffffffffU, 0xededededU, 0xe8e8e8e8U, 0x9d9d9d9dU, 0x6f6f6f6fU, 0x8e8e8e8eU, 0x19191919U, 0xa0a0a0a0U, 0xf0f0f0f0U, 0x89898989U, 0x0f0f0f0fU, 0x07070707U, 0xafafafafU, 0xfbfbfbfbU, 0x08080808U, 0x15151515U, 0x0d0d0d0dU, 0x04040404U, 0x01010101U, 0x64646464U, 0xdfdfdfdfU, 0x76767676U, 0x79797979U, 0xddddddddU, 
0x3d3d3d3dU, 0x16161616U, 0x3f3f3f3fU, 0x37373737U, 0x6d6d6d6dU, 0x38383838U, 0xb9b9b9b9U, 0x73737373U, 0xe9e9e9e9U, 0x35353535U, 0x55555555U, 0x71717171U, 0x7b7b7b7bU, 0x8c8c8c8cU, 0x72727272U, 0x88888888U, 0xf6f6f6f6U, 0x2a2a2a2aU, 0x3e3e3e3eU, 0x5e5e5e5eU, 0x27272727U, 0x46464646U, 0x0c0c0c0cU, 0x65656565U, 0x68686868U, 0x61616161U, 0x03030303U, 0xc1c1c1c1U, 0x57575757U, 0xd6d6d6d6U, 0xd9d9d9d9U, 0x58585858U, 0xd8d8d8d8U, 0x66666666U, 0xd7d7d7d7U, 0x3a3a3a3aU, 0xc8c8c8c8U, 0x3c3c3c3cU, 0xfafafafaU, 0x96969696U, 0xa7a7a7a7U, 0x98989898U, 0xececececU, 0xb8b8b8b8U, 0xc7c7c7c7U, 0xaeaeaeaeU, 0x69696969U, 0x4b4b4b4bU, 0xababababU, 0xa9a9a9a9U, 0x67676767U, 0x0a0a0a0aU, 0x47474747U, 0xf2f2f2f2U, 0xb5b5b5b5U, 0x22222222U, 0xe5e5e5e5U, 0xeeeeeeeeU, 0xbebebebeU, 0x2b2b2b2bU, 0x81818181U, 0x12121212U, 0x83838383U, 0x1b1b1b1bU, 0x0e0e0e0eU, 0x23232323U, 0xf5f5f5f5U, 0x45454545U, 0x21212121U, 0xcecececeU, 0x49494949U, 0x2c2c2c2cU, 0xf9f9f9f9U, 0xe6e6e6e6U, 0xb6b6b6b6U, 0x28282828U, 0x17171717U, 0x82828282U, 0x1a1a1a1aU, 0x8b8b8b8bU, 0xfefefefeU, 0x8a8a8a8aU, 0x09090909U, 0xc9c9c9c9U, 0x87878787U, 0x4e4e4e4eU, 0xe1e1e1e1U, 0x2e2e2e2eU, 0xe4e4e4e4U, 0xe0e0e0e0U, 0xebebebebU, 0x90909090U, 0xa4a4a4a4U, 0x1e1e1e1eU, 0x85858585U, 0x60606060U, 0x00000000U, 0x25252525U, 0xf4f4f4f4U, 0xf1f1f1f1U, 0x94949494U, 0x0b0b0b0bU, 0xe7e7e7e7U, 0x75757575U, 0xefefefefU, 0x34343434U, 0x31313131U, 0xd4d4d4d4U, 0xd0d0d0d0U, 0x86868686U, 0x7e7e7e7eU, 0xadadadadU, 0xfdfdfdfdU, 0x29292929U, 0x30303030U, 0x3b3b3b3bU, 0x9f9f9f9fU, 0xf8f8f8f8U, 0xc6c6c6c6U, 0x13131313U, 0x06060606U, 0x05050505U, 0xc5c5c5c5U, 0x11111111U, 0x77777777U, 0x7c7c7c7cU, 0x7a7a7a7aU, 0x78787878U, 0x36363636U, 0x1c1c1c1cU, 0x39393939U, 0x59595959U, 0x18181818U, 0x56565656U, 0xb3b3b3b3U, 0xb0b0b0b0U, 0x24242424U, 0x20202020U, 0xb2b2b2b2U, 0x92929292U, 0xa3a3a3a3U, 0xc0c0c0c0U, 0x44444444U, 0x62626262U, 0x10101010U, 0xb4b4b4b4U, 0x84848484U, 0x43434343U, 0x93939393U, 0xc2c2c2c2U, 0x4a4a4a4aU, 0xbdbdbdbdU, 0x8f8f8f8fU, 
0x2d2d2d2dU, 0xbcbcbcbcU, 0x9c9c9c9cU, 0x6a6a6a6aU, 0x40404040U, 0xcfcfcfcfU, 0xa2a2a2a2U, 0x80808080U, 0x4f4f4f4fU, 0x1f1f1f1fU, 0xcacacacaU, 0xaaaaaaaaU, 0x42424242U, }; static const u32 T5[256] = { 0x00000000U, 0x01020608U, 0x02040c10U, 0x03060a18U, 0x04081820U, 0x050a1e28U, 0x060c1430U, 0x070e1238U, 0x08103040U, 0x09123648U, 0x0a143c50U, 0x0b163a58U, 0x0c182860U, 0x0d1a2e68U, 0x0e1c2470U, 0x0f1e2278U, 0x10206080U, 0x11226688U, 0x12246c90U, 0x13266a98U, 0x142878a0U, 0x152a7ea8U, 0x162c74b0U, 0x172e72b8U, 0x183050c0U, 0x193256c8U, 0x1a345cd0U, 0x1b365ad8U, 0x1c3848e0U, 0x1d3a4ee8U, 0x1e3c44f0U, 0x1f3e42f8U, 0x2040c01dU, 0x2142c615U, 0x2244cc0dU, 0x2346ca05U, 0x2448d83dU, 0x254ade35U, 0x264cd42dU, 0x274ed225U, 0x2850f05dU, 0x2952f655U, 0x2a54fc4dU, 0x2b56fa45U, 0x2c58e87dU, 0x2d5aee75U, 0x2e5ce46dU, 0x2f5ee265U, 0x3060a09dU, 0x3162a695U, 0x3264ac8dU, 0x3366aa85U, 0x3468b8bdU, 0x356abeb5U, 0x366cb4adU, 0x376eb2a5U, 0x387090ddU, 0x397296d5U, 0x3a749ccdU, 0x3b769ac5U, 0x3c7888fdU, 0x3d7a8ef5U, 0x3e7c84edU, 0x3f7e82e5U, 0x40809d3aU, 0x41829b32U, 0x4284912aU, 0x43869722U, 0x4488851aU, 0x458a8312U, 0x468c890aU, 0x478e8f02U, 0x4890ad7aU, 0x4992ab72U, 0x4a94a16aU, 0x4b96a762U, 0x4c98b55aU, 0x4d9ab352U, 0x4e9cb94aU, 0x4f9ebf42U, 0x50a0fdbaU, 0x51a2fbb2U, 0x52a4f1aaU, 0x53a6f7a2U, 0x54a8e59aU, 0x55aae392U, 0x56ace98aU, 0x57aeef82U, 0x58b0cdfaU, 0x59b2cbf2U, 0x5ab4c1eaU, 0x5bb6c7e2U, 0x5cb8d5daU, 0x5dbad3d2U, 0x5ebcd9caU, 0x5fbedfc2U, 0x60c05d27U, 0x61c25b2fU, 0x62c45137U, 0x63c6573fU, 0x64c84507U, 0x65ca430fU, 0x66cc4917U, 0x67ce4f1fU, 0x68d06d67U, 0x69d26b6fU, 0x6ad46177U, 0x6bd6677fU, 0x6cd87547U, 0x6dda734fU, 0x6edc7957U, 0x6fde7f5fU, 0x70e03da7U, 0x71e23bafU, 0x72e431b7U, 0x73e637bfU, 0x74e82587U, 0x75ea238fU, 0x76ec2997U, 0x77ee2f9fU, 0x78f00de7U, 0x79f20befU, 0x7af401f7U, 0x7bf607ffU, 0x7cf815c7U, 0x7dfa13cfU, 0x7efc19d7U, 0x7ffe1fdfU, 0x801d2774U, 0x811f217cU, 0x82192b64U, 0x831b2d6cU, 0x84153f54U, 0x8517395cU, 0x86113344U, 0x8713354cU, 0x880d1734U, 0x890f113cU, 
0x8a091b24U, 0x8b0b1d2cU, 0x8c050f14U, 0x8d07091cU, 0x8e010304U, 0x8f03050cU, 0x903d47f4U, 0x913f41fcU, 0x92394be4U, 0x933b4decU, 0x94355fd4U, 0x953759dcU, 0x963153c4U, 0x973355ccU, 0x982d77b4U, 0x992f71bcU, 0x9a297ba4U, 0x9b2b7dacU, 0x9c256f94U, 0x9d27699cU, 0x9e216384U, 0x9f23658cU, 0xa05de769U, 0xa15fe161U, 0xa259eb79U, 0xa35bed71U, 0xa455ff49U, 0xa557f941U, 0xa651f359U, 0xa753f551U, 0xa84dd729U, 0xa94fd121U, 0xaa49db39U, 0xab4bdd31U, 0xac45cf09U, 0xad47c901U, 0xae41c319U, 0xaf43c511U, 0xb07d87e9U, 0xb17f81e1U, 0xb2798bf9U, 0xb37b8df1U, 0xb4759fc9U, 0xb57799c1U, 0xb67193d9U, 0xb77395d1U, 0xb86db7a9U, 0xb96fb1a1U, 0xba69bbb9U, 0xbb6bbdb1U, 0xbc65af89U, 0xbd67a981U, 0xbe61a399U, 0xbf63a591U, 0xc09dba4eU, 0xc19fbc46U, 0xc299b65eU, 0xc39bb056U, 0xc495a26eU, 0xc597a466U, 0xc691ae7eU, 0xc793a876U, 0xc88d8a0eU, 0xc98f8c06U, 0xca89861eU, 0xcb8b8016U, 0xcc85922eU, 0xcd879426U, 0xce819e3eU, 0xcf839836U, 0xd0bddaceU, 0xd1bfdcc6U, 0xd2b9d6deU, 0xd3bbd0d6U, 0xd4b5c2eeU, 0xd5b7c4e6U, 0xd6b1cefeU, 0xd7b3c8f6U, 0xd8adea8eU, 0xd9afec86U, 0xdaa9e69eU, 0xdbabe096U, 0xdca5f2aeU, 0xdda7f4a6U, 0xdea1febeU, 0xdfa3f8b6U, 0xe0dd7a53U, 0xe1df7c5bU, 0xe2d97643U, 0xe3db704bU, 0xe4d56273U, 0xe5d7647bU, 0xe6d16e63U, 0xe7d3686bU, 0xe8cd4a13U, 0xe9cf4c1bU, 0xeac94603U, 0xebcb400bU, 0xecc55233U, 0xedc7543bU, 0xeec15e23U, 0xefc3582bU, 0xf0fd1ad3U, 0xf1ff1cdbU, 0xf2f916c3U, 0xf3fb10cbU, 0xf4f502f3U, 0xf5f704fbU, 0xf6f10ee3U, 0xf7f308ebU, 0xf8ed2a93U, 0xf9ef2c9bU, 0xfae92683U, 0xfbeb208bU, 0xfce532b3U, 0xfde734bbU, 0xfee13ea3U, 0xffe338abU, }; static const u32 rc[] = { 0xba542f74U, 0x53d3d24dU, 0x50ac8dbfU, 0x70529a4cU, 0xead597d1U, 0x33515ba6U, 0xde48a899U, 0xdb32b7fcU, 0xe39e919bU, 0xe2bb416eU, 0xa5cb6b95U, 0xa1f3b102U, 0xccc41d14U, 0xc363da5dU, 0x5fdc7dcdU, 0x7f5a6c5cU, 0xf726ffedU, 0xe89d6f8eU, 0x19a0f089U, }; static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); const __be32 *key = (const __be32 *)in_key; u32 
*flags = &tfm->crt_flags; int N, R, i, r; u32 kappa[ANUBIS_MAX_N]; u32 inter[ANUBIS_MAX_N]; switch (key_len) { case 16: case 20: case 24: case 28: case 32: case 36: case 40: break; default: *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } ctx->key_len = key_len * 8; N = ctx->key_len >> 5; ctx->R = R = 8 + N; /* * map cipher key to initial key state (mu): */ for (i = 0; i < N; i++) kappa[i] = be32_to_cpu(key[i]); /* * generate R + 1 round keys: */ for (r = 0; r <= R; r++) { u32 K0, K1, K2, K3; /* * generate r-th round key K^r: */ K0 = T4[(kappa[N - 1] >> 24) ]; K1 = T4[(kappa[N - 1] >> 16) & 0xff]; K2 = T4[(kappa[N - 1] >> 8) & 0xff]; K3 = T4[(kappa[N - 1] ) & 0xff]; for (i = N - 2; i >= 0; i--) { K0 = T4[(kappa[i] >> 24) ] ^ (T5[(K0 >> 24) ] & 0xff000000U) ^ (T5[(K0 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K0 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K0 ) & 0xff] & 0x000000ffU); K1 = T4[(kappa[i] >> 16) & 0xff] ^ (T5[(K1 >> 24) ] & 0xff000000U) ^ (T5[(K1 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K1 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K1 ) & 0xff] & 0x000000ffU); K2 = T4[(kappa[i] >> 8) & 0xff] ^ (T5[(K2 >> 24) ] & 0xff000000U) ^ (T5[(K2 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K2 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K2 ) & 0xff] & 0x000000ffU); K3 = T4[(kappa[i] ) & 0xff] ^ (T5[(K3 >> 24) ] & 0xff000000U) ^ (T5[(K3 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K3 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K3 ) & 0xff] & 0x000000ffU); } ctx->E[r][0] = K0; ctx->E[r][1] = K1; ctx->E[r][2] = K2; ctx->E[r][3] = K3; /* * compute kappa^{r+1} from kappa^r: */ if (r == R) break; for (i = 0; i < N; i++) { int j = i; inter[i] = T0[(kappa[j--] >> 24) ]; if (j < 0) j = N - 1; inter[i] ^= T1[(kappa[j--] >> 16) & 0xff]; if (j < 0) j = N - 1; inter[i] ^= T2[(kappa[j--] >> 8) & 0xff]; if (j < 0) j = N - 1; inter[i] ^= T3[(kappa[j ] ) & 0xff]; } kappa[0] = inter[0] ^ rc[r]; for (i = 1; i < N; i++) kappa[i] = inter[i]; } /* * generate inverse key schedule: K'^0 = K^R, K'^R = * K^0, K'^r = theta(K^{R-r}): */ for 
(i = 0; i < 4; i++) { ctx->D[0][i] = ctx->E[R][i]; ctx->D[R][i] = ctx->E[0][i]; } for (r = 1; r < R; r++) { for (i = 0; i < 4; i++) { u32 v = ctx->E[R - r][i]; ctx->D[r][i] = T0[T4[(v >> 24) ] & 0xff] ^ T1[T4[(v >> 16) & 0xff] & 0xff] ^ T2[T4[(v >> 8) & 0xff] & 0xff] ^ T3[T4[(v ) & 0xff] & 0xff]; } } return 0; } static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], u8 *ciphertext, const u8 *plaintext, const int R) { const __be32 *src = (const __be32 *)plaintext; __be32 *dst = (__be32 *)ciphertext; int i, r; u32 state[4]; u32 inter[4]; /* * map plaintext block to cipher state (mu) * and add initial round key (sigma[K^0]): */ for (i = 0; i < 4; i++) state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i]; /* * R - 1 full rounds: */ for (r = 1; r < R; r++) { inter[0] = T0[(state[0] >> 24) ] ^ T1[(state[1] >> 24) ] ^ T2[(state[2] >> 24) ] ^ T3[(state[3] >> 24) ] ^ roundKey[r][0]; inter[1] = T0[(state[0] >> 16) & 0xff] ^ T1[(state[1] >> 16) & 0xff] ^ T2[(state[2] >> 16) & 0xff] ^ T3[(state[3] >> 16) & 0xff] ^ roundKey[r][1]; inter[2] = T0[(state[0] >> 8) & 0xff] ^ T1[(state[1] >> 8) & 0xff] ^ T2[(state[2] >> 8) & 0xff] ^ T3[(state[3] >> 8) & 0xff] ^ roundKey[r][2]; inter[3] = T0[(state[0] ) & 0xff] ^ T1[(state[1] ) & 0xff] ^ T2[(state[2] ) & 0xff] ^ T3[(state[3] ) & 0xff] ^ roundKey[r][3]; state[0] = inter[0]; state[1] = inter[1]; state[2] = inter[2]; state[3] = inter[3]; } /* * last round: */ inter[0] = (T0[(state[0] >> 24) ] & 0xff000000U) ^ (T1[(state[1] >> 24) ] & 0x00ff0000U) ^ (T2[(state[2] >> 24) ] & 0x0000ff00U) ^ (T3[(state[3] >> 24) ] & 0x000000ffU) ^ roundKey[R][0]; inter[1] = (T0[(state[0] >> 16) & 0xff] & 0xff000000U) ^ (T1[(state[1] >> 16) & 0xff] & 0x00ff0000U) ^ (T2[(state[2] >> 16) & 0xff] & 0x0000ff00U) ^ (T3[(state[3] >> 16) & 0xff] & 0x000000ffU) ^ roundKey[R][1]; inter[2] = (T0[(state[0] >> 8) & 0xff] & 0xff000000U) ^ (T1[(state[1] >> 8) & 0xff] & 0x00ff0000U) ^ (T2[(state[2] >> 8) & 0xff] & 0x0000ff00U) ^ (T3[(state[3] >> 8) & 0xff] & 
0x000000ffU) ^ roundKey[R][2]; inter[3] = (T0[(state[0] ) & 0xff] & 0xff000000U) ^ (T1[(state[1] ) & 0xff] & 0x00ff0000U) ^ (T2[(state[2] ) & 0xff] & 0x0000ff00U) ^ (T3[(state[3] ) & 0xff] & 0x000000ffU) ^ roundKey[R][3]; /* * map cipher state to ciphertext block (mu^{-1}): */ for (i = 0; i < 4; i++) dst[i] = cpu_to_be32(inter[i]); } static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); anubis_crypt(ctx->E, dst, src, ctx->R); } static void anubis_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); anubis_crypt(ctx->D, dst, src, ctx->R); } static struct crypto_alg anubis_alg = { .cra_name = "anubis", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = ANUBIS_BLOCK_SIZE, .cra_ctxsize = sizeof (struct anubis_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = ANUBIS_MIN_KEY_SIZE, .cia_max_keysize = ANUBIS_MAX_KEY_SIZE, .cia_setkey = anubis_setkey, .cia_encrypt = anubis_encrypt, .cia_decrypt = anubis_decrypt } } }; static int __init anubis_mod_init(void) { int ret = 0; ret = crypto_register_alg(&anubis_alg); return ret; } static void __exit anubis_mod_fini(void) { crypto_unregister_alg(&anubis_alg); } module_init(anubis_mod_init); module_exit(anubis_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
gpl-2.0
varund7726/OwnKernel-bacon
drivers/infiniband/core/user_mad.c
2339
30881
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2008 Cisco. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/dma-mapping.h> #include <linux/poll.h> #include <linux/mutex.h> #include <linux/kref.h> #include <linux/compat.h> #include <linux/sched.h> #include <linux/semaphore.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <rdma/ib_mad.h> #include <rdma/ib_user_mad.h> MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand userspace MAD packet access"); MODULE_LICENSE("Dual BSD/GPL"); enum { IB_UMAD_MAX_PORTS = 64, IB_UMAD_MAX_AGENTS = 32, IB_UMAD_MAJOR = 231, IB_UMAD_MINOR_BASE = 0 }; /* * Our lifetime rules for these structs are the following: * device special file is opened, we take a reference on the * ib_umad_port's struct ib_umad_device. We drop these * references in the corresponding close(). * * In addition to references coming from open character devices, there * is one more reference to each ib_umad_device representing the * module's reference taken when allocating the ib_umad_device in * ib_umad_add_one(). * * When destroying an ib_umad_device, we drop the module's reference. 
*/ struct ib_umad_port { struct cdev cdev; struct device *dev; struct cdev sm_cdev; struct device *sm_dev; struct semaphore sm_sem; struct mutex file_mutex; struct list_head file_list; struct ib_device *ib_dev; struct ib_umad_device *umad_dev; int dev_num; u8 port_num; }; struct ib_umad_device { int start_port, end_port; struct kobject kobj; struct ib_umad_port port[0]; }; struct ib_umad_file { struct mutex mutex; struct ib_umad_port *port; struct list_head recv_list; struct list_head send_list; struct list_head port_list; spinlock_t send_lock; wait_queue_head_t recv_wait; struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS]; int agents_dead; u8 use_pkey_index; u8 already_used; }; struct ib_umad_packet { struct ib_mad_send_buf *msg; struct ib_mad_recv_wc *recv_wc; struct list_head list; int length; struct ib_user_mad mad; }; static struct class *umad_class; static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE); static DEFINE_SPINLOCK(port_lock); static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS); static void ib_umad_add_one(struct ib_device *device); static void ib_umad_remove_one(struct ib_device *device); static void ib_umad_release_dev(struct kobject *kobj) { struct ib_umad_device *dev = container_of(kobj, struct ib_umad_device, kobj); kfree(dev); } static struct kobj_type ib_umad_dev_ktype = { .release = ib_umad_release_dev, }; static int hdr_size(struct ib_umad_file *file) { return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) : sizeof (struct ib_user_mad_hdr_old); } /* caller must hold file->mutex */ static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id) { return file->agents_dead ? 
NULL : file->agent[id]; } static int queue_packet(struct ib_umad_file *file, struct ib_mad_agent *agent, struct ib_umad_packet *packet) { int ret = 1; mutex_lock(&file->mutex); for (packet->mad.hdr.id = 0; packet->mad.hdr.id < IB_UMAD_MAX_AGENTS; packet->mad.hdr.id++) if (agent == __get_agent(file, packet->mad.hdr.id)) { list_add_tail(&packet->list, &file->recv_list); wake_up_interruptible(&file->recv_wait); ret = 0; break; } mutex_unlock(&file->mutex); return ret; } static void dequeue_send(struct ib_umad_file *file, struct ib_umad_packet *packet) { spin_lock_irq(&file->send_lock); list_del(&packet->list); spin_unlock_irq(&file->send_lock); } static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *send_wc) { struct ib_umad_file *file = agent->context; struct ib_umad_packet *packet = send_wc->send_buf->context[0]; dequeue_send(file, packet); ib_destroy_ah(packet->msg->ah); ib_free_send_mad(packet->msg); if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) { packet->length = IB_MGMT_MAD_HDR; packet->mad.hdr.status = ETIMEDOUT; if (!queue_packet(file, agent, packet)) return; } kfree(packet); } static void recv_handler(struct ib_mad_agent *agent, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_umad_file *file = agent->context; struct ib_umad_packet *packet; if (mad_recv_wc->wc->status != IB_WC_SUCCESS) goto err1; packet = kzalloc(sizeof *packet, GFP_KERNEL); if (!packet) goto err1; packet->length = mad_recv_wc->mad_len; packet->recv_wc = mad_recv_wc; packet->mad.hdr.status = 0; packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len; packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); packet->mad.hdr.lid = cpu_to_be16(mad_recv_wc->wc->slid); packet->mad.hdr.sl = mad_recv_wc->wc->sl; packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index; packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH); if (packet->mad.hdr.grh_present) { struct ib_ah_attr ah_attr; 
ib_init_ah_from_wc(agent->device, agent->port_num, mad_recv_wc->wc, mad_recv_wc->recv_buf.grh, &ah_attr); packet->mad.hdr.gid_index = ah_attr.grh.sgid_index; packet->mad.hdr.hop_limit = ah_attr.grh.hop_limit; packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class; memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16); packet->mad.hdr.flow_label = cpu_to_be32(ah_attr.grh.flow_label); } if (queue_packet(file, agent, packet)) goto err2; return; err2: kfree(packet); err1: ib_free_recv_mad(mad_recv_wc); } static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf, struct ib_umad_packet *packet, size_t count) { struct ib_mad_recv_buf *recv_buf; int left, seg_payload, offset, max_seg_payload; /* We need enough room to copy the first (or only) MAD segment. */ recv_buf = &packet->recv_wc->recv_buf; if ((packet->length <= sizeof (*recv_buf->mad) && count < hdr_size(file) + packet->length) || (packet->length > sizeof (*recv_buf->mad) && count < hdr_size(file) + sizeof (*recv_buf->mad))) return -EINVAL; if (copy_to_user(buf, &packet->mad, hdr_size(file))) return -EFAULT; buf += hdr_size(file); seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad)); if (copy_to_user(buf, recv_buf->mad, seg_payload)) return -EFAULT; if (seg_payload < packet->length) { /* * Multipacket RMPP MAD message. Copy remainder of message. * Note that last segment may have a shorter payload. */ if (count < hdr_size(file) + packet->length) { /* * The buffer is too small, return the first RMPP segment, * which includes the RMPP message length. 
*/ return -ENOSPC; } offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class); max_seg_payload = sizeof (struct ib_mad) - offset; for (left = packet->length - seg_payload, buf += seg_payload; left; left -= seg_payload, buf += seg_payload) { recv_buf = container_of(recv_buf->list.next, struct ib_mad_recv_buf, list); seg_payload = min(left, max_seg_payload); if (copy_to_user(buf, ((void *) recv_buf->mad) + offset, seg_payload)) return -EFAULT; } } return hdr_size(file) + packet->length; } static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf, struct ib_umad_packet *packet, size_t count) { ssize_t size = hdr_size(file) + packet->length; if (count < size) return -EINVAL; if (copy_to_user(buf, &packet->mad, hdr_size(file))) return -EFAULT; buf += hdr_size(file); if (copy_to_user(buf, packet->mad.data, packet->length)) return -EFAULT; return size; } static ssize_t ib_umad_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct ib_umad_file *file = filp->private_data; struct ib_umad_packet *packet; ssize_t ret; if (count < hdr_size(file)) return -EINVAL; mutex_lock(&file->mutex); while (list_empty(&file->recv_list)) { mutex_unlock(&file->mutex); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible(file->recv_wait, !list_empty(&file->recv_list))) return -ERESTARTSYS; mutex_lock(&file->mutex); } packet = list_entry(file->recv_list.next, struct ib_umad_packet, list); list_del(&packet->list); mutex_unlock(&file->mutex); if (packet->recv_wc) ret = copy_recv_mad(file, buf, packet, count); else ret = copy_send_mad(file, buf, packet, count); if (ret < 0) { /* Requeue packet */ mutex_lock(&file->mutex); list_add(&packet->list, &file->recv_list); mutex_unlock(&file->mutex); } else { if (packet->recv_wc) ib_free_recv_mad(packet->recv_wc); kfree(packet); } return ret; } static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf) { int left, seg; /* Copy class specific header */ if 
((msg->hdr_len > IB_MGMT_RMPP_HDR) && copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR, msg->hdr_len - IB_MGMT_RMPP_HDR)) return -EFAULT; /* All headers are in place. Copy data segments. */ for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0; seg++, left -= msg->seg_size, buf += msg->seg_size) { if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf, min(left, msg->seg_size))) return -EFAULT; } return 0; } static int same_destination(struct ib_user_mad_hdr *hdr1, struct ib_user_mad_hdr *hdr2) { if (!hdr1->grh_present && !hdr2->grh_present) return (hdr1->lid == hdr2->lid); if (hdr1->grh_present && hdr2->grh_present) return !memcmp(hdr1->gid, hdr2->gid, 16); return 0; } static int is_duplicate(struct ib_umad_file *file, struct ib_umad_packet *packet) { struct ib_umad_packet *sent_packet; struct ib_mad_hdr *sent_hdr, *hdr; hdr = (struct ib_mad_hdr *) packet->mad.data; list_for_each_entry(sent_packet, &file->send_list, list) { sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data; if ((hdr->tid != sent_hdr->tid) || (hdr->mgmt_class != sent_hdr->mgmt_class)) continue; /* * No need to be overly clever here. If two new operations have * the same TID, reject the second as a duplicate. This is more * restrictive than required by the spec. 
*/ if (!ib_response_mad((struct ib_mad *) hdr)) { if (!ib_response_mad((struct ib_mad *) sent_hdr)) return 1; continue; } else if (!ib_response_mad((struct ib_mad *) sent_hdr)) continue; if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr)) return 1; } return 0; } static ssize_t ib_umad_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct ib_umad_file *file = filp->private_data; struct ib_umad_packet *packet; struct ib_mad_agent *agent; struct ib_ah_attr ah_attr; struct ib_ah *ah; struct ib_rmpp_mad *rmpp_mad; __be64 *tid; int ret, data_len, hdr_len, copy_offset, rmpp_active; if (count < hdr_size(file) + IB_MGMT_RMPP_HDR) return -EINVAL; packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL); if (!packet) return -ENOMEM; if (copy_from_user(&packet->mad, buf, hdr_size(file))) { ret = -EFAULT; goto err; } if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) { ret = -EINVAL; goto err; } buf += hdr_size(file); if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) { ret = -EFAULT; goto err; } mutex_lock(&file->mutex); agent = __get_agent(file, packet->mad.hdr.id); if (!agent) { ret = -EINVAL; goto err_up; } memset(&ah_attr, 0, sizeof ah_attr); ah_attr.dlid = be16_to_cpu(packet->mad.hdr.lid); ah_attr.sl = packet->mad.hdr.sl; ah_attr.src_path_bits = packet->mad.hdr.path_bits; ah_attr.port_num = file->port->port_num; if (packet->mad.hdr.grh_present) { ah_attr.ah_flags = IB_AH_GRH; memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); ah_attr.grh.sgid_index = packet->mad.hdr.gid_index; ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label); ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; } ah = ib_create_ah(agent->qp->pd, &ah_attr); if (IS_ERR(ah)) { ret = PTR_ERR(ah); goto err_up; } rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data; hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) 
{ copy_offset = IB_MGMT_MAD_HDR; rmpp_active = 0; } else { copy_offset = IB_MGMT_RMPP_HDR; rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE; } data_len = count - hdr_size(file) - hdr_len; packet->msg = ib_create_send_mad(agent, be32_to_cpu(packet->mad.hdr.qpn), packet->mad.hdr.pkey_index, rmpp_active, hdr_len, data_len, GFP_KERNEL); if (IS_ERR(packet->msg)) { ret = PTR_ERR(packet->msg); goto err_ah; } packet->msg->ah = ah; packet->msg->timeout_ms = packet->mad.hdr.timeout_ms; packet->msg->retries = packet->mad.hdr.retries; packet->msg->context[0] = packet; /* Copy MAD header. Any RMPP header is already in place. */ memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR); if (!rmpp_active) { if (copy_from_user(packet->msg->mad + copy_offset, buf + copy_offset, hdr_len + data_len - copy_offset)) { ret = -EFAULT; goto err_msg; } } else { ret = copy_rmpp_mad(packet->msg, buf); if (ret) goto err_msg; } /* * Set the high-order part of the transaction ID to make MADs from * different agents unique, and allow routing responses back to the * original requestor. 
*/ if (!ib_response_mad(packet->msg->mad)) { tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid; *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | (be64_to_cpup(tid) & 0xffffffff)); rmpp_mad->mad_hdr.tid = *tid; } spin_lock_irq(&file->send_lock); ret = is_duplicate(file, packet); if (!ret) list_add_tail(&packet->list, &file->send_list); spin_unlock_irq(&file->send_lock); if (ret) { ret = -EINVAL; goto err_msg; } ret = ib_post_send_mad(packet->msg, NULL); if (ret) goto err_send; mutex_unlock(&file->mutex); return count; err_send: dequeue_send(file, packet); err_msg: ib_free_send_mad(packet->msg); err_ah: ib_destroy_ah(ah); err_up: mutex_unlock(&file->mutex); err: kfree(packet); return ret; } static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait) { struct ib_umad_file *file = filp->private_data; /* we will always be able to post a MAD send */ unsigned int mask = POLLOUT | POLLWRNORM; poll_wait(filp, &file->recv_wait, wait); if (!list_empty(&file->recv_list)) mask |= POLLIN | POLLRDNORM; return mask; } static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, int compat_method_mask) { struct ib_user_mad_reg_req ureq; struct ib_mad_reg_req req; struct ib_mad_agent *agent = NULL; int agent_id; int ret; mutex_lock(&file->port->file_mutex); mutex_lock(&file->mutex); if (!file->port->ib_dev) { ret = -EPIPE; goto out; } if (copy_from_user(&ureq, arg, sizeof ureq)) { ret = -EFAULT; goto out; } if (ureq.qpn != 0 && ureq.qpn != 1) { ret = -EINVAL; goto out; } for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id) if (!__get_agent(file, agent_id)) goto found; ret = -ENOMEM; goto out; found: if (ureq.mgmt_class) { req.mgmt_class = ureq.mgmt_class; req.mgmt_class_version = ureq.mgmt_class_version; memcpy(req.oui, ureq.oui, sizeof req.oui); if (compat_method_mask) { u32 *umm = (u32 *) ureq.method_mask; int i; for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i) req.method_mask[i] = umm[i * 2] | ((u64) umm[i * 2 + 1] << 32); 
} else memcpy(req.method_mask, ureq.method_mask, sizeof req.method_mask); } agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num, ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, ureq.mgmt_class ? &req : NULL, ureq.rmpp_version, send_handler, recv_handler, file); if (IS_ERR(agent)) { ret = PTR_ERR(agent); agent = NULL; goto out; } if (put_user(agent_id, (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) { ret = -EFAULT; goto out; } if (!file->already_used) { file->already_used = 1; if (!file->use_pkey_index) { printk(KERN_WARNING "user_mad: process %s did not enable " "P_Key index support.\n", current->comm); printk(KERN_WARNING "user_mad: Documentation/infiniband/user_mad.txt " "has info on the new ABI.\n"); } } file->agent[agent_id] = agent; ret = 0; out: mutex_unlock(&file->mutex); if (ret && agent) ib_unregister_mad_agent(agent); mutex_unlock(&file->port->file_mutex); return ret; } static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg) { struct ib_mad_agent *agent = NULL; u32 id; int ret = 0; if (get_user(id, arg)) return -EFAULT; mutex_lock(&file->port->file_mutex); mutex_lock(&file->mutex); if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) { ret = -EINVAL; goto out; } agent = file->agent[id]; file->agent[id] = NULL; out: mutex_unlock(&file->mutex); if (agent) ib_unregister_mad_agent(agent); mutex_unlock(&file->port->file_mutex); return ret; } static long ib_umad_enable_pkey(struct ib_umad_file *file) { int ret = 0; mutex_lock(&file->mutex); if (file->already_used) ret = -EINVAL; else file->use_pkey_index = 1; mutex_unlock(&file->mutex); return ret; } static long ib_umad_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { switch (cmd) { case IB_USER_MAD_REGISTER_AGENT: return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0); case IB_USER_MAD_UNREGISTER_AGENT: return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg); case IB_USER_MAD_ENABLE_PKEY: return 
ib_umad_enable_pkey(filp->private_data); default: return -ENOIOCTLCMD; } } #ifdef CONFIG_COMPAT static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { switch (cmd) { case IB_USER_MAD_REGISTER_AGENT: return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1); case IB_USER_MAD_UNREGISTER_AGENT: return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg)); case IB_USER_MAD_ENABLE_PKEY: return ib_umad_enable_pkey(filp->private_data); default: return -ENOIOCTLCMD; } } #endif /* * ib_umad_open() does not need the BKL: * * - the ib_umad_port structures are properly reference counted, and * everything else is purely local to the file being created, so * races against other open calls are not a problem; * - the ioctl method does not affect any global state outside of the * file structure being operated on; */ static int ib_umad_open(struct inode *inode, struct file *filp) { struct ib_umad_port *port; struct ib_umad_file *file; int ret = -ENXIO; port = container_of(inode->i_cdev, struct ib_umad_port, cdev); mutex_lock(&port->file_mutex); if (!port->ib_dev) goto out; ret = -ENOMEM; file = kzalloc(sizeof *file, GFP_KERNEL); if (!file) goto out; mutex_init(&file->mutex); spin_lock_init(&file->send_lock); INIT_LIST_HEAD(&file->recv_list); INIT_LIST_HEAD(&file->send_list); init_waitqueue_head(&file->recv_wait); file->port = port; filp->private_data = file; list_add_tail(&file->port_list, &port->file_list); ret = nonseekable_open(inode, filp); if (ret) { list_del(&file->port_list); kfree(file); goto out; } kobject_get(&port->umad_dev->kobj); out: mutex_unlock(&port->file_mutex); return ret; } static int ib_umad_close(struct inode *inode, struct file *filp) { struct ib_umad_file *file = filp->private_data; struct ib_umad_device *dev = file->port->umad_dev; struct ib_umad_packet *packet, *tmp; int already_dead; int i; mutex_lock(&file->port->file_mutex); mutex_lock(&file->mutex); already_dead = file->agents_dead; file->agents_dead = 1; 
list_for_each_entry_safe(packet, tmp, &file->recv_list, list) { if (packet->recv_wc) ib_free_recv_mad(packet->recv_wc); kfree(packet); } list_del(&file->port_list); mutex_unlock(&file->mutex); if (!already_dead) for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i) if (file->agent[i]) ib_unregister_mad_agent(file->agent[i]); mutex_unlock(&file->port->file_mutex); kfree(file); kobject_put(&dev->kobj); return 0; } static const struct file_operations umad_fops = { .owner = THIS_MODULE, .read = ib_umad_read, .write = ib_umad_write, .poll = ib_umad_poll, .unlocked_ioctl = ib_umad_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ib_umad_compat_ioctl, #endif .open = ib_umad_open, .release = ib_umad_close, .llseek = no_llseek, }; static int ib_umad_sm_open(struct inode *inode, struct file *filp) { struct ib_umad_port *port; struct ib_port_modify props = { .set_port_cap_mask = IB_PORT_SM }; int ret; port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev); if (filp->f_flags & O_NONBLOCK) { if (down_trylock(&port->sm_sem)) { ret = -EAGAIN; goto fail; } } else { if (down_interruptible(&port->sm_sem)) { ret = -ERESTARTSYS; goto fail; } } ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props); if (ret) goto err_up_sem; filp->private_data = port; ret = nonseekable_open(inode, filp); if (ret) goto err_clr_sm_cap; kobject_get(&port->umad_dev->kobj); return 0; err_clr_sm_cap: swap(props.set_port_cap_mask, props.clr_port_cap_mask); ib_modify_port(port->ib_dev, port->port_num, 0, &props); err_up_sem: up(&port->sm_sem); fail: return ret; } static int ib_umad_sm_close(struct inode *inode, struct file *filp) { struct ib_umad_port *port = filp->private_data; struct ib_port_modify props = { .clr_port_cap_mask = IB_PORT_SM }; int ret = 0; mutex_lock(&port->file_mutex); if (port->ib_dev) ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props); mutex_unlock(&port->file_mutex); up(&port->sm_sem); kobject_put(&port->umad_dev->kobj); return ret; } static const struct file_operations 
umad_sm_fops = { .owner = THIS_MODULE, .open = ib_umad_sm_open, .release = ib_umad_sm_close, .llseek = no_llseek, }; static struct ib_client umad_client = { .name = "umad", .add = ib_umad_add_one, .remove = ib_umad_remove_one }; static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, char *buf) { struct ib_umad_port *port = dev_get_drvdata(dev); if (!port) return -ENODEV; return sprintf(buf, "%s\n", port->ib_dev->name); } static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); static ssize_t show_port(struct device *dev, struct device_attribute *attr, char *buf) { struct ib_umad_port *port = dev_get_drvdata(dev); if (!port) return -ENODEV; return sprintf(buf, "%d\n", port->port_num); } static DEVICE_ATTR(port, S_IRUGO, show_port, NULL); static CLASS_ATTR_STRING(abi_version, S_IRUGO, __stringify(IB_USER_MAD_ABI_VERSION)); static dev_t overflow_maj; static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS); static int find_overflow_devnum(void) { int ret; if (!overflow_maj) { ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2, "infiniband_mad"); if (ret) { printk(KERN_ERR "user_mad: couldn't register dynamic device number\n"); return ret; } } ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS); if (ret >= IB_UMAD_MAX_PORTS) return -1; return ret; } static int ib_umad_init_port(struct ib_device *device, int port_num, struct ib_umad_device *umad_dev, struct ib_umad_port *port) { int devnum; dev_t base; spin_lock(&port_lock); devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS); if (devnum >= IB_UMAD_MAX_PORTS) { spin_unlock(&port_lock); devnum = find_overflow_devnum(); if (devnum < 0) return -1; spin_lock(&port_lock); port->dev_num = devnum + IB_UMAD_MAX_PORTS; base = devnum + overflow_maj; set_bit(devnum, overflow_map); } else { port->dev_num = devnum; base = devnum + base_dev; set_bit(devnum, dev_map); } spin_unlock(&port_lock); port->ib_dev = device; port->port_num = port_num; sema_init(&port->sm_sem, 1); 
mutex_init(&port->file_mutex); INIT_LIST_HEAD(&port->file_list); cdev_init(&port->cdev, &umad_fops); port->cdev.owner = THIS_MODULE; port->cdev.kobj.parent = &umad_dev->kobj; kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num); if (cdev_add(&port->cdev, base, 1)) goto err_cdev; port->dev = device_create(umad_class, device->dma_device, port->cdev.dev, port, "umad%d", port->dev_num); if (IS_ERR(port->dev)) goto err_cdev; if (device_create_file(port->dev, &dev_attr_ibdev)) goto err_dev; if (device_create_file(port->dev, &dev_attr_port)) goto err_dev; base += IB_UMAD_MAX_PORTS; cdev_init(&port->sm_cdev, &umad_sm_fops); port->sm_cdev.owner = THIS_MODULE; port->sm_cdev.kobj.parent = &umad_dev->kobj; kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num); if (cdev_add(&port->sm_cdev, base, 1)) goto err_sm_cdev; port->sm_dev = device_create(umad_class, device->dma_device, port->sm_cdev.dev, port, "issm%d", port->dev_num); if (IS_ERR(port->sm_dev)) goto err_sm_cdev; if (device_create_file(port->sm_dev, &dev_attr_ibdev)) goto err_sm_dev; if (device_create_file(port->sm_dev, &dev_attr_port)) goto err_sm_dev; return 0; err_sm_dev: device_destroy(umad_class, port->sm_cdev.dev); err_sm_cdev: cdev_del(&port->sm_cdev); err_dev: device_destroy(umad_class, port->cdev.dev); err_cdev: cdev_del(&port->cdev); if (port->dev_num < IB_UMAD_MAX_PORTS) clear_bit(devnum, dev_map); else clear_bit(devnum, overflow_map); return -1; } static void ib_umad_kill_port(struct ib_umad_port *port) { struct ib_umad_file *file; int id; dev_set_drvdata(port->dev, NULL); dev_set_drvdata(port->sm_dev, NULL); device_destroy(umad_class, port->cdev.dev); device_destroy(umad_class, port->sm_cdev.dev); cdev_del(&port->cdev); cdev_del(&port->sm_cdev); mutex_lock(&port->file_mutex); port->ib_dev = NULL; list_for_each_entry(file, &port->file_list, port_list) { mutex_lock(&file->mutex); file->agents_dead = 1; mutex_unlock(&file->mutex); for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id) if (file->agent[id]) 
ib_unregister_mad_agent(file->agent[id]); } mutex_unlock(&port->file_mutex); if (port->dev_num < IB_UMAD_MAX_PORTS) clear_bit(port->dev_num, dev_map); else clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map); } static void ib_umad_add_one(struct ib_device *device) { struct ib_umad_device *umad_dev; int s, e, i; if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) return; if (device->node_type == RDMA_NODE_IB_SWITCH) s = e = 0; else { s = 1; e = device->phys_port_cnt; } umad_dev = kzalloc(sizeof *umad_dev + (e - s + 1) * sizeof (struct ib_umad_port), GFP_KERNEL); if (!umad_dev) return; kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype); umad_dev->start_port = s; umad_dev->end_port = e; for (i = s; i <= e; ++i) { umad_dev->port[i - s].umad_dev = umad_dev; if (ib_umad_init_port(device, i, umad_dev, &umad_dev->port[i - s])) goto err; } ib_set_client_data(device, &umad_client, umad_dev); return; err: while (--i >= s) ib_umad_kill_port(&umad_dev->port[i - s]); kobject_put(&umad_dev->kobj); } static void ib_umad_remove_one(struct ib_device *device) { struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client); int i; if (!umad_dev) return; for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i) ib_umad_kill_port(&umad_dev->port[i]); kobject_put(&umad_dev->kobj); } static char *umad_devnode(struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); } static int __init ib_umad_init(void) { int ret; ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2, "infiniband_mad"); if (ret) { printk(KERN_ERR "user_mad: couldn't register device number\n"); goto out; } umad_class = class_create(THIS_MODULE, "infiniband_mad"); if (IS_ERR(umad_class)) { ret = PTR_ERR(umad_class); printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n"); goto out_chrdev; } umad_class->devnode = umad_devnode; ret = class_create_file(umad_class, &class_attr_abi_version.attr); if (ret) { printk(KERN_ERR 
"user_mad: couldn't create abi_version attribute\n"); goto out_class; } ret = ib_register_client(&umad_client); if (ret) { printk(KERN_ERR "user_mad: couldn't register ib_umad client\n"); goto out_class; } return 0; out_class: class_destroy(umad_class); out_chrdev: unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2); out: return ret; } static void __exit ib_umad_cleanup(void) { ib_unregister_client(&umad_client); class_destroy(umad_class); unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2); if (overflow_maj) unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2); } module_init(ib_umad_init); module_exit(ib_umad_cleanup);
gpl-2.0
dsb9938/Rezound-ICS-Kernel
drivers/misc/atmel-ssc.c
3619
3629
/*
 * Atmel SSC driver
 *
 * Copyright (C) 2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/atmel-ssc.h>
#include <linux/slab.h>

/* Serialize access to ssc_list and user count */
static DEFINE_SPINLOCK(user_lock);
static LIST_HEAD(ssc_list);

/**
 * ssc_request - claim exclusive use of one SSC controller
 * @ssc_num: platform device id of the wanted controller
 *
 * Looks up the controller on ssc_list, marks it busy and enables its
 * peripheral clock.  Returns the ssc_device on success, ERR_PTR(-ENODEV)
 * if no such controller was probed, or ERR_PTR(-EBUSY) if it is already
 * claimed.  The caller must release it with ssc_free().
 */
struct ssc_device *ssc_request(unsigned int ssc_num)
{
	int ssc_valid = 0;
	struct ssc_device *ssc;

	spin_lock(&user_lock);
	list_for_each_entry(ssc, &ssc_list, list) {
		if (ssc->pdev->id == ssc_num) {
			ssc_valid = 1;
			break;
		}
	}

	if (!ssc_valid) {
		spin_unlock(&user_lock);
		pr_err("ssc: ssc%d platform device is missing\n", ssc_num);
		return ERR_PTR(-ENODEV);
	}

	if (ssc->user) {
		spin_unlock(&user_lock);
		dev_dbg(&ssc->pdev->dev, "module busy\n");
		return ERR_PTR(-EBUSY);
	}
	ssc->user++;
	spin_unlock(&user_lock);

	clk_enable(ssc->clk);

	return ssc;
}
EXPORT_SYMBOL(ssc_request);

/**
 * ssc_free - release a controller obtained with ssc_request()
 * @ssc: controller to release
 *
 * Drops the user count and gates the peripheral clock again.  Calling
 * this on an already-free controller only emits a debug message.
 */
void ssc_free(struct ssc_device *ssc)
{
	spin_lock(&user_lock);
	if (ssc->user) {
		ssc->user--;
		clk_disable(ssc->clk);
	} else {
		dev_dbg(&ssc->pdev->dev, "device already free\n");
	}
	spin_unlock(&user_lock);
}
EXPORT_SYMBOL(ssc_free);

static int __init ssc_probe(struct platform_device *pdev)
{
	int retval = 0;
	struct resource *regs;
	struct ssc_device *ssc;

	ssc = kzalloc(sizeof(struct ssc_device), GFP_KERNEL);
	if (!ssc) {
		dev_dbg(&pdev->dev, "out of memory\n");
		retval = -ENOMEM;
		goto out;
	}

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_dbg(&pdev->dev, "no mmio resource defined\n");
		retval = -ENXIO;
		goto out_free;
	}

	ssc->clk = clk_get(&pdev->dev, "pclk");
	if (IS_ERR(ssc->clk)) {
		dev_dbg(&pdev->dev, "no pclk clock defined\n");
		retval = -ENXIO;
		goto out_free;
	}

	ssc->pdev = pdev;
	/* resource_size() replaces the open-coded end - start + 1 */
	ssc->regs = ioremap(regs->start, resource_size(regs));
	if (!ssc->regs) {
		dev_dbg(&pdev->dev, "ioremap failed\n");
		retval = -EINVAL;
		goto out_clk;
	}

	/* disable all interrupts */
	clk_enable(ssc->clk);
	ssc_writel(ssc->regs, IDR, ~0UL);
	ssc_readl(ssc->regs, SR);
	clk_disable(ssc->clk);

	ssc->irq = platform_get_irq(pdev, 0);
	/*
	 * platform_get_irq() returns a negative errno on failure; the old
	 * "!ssc->irq" test let negative values through as bogus IRQ numbers.
	 * Keep treating 0 as an error too, preserving previous behavior.
	 */
	if (ssc->irq <= 0) {
		dev_dbg(&pdev->dev, "could not get irq\n");
		retval = -ENXIO;
		goto out_unmap;
	}

	spin_lock(&user_lock);
	list_add_tail(&ssc->list, &ssc_list);
	spin_unlock(&user_lock);

	platform_set_drvdata(pdev, ssc);

	dev_info(&pdev->dev, "Atmel SSC device at 0x%p (irq %d)\n",
			ssc->regs, ssc->irq);
	goto out;

out_unmap:
	iounmap(ssc->regs);
out_clk:
	clk_put(ssc->clk);
out_free:
	kfree(ssc);
out:
	return retval;
}

static int __devexit ssc_remove(struct platform_device *pdev)
{
	struct ssc_device *ssc = platform_get_drvdata(pdev);

	spin_lock(&user_lock);
	iounmap(ssc->regs);
	clk_put(ssc->clk);
	list_del(&ssc->list);
	kfree(ssc);
	spin_unlock(&user_lock);

	return 0;
}

static struct platform_driver ssc_driver = {
	.remove		= __devexit_p(ssc_remove),
	.driver		= {
		.name		= "ssc",
		.owner		= THIS_MODULE,
	},
};

/* Probe is __init, so register via platform_driver_probe(). */
static int __init ssc_init(void)
{
	return platform_driver_probe(&ssc_driver, ssc_probe);
}
module_init(ssc_init);

static void __exit ssc_exit(void)
{
	platform_driver_unregister(&ssc_driver);
}
module_exit(ssc_exit);

MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
MODULE_DESCRIPTION("SSC driver for Atmel AVR32 and AT91");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ssc");
gpl-2.0
koquantam/android_kernel_oc_vivalto3gvn
arch/arm/mach-gemini/board-nas4220b.c
3875
2390
/*
 * Support for Raidsonic NAS-4220-B
 *
 * Copyright (C) 2009 Janos Laube <janos.dev@gmail.com>
 *
 * based on rut1xx.c
 * Copyright (C) 2008 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <linux/mdio-gpio.h>
#include <linux/io.h>

#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>

#include <mach/hardware.h>
#include <mach/global_reg.h>

#include "common.h"

/* Front-panel LEDs driven through the leds-gpio helper driver. */
static struct gpio_led ib4220b_leds[] = {
	{
		/* HDD activity LED; no default trigger attached. */
		.name = "nas4220b:orange:hdd",
		.default_trigger = "none",
		.gpio = 60,
	},
	{
		/* OS status LED; blinks with the heartbeat trigger. */
		.name = "nas4220b:green:os",
		.default_trigger = "heartbeat",
		.gpio = 62,
	},
};

static struct gpio_led_platform_data ib4220b_leds_data = {
	.num_leds = ARRAY_SIZE(ib4220b_leds),
	.leds = ib4220b_leds,
};

static struct platform_device ib4220b_led_device = {
	.name = "leds-gpio",
	.id = -1,
	.dev = {
		.platform_data = &ib4220b_leds_data,
	},
};

/* Both push buttons are active-low GPIO lines reported as key events. */
static struct gpio_keys_button ib4220b_keys[] = {
	{
		.code = KEY_SETUP,
		.gpio = 61,
		.active_low = 1,
		.desc = "Backup Button",
		.type = EV_KEY,
	},
	{
		.code = KEY_RESTART,
		.gpio = 63,
		.active_low = 1,
		.desc = "Softreset Button",
		.type = EV_KEY,
	},
};

static struct gpio_keys_platform_data ib4220b_keys_data = {
	.buttons = ib4220b_keys,
	.nbuttons = ARRAY_SIZE(ib4220b_keys),
};

static struct platform_device ib4220b_key_device = {
	.name = "gpio-keys",
	.id = -1,
	.dev = {
		.platform_data = &ib4220b_keys_data,
	},
};

/*
 * Board init: GPIO controller must come up before any of the
 * GPIO-consuming devices (LEDs, keys) are registered.
 */
static void __init ib4220b_init(void)
{
	gemini_gpio_init();
	platform_register_uart();
	platform_register_pflash(SZ_16M, NULL, 0);
	platform_device_register(&ib4220b_led_device);
	platform_device_register(&ib4220b_key_device);
	platform_register_rtc();
}

MACHINE_START(NAS4220B, "Raidsonic NAS IB-4220-B")
	.atag_offset	= 0x100,
	.map_io		= gemini_map_io,
	.init_irq	= gemini_init_irq,
	.init_time	= gemini_timer_init,
	.init_machine	= ib4220b_init,
	.restart	= gemini_restart,
MACHINE_END
gpl-2.0
Fusion-Devices/android_kernel_cyanogen_msm8916
drivers/hid/hid-zpff.c
4387
3659
/* * Force feedback support for Zeroplus based devices * * Copyright (c) 2005, 2006 Anssi Hannula <anssi.hannula@gmail.com> */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/hid.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/module.h> #include "hid-ids.h" #ifdef CONFIG_ZEROPLUS_FF struct zpff_device { struct hid_report *report; }; static int zpff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct zpff_device *zpff = data; int left, right; /* * The following is specified the other way around in the Zeroplus * datasheet but the order below is correct for the XFX Executioner; * however it is possible that the XFX Executioner is an exception */ left = effect->u.rumble.strong_magnitude; right = effect->u.rumble.weak_magnitude; dbg_hid("called with 0x%04x 0x%04x\n", left, right); left = left * 0x7f / 0xffff; right = right * 0x7f / 0xffff; zpff->report->field[2]->value[0] = left; zpff->report->field[3]->value[0] = right; dbg_hid("running with 0x%02x 0x%02x\n", left, right); hid_hw_request(hid, zpff->report, HID_REQ_SET_REPORT); return 0; } static int zpff_init(struct hid_device *hid) { struct zpff_device *zpff; struct hid_report *report; struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); 
struct input_dev *dev = hidinput->input; int i, error; for (i = 0; i < 4; i++) { report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1); if (!report) return -ENODEV; } zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL); if (!zpff) return -ENOMEM; set_bit(FF_RUMBLE, dev->ffbit); error = input_ff_create_memless(dev, zpff, zpff_play); if (error) { kfree(zpff); return error; } zpff->report = report; zpff->report->field[0]->value[0] = 0x00; zpff->report->field[1]->value[0] = 0x02; zpff->report->field[2]->value[0] = 0x00; zpff->report->field[3]->value[0] = 0x00; hid_hw_request(hid, zpff->report, HID_REQ_SET_REPORT); hid_info(hid, "force feedback for Zeroplus based devices by Anssi Hannula <anssi.hannula@gmail.com>\n"); return 0; } #else static inline int zpff_init(struct hid_device *hid) { return 0; } #endif static int zp_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); if (ret) { hid_err(hdev, "hw start failed\n"); goto err; } zpff_init(hdev); return 0; err: return ret; } static const struct hid_device_id zp_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, { } }; MODULE_DEVICE_TABLE(hid, zp_devices); static struct hid_driver zp_driver = { .name = "zeroplus", .id_table = zp_devices, .probe = zp_probe, }; module_hid_driver(zp_driver); MODULE_LICENSE("GPL");
gpl-2.0
thanhphat11/Android_kernel_xiaomi_ALL
drivers/media/video/em28xx/em28xx-input.c
4899
14681
/*
  handle em28xx IR remotes via linux kernel input layer.

   Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
		      Markus Rechberger <mrechberger@gmail.com>
		      Mauro Carvalho Chehab <mchehab@infradead.org>
		      Sascha Sommer <saschasommer@freenet.de>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/slab.h>

#include "em28xx.h"

/* Key event emitted when the webcam snapshot button is pressed */
#define EM28XX_SNAPSHOT_KEY KEY_CAMERA
/* Snapshot-button poll interval, in milliseconds */
#define EM28XX_SBUTTON_QUERY_INTERVAL 500
/* Bit in register 0x0c that latches a snapshot-button press */
#define EM28XX_R0C_USBSUSP_SNAPSHOT 0x20

static unsigned int ir_debug;
module_param(ir_debug, int, 0644);
MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");

#define MODULE_NAME "em28xx"

/*
 * NOTE(review): both macros below expand to a bare "if" statement;
 * used unbraced directly before an "else" they would capture it.
 * A do { } while (0) wrapper would be safer — verify all call sites
 * before changing.
 */
#define i2cdprintk(fmt, arg...) \
	if (ir_debug) { \
		printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
	}

#define dprintk(fmt, arg...) \
	if (ir_debug) { \
		printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
	}

/**********************************************************
 Polling structure used by em28xx IR's
 **********************************************************/

/* Snapshot of the chip's IR status registers, filled by ->get_key() */
struct em28xx_ir_poll_result {
	unsigned int toggle_bit:1;	/* RC5-style toggle bit */
	unsigned int read_count:7;	/* hardware read counter */
	u8 rc_address;			/* remote-control address byte */
	u8 rc_data[4]; /* 1 byte on em2860/2880, 4 on em2874 */
};

/* Per-device IR state, polled from a delayed work item */
struct em28xx_IR {
	struct em28xx *dev;
	struct rc_dev *rc;
	char name[32];
	char phys[32];

	/* poll external decoder */
	int polling;			/* poll period in ms */
	struct delayed_work work;
	unsigned int full_code:1;	/* report address<<8|data vs data only */
	unsigned int last_readcount;

	/* chip-specific register reader, set by em28xx_ir_change_protocol() */
	int (*get_key)(struct em28xx_IR *, struct em28xx_ir_poll_result *);
};

/**********************************************************
 I2C IR based get keycodes - should be used with ir-kbd-i2c
 **********************************************************/

/*
 * Read one scancode byte from a Terratec I2C IR chip.
 * Returns 1 with *ir_key/*ir_raw set, 0 if no key, negative on I/O error.
 */
int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
{
	unsigned char b;

	/* poll IR chip */
	if (1 != i2c_master_recv(ir->c, &b, 1)) {
		i2cdprintk("read error\n");
		return -EIO;
	}

	/* it seems that 0xFE indicates that a button is still hold
	   down, while 0xff indicates that no button is hold
	   down. 0xfe sequences are sometimes interrupted by 0xFF */

	i2cdprintk("key %02x\n", b);

	if (b == 0xff)
		return 0;

	if (b == 0xfe)
		/* keep old data */
		return 1;

	*ir_key = b;
	*ir_raw = b;
	return 1;
}

/*
 * Read and bit-reorder an RC5 scancode from a Hauppauge em28xx I2C IR.
 * Returns 1 on a new key, 0 when there is none, negative on I/O error.
 */
int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
{
	unsigned char buf[2];
	u16 code;
	int size;

	/* poll IR chip */
	size = i2c_master_recv(ir->c, buf, sizeof(buf));

	if (size != 2)
		return -EIO;

	/* Does eliminate repeated parity code */
	if (buf[1] == 0xff)
		return 0;

	ir->old = buf[1];

	/*
	 * Rearranges bits to the right order.
	 * The bit order were determined experimentally by using
	 * The original Hauppauge Grey IR and another RC5 that uses addr=0x08
	 * The RC5 code has 14 bits, but we've experimentally determined
	 * the meaning for only 11 bits.
	 * So, the code translation is not complete. Yet, it is enough to
	 * work with the provided RC5 IR.
	 */
	code =
		 ((buf[0] & 0x01) ? 0x0020 : 0) | /* 		0010 0000 */
		 ((buf[0] & 0x02) ? 0x0010 : 0) | /* 		0001 0000 */
		 ((buf[0] & 0x04) ? 0x0008 : 0) | /* 		0000 1000 */
		 ((buf[0] & 0x08) ? 0x0004 : 0) | /* 		0000 0100 */
		 ((buf[0] & 0x10) ? 0x0002 : 0) | /* 		0000 0010 */
		 ((buf[0] & 0x20) ? 0x0001 : 0) | /* 		0000 0001 */
		 ((buf[1] & 0x08) ? 0x1000 : 0) | /* 0001 0000		  */
		 ((buf[1] & 0x10) ? 0x0800 : 0) | /* 0000 1000		  */
		 ((buf[1] & 0x20) ? 0x0400 : 0) | /* 0000 0100		  */
		 ((buf[1] & 0x40) ? 0x0200 : 0) | /* 0000 0010		  */
		 ((buf[1] & 0x80) ? 0x0100 : 0);  /* 0000 0001		  */

	i2cdprintk("ir hauppauge (em2840): code=0x%02x (rcv=0x%02x%02x)\n",
			code, buf[1], buf[0]);

	/* return key */
	*ir_key = code;
	*ir_raw = code;
	return 1;
}

/*
 * Read a 6-bit scancode from the Pinnacle USB Grey remote receiver.
 * Returns 1 on a new key, 0 when none is pending, negative on error.
 */
int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
				     u32 *ir_raw)
{
	unsigned char buf[3];

	/* poll IR chip */

	if (3 != i2c_master_recv(ir->c, buf, 3)) {
		i2cdprintk("read error\n");
		return -EIO;
	}

	i2cdprintk("key %02x\n", buf[2]&0x3f);
	if (buf[0] != 0x00)
		return 0;

	*ir_key = buf[2]&0x3f;
	*ir_raw = buf[2]&0x3f;

	return 1;
}

/*
 * Two-phase read for the Leadtek WinFast USB II Deluxe: first check the
 * key-detect flag at subaddress 0x10, then fetch the key at 0x00.
 */
int em28xx_get_key_winfast_usbii_deluxe(struct IR_i2c *ir, u32 *ir_key,
					u32 *ir_raw)
{
	unsigned char subaddr, keydetect, key;

	struct i2c_msg msg[] = { { .addr = ir->c->addr, .flags = 0, .buf = &subaddr, .len = 1},
				 { .addr = ir->c->addr, .flags = I2C_M_RD, .buf = &keydetect, .len = 1} };

	subaddr = 0x10;
	if (2 != i2c_transfer(ir->c->adapter, msg, 2)) {
		i2cdprintk("read error\n");
		return -EIO;
	}
	if (keydetect == 0x00)
		return 0;

	subaddr = 0x00;
	msg[1].buf = &key;
	if (2 != i2c_transfer(ir->c->adapter, msg, 2)) {
		i2cdprintk("read error\n");
		return -EIO;
	}
	if (key == 0x00)
		return 0;

	*ir_key = key;
	*ir_raw = key;
	return 1;
}

/**********************************************************
 Poll based get keycode functions
 **********************************************************/

/* This is for the em2860/em2880 */
static int default_polling_getkey(struct em28xx_IR *ir,
				  struct em28xx_ir_poll_result *poll_result)
{
	struct em28xx *dev = ir->dev;
	int rc;
	u8 msg[3] = { 0, 0, 0 };

	/* Read key toggle, brand, and key code
	   on registers 0x45, 0x46 and 0x47 */
	rc = dev->em28xx_read_reg_req_len(dev, 0, EM28XX_R45_IR,
					  msg, sizeof(msg));
	if (rc < 0)
		return rc;

	/* Infrared toggle (Reg 0x45[7]) */
	poll_result->toggle_bit = (msg[0] >> 7);

	/* Infrared read count (Reg 0x45[6:0] */
	poll_result->read_count = (msg[0] & 0x7f);

	/* Remote Control Address (Reg 0x46) */
	poll_result->rc_address = msg[1];

	/* Remote Control Data (Reg 0x47) */
	poll_result->rc_data[0] = msg[2];

	return 0;
}

/* Same idea for the em2874 family, which exposes a 5-byte window */
static int em2874_polling_getkey(struct em28xx_IR *ir,
				 struct em28xx_ir_poll_result *poll_result)
{
	struct em28xx *dev = ir->dev;
	int rc;
	u8 msg[5] = { 0, 0, 0, 0, 0 };

	/* Read key toggle, brand, and key code
	   on registers 0x51-55 */
	rc = dev->em28xx_read_reg_req_len(dev, 0, EM2874_R51_IR,
					  msg, sizeof(msg));
	if (rc < 0)
		return rc;

	/* Infrared toggle (Reg 0x51[7]) */
	poll_result->toggle_bit = (msg[0] >> 7);

	/* Infrared read count (Reg 0x51[6:0] */
	poll_result->read_count = (msg[0] & 0x7f);

	/* Remote Control Address (Reg 0x52) */
	poll_result->rc_address = msg[1];

	/* Remote Control Data (Reg 0x53-55) */
	poll_result->rc_data[0] = msg[2];
	poll_result->rc_data[1] = msg[3];
	poll_result->rc_data[2] = msg[4];

	return 0;
}

/**********************************************************
 Polling code for em28xx
 **********************************************************/

/*
 * One poll iteration: read the IR registers and, when the hardware
 * read counter changed, forward the scancode to rc-core.
 */
static void em28xx_ir_handle_key(struct em28xx_IR *ir)
{
	int result;
	struct em28xx_ir_poll_result poll_result;

	/* read the registers containing the IR status */
	result = ir->get_key(ir, &poll_result);
	if (unlikely(result < 0)) {
		dprintk("ir->get_key() failed %d\n", result);
		return;
	}

	if (unlikely(poll_result.read_count != ir->last_readcount)) {
		dprintk("%s: toggle: %d, count: %d, key 0x%02x%02x\n", __func__,
			poll_result.toggle_bit, poll_result.read_count,
			poll_result.rc_address, poll_result.rc_data[0]);
		if (ir->full_code)
			/* full scancode: address byte in the high bits */
			rc_keydown(ir->rc,
				   poll_result.rc_address << 8 |
				   poll_result.rc_data[0],
				   poll_result.toggle_bit);
		else
			rc_keydown(ir->rc,
				   poll_result.rc_data[0],
				   poll_result.toggle_bit);

		if (ir->dev->chip_id == CHIP_ID_EM2874 ||
		    ir->dev->chip_id == CHIP_ID_EM2884)
			/* The em2874 clears the readcount field every time the
			   register is read.  The em2860/2880 datasheet says that it
			   is supposed to clear the readcount, but it doesn't.  So with
			   the em2874, we are looking for a non-zero read count as
			   opposed to a readcount that is incrementing */
			ir->last_readcount = 0;
		else
			ir->last_readcount = poll_result.read_count;
	}
}

/* Delayed-work body: handle one poll then re-arm at ir->polling ms. */
static void em28xx_ir_work(struct work_struct *work)
{
	struct em28xx_IR *ir = container_of(work, struct em28xx_IR, work.work);

	em28xx_ir_handle_key(ir);
	schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
}

/* rc-core open callback: start the polling work immediately. */
static int em28xx_ir_start(struct rc_dev *rc)
{
	struct em28xx_IR *ir = rc->priv;

	INIT_DELAYED_WORK(&ir->work, em28xx_ir_work);
	schedule_delayed_work(&ir->work, 0);

	return 0;
}

/* rc-core close callback: stop polling and wait for the work to finish. */
static void em28xx_ir_stop(struct rc_dev *rc)
{
	struct em28xx_IR *ir = rc->priv;

	cancel_delayed_work_sync(&ir->work);
}

/*
 * Switch the chip between RC5 and NEC decoding and select the
 * chip-appropriate register reader.  RC_TYPE_UNKNOWN leaves the current
 * protocol configuration untouched (used at init time).
 */
int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
{
	int rc = 0;
	struct em28xx_IR *ir = rc_dev->priv;
	struct em28xx *dev = ir->dev;
	u8 ir_config = EM2874_IR_RC5;

	/* Adjust xclk based on IR table for RC5/NEC tables */
	if (rc_type == RC_TYPE_RC5) {
		dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
		ir->full_code = 1;
	} else if (rc_type == RC_TYPE_NEC) {
		dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE;
		ir_config = EM2874_IR_NEC;
		ir->full_code = 1;
	} else if (rc_type != RC_TYPE_UNKNOWN)
		rc = -EINVAL;

	em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk,
			      EM28XX_XCLK_IR_RC5_MODE);

	/* Setup the proper handler based on the chip */
	switch (dev->chip_id) {
	case CHIP_ID_EM2860:
	case CHIP_ID_EM2883:
		ir->get_key = default_polling_getkey;
		break;
	case CHIP_ID_EM2884:
	case CHIP_ID_EM2874:
	case CHIP_ID_EM28174:
		ir->get_key = em2874_polling_getkey;
		em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1);
		break;
	default:
		printk("Unrecognized em28xx chip id 0x%02x: IR not supported\n",
			dev->chip_id);
		rc = -EINVAL;
	}

	return rc;
}

/*
 * Allocate and register the rc-core device for a board with an IR
 * keymap.  Returns 0 on success (including "no remote on this board"),
 * or a negative errno after freeing everything it allocated.
 */
int em28xx_ir_init(struct em28xx *dev)
{
	struct em28xx_IR *ir;
	struct rc_dev *rc;
	int err = -ENOMEM;

	if (dev->board.ir_codes == NULL) {
		/* No remote control support */
		return 0;
	}

	ir = kzalloc(sizeof(*ir), GFP_KERNEL);
	rc = rc_allocate_device();
	if (!ir || !rc)
		goto err_out_free;

	/* record handles to ourself */
	ir->dev = dev;
	dev->ir = ir;
	ir->rc = rc;

	/*
	 * em2874 supports more protocols. For now, let's just announce
	 * the two protocols that were already tested
	 */
	rc->allowed_protos = RC_TYPE_RC5 | RC_TYPE_NEC;
	rc->priv = ir;
	rc->change_protocol = em28xx_ir_change_protocol;
	rc->open = em28xx_ir_start;
	rc->close = em28xx_ir_stop;

	/* By default, keep protocol field untouched */
	err = em28xx_ir_change_protocol(rc, RC_TYPE_UNKNOWN);
	if (err)
		goto err_out_free;

	/* This is how often we ask the chip for IR information */
	ir->polling = 100; /* ms */

	/* init input device */
	snprintf(ir->name, sizeof(ir->name), "em28xx IR (%s)", dev->name);

	usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
	strlcat(ir->phys, "/input0", sizeof(ir->phys));

	rc->input_name = ir->name;
	rc->input_phys = ir->phys;
	rc->input_id.bustype = BUS_USB;
	rc->input_id.version = 1;
	rc->input_id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
	rc->input_id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
	rc->dev.parent = &dev->udev->dev;
	rc->map_name = dev->board.ir_codes;
	rc->driver_name = MODULE_NAME;

	/* all done */
	err = rc_register_device(rc);
	if (err)
		goto err_out_stop;

	return 0;

err_out_stop:
	dev->ir = NULL;
err_out_free:
	rc_free_device(rc);
	kfree(ir);
	return err;
}

/* Tear down what em28xx_ir_init() registered.  Always returns 0. */
int em28xx_ir_fini(struct em28xx *dev)
{
	struct em28xx_IR *ir = dev->ir;

	/* skip detach on non attached boards */
	if (!ir)
		return 0;

	if (ir->rc)
		rc_unregister_device(ir->rc);

	/* done */
	kfree(ir);
	dev->ir = NULL;
	return 0;
}

/**********************************************************
 Handle Webcam snapshot button
 **********************************************************/

/*
 * Poll the snapshot-button latch; on a press, clear the latch and
 * emit a KEY_CAMERA press/release pair, then re-arm the poll.
 *
 * NOTE(review): no input_sync() between/after the two
 * input_report_key() calls — verify whether the events reach
 * userspace as expected without it.
 */
static void em28xx_query_sbutton(struct work_struct *work)
{
	/* Poll the register and see if the button is depressed */
	struct em28xx *dev =
		container_of(work, struct em28xx, sbutton_query_work.work);
	int ret;

	ret = em28xx_read_reg(dev, EM28XX_R0C_USBSUSP);

	if (ret & EM28XX_R0C_USBSUSP_SNAPSHOT) {
		u8 cleared;
		/* Button is depressed, clear the register */
		cleared = ((u8) ret) & ~EM28XX_R0C_USBSUSP_SNAPSHOT;
		em28xx_write_regs(dev, EM28XX_R0C_USBSUSP, &cleared, 1);

		/* Now emulate the keypress */
		input_report_key(dev->sbutton_input_dev,
				 EM28XX_SNAPSHOT_KEY, 1);
		/* Now unpress the key */
		input_report_key(dev->sbutton_input_dev,
				 EM28XX_SNAPSHOT_KEY, 0);
	}

	/* Schedule next poll */
	schedule_delayed_work(&dev->sbutton_query_work,
			      msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
}

/*
 * Register an input device for the webcam snapshot button and start
 * the polling work.  Failure is logged but deliberately not fatal.
 */
void em28xx_register_snapshot_button(struct em28xx *dev)
{
	struct input_dev *input_dev;
	int err;

	em28xx_info("Registering snapshot button...\n");
	input_dev = input_allocate_device();
	if (!input_dev) {
		em28xx_errdev("input_allocate_device failed\n");
		return;
	}

	usb_make_path(dev->udev, dev->snapshot_button_path,
		      sizeof(dev->snapshot_button_path));
	strlcat(dev->snapshot_button_path, "/sbutton",
		sizeof(dev->snapshot_button_path));
	INIT_DELAYED_WORK(&dev->sbutton_query_work, em28xx_query_sbutton);

	input_dev->name = "em28xx snapshot button";
	input_dev->phys = dev->snapshot_button_path;
	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
	set_bit(EM28XX_SNAPSHOT_KEY, input_dev->keybit);
	input_dev->keycodesize = 0;
	input_dev->keycodemax = 0;
	input_dev->id.bustype = BUS_USB;
	input_dev->id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
	input_dev->id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
	input_dev->id.version = 1;
	input_dev->dev.parent = &dev->udev->dev;

	err = input_register_device(input_dev);
	if (err) {
		em28xx_errdev("input_register_device failed\n");
		input_free_device(input_dev);
		return;
	}

	dev->sbutton_input_dev = input_dev;
	schedule_delayed_work(&dev->sbutton_query_work,
			      msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
	return;

}

/* Undo em28xx_register_snapshot_button(); safe if it never ran. */
void em28xx_deregister_snapshot_button(struct em28xx *dev)
{
	if (dev->sbutton_input_dev != NULL) {
		em28xx_info("Deregistering snapshot button\n");
		cancel_delayed_work_sync(&dev->sbutton_query_work);
		input_unregister_device(dev->sbutton_input_dev);
		dev->sbutton_input_dev = NULL;
	}
	return;
}
gpl-2.0
sebirdman/kernel_m7
sound/soc/jz4740/jz4740-i2s.c
4899
12934
/*
 * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under  the terms of the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the License, or (at your
 * option) any later version.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/clk.h>
#include <linux/delay.h>

#include <linux/dma-mapping.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>

#include "jz4740-i2s.h"
#include "jz4740-pcm.h"

/* AIC register offsets */
#define JZ_REG_AIC_CONF		0x00
#define JZ_REG_AIC_CTRL		0x04
#define JZ_REG_AIC_I2S_FMT	0x10
#define JZ_REG_AIC_FIFO_STATUS	0x14
#define JZ_REG_AIC_I2S_STATUS	0x1c
#define JZ_REG_AIC_CLK_DIV	0x30
#define JZ_REG_AIC_FIFO		0x34

/* AIC_CONF bit fields */
#define JZ_AIC_CONF_FIFO_RX_THRESHOLD_MASK (0xf << 12)
#define JZ_AIC_CONF_FIFO_TX_THRESHOLD_MASK (0xf << 8)
#define JZ_AIC_CONF_OVERFLOW_PLAY_LAST BIT(6)
#define JZ_AIC_CONF_INTERNAL_CODEC BIT(5)
#define JZ_AIC_CONF_I2S BIT(4)
#define JZ_AIC_CONF_RESET BIT(3)
#define JZ_AIC_CONF_BIT_CLK_MASTER BIT(2)
#define JZ_AIC_CONF_SYNC_CLK_MASTER BIT(1)
#define JZ_AIC_CONF_ENABLE BIT(0)

#define JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET 12
#define JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET 8

/* AIC_CTRL bit fields */
#define JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_MASK (0x7 << 19)
#define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK (0x7 << 16)
#define JZ_AIC_CTRL_ENABLE_RX_DMA BIT(15)
#define JZ_AIC_CTRL_ENABLE_TX_DMA BIT(14)
#define JZ_AIC_CTRL_MONO_TO_STEREO BIT(11)
#define JZ_AIC_CTRL_SWITCH_ENDIANNESS BIT(10)
#define JZ_AIC_CTRL_SIGNED_TO_UNSIGNED BIT(9)
#define JZ_AIC_CTRL_FLUSH		BIT(8)
#define JZ_AIC_CTRL_ENABLE_ROR_INT BIT(6)
#define JZ_AIC_CTRL_ENABLE_TUR_INT BIT(5)
#define JZ_AIC_CTRL_ENABLE_RFS_INT BIT(4)
#define JZ_AIC_CTRL_ENABLE_TFS_INT BIT(3)
#define JZ_AIC_CTRL_ENABLE_LOOPBACK BIT(2)
#define JZ_AIC_CTRL_ENABLE_PLAYBACK BIT(1)
#define JZ_AIC_CTRL_ENABLE_CAPTURE BIT(0)

#define JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_OFFSET 19
#define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET  16

/* AIC_I2S_FMT bit fields */
#define JZ_AIC_I2S_FMT_DISABLE_BIT_CLK BIT(12)
#define JZ_AIC_I2S_FMT_ENABLE_SYS_CLK BIT(4)
#define JZ_AIC_I2S_FMT_MSB BIT(0)

#define JZ_AIC_I2S_STATUS_BUSY BIT(2)

#define JZ_AIC_CLK_DIV_MASK 0xf

/* Driver state: MMIO window, clocks and per-direction DMA configs */
struct jz4740_i2s {
	struct resource *mem;
	void __iomem *base;
	dma_addr_t phys_base;

	struct clk *clk_aic;
	struct clk *clk_i2s;

	struct jz4740_pcm_config pcm_config_playback;
	struct jz4740_pcm_config pcm_config_capture;
};

static inline uint32_t jz4740_i2s_read(const struct jz4740_i2s *i2s,
	unsigned int reg)
{
	return readl(i2s->base + reg);
}

static inline void jz4740_i2s_write(const struct jz4740_i2s *i2s,
	unsigned int reg, uint32_t value)
{
	writel(value, i2s->base + reg);
}

/*
 * First-stream startup only (skipped while the DAI is already active):
 * flush the FIFO, gate the I2S clock on and enable the controller.
 */
static int jz4740_i2s_startup(struct snd_pcm_substream *substream,
	struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t conf, ctrl;

	if (dai->active)
		return 0;

	ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL);
	ctrl |= JZ_AIC_CTRL_FLUSH;
	jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl);

	clk_enable(i2s->clk_i2s);

	conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
	conf |= JZ_AIC_CONF_ENABLE;
	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);

	return 0;
}

/*
 * Last-stream shutdown only (a still-active DAI returns early):
 * disable the controller and gate the I2S clock off.
 */
static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream,
	struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t conf;

	if (dai->active)
		return;

	conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
	conf &= ~JZ_AIC_CONF_ENABLE;
	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);

	clk_disable(i2s->clk_i2s);
}

/* Start/stop the per-direction enable + DMA bits on trigger events. */
static int jz4740_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
	struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);

	uint32_t ctrl;
	uint32_t mask;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		mask = JZ_AIC_CTRL_ENABLE_PLAYBACK | JZ_AIC_CTRL_ENABLE_TX_DMA;
	else
		mask = JZ_AIC_CTRL_ENABLE_CAPTURE | JZ_AIC_CTRL_ENABLE_RX_DMA;

	ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		ctrl |= mask;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		ctrl &= ~mask;
		break;
	default:
		return -EINVAL;
	}

	jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl);

	return 0;
}

/*
 * Map the DAI format flags onto the CONF master bits and the I2S
 * format register.  Only normal bit/frame clock polarity is supported.
 */
static int jz4740_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t format = 0;
	uint32_t conf;

	conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);

	conf &= ~(JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER);

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		conf |= JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER;
		format |= JZ_AIC_I2S_FMT_ENABLE_SYS_CLK;
		break;
	case SND_SOC_DAIFMT_CBM_CFS:
		conf |= JZ_AIC_CONF_SYNC_CLK_MASTER;
		break;
	case SND_SOC_DAIFMT_CBS_CFM:
		conf |= JZ_AIC_CONF_BIT_CLK_MASTER;
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_MSB:
		format |= JZ_AIC_I2S_FMT_MSB;
		break;
	case SND_SOC_DAIFMT_I2S:
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	default:
		return -EINVAL;
	}

	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);
	jz4740_i2s_write(i2s, JZ_REG_AIC_I2S_FMT, format);

	return 0;
}

/*
 * Program sample size (S8/S16 only) and mono up-mix for the stream's
 * direction, and hand the matching DMA config to the PCM layer.
 */
static int jz4740_i2s_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	enum jz4740_dma_width dma_width;
	struct jz4740_pcm_config *pcm_config;
	unsigned int sample_size;
	uint32_t ctrl;

	ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL);

	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S8:
		sample_size = 0;
		dma_width = JZ4740_DMA_WIDTH_8BIT;
		break;
	case SNDRV_PCM_FORMAT_S16:
		sample_size = 1;
		dma_width = JZ4740_DMA_WIDTH_16BIT;
		break;
	default:
		return -EINVAL;
	}

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		ctrl &= ~JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_MASK;
		ctrl |= sample_size << JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_OFFSET;

		if (params_channels(params) == 1)
			ctrl |= JZ_AIC_CTRL_MONO_TO_STEREO;
		else
			ctrl &= ~JZ_AIC_CTRL_MONO_TO_STEREO;

		pcm_config = &i2s->pcm_config_playback;
		pcm_config->dma_config.dst_width = dma_width;

	} else {
		ctrl &= ~JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK;
		ctrl |= sample_size << JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET;

		pcm_config = &i2s->pcm_config_capture;
		pcm_config->dma_config.src_width = dma_width;
	}

	jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl);

	snd_soc_dai_set_dma_data(dai, substream, pcm_config);

	return 0;
}

/*
 * Reparent the I2S clock to the external oscillator or the half-rate
 * PLL output; only the PLL path honours the requested rate.
 */
static int jz4740_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id,
	unsigned int freq, int dir)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	struct clk *parent;
	int ret = 0;

	switch (clk_id) {
	case JZ4740_I2S_CLKSRC_EXT:
		parent = clk_get(NULL, "ext");
		clk_set_parent(i2s->clk_i2s, parent);
		break;
	case JZ4740_I2S_CLKSRC_PLL:
		parent = clk_get(NULL, "pll half");
		clk_set_parent(i2s->clk_i2s, parent);
		ret = clk_set_rate(i2s->clk_i2s, freq);
		break;
	default:
		return -EINVAL;
	}
	/* NOTE(review): clk_get() result is not checked for IS_ERR()
	   before use — confirm whether these global clocks can fail. */
	clk_put(parent);

	return ret;
}

/* Suspend: quiesce the controller (if active) and gate both clocks. */
static int jz4740_i2s_suspend(struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t conf;

	if (dai->active) {
		conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
		conf &= ~JZ_AIC_CONF_ENABLE;
		jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);

		clk_disable(i2s->clk_i2s);
	}

	clk_disable(i2s->clk_aic);

	return 0;
}

/* Resume: mirror of suspend — clocks back on, controller re-enabled. */
static int jz4740_i2s_resume(struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t conf;

	clk_enable(i2s->clk_aic);

	if (dai->active) {
		clk_enable(i2s->clk_i2s);

		conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
		conf |= JZ_AIC_CONF_ENABLE;
		jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);
	}

	return 0;
}

/*
 * Pre-fill the static parts of the playback/capture DMA configs.
 * (Name says "i2c" but this is the I2S driver — long-standing typo,
 * kept because it is a file-local symbol.)
 */
static void jz4740_i2c_init_pcm_config(struct jz4740_i2s *i2s)
{
	struct jz4740_dma_config *dma_config;

	/* Playback */
	dma_config = &i2s->pcm_config_playback.dma_config;
	dma_config->src_width = JZ4740_DMA_WIDTH_32BIT,
	dma_config->transfer_size = JZ4740_DMA_TRANSFER_SIZE_16BYTE;
	dma_config->request_type = JZ4740_DMA_TYPE_AIC_TRANSMIT;
	dma_config->flags = JZ4740_DMA_SRC_AUTOINC;
	dma_config->mode = JZ4740_DMA_MODE_SINGLE;
	i2s->pcm_config_playback.fifo_addr = i2s->phys_base + JZ_REG_AIC_FIFO;

	/* Capture */
	dma_config = &i2s->pcm_config_capture.dma_config;
	dma_config->dst_width = JZ4740_DMA_WIDTH_32BIT,
	dma_config->transfer_size = JZ4740_DMA_TRANSFER_SIZE_16BYTE;
	dma_config->request_type = JZ4740_DMA_TYPE_AIC_RECEIVE;
	dma_config->flags = JZ4740_DMA_DST_AUTOINC;
	dma_config->mode = JZ4740_DMA_MODE_SINGLE;
	i2s->pcm_config_capture.fifo_addr = i2s->phys_base + JZ_REG_AIC_FIFO;
}

/* DAI probe: reset the AIC, then program FIFO thresholds and mode. */
static int jz4740_i2s_dai_probe(struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t conf;

	clk_enable(i2s->clk_aic);

	jz4740_i2c_init_pcm_config(i2s);

	conf = (7 << JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET) |
		(8 << JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET) |
		JZ_AIC_CONF_OVERFLOW_PLAY_LAST |
		JZ_AIC_CONF_I2S |
		JZ_AIC_CONF_INTERNAL_CODEC;

	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, JZ_AIC_CONF_RESET);
	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);

	return 0;
}

static int jz4740_i2s_dai_remove(struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);

	clk_disable(i2s->clk_aic);
	return 0;
}

static const struct snd_soc_dai_ops jz4740_i2s_dai_ops = {
	.startup = jz4740_i2s_startup,
	.shutdown = jz4740_i2s_shutdown,
	.trigger = jz4740_i2s_trigger,
	.hw_params = jz4740_i2s_hw_params,
	.set_fmt = jz4740_i2s_set_fmt,
	.set_sysclk = jz4740_i2s_set_sysclk,
};

#define JZ4740_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | \
		SNDRV_PCM_FMTBIT_S16_LE)

static struct snd_soc_dai_driver jz4740_i2s_dai = {
	.probe = jz4740_i2s_dai_probe,
	.remove = jz4740_i2s_dai_remove,
	.playback = {
		.channels_min = 1,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_48000,
		.formats = JZ4740_I2S_FMTS,
	},
	.capture = {
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_48000,
		.formats = JZ4740_I2S_FMTS,
	},
	.symmetric_rates = 1,
	.ops = &jz4740_i2s_dai_ops,
	.suspend = jz4740_i2s_suspend,
	.resume = jz4740_i2s_resume,
};

/*
 * Platform probe: map the MMIO window, grab both clocks and register
 * the DAI.  Unwinds every acquired resource on failure (goto chain).
 */
static int __devinit jz4740_i2s_dev_probe(struct platform_device *pdev)
{
	struct jz4740_i2s *i2s;
	int ret;

	i2s = kzalloc(sizeof(*i2s), GFP_KERNEL);
	if (!i2s)
		return -ENOMEM;

	i2s->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!i2s->mem) {
		ret = -ENOENT;
		goto err_free;
	}

	i2s->mem = request_mem_region(i2s->mem->start, resource_size(i2s->mem),
				pdev->name);
	if (!i2s->mem) {
		ret = -EBUSY;
		goto err_free;
	}

	i2s->base = ioremap_nocache(i2s->mem->start, resource_size(i2s->mem));
	if (!i2s->base) {
		ret = -EBUSY;
		goto err_release_mem_region;
	}

	i2s->phys_base = i2s->mem->start;

	i2s->clk_aic = clk_get(&pdev->dev, "aic");
	if (IS_ERR(i2s->clk_aic)) {
		ret = PTR_ERR(i2s->clk_aic);
		goto err_iounmap;
	}

	i2s->clk_i2s = clk_get(&pdev->dev, "i2s");
	if (IS_ERR(i2s->clk_i2s)) {
		ret = PTR_ERR(i2s->clk_i2s);
		goto err_clk_put_aic;
	}

	platform_set_drvdata(pdev, i2s);
	ret = snd_soc_register_dai(&pdev->dev, &jz4740_i2s_dai);

	if (ret) {
		dev_err(&pdev->dev, "Failed to register DAI\n");
		goto err_clk_put_i2s;
	}

	return 0;

err_clk_put_i2s:
	clk_put(i2s->clk_i2s);
err_clk_put_aic:
	clk_put(i2s->clk_aic);
err_iounmap:
	iounmap(i2s->base);
err_release_mem_region:
	release_mem_region(i2s->mem->start, resource_size(i2s->mem));
err_free:
	kfree(i2s);

	return ret;
}

static int __devexit jz4740_i2s_dev_remove(struct platform_device *pdev)
{
	struct jz4740_i2s *i2s = platform_get_drvdata(pdev);

	snd_soc_unregister_dai(&pdev->dev);

	clk_put(i2s->clk_i2s);
	clk_put(i2s->clk_aic);

	iounmap(i2s->base);
	release_mem_region(i2s->mem->start, resource_size(i2s->mem));

	platform_set_drvdata(pdev, NULL);
	kfree(i2s);

	return 0;
}

static struct platform_driver jz4740_i2s_driver = {
	.probe = jz4740_i2s_dev_probe,
	.remove = __devexit_p(jz4740_i2s_dev_remove),
	.driver = {
		.name = "jz4740-i2s",
		.owner = THIS_MODULE,
	},
};

module_platform_driver(jz4740_i2s_driver);

MODULE_AUTHOR("Lars-Peter Clausen, <lars@metafoo.de>");
MODULE_DESCRIPTION("Ingenic JZ4740 SoC I2S driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:jz4740-i2s");
gpl-2.0
moscowdesire/Sony
drivers/pcmcia/tcic.c
8227
23942
/*====================================================================== Device driver for Databook TCIC-2 PCMCIA controller tcic.c 1.111 2000/02/15 04:13:12 The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The initial developer of the original code is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU General Public License version 2 (the "GPL"), in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this file only under the terms of the GPL and not to allow others to use your version of this file under the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the GPL. 
======================================================================*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/bitops.h> #include <asm/io.h> #include <pcmcia/ss.h> #include "tcic.h" MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); MODULE_DESCRIPTION("Databook TCIC-2 PCMCIA socket driver"); MODULE_LICENSE("Dual MPL/GPL"); /*====================================================================*/ /* Parameters that can be set with 'insmod' */ /* The base port address of the TCIC-2 chip */ static unsigned long tcic_base = TCIC_BASE; /* Specify a socket number to ignore */ static int ignore = -1; /* Probe for safe interrupts? */ static int do_scan = 1; /* Bit map of interrupts to choose from */ static u_int irq_mask = 0xffff; static int irq_list[16]; static unsigned int irq_list_count; /* The card status change interrupt -- 0 means autoselect */ static int cs_irq; /* Poll status interval -- 0 means default to interrupt */ static int poll_interval; /* Delay for card status double-checking */ static int poll_quick = HZ/20; /* CCLK external clock time, in nanoseconds. 
70 ns = 14.31818 MHz */ static int cycle_time = 70; module_param(tcic_base, ulong, 0444); module_param(ignore, int, 0444); module_param(do_scan, int, 0444); module_param(irq_mask, int, 0444); module_param_array(irq_list, int, &irq_list_count, 0444); module_param(cs_irq, int, 0444); module_param(poll_interval, int, 0444); module_param(poll_quick, int, 0444); module_param(cycle_time, int, 0444); /*====================================================================*/ static irqreturn_t tcic_interrupt(int irq, void *dev); static void tcic_timer(u_long data); static struct pccard_operations tcic_operations; struct tcic_socket { u_short psock; u_char last_sstat; u_char id; struct pcmcia_socket socket; }; static struct timer_list poll_timer; static int tcic_timer_pending; static int sockets; static struct tcic_socket socket_table[2]; /*====================================================================*/ /* Trick when selecting interrupts: the TCIC sktirq pin is supposed to map to irq 11, but is coded as 0 or 1 in the irq registers. */ #define TCIC_IRQ(x) ((x) ? (((x) == 11) ? 
1 : (x)) : 15) #ifdef DEBUG_X static u_char tcic_getb(u_char reg) { u_char val = inb(tcic_base+reg); printk(KERN_DEBUG "tcic_getb(%#lx) = %#x\n", tcic_base+reg, val); return val; } static u_short tcic_getw(u_char reg) { u_short val = inw(tcic_base+reg); printk(KERN_DEBUG "tcic_getw(%#lx) = %#x\n", tcic_base+reg, val); return val; } static void tcic_setb(u_char reg, u_char data) { printk(KERN_DEBUG "tcic_setb(%#lx, %#x)\n", tcic_base+reg, data); outb(data, tcic_base+reg); } static void tcic_setw(u_char reg, u_short data) { printk(KERN_DEBUG "tcic_setw(%#lx, %#x)\n", tcic_base+reg, data); outw(data, tcic_base+reg); } #else #define tcic_getb(reg) inb(tcic_base+reg) #define tcic_getw(reg) inw(tcic_base+reg) #define tcic_setb(reg, data) outb(data, tcic_base+reg) #define tcic_setw(reg, data) outw(data, tcic_base+reg) #endif static void tcic_setl(u_char reg, u_int data) { #ifdef DEBUG_X printk(KERN_DEBUG "tcic_setl(%#x, %#lx)\n", tcic_base+reg, data); #endif outw(data & 0xffff, tcic_base+reg); outw(data >> 16, tcic_base+reg+2); } static void tcic_aux_setb(u_short reg, u_char data) { u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg; tcic_setb(TCIC_MODE, mode); tcic_setb(TCIC_AUX, data); } static u_short tcic_aux_getw(u_short reg) { u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg; tcic_setb(TCIC_MODE, mode); return tcic_getw(TCIC_AUX); } static void tcic_aux_setw(u_short reg, u_short data) { u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg; tcic_setb(TCIC_MODE, mode); tcic_setw(TCIC_AUX, data); } /*====================================================================*/ /* Time conversion functions */ static int to_cycles(int ns) { if (ns < 14) return 0; else return 2*(ns-14)/cycle_time; } /*====================================================================*/ static volatile u_int irq_hits; static irqreturn_t __init tcic_irq_count(int irq, void *dev) { irq_hits++; return IRQ_HANDLED; } static u_int __init try_irq(int irq) { u_short 
cfg; irq_hits = 0; if (request_irq(irq, tcic_irq_count, 0, "irq scan", tcic_irq_count) != 0) return -1; mdelay(10); if (irq_hits) { free_irq(irq, tcic_irq_count); return -1; } /* Generate one interrupt */ cfg = TCIC_SYSCFG_AUTOBUSY | 0x0a00; tcic_aux_setw(TCIC_AUX_SYSCFG, cfg | TCIC_IRQ(irq)); tcic_setb(TCIC_IENA, TCIC_IENA_ERR | TCIC_IENA_CFG_HIGH); tcic_setb(TCIC_ICSR, TCIC_ICSR_ERR | TCIC_ICSR_JAM); udelay(1000); free_irq(irq, tcic_irq_count); /* Turn off interrupts */ tcic_setb(TCIC_IENA, TCIC_IENA_CFG_OFF); while (tcic_getb(TCIC_ICSR)) tcic_setb(TCIC_ICSR, TCIC_ICSR_JAM); tcic_aux_setw(TCIC_AUX_SYSCFG, cfg); return (irq_hits != 1); } static u_int __init irq_scan(u_int mask0) { u_int mask1; int i; #ifdef __alpha__ #define PIC 0x4d0 /* Don't probe level-triggered interrupts -- reserved for PCI */ int level_mask = inb_p(PIC) | (inb_p(PIC+1) << 8); if (level_mask) mask0 &= ~level_mask; #endif mask1 = 0; if (do_scan) { for (i = 0; i < 16; i++) if ((mask0 & (1 << i)) && (try_irq(i) == 0)) mask1 |= (1 << i); for (i = 0; i < 16; i++) if ((mask1 & (1 << i)) && (try_irq(i) != 0)) { mask1 ^= (1 << i); } } if (mask1) { printk("scanned"); } else { /* Fallback: just find interrupts that aren't in use */ for (i = 0; i < 16; i++) if ((mask0 & (1 << i)) && (request_irq(i, tcic_irq_count, 0, "x", tcic_irq_count) == 0)) { mask1 |= (1 << i); free_irq(i, tcic_irq_count); } printk("default"); } printk(") = "); for (i = 0; i < 16; i++) if (mask1 & (1<<i)) printk("%s%d", ((mask1 & ((1<<i)-1)) ? "," : ""), i); printk(" "); return mask1; } /*====================================================================== See if a card is present, powered up, in IO mode, and already bound to a (non-PCMCIA) Linux driver. We make an exception for cards that look like serial devices. 
======================================================================*/

/*
 * is_active() - returns non-zero when physical socket s holds a live,
 * powered, I/O-mode card whose first I/O window is already claimed by
 * another (non-PCMCIA) driver, so the socket should be left alone.
 */
static int __init is_active(int s)
{
    u_short scf1, ioctl, base, num;
    u_char pwr, sstat;
    u_int addr;

    /* Select socket s's SCF1 register via the indirect address port,
       then snapshot config, power and status. */
    tcic_setl(TCIC_ADDR, (s << TCIC_ADDR_SS_SHFT)
	      | TCIC_ADDR_INDREG | TCIC_SCF1(s));
    scf1 = tcic_getw(TCIC_DATA);
    pwr = tcic_getb(TCIC_PWR);
    sstat = tcic_getb(TCIC_SSTAT);
    /* Read I/O window 0's base and control registers. */
    addr = TCIC_IWIN(s, 0);
    tcic_setw(TCIC_ADDR, addr + TCIC_IBASE_X);
    base = tcic_getw(TCIC_DATA);
    tcic_setw(TCIC_ADDR, addr + TCIC_ICTL_X);
    ioctl = tcic_getw(TCIC_DATA);

    /* Decode the window: the size bit is OR'ed into the base, so
       base^(base-1) recovers the length and base&(base-1) the start. */
    if (ioctl & TCIC_ICTL_TINY)
	num = 1;
    else {
	num = (base ^ (base-1));
	base = base & (base-1);
    }
    /* Card detected, powered, in I/O mode, window enabled, and not at
       a COM-port base: (base & 0xfeef) != 0x02e8 masks out bits 4 and
       8, so it matches 0x2e8/0x2f8/0x3e8/0x3f8 -- the exception for
       "cards that look like serial devices" described above. */
    if ((sstat & TCIC_SSTAT_CD) && (pwr & TCIC_PWR_VCC(s)) &&
	(scf1 & TCIC_SCF1_IOSTS) && (ioctl & TCIC_ICTL_ENA) &&
	((base & 0xfeef) != 0x02e8)) {
	struct resource *res = request_region(base, num, "tcic-2");
	if (!res) /* region is busy */
	    return 1;
	release_region(base, num);
    }

    return 0;
}

/*======================================================================

    This returns the revision code for the specified socket.
======================================================================*/ static int __init get_tcic_id(void) { u_short id; tcic_aux_setw(TCIC_AUX_TEST, TCIC_TEST_DIAG); id = tcic_aux_getw(TCIC_AUX_ILOCK); id = (id & TCIC_ILOCKTEST_ID_MASK) >> TCIC_ILOCKTEST_ID_SH; tcic_aux_setw(TCIC_AUX_TEST, 0); return id; } /*====================================================================*/ static struct platform_driver tcic_driver = { .driver = { .name = "tcic-pcmcia", .owner = THIS_MODULE, }, }; static struct platform_device tcic_device = { .name = "tcic-pcmcia", .id = 0, }; static int __init init_tcic(void) { int i, sock, ret = 0; u_int mask, scan; if (platform_driver_register(&tcic_driver)) return -1; printk(KERN_INFO "Databook TCIC-2 PCMCIA probe: "); sock = 0; if (!request_region(tcic_base, 16, "tcic-2")) { printk("could not allocate ports,\n "); platform_driver_unregister(&tcic_driver); return -ENODEV; } else { tcic_setw(TCIC_ADDR, 0); if (tcic_getw(TCIC_ADDR) == 0) { tcic_setw(TCIC_ADDR, 0xc3a5); if (tcic_getw(TCIC_ADDR) == 0xc3a5) sock = 2; } if (sock == 0) { /* See if resetting the controller does any good */ tcic_setb(TCIC_SCTRL, TCIC_SCTRL_RESET); tcic_setb(TCIC_SCTRL, 0); tcic_setw(TCIC_ADDR, 0); if (tcic_getw(TCIC_ADDR) == 0) { tcic_setw(TCIC_ADDR, 0xc3a5); if (tcic_getw(TCIC_ADDR) == 0xc3a5) sock = 2; } } } if (sock == 0) { printk("not found.\n"); release_region(tcic_base, 16); platform_driver_unregister(&tcic_driver); return -ENODEV; } sockets = 0; for (i = 0; i < sock; i++) { if ((i == ignore) || is_active(i)) continue; socket_table[sockets].psock = i; socket_table[sockets].id = get_tcic_id(); socket_table[sockets].socket.owner = THIS_MODULE; /* only 16-bit cards, memory windows must be size-aligned */ /* No PCI or CardBus support */ socket_table[sockets].socket.features = SS_CAP_PCCARD | SS_CAP_MEM_ALIGN; /* irq 14, 11, 10, 7, 6, 5, 4, 3 */ socket_table[sockets].socket.irq_mask = 0x4cf8; /* 4K minimum window size */ socket_table[sockets].socket.map_size = 
0x1000; sockets++; } switch (socket_table[0].id) { case TCIC_ID_DB86082: printk("DB86082"); break; case TCIC_ID_DB86082A: printk("DB86082A"); break; case TCIC_ID_DB86084: printk("DB86084"); break; case TCIC_ID_DB86084A: printk("DB86084A"); break; case TCIC_ID_DB86072: printk("DB86072"); break; case TCIC_ID_DB86184: printk("DB86184"); break; case TCIC_ID_DB86082B: printk("DB86082B"); break; default: printk("Unknown ID 0x%02x", socket_table[0].id); } /* Set up polling */ poll_timer.function = &tcic_timer; poll_timer.data = 0; init_timer(&poll_timer); /* Build interrupt mask */ printk(KERN_CONT ", %d sockets\n", sockets); printk(KERN_INFO " irq list ("); if (irq_list_count == 0) mask = irq_mask; else for (i = mask = 0; i < irq_list_count; i++) mask |= (1<<irq_list[i]); /* irq 14, 11, 10, 7, 6, 5, 4, 3 */ mask &= 0x4cf8; /* Scan interrupts */ mask = irq_scan(mask); for (i=0;i<sockets;i++) socket_table[i].socket.irq_mask = mask; /* Check for only two interrupts available */ scan = (mask & (mask-1)); if (((scan & (scan-1)) == 0) && (poll_interval == 0)) poll_interval = HZ; if (poll_interval == 0) { /* Avoid irq 12 unless it is explicitly requested */ u_int cs_mask = mask & ((cs_irq) ? 
(1<<cs_irq) : ~(1<<12)); for (i = 15; i > 0; i--) if ((cs_mask & (1 << i)) && (request_irq(i, tcic_interrupt, 0, "tcic", tcic_interrupt) == 0)) break; cs_irq = i; if (cs_irq == 0) poll_interval = HZ; } if (socket_table[0].socket.irq_mask & (1 << 11)) printk("sktirq is irq 11, "); if (cs_irq != 0) printk("status change on irq %d\n", cs_irq); else printk("polled status, interval = %d ms\n", poll_interval * 1000 / HZ); for (i = 0; i < sockets; i++) { tcic_setw(TCIC_ADDR+2, socket_table[i].psock << TCIC_SS_SHFT); socket_table[i].last_sstat = tcic_getb(TCIC_SSTAT); } /* jump start interrupt handler, if needed */ tcic_interrupt(0, NULL); platform_device_register(&tcic_device); for (i = 0; i < sockets; i++) { socket_table[i].socket.ops = &tcic_operations; socket_table[i].socket.resource_ops = &pccard_nonstatic_ops; socket_table[i].socket.dev.parent = &tcic_device.dev; ret = pcmcia_register_socket(&socket_table[i].socket); if (ret && i) pcmcia_unregister_socket(&socket_table[0].socket); } return ret; return 0; } /* init_tcic */ /*====================================================================*/ static void __exit exit_tcic(void) { int i; del_timer_sync(&poll_timer); if (cs_irq != 0) { tcic_aux_setw(TCIC_AUX_SYSCFG, TCIC_SYSCFG_AUTOBUSY|0x0a00); free_irq(cs_irq, tcic_interrupt); } release_region(tcic_base, 16); for (i = 0; i < sockets; i++) { pcmcia_unregister_socket(&socket_table[i].socket); } platform_device_unregister(&tcic_device); platform_driver_unregister(&tcic_driver); } /* exit_tcic */ /*====================================================================*/ static irqreturn_t tcic_interrupt(int irq, void *dev) { int i, quick = 0; u_char latch, sstat; u_short psock; u_int events; static volatile int active = 0; if (active) { printk(KERN_NOTICE "tcic: reentered interrupt handler!\n"); return IRQ_NONE; } else active = 1; pr_debug("tcic_interrupt()\n"); for (i = 0; i < sockets; i++) { psock = socket_table[i].psock; tcic_setl(TCIC_ADDR, (psock << TCIC_ADDR_SS_SHFT) 
| TCIC_ADDR_INDREG | TCIC_SCF1(psock)); sstat = tcic_getb(TCIC_SSTAT); latch = sstat ^ socket_table[psock].last_sstat; socket_table[i].last_sstat = sstat; if (tcic_getb(TCIC_ICSR) & TCIC_ICSR_CDCHG) { tcic_setb(TCIC_ICSR, TCIC_ICSR_CLEAR); quick = 1; } if (latch == 0) continue; events = (latch & TCIC_SSTAT_CD) ? SS_DETECT : 0; events |= (latch & TCIC_SSTAT_WP) ? SS_WRPROT : 0; if (tcic_getw(TCIC_DATA) & TCIC_SCF1_IOSTS) { events |= (latch & TCIC_SSTAT_LBAT1) ? SS_STSCHG : 0; } else { events |= (latch & TCIC_SSTAT_RDY) ? SS_READY : 0; events |= (latch & TCIC_SSTAT_LBAT1) ? SS_BATDEAD : 0; events |= (latch & TCIC_SSTAT_LBAT2) ? SS_BATWARN : 0; } if (events) { pcmcia_parse_events(&socket_table[i].socket, events); } } /* Schedule next poll, if needed */ if (((cs_irq == 0) || quick) && (!tcic_timer_pending)) { poll_timer.expires = jiffies + (quick ? poll_quick : poll_interval); add_timer(&poll_timer); tcic_timer_pending = 1; } active = 0; pr_debug("interrupt done\n"); return IRQ_HANDLED; } /* tcic_interrupt */ static void tcic_timer(u_long data) { pr_debug("tcic_timer()\n"); tcic_timer_pending = 0; tcic_interrupt(0, NULL); } /* tcic_timer */ /*====================================================================*/ static int tcic_get_status(struct pcmcia_socket *sock, u_int *value) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_char reg; tcic_setl(TCIC_ADDR, (psock << TCIC_ADDR_SS_SHFT) | TCIC_ADDR_INDREG | TCIC_SCF1(psock)); reg = tcic_getb(TCIC_SSTAT); *value = (reg & TCIC_SSTAT_CD) ? SS_DETECT : 0; *value |= (reg & TCIC_SSTAT_WP) ? SS_WRPROT : 0; if (tcic_getw(TCIC_DATA) & TCIC_SCF1_IOSTS) { *value |= (reg & TCIC_SSTAT_LBAT1) ? SS_STSCHG : 0; } else { *value |= (reg & TCIC_SSTAT_RDY) ? SS_READY : 0; *value |= (reg & TCIC_SSTAT_LBAT1) ? SS_BATDEAD : 0; *value |= (reg & TCIC_SSTAT_LBAT2) ? 
SS_BATWARN : 0; } reg = tcic_getb(TCIC_PWR); if (reg & (TCIC_PWR_VCC(psock)|TCIC_PWR_VPP(psock))) *value |= SS_POWERON; dev_dbg(&sock->dev, "GetStatus(%d) = %#2.2x\n", psock, *value); return 0; } /* tcic_get_status */ /*====================================================================*/ static int tcic_set_socket(struct pcmcia_socket *sock, socket_state_t *state) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_char reg; u_short scf1, scf2; dev_dbg(&sock->dev, "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " "io_irq %d, csc_mask %#2.2x)\n", psock, state->flags, state->Vcc, state->Vpp, state->io_irq, state->csc_mask); tcic_setw(TCIC_ADDR+2, (psock << TCIC_SS_SHFT) | TCIC_ADR2_INDREG); reg = tcic_getb(TCIC_PWR); reg &= ~(TCIC_PWR_VCC(psock) | TCIC_PWR_VPP(psock)); if (state->Vcc == 50) { switch (state->Vpp) { case 0: reg |= TCIC_PWR_VCC(psock) | TCIC_PWR_VPP(psock); break; case 50: reg |= TCIC_PWR_VCC(psock); break; case 120: reg |= TCIC_PWR_VPP(psock); break; default: return -EINVAL; } } else if (state->Vcc != 0) return -EINVAL; if (reg != tcic_getb(TCIC_PWR)) tcic_setb(TCIC_PWR, reg); reg = TCIC_ILOCK_HOLD_CCLK | TCIC_ILOCK_CWAIT; if (state->flags & SS_OUTPUT_ENA) { tcic_setb(TCIC_SCTRL, TCIC_SCTRL_ENA); reg |= TCIC_ILOCK_CRESENA; } else tcic_setb(TCIC_SCTRL, 0); if (state->flags & SS_RESET) reg |= TCIC_ILOCK_CRESET; tcic_aux_setb(TCIC_AUX_ILOCK, reg); tcic_setw(TCIC_ADDR, TCIC_SCF1(psock)); scf1 = TCIC_SCF1_FINPACK; scf1 |= TCIC_IRQ(state->io_irq); if (state->flags & SS_IOCARD) { scf1 |= TCIC_SCF1_IOSTS; if (state->flags & SS_SPKR_ENA) scf1 |= TCIC_SCF1_SPKR; if (state->flags & SS_DMA_MODE) scf1 |= TCIC_SCF1_DREQ2 << TCIC_SCF1_DMA_SHIFT; } tcic_setw(TCIC_DATA, scf1); /* Some general setup stuff, and configure status interrupt */ reg = TCIC_WAIT_ASYNC | TCIC_WAIT_SENSE | to_cycles(250); tcic_aux_setb(TCIC_AUX_WCTL, reg); tcic_aux_setw(TCIC_AUX_SYSCFG, TCIC_SYSCFG_AUTOBUSY|0x0a00| TCIC_IRQ(cs_irq)); /* Card status change interrupt mask */ 
tcic_setw(TCIC_ADDR, TCIC_SCF2(psock)); scf2 = TCIC_SCF2_MALL; if (state->csc_mask & SS_DETECT) scf2 &= ~TCIC_SCF2_MCD; if (state->flags & SS_IOCARD) { if (state->csc_mask & SS_STSCHG) reg &= ~TCIC_SCF2_MLBAT1; } else { if (state->csc_mask & SS_BATDEAD) reg &= ~TCIC_SCF2_MLBAT1; if (state->csc_mask & SS_BATWARN) reg &= ~TCIC_SCF2_MLBAT2; if (state->csc_mask & SS_READY) reg &= ~TCIC_SCF2_MRDY; } tcic_setw(TCIC_DATA, scf2); /* For the ISA bus, the irq should be active-high totem-pole */ tcic_setb(TCIC_IENA, TCIC_IENA_CDCHG | TCIC_IENA_CFG_HIGH); return 0; } /* tcic_set_socket */ /*====================================================================*/ static int tcic_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_int addr; u_short base, len, ioctl; dev_dbg(&sock->dev, "SetIOMap(%d, %d, %#2.2x, %d ns, " "%#llx-%#llx)\n", psock, io->map, io->flags, io->speed, (unsigned long long)io->start, (unsigned long long)io->stop); if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || (io->stop < io->start)) return -EINVAL; tcic_setw(TCIC_ADDR+2, TCIC_ADR2_INDREG | (psock << TCIC_SS_SHFT)); addr = TCIC_IWIN(psock, io->map); base = io->start; len = io->stop - io->start; /* Check to see that len+1 is power of two, etc */ if ((len & (len+1)) || (base & len)) return -EINVAL; base |= (len+1)>>1; tcic_setw(TCIC_ADDR, addr + TCIC_IBASE_X); tcic_setw(TCIC_DATA, base); ioctl = (psock << TCIC_ICTL_SS_SHFT); ioctl |= (len == 0) ? TCIC_ICTL_TINY : 0; ioctl |= (io->flags & MAP_ACTIVE) ? TCIC_ICTL_ENA : 0; ioctl |= to_cycles(io->speed) & TCIC_ICTL_WSCNT_MASK; if (!(io->flags & MAP_AUTOSZ)) { ioctl |= TCIC_ICTL_QUIET; ioctl |= (io->flags & MAP_16BIT) ? 
TCIC_ICTL_BW_16 : TCIC_ICTL_BW_8; } tcic_setw(TCIC_ADDR, addr + TCIC_ICTL_X); tcic_setw(TCIC_DATA, ioctl); return 0; } /* tcic_set_io_map */ /*====================================================================*/ static int tcic_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_short addr, ctl; u_long base, len, mmap; dev_dbg(&sock->dev, "SetMemMap(%d, %d, %#2.2x, %d ns, " "%#llx-%#llx, %#x)\n", psock, mem->map, mem->flags, mem->speed, (unsigned long long)mem->res->start, (unsigned long long)mem->res->end, mem->card_start); if ((mem->map > 3) || (mem->card_start > 0x3ffffff) || (mem->res->start > 0xffffff) || (mem->res->end > 0xffffff) || (mem->res->start > mem->res->end) || (mem->speed > 1000)) return -EINVAL; tcic_setw(TCIC_ADDR+2, TCIC_ADR2_INDREG | (psock << TCIC_SS_SHFT)); addr = TCIC_MWIN(psock, mem->map); base = mem->res->start; len = mem->res->end - mem->res->start; if ((len & (len+1)) || (base & len)) return -EINVAL; if (len == 0x0fff) base = (base >> TCIC_MBASE_HA_SHFT) | TCIC_MBASE_4K_BIT; else base = (base | (len+1)>>1) >> TCIC_MBASE_HA_SHFT; tcic_setw(TCIC_ADDR, addr + TCIC_MBASE_X); tcic_setw(TCIC_DATA, base); mmap = mem->card_start - mem->res->start; mmap = (mmap >> TCIC_MMAP_CA_SHFT) & TCIC_MMAP_CA_MASK; if (mem->flags & MAP_ATTRIB) mmap |= TCIC_MMAP_REG; tcic_setw(TCIC_ADDR, addr + TCIC_MMAP_X); tcic_setw(TCIC_DATA, mmap); ctl = TCIC_MCTL_QUIET | (psock << TCIC_MCTL_SS_SHFT); ctl |= to_cycles(mem->speed) & TCIC_MCTL_WSCNT_MASK; ctl |= (mem->flags & MAP_16BIT) ? 0 : TCIC_MCTL_B8; ctl |= (mem->flags & MAP_WRPROT) ? TCIC_MCTL_WP : 0; ctl |= (mem->flags & MAP_ACTIVE) ? 
TCIC_MCTL_ENA : 0; tcic_setw(TCIC_ADDR, addr + TCIC_MCTL_X); tcic_setw(TCIC_DATA, ctl); return 0; } /* tcic_set_mem_map */ /*====================================================================*/ static int tcic_init(struct pcmcia_socket *s) { int i; struct resource res = { .start = 0, .end = 0x1000 }; pccard_io_map io = { 0, 0, 0, 0, 1 }; pccard_mem_map mem = { .res = &res, }; for (i = 0; i < 2; i++) { io.map = i; tcic_set_io_map(s, &io); } for (i = 0; i < 5; i++) { mem.map = i; tcic_set_mem_map(s, &mem); } return 0; } static struct pccard_operations tcic_operations = { .init = tcic_init, .get_status = tcic_get_status, .set_socket = tcic_set_socket, .set_io_map = tcic_set_io_map, .set_mem_map = tcic_set_mem_map, }; /*====================================================================*/ module_init(init_tcic); module_exit(exit_tcic);
gpl-2.0
IllusionRom-deprecated/android_kernel_lge_zee
drivers/pcmcia/tcic.c
8227
23942
/*====================================================================== Device driver for Databook TCIC-2 PCMCIA controller tcic.c 1.111 2000/02/15 04:13:12 The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The initial developer of the original code is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU General Public License version 2 (the "GPL"), in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this file only under the terms of the GPL and not to allow others to use your version of this file under the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the GPL. 
======================================================================*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/bitops.h> #include <asm/io.h> #include <pcmcia/ss.h> #include "tcic.h" MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); MODULE_DESCRIPTION("Databook TCIC-2 PCMCIA socket driver"); MODULE_LICENSE("Dual MPL/GPL"); /*====================================================================*/ /* Parameters that can be set with 'insmod' */ /* The base port address of the TCIC-2 chip */ static unsigned long tcic_base = TCIC_BASE; /* Specify a socket number to ignore */ static int ignore = -1; /* Probe for safe interrupts? */ static int do_scan = 1; /* Bit map of interrupts to choose from */ static u_int irq_mask = 0xffff; static int irq_list[16]; static unsigned int irq_list_count; /* The card status change interrupt -- 0 means autoselect */ static int cs_irq; /* Poll status interval -- 0 means default to interrupt */ static int poll_interval; /* Delay for card status double-checking */ static int poll_quick = HZ/20; /* CCLK external clock time, in nanoseconds. 
70 ns = 14.31818 MHz */ static int cycle_time = 70; module_param(tcic_base, ulong, 0444); module_param(ignore, int, 0444); module_param(do_scan, int, 0444); module_param(irq_mask, int, 0444); module_param_array(irq_list, int, &irq_list_count, 0444); module_param(cs_irq, int, 0444); module_param(poll_interval, int, 0444); module_param(poll_quick, int, 0444); module_param(cycle_time, int, 0444); /*====================================================================*/ static irqreturn_t tcic_interrupt(int irq, void *dev); static void tcic_timer(u_long data); static struct pccard_operations tcic_operations; struct tcic_socket { u_short psock; u_char last_sstat; u_char id; struct pcmcia_socket socket; }; static struct timer_list poll_timer; static int tcic_timer_pending; static int sockets; static struct tcic_socket socket_table[2]; /*====================================================================*/ /* Trick when selecting interrupts: the TCIC sktirq pin is supposed to map to irq 11, but is coded as 0 or 1 in the irq registers. */ #define TCIC_IRQ(x) ((x) ? (((x) == 11) ? 
1 : (x)) : 15) #ifdef DEBUG_X static u_char tcic_getb(u_char reg) { u_char val = inb(tcic_base+reg); printk(KERN_DEBUG "tcic_getb(%#lx) = %#x\n", tcic_base+reg, val); return val; } static u_short tcic_getw(u_char reg) { u_short val = inw(tcic_base+reg); printk(KERN_DEBUG "tcic_getw(%#lx) = %#x\n", tcic_base+reg, val); return val; } static void tcic_setb(u_char reg, u_char data) { printk(KERN_DEBUG "tcic_setb(%#lx, %#x)\n", tcic_base+reg, data); outb(data, tcic_base+reg); } static void tcic_setw(u_char reg, u_short data) { printk(KERN_DEBUG "tcic_setw(%#lx, %#x)\n", tcic_base+reg, data); outw(data, tcic_base+reg); } #else #define tcic_getb(reg) inb(tcic_base+reg) #define tcic_getw(reg) inw(tcic_base+reg) #define tcic_setb(reg, data) outb(data, tcic_base+reg) #define tcic_setw(reg, data) outw(data, tcic_base+reg) #endif static void tcic_setl(u_char reg, u_int data) { #ifdef DEBUG_X printk(KERN_DEBUG "tcic_setl(%#x, %#lx)\n", tcic_base+reg, data); #endif outw(data & 0xffff, tcic_base+reg); outw(data >> 16, tcic_base+reg+2); } static void tcic_aux_setb(u_short reg, u_char data) { u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg; tcic_setb(TCIC_MODE, mode); tcic_setb(TCIC_AUX, data); } static u_short tcic_aux_getw(u_short reg) { u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg; tcic_setb(TCIC_MODE, mode); return tcic_getw(TCIC_AUX); } static void tcic_aux_setw(u_short reg, u_short data) { u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg; tcic_setb(TCIC_MODE, mode); tcic_setw(TCIC_AUX, data); } /*====================================================================*/ /* Time conversion functions */ static int to_cycles(int ns) { if (ns < 14) return 0; else return 2*(ns-14)/cycle_time; } /*====================================================================*/ static volatile u_int irq_hits; static irqreturn_t __init tcic_irq_count(int irq, void *dev) { irq_hits++; return IRQ_HANDLED; } static u_int __init try_irq(int irq) { u_short 
cfg; irq_hits = 0; if (request_irq(irq, tcic_irq_count, 0, "irq scan", tcic_irq_count) != 0) return -1; mdelay(10); if (irq_hits) { free_irq(irq, tcic_irq_count); return -1; } /* Generate one interrupt */ cfg = TCIC_SYSCFG_AUTOBUSY | 0x0a00; tcic_aux_setw(TCIC_AUX_SYSCFG, cfg | TCIC_IRQ(irq)); tcic_setb(TCIC_IENA, TCIC_IENA_ERR | TCIC_IENA_CFG_HIGH); tcic_setb(TCIC_ICSR, TCIC_ICSR_ERR | TCIC_ICSR_JAM); udelay(1000); free_irq(irq, tcic_irq_count); /* Turn off interrupts */ tcic_setb(TCIC_IENA, TCIC_IENA_CFG_OFF); while (tcic_getb(TCIC_ICSR)) tcic_setb(TCIC_ICSR, TCIC_ICSR_JAM); tcic_aux_setw(TCIC_AUX_SYSCFG, cfg); return (irq_hits != 1); } static u_int __init irq_scan(u_int mask0) { u_int mask1; int i; #ifdef __alpha__ #define PIC 0x4d0 /* Don't probe level-triggered interrupts -- reserved for PCI */ int level_mask = inb_p(PIC) | (inb_p(PIC+1) << 8); if (level_mask) mask0 &= ~level_mask; #endif mask1 = 0; if (do_scan) { for (i = 0; i < 16; i++) if ((mask0 & (1 << i)) && (try_irq(i) == 0)) mask1 |= (1 << i); for (i = 0; i < 16; i++) if ((mask1 & (1 << i)) && (try_irq(i) != 0)) { mask1 ^= (1 << i); } } if (mask1) { printk("scanned"); } else { /* Fallback: just find interrupts that aren't in use */ for (i = 0; i < 16; i++) if ((mask0 & (1 << i)) && (request_irq(i, tcic_irq_count, 0, "x", tcic_irq_count) == 0)) { mask1 |= (1 << i); free_irq(i, tcic_irq_count); } printk("default"); } printk(") = "); for (i = 0; i < 16; i++) if (mask1 & (1<<i)) printk("%s%d", ((mask1 & ((1<<i)-1)) ? "," : ""), i); printk(" "); return mask1; } /*====================================================================== See if a card is present, powered up, in IO mode, and already bound to a (non-PCMCIA) Linux driver. We make an exception for cards that look like serial devices. 
======================================================================*/

/*
 * is_active() - returns non-zero when physical socket s holds a live,
 * powered, I/O-mode card whose first I/O window is already claimed by
 * another (non-PCMCIA) driver, so the socket should be left alone.
 */
static int __init is_active(int s)
{
    u_short scf1, ioctl, base, num;
    u_char pwr, sstat;
    u_int addr;

    /* Select socket s's SCF1 register via the indirect address port,
       then snapshot config, power and status. */
    tcic_setl(TCIC_ADDR, (s << TCIC_ADDR_SS_SHFT)
	      | TCIC_ADDR_INDREG | TCIC_SCF1(s));
    scf1 = tcic_getw(TCIC_DATA);
    pwr = tcic_getb(TCIC_PWR);
    sstat = tcic_getb(TCIC_SSTAT);
    /* Read I/O window 0's base and control registers. */
    addr = TCIC_IWIN(s, 0);
    tcic_setw(TCIC_ADDR, addr + TCIC_IBASE_X);
    base = tcic_getw(TCIC_DATA);
    tcic_setw(TCIC_ADDR, addr + TCIC_ICTL_X);
    ioctl = tcic_getw(TCIC_DATA);

    /* Decode the window: the size bit is OR'ed into the base, so
       base^(base-1) recovers the length and base&(base-1) the start. */
    if (ioctl & TCIC_ICTL_TINY)
	num = 1;
    else {
	num = (base ^ (base-1));
	base = base & (base-1);
    }
    /* Card detected, powered, in I/O mode, window enabled, and not at
       a COM-port base: (base & 0xfeef) != 0x02e8 masks out bits 4 and
       8, so it matches 0x2e8/0x2f8/0x3e8/0x3f8 -- the exception for
       "cards that look like serial devices" described above. */
    if ((sstat & TCIC_SSTAT_CD) && (pwr & TCIC_PWR_VCC(s)) &&
	(scf1 & TCIC_SCF1_IOSTS) && (ioctl & TCIC_ICTL_ENA) &&
	((base & 0xfeef) != 0x02e8)) {
	struct resource *res = request_region(base, num, "tcic-2");
	if (!res) /* region is busy */
	    return 1;
	release_region(base, num);
    }

    return 0;
}

/*======================================================================

    This returns the revision code for the specified socket.
======================================================================*/ static int __init get_tcic_id(void) { u_short id; tcic_aux_setw(TCIC_AUX_TEST, TCIC_TEST_DIAG); id = tcic_aux_getw(TCIC_AUX_ILOCK); id = (id & TCIC_ILOCKTEST_ID_MASK) >> TCIC_ILOCKTEST_ID_SH; tcic_aux_setw(TCIC_AUX_TEST, 0); return id; } /*====================================================================*/ static struct platform_driver tcic_driver = { .driver = { .name = "tcic-pcmcia", .owner = THIS_MODULE, }, }; static struct platform_device tcic_device = { .name = "tcic-pcmcia", .id = 0, }; static int __init init_tcic(void) { int i, sock, ret = 0; u_int mask, scan; if (platform_driver_register(&tcic_driver)) return -1; printk(KERN_INFO "Databook TCIC-2 PCMCIA probe: "); sock = 0; if (!request_region(tcic_base, 16, "tcic-2")) { printk("could not allocate ports,\n "); platform_driver_unregister(&tcic_driver); return -ENODEV; } else { tcic_setw(TCIC_ADDR, 0); if (tcic_getw(TCIC_ADDR) == 0) { tcic_setw(TCIC_ADDR, 0xc3a5); if (tcic_getw(TCIC_ADDR) == 0xc3a5) sock = 2; } if (sock == 0) { /* See if resetting the controller does any good */ tcic_setb(TCIC_SCTRL, TCIC_SCTRL_RESET); tcic_setb(TCIC_SCTRL, 0); tcic_setw(TCIC_ADDR, 0); if (tcic_getw(TCIC_ADDR) == 0) { tcic_setw(TCIC_ADDR, 0xc3a5); if (tcic_getw(TCIC_ADDR) == 0xc3a5) sock = 2; } } } if (sock == 0) { printk("not found.\n"); release_region(tcic_base, 16); platform_driver_unregister(&tcic_driver); return -ENODEV; } sockets = 0; for (i = 0; i < sock; i++) { if ((i == ignore) || is_active(i)) continue; socket_table[sockets].psock = i; socket_table[sockets].id = get_tcic_id(); socket_table[sockets].socket.owner = THIS_MODULE; /* only 16-bit cards, memory windows must be size-aligned */ /* No PCI or CardBus support */ socket_table[sockets].socket.features = SS_CAP_PCCARD | SS_CAP_MEM_ALIGN; /* irq 14, 11, 10, 7, 6, 5, 4, 3 */ socket_table[sockets].socket.irq_mask = 0x4cf8; /* 4K minimum window size */ socket_table[sockets].socket.map_size = 
0x1000; sockets++; } switch (socket_table[0].id) { case TCIC_ID_DB86082: printk("DB86082"); break; case TCIC_ID_DB86082A: printk("DB86082A"); break; case TCIC_ID_DB86084: printk("DB86084"); break; case TCIC_ID_DB86084A: printk("DB86084A"); break; case TCIC_ID_DB86072: printk("DB86072"); break; case TCIC_ID_DB86184: printk("DB86184"); break; case TCIC_ID_DB86082B: printk("DB86082B"); break; default: printk("Unknown ID 0x%02x", socket_table[0].id); } /* Set up polling */ poll_timer.function = &tcic_timer; poll_timer.data = 0; init_timer(&poll_timer); /* Build interrupt mask */ printk(KERN_CONT ", %d sockets\n", sockets); printk(KERN_INFO " irq list ("); if (irq_list_count == 0) mask = irq_mask; else for (i = mask = 0; i < irq_list_count; i++) mask |= (1<<irq_list[i]); /* irq 14, 11, 10, 7, 6, 5, 4, 3 */ mask &= 0x4cf8; /* Scan interrupts */ mask = irq_scan(mask); for (i=0;i<sockets;i++) socket_table[i].socket.irq_mask = mask; /* Check for only two interrupts available */ scan = (mask & (mask-1)); if (((scan & (scan-1)) == 0) && (poll_interval == 0)) poll_interval = HZ; if (poll_interval == 0) { /* Avoid irq 12 unless it is explicitly requested */ u_int cs_mask = mask & ((cs_irq) ? 
(1<<cs_irq) : ~(1<<12)); for (i = 15; i > 0; i--) if ((cs_mask & (1 << i)) && (request_irq(i, tcic_interrupt, 0, "tcic", tcic_interrupt) == 0)) break; cs_irq = i; if (cs_irq == 0) poll_interval = HZ; } if (socket_table[0].socket.irq_mask & (1 << 11)) printk("sktirq is irq 11, "); if (cs_irq != 0) printk("status change on irq %d\n", cs_irq); else printk("polled status, interval = %d ms\n", poll_interval * 1000 / HZ); for (i = 0; i < sockets; i++) { tcic_setw(TCIC_ADDR+2, socket_table[i].psock << TCIC_SS_SHFT); socket_table[i].last_sstat = tcic_getb(TCIC_SSTAT); } /* jump start interrupt handler, if needed */ tcic_interrupt(0, NULL); platform_device_register(&tcic_device); for (i = 0; i < sockets; i++) { socket_table[i].socket.ops = &tcic_operations; socket_table[i].socket.resource_ops = &pccard_nonstatic_ops; socket_table[i].socket.dev.parent = &tcic_device.dev; ret = pcmcia_register_socket(&socket_table[i].socket); if (ret && i) pcmcia_unregister_socket(&socket_table[0].socket); } return ret; return 0; } /* init_tcic */ /*====================================================================*/ static void __exit exit_tcic(void) { int i; del_timer_sync(&poll_timer); if (cs_irq != 0) { tcic_aux_setw(TCIC_AUX_SYSCFG, TCIC_SYSCFG_AUTOBUSY|0x0a00); free_irq(cs_irq, tcic_interrupt); } release_region(tcic_base, 16); for (i = 0; i < sockets; i++) { pcmcia_unregister_socket(&socket_table[i].socket); } platform_device_unregister(&tcic_device); platform_driver_unregister(&tcic_driver); } /* exit_tcic */ /*====================================================================*/ static irqreturn_t tcic_interrupt(int irq, void *dev) { int i, quick = 0; u_char latch, sstat; u_short psock; u_int events; static volatile int active = 0; if (active) { printk(KERN_NOTICE "tcic: reentered interrupt handler!\n"); return IRQ_NONE; } else active = 1; pr_debug("tcic_interrupt()\n"); for (i = 0; i < sockets; i++) { psock = socket_table[i].psock; tcic_setl(TCIC_ADDR, (psock << TCIC_ADDR_SS_SHFT) 
| TCIC_ADDR_INDREG | TCIC_SCF1(psock)); sstat = tcic_getb(TCIC_SSTAT); latch = sstat ^ socket_table[psock].last_sstat; socket_table[i].last_sstat = sstat; if (tcic_getb(TCIC_ICSR) & TCIC_ICSR_CDCHG) { tcic_setb(TCIC_ICSR, TCIC_ICSR_CLEAR); quick = 1; } if (latch == 0) continue; events = (latch & TCIC_SSTAT_CD) ? SS_DETECT : 0; events |= (latch & TCIC_SSTAT_WP) ? SS_WRPROT : 0; if (tcic_getw(TCIC_DATA) & TCIC_SCF1_IOSTS) { events |= (latch & TCIC_SSTAT_LBAT1) ? SS_STSCHG : 0; } else { events |= (latch & TCIC_SSTAT_RDY) ? SS_READY : 0; events |= (latch & TCIC_SSTAT_LBAT1) ? SS_BATDEAD : 0; events |= (latch & TCIC_SSTAT_LBAT2) ? SS_BATWARN : 0; } if (events) { pcmcia_parse_events(&socket_table[i].socket, events); } } /* Schedule next poll, if needed */ if (((cs_irq == 0) || quick) && (!tcic_timer_pending)) { poll_timer.expires = jiffies + (quick ? poll_quick : poll_interval); add_timer(&poll_timer); tcic_timer_pending = 1; } active = 0; pr_debug("interrupt done\n"); return IRQ_HANDLED; } /* tcic_interrupt */ static void tcic_timer(u_long data) { pr_debug("tcic_timer()\n"); tcic_timer_pending = 0; tcic_interrupt(0, NULL); } /* tcic_timer */ /*====================================================================*/ static int tcic_get_status(struct pcmcia_socket *sock, u_int *value) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_char reg; tcic_setl(TCIC_ADDR, (psock << TCIC_ADDR_SS_SHFT) | TCIC_ADDR_INDREG | TCIC_SCF1(psock)); reg = tcic_getb(TCIC_SSTAT); *value = (reg & TCIC_SSTAT_CD) ? SS_DETECT : 0; *value |= (reg & TCIC_SSTAT_WP) ? SS_WRPROT : 0; if (tcic_getw(TCIC_DATA) & TCIC_SCF1_IOSTS) { *value |= (reg & TCIC_SSTAT_LBAT1) ? SS_STSCHG : 0; } else { *value |= (reg & TCIC_SSTAT_RDY) ? SS_READY : 0; *value |= (reg & TCIC_SSTAT_LBAT1) ? SS_BATDEAD : 0; *value |= (reg & TCIC_SSTAT_LBAT2) ? 
SS_BATWARN : 0; } reg = tcic_getb(TCIC_PWR); if (reg & (TCIC_PWR_VCC(psock)|TCIC_PWR_VPP(psock))) *value |= SS_POWERON; dev_dbg(&sock->dev, "GetStatus(%d) = %#2.2x\n", psock, *value); return 0; } /* tcic_get_status */ /*====================================================================*/ static int tcic_set_socket(struct pcmcia_socket *sock, socket_state_t *state) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_char reg; u_short scf1, scf2; dev_dbg(&sock->dev, "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " "io_irq %d, csc_mask %#2.2x)\n", psock, state->flags, state->Vcc, state->Vpp, state->io_irq, state->csc_mask); tcic_setw(TCIC_ADDR+2, (psock << TCIC_SS_SHFT) | TCIC_ADR2_INDREG); reg = tcic_getb(TCIC_PWR); reg &= ~(TCIC_PWR_VCC(psock) | TCIC_PWR_VPP(psock)); if (state->Vcc == 50) { switch (state->Vpp) { case 0: reg |= TCIC_PWR_VCC(psock) | TCIC_PWR_VPP(psock); break; case 50: reg |= TCIC_PWR_VCC(psock); break; case 120: reg |= TCIC_PWR_VPP(psock); break; default: return -EINVAL; } } else if (state->Vcc != 0) return -EINVAL; if (reg != tcic_getb(TCIC_PWR)) tcic_setb(TCIC_PWR, reg); reg = TCIC_ILOCK_HOLD_CCLK | TCIC_ILOCK_CWAIT; if (state->flags & SS_OUTPUT_ENA) { tcic_setb(TCIC_SCTRL, TCIC_SCTRL_ENA); reg |= TCIC_ILOCK_CRESENA; } else tcic_setb(TCIC_SCTRL, 0); if (state->flags & SS_RESET) reg |= TCIC_ILOCK_CRESET; tcic_aux_setb(TCIC_AUX_ILOCK, reg); tcic_setw(TCIC_ADDR, TCIC_SCF1(psock)); scf1 = TCIC_SCF1_FINPACK; scf1 |= TCIC_IRQ(state->io_irq); if (state->flags & SS_IOCARD) { scf1 |= TCIC_SCF1_IOSTS; if (state->flags & SS_SPKR_ENA) scf1 |= TCIC_SCF1_SPKR; if (state->flags & SS_DMA_MODE) scf1 |= TCIC_SCF1_DREQ2 << TCIC_SCF1_DMA_SHIFT; } tcic_setw(TCIC_DATA, scf1); /* Some general setup stuff, and configure status interrupt */ reg = TCIC_WAIT_ASYNC | TCIC_WAIT_SENSE | to_cycles(250); tcic_aux_setb(TCIC_AUX_WCTL, reg); tcic_aux_setw(TCIC_AUX_SYSCFG, TCIC_SYSCFG_AUTOBUSY|0x0a00| TCIC_IRQ(cs_irq)); /* Card status change interrupt mask */ 
tcic_setw(TCIC_ADDR, TCIC_SCF2(psock)); scf2 = TCIC_SCF2_MALL; if (state->csc_mask & SS_DETECT) scf2 &= ~TCIC_SCF2_MCD; if (state->flags & SS_IOCARD) { if (state->csc_mask & SS_STSCHG) reg &= ~TCIC_SCF2_MLBAT1; } else { if (state->csc_mask & SS_BATDEAD) reg &= ~TCIC_SCF2_MLBAT1; if (state->csc_mask & SS_BATWARN) reg &= ~TCIC_SCF2_MLBAT2; if (state->csc_mask & SS_READY) reg &= ~TCIC_SCF2_MRDY; } tcic_setw(TCIC_DATA, scf2); /* For the ISA bus, the irq should be active-high totem-pole */ tcic_setb(TCIC_IENA, TCIC_IENA_CDCHG | TCIC_IENA_CFG_HIGH); return 0; } /* tcic_set_socket */ /*====================================================================*/ static int tcic_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_int addr; u_short base, len, ioctl; dev_dbg(&sock->dev, "SetIOMap(%d, %d, %#2.2x, %d ns, " "%#llx-%#llx)\n", psock, io->map, io->flags, io->speed, (unsigned long long)io->start, (unsigned long long)io->stop); if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || (io->stop < io->start)) return -EINVAL; tcic_setw(TCIC_ADDR+2, TCIC_ADR2_INDREG | (psock << TCIC_SS_SHFT)); addr = TCIC_IWIN(psock, io->map); base = io->start; len = io->stop - io->start; /* Check to see that len+1 is power of two, etc */ if ((len & (len+1)) || (base & len)) return -EINVAL; base |= (len+1)>>1; tcic_setw(TCIC_ADDR, addr + TCIC_IBASE_X); tcic_setw(TCIC_DATA, base); ioctl = (psock << TCIC_ICTL_SS_SHFT); ioctl |= (len == 0) ? TCIC_ICTL_TINY : 0; ioctl |= (io->flags & MAP_ACTIVE) ? TCIC_ICTL_ENA : 0; ioctl |= to_cycles(io->speed) & TCIC_ICTL_WSCNT_MASK; if (!(io->flags & MAP_AUTOSZ)) { ioctl |= TCIC_ICTL_QUIET; ioctl |= (io->flags & MAP_16BIT) ? 
TCIC_ICTL_BW_16 : TCIC_ICTL_BW_8; } tcic_setw(TCIC_ADDR, addr + TCIC_ICTL_X); tcic_setw(TCIC_DATA, ioctl); return 0; } /* tcic_set_io_map */ /*====================================================================*/ static int tcic_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_short addr, ctl; u_long base, len, mmap; dev_dbg(&sock->dev, "SetMemMap(%d, %d, %#2.2x, %d ns, " "%#llx-%#llx, %#x)\n", psock, mem->map, mem->flags, mem->speed, (unsigned long long)mem->res->start, (unsigned long long)mem->res->end, mem->card_start); if ((mem->map > 3) || (mem->card_start > 0x3ffffff) || (mem->res->start > 0xffffff) || (mem->res->end > 0xffffff) || (mem->res->start > mem->res->end) || (mem->speed > 1000)) return -EINVAL; tcic_setw(TCIC_ADDR+2, TCIC_ADR2_INDREG | (psock << TCIC_SS_SHFT)); addr = TCIC_MWIN(psock, mem->map); base = mem->res->start; len = mem->res->end - mem->res->start; if ((len & (len+1)) || (base & len)) return -EINVAL; if (len == 0x0fff) base = (base >> TCIC_MBASE_HA_SHFT) | TCIC_MBASE_4K_BIT; else base = (base | (len+1)>>1) >> TCIC_MBASE_HA_SHFT; tcic_setw(TCIC_ADDR, addr + TCIC_MBASE_X); tcic_setw(TCIC_DATA, base); mmap = mem->card_start - mem->res->start; mmap = (mmap >> TCIC_MMAP_CA_SHFT) & TCIC_MMAP_CA_MASK; if (mem->flags & MAP_ATTRIB) mmap |= TCIC_MMAP_REG; tcic_setw(TCIC_ADDR, addr + TCIC_MMAP_X); tcic_setw(TCIC_DATA, mmap); ctl = TCIC_MCTL_QUIET | (psock << TCIC_MCTL_SS_SHFT); ctl |= to_cycles(mem->speed) & TCIC_MCTL_WSCNT_MASK; ctl |= (mem->flags & MAP_16BIT) ? 0 : TCIC_MCTL_B8; ctl |= (mem->flags & MAP_WRPROT) ? TCIC_MCTL_WP : 0; ctl |= (mem->flags & MAP_ACTIVE) ? 
TCIC_MCTL_ENA : 0; tcic_setw(TCIC_ADDR, addr + TCIC_MCTL_X); tcic_setw(TCIC_DATA, ctl); return 0; } /* tcic_set_mem_map */ /*====================================================================*/ static int tcic_init(struct pcmcia_socket *s) { int i; struct resource res = { .start = 0, .end = 0x1000 }; pccard_io_map io = { 0, 0, 0, 0, 1 }; pccard_mem_map mem = { .res = &res, }; for (i = 0; i < 2; i++) { io.map = i; tcic_set_io_map(s, &io); } for (i = 0; i < 5; i++) { mem.map = i; tcic_set_mem_map(s, &mem); } return 0; } static struct pccard_operations tcic_operations = { .init = tcic_init, .get_status = tcic_get_status, .set_socket = tcic_set_socket, .set_io_map = tcic_set_io_map, .set_mem_map = tcic_set_mem_map, }; /*====================================================================*/ module_init(init_tcic); module_exit(exit_tcic);
gpl-2.0
Ryuinferno/Blazing_Kernel_t1
sound/core/seq/oss/seq_oss_midi.c
9507
17220
/* * OSS compatible sequencer driver * * MIDI device handlers * * Copyright (C) 1998,99 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <sound/asoundef.h> #include "seq_oss_midi.h" #include "seq_oss_readq.h" #include "seq_oss_timer.h" #include "seq_oss_event.h" #include <sound/seq_midi_event.h> #include "../seq_lock.h" #include <linux/init.h> #include <linux/slab.h> /* * constants */ #define SNDRV_SEQ_OSS_MAX_MIDI_NAME 30 /* * definition of midi device record */ struct seq_oss_midi { int seq_device; /* device number */ int client; /* sequencer client number */ int port; /* sequencer port number */ unsigned int flags; /* port capability */ int opened; /* flag for opening */ unsigned char name[SNDRV_SEQ_OSS_MAX_MIDI_NAME]; struct snd_midi_event *coder; /* MIDI event coder */ struct seq_oss_devinfo *devinfo; /* assigned OSSseq device */ snd_use_lock_t use_lock; }; /* * midi device table */ static int max_midi_devs; static struct seq_oss_midi *midi_devs[SNDRV_SEQ_OSS_MAX_MIDI_DEVS]; static DEFINE_SPINLOCK(register_lock); /* * prototypes */ static struct seq_oss_midi *get_mdev(int dev); static struct seq_oss_midi *get_mididev(struct seq_oss_devinfo *dp, int dev); static int send_synth_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, int dev); static int send_midi_event(struct 
seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq_oss_midi *mdev); /* * look up the existing ports * this looks a very exhausting job. */ int __init snd_seq_oss_midi_lookup_ports(int client) { struct snd_seq_client_info *clinfo; struct snd_seq_port_info *pinfo; clinfo = kzalloc(sizeof(*clinfo), GFP_KERNEL); pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); if (! clinfo || ! pinfo) { kfree(clinfo); kfree(pinfo); return -ENOMEM; } clinfo->client = -1; while (snd_seq_kernel_client_ctl(client, SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT, clinfo) == 0) { if (clinfo->client == client) continue; /* ignore myself */ pinfo->addr.client = clinfo->client; pinfo->addr.port = -1; while (snd_seq_kernel_client_ctl(client, SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, pinfo) == 0) snd_seq_oss_midi_check_new_port(pinfo); } kfree(clinfo); kfree(pinfo); return 0; } /* */ static struct seq_oss_midi * get_mdev(int dev) { struct seq_oss_midi *mdev; unsigned long flags; spin_lock_irqsave(&register_lock, flags); mdev = midi_devs[dev]; if (mdev) snd_use_lock_use(&mdev->use_lock); spin_unlock_irqrestore(&register_lock, flags); return mdev; } /* * look for the identical slot */ static struct seq_oss_midi * find_slot(int client, int port) { int i; struct seq_oss_midi *mdev; unsigned long flags; spin_lock_irqsave(&register_lock, flags); for (i = 0; i < max_midi_devs; i++) { mdev = midi_devs[i]; if (mdev && mdev->client == client && mdev->port == port) { /* found! 
*/ snd_use_lock_use(&mdev->use_lock); spin_unlock_irqrestore(&register_lock, flags); return mdev; } } spin_unlock_irqrestore(&register_lock, flags); return NULL; } #define PERM_WRITE (SNDRV_SEQ_PORT_CAP_WRITE|SNDRV_SEQ_PORT_CAP_SUBS_WRITE) #define PERM_READ (SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_SUBS_READ) /* * register a new port if it doesn't exist yet */ int snd_seq_oss_midi_check_new_port(struct snd_seq_port_info *pinfo) { int i; struct seq_oss_midi *mdev; unsigned long flags; debug_printk(("check for MIDI client %d port %d\n", pinfo->addr.client, pinfo->addr.port)); /* the port must include generic midi */ if (! (pinfo->type & SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC)) return 0; /* either read or write subscribable */ if ((pinfo->capability & PERM_WRITE) != PERM_WRITE && (pinfo->capability & PERM_READ) != PERM_READ) return 0; /* * look for the identical slot */ if ((mdev = find_slot(pinfo->addr.client, pinfo->addr.port)) != NULL) { /* already exists */ snd_use_lock_free(&mdev->use_lock); return 0; } /* * allocate midi info record */ if ((mdev = kzalloc(sizeof(*mdev), GFP_KERNEL)) == NULL) { snd_printk(KERN_ERR "can't malloc midi info\n"); return -ENOMEM; } /* copy the port information */ mdev->client = pinfo->addr.client; mdev->port = pinfo->addr.port; mdev->flags = pinfo->capability; mdev->opened = 0; snd_use_lock_init(&mdev->use_lock); /* copy and truncate the name of synth device */ strlcpy(mdev->name, pinfo->name, sizeof(mdev->name)); /* create MIDI coder */ if (snd_midi_event_new(MAX_MIDI_EVENT_BUF, &mdev->coder) < 0) { snd_printk(KERN_ERR "can't malloc midi coder\n"); kfree(mdev); return -ENOMEM; } /* OSS sequencer adds running status to all sequences */ snd_midi_event_no_status(mdev->coder, 1); /* * look for en empty slot */ spin_lock_irqsave(&register_lock, flags); for (i = 0; i < max_midi_devs; i++) { if (midi_devs[i] == NULL) break; } if (i >= max_midi_devs) { if (max_midi_devs >= SNDRV_SEQ_OSS_MAX_MIDI_DEVS) { spin_unlock_irqrestore(&register_lock, 
flags);
			snd_midi_event_free(mdev->coder);
			kfree(mdev);
			return -ENOMEM;
		}
		max_midi_devs++;
	}
	mdev->seq_device = i;
	midi_devs[mdev->seq_device] = mdev;
	spin_unlock_irqrestore(&register_lock, flags);
	return 0;
}

/*
 * release the midi device if it was registered
 */
int
snd_seq_oss_midi_check_exit_port(int client, int port)
{
	struct seq_oss_midi *mdev;
	unsigned long flags;
	int index;

	if ((mdev = find_slot(client, port)) != NULL) {
		/* unpublish the slot first so no new user can look it up */
		spin_lock_irqsave(&register_lock, flags);
		midi_devs[mdev->seq_device] = NULL;
		spin_unlock_irqrestore(&register_lock, flags);
		/* drop the reference taken by find_slot(), then wait for
		 * all remaining users before freeing the record */
		snd_use_lock_free(&mdev->use_lock);
		snd_use_lock_sync(&mdev->use_lock);
		if (mdev->coder)
			snd_midi_event_free(mdev->coder);
		kfree(mdev);
	}
	/* shrink max_midi_devs down to the highest slot still occupied */
	spin_lock_irqsave(&register_lock, flags);
	for (index = max_midi_devs - 1; index >= 0; index--) {
		if (midi_devs[index])
			break;
	}
	max_midi_devs = index + 1;
	spin_unlock_irqrestore(&register_lock, flags);
	return 0;
}

/*
 * release all registered midi devices and clear the device table
 */
void
snd_seq_oss_midi_clear_all(void)
{
	int i;
	struct seq_oss_midi *mdev;
	unsigned long flags;

	spin_lock_irqsave(&register_lock, flags);
	for (i = 0; i < max_midi_devs; i++) {
		if ((mdev = midi_devs[i]) != NULL) {
			if (mdev->coder)
				snd_midi_event_free(mdev->coder);
			kfree(mdev);
			midi_devs[i] = NULL;
		}
	}
	max_midi_devs = 0;
	spin_unlock_irqrestore(&register_lock, flags);
}

/*
 * set up midi tables
 */
void
snd_seq_oss_midi_setup(struct seq_oss_devinfo *dp)
{
	/* snapshot the current device count for this OSS client */
	dp->max_mididev = max_midi_devs;
}

/*
 * clean up midi tables
 */
void
snd_seq_oss_midi_cleanup(struct seq_oss_devinfo *dp)
{
	int i;
	/* close every device this client may still hold open */
	for (i = 0; i < dp->max_mididev; i++)
		snd_seq_oss_midi_close(dp, i);
	dp->max_mididev = 0;
}

/*
 * open all midi devices. ignore errors.
*/ void snd_seq_oss_midi_open_all(struct seq_oss_devinfo *dp, int file_mode) { int i; for (i = 0; i < dp->max_mididev; i++) snd_seq_oss_midi_open(dp, i, file_mode); } /* * get the midi device information */ static struct seq_oss_midi * get_mididev(struct seq_oss_devinfo *dp, int dev) { if (dev < 0 || dev >= dp->max_mididev) return NULL; return get_mdev(dev); } /* * open the midi device if not opened yet */ int snd_seq_oss_midi_open(struct seq_oss_devinfo *dp, int dev, int fmode) { int perm; struct seq_oss_midi *mdev; struct snd_seq_port_subscribe subs; if ((mdev = get_mididev(dp, dev)) == NULL) return -ENODEV; /* already used? */ if (mdev->opened && mdev->devinfo != dp) { snd_use_lock_free(&mdev->use_lock); return -EBUSY; } perm = 0; if (is_write_mode(fmode)) perm |= PERM_WRITE; if (is_read_mode(fmode)) perm |= PERM_READ; perm &= mdev->flags; if (perm == 0) { snd_use_lock_free(&mdev->use_lock); return -ENXIO; } /* already opened? */ if ((mdev->opened & perm) == perm) { snd_use_lock_free(&mdev->use_lock); return 0; } perm &= ~mdev->opened; memset(&subs, 0, sizeof(subs)); if (perm & PERM_WRITE) { subs.sender = dp->addr; subs.dest.client = mdev->client; subs.dest.port = mdev->port; if (snd_seq_kernel_client_ctl(dp->cseq, SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT, &subs) >= 0) mdev->opened |= PERM_WRITE; } if (perm & PERM_READ) { subs.sender.client = mdev->client; subs.sender.port = mdev->port; subs.dest = dp->addr; subs.flags = SNDRV_SEQ_PORT_SUBS_TIMESTAMP; subs.queue = dp->queue; /* queue for timestamps */ if (snd_seq_kernel_client_ctl(dp->cseq, SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT, &subs) >= 0) mdev->opened |= PERM_READ; } if (! 
mdev->opened) { snd_use_lock_free(&mdev->use_lock); return -ENXIO; } mdev->devinfo = dp; snd_use_lock_free(&mdev->use_lock); return 0; } /* * close the midi device if already opened */ int snd_seq_oss_midi_close(struct seq_oss_devinfo *dp, int dev) { struct seq_oss_midi *mdev; struct snd_seq_port_subscribe subs; if ((mdev = get_mididev(dp, dev)) == NULL) return -ENODEV; if (! mdev->opened || mdev->devinfo != dp) { snd_use_lock_free(&mdev->use_lock); return 0; } debug_printk(("closing client %d port %d mode %d\n", mdev->client, mdev->port, mdev->opened)); memset(&subs, 0, sizeof(subs)); if (mdev->opened & PERM_WRITE) { subs.sender = dp->addr; subs.dest.client = mdev->client; subs.dest.port = mdev->port; snd_seq_kernel_client_ctl(dp->cseq, SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT, &subs); } if (mdev->opened & PERM_READ) { subs.sender.client = mdev->client; subs.sender.port = mdev->port; subs.dest = dp->addr; snd_seq_kernel_client_ctl(dp->cseq, SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT, &subs); } mdev->opened = 0; mdev->devinfo = NULL; snd_use_lock_free(&mdev->use_lock); return 0; } /* * change seq capability flags to file mode flags */ int snd_seq_oss_midi_filemode(struct seq_oss_devinfo *dp, int dev) { struct seq_oss_midi *mdev; int mode; if ((mdev = get_mididev(dp, dev)) == NULL) return 0; mode = 0; if (mdev->opened & PERM_WRITE) mode |= SNDRV_SEQ_OSS_FILE_WRITE; if (mdev->opened & PERM_READ) mode |= SNDRV_SEQ_OSS_FILE_READ; snd_use_lock_free(&mdev->use_lock); return mode; } /* * reset the midi device and close it: * so far, only close the device. */ void snd_seq_oss_midi_reset(struct seq_oss_devinfo *dp, int dev) { struct seq_oss_midi *mdev; if ((mdev = get_mididev(dp, dev)) == NULL) return; if (! 
mdev->opened) { snd_use_lock_free(&mdev->use_lock); return; } if (mdev->opened & PERM_WRITE) { struct snd_seq_event ev; int c; debug_printk(("resetting client %d port %d\n", mdev->client, mdev->port)); memset(&ev, 0, sizeof(ev)); ev.dest.client = mdev->client; ev.dest.port = mdev->port; ev.queue = dp->queue; ev.source.port = dp->port; if (dp->seq_mode == SNDRV_SEQ_OSS_MODE_SYNTH) { ev.type = SNDRV_SEQ_EVENT_SENSING; snd_seq_oss_dispatch(dp, &ev, 0, 0); } for (c = 0; c < 16; c++) { ev.type = SNDRV_SEQ_EVENT_CONTROLLER; ev.data.control.channel = c; ev.data.control.param = MIDI_CTL_ALL_NOTES_OFF; snd_seq_oss_dispatch(dp, &ev, 0, 0); if (dp->seq_mode == SNDRV_SEQ_OSS_MODE_MUSIC) { ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS; snd_seq_oss_dispatch(dp, &ev, 0, 0); ev.type = SNDRV_SEQ_EVENT_PITCHBEND; ev.data.control.value = 0; snd_seq_oss_dispatch(dp, &ev, 0, 0); } } } // snd_seq_oss_midi_close(dp, dev); snd_use_lock_free(&mdev->use_lock); } /* * get client/port of the specified MIDI device */ void snd_seq_oss_midi_get_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_addr *addr) { struct seq_oss_midi *mdev; if ((mdev = get_mididev(dp, dev)) == NULL) return; addr->client = mdev->client; addr->port = mdev->port; snd_use_lock_free(&mdev->use_lock); } /* * input callback - this can be atomic */ int snd_seq_oss_midi_input(struct snd_seq_event *ev, int direct, void *private_data) { struct seq_oss_devinfo *dp = (struct seq_oss_devinfo *)private_data; struct seq_oss_midi *mdev; int rc; if (dp->readq == NULL) return 0; if ((mdev = find_slot(ev->source.client, ev->source.port)) == NULL) return 0; if (! 
(mdev->opened & PERM_READ)) { snd_use_lock_free(&mdev->use_lock); return 0; } if (dp->seq_mode == SNDRV_SEQ_OSS_MODE_MUSIC) rc = send_synth_event(dp, ev, mdev->seq_device); else rc = send_midi_event(dp, ev, mdev); snd_use_lock_free(&mdev->use_lock); return rc; } /* * convert ALSA sequencer event to OSS synth event */ static int send_synth_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, int dev) { union evrec ossev; memset(&ossev, 0, sizeof(ossev)); switch (ev->type) { case SNDRV_SEQ_EVENT_NOTEON: ossev.v.cmd = MIDI_NOTEON; break; case SNDRV_SEQ_EVENT_NOTEOFF: ossev.v.cmd = MIDI_NOTEOFF; break; case SNDRV_SEQ_EVENT_KEYPRESS: ossev.v.cmd = MIDI_KEY_PRESSURE; break; case SNDRV_SEQ_EVENT_CONTROLLER: ossev.l.cmd = MIDI_CTL_CHANGE; break; case SNDRV_SEQ_EVENT_PGMCHANGE: ossev.l.cmd = MIDI_PGM_CHANGE; break; case SNDRV_SEQ_EVENT_CHANPRESS: ossev.l.cmd = MIDI_CHN_PRESSURE; break; case SNDRV_SEQ_EVENT_PITCHBEND: ossev.l.cmd = MIDI_PITCH_BEND; break; default: return 0; /* not supported */ } ossev.v.dev = dev; switch (ev->type) { case SNDRV_SEQ_EVENT_NOTEON: case SNDRV_SEQ_EVENT_NOTEOFF: case SNDRV_SEQ_EVENT_KEYPRESS: ossev.v.code = EV_CHN_VOICE; ossev.v.note = ev->data.note.note; ossev.v.parm = ev->data.note.velocity; ossev.v.chn = ev->data.note.channel; break; case SNDRV_SEQ_EVENT_CONTROLLER: case SNDRV_SEQ_EVENT_PGMCHANGE: case SNDRV_SEQ_EVENT_CHANPRESS: ossev.l.code = EV_CHN_COMMON; ossev.l.p1 = ev->data.control.param; ossev.l.val = ev->data.control.value; ossev.l.chn = ev->data.control.channel; break; case SNDRV_SEQ_EVENT_PITCHBEND: ossev.l.code = EV_CHN_COMMON; ossev.l.val = ev->data.control.value + 8192; ossev.l.chn = ev->data.control.channel; break; } snd_seq_oss_readq_put_timestamp(dp->readq, ev->time.tick, dp->seq_mode); snd_seq_oss_readq_put_event(dp->readq, &ossev); return 0; } /* * decode event and send MIDI bytes to read queue */ static int send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq_oss_midi *mdev) { char 
msg[32]; int len; snd_seq_oss_readq_put_timestamp(dp->readq, ev->time.tick, dp->seq_mode); if (!dp->timer->running) len = snd_seq_oss_timer_start(dp->timer); if (ev->type == SNDRV_SEQ_EVENT_SYSEX) { if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE) snd_seq_oss_readq_puts(dp->readq, mdev->seq_device, ev->data.ext.ptr, ev->data.ext.len); } else { len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev); if (len > 0) snd_seq_oss_readq_puts(dp->readq, mdev->seq_device, msg, len); } return 0; } /* * dump midi data * return 0 : enqueued * non-zero : invalid - ignored */ int snd_seq_oss_midi_putc(struct seq_oss_devinfo *dp, int dev, unsigned char c, struct snd_seq_event *ev) { struct seq_oss_midi *mdev; if ((mdev = get_mididev(dp, dev)) == NULL) return -ENODEV; if (snd_midi_event_encode_byte(mdev->coder, c, ev) > 0) { snd_seq_oss_fill_addr(dp, ev, mdev->client, mdev->port); snd_use_lock_free(&mdev->use_lock); return 0; } snd_use_lock_free(&mdev->use_lock); return -EINVAL; } /* * create OSS compatible midi_info record */ int snd_seq_oss_midi_make_info(struct seq_oss_devinfo *dp, int dev, struct midi_info *inf) { struct seq_oss_midi *mdev; if ((mdev = get_mididev(dp, dev)) == NULL) return -ENXIO; inf->device = dev; inf->dev_type = 0; /* FIXME: ?? */ inf->capabilities = 0; /* FIXME: ?? 
*/
	strlcpy(inf->name, mdev->name, sizeof(inf->name));
	snd_use_lock_free(&mdev->use_lock);
	return 0;
}

#ifdef CONFIG_PROC_FS
/*
 * proc interface
 */

/* translate a capability/opened bit mask into a human readable word */
static char *
capmode_str(int val)
{
	switch (val & (PERM_READ | PERM_WRITE)) {
	case PERM_READ | PERM_WRITE:
		return "read/write";
	case PERM_READ:
		return "read";
	case PERM_WRITE:
		return "write";
	default:
		return "none";
	}
}

/* dump the state of every registered midi device to the proc buffer */
void
snd_seq_oss_midi_info_read(struct snd_info_buffer *buf)
{
	struct seq_oss_midi *dev;
	int idx;

	snd_iprintf(buf, "\nNumber of MIDI devices: %d\n", max_midi_devs);
	for (idx = 0; idx < max_midi_devs; idx++) {
		snd_iprintf(buf, "\nmidi %d: ", idx);
		dev = get_mdev(idx);
		if (dev == NULL) {
			snd_iprintf(buf, "*empty*\n");
			continue;
		}
		snd_iprintf(buf, "[%s] ALSA port %d:%d\n", dev->name,
			    dev->client, dev->port);
		snd_iprintf(buf, " capability %s / opened %s\n",
			    capmode_str(dev->flags),
			    capmode_str(dev->opened));
		snd_use_lock_free(&dev->use_lock);
	}
}
#endif /* CONFIG_PROC_FS */
gpl-2.0
fear130986/GT-I9195_EUR_KK_Opensource_kernel
drivers/mmc/host/msm_sdcc.c
36
193500
/* * linux/drivers/mmc/host/msm_sdcc.c - Qualcomm MSM 7X00A SDCC Driver * * Copyright (C) 2007 Google Inc, * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Based on mmci.c * * Author: San Mehat (san@android.com) * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/highmem.h> #include <linux/log2.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sdio.h> #include <linux/clk.h> #include <linux/scatterlist.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/debugfs.h> #include <linux/io.h> #include <linux/memory.h> #include <linux/pm_runtime.h> #include <linux/wakelock.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/pm_qos.h> #ifdef CONFIG_SEC_FPGA #include <linux/barcode_emul.h> #endif #include <asm/cacheflush.h> #include <asm/div64.h> #include <asm/sizes.h> #include <asm/mach/mmc.h> #include <mach/msm_iomap.h> #include <mach/clk.h> #include <mach/dma.h> #include <mach/sdio_al.h> #include <mach/mpm.h> #include <mach/msm_bus.h> #include "msm_sdcc.h" #include "msm_sdcc_dml.h" #if defined(CONFIG_MACH_M2_SPR) || defined(CONFIG_MACH_M2_VZW) || defined(CONFIG_MACH_M2_ATT) #include <mach/msm8960-gpio.h> #else #include <mach/apq8064-gpio.h> #endif #if defined(CONFIG_MACH_JF_SKT) || defined(CONFIG_MACH_JF_KTT) || defined(CONFIG_MACH_JF_LGT) #include <linux/mfd/pm8xxx/mpp.h> #include <../board-8064.h> struct 
pm8xxx_mpp_config_data tflash_ls_en_mpp_high = { .type = PM8XXX_MPP_TYPE_D_OUTPUT, .level = PM8921_MPP_DIG_LEVEL_S4, .control = PM8XXX_MPP_DOUT_CTRL_HIGH, }; struct pm8xxx_mpp_config_data tflash_ls_en_mpp_low = { .type = PM8XXX_MPP_TYPE_D_OUTPUT, .level = PM8921_MPP_DIG_LEVEL_S4, .control = PM8XXX_MPP_DOUT_CTRL_LOW, }; #endif #define DRIVER_NAME "msm-sdcc" #define DBG(host, fmt, args...) \ pr_debug("%s: %s: " fmt "\n", mmc_hostname(host->mmc), __func__ , args) #define IRQ_DEBUG 0 #define SPS_SDCC_PRODUCER_PIPE_INDEX 1 #define SPS_SDCC_CONSUMER_PIPE_INDEX 2 #define SPS_CONS_PERIPHERAL 0 #define SPS_PROD_PERIPHERAL 1 /* Use SPS only if transfer size is more than this macro */ #define SPS_MIN_XFER_SIZE MCI_FIFOSIZE #define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */ #define INVALID_TUNING_PHASE -1 #if defined(CONFIG_DEBUG_FS) static void msmsdcc_dbg_createhost(struct msmsdcc_host *); static struct dentry *debugfs_dir; static int msmsdcc_dbg_init(void); #endif static int msmsdcc_prep_xfer(struct msmsdcc_host *host, struct mmc_data *data); static u64 dma_mask = DMA_BIT_MASK(32); static unsigned int msmsdcc_pwrsave = 1; static struct mmc_command dummy52cmd; static struct mmc_request dummy52mrq = { .cmd = &dummy52cmd, .data = NULL, .stop = NULL, }; static struct mmc_command dummy52cmd = { .opcode = SD_IO_RW_DIRECT, .flags = MMC_RSP_PRESENT, .data = NULL, .mrq = &dummy52mrq, }; /* * An array holding the Tuning pattern to compare with when * executing a tuning cycle. 
*/ static const u32 tuning_block_64[] = { 0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE, 0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777, 0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF, 0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7 }; static const u32 tuning_block_128[] = { 0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC, 0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF, 0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF, 0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB, 0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC, 0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF, 0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB, 0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77 }; #if IRQ_DEBUG == 1 static char *irq_status_bits[] = { "cmdcrcfail", "datcrcfail", "cmdtimeout", "dattimeout", "txunderrun", "rxoverrun", "cmdrespend", "cmdsent", "dataend", NULL, "datablkend", "cmdactive", "txactive", "rxactive", "txhalfempty", "rxhalffull", "txfifofull", "rxfifofull", "txfifoempty", "rxfifoempty", "txdataavlbl", "rxdataavlbl", "sdiointr", "progdone", "atacmdcompl", "sdiointrope", "ccstimeout", NULL, NULL, NULL, NULL, NULL }; static void msmsdcc_print_status(struct msmsdcc_host *host, char *hdr, uint32_t status) { int i; pr_debug("%s-%s ", mmc_hostname(host->mmc), hdr); for (i = 0; i < 32; i++) { if (status & (1 << i)) pr_debug("%s ", irq_status_bits[i]); } pr_debug("\n"); } #endif static void msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c); static inline void msmsdcc_sync_reg_wr(struct msmsdcc_host *host); static inline void msmsdcc_delay(struct msmsdcc_host *host); static void msmsdcc_dump_sdcc_state(struct msmsdcc_host *host); static void msmsdcc_sg_start(struct msmsdcc_host *host); static int msmsdcc_vreg_reset(struct msmsdcc_host *host); static int msmsdcc_runtime_resume(struct device *dev); static int msmsdcc_execute_tuning(struct mmc_host *mmc, u32 opcode); static bool msmsdcc_is_wait_for_auto_prog_done(struct msmsdcc_host *host, struct mmc_request *mrq); 
static bool msmsdcc_is_wait_for_prog_done(struct msmsdcc_host *host,
					  struct mmc_request *mrq);

/*
 * Max scatter-gather entries usable for one request: limited by the BAM
 * descriptor pool in SPS mode, otherwise capped at MAX_NR_SG_DMA_PIO
 * for ADM-DMA/PIO transfers.
 */
static inline unsigned short msmsdcc_get_nr_sg(struct msmsdcc_host *host)
{
	unsigned short ret = NR_SG;

	if (is_sps_mode(host)) {
		/* NOTE(review): divisor 32 presumably leaves headroom in the
		 * BAM descriptor FIFO — confirm against SPS_MAX_DESCS sizing */
		ret = SPS_MAX_DESCS / 32;
	} else { /* DMA or PIO mode */
		if (NR_SG > MAX_NR_SG_DMA_PIO)
			ret = MAX_NR_SG_DMA_PIO;
	}

	return ret;
}

/* Prevent idle power collapse(pc) while operating in peripheral mode */
static void msmsdcc_pm_qos_update_latency(struct msmsdcc_host *host, int vote)
{
	/* No latency constraint configured for this host: nothing to vote */
	if (!host->cpu_dma_latency)
		return;

	if (vote)
		pm_qos_update_request(&host->pm_qos_req_dma,
				host->cpu_dma_latency);
	else
		/* Drop the vote so idle power collapse is allowed again */
		pm_qos_update_request(&host->pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
}

#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
static int msmsdcc_sps_reset_ep(struct msmsdcc_host *host,
				struct msmsdcc_sps_ep_conn_data *ep);
static int msmsdcc_sps_restore_ep(struct msmsdcc_host *host,
				struct msmsdcc_sps_ep_conn_data *ep);
#else
/* No-op stubs so callers need no #ifdefs when SPS support is compiled out */
static inline int msmsdcc_sps_init_ep_conn(struct msmsdcc_host *host,
				struct msmsdcc_sps_ep_conn_data *ep,
				bool is_producer) { return 0; }
static inline void msmsdcc_sps_exit_ep_conn(struct msmsdcc_host *host,
				struct msmsdcc_sps_ep_conn_data *ep) { }
static inline int msmsdcc_sps_reset_ep(struct msmsdcc_host *host,
				struct msmsdcc_sps_ep_conn_data *ep)
{
	return 0;
}
static inline int msmsdcc_sps_restore_ep(struct msmsdcc_host *host,
				struct msmsdcc_sps_ep_conn_data *ep)
{
	return 0;
}
static inline int msmsdcc_sps_init(struct msmsdcc_host *host) { return 0; }
static inline void msmsdcc_sps_exit(struct msmsdcc_host *host) {}
#endif /* CONFIG_MMC_MSM_SPS_SUPPORT */

/**
 * Apply reset
 *
 * This function resets SPS BAM and DML cores.
 *
 * This function should be called to recover from error
 * conditions encountered during CMD/DATA transfers with card.
 *
 * @host - Pointer to driver's host structure
 *
 */
static int msmsdcc_bam_dml_reset_and_restore(struct msmsdcc_host *host)
{
	int rc;

	/* Reset all SDCC BAM pipes */
	rc = msmsdcc_sps_reset_ep(host, &host->sps.prod);
	if (rc) {
		pr_err("%s: msmsdcc_sps_reset_ep(prod) error=%d\n",
				mmc_hostname(host->mmc), rc);
		goto out;
	}
	rc = msmsdcc_sps_reset_ep(host, &host->sps.cons);
	if (rc) {
		pr_err("%s: msmsdcc_sps_reset_ep(cons) error=%d\n",
				mmc_hostname(host->mmc), rc);
		goto out;
	}

	/* Reset BAM */
	rc = sps_device_reset(host->sps.bam_handle);
	if (rc) {
		pr_err("%s: sps_device_reset error=%d\n",
				mmc_hostname(host->mmc), rc);
		goto out;
	}

	/* Clear out stale descriptors before the pipes are reconnected */
	memset(host->sps.prod.config.desc.base, 0x00,
			host->sps.prod.config.desc.size);
	memset(host->sps.cons.config.desc.base, 0x00,
			host->sps.cons.config.desc.size);

	/* Restore all BAM pipes connections */
	rc = msmsdcc_sps_restore_ep(host, &host->sps.prod);
	if (rc) {
		pr_err("%s: msmsdcc_sps_restore_ep(prod) error=%d\n",
				mmc_hostname(host->mmc), rc);
		goto out;
	}
	rc = msmsdcc_sps_restore_ep(host, &host->sps.cons);
	if (rc) {
		pr_err("%s: msmsdcc_sps_restore_ep(cons) error=%d\n",
				mmc_hostname(host->mmc), rc);
		goto out;
	}

	/* Reset and init DML */
	rc = msmsdcc_dml_init(host);
	if (rc)
		pr_err("%s: msmsdcc_dml_init error=%d\n",
				mmc_hostname(host->mmc), rc);
out:
	/* Only clear the deferred-reset request once the full sequence worked */
	if (!rc)
		host->sps.reset_bam = false;
	return rc;
}

/**
 * Apply soft reset
 *
 * This function applies soft reset to SDCC core.
 *
 * This function should be called to recover from error
 * conditions encountered with CMD/DATA transfers with card.
 *
 * Soft reset should only be used with SDCC controller v4.
 *
 * @host - Pointer to driver's host structure
 *
 */
static void msmsdcc_soft_reset(struct msmsdcc_host *host)
{
	/*
	 * Reset controller state machines without resetting
	 * configuration registers (MCI_POWER, MCI_CLK, MCI_INT_MASKn).
	 */
	if (is_sw_reset_save_config(host)) {
		ktime_t start;

		/* Self-clearing reset bit: set it, then poll until cleared */
		writel_relaxed(readl_relaxed(host->base + MMCIPOWER)
				| MCI_SW_RST_CFG, host->base + MMCIPOWER);
		msmsdcc_sync_reg_wr(host);

		start = ktime_get();
		while (readl_relaxed(host->base + MMCIPOWER) & MCI_SW_RST_CFG) {
			/*
			 * SW reset can take upto 10HCLK + 15MCLK cycles.
			 * Calculating based on min clk rates (hclk = 27MHz,
			 * mclk = 400KHz) it comes to ~40us. Let's poll for
			 * max. 1ms for reset completion.
			 */
			if (ktime_to_us(ktime_sub(ktime_get(), start)) > 1000) {
				pr_err("%s: %s failed\n",
					mmc_hostname(host->mmc), __func__);
				BUG();
			}
		}
	} else {
		/* Older cores: clear CMD/DATA state machines directly */
		writel_relaxed(0, host->base + MMCICOMMAND);
		msmsdcc_sync_reg_wr(host);
		writel_relaxed(0, host->base + MMCIDATACTRL);
		msmsdcc_sync_reg_wr(host);
	}
}

/*
 * Return the controller to power-on defaults, either via the SDCC's own
 * MCI_SW_RST bit or, failing that, by pulsing the clock-control reset line.
 */
static void msmsdcc_hard_reset(struct msmsdcc_host *host)
{
	int ret;

	/*
	 * Reset SDCC controller to power on default state.
	 * Don't issue a reset request to clock control block if
	 * SDCC controller itself can support hard reset.
	 */
	if (is_sw_hard_reset(host)) {
		ktime_t start;

		writel_relaxed(readl_relaxed(host->base + MMCIPOWER)
				| MCI_SW_RST, host->base + MMCIPOWER);
		msmsdcc_sync_reg_wr(host);

		start = ktime_get();
		while (readl_relaxed(host->base + MMCIPOWER) & MCI_SW_RST) {
			/*
			 * See comment in msmsdcc_soft_reset() on choosing 1ms
			 * poll timeout.
			 */
			if (ktime_to_us(ktime_sub(ktime_get(), start)) > 1000) {
				pr_err("%s: %s failed\n",
					mmc_hostname(host->mmc), __func__);
				BUG();
			}
		}
	} else {
		ret = clk_reset(host->clk, CLK_RESET_ASSERT);
		if (ret)
			pr_err("%s: Clock assert failed at %u Hz" \
				" with err %d\n", mmc_hostname(host->mmc),
				host->clk_rate, ret);

		ret = clk_reset(host->clk, CLK_RESET_DEASSERT);
		if (ret)
			pr_err("%s: Clock deassert failed at %u Hz" \
				" with err %d\n", mmc_hostname(host->mmc),
				host->clk_rate, ret);

		mb();
		/* Give some delay for clock reset to propogate to controller */
		msmsdcc_delay(host);
	}
}

/*
 * Recover the controller after an error: soft reset where supported,
 * otherwise hard reset with save/restore of CLK, MASK0, POWER (and the
 * DLL config when tuning is in use, since hard reset clobbers it).
 */
static void msmsdcc_reset_and_restore(struct msmsdcc_host *host)
{
	if (is_soft_reset(host)) {
		msmsdcc_soft_reset(host);

		pr_debug("%s: Applied soft reset to Controller\n",
				mmc_hostname(host->mmc));
	} else {
		/* Give Clock reset (hard reset) to controller */
		u32 mci_clk = 0;
		u32 mci_mask0 = 0;
		u32 dll_config = 0;

		/* Save the controller state */
		mci_clk = readl_relaxed(host->base + MMCICLOCK);
		mci_mask0 = readl_relaxed(host->base + MMCIMASK0);
		host->pwr = readl_relaxed(host->base + MMCIPOWER);
		if (host->tuning_needed)
			dll_config = readl_relaxed(host->base + MCI_DLL_CONFIG);
		mb();

		msmsdcc_hard_reset(host);
		pr_debug("%s: Applied hard reset to controller\n",
				mmc_hostname(host->mmc));

		/* Restore the contoller state */
		writel_relaxed(host->pwr, host->base + MMCIPOWER);
		msmsdcc_sync_reg_wr(host);
		writel_relaxed(mci_clk, host->base + MMCICLOCK);
		msmsdcc_sync_reg_wr(host);
		writel_relaxed(mci_mask0, host->base + MMCIMASK0);
		if (host->tuning_needed)
			writel_relaxed(dll_config, host->base + MCI_DLL_CONFIG);
		mb(); /* no delay required after writing to MASK0 register */
	}

	if (is_sps_mode(host))
		/*
		 * delay the SPS BAM reset in thread context as
		 * sps_connect/sps_disconnect APIs can be called
		 * only from non-atomic context.
		 */
		host->sps.reset_bam = true;

	if (host->dummy_52_needed)
		host->dummy_52_needed = 0;
}

/*
 * Disable the Data Path State Machine for the current request, polling
 * TX/RX inactivity first where the controller requires it.
 */
static void msmsdcc_reset_dpsm(struct msmsdcc_host *host)
{
	struct mmc_request *mrq = host->curr.mrq;

	/* Nothing to do if there is no in-flight data request */
	if (!mrq || !mrq->cmd || !mrq->data)
		goto out;

	/*
	 * If we have not waited for the prog done for write transfer then
	 * perform the DPSM reset without polling for TXACTIVE.
	 * Otherwise, we poll here unnecessarily as TXACTIVE will not be
	 * deasserted until DAT0 (Busy line) goes high.
	 */
	if (mrq->data->flags & MMC_DATA_WRITE) {
		if (!msmsdcc_is_wait_for_prog_done(host, mrq)) {
			if (is_wait_for_tx_rx_active(host) &&
			    !is_auto_prog_done(host))
				pr_warning("%s: %s: AUTO_PROG_DONE capability is must\n",
					   mmc_hostname(host->mmc), __func__);
			goto no_polling;
		}
	}

	/* Make sure h/w (TX/RX) is inactive before resetting DPSM */
	if (is_wait_for_tx_rx_active(host)) {
		ktime_t start = ktime_get();

		/* NOTE(review): bit 17 of MMCICLOCK is set around the poll
		 * and cleared afterwards — presumably a clock-gating
		 * override; confirm against the SDCC register spec */
		writel_relaxed(readl_relaxed(host->base + MMCICLOCK)
				| (1 << 17), host->base + MMCICLOCK);
		msmsdcc_sync_reg_wr(host);

		while (readl_relaxed(host->base + MMCISTATUS) &
				(MCI_TXACTIVE | MCI_RXACTIVE)) {
			/*
			 * TX/RX active bits may be asserted for 4HCLK + 4MCLK
			 * cycles (~11us) after data transfer due to clock mux
			 * switching delays. Let's poll for 1ms and panic if
			 * still active.
			 */
			if (ktime_to_us(ktime_sub(ktime_get(), start)) > 1000) {
				pr_err("%s: %s still active\n",
					mmc_hostname(host->mmc),
					readl_relaxed(host->base + MMCISTATUS)
					& MCI_TXACTIVE ?
"TX" : "RX");
				msmsdcc_dump_sdcc_state(host);
				/* Poll timed out: full recovery, skip DPSM write */
				msmsdcc_reset_and_restore(host);
				host->pending_dpsm_reset = false;
				goto out;
			}
		}

		writel_relaxed(readl_relaxed(host->base + MMCICLOCK)
				& ~(1 << 17), host->base + MMCICLOCK);
		msmsdcc_sync_reg_wr(host);
	}

no_polling:
	writel_relaxed(0, host->base + MMCIDATACTRL);
	msmsdcc_sync_reg_wr(host); /* Allow the DPSM to be reset */
out:
	return;
}

/*
 * Complete @mrq back to the MMC core. Caller holds host->lock; it is
 * dropped around mmc_request_done() since the core may re-enter the
 * driver from that callback.
 */
static int msmsdcc_request_end(struct msmsdcc_host *host,
			       struct mmc_request *mrq)
{
	int retval = 0;

	BUG_ON(host->curr.data);

	del_timer(&host->req_tout_timer);

	if (mrq->data)
		mrq->data->bytes_xfered = host->curr.data_xfered;

	/* NOTE(review): fixed 5ms settle after a command timeout — the
	 * reason is not visible here; confirm before changing */
	if (mrq->cmd->error == -ETIMEDOUT)
		mdelay(5);

	msmsdcc_reset_dpsm(host);

	/* Clear current request information as current request has ended */
	memset(&host->curr, 0, sizeof(struct msmsdcc_curr_req));

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);

	return retval;
}

/* Clear all per-transfer bookkeeping for the data phase */
static void msmsdcc_stop_data(struct msmsdcc_host *host)
{
	host->curr.data = NULL;
	host->curr.got_dataend = 0;
	host->curr.wait_for_auto_prog_done = false;
	host->curr.got_auto_prog_done = false;
}

/* Physical (bus) address of the controller FIFO, for the data mover */
static inline uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
{
	return host->core_memres->start + MMCIFIFO;
}

static inline unsigned int msmsdcc_get_min_sup_clk_rate(
				struct msmsdcc_host *host);

/*
 * Wait for a previous MCLK-domain register write to complete: either a
 * fixed udelay (older cores) or polling MCI_STATUS2 for up to 1ms.
 */
static inline void msmsdcc_sync_reg_wr(struct msmsdcc_host *host)
{
	mb();
	if (!is_wait_for_reg_write(host))
		udelay(host->reg_write_delay);
	else if (readl_relaxed(host->base + MCI_STATUS2) &
			MCI_MCLK_REG_WR_ACTIVE) {
		ktime_t start, diff;

		start = ktime_get();
		while (readl_relaxed(host->base + MCI_STATUS2) &
				MCI_MCLK_REG_WR_ACTIVE) {
			diff = ktime_sub(ktime_get(), start);
			/* poll for max. 1 ms */
			if (ktime_to_us(diff) > 1000) {
				pr_warning("%s: previous reg. write is"
					" still active\n",
					mmc_hostname(host->mmc));
				break;
			}
		}
	}
}

/* Fixed inter-register-write settle delay for this controller version */
static inline void msmsdcc_delay(struct msmsdcc_host *host)
{
	udelay(host->reg_write_delay);
}

/* Program ARGUMENT then COMMAND; the CPSM takes over from here */
static inline void
msmsdcc_start_command_exec(struct msmsdcc_host *host, u32 arg, u32 c)
{
	writel_relaxed(arg, host->base + MMCIARGUMENT);
	writel_relaxed(c, host->base + MMCICOMMAND);
	/*
	 * As after sending the command, we don't write any of the
	 * controller registers and just wait for the
	 * CMD_RESPOND_END/CMD_SENT/Command failure notication
	 * from Controller.
	 */
	mb();
}

/*
 * ADM "exec" hook: programs the data path (timer, length, datactrl)
 * just before the data mover starts, then fires the deferred command.
 */
static void
msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
{
	struct msmsdcc_host *host = (struct msmsdcc_host *)cmd->user;

	writel_relaxed(host->cmd_timeout, host->base + MMCIDATATIMER);
	writel_relaxed((unsigned int)host->curr.xfer_size,
			host->base + MMCIDATALENGTH);
	writel_relaxed(host->cmd_datactrl, host->base + MMCIDATACTRL);
	msmsdcc_sync_reg_wr(host); /* Force delay prior to ADM or command */

	if (host->cmd_cmd) {
		msmsdcc_start_command_exec(host,
			(u32)host->cmd_cmd->arg, (u32)host->cmd_c);
	}
}

/*
 * Tasklet run after an ADM transfer finishes: validates the data mover
 * result, unmaps buffers, and completes or continues the request.
 */
static void
msmsdcc_dma_complete_tlet(unsigned long data)
{
	struct msmsdcc_host *host = (struct msmsdcc_host *)data;
	unsigned long flags;
	struct mmc_request *mrq;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->curr.mrq;
	BUG_ON(!mrq);

	if (!(host->dma.result & DMOV_RSLT_VALID)) {
		pr_err("msmsdcc: Invalid DataMover result\n");
		goto out;
	}

	if (host->dma.result & DMOV_RSLT_DONE) {
		host->curr.data_xfered = host->curr.xfer_size;
		host->curr.xfer_remain -= host->curr.xfer_size;
	} else {
		/* Error or flush */
		if (host->dma.result & DMOV_RSLT_ERROR)
			pr_err("%s: DMA error (0x%.8x)\n",
			       mmc_hostname(host->mmc), host->dma.result);
		if (host->dma.result & DMOV_RSLT_FLUSH)
			pr_err("%s: DMA channel flushed (0x%.8x)\n",
			       mmc_hostname(host->mmc), host->dma.result);
		pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
		       host->dma.err.flush[0], host->dma.err.flush[1],
		       host->dma.err.flush[2], host->dma.err.flush[3],
		       host->dma.err.flush[4], host->dma.err.flush[5]);
		msmsdcc_reset_and_restore(host);
		if (!mrq->data->error)
			mrq->data->error = -EIO;
	}
	/* Pre-mapped (host_cookie) buffers are unmapped by the core */
	if (!mrq->data->host_cookie)
		dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg,
			     host->dma.num_ents, host->dma.dir);

	if (host->curr.user_pages) {
		struct scatterlist *sg = host->dma.sg;
		int i;

		for (i = 0; i < host->dma.num_ents; i++, sg++)
			flush_dcache_page(sg_page(sg));
	}

	host->dma.sg = NULL;
	host->dma.busy = 0;

	if ((host->curr.got_dataend && (!host->curr.wait_for_auto_prog_done ||
		(host->curr.wait_for_auto_prog_done &&
		host->curr.got_auto_prog_done))) || mrq->data->error) {
		/*
		 * If we've already gotten our DATAEND / DATABLKEND
		 * for this request, then complete it through here.
		 */

		if (!mrq->data->error) {
			host->curr.data_xfered = host->curr.xfer_size;
			host->curr.xfer_remain -= host->curr.xfer_size;
		}
		if (host->dummy_52_needed) {
			mrq->data->bytes_xfered = host->curr.data_xfered;
			host->dummy_52_sent = 1;
			msmsdcc_start_command(host, &dummy52cmd,
					      MCI_CPSM_PROGENA);
			goto out;
		}
		msmsdcc_stop_data(host);
		if (!mrq->data->stop || mrq->cmd->error ||
			(mrq->sbc && !mrq->data->error)) {
			mrq->data->bytes_xfered = host->curr.data_xfered;
			msmsdcc_reset_dpsm(host);
			del_timer(&host->req_tout_timer);
			/*
			 * Clear current request information as current
			 * request has ended
			 */
			memset(&host->curr, 0,
			       sizeof(struct msmsdcc_curr_req));
			spin_unlock_irqrestore(&host->lock, flags);

			mmc_request_done(host->mmc, mrq);
			return;
		} else if (mrq->data->stop && ((mrq->sbc && mrq->data->error)
			   || !mrq->sbc)) {
			msmsdcc_start_command(host, mrq->data->stop, 0);
		}
	}

out:
	spin_unlock_irqrestore(&host->lock, flags);
	return;
}

#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
/**
 * Callback notification from SPS driver
 *
 * This callback function gets triggered called from
 * SPS driver when requested SPS data transfer is
 * completed.
 *
 * SPS driver invokes this callback in BAM irq context so
 * SDCC driver schedule a tasklet for further processing
 * this callback notification at later point of time in
 * tasklet context and immediately returns control back
 * to SPS driver.
 *
 * @notify - Pointer to sps event notify structure
 *
 */
static void
msmsdcc_sps_complete_cb(struct sps_event_notify *notify)
{
	struct msmsdcc_host *host =
		(struct msmsdcc_host *)
		((struct sps_event_notify *)notify)->user;

	/* Snapshot the event; the tasklet reads host->sps.notify later */
	host->sps.notify = *notify;
	pr_debug("%s: %s: sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
		 mmc_hostname(host->mmc), __func__, notify->event_id,
		 notify->data.transfer.iovec.addr,
		 notify->data.transfer.iovec.size,
		 notify->data.transfer.iovec.flags);
	/* Schedule a tasklet for completing data transfer */
	tasklet_schedule(&host->sps.tlet);
}

/**
 * Tasklet handler for processing SPS callback event
 *
 * This function processing SPS event notification and
 * checks if the SPS transfer is completed or not and
 * then accordingly notifies status to MMC core layer.
 *
 * This function is called in tasklet context.
 *
 * @data - Pointer to sdcc driver data
 *
 */
static void msmsdcc_sps_complete_tlet(unsigned long data)
{
	unsigned long flags;
	int i, rc;
	u32 data_xfered = 0;
	struct mmc_request *mrq;
	struct sps_iovec iovec;
	struct sps_pipe *sps_pipe_handle;
	struct msmsdcc_host *host = (struct msmsdcc_host *)data;
	struct sps_event_notify *notify = &host->sps.notify;

	spin_lock_irqsave(&host->lock, flags);
	/* Producer pipe carries card->host data, consumer host->card */
	if (host->sps.dir == DMA_FROM_DEVICE)
		sps_pipe_handle = host->sps.prod.pipe_handle;
	else
		sps_pipe_handle = host->sps.cons.pipe_handle;
	mrq = host->curr.mrq;

	/* Request may already have been torn down (e.g. by error path) */
	if (!mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	pr_debug("%s: %s: sps event_id=%d\n",
		 mmc_hostname(host->mmc), __func__,
		 notify->event_id);

	/*
	 * Got End of transfer event!!! Check if all of the data
	 * has been transferred?
	 */
	for (i = 0; i < host->sps.xfer_req_cnt; i++) {
		rc = sps_get_iovec(sps_pipe_handle, &iovec);
		if (rc) {
			pr_err("%s: %s: sps_get_iovec() failed rc=%d, i=%d",
			       mmc_hostname(host->mmc), __func__, rc, i);
			break;
		}
		data_xfered += iovec.size;
	}

	if (data_xfered == host->curr.xfer_size) {
		host->curr.data_xfered = host->curr.xfer_size;
		host->curr.xfer_remain -= host->curr.xfer_size;
		pr_debug("%s: Data xfer success. data_xfered=0x%x",
			 mmc_hostname(host->mmc),
			 host->curr.xfer_size);
	} else {
		/* Short transfer: recover the controller and fail the data */
		pr_err("%s: Data xfer failed. data_xfered=0x%x,"
		       " xfer_size=%d", mmc_hostname(host->mmc),
		       data_xfered, host->curr.xfer_size);
		msmsdcc_reset_and_restore(host);
		if (!mrq->data->error)
			mrq->data->error = -EIO;
	}

	/* Unmap sg buffers */
	if (!mrq->data->host_cookie)
		dma_unmap_sg(mmc_dev(host->mmc), host->sps.sg,
			     host->sps.num_ents, host->sps.dir);

	host->sps.sg = NULL;
	host->sps.busy = 0;

	if ((host->curr.got_dataend && (!host->curr.wait_for_auto_prog_done ||
		(host->curr.wait_for_auto_prog_done &&
		host->curr.got_auto_prog_done))) || mrq->data->error) {
		/*
		 * If we've already gotten our DATAEND / DATABLKEND
		 * for this request, then complete it through here.
		 */

		if (!mrq->data->error) {
			host->curr.data_xfered = host->curr.xfer_size;
			host->curr.xfer_remain -= host->curr.xfer_size;
		}
		if (host->dummy_52_needed) {
			mrq->data->bytes_xfered = host->curr.data_xfered;
			host->dummy_52_sent = 1;
			msmsdcc_start_command(host, &dummy52cmd,
					      MCI_CPSM_PROGENA);
			spin_unlock_irqrestore(&host->lock, flags);
			return;
		}
		msmsdcc_stop_data(host);
		if (!mrq->data->stop || mrq->cmd->error ||
			(mrq->sbc && !mrq->data->error)) {
			mrq->data->bytes_xfered = host->curr.data_xfered;
			msmsdcc_reset_dpsm(host);
			del_timer(&host->req_tout_timer);
			/*
			 * Clear current request information as current
			 * request has ended
			 */
			memset(&host->curr, 0,
			       sizeof(struct msmsdcc_curr_req));
			spin_unlock_irqrestore(&host->lock, flags);

			mmc_request_done(host->mmc, mrq);
			return;
		} else if (mrq->data->stop && ((mrq->sbc && mrq->data->error)
			   || !mrq->sbc)) {
			msmsdcc_start_command(host, mrq->data->stop, 0);
		}
	}
	spin_unlock_irqrestore(&host->lock, flags);
}

/**
 * Exit from current SPS data transfer
 *
 * This function exits from current SPS data transfer.
 *
 * This function should be called when error condition
 * is encountered during data transfer.
 *
 * @host - Pointer to sdcc host structure
 *
 */
static void msmsdcc_sps_exit_curr_xfer(struct msmsdcc_host *host)
{
	struct mmc_request *mrq;

	mrq = host->curr.mrq;
	BUG_ON(!mrq);

	/* Recover the controller, then fail the data phase */
	msmsdcc_reset_and_restore(host);
	if (!mrq->data->error)
		mrq->data->error = -EIO;

	/* Unmap sg buffers */
	if (!mrq->data->host_cookie)
		dma_unmap_sg(mmc_dev(host->mmc), host->sps.sg,
			     host->sps.num_ents, host->sps.dir);

	host->sps.sg = NULL;
	host->sps.busy = 0;
	if (host->curr.data)
		msmsdcc_stop_data(host);

	if (!mrq->data->stop || mrq->cmd->error ||
	    (mrq->sbc && !mrq->data->error))
		msmsdcc_request_end(host, mrq);
	else if (mrq->data->stop && ((mrq->sbc && mrq->data->error)
		 || !mrq->sbc))
		msmsdcc_start_command(host, mrq->data->stop, 0);
}
#else
/* No-op stubs when SPS support is compiled out */
static inline void msmsdcc_sps_complete_cb(struct sps_event_notify *notify) { }
static inline void msmsdcc_sps_complete_tlet(unsigned long data) { }
static inline void msmsdcc_sps_exit_curr_xfer(struct msmsdcc_host *host) { }
#endif /* CONFIG_MMC_MSM_SPS_SUPPORT */

static int msmsdcc_enable_cdr_cm_sdc4_dll(struct msmsdcc_host *host);

/*
 * ADM completion callback (IRQ context): stash the result and error data,
 * then defer the heavy lifting to msmsdcc_dma_complete_tlet().
 */
static void
msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
			  unsigned int result,
			  struct msm_dmov_errdata *err)
{
	struct msmsdcc_dma_data	*dma_data =
		container_of(cmd, struct msmsdcc_dma_data, hdr);
	struct msmsdcc_host *host = dma_data->host;

	dma_data->result = result;
	if (err)
		memcpy(&dma_data->err, err, sizeof(struct msm_dmov_errdata));

	tasklet_schedule(&host->dma_tlet);
}

/*
 * Decide whether this transfer should use DMA (ADM or BAM) or fall
 * back to PIO, based on mode, size and alignment constraints.
 */
static bool msmsdcc_is_dma_possible(struct msmsdcc_host *host,
				    struct mmc_data *data)
{
	bool ret = true;
	u32 xfer_size = data->blksz * data->blocks;

	/* Debug/override knob: force everything through PIO */
	if (host->enforce_pio_mode) {
		ret = false;
		goto out;
	}
	if (is_sps_mode(host)) {
		/*
		 * BAM Mode: Fall back on PIO if size is less
		 * than or equal to SPS_MIN_XFER_SIZE bytes.
*/ if (xfer_size <= SPS_MIN_XFER_SIZE) ret = false; } else if (is_dma_mode(host)) { /* * ADM Mode: Fall back on PIO if size is less than FIFO size * or not integer multiple of FIFO size */ if (xfer_size % MCI_FIFOSIZE) ret = false; } else { /* PIO Mode */ ret = false; } out: return ret; } static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data) { struct msmsdcc_nc_dmadata *nc; dmov_box *box; uint32_t rows; int n; int i, err = 0, box_cmd_cnt = 0; struct scatterlist *sg = data->sg; unsigned int len, offset; if ((host->dma.channel == -1) || (host->dma.crci == -1)) return -ENOENT; BUG_ON((host->pdev_id < 1) || (host->pdev_id > 5)); host->dma.sg = data->sg; host->dma.num_ents = data->sg_len; /* Prevent memory corruption */ BUG_ON(host->dma.num_ents > msmsdcc_get_nr_sg(host)); nc = host->dma.nc; if (data->flags & MMC_DATA_READ) host->dma.dir = DMA_FROM_DEVICE; else host->dma.dir = DMA_TO_DEVICE; if (!data->host_cookie) { n = msmsdcc_prep_xfer(host, data); if (unlikely(n < 0)) { host->dma.sg = NULL; host->dma.num_ents = 0; return -ENOMEM; } } /* host->curr.user_pages = (data->flags & MMC_DATA_USERPAGE); */ host->curr.user_pages = 0; box = &nc->cmd[0]; for (i = 0; i < host->dma.num_ents; i++) { len = sg_dma_len(sg); offset = 0; do { /* Check if we can do DMA */ if (!len || (box_cmd_cnt >= MMC_MAX_DMA_CMDS)) { err = -ENOTSUPP; goto unmap; } box->cmd = CMD_MODE_BOX; if (len >= MMC_MAX_DMA_BOX_LENGTH) { len = MMC_MAX_DMA_BOX_LENGTH; len -= len % data->blksz; } rows = (len % MCI_FIFOSIZE) ? 
(len / MCI_FIFOSIZE) + 1 : (len / MCI_FIFOSIZE); if (data->flags & MMC_DATA_READ) { box->src_row_addr = msmsdcc_fifo_addr(host); box->dst_row_addr = sg_dma_address(sg) + offset; box->src_dst_len = (MCI_FIFOSIZE << 16) | (MCI_FIFOSIZE); box->row_offset = MCI_FIFOSIZE; box->num_rows = rows * ((1 << 16) + 1); box->cmd |= CMD_SRC_CRCI(host->dma.crci); } else { box->src_row_addr = sg_dma_address(sg) + offset; box->dst_row_addr = msmsdcc_fifo_addr(host); box->src_dst_len = (MCI_FIFOSIZE << 16) | (MCI_FIFOSIZE); box->row_offset = (MCI_FIFOSIZE << 16); box->num_rows = rows * ((1 << 16) + 1); box->cmd |= CMD_DST_CRCI(host->dma.crci); } offset += len; len = sg_dma_len(sg) - offset; box++; box_cmd_cnt++; } while (len); sg++; } /* Mark last command */ box--; box->cmd |= CMD_LC; /* location of command block must be 64 bit aligned */ BUG_ON(host->dma.cmd_busaddr & 0x07); nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP; host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(host->dma.cmdptr_busaddr); host->dma.hdr.complete_func = msmsdcc_dma_complete_func; /* Flush all data to memory before starting dma */ mb(); unmap: if (err) { if (!data->host_cookie) dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents, host->dma.dir); pr_err("%s: cannot do DMA, fall back to PIO mode err=%d\n", mmc_hostname(host->mmc), err); } return err; } static int msmsdcc_prep_xfer(struct msmsdcc_host *host, struct mmc_data *data) { int rc = 0; unsigned int dir; /* Prevent memory corruption */ BUG_ON(data->sg_len > msmsdcc_get_nr_sg(host)); if (data->flags & MMC_DATA_READ) dir = DMA_FROM_DEVICE; else dir = DMA_TO_DEVICE; /* Make sg buffers DMA ready */ rc = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, dir); if (unlikely(rc != data->sg_len)) { pr_err("%s: Unable to map in all sg elements, rc=%d\n", mmc_hostname(host->mmc), rc); rc = -ENOMEM; goto dma_map_err; } pr_debug("%s: %s: %s: sg_len=%d\n", mmc_hostname(host->mmc), __func__, dir == DMA_FROM_DEVICE ? 
"READ" : "WRITE", data->sg_len); goto out; dma_map_err: dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, data->flags); out: return rc; } #ifdef CONFIG_MMC_MSM_SPS_SUPPORT /** * Submits data transfer request to SPS driver * * This function make sg (scatter gather) data buffers * DMA ready and then submits them to SPS driver for * transfer. * * @host - Pointer to sdcc host structure * @data - Pointer to mmc_data structure * * @return 0 if success else negative value */ static int msmsdcc_sps_start_xfer(struct msmsdcc_host *host, struct mmc_data *data) { int rc = 0; u32 flags; int i; u32 addr, len, data_cnt; struct scatterlist *sg = data->sg; struct sps_pipe *sps_pipe_handle; host->sps.sg = data->sg; host->sps.num_ents = data->sg_len; host->sps.xfer_req_cnt = 0; if (data->flags & MMC_DATA_READ) { host->sps.dir = DMA_FROM_DEVICE; sps_pipe_handle = host->sps.prod.pipe_handle; } else { host->sps.dir = DMA_TO_DEVICE; sps_pipe_handle = host->sps.cons.pipe_handle; } if (!data->host_cookie) { rc = msmsdcc_prep_xfer(host, data); if (unlikely(rc < 0)) { host->dma.sg = NULL; host->dma.num_ents = 0; goto out; } } for (i = 0; i < data->sg_len; i++) { /* * Check if this is the last buffer to transfer? * If yes then set the INT and EOT flags. */ len = sg_dma_len(sg); addr = sg_dma_address(sg); flags = 0; while (len > 0) { if (len > SPS_MAX_DESC_SIZE) { data_cnt = SPS_MAX_DESC_SIZE; } else { data_cnt = len; if ((i == data->sg_len - 1) && (sps_pipe_handle == host->sps.cons.pipe_handle)) { /* * set EOT only for consumer pipe, for * producer pipe h/w will set it. */ flags = SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT; } } rc = sps_transfer_one(sps_pipe_handle, addr, data_cnt, host, flags); if (rc) { pr_err("%s: sps_transfer_one() error! 
rc=%d," " pipe=0x%x, sg=0x%x, sg_buf_no=%d\n", mmc_hostname(host->mmc), rc, (u32)sps_pipe_handle, (u32)sg, i); goto dma_map_err; } addr += data_cnt; len -= data_cnt; host->sps.xfer_req_cnt++; } sg++; } goto out; dma_map_err: /* unmap sg buffers */ if (!data->host_cookie) dma_unmap_sg(mmc_dev(host->mmc), host->sps.sg, host->sps.num_ents, host->sps.dir); out: return rc; } #else static int msmsdcc_sps_start_xfer(struct msmsdcc_host *host, struct mmc_data *data) { return 0; } #endif /* CONFIG_MMC_MSM_SPS_SUPPORT */ static void msmsdcc_start_command_deferred(struct msmsdcc_host *host, struct mmc_command *cmd, u32 *c) { DBG(host, "op %02x arg %08x flags %08x\n", cmd->opcode, cmd->arg, cmd->flags); *c |= (cmd->opcode | MCI_CPSM_ENABLE); if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) *c |= MCI_CPSM_LONGRSP; *c |= MCI_CPSM_RESPONSE; } if (/*interrupt*/0) *c |= MCI_CPSM_INTERRUPT; /* DAT_CMD bit should be set for all ADTC */ if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) *c |= MCI_CSPM_DATCMD; /* Check if AUTO CMD19/CMD21 is required or not? */ if (host->tuning_needed && (cmd->mrq->data && (cmd->mrq->data->flags & MMC_DATA_READ)) && (host->en_auto_cmd19 || host->en_auto_cmd21)) { /* * For open ended block read operation (without CMD23), * AUTO_CMD19/AUTO_CMD21 bit should be set while sending * the READ command. * For close ended block read operation (with CMD23), * AUTO_CMD19/AUTO_CMD21 bit should be set while sending * CMD23. 
*/ if ((cmd->opcode == MMC_SET_BLOCK_COUNT && host->curr.mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) || (!host->curr.mrq->sbc && (cmd->opcode == MMC_READ_SINGLE_BLOCK || cmd->opcode == MMC_READ_MULTIPLE_BLOCK || cmd->opcode == SD_IO_RW_EXTENDED))) { msmsdcc_enable_cdr_cm_sdc4_dll(host); if (host->en_auto_cmd19 && host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) *c |= MCI_CSPM_AUTO_CMD19; else if (host->en_auto_cmd21 && host->mmc->ios.timing == MMC_TIMING_MMC_HS200) *c |= MCI_CSPM_AUTO_CMD21; } } if (cmd->mrq->data && (cmd->mrq->data->flags & MMC_DATA_READ)) writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) | MCI_CDR_EN), host->base + MCI_DLL_CONFIG); else /* Clear CDR_EN bit for non read operations */ writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) & ~MCI_CDR_EN), host->base + MCI_DLL_CONFIG); if ((cmd->flags & MMC_RSP_R1B) == MMC_RSP_R1B) { *c |= MCI_CPSM_PROGENA; host->prog_enable = 1; } if (cmd == cmd->mrq->stop) *c |= MCI_CSPM_MCIABORT; if (host->curr.cmd != NULL) { pr_err("%s: Overlapping command requests\n", mmc_hostname(host->mmc)); } host->curr.cmd = cmd; } static void msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data, struct mmc_command *cmd, u32 c) { unsigned int datactrl = 0, timeout; unsigned long long clks; void __iomem *base = host->base; unsigned int pio_irqmask = 0; BUG_ON(!data->sg); BUG_ON(!data->sg_len); host->curr.data = data; host->curr.xfer_size = data->blksz * data->blocks; host->curr.xfer_remain = host->curr.xfer_size; host->curr.data_xfered = 0; host->curr.got_dataend = 0; host->curr.got_auto_prog_done = false; datactrl = MCI_DPSM_ENABLE | (data->blksz << 4); if (host->curr.wait_for_auto_prog_done) datactrl |= MCI_AUTO_PROG_DONE; if (msmsdcc_is_dma_possible(host, data)) { if (is_dma_mode(host) && !msmsdcc_config_dma(host, data)) { datactrl |= MCI_DPSM_DMAENABLE; } else if (is_sps_mode(host)) { if (!msmsdcc_sps_start_xfer(host, data)) { /* Now kick start DML transfer */ mb(); msmsdcc_dml_start_xfer(host, 
data); datactrl |= MCI_DPSM_DMAENABLE; host->sps.busy = 1; } } } /* Is data transfer in PIO mode required? */ if (!(datactrl & MCI_DPSM_DMAENABLE)) { if (data->flags & MMC_DATA_READ) { pio_irqmask = MCI_RXFIFOHALFFULLMASK; if (host->curr.xfer_remain < MCI_FIFOSIZE) pio_irqmask |= MCI_RXDATAAVLBLMASK; } else pio_irqmask = MCI_TXFIFOHALFEMPTYMASK | MCI_TXFIFOEMPTYMASK; msmsdcc_sg_start(host); } if (data->flags & MMC_DATA_READ) datactrl |= (MCI_DPSM_DIRECTION | MCI_RX_DATA_PEND); else if (host->curr.use_wr_data_pend) datactrl |= MCI_DATA_PEND; if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50) clks = (unsigned long long)data->timeout_ns * (host->clk_rate / 2); else clks = (unsigned long long)data->timeout_ns * host->clk_rate; do_div(clks, 1000000000UL); timeout = data->timeout_clks + (unsigned int)clks*2 ; WARN(!timeout, "%s: data timeout is zero. timeout_ns=0x%x, timeout_clks=0x%x\n", mmc_hostname(host->mmc), data->timeout_ns, data->timeout_clks); if (is_dma_mode(host) && (datactrl & MCI_DPSM_DMAENABLE)) { /* Use ADM (Application Data Mover) HW for Data transfer */ /* Save parameters for the dma exec function */ host->cmd_timeout = timeout; host->cmd_pio_irqmask = pio_irqmask; host->cmd_datactrl = datactrl; host->cmd_cmd = cmd; host->dma.hdr.exec_func = msmsdcc_dma_exec_func; host->dma.hdr.user = (void *)host; host->dma.busy = 1; if (cmd) { msmsdcc_start_command_deferred(host, cmd, &c); host->cmd_c = c; } writel_relaxed((readl_relaxed(host->base + MMCIMASK0) & (~(MCI_IRQ_PIO))) | host->cmd_pio_irqmask, host->base + MMCIMASK0); mb(); msm_dmov_enqueue_cmd_ext(host->dma.channel, &host->dma.hdr); } else { /* SPS-BAM mode or PIO mode */ writel_relaxed(timeout, base + MMCIDATATIMER); writel_relaxed(host->curr.xfer_size, base + MMCIDATALENGTH); writel_relaxed((readl_relaxed(host->base + MMCIMASK0) & (~(MCI_IRQ_PIO))) | pio_irqmask, host->base + MMCIMASK0); writel_relaxed(datactrl, base + MMCIDATACTRL); if (cmd) { /* Delay between data/command */ msmsdcc_sync_reg_wr(host); 
/* Daisy-chain the command if requested */ msmsdcc_start_command(host, cmd, c); } else { /* * We don't need delay after writing to DATA_CTRL * register if we are not writing to CMD register * immediately after this. As we already have delay * before sending the command, we just need mb() here. */ mb(); } } } static void msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c) { msmsdcc_start_command_deferred(host, cmd, &c); msmsdcc_start_command_exec(host, cmd->arg, c); } static void msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data, unsigned int status) { if ((status & MCI_DATACRCFAIL) || (status & MCI_DATATIMEOUT)) { u32 opcode = data->mrq->cmd->opcode; if (!((!host->tuning_in_progress && opcode == MMC_BUS_TEST_W) || (opcode == MMC_BUS_TEST_R) || (host->tuning_in_progress && (opcode == MMC_SEND_TUNING_BLOCK_HS200 || opcode == MMC_SEND_TUNING_BLOCK)))) { /* Execute full tuning in case of CRC/timeout errors */ host->saved_tuning_phase = INVALID_TUNING_PHASE; if (status & MCI_DATACRCFAIL) { pr_err("%s: Data CRC error\n", mmc_hostname(host->mmc)); pr_err("%s: opcode 0x%.8x\n", __func__, opcode); pr_err("%s: blksz %d, blocks %d\n", __func__, data->blksz, data->blocks); } else { pr_err("%s: CMD%d: Data timeout. DAT0 => %d\n", mmc_hostname(host->mmc), opcode, (readl_relaxed(host->base + MCI_TEST_INPUT) & 0x2) ? 1 : 0); msmsdcc_dump_sdcc_state(host); } } /* * CRC is optional for the bus test commands, not all * cards respond back with CRC. However controller * waits for the CRC and times out. Hence ignore the * data timeouts during the Bustest. 
*/ if (!((!host->tuning_in_progress && opcode == MMC_BUS_TEST_W) || (opcode == MMC_BUS_TEST_R))) { if (status & MCI_DATACRCFAIL) data->error = -EILSEQ; else data->error = -ETIMEDOUT; } /* In case of DATA CRC/timeout error, execute tuning again */ #if defined(CONFIG_BCM4334) || defined(CONFIG_BCM4334_MODULE) if (host->tuning_needed&&!host->tuning_in_progress&&(host->pdev_id!=4)) #elif defined (CONFIG_BCM4335)||defined (CONFIG_BCM4335_MODULE) if (host->tuning_needed&&!host->tuning_in_progress&&(host->pdev_id!=3)) #else if (host->tuning_needed&&!host->tuning_in_progress) #endif host->tuning_done = false; } else if (status & MCI_RXOVERRUN) { pr_err("%s: RX overrun\n", mmc_hostname(host->mmc)); data->error = -EIO; } else if (status & MCI_TXUNDERRUN) { pr_err("%s: TX underrun\n", mmc_hostname(host->mmc)); data->error = -EIO; } else { pr_err("%s: Unknown error (0x%.8x)\n", mmc_hostname(host->mmc), status); data->error = -EIO; } /* Dummy CMD52 is not needed when CMD53 has errors */ if (host->dummy_52_needed) host->dummy_52_needed = 0; } static int msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain) { void __iomem *base = host->base; uint32_t *ptr = (uint32_t *) buffer; int count = 0; if (remain % 4) remain = ((remain >> 2) + 1) << 2; while (readl_relaxed(base + MMCISTATUS) & MCI_RXDATAAVLBL) { *ptr = readl_relaxed(base + MMCIFIFO + (count % MCI_FIFOSIZE)); ptr++; count += sizeof(uint32_t); remain -= sizeof(uint32_t); if (remain == 0) break; } return count; } static int msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer, unsigned int remain) { void __iomem *base = host->base; char *ptr = buffer; unsigned int maxcnt = MCI_FIFOHALFSIZE; while (readl_relaxed(base + MMCISTATUS) & (MCI_TXFIFOEMPTY | MCI_TXFIFOHALFEMPTY)) { unsigned int count, sz; count = min(remain, maxcnt); sz = count % 4 ? 
		     (count >> 2) + 1 : (count >> 2);

		writesl(base + MMCIFIFO, ptr, sz);
		ptr += count;
		remain -= count;

		if (remain == 0)
			break;
	}
	/* Ensure FIFO writes are posted before returning. */
	mb();

	return ptr - buffer;
}

/*
 * Copy up to a word (4 bytes) between a scatterlist
 * and a temporary bounce buffer when the word lies across
 * two pages. The temporary buffer can then be read to/
 * written from the FIFO once.
 */
static void _msmsdcc_sg_consume_word(struct msmsdcc_host *host)
{
	struct msmsdcc_pio_data *pio = &host->pio;
	unsigned int bytes_avail;

	/* Flush the already-staged bounce_buf_len bytes first. */
	if (host->curr.data->flags & MMC_DATA_READ)
		memcpy(pio->sg_miter.addr, pio->bounce_buf,
		       pio->bounce_buf_len);
	else
		memcpy(pio->bounce_buf, pio->sg_miter.addr,
		       pio->bounce_buf_len);

	/* Pull from subsequent sg entries until a full word is staged. */
	while (pio->bounce_buf_len != 4) {
		if (!sg_miter_next(&pio->sg_miter))
			break;
		bytes_avail = min_t(unsigned int, pio->sg_miter.length,
			4 - pio->bounce_buf_len);
		if (host->curr.data->flags & MMC_DATA_READ)
			memcpy(pio->sg_miter.addr,
			       &pio->bounce_buf[pio->bounce_buf_len],
			       bytes_avail);
		else
			memcpy(&pio->bounce_buf[pio->bounce_buf_len],
			       pio->sg_miter.addr, bytes_avail);

		pio->sg_miter.consumed = bytes_avail;
		pio->bounce_buf_len += bytes_avail;
	}
}

/*
 * Use sg_miter_next to return as many 4-byte aligned
 * chunks as possible, using a temporary 4 byte buffer
 * for alignment if necessary.
 * Returns 1 and fills (*buf, *len) with the next chunk, or 0 when the
 * scatterlist is exhausted.
 */
static int msmsdcc_sg_next(struct msmsdcc_host *host, char **buf, int *len)
{
	struct msmsdcc_pio_data *pio = &host->pio;
	unsigned int length, rlength;
	char *buffer;

	if (!sg_miter_next(&pio->sg_miter))
		return 0;

	buffer = pio->sg_miter.addr;
	length = pio->sg_miter.length;

	if (length < host->curr.xfer_remain) {
		rlength = round_down(length, 4);
		if (rlength) {
			/*
			 * We have a 4-byte aligned chunk.
			 * The rounding will be reflected by
			 * a call to msmsdcc_sg_consumed
			 */
			length = rlength;
			goto sg_next_end;
		}
		/*
		 * We have a length less than 4 bytes. Check to
		 * see if more buffer is available, and combine
		 * to make 4 bytes if possible.
		 */
		pio->bounce_buf_len = length;
		memset(pio->bounce_buf, 0, 4);

		/*
		 * On a read, get 4 bytes from FIFO, and distribute
		 * (4-bouce_buf_len) bytes into consecutive
		 * sgl buffers when msmsdcc_sg_consumed is called
		 */
		if (host->curr.data->flags & MMC_DATA_READ) {
			buffer = pio->bounce_buf;
			length = 4;
			goto sg_next_end;
		} else {
			/* On a write, stage the word from the sgl now. */
			_msmsdcc_sg_consume_word(host);
			buffer = pio->bounce_buf;
			length = pio->bounce_buf_len;
		}
	}

sg_next_end:
	*buf = buffer;
	*len = length;
	return 1;
}

/*
 * Update sg_miter.consumed based on how many bytes were
 * consumed. If the bounce buffer was used to read from FIFO,
 * redistribute into sgls.
 */
static void msmsdcc_sg_consumed(struct msmsdcc_host *host,
				unsigned int length)
{
	struct msmsdcc_pio_data *pio = &host->pio;

	if (host->curr.data->flags & MMC_DATA_READ) {
		if (length > pio->sg_miter.consumed)
			/*
			 * consumed 4 bytes, but sgl
			 * describes < 4 bytes
			 */
			_msmsdcc_sg_consume_word(host);
		else
			pio->sg_miter.consumed = length;
	} else
		if (length < pio->sg_miter.consumed)
			pio->sg_miter.consumed = length;
}

/*
 * Begin iterating the current request's scatterlist for PIO.  The
 * miter maps pages atomically (IRQ context); direction flag follows
 * the data transfer direction.
 */
static void msmsdcc_sg_start(struct msmsdcc_host *host)
{
	unsigned int sg_miter_flags = SG_MITER_ATOMIC;

	host->pio.bounce_buf_len = 0;

	if (host->curr.data->flags & MMC_DATA_READ)
		sg_miter_flags |= SG_MITER_TO_SG;
	else
		sg_miter_flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->pio.sg_miter, host->curr.data->sg,
		       host->curr.data->sg_len, sg_miter_flags);
}

/* Release the sg mapping taken by msmsdcc_sg_start(). */
static void msmsdcc_sg_stop(struct msmsdcc_host *host)
{
	sg_miter_stop(&host->pio.sg_miter);
}

/* Mask off all PIO interrupt sources in MMCIMASK0. */
static inline void msmsdcc_clear_pio_irq_mask(struct msmsdcc_host *host)
{
	writel_relaxed(readl_relaxed(host->base + MMCIMASK0) & ~MCI_IRQ_PIO,
		       host->base + MMCIMASK0);
	mb();
}

/*
 * PIO interrupt handler: move data between the controller FIFO and the
 * request's scatterlist until the FIFO stalls or the sgl is exhausted.
 */
static irqreturn_t
msmsdcc_pio_irq(int irq, void *dev_id)
{
	struct msmsdcc_host	*host = dev_id;
	void __iomem		*base = host->base;
	uint32_t		status;
	unsigned long flags;
	unsigned int remain;
	char *buffer;

	spin_lock(&host->lock);

	/* Spurious if clocks are gated or there is no data transfer. */
	if (!atomic_read(&host->clks_on) || !host->curr.data) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	status =
	readl_relaxed(base + MMCISTATUS);

	/* Not ours unless a PIO interrupt source is both set and unmasked. */
	if (((readl_relaxed(host->base + MMCIMASK0) & status) &
				(MCI_IRQ_PIO)) == 0) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}
#if IRQ_DEBUG
	msmsdcc_print_status(host, "irq1-r", status);
#endif
	local_irq_save(flags);

	do {
		unsigned int len;

		if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_TXFIFOEMPTY
				| MCI_RXDATAAVLBL)))
			break;

		if (!msmsdcc_sg_next(host, &buffer, &remain))
			break;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = msmsdcc_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = msmsdcc_pio_write(host, buffer, remain);

		/* len might have aligned to 32bits above */
		if (len > remain)
			len = remain;

		host->curr.xfer_remain -= len;
		host->curr.data_xfered += len;
		remain -= len;
		msmsdcc_sg_consumed(host, len);

		if (remain) /* Done with this page? */
			break; /* Nope */

		status = readl_relaxed(base + MMCISTATUS);
	} while (1);

	msmsdcc_sg_stop(host);
	local_irq_restore(flags);

	/* Transfer complete: no further PIO interrupts wanted. */
	if (!host->curr.xfer_remain) {
		msmsdcc_clear_pio_irq_mask(host);
		goto out_unlock;
	}

	/*
	 * Less than a FIFO left to receive: switch from half-full to
	 * data-available interrupts so the tail is not stranded.
	 */
	if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE) {
		writel_relaxed((readl_relaxed(host->base + MMCIMASK0) &
				~MCI_IRQ_PIO) | MCI_RXDATAAVLBLMASK,
				host->base + MMCIMASK0);
		mb();
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_HANDLED;
}

static void msmsdcc_request_start(struct msmsdcc_host *host,
				  struct mmc_request *mrq);

/*
 * Poll for late RX data after DATAEND (SDCC3 quirk; see comment below)
 * and drain it via the PIO handler.  Called with host->lock held; the
 * lock is dropped around msmsdcc_pio_irq(), which takes it itself.
 * Flags -ETIMEDOUT and resets the controller if data never shows up.
 */
static void msmsdcc_wait_for_rxdata(struct msmsdcc_host *host,
				    struct mmc_data *data)
{
	u32 loop_cnt = 0;

	/*
	 * For read commands with data less than fifo size, it is possible to
	 * get DATAEND first and RXDATA_AVAIL might be set later because of
	 * synchronization delay through the asynchronous RX FIFO. Thus, for
	 * such cases, even after DATAEND interrupt is received software
	 * should poll for RXDATA_AVAIL until the requested data is read out
	 * of FIFO. This change is needed to get around this abnormal but
	 * sometimes expected behavior of SDCC3 controller.
	 *
	 * We can expect RXDATAAVAIL bit to be set after 6HCLK clock cycles
	 * after the data is loaded into RX FIFO. This would amount to less
	 * than a microsecond and thus looping for 1000 times is good enough
	 * for that delay.
	 */
	while (((int)host->curr.xfer_remain > 0) && (++loop_cnt < 1000)) {
		if (readl_relaxed(host->base + MMCISTATUS) &
					MCI_RXDATAAVLBL) {
			spin_unlock(&host->lock);
			msmsdcc_pio_irq(1, host);
			spin_lock(&host->lock);
		}
	}
	if (loop_cnt == 1000) {
		pr_info("%s: Timed out while polling for Rx Data\n",
				mmc_hostname(host->mmc));
		data->error = -ETIMEDOUT;
		msmsdcc_reset_and_restore(host);
	}
}

/*
 * Handle a command-phase interrupt: latch the response registers into
 * cmd->resp[], translate timeout/CRC status into cmd->error, and (in
 * the continuation below) advance the request state machine.
 */
static void
msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
{
	struct mmc_command *cmd = host->curr.cmd;

	host->curr.cmd = NULL;
	if (mmc_resp_type(cmd))
		cmd->resp[0] = readl_relaxed(host->base + MMCIRESPONSE0);
	/*
	 * Read rest of the response registers only if
	 * long response is expected for this command
	 */
	if (mmc_resp_type(cmd) & MMC_RSP_136) {
		cmd->resp[1] = readl_relaxed(host->base + MMCIRESPONSE1);
		cmd->resp[2] = readl_relaxed(host->base + MMCIRESPONSE2);
		cmd->resp[3] = readl_relaxed(host->base + MMCIRESPONSE3);
	}

	if (status & (MCI_CMDTIMEOUT | MCI_AUTOCMD19TIMEOUT)) {
		pr_err("%s: CMD%d: Command timeout\n",
				mmc_hostname(host->mmc), cmd->opcode);
		cmd->error = -ETIMEDOUT;
	} else if ((status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC)
			&& !host->tuning_in_progress) {
		pr_err("%s: CMD%d: Command CRC error\n",
			mmc_hostname(host->mmc), cmd->opcode);
		msmsdcc_dump_sdcc_state(host);
		/* Board-specific: skip re-tune on the BCM WLAN slot. */
#if defined(CONFIG_BCM4334) || defined(CONFIG_BCM4334_MODULE)
		if( host->pdev_id == 4){
			printk("%s: Skipped tuning.\n",mmc_hostname(host->mmc));
		}
#elif defined(CONFIG_BCM4335) || defined(CONFIG_BCM4335_MODULE)
		if( host->pdev_id == 3){
			printk("%s: Skipped tuning.\n",mmc_hostname(host->mmc));
		}
#else
		/* Execute full tuning in case of CRC errors */
		host->saved_tuning_phase = INVALID_TUNING_PHASE;
		if (host->tuning_needed)
			host->tuning_done = false;
#endif
		cmd->error = -EILSEQ;
	}

	if
	   (!cmd->error) {
		/* Honor a per-command timeout larger than the request's. */
		if (cmd->cmd_timeout_ms > host->curr.req_tout_ms) {
			host->curr.req_tout_ms = cmd->cmd_timeout_ms;
			mod_timer(&host->req_tout_timer, (jiffies +
				  msecs_to_jiffies(host->curr.req_tout_ms)));
		}
	}

	/* No data phase (or command failed): tear down and finish. */
	if (!cmd->data || cmd->error) {
		if (host->curr.data && host->dma.sg &&
			is_dma_mode(host))
			msm_dmov_flush(host->dma.channel, 0);
		else if (host->curr.data && host->sps.sg &&
			is_sps_mode(host)) {
			/* Stop current SPS transfer */
			msmsdcc_sps_exit_curr_xfer(host);
		} else if (host->curr.data) { /* Non DMA */
			msmsdcc_clear_pio_irq_mask(host);
			msmsdcc_reset_and_restore(host);
			msmsdcc_stop_data(host);
			msmsdcc_request_end(host, cmd->mrq);
		} else { /* host->data == NULL */
			if (!cmd->error && host->prog_enable) {
				if (status & MCI_PROGDONE) {
					host->prog_enable = 0;
					msmsdcc_request_end(host, cmd->mrq);
				} else
					/* Still busy: wait for PROGDONE with
					 * this command re-armed as current. */
					host->curr.cmd = cmd;
			} else {
				host->prog_enable = 0;
				host->curr.wait_for_auto_prog_done = false;
				if (host->dummy_52_needed)
					host->dummy_52_needed = 0;
				if (cmd->data && cmd->error)
					msmsdcc_reset_and_restore(host);
				msmsdcc_request_end(host, cmd->mrq);
			}
		}
	} else if (cmd->data) {
		/* After CMD23 (sbc) completes, issue the real data command;
		 * otherwise start a write's data phase now (reads and
		 * write-data-pend mode were started earlier). */
		if (cmd == host->curr.mrq->sbc)
			msmsdcc_start_command(host, host->curr.mrq->cmd, 0);
		else if ((cmd->data->flags & MMC_DATA_WRITE) &&
			   !host->curr.use_wr_data_pend)
			msmsdcc_start_data(host, cmd->data, NULL, 0);
	}
}

/*
 * Main controller interrupt handler: loops while unmasked status bits
 * remain, dispatching SDIO, dummy-CMD52, command and data events.
 */
static irqreturn_t
msmsdcc_irq(int irq, void *dev_id)
{
	struct msmsdcc_host	*host = dev_id;
	u32			status;
	int			ret = 0;
	int			timer = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		if (timer) {
			timer = 0;
			msmsdcc_delay(host);
		}

		if (!atomic_read(&host->clks_on)) {
			pr_debug("%s: %s: SDIO async irq received\n",
					mmc_hostname(host->mmc), __func__);

			/*
			 * Only async interrupt can come when clocks are off,
			 * disable further interrupts and enable them when
			 * clocks are on.
			 */
			if (!host->sdcc_irq_disabled) {
				disable_irq_nosync(irq);
				host->sdcc_irq_disabled = 1;
			}

			/*
			 * If mmc_card_wake_sdio_irq() is set, mmc core layer
			 * will take care of signaling sdio irq during
			 * mmc_sdio_resume().
			 */
			if (host->sdcc_suspended) {
				/*
				 * This is a wakeup interrupt so hold wakelock
				 * until SDCC resume is handled.
				 */
				wake_lock(&host->sdio_wlock);
			} else {
				spin_unlock(&host->lock);
				mmc_signal_sdio_irq(host->mmc);
				spin_lock(&host->lock);
			}
			ret = 1;
			break;
		}

		status = readl_relaxed(host->base + MMCISTATUS);

		/* Nothing (other than PIO bits) pending and unmasked. */
		if (((readl_relaxed(host->base + MMCIMASK0) & status) &
						(~(MCI_IRQ_PIO))) == 0)
			break;

#if IRQ_DEBUG
		msmsdcc_print_status(host, "irq0-r", status);
#endif
		status &= readl_relaxed(host->base + MMCIMASK0);
		writel_relaxed(status, host->base + MMCICLEAR);
		/* Allow clear to take effect*/
		if (host->clk_rate <=
				msmsdcc_get_min_sup_clk_rate(host))
			msmsdcc_sync_reg_wr(host);
#if IRQ_DEBUG
		msmsdcc_print_status(host, "irq0-p", status);
#endif

		if (status & MCI_SDIOINTROPE) {
			if (host->sdcc_suspending)
				wake_lock(&host->sdio_suspend_wlock);
			spin_unlock(&host->lock);
			mmc_signal_sdio_irq(host->mmc);
			spin_lock(&host->lock);
		}
		data = host->curr.data;

		/* Completion of the dummy CMD52 issued after CMD53. */
		if (host->dummy_52_sent) {
			if (status & (MCI_PROGDONE | MCI_CMDCRCFAIL |
					  MCI_CMDTIMEOUT)) {
				if (status & MCI_CMDTIMEOUT)
					pr_debug("%s: dummy CMD52 timeout\n",
						mmc_hostname(host->mmc));
				if (status & MCI_CMDCRCFAIL)
					pr_debug("%s: dummy CMD52 CRC failed\n",
						mmc_hostname(host->mmc));
				host->dummy_52_sent = 0;
				host->dummy_52_needed = 0;
				if (data) {
					msmsdcc_stop_data(host);
					msmsdcc_request_end(host, data->mrq);
				}
				WARN(!data, "No data cmd for dummy CMD52\n");
				spin_unlock(&host->lock);
				return IRQ_HANDLED;
			}
			break;
		}

		/*
		 * Check for proper command response
		 */
		cmd = host->curr.cmd;
		if ((status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
			MCI_CMDTIMEOUT | MCI_PROGDONE |
			MCI_AUTOCMD19TIMEOUT)) && host->curr.cmd) {
			msmsdcc_do_cmdirq(host, status);
		}

		if (host->curr.data) {
			/* Check for data errors */
			if (status &
			    (MCI_DATACRCFAIL|MCI_DATATIMEOUT|
			     MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
				msmsdcc_data_err(host, data, status);
				host->curr.data_xfered = 0;
				if (host->dma.sg && is_dma_mode(host))
					msm_dmov_flush(host->dma.channel, 0);
				else if (host->sps.sg && is_sps_mode(host)) {
					/* Stop current SPS transfer */
					msmsdcc_sps_exit_curr_xfer(host);
				} else {
					/* PIO path: reset controller, then
					 * either finish the request or issue
					 * the stop command. */
					msmsdcc_clear_pio_irq_mask(host);
					msmsdcc_reset_and_restore(host);
					if (host->curr.data)
						msmsdcc_stop_data(host);
					if (!data->stop || (host->curr.mrq->sbc
						&& !data->error))
						timer |=
						 msmsdcc_request_end(host,
								    data->mrq);
					else if ((host->curr.mrq->sbc
						&& data->error) ||
						!host->curr.mrq->sbc) {
						msmsdcc_start_command(host,
								data->stop, 0);
						timer = 1;
					}
				}
			}

			/* Check for prog done */
			if (host->curr.wait_for_auto_prog_done &&
				(status & MCI_PROGDONE))
				host->curr.got_auto_prog_done = true;

			/* Check for data done */
			if (!host->curr.got_dataend && (status & MCI_DATAEND))
				host->curr.got_dataend = 1;

			if (host->curr.got_dataend &&
				(!host->curr.wait_for_auto_prog_done ||
				(host->curr.wait_for_auto_prog_done &&
				host->curr.got_auto_prog_done))) {
				/*
				 * If DMA is still in progress, we complete
				 * via the completion handler
				 */
				if (!host->dma.busy && !host->sps.busy) {
					/*
					 * There appears to be an issue in the
					 * controller where if you request a
					 * small block transfer (< fifo size),
					 * you may get your DATAEND/DATABLKEND
					 * irq without the PIO data irq.
					 *
					 * Check to see if theres still data
					 * to be read, and simulate a PIO irq.
					 */
					if (data->flags & MMC_DATA_READ)
						msmsdcc_wait_for_rxdata(host,
								data);
					if (!data->error) {
						host->curr.data_xfered =
							host->curr.xfer_size;
						host->curr.xfer_remain -=
							host->curr.xfer_size;
					}

					if (!host->dummy_52_needed) {
						msmsdcc_stop_data(host);
						if (!data->stop ||
							(host->curr.mrq->sbc
							&& !data->error))
							msmsdcc_request_end(
								host,
								data->mrq);
						else if ((host->curr.mrq->sbc
							&& data->error) ||
							!host->curr.mrq->sbc) {
							msmsdcc_start_command(
								host,
								data->stop, 0);
							timer = 1;
						}
					} else {
						/* CMD53 done: fire the dummy
						 * CMD52 before finishing. */
						host->dummy_52_sent = 1;
						msmsdcc_start_command(host,
							&dummy52cmd,
							MCI_CPSM_PROGENA);
					}
				}
			}
		}

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

/*
 * mmc_host_ops.pre_req: pre-map the data buffers for DMA so the actual
 * request path avoids the mapping cost.  host_cookie records that the
 * mapping was done.
 * NOTE(review): @is_first_request is unused here.
 */
static void
msmsdcc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
		bool is_first_request)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	int rc = 0;

	if (unlikely(!data)) {
		pr_err("%s: %s cannot prepare null data\n", mmc_hostname(mmc),
		       __func__);
		return;
	}
	if (unlikely(data->host_cookie)) {
		/* Very wrong */
		data->host_cookie = 0;
		pr_err("%s: %s Request reposted for prepare\n",
		       mmc_hostname(mmc), __func__);
		return;
	}

	if (!msmsdcc_is_dma_possible(host, data))
		return;

	rc = msmsdcc_prep_xfer(host, data);
	if (unlikely(rc < 0)) {
		data->host_cookie = 0;
		return;
	}

	data->host_cookie = 1;
}

/*
 * mmc_host_ops.post_req: undo the DMA mapping made in pre_req (if any)
 * once the request has completed.
 */
static void
msmsdcc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, int err)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	unsigned int dir;
	struct mmc_data *data = mrq->data;

	if (unlikely(!data)) {
		pr_err("%s: %s cannot cleanup null data\n", mmc_hostname(mmc),
		       __func__);
		return;
	}
	if (data->flags & MMC_DATA_READ)
		dir = DMA_FROM_DEVICE;
	else
		dir = DMA_TO_DEVICE;

	if (data->host_cookie)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
			     data->sg_len, dir);

	data->host_cookie = 0;
}

/*
 * Dispatch a validated request: reads (and write-data-pend mode) start
 * the data engine with the command daisy-chained; plain writes send the
 * command first and start data from the command-done path.
 */
static void
msmsdcc_request_start(struct msmsdcc_host *host, struct mmc_request *mrq)
{
	if (mrq->data) {
		/* Queue/read data, daisy-chain command when data starts */
		if ((mrq->data->flags & MMC_DATA_READ) ||
		    host->curr.use_wr_data_pend)
			msmsdcc_start_data(host, mrq->data,
					   mrq->sbc ? mrq->sbc : mrq->cmd,
					   0);
		else
			msmsdcc_start_command(host,
					      mrq->sbc ? mrq->sbc : mrq->cmd,
					      0);
	} else {
		msmsdcc_start_command(host, mrq->cmd, 0);
	}
}

/*
 * This function returns true if AUTO_PROG_DONE feature of host is
 * applicable for current request, returns "false" otherwise.
 *
 * NOTE: Caller should call this function only for data write operations.
 */
static bool msmsdcc_is_wait_for_auto_prog_done(struct msmsdcc_host *host,
					       struct mmc_request *mrq)
{
	/*
	 * Auto-prog done will be enabled for following cases:
	 * mrq->sbc	|	mrq->stop
	 * _____________|________________
	 *	True	|	Don't care
	 *	False	|	False (CMD24, ACMD25 use case)
	 */
	if (is_auto_prog_done(host) && (mrq->sbc || !mrq->stop))
		return true;

	return false;
}

/*
 * This function returns true if controller can wait for prog done
 * for current request, returns "false" otherwise.
 *
 * NOTE: Caller should call this function only for data write operations.
 */
static bool msmsdcc_is_wait_for_prog_done(struct msmsdcc_host *host,
					  struct mmc_request *mrq)
{
	if (msmsdcc_is_wait_for_auto_prog_done(host, mrq) || mrq->stop)
		return true;

	return false;
}

/*
 * mmc_host_ops.request: validate host state (power, clocks, IRQ, BAM),
 * run deferred tuning if needed, arm the software request timeout and
 * hand the request to msmsdcc_request_start().  On bad state the
 * request is failed back to the core immediately.
 */
static void
msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	unsigned long flags;
	unsigned int error = 0;
	int retries = 5;

	/*
	 * Get the SDIO AL client out of LPM.
	 */
	WARN(host->dummy_52_sent, "Dummy CMD52 in progress\n");
	if (host->plat->is_sdio_al_client)
		msmsdcc_sdio_al_lpm(mmc, false);

	/*
	 * Don't start the request if SDCC is not in proper state to handle it
	 * BAM state is checked below if applicable
	 */
	if (!host->pwr || !atomic_read(&host->clks_on) ||
			host->sdcc_irq_disabled) {
		WARN(1, "%s: %s: SDCC is in bad state. don't process new request (CMD%d)\n",
		     mmc_hostname(host->mmc), __func__, mrq->cmd->opcode);
		error = EIO;
		goto bad_state;
	}

	/* check if sps bam needs to be reset */
	if (is_sps_mode(host) && host->sps.reset_bam) {
		while (retries) {
			if (!msmsdcc_bam_dml_reset_and_restore(host))
				break;
			pr_err("%s: msmsdcc_bam_dml_reset_and_restore returned error. %d attempts left.\n",
					mmc_hostname(host->mmc), --retries);
		}

		/* check if BAM reset succeeded or not */
		if (host->sps.reset_bam) {
			pr_err("%s: bam reset failed. Not processing the new request (CMD%d)\n",
				mmc_hostname(host->mmc), mrq->cmd->opcode);
			error = EAGAIN;
			goto bad_state;
		}
	}

	/*
	 * Check if DLL retuning is required? if yes, perform it here before
	 * starting new request.
	 */
	if (host->tuning_needed && !host->tuning_in_progress &&
	    !host->tuning_done) {
		pr_debug("%s: %s: execute_tuning for timing mode = %d\n",
			 mmc_hostname(mmc), __func__, host->mmc->ios.timing);
		if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104)
			msmsdcc_execute_tuning(mmc,
					       MMC_SEND_TUNING_BLOCK);
		else if (host->mmc->ios.timing == MMC_TIMING_MMC_HS200)
			msmsdcc_execute_tuning(mmc,
					       MMC_SEND_TUNING_BLOCK_HS200);
	}

	if (host->eject) {
		error = ENOMEDIUM;
		goto card_ejected;
	}

	WARN(host->curr.mrq, "%s: %s: New request (CMD%d) received while"
	     " other request (CMD%d) is in progress\n",
	     mmc_hostname(host->mmc), __func__,
	     mrq->cmd->opcode, host->curr.mrq->cmd->opcode);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Set timeout value to 10 secs (or more in case of buggy cards)
	 */
	if ((mmc->card) && (mmc->card->quirks & MMC_QUIRK_INAND_DATA_TIMEOUT))
		host->curr.req_tout_ms = 20000;
	else
		host->curr.req_tout_ms = MSM_MMC_REQ_TIMEOUT;
	/*
	 * Kick the software request timeout timer here with the timeout
	 * value identified above
	 */
	mod_timer(&host->req_tout_timer,
		  (jiffies +
		   msecs_to_jiffies(host->curr.req_tout_ms)));

	host->curr.mrq = mrq;
	if (mrq->sbc) {
		mrq->sbc->mrq = mrq;
		mrq->sbc->data = mrq->data;
	}

	if (mrq->data && (mrq->data->flags & MMC_DATA_WRITE)) {
		if (msmsdcc_is_wait_for_auto_prog_done(host, mrq)) {
			host->curr.wait_for_auto_prog_done = true;
		} else {
			/* SDIO extended write or CMD54: needs the dummy
			 * CMD52 after completion. */
			if ((mrq->cmd->opcode == SD_IO_RW_EXTENDED) ||
			    (mrq->cmd->opcode == 54))
				host->dummy_52_needed = 1;
		}

		if ((mrq->cmd->opcode == MMC_WRITE_BLOCK) ||
		    (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK))
			host->curr.use_wr_data_pend = true;
	}

	msmsdcc_request_start(host, mrq);

	spin_unlock_irqrestore(&host->lock, flags);
	return;

bad_state:
	msmsdcc_dump_sdcc_state(host);
card_ejected:
	/* Fail the request back to the MMC core without starting it. */
	mrq->cmd->error = -error;
	if (mrq->data) {
		mrq->data->error = -error;
		mrq->data->bytes_xfered = 0;
	}
	mmc_request_done(mmc, mrq);
}

/*
 * Set @vreg to [min_uV, max_uV], skipping regulators that don't
 * support voltage setting.  Returns regulator core error code.
 */
static inline int msmsdcc_vreg_set_voltage(struct msm_mmc_reg_data *vreg,
					   int min_uV, int max_uV)
{
	int rc = 0;

	if (vreg->set_voltage_sup) {
		rc = regulator_set_voltage(vreg->reg, min_uV, max_uV);
		if (rc) {
			pr_err("%s: regulator_set_voltage(%s) failed."
			       " min_uV=%d, max_uV=%d, rc=%d\n",
			       __func__, vreg->name, min_uV, max_uV, rc);
		}
	}

	return rc;
}

/* Read @vreg's current voltage (uV); negative on failure. */
static inline int msmsdcc_vreg_get_voltage(struct msm_mmc_reg_data *vreg)
{
	int rc = 0;

	rc = regulator_get_voltage(vreg->reg);
	if (rc < 0)
		pr_err("%s: regulator_get_voltage(%s) failed. rc=%d\n",
		       __func__, vreg->name, rc);

	return rc;
}

/*
 * Request an optimum-mode load of @uA_load on @vreg; positive return
 * values from the regulator core are success and normalized to 0.
 */
static inline int msmsdcc_vreg_set_optimum_mode(struct msm_mmc_reg_data *vreg,
						int uA_load)
{
	int rc = 0;

	/* regulators that do not support regulator_set_voltage also
	   do not support regulator_set_optimum_mode */
	if (vreg->set_voltage_sup) {
		rc = regulator_set_optimum_mode(vreg->reg, uA_load);
		if (rc < 0)
			pr_err("%s: regulator_set_optimum_mode(reg=%s, "
			       "uA_load=%d) failed. rc=%d\n", __func__,
			       vreg->name, uA_load, rc);
		else
			/* regulator_set_optimum_mode() can return non zero
			 * value even for success case.
			 */
			rc = 0;
	}

	return rc;
}

/*
 * Acquire the regulator handle for @vreg and sanity-check its
 * constraints.  Idempotent: returns immediately if already acquired.
 */
static inline int msmsdcc_vreg_init_reg(struct msm_mmc_reg_data *vreg,
				struct device *dev)
{
	int rc = 0;

	/* check if regulator is already initialized?
	 */
	if (vreg->reg)
		goto out;

	/* Get the regulator handle */
	vreg->reg = regulator_get(dev, vreg->name);
	if (IS_ERR(vreg->reg)) {
		rc = PTR_ERR(vreg->reg);
		pr_err("%s: regulator_get(%s) failed. rc=%d\n",
		       __func__, vreg->name, rc);
		goto out;
	}

	if (regulator_count_voltages(vreg->reg) > 0) {
		vreg->set_voltage_sup = 1;
		/* sanity check */
		if (!vreg->high_vol_level || !vreg->hpm_uA) {
			pr_err("%s: %s invalid constraints specified\n",
					__func__, vreg->name);
			rc = -EINVAL;
		}
	}

out:
	return rc;
}

/* Release the regulator handle taken by msmsdcc_vreg_init_reg(). */
static inline void msmsdcc_vreg_deinit_reg(struct msm_mmc_reg_data *vreg)
{
	if (vreg->reg)
		regulator_put(vreg->reg);
}

/* This init function should be called only once for each SDCC slot */
static int msmsdcc_vreg_init(struct msmsdcc_host *host, bool is_init)
{
	int rc = 0;
	struct msm_mmc_slot_reg_data *curr_slot;
	struct msm_mmc_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
	struct device *dev = mmc_dev(host->mmc);

	curr_slot = host->plat->vreg_data;
	if (!curr_slot)
		goto out;

	curr_vdd_reg = curr_slot->vdd_data;
	curr_vdd_io_reg = curr_slot->vdd_io_data;

	if (is_init) {
		/*
		 * Get the regulator handle from voltage regulator framework
		 * and then try to set the voltage level for the regulator
		 */
		if (curr_vdd_reg) {
			rc = msmsdcc_vreg_init_reg(curr_vdd_reg, dev);
			if (rc)
				goto out;
		}
		if (curr_vdd_io_reg) {
			rc = msmsdcc_vreg_init_reg(curr_vdd_io_reg, dev);
			if (rc)
				goto vdd_reg_deinit;
		}
		rc = msmsdcc_vreg_reset(host);
		if (rc)
			pr_err("msmsdcc.%d vreg reset failed (%d)\n",
			       host->pdev_id, rc);
		goto out;
	} else {
		/* Deregister all regulators from regulator framework */
		goto vdd_io_reg_deinit;
	}
vdd_io_reg_deinit:
	if (curr_vdd_io_reg)
		msmsdcc_vreg_deinit_reg(curr_vdd_io_reg);
vdd_reg_deinit:
	if (curr_vdd_reg)
		msmsdcc_vreg_deinit_reg(curr_vdd_reg);
out:
	return rc;
}

/*
 * Enable @vreg: request HPM load, set the high voltage level if not
 * yet enabled, then enable the regulator.
 */
static int msmsdcc_vreg_enable(struct msm_mmc_reg_data *vreg)
{
	int rc = 0;

	/* Put regulator in HPM (high power mode) */
	rc = msmsdcc_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
	if (rc < 0)
		goto out;

	if (!vreg->is_enabled) {
		/* Set voltage level */
		rc = msmsdcc_vreg_set_voltage(vreg, vreg->high_vol_level,
					    vreg->high_vol_level);
		if (rc)
			goto out;

		rc = regulator_enable(vreg->reg);
		if (rc) {
			pr_err("%s: regulator_enable(%s) failed. rc=%d\n",
			       __func__, vreg->name, rc);
			goto out;
		}
		vreg->is_enabled = true;
	}

out:
	return rc;
}

/*
 * Disable @vreg, honoring always_on/lpm/reset_at_init policy flags:
 * normal regulators are disabled and dropped to 0 uA / min voltage;
 * always_on regulators are put in LPM at runtime, or disabled once at
 * boot when reset_at_init is set.
 */
static int msmsdcc_vreg_disable(struct msm_mmc_reg_data *vreg, bool is_init)
{
	int rc = 0;

	/* Never disable regulator marked as always_on */
	if (vreg->is_enabled && !vreg->always_on) {
		rc = regulator_disable(vreg->reg);
		if (rc) {
			pr_err("%s: regulator_disable(%s) failed. rc=%d\n",
			       __func__, vreg->name, rc);
			goto out;
		}
		vreg->is_enabled = false;

		rc = msmsdcc_vreg_set_optimum_mode(vreg, 0);
		if (rc < 0)
			goto out;

		/* Set min. voltage level to 0 */
		rc = msmsdcc_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
		if (rc)
			goto out;
	} else if (vreg->is_enabled && vreg->always_on) {
		if (!is_init && vreg->lpm_sup) {
			/* Put always_on regulator in LPM (low power mode) */
			rc = msmsdcc_vreg_set_optimum_mode(vreg,
							   vreg->lpm_uA);
			if (rc < 0)
				goto out;
		} else if (is_init && vreg->reset_at_init) {
			/**
			 * The regulator might not actually be disabled if it
			 * is shared and in use by other drivers.
			 */
			rc = regulator_disable(vreg->reg);
			if (rc) {
				pr_err("%s: regulator_disable(%s) failed at "
					"bootup. rc=%d\n",
					__func__, vreg->name, rc);
				goto out;
			}
			vreg->is_enabled = false;
		}
	}
out:
	return rc;
}

/*
 * Enable or disable both slot regulators (vdd, vdd_io).  The
 * board-specific #ifdef ladders toggle the external TFLASH level
 * shifter around the regulator transitions on Samsung JF/JACTIVE
 * hardware revisions; the GPIO/MPP used depends on board revision.
 */
static int msmsdcc_setup_vreg(struct msmsdcc_host *host, bool enable,
		bool is_init)
{
	int rc = 0, i;
	struct msm_mmc_slot_reg_data *curr_slot;
	struct msm_mmc_reg_data *vreg_table[2];

	curr_slot = host->plat->vreg_data;
	if (!curr_slot) {
		rc = -EINVAL;
		goto out;
	}

#if !defined(CONFIG_MACH_JFVE_EUR)
#ifdef CONFIG_MMC_MSM_SDC4_SUPPORT
	if (!enable) {
#if defined(CONFIG_MACH_JF_ATT) || defined(CONFIG_MACH_JF_TMO) || defined(CONFIG_MACH_JF_EUR) || \
	defined(CONFIG_MACH_JACTIVE_ATT) || defined(CONFIG_MACH_JACTIVE_EUR)
		if (system_rev != BOARD_REV07) { /* TI Level Shifter */
			if (system_rev < BOARD_REV08 && host->pdev_id == 4)
#else /* VZW/SPT/USCC */
		if (system_rev != BOARD_REV08) { /* TI Level Shifter */
			if (system_rev < BOARD_REV09 && host->pdev_id == 4)
#endif
				/* Disable level shifter */
				gpio_set_value(60, 0); /* TFLASH_LS_EN */
#if defined(CONFIG_MACH_JF_ATT) || defined(CONFIG_MACH_JF_TMO) || defined(CONFIG_MACH_JF_EUR) || \
	defined(CONFIG_MACH_JACTIVE_ATT) || defined(CONFIG_MACH_JACTIVE_EUR)
			else if (system_rev >= BOARD_REV08 && host->pdev_id == 2)
#else /* VZW/SPT/USCC/KOR */
			else if (system_rev >= BOARD_REV09 && host->pdev_id == 2)
#endif
#if defined(CONFIG_MACH_JF_DCM)
				ice_gpiox_set(FPGA_GPIO_TFLASH_LS_EN, 0);
#elif defined(CONFIG_MACH_JF_SKT) || defined(CONFIG_MACH_JF_KTT) || defined(CONFIG_MACH_JF_LGT)
				pm8xxx_mpp_config(GPIO_TFLASH_LS_EN, &tflash_ls_en_mpp_low);
#else
				gpio_set_value(64, 0); /* TFLASH_LS_EN */
#endif
			mdelay(1);
		}
	}
#endif
#endif

	vreg_table[0] = curr_slot->vdd_data;
	vreg_table[1] = curr_slot->vdd_io_data;

	for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
		if (vreg_table[i]) {
			if (enable)
				rc = msmsdcc_vreg_enable(vreg_table[i]);
			else
				rc = msmsdcc_vreg_disable(vreg_table[i],
						is_init);
			if (rc)
				goto out;
		}
	}
#if !defined(CONFIG_MACH_JFVE_EUR)
#ifdef CONFIG_MMC_MSM_SDC4_SUPPORT
	if (enable) {
		mdelay(1);
#if defined(CONFIG_MACH_JF_ATT) || defined(CONFIG_MACH_JF_TMO) || defined(CONFIG_MACH_JF_EUR) || \
	defined(CONFIG_MACH_JACTIVE_ATT) || defined(CONFIG_MACH_JACTIVE_EUR)
		if (system_rev < BOARD_REV08 && host->pdev_id == 4)
#else /* VZW/SPT/USCC */
		if (system_rev < BOARD_REV09 && host->pdev_id == 4)
#endif
			/* Enable level shifter */
			gpio_set_value(60, 1); /* TFLASH_LS_EN */
#if defined(CONFIG_MACH_JF_ATT) || defined(CONFIG_MACH_JF_TMO) || defined(CONFIG_MACH_JF_EUR) || \
	defined(CONFIG_MACH_JACTIVE_ATT) || defined(CONFIG_MACH_JACTIVE_EUR)
		else if (system_rev >= BOARD_REV08 && host->pdev_id == 2)
#else /* VZW/SPT/USCC/KOR */
		else if (system_rev >= BOARD_REV09 && host->pdev_id == 2)
#endif
#if defined(CONFIG_MACH_JF_DCM)
			ice_gpiox_set(FPGA_GPIO_TFLASH_LS_EN, 1);
#elif defined(CONFIG_MACH_JF_SKT) || defined(CONFIG_MACH_JF_KTT) || defined(CONFIG_MACH_JF_LGT)
			pm8xxx_mpp_config(GPIO_TFLASH_LS_EN, &tflash_ls_en_mpp_high);
#else
			gpio_set_value(64, 1); /* TFLASH_LS_EN */
#endif
		mdelay(1);
	} else {
		mdelay(1);
#if defined(CONFIG_MACH_JF_ATT) || defined(CONFIG_MACH_JF_TMO) || defined(CONFIG_MACH_JF_EUR) || \
	defined(CONFIG_MACH_JACTIVE_ATT) || defined(CONFIG_MACH_JACTIVE_EUR)
		if (system_rev == BOARD_REV07) { /* Toshiba Level Shifter */
			if (system_rev < BOARD_REV08 && host->pdev_id == 4)
#else /* VZW/SPT/USCC */
		if (system_rev == BOARD_REV08) { /* Toshiba Level Shifter */
			if (system_rev < BOARD_REV09 && host->pdev_id == 4)
#endif
				/* Disable level shifter */
				gpio_set_value(60, 0); /* TFLASH_LS_EN */
			mdelay(1);
		}
	}
#endif
#endif
out:
	return rc;
}

/*
 * Reset vreg by ensuring it is off during probe.
 * A call
 * to enable vreg is needed to balance disable vreg
 */
static int msmsdcc_vreg_reset(struct msmsdcc_host *host)
{
	int rc;

	rc = msmsdcc_setup_vreg(host, 1, true);
	if (rc)
		return rc;
	rc = msmsdcc_setup_vreg(host, 0, true);
	return rc;
}

/* Selector for msmsdcc_set_vdd_io_vol(): which target level to apply. */
enum vdd_io_level {
	/* set vdd_io_data->low_vol_level */
	VDD_IO_LOW,
	/* set vdd_io_data->high_vol_level */
	VDD_IO_HIGH,
	/*
	 * set whatever there in voltage_level (third argument) of
	 * msmsdcc_set_vdd_io_vol() function.
	 */
	VDD_IO_SET_LEVEL,
};

/*
 * This function returns the current VDD IO voltage level.
 * Returns negative value if it fails to read the voltage level
 * Returns 0 if regulator was disabled or if VDD_IO (and VDD)
 * regulator were not defined for host.
 */
static int msmsdcc_get_vdd_io_vol(struct msmsdcc_host *host)
{
	int rc = 0;

	if (host->plat->vreg_data) {
		struct msm_mmc_reg_data *io_reg =
			host->plat->vreg_data->vdd_io_data;

		/*
		 * If vdd_io is not defined, then we can consider that
		 * IO voltage is same as VDD.
		 */
		if (!io_reg)
			io_reg = host->plat->vreg_data->vdd_data;

		if (io_reg && io_reg->is_enabled)
			rc = msmsdcc_vreg_get_voltage(io_reg);
	}

	return rc;
}

/*
 * This function updates the IO pad power switch bit in MCI_CLK register
 * based on currrent IO pad voltage level.
 * NOTE: This function assumes that host lock was not taken by caller.
 */
static void msmsdcc_update_io_pad_pwr_switch(struct msmsdcc_host *host)
{
	int rc = 0;
	unsigned long flags;

	if (!is_io_pad_pwr_switch(host))
		return;

	rc = msmsdcc_get_vdd_io_vol(host);

	spin_lock_irqsave(&host->lock, flags);
	/*
	 * Dual voltage pad is the SDCC's (chipset) functionality and not all
	 * the SDCC instances support the dual voltage pads.
	 * For dual-voltage pad (1.8v/3.3v), SW should set IO_PAD_PWR_SWITCH
	 * bit before using the pads in 1.8V mode.
	 * For regular, not dual-voltage pads (including eMMC 1.2v/1.8v pads),
	 * IO_PAD_PWR_SWITCH bit is a don't care.
	 * But we don't have an option to know (by reading some SDCC register)
	 * that a particular SDCC instance supports dual voltage pads or not,
	 * so we simply set the IO_PAD_PWR_SWITCH bit for low voltage IO
	 * (1.8v/1.2v). For regular (not dual-voltage pads), this bit value
	 * is anyway ignored.
	 */
	if (rc > 0 && rc < 2700000)
		host->io_pad_pwr_switch = 1;
	else
		host->io_pad_pwr_switch = 0;

	/* Only touch the register while the interface clocks are running. */
	if (atomic_read(&host->clks_on)) {
		if (host->io_pad_pwr_switch)
			writel_relaxed((readl_relaxed(host->base + MMCICLOCK) |
					IO_PAD_PWR_SWITCH),
					host->base + MMCICLOCK);
		else
			writel_relaxed((readl_relaxed(host->base + MMCICLOCK) &
					~IO_PAD_PWR_SWITCH),
					host->base + MMCICLOCK);
		msmsdcc_sync_reg_wr(host);
	}
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Set the VDD IO regulator to the level selected by @level (low/high
 * from platform data, or the explicit @voltage_level).  No-op if the
 * VDD IO regulator is absent or disabled.
 */
static int msmsdcc_set_vdd_io_vol(struct msmsdcc_host *host,
				  enum vdd_io_level level,
				  unsigned int voltage_level)
{
	int rc = 0;
	int set_level;

	if (host->plat->vreg_data) {
		struct msm_mmc_reg_data *vdd_io_reg =
			host->plat->vreg_data->vdd_io_data;

		if (vdd_io_reg && vdd_io_reg->is_enabled) {
			switch (level) {
			case VDD_IO_LOW:
				set_level = vdd_io_reg->low_vol_level;
				break;
			case VDD_IO_HIGH:
				set_level = vdd_io_reg->high_vol_level;
				break;
			case VDD_IO_SET_LEVEL:
				set_level = voltage_level;
				break;
			default:
				pr_err("%s: %s: invalid argument level = %d",
				       mmc_hostname(host->mmc), __func__,
				       level);
				rc = -EINVAL;
				goto out;
			}
			rc = msmsdcc_vreg_set_voltage(vdd_io_reg,
						      set_level, set_level);
		}
	}

out:
	return rc;
}

/* Power-save clock gating applies above the 400 kHz init clock. */
static inline int msmsdcc_is_pwrsave(struct msmsdcc_host *host)
{
	if (host->clk_rate > 400000 && msmsdcc_pwrsave)
		return 1;
	return 0;
}

/*
 * Any function calling msmsdcc_setup_clocks must
 * acquire clk_mutex. May sleep.
 */
static int msmsdcc_setup_clocks(struct msmsdcc_host *host, bool enable)
{
	int rc = 0;

	if (enable && !atomic_read(&host->clks_on)) {
		/* Enable order: bus clock -> pclk -> host clock. */
		if (!IS_ERR_OR_NULL(host->bus_clk)) {
			rc = clk_prepare_enable(host->bus_clk);
			if (rc) {
				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto out;
			}
		}
		if (!IS_ERR(host->pclk)) {
			rc = clk_prepare_enable(host->pclk);
			if (rc) {
				pr_err("%s: %s: failed to enable the pclk with error %d\n",
					mmc_hostname(host->mmc), __func__, rc);
				goto disable_bus;
			}
		}
		rc = clk_prepare_enable(host->clk);
		if (rc) {
			pr_err("%s: %s: failed to enable the host-clk with error %d\n",
				mmc_hostname(host->mmc), __func__, rc);
			goto disable_pclk;
		}
		mb();
		msmsdcc_delay(host);
		atomic_set(&host->clks_on, 1);
	} else if (!enable && atomic_read(&host->clks_on)) {
		/* Drain outstanding register writes before gating. */
		mb();
		msmsdcc_delay(host);
		clk_disable_unprepare(host->clk);
		if (!IS_ERR(host->pclk))
			clk_disable_unprepare(host->pclk);
		if (!IS_ERR_OR_NULL(host->bus_clk))
			clk_disable_unprepare(host->bus_clk);
		atomic_set(&host->clks_on, 0);
	}
	goto out;

disable_pclk:
	if (!IS_ERR_OR_NULL(host->pclk))
		clk_disable_unprepare(host->pclk);
disable_bus:
	if (!IS_ERR_OR_NULL(host->bus_clk))
		clk_disable_unprepare(host->bus_clk);
out:
	return rc;
}

/*
 * Map a requested clock rate to the nearest supported rate at or below
 * it, using the platform's rate table when present, else the
 * fmin/fmid/fmax triple.  Requests below the minimum clamp up to it.
 */
static inline unsigned int msmsdcc_get_sup_clk_rate(struct msmsdcc_host *host,
						unsigned int req_clk)
{
	unsigned int sel_clk = -1;

	if (req_clk < msmsdcc_get_min_sup_clk_rate(host)) {
		sel_clk = msmsdcc_get_min_sup_clk_rate(host);
		goto out;
	}

	if (host->plat->sup_clk_table && host->plat->sup_clk_cnt) {
		unsigned char cnt;

		/* Table is scanned in order; keep the last rate <= req_clk. */
		for (cnt = 0; cnt < host->plat->sup_clk_cnt; cnt++) {
			if (host->plat->sup_clk_table[cnt] > req_clk)
				break;
			else if (host->plat->sup_clk_table[cnt] == req_clk) {
				sel_clk = host->plat->sup_clk_table[cnt];
				break;
			} else
				sel_clk = host->plat->sup_clk_table[cnt];
		}
	} else {
		if ((req_clk < host->plat->msmsdcc_fmax) &&
			(req_clk > host->plat->msmsdcc_fmid))
			sel_clk = host->plat->msmsdcc_fmid;
		else
			sel_clk = req_clk;
	}

out:
	return sel_clk;
}

/* Lowest supported clock rate: first table entry, else fmin. */
static inline unsigned int msmsdcc_get_min_sup_clk_rate(
				struct msmsdcc_host *host)
{
	if (host->plat->sup_clk_table && host->plat->sup_clk_cnt)
		return host->plat->sup_clk_table[0];
	else
		return host->plat->msmsdcc_fmin;
}

/* Highest supported clock rate: last table entry, else fmax. */
static inline unsigned int msmsdcc_get_max_sup_clk_rate(
				struct msmsdcc_host *host)
{
	if (host->plat->sup_clk_table && host->plat->sup_clk_cnt)
		return host->plat->sup_clk_table[host->plat->sup_clk_cnt - 1];
	else
		return host->plat->msmsdcc_fmax;
}

/*
 * Request (or free) all slot GPIOs from platform data.  On a failed
 * request, previously acquired GPIOs are released before returning.
 * always_on GPIOs are requested once and never freed.
 */
static int msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable)
{
	struct msm_mmc_gpio_data *curr;
	int i, rc = 0;

	curr = host->plat->pin_data->gpio_data;
	for (i = 0; i < curr->size; i++) {
		if (!gpio_is_valid(curr->gpio[i].no)) {
			rc = -EINVAL;
			pr_err("%s: Invalid gpio = %d\n",
				mmc_hostname(host->mmc), curr->gpio[i].no);
			goto free_gpios;
		}
		if (enable) {
			if (curr->gpio[i].is_always_on &&
				curr->gpio[i].is_enabled)
				continue;
			rc = gpio_request(curr->gpio[i].no,
					  curr->gpio[i].name);
			if (rc) {
				pr_err("%s: gpio_request(%d, %s) failed %d\n",
					mmc_hostname(host->mmc),
					curr->gpio[i].no,
					curr->gpio[i].name, rc);
				goto free_gpios;
			}
			curr->gpio[i].is_enabled = true;
		} else {
			if (curr->gpio[i].is_always_on)
				continue;
			gpio_free(curr->gpio[i].no);
			curr->gpio[i].is_enabled = false;
		}
	}
	goto out;

free_gpios:
	/* Unwind everything acquired so far in this call. */
	for (i--; i >= 0; i--) {
		gpio_free(curr->gpio[i].no);
		curr->gpio[i].is_enabled = false;
	}
out:
	return rc;
}

/*
 * Apply the platform's TLMM pad drive-strength and pull settings for
 * the active or inactive state.  Always succeeds.
 */
static int msmsdcc_setup_pad(struct msmsdcc_host *host, bool enable)
{
	struct msm_mmc_pad_data *curr;
	int i;

	curr = host->plat->pin_data->pad_data;
	for (i = 0; i < curr->drv->size; i++) {
		if (enable)
			msm_tlmm_set_hdrive(curr->drv->on[i].no,
					    curr->drv->on[i].val);
		else
			msm_tlmm_set_hdrive(curr->drv->off[i].no,
					    curr->drv->off[i].val);
	}

	for (i = 0; i < curr->pull->size; i++) {
		if (enable)
			msm_tlmm_set_pull(curr->pull->on[i].no,
					  curr->pull->on[i].val);
		else
			msm_tlmm_set_pull(curr->pull->off[i].no,
					  curr->pull->off[i].val);
	}

	return 0;
}

/* Configure slot pins via GPIO or pad data (continues past this view). */
static u32 msmsdcc_setup_pins(struct msmsdcc_host *host, bool enable)
{
	int rc
= 0; if (!host->plat->pin_data || host->plat->pin_data->cfg_sts == enable) return 0; if (host->plat->pin_data->is_gpio) rc = msmsdcc_setup_gpio(host, enable); else rc = msmsdcc_setup_pad(host, enable); if (!rc) host->plat->pin_data->cfg_sts = enable; return rc; } static int msmsdcc_cfg_mpm_sdiowakeup(struct msmsdcc_host *host, unsigned mode) { int ret = 0; unsigned int pin = host->plat->mpm_sdiowakeup_int; if (!pin) return 0; switch (mode) { case SDC_DAT1_DISABLE: ret = msm_mpm_enable_pin(pin, 0); break; case SDC_DAT1_ENABLE: ret = msm_mpm_set_pin_type(pin, IRQ_TYPE_LEVEL_LOW); ret = msm_mpm_enable_pin(pin, 1); break; case SDC_DAT1_ENWAKE: ret = msm_mpm_set_pin_wake(pin, 1); break; case SDC_DAT1_DISWAKE: ret = msm_mpm_set_pin_wake(pin, 0); break; default: ret = -EINVAL; break; } return ret; } static u32 msmsdcc_setup_pwr(struct msmsdcc_host *host, struct mmc_ios *ios) { u32 pwr = 0; int ret = 0; struct mmc_host *mmc = host->mmc; if (host->plat->translate_vdd && !host->sdio_gpio_lpm) ret = host->plat->translate_vdd(mmc_dev(mmc), ios->vdd); else if (!host->plat->translate_vdd && !host->sdio_gpio_lpm) ret = msmsdcc_setup_vreg(host, !!ios->vdd, false); if (ret) { pr_err("%s: Failed to setup voltage regulators\n", mmc_hostname(host->mmc)); goto out; } switch (ios->power_mode) { case MMC_POWER_OFF: pwr = MCI_PWR_OFF; msmsdcc_cfg_mpm_sdiowakeup(host, SDC_DAT1_DISABLE); /* * If VDD IO rail is always on, set low voltage for VDD * IO rail when slot is not in use (like when card is not * present or during system suspend). 
*/ msmsdcc_set_vdd_io_vol(host, VDD_IO_LOW, 0); msmsdcc_update_io_pad_pwr_switch(host); msmsdcc_setup_pins(host, false); break; case MMC_POWER_UP: /* writing PWR_UP bit is redundant */ pwr = MCI_PWR_UP; msmsdcc_cfg_mpm_sdiowakeup(host, SDC_DAT1_ENABLE); msmsdcc_set_vdd_io_vol(host, VDD_IO_HIGH, 0); msmsdcc_update_io_pad_pwr_switch(host); msmsdcc_setup_pins(host, true); break; case MMC_POWER_ON: pwr = MCI_PWR_ON; break; } out: return pwr; } static void msmsdcc_enable_irq_wake(struct msmsdcc_host *host) { unsigned int wakeup_irq; wakeup_irq = (host->plat->sdiowakeup_irq) ? host->plat->sdiowakeup_irq : host->core_irqres->start; if (!host->irq_wake_enabled) { enable_irq_wake(wakeup_irq); host->irq_wake_enabled = true; } } static void msmsdcc_disable_irq_wake(struct msmsdcc_host *host) { unsigned int wakeup_irq; wakeup_irq = (host->plat->sdiowakeup_irq) ? host->plat->sdiowakeup_irq : host->core_irqres->start; if (host->irq_wake_enabled) { disable_irq_wake(wakeup_irq); host->irq_wake_enabled = false; } } /* Returns required bandwidth in Bytes per Sec */ static unsigned int msmsdcc_get_bw_required(struct msmsdcc_host *host, struct mmc_ios *ios) { unsigned int bw; bw = host->clk_rate; /* * For DDR mode, SDCC controller clock will be at * the double rate than the actual clock that goes to card. 
*/ if (ios->bus_width == MMC_BUS_WIDTH_4) bw /= 2; else if (ios->bus_width == MMC_BUS_WIDTH_1) bw /= 8; return bw; } static int msmsdcc_msm_bus_get_vote_for_bw(struct msmsdcc_host *host, unsigned int bw) { unsigned int *table = host->plat->msm_bus_voting_data->bw_vecs; unsigned int size = host->plat->msm_bus_voting_data->bw_vecs_size; int i; if (host->msm_bus_vote.is_max_bw_needed && bw) return host->msm_bus_vote.max_bw_vote; for (i = 0; i < size; i++) { if (bw <= table[i]) break; } if (i && (i == size)) i--; return i; } static int msmsdcc_msm_bus_register(struct msmsdcc_host *host) { int rc = 0; struct msm_bus_scale_pdata *use_cases; if (host->plat->msm_bus_voting_data && host->plat->msm_bus_voting_data->use_cases && host->plat->msm_bus_voting_data->bw_vecs && host->plat->msm_bus_voting_data->bw_vecs_size) { use_cases = host->plat->msm_bus_voting_data->use_cases; host->msm_bus_vote.client_handle = msm_bus_scale_register_client(use_cases); } else { return 0; } if (!host->msm_bus_vote.client_handle) { pr_err("%s: msm_bus_scale_register_client() failed\n", mmc_hostname(host->mmc)); rc = -EFAULT; } else { /* cache the vote index for minimum and maximum bandwidth */ host->msm_bus_vote.min_bw_vote = msmsdcc_msm_bus_get_vote_for_bw(host, 0); host->msm_bus_vote.max_bw_vote = msmsdcc_msm_bus_get_vote_for_bw(host, UINT_MAX); #if defined(CONFIG_BCM4334) || defined(CONFIG_BCM4334_MODULE) if (host->pdev_id == 4) host->msm_bus_vote.is_max_bw_needed = 1; #endif } return rc; } static void msmsdcc_msm_bus_unregister(struct msmsdcc_host *host) { if (host->msm_bus_vote.client_handle) msm_bus_scale_unregister_client( host->msm_bus_vote.client_handle); } /* * This function must be called with host lock acquired. * Caller of this function should also ensure that msm bus client * handle is not null. 
*/ static inline int msmsdcc_msm_bus_set_vote(struct msmsdcc_host *host, int vote, unsigned long flags) { int rc = 0; if (vote != host->msm_bus_vote.curr_vote) { spin_unlock_irqrestore(&host->lock, flags); rc = msm_bus_scale_client_update_request( host->msm_bus_vote.client_handle, vote); if (rc) pr_err("%s: msm_bus_scale_client_update_request() failed." " bus_client_handle=0x%x, vote=%d, err=%d\n", mmc_hostname(host->mmc), host->msm_bus_vote.client_handle, vote, rc); spin_lock_irqsave(&host->lock, flags); if (!rc) host->msm_bus_vote.curr_vote = vote; } return rc; } /* * Internal work. Work to set 0 bandwidth for msm bus. */ static void msmsdcc_msm_bus_work(struct work_struct *work) { struct msmsdcc_host *host = container_of(work, struct msmsdcc_host, msm_bus_vote.vote_work.work); unsigned long flags; if (!host->msm_bus_vote.client_handle) return; spin_lock_irqsave(&host->lock, flags); /* don't vote for 0 bandwidth if any request is in progress */ if (!host->curr.mrq) msmsdcc_msm_bus_set_vote(host, host->msm_bus_vote.min_bw_vote, flags); else pr_warning("%s: %s: SDCC transfer in progress. skipping" " bus voting to 0 bandwidth\n", mmc_hostname(host->mmc), __func__); spin_unlock_irqrestore(&host->lock, flags); } /* * This function cancels any scheduled delayed work * and sets the bus vote based on ios argument. * If "ios" argument is NULL, bandwidth required is 0 else * calculate the bandwidth based on ios parameters. */ static void msmsdcc_msm_bus_cancel_work_and_set_vote( struct msmsdcc_host *host, struct mmc_ios *ios) { unsigned long flags; unsigned int bw; int vote; if (!host->msm_bus_vote.client_handle) return; bw = ios ? 
msmsdcc_get_bw_required(host, ios) : 0; cancel_delayed_work_sync(&host->msm_bus_vote.vote_work); spin_lock_irqsave(&host->lock, flags); vote = msmsdcc_msm_bus_get_vote_for_bw(host, bw); msmsdcc_msm_bus_set_vote(host, vote, flags); spin_unlock_irqrestore(&host->lock, flags); } /* This function queues a work which will set the bandwidth requiement to 0 */ static void msmsdcc_msm_bus_queue_work(struct msmsdcc_host *host) { unsigned long flags; if (!host->msm_bus_vote.client_handle) return; spin_lock_irqsave(&host->lock, flags); if (host->msm_bus_vote.min_bw_vote != host->msm_bus_vote.curr_vote) queue_delayed_work(system_nrt_wq, &host->msm_bus_vote.vote_work, msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY)); spin_unlock_irqrestore(&host->lock, flags); } static void msmsdcc_cfg_sdio_wakeup(struct msmsdcc_host *host, bool enable_wakeup_irq) { struct mmc_host *mmc = host->mmc; /* * SDIO_AL clients has different mechanism of handling LPM through * sdio_al driver itself. The sdio wakeup interrupt is configured as * part of that. Here, we are interested only in clients like WLAN. */ if (!(mmc->card && mmc_card_sdio(mmc->card)) || host->plat->is_sdio_al_client) goto out; if (!host->sdcc_suspended) { /* * When MSM is not in power collapse and we * are disabling clocks, enable bit 22 in MASK0 * to handle asynchronous SDIO interrupts. */ if (enable_wakeup_irq) { writel_relaxed(MCI_SDIOINTMASK, host->base + MMCIMASK0); mb(); } else { writel_relaxed(MCI_SDIOINTMASK, host->base + MMCICLEAR); msmsdcc_sync_reg_wr(host); } goto out; } else if (!mmc_card_wake_sdio_irq(mmc)) { /* * Wakeup MSM only if SDIO function drivers set * MMC_PM_WAKE_SDIO_IRQ flag in their suspend call. */ goto out; } if (enable_wakeup_irq) { if (!host->plat->sdiowakeup_irq) { /* * When there is no gpio line that can be configured * as wakeup interrupt handle it by configuring * asynchronous sdio interrupts and DAT1 line. 
*/ writel_relaxed(MCI_SDIOINTMASK, host->base + MMCIMASK0); mb(); msmsdcc_cfg_mpm_sdiowakeup(host, SDC_DAT1_ENWAKE); /* configure sdcc core interrupt as wakeup interrupt */ msmsdcc_enable_irq_wake(host); } else { /* Let gpio line handle wakeup interrupt */ writel_relaxed(0, host->base + MMCIMASK0); mb(); if (host->sdio_wakeupirq_disabled) { host->sdio_wakeupirq_disabled = 0; /* configure gpio line as wakeup interrupt */ msmsdcc_enable_irq_wake(host); enable_irq(host->plat->sdiowakeup_irq); } } } else { if (!host->plat->sdiowakeup_irq) { /* * We may not have cleared bit 22 in the interrupt * handler as the clocks might be off at that time. */ writel_relaxed(MCI_SDIOINTMASK, host->base + MMCICLEAR); msmsdcc_sync_reg_wr(host); msmsdcc_cfg_mpm_sdiowakeup(host, SDC_DAT1_DISWAKE); msmsdcc_disable_irq_wake(host); } else if (!host->sdio_wakeupirq_disabled) { disable_irq_nosync(host->plat->sdiowakeup_irq); msmsdcc_disable_irq_wake(host); host->sdio_wakeupirq_disabled = 1; } } out: return; } static void msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct msmsdcc_host *host = mmc_priv(mmc); u32 clk = 0, pwr = 0; int rc; unsigned long flags; unsigned int clock; /* * Disable SDCC core interrupt until set_ios is completed. * This avoids any race conditions with interrupt raised * when turning on/off the clocks. One possible * scenario is SDIO operational interrupt while the clock * is turned off. * host->lock is being released intermittently below. * Thus, prevent concurrent access to host. 
*/ mutex_lock(&host->clk_mutex); DBG(host, "ios->clock = %u\n", ios->clock); spin_lock_irqsave(&host->lock, flags); if (!host->sdcc_irq_disabled) { disable_irq_nosync(host->core_irqres->start); host->sdcc_irq_disabled = 1; } spin_unlock_irqrestore(&host->lock, flags); /* Make sure sdcc core irq is synchronized */ synchronize_irq(host->core_irqres->start); pwr = msmsdcc_setup_pwr(host, ios); spin_lock_irqsave(&host->lock, flags); if (ios->clock) { spin_unlock_irqrestore(&host->lock, flags); rc = msmsdcc_setup_clocks(host, true); if (rc) goto out; spin_lock_irqsave(&host->lock, flags); writel_relaxed(host->mci_irqenable, host->base + MMCIMASK0); mb(); msmsdcc_cfg_sdio_wakeup(host, false); clock = msmsdcc_get_sup_clk_rate(host, ios->clock); /* * For DDR50 mode, controller needs clock rate to be * double than what is required on the SD card CLK pin. */ if (ios->timing == MMC_TIMING_UHS_DDR50) { clock = msmsdcc_get_sup_clk_rate(host, ios->clock * 2); } if (clock != host->clk_rate) { spin_unlock_irqrestore(&host->lock, flags); rc = clk_set_rate(host->clk, clock); spin_lock_irqsave(&host->lock, flags); if (rc < 0) pr_err("%s: failed to set clk rate %u\n", mmc_hostname(mmc), clock); host->clk_rate = clock; host->reg_write_delay = (1 + ((3 * USEC_PER_SEC) / (host->clk_rate ? 
host->clk_rate : msmsdcc_get_min_sup_clk_rate(host)))); } /* * give atleast 2 MCLK cycles delay for clocks * and SDCC core to stabilize */ mb(); msmsdcc_delay(host); clk |= MCI_CLK_ENABLE; } if (ios->bus_width == MMC_BUS_WIDTH_8) clk |= MCI_CLK_WIDEBUS_8; else if (ios->bus_width == MMC_BUS_WIDTH_4) clk |= MCI_CLK_WIDEBUS_4; else clk |= MCI_CLK_WIDEBUS_1; if (msmsdcc_is_pwrsave(host)) clk |= MCI_CLK_PWRSAVE; clk |= MCI_CLK_FLOWENA; host->tuning_needed = 0; /* * Select the controller timing mode according * to current bus speed mode */ if (host->clk_rate > (100 * 1000 * 1000) && (ios->timing == MMC_TIMING_UHS_SDR104 || ios->timing == MMC_TIMING_MMC_HS200)) { /* Card clock frequency must be > 100MHz to enable tuning */ clk |= (4 << 14); host->tuning_needed = 1; /* } else if (ios->timing == MMC_TIMING_UHS_DDR50) { clk |= (3 << 14); */ } else { /* clk |= (2 << 14);*/ /* feedback clock */ if (ios->timing == MMC_TIMING_UHS_DDR50) clk |= (3 << 14); else clk |= (2 << 14); /* feedback clock */ host->tuning_done = false; if (atomic_read(&host->clks_on)) { /* Write 1 to DLL_RST bit of MCI_DLL_CONFIG register */ writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) | MCI_DLL_RST), host->base + MCI_DLL_CONFIG); /* Write 1 to DLL_PDN bit of MCI_DLL_CONFIG register */ writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) | MCI_DLL_PDN), host->base + MCI_DLL_CONFIG); } } /* Select free running MCLK as input clock of cm_dll_sdc4 */ clk |= (2 << 23); if (host->io_pad_pwr_switch) clk |= IO_PAD_PWR_SWITCH; /* Don't write into registers if clocks are disabled */ if (atomic_read(&host->clks_on)) { if (readl_relaxed(host->base + MMCICLOCK) != clk) { writel_relaxed(clk, host->base + MMCICLOCK); msmsdcc_sync_reg_wr(host); } if (readl_relaxed(host->base + MMCIPOWER) != pwr) { host->pwr = pwr; writel_relaxed(pwr, host->base + MMCIPOWER); msmsdcc_sync_reg_wr(host); } } if (!(clk & MCI_CLK_ENABLE) && atomic_read(&host->clks_on)) { msmsdcc_cfg_sdio_wakeup(host, true); 
spin_unlock_irqrestore(&host->lock, flags); /* * May get a wake-up interrupt the instant we disable the * clocks. This would disable the wake-up interrupt. */ msmsdcc_setup_clocks(host, false); spin_lock_irqsave(&host->lock, flags); } if (host->tuning_in_progress) WARN(!atomic_read(&host->clks_on), "tuning_in_progress but SDCC clocks are OFF\n"); /* Let interrupts be disabled if the host is powered off */ if (ios->power_mode != MMC_POWER_OFF && host->sdcc_irq_disabled) { enable_irq(host->core_irqres->start); host->sdcc_irq_disabled = 0; } spin_unlock_irqrestore(&host->lock, flags); out: mutex_unlock(&host->clk_mutex); } int msmsdcc_set_pwrsave(struct mmc_host *mmc, int pwrsave) { struct msmsdcc_host *host = mmc_priv(mmc); u32 clk; clk = readl_relaxed(host->base + MMCICLOCK); pr_debug("Changing to pwr_save=%d", pwrsave); if (pwrsave && msmsdcc_is_pwrsave(host)) clk |= MCI_CLK_PWRSAVE; else clk &= ~MCI_CLK_PWRSAVE; writel_relaxed(clk, host->base + MMCICLOCK); msmsdcc_sync_reg_wr(host); return 0; } static int msmsdcc_get_ro(struct mmc_host *mmc) { int status = -ENOSYS; struct msmsdcc_host *host = mmc_priv(mmc); if (host->plat->wpswitch) { status = host->plat->wpswitch(mmc_dev(mmc)); } else if (gpio_is_valid(host->plat->wpswitch_gpio)) { status = gpio_request(host->plat->wpswitch_gpio, "SD_WP_Switch"); if (status) { pr_err("%s: %s: Failed to request GPIO %d\n", mmc_hostname(mmc), __func__, host->plat->wpswitch_gpio); } else { status = gpio_direction_input( host->plat->wpswitch_gpio); if (!status) { /* * Wait for atleast 300ms as debounce * time for GPIO input to stabilize. 
*/ msleep(300); status = gpio_get_value_cansleep( host->plat->wpswitch_gpio); status ^= !host->plat->is_wpswitch_active_low; } gpio_free(host->plat->wpswitch_gpio); } } if (status < 0) status = -ENOSYS; pr_debug("%s: Card read-only status %d\n", __func__, status); return status; } static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct msmsdcc_host *host = mmc_priv(mmc); unsigned long flags; /* * We may come here with clocks turned off in that case don't * attempt to write into MASK0 register. While turning on the * clocks mci_irqenable will be written to MASK0 register. */ spin_lock_irqsave(&host->lock, flags); if (enable) { host->mci_irqenable |= MCI_SDIOINTOPERMASK; if (atomic_read(&host->clks_on)) { writel_relaxed(readl_relaxed(host->base + MMCIMASK0) | MCI_SDIOINTOPERMASK, host->base + MMCIMASK0); mb(); } } else { host->mci_irqenable &= ~MCI_SDIOINTOPERMASK; if (atomic_read(&host->clks_on)) { writel_relaxed(readl_relaxed(host->base + MMCIMASK0) & ~MCI_SDIOINTOPERMASK, host->base + MMCIMASK0); mb(); } } spin_unlock_irqrestore(&host->lock, flags); } #ifdef CONFIG_PM_RUNTIME static void msmsdcc_print_rpm_info(struct msmsdcc_host *host) { struct device *dev = mmc_dev(host->mmc); pr_err("%s: PM: sdcc_suspended=%d, pending_resume=%d, sdcc_suspending=%d\n", mmc_hostname(host->mmc), host->sdcc_suspended, host->pending_resume, host->sdcc_suspending); pr_err("%s: RPM: runtime_status=%d, usage_count=%d," " is_suspended=%d, disable_depth=%d, runtime_error=%d," " request_pending=%d, request=%d\n", mmc_hostname(host->mmc), dev->power.runtime_status, atomic_read(&dev->power.usage_count), dev->power.is_suspended, dev->power.disable_depth, dev->power.runtime_error, dev->power.request_pending, dev->power.request); } static int msmsdcc_enable(struct mmc_host *mmc) { int rc = 0; struct device *dev = mmc->parent; struct msmsdcc_host *host = mmc_priv(mmc); msmsdcc_pm_qos_update_latency(host, 1); if (mmc->card && mmc_card_sdio(mmc->card)) goto out; if 
(host->sdcc_suspended && host->pending_resume) { host->pending_resume = false; pm_runtime_get_noresume(dev); rc = msmsdcc_runtime_resume(dev); goto skip_get_sync; } if (dev->power.runtime_status == RPM_SUSPENDING) { if (mmc->suspend_task == current) { pm_runtime_get_noresume(dev); goto out; } } else if (dev->power.runtime_status == RPM_RESUMING) { pm_runtime_get_noresume(dev); goto out; } rc = pm_runtime_get_sync(dev); skip_get_sync: if (rc < 0) { WARN(1, "%s: %s: failed with error %d\n", mmc_hostname(mmc), __func__, rc); msmsdcc_print_rpm_info(host); return rc; } out: msmsdcc_msm_bus_cancel_work_and_set_vote(host, &mmc->ios); return 0; } static int msmsdcc_disable(struct mmc_host *mmc) { int rc; struct msmsdcc_host *host = mmc_priv(mmc); msmsdcc_pm_qos_update_latency(host, 0); if (mmc->card && mmc_card_sdio(mmc->card)) { rc = 0; goto out; } if (host->plat->disable_runtime_pm) return -ENOTSUPP; rc = pm_runtime_put_sync(mmc->parent); if (rc < 0) { WARN(1, "%s: %s: failed with error %d\n", mmc_hostname(mmc), __func__, rc); msmsdcc_print_rpm_info(host); return rc; } out: msmsdcc_msm_bus_queue_work(host); return rc; } #else static void msmsdcc_print_rpm_info(struct msmsdcc_host *host) {} static int msmsdcc_enable(struct mmc_host *mmc) { struct device *dev = mmc->parent; struct msmsdcc_host *host = mmc_priv(mmc); unsigned long flags; int rc = 0; msmsdcc_pm_qos_update_latency(host, 1); if (mmc->card && mmc_card_sdio(mmc->card)) { rc = 0; goto out; } if (host->sdcc_suspended && host->pending_resume) { host->pending_resume = false; rc = msmsdcc_runtime_resume(dev); goto out; } mutex_lock(&host->clk_mutex); rc = msmsdcc_setup_clocks(host, true); mutex_unlock(&host->clk_mutex); out: if (rc < 0) { pr_info("%s: %s: failed with error %d", mmc_hostname(mmc), __func__, rc); msmsdcc_pm_qos_update_latency(host, 0); return rc; } msmsdcc_msm_bus_cancel_work_and_set_vote(host, &mmc->ios); return 0; } static int msmsdcc_disable(struct mmc_host *mmc) { struct msmsdcc_host *host = 
mmc_priv(mmc); unsigned long flags; int rc = 0; msmsdcc_pm_qos_update_latency(host, 0); if (mmc->card && mmc_card_sdio(mmc->card)) goto out; mutex_lock(&host->clk_mutex); rc = msmsdcc_setup_clocks(host, false); mutex_unlock(&host->clk_mutex); if (rc) { msmsdcc_pm_qos_update_latency(host, 1); return rc; } out: msmsdcc_msm_bus_queue_work(host); return rc; } #endif static int msmsdcc_switch_io_voltage(struct mmc_host *mmc, struct mmc_ios *ios) { struct msmsdcc_host *host = mmc_priv(mmc); unsigned long flags; int rc = 0; switch (ios->signal_voltage) { case MMC_SIGNAL_VOLTAGE_330: /* Set VDD IO to high voltage range (2.7v - 3.6v) */ rc = msmsdcc_set_vdd_io_vol(host, VDD_IO_HIGH, 0); if (!rc) msmsdcc_update_io_pad_pwr_switch(host); goto out; case MMC_SIGNAL_VOLTAGE_180: break; case MMC_SIGNAL_VOLTAGE_120: /* * For eMMC cards, VDD_IO voltage range must be changed * only if it operates in HS200 SDR 1.2V mode or in * DDR 1.2V mode. */ rc = msmsdcc_set_vdd_io_vol(host, VDD_IO_SET_LEVEL, 1200000); if (!rc) msmsdcc_update_io_pad_pwr_switch(host); goto out; default: /* invalid selection. don't do anything */ rc = -EINVAL; goto out; } /* * If we are here means voltage switch from high voltage to * low voltage is required */ spin_lock_irqsave(&host->lock, flags); /* * Poll on MCIDATIN_3_0 and MCICMDIN bits of MCI_TEST_INPUT * register until they become all zeros. */ if (readl_relaxed(host->base + MCI_TEST_INPUT) & (0xF << 1)) { rc = -EAGAIN; pr_err("%s: %s: MCIDATIN_3_0 is still not all zeros", mmc_hostname(mmc), __func__); goto out_unlock; } /* Stop SD CLK output. */ writel_relaxed((readl_relaxed(host->base + MMCICLOCK) | MCI_CLK_PWRSAVE), host->base + MMCICLOCK); msmsdcc_sync_reg_wr(host); spin_unlock_irqrestore(&host->lock, flags); /* * Switch VDD Io from high voltage range (2.7v - 3.6v) to * low voltage range (1.7v - 1.95v). 
*/ rc = msmsdcc_set_vdd_io_vol(host, VDD_IO_LOW, 0); if (rc) goto out; msmsdcc_update_io_pad_pwr_switch(host); /* Wait 5 ms for the voltage regulater in the card to become stable. */ usleep_range(5000, 5500); spin_lock_irqsave(&host->lock, flags); /* Disable PWRSAVE would make sure that SD CLK is always running */ writel_relaxed((readl_relaxed(host->base + MMCICLOCK) & ~MCI_CLK_PWRSAVE), host->base + MMCICLOCK); msmsdcc_sync_reg_wr(host); spin_unlock_irqrestore(&host->lock, flags); /* * If MCIDATIN_3_0 and MCICMDIN bits of MCI_TEST_INPUT register * don't become all ones within 1 ms then a Voltage Switch * sequence has failed and a power cycle to the card is required. * Otherwise Voltage Switch sequence is completed successfully. */ usleep_range(1000, 1500); spin_lock_irqsave(&host->lock, flags); if ((readl_relaxed(host->base + MCI_TEST_INPUT) & (0xF << 1)) != (0xF << 1)) { pr_err("%s: %s: MCIDATIN_3_0 are still not all ones", mmc_hostname(mmc), __func__); rc = -EAGAIN; goto out_unlock; } out_unlock: /* Enable PWRSAVE */ writel_relaxed((readl_relaxed(host->base + MMCICLOCK) | MCI_CLK_PWRSAVE), host->base + MMCICLOCK); msmsdcc_sync_reg_wr(host); spin_unlock_irqrestore(&host->lock, flags); out: return rc; } static inline void msmsdcc_cm_sdc4_dll_set_freq(struct msmsdcc_host *host) { u32 mclk_freq = 0; /* Program the MCLK value to MCLK_FREQ bit field */ if (host->clk_rate <= 112000000) mclk_freq = 0; else if (host->clk_rate <= 125000000) mclk_freq = 1; else if (host->clk_rate <= 137000000) mclk_freq = 2; else if (host->clk_rate <= 150000000) mclk_freq = 3; else if (host->clk_rate <= 162000000) mclk_freq = 4; else if (host->clk_rate <= 175000000) mclk_freq = 5; else if (host->clk_rate <= 187000000) mclk_freq = 6; else if (host->clk_rate <= 200000000) mclk_freq = 7; writel_relaxed(((readl_relaxed(host->base + MCI_DLL_CONFIG) & ~(7 << 24)) | (mclk_freq << 24)), host->base + MCI_DLL_CONFIG); } /* Initialize the DLL (Programmable Delay Line ) */ static int 
msmsdcc_init_cm_sdc4_dll(struct msmsdcc_host *host) { int rc = 0; unsigned long flags; u32 wait_cnt; spin_lock_irqsave(&host->lock, flags); /* * Make sure that clock is always enabled when DLL * tuning is in progress. Keeping PWRSAVE ON may * turn off the clock. So let's disable the PWRSAVE * here and re-enable it once tuning is completed. */ writel_relaxed((readl_relaxed(host->base + MMCICLOCK) & ~MCI_CLK_PWRSAVE), host->base + MMCICLOCK); msmsdcc_sync_reg_wr(host); /* Write 1 to DLL_RST bit of MCI_DLL_CONFIG register */ writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) | MCI_DLL_RST), host->base + MCI_DLL_CONFIG); /* Write 1 to DLL_PDN bit of MCI_DLL_CONFIG register */ writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) | MCI_DLL_PDN), host->base + MCI_DLL_CONFIG); msmsdcc_cm_sdc4_dll_set_freq(host); /* Write 0 to DLL_RST bit of MCI_DLL_CONFIG register */ writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) & ~MCI_DLL_RST), host->base + MCI_DLL_CONFIG); /* Write 0 to DLL_PDN bit of MCI_DLL_CONFIG register */ writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) & ~MCI_DLL_PDN), host->base + MCI_DLL_CONFIG); /* Set DLL_EN bit to 1. */ writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) | MCI_DLL_EN), host->base + MCI_DLL_CONFIG); /* Set CK_OUT_EN bit to 1. */ writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) | MCI_CK_OUT_EN), host->base + MCI_DLL_CONFIG); wait_cnt = 50; /* Wait until DLL_LOCK bit of MCI_DLL_STATUS register becomes '1' */ while (!(readl_relaxed(host->base + MCI_DLL_STATUS) & MCI_DLL_LOCK)) { /* max. 
wait for 50us sec for LOCK bit to be set */ if (--wait_cnt == 0) { pr_err("%s: %s: DLL failed to LOCK\n", mmc_hostname(host->mmc), __func__); rc = -ETIMEDOUT; goto out; } /* wait for 1us before polling again */ udelay(1); } out: /* re-enable PWRSAVE */ writel_relaxed((readl_relaxed(host->base + MMCICLOCK) | MCI_CLK_PWRSAVE), host->base + MMCICLOCK); msmsdcc_sync_reg_wr(host); spin_unlock_irqrestore(&host->lock, flags); return rc; } static inline int msmsdcc_dll_poll_ck_out_en(struct msmsdcc_host *host, u8 poll) { int rc = 0; u32 wait_cnt = 50; u8 ck_out_en = 0; /* poll for MCI_CK_OUT_EN bit. max. poll time = 50us */ ck_out_en = !!(readl_relaxed(host->base + MCI_DLL_CONFIG) & MCI_CK_OUT_EN); while (ck_out_en != poll) { if (--wait_cnt == 0) { pr_err("%s: %s: CK_OUT_EN bit is not %d\n", mmc_hostname(host->mmc), __func__, poll); rc = -ETIMEDOUT; goto out; } udelay(1); ck_out_en = !!(readl_relaxed(host->base + MCI_DLL_CONFIG) & MCI_CK_OUT_EN); } out: return rc; } /* * Enable a CDR circuit in CM_SDC4_DLL block to enable automatic * calibration sequence. This function should be called before * enabling AUTO_CMD19 bit in MCI_CMD register for block read * commands (CMD17/CMD18). * * This function gets called when host spinlock acquired. */ static int msmsdcc_enable_cdr_cm_sdc4_dll(struct msmsdcc_host *host) { int rc = 0; u32 config; config = readl_relaxed(host->base + MCI_DLL_CONFIG); config |= MCI_CDR_EN; config &= ~(MCI_CDR_EXT_EN | MCI_CK_OUT_EN); writel_relaxed(config, host->base + MCI_DLL_CONFIG); /* Wait until CK_OUT_EN bit of MCI_DLL_CONFIG register becomes '0' */ rc = msmsdcc_dll_poll_ck_out_en(host, 0); if (rc) goto err_out; /* Set CK_OUT_EN bit of MCI_DLL_CONFIG register to 1. 
*/ writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) | MCI_CK_OUT_EN), host->base + MCI_DLL_CONFIG); /* Wait until CK_OUT_EN bit of MCI_DLL_CONFIG register becomes '1' */ rc = msmsdcc_dll_poll_ck_out_en(host, 1); if (rc) goto err_out; goto out; err_out: pr_err("%s: %s: Failed\n", mmc_hostname(host->mmc), __func__); out: return rc; } static int msmsdcc_config_cm_sdc4_dll_phase(struct msmsdcc_host *host, u8 phase) { int rc = 0; u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4, 0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9, 0x8}; unsigned long flags; u32 config; spin_lock_irqsave(&host->lock, flags); config = readl_relaxed(host->base + MCI_DLL_CONFIG); config &= ~(MCI_CDR_EN | MCI_CK_OUT_EN); config |= (MCI_CDR_EXT_EN | MCI_DLL_EN); writel_relaxed(config, host->base + MCI_DLL_CONFIG); /* Wait until CK_OUT_EN bit of MCI_DLL_CONFIG register becomes '0' */ rc = msmsdcc_dll_poll_ck_out_en(host, 0); if (rc) goto err_out; /* * Write the selected DLL clock output phase (0 ... 15) * to CDR_SELEXT bit field of MCI_DLL_CONFIG register. */ writel_relaxed(((readl_relaxed(host->base + MCI_DLL_CONFIG) & ~(0xF << 20)) | (grey_coded_phase_table[phase] << 20)), host->base + MCI_DLL_CONFIG); /* Set CK_OUT_EN bit of MCI_DLL_CONFIG register to 1. 
*/ writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) | MCI_CK_OUT_EN), host->base + MCI_DLL_CONFIG); /* Wait until CK_OUT_EN bit of MCI_DLL_CONFIG register becomes '1' */ rc = msmsdcc_dll_poll_ck_out_en(host, 1); if (rc) goto err_out; config = readl_relaxed(host->base + MCI_DLL_CONFIG); config |= MCI_CDR_EN; config &= ~MCI_CDR_EXT_EN; writel_relaxed(config, host->base + MCI_DLL_CONFIG); goto out; err_out: pr_err("%s: %s: Failed to set DLL phase: %d\n", mmc_hostname(host->mmc), __func__, phase); out: spin_unlock_irqrestore(&host->lock, flags); return rc; } /* * Find out the greatest range of consecuitive selected * DLL clock output phases that can be used as sampling * setting for SD3.0 UHS-I card read operation (in SDR104 * timing mode) or for eMMC4.5 card read operation (in HS200 * timing mode). * Select the 3/4 of the range and configure the DLL with the * selected DLL clock output phase. */ static int find_most_appropriate_phase(struct msmsdcc_host *host, u8 *phase_table, u8 total_phases) { #define MAX_PHASES 16 int ret; u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} }; u8 phases_per_row[MAX_PHASES] = {0}; int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0; int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0; bool phase_0_found = false, phase_15_found = false; if (!total_phases || (total_phases > MAX_PHASES)) { pr_err("%s: %s: invalid argument: total_phases=%d\n", mmc_hostname(host->mmc), __func__, total_phases); return -EINVAL; } for (cnt = 0; cnt < total_phases; cnt++) { ranges[row_index][col_index] = phase_table[cnt]; phases_per_row[row_index] += 1; col_index++; if ((cnt + 1) == total_phases) { continue; /* check if next phase in phase_table is consecutive or not */ } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) { row_index++; col_index = 0; } } if (row_index >= MAX_PHASES) return -EINVAL; /* Check if phase-0 is present in first valid window? 
*/ if (!ranges[0][0]) { phase_0_found = true; phase_0_raw_index = 0; /* Check if cycle exist between 2 valid windows */ for (cnt = 1; cnt <= row_index; cnt++) { if (phases_per_row[cnt]) { for (i = 0; i < phases_per_row[cnt]; i++) { if (ranges[cnt][i] == 15) { phase_15_found = true; phase_15_raw_index = cnt; break; } } } } } /* If 2 valid windows form cycle then merge them as single window */ if (phase_0_found && phase_15_found) { /* number of phases in raw where phase 0 is present */ u8 phases_0 = phases_per_row[phase_0_raw_index]; /* number of phases in raw where phase 15 is present */ u8 phases_15 = phases_per_row[phase_15_raw_index]; if (phases_0 + phases_15 >= MAX_PHASES) /* * If there are more than 1 phase windows then total * number of phases in both the windows should not be * more than or equal to MAX_PHASES. */ return -EINVAL; /* Merge 2 cyclic windows */ i = phases_15; for (cnt = 0; cnt < phases_0; cnt++) { ranges[phase_15_raw_index][i] = ranges[phase_0_raw_index][cnt]; if (++i >= MAX_PHASES) break; } phases_per_row[phase_0_raw_index] = 0; phases_per_row[phase_15_raw_index] = phases_15 + phases_0; } for (cnt = 0; cnt <= row_index; cnt++) { if (phases_per_row[cnt] > curr_max) { curr_max = phases_per_row[cnt]; selected_row_index = cnt; } } i = ((curr_max * 3) / 4); if (i) i--; ret = (int)ranges[selected_row_index][i]; if (ret >= MAX_PHASES) { ret = -EINVAL; pr_err("%s: %s: invalid phase selected=%d\n", mmc_hostname(host->mmc), __func__, ret); } return ret; } static int msmsdcc_execute_tuning(struct mmc_host *mmc, u32 opcode) { int rc = 0; struct msmsdcc_host *host = mmc_priv(mmc); unsigned long flags; u8 phase, *data_buf, tuned_phases[16], tuned_phase_cnt = 0; const u32 *tuning_block_pattern = tuning_block_64; int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */ bool is_tuning_all_phases; pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__); /* Tuning is only required for SDR104 modes */ if (!host->tuning_needed) { rc = 0; goto exit; } 
spin_lock_irqsave(&host->lock, flags); WARN(!host->pwr, "SDCC power is turned off\n"); WARN(!atomic_read(&host->clks_on), "SDCC clocks are turned off\n"); WARN(host->sdcc_irq_disabled, "SDCC IRQ is disabled\n"); host->tuning_in_progress = 1; if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) && (mmc->ios.bus_width == MMC_BUS_WIDTH_8)) { tuning_block_pattern = tuning_block_128; size = sizeof(tuning_block_128); } spin_unlock_irqrestore(&host->lock, flags); /* first of all reset the tuning block */ rc = msmsdcc_init_cm_sdc4_dll(host); if (rc) goto out; data_buf = kmalloc(size, GFP_KERNEL); if (!data_buf) { rc = -ENOMEM; goto out; } /* phase = 0; */ is_tuning_all_phases = !(host->mmc->card && (host->saved_tuning_phase != INVALID_TUNING_PHASE)); retry: if (is_tuning_all_phases) phase = 0; /* start from phase 0 during init */ else phase = (u8)host->saved_tuning_phase; do { struct mmc_command cmd = {0}; struct mmc_data data = {0}; struct mmc_request mrq = { .cmd = &cmd, .data = &data }; struct scatterlist sg; /* set the phase in delay line hw block */ rc = msmsdcc_config_cm_sdc4_dll_phase(host, phase); if (rc) goto kfree; cmd.opcode = opcode; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; data.blksz = size; data.blocks = 1; data.flags = MMC_DATA_READ; data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */ data.sg = &sg; data.sg_len = 1; sg_init_one(&sg, data_buf, size); memset(data_buf, 0, size); mmc_wait_for_req(mmc, &mrq); if (!cmd.error && !data.error && !memcmp(data_buf, tuning_block_pattern, size)) { /* tuning is successful at this tuning point */ if (!is_tuning_all_phases) goto kfree; tuned_phases[tuned_phase_cnt++] = phase; pr_err("%s: %s: found good phase = %d\n", mmc_hostname(mmc), __func__, phase); } else if (!is_tuning_all_phases) { pr_debug("%s: tuning failed at saved phase (%d), retrying\n", mmc_hostname(mmc), (u32)phase); is_tuning_all_phases = true; goto retry; } } while (++phase < 16); if (tuned_phase_cnt) { rc = find_most_appropriate_phase(host, tuned_phases, 
tuned_phase_cnt); if (rc < 0) goto kfree; else phase = (u8)rc; /* * Finally set the selected phase in delay * line hw block. */ rc = msmsdcc_config_cm_sdc4_dll_phase(host, phase); if (rc) goto kfree; else host->saved_tuning_phase = phase; pr_err("%s: %s: finally setting the tuning phase to %d\n", mmc_hostname(mmc), __func__, phase); } else { /* tuning failed */ pr_err("%s: %s: no tuning point found\n", mmc_hostname(mmc), __func__); msmsdcc_dump_sdcc_state(host); rc = -EAGAIN; } kfree: kfree(data_buf); out: spin_lock_irqsave(&host->lock, flags); host->tuning_in_progress = 0; if (!rc || (host->pdev_id == 3)) host->tuning_done = true; spin_unlock_irqrestore(&host->lock, flags); exit: pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__); return rc; } static int msmsdcc_notify_load(struct mmc_host *mmc, enum mmc_load state) { int err = 0; unsigned long rate; struct msmsdcc_host *host = mmc_priv(mmc); if (IS_ERR_OR_NULL(host->bus_clk)) goto out; switch (state) { case MMC_LOAD_HIGH: rate = MSMSDCC_BUS_VOTE_MAX_RATE; break; case MMC_LOAD_LOW: rate = MSMSDCC_BUS_VOTE_MIN_RATE; break; default: err = -EINVAL; goto out; } if (rate != host->bus_clk_rate) { err = clk_set_rate(host->bus_clk, rate); if (err) pr_err("%s: %s: bus clk set rate %lu Hz err %d\n", mmc_hostname(mmc), __func__, rate, err); else host->bus_clk_rate = rate; } out: return err; } static const struct mmc_host_ops msmsdcc_ops = { .enable = msmsdcc_enable, .disable = msmsdcc_disable, .pre_req = msmsdcc_pre_req, .post_req = msmsdcc_post_req, .request = msmsdcc_request, .set_ios = msmsdcc_set_ios, .get_ro = msmsdcc_get_ro, .enable_sdio_irq = msmsdcc_enable_sdio_irq, .start_signal_voltage_switch = msmsdcc_switch_io_voltage, .execute_tuning = msmsdcc_execute_tuning, .notify_load = msmsdcc_notify_load, }; static unsigned int msmsdcc_slot_status(struct msmsdcc_host *host) { int status; unsigned int gpio_no = host->plat->status_gpio; status = gpio_request(gpio_no, "SD_HW_Detect"); if (status) { pr_err("%s: %s: Failed 
to request GPIO %d\n", mmc_hostname(host->mmc), __func__, gpio_no); } else { status = gpio_direction_input(gpio_no); if (!status) { status = gpio_get_value_cansleep(gpio_no); if (host->plat->is_status_gpio_active_low) status = !status; } gpio_free(gpio_no); } return status; } static void msmsdcc_check_status(unsigned long data) { struct msmsdcc_host *host = (struct msmsdcc_host *)data; unsigned int status; if (host->plat->status || gpio_is_valid(host->plat->status_gpio)) { if (host->plat->status) status = host->plat->status(mmc_dev(host->mmc)); else status = msmsdcc_slot_status(host); host->eject = !status; if (status ^ host->oldstat) { if (host->plat->status) pr_info("%s: Slot status change detected " "(%d -> %d)\n", mmc_hostname(host->mmc), host->oldstat, status); else if (host->plat->is_status_gpio_active_low) pr_info("%s: Slot status change detected " "(%d -> %d) and the card detect GPIO" " is ACTIVE_LOW\n", mmc_hostname(host->mmc), host->oldstat, status); else pr_info("%s: Slot status change detected " "(%d -> %d) and the card detect GPIO" " is ACTIVE_HIGH\n", mmc_hostname(host->mmc), host->oldstat, status); mmc_detect_change(host->mmc, 0); } host->oldstat = status; } else { mmc_detect_change(host->mmc, 0); } } static irqreturn_t msmsdcc_platform_status_irq(int irq, void *dev_id) { struct msmsdcc_host *host = dev_id; pr_debug("%s: %d\n", __func__, irq); msmsdcc_check_status((unsigned long) host); return IRQ_HANDLED; } static irqreturn_t msmsdcc_platform_sdiowakeup_irq(int irq, void *dev_id) { struct msmsdcc_host *host = dev_id; pr_debug("%s: SDIO Wake up IRQ : %d\n", mmc_hostname(host->mmc), irq); spin_lock(&host->lock); if (!host->sdio_wakeupirq_disabled) { disable_irq_nosync(irq); if (host->sdcc_suspended) { wake_lock(&host->sdio_wlock); msmsdcc_disable_irq_wake(host); } host->sdio_wakeupirq_disabled = 1; } if (host->plat->is_sdio_al_client) { wake_lock(&host->sdio_wlock); spin_unlock(&host->lock); mmc_signal_sdio_irq(host->mmc); goto out_unlocked; } 
spin_unlock(&host->lock); out_unlocked: return IRQ_HANDLED; } static void msmsdcc_status_notify_cb(int card_present, void *dev_id) { struct msmsdcc_host *host = dev_id; pr_debug("%s: card_present %d\n", mmc_hostname(host->mmc), card_present); msmsdcc_check_status((unsigned long) host); } static int msmsdcc_init_dma(struct msmsdcc_host *host) { memset(&host->dma, 0, sizeof(struct msmsdcc_dma_data)); host->dma.host = host; host->dma.channel = -1; host->dma.crci = -1; if (!host->dmares) return -ENODEV; host->dma.nc = dma_alloc_coherent(NULL, sizeof(struct msmsdcc_nc_dmadata), &host->dma.nc_busaddr, GFP_KERNEL); if (host->dma.nc == NULL) { pr_err("Unable to allocate DMA buffer\n"); return -ENOMEM; } memset(host->dma.nc, 0x00, sizeof(struct msmsdcc_nc_dmadata)); host->dma.cmd_busaddr = host->dma.nc_busaddr; host->dma.cmdptr_busaddr = host->dma.nc_busaddr + offsetof(struct msmsdcc_nc_dmadata, cmdptr); host->dma.channel = host->dmares->start; host->dma.crci = host->dma_crci_res->start; return 0; } #ifdef CONFIG_MMC_MSM_SPS_SUPPORT /** * Allocate and Connect a SDCC peripheral's SPS endpoint * * This function allocates endpoint context and * connect it with memory endpoint by calling * appropriate SPS driver APIs. * * Also registers a SPS callback function with * SPS driver * * This function should only be called once typically * during driver probe. * * @host - Pointer to sdcc host structure * @ep - Pointer to sps endpoint data structure * @is_produce - 1 means Producer endpoint * 0 means Consumer endpoint * * @return - 0 if successful else negative value. * */ static int msmsdcc_sps_init_ep_conn(struct msmsdcc_host *host, struct msmsdcc_sps_ep_conn_data *ep, bool is_producer) { int rc = 0; struct sps_pipe *sps_pipe_handle; struct sps_connect *sps_config = &ep->config; struct sps_register_event *sps_event = &ep->event; /* Allocate endpoint context */ sps_pipe_handle = sps_alloc_endpoint(); if (!sps_pipe_handle) { pr_err("%s: sps_alloc_endpoint() failed!!! 
is_producer=%d", mmc_hostname(host->mmc), is_producer); rc = -ENOMEM; goto out; } /* Get default connection configuration for an endpoint */ rc = sps_get_config(sps_pipe_handle, sps_config); if (rc) { pr_err("%s: sps_get_config() failed!!! pipe_handle=0x%x," " rc=%d", mmc_hostname(host->mmc), (u32)sps_pipe_handle, rc); goto get_config_err; } /* Modify the default connection configuration */ if (is_producer) { /* * For SDCC producer transfer, source should be * SDCC peripheral where as destination should * be system memory. */ sps_config->source = host->sps.bam_handle; sps_config->destination = SPS_DEV_HANDLE_MEM; /* Producer pipe will handle this connection */ sps_config->mode = SPS_MODE_SRC; sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS; } else { /* * For SDCC consumer transfer, source should be * system memory where as destination should * SDCC peripheral */ sps_config->source = SPS_DEV_HANDLE_MEM; sps_config->destination = host->sps.bam_handle; sps_config->mode = SPS_MODE_DEST; sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_EOT | SPS_O_ACK_TRANSFERS; } /* Producer pipe index */ sps_config->src_pipe_index = host->sps.src_pipe_index; /* Consumer pipe index */ sps_config->dest_pipe_index = host->sps.dest_pipe_index; /* * This event thresold value is only significant for BAM-to-BAM * transfer. It's ignored for BAM-to-System mode transfer. */ sps_config->event_thresh = 0x10; /* Allocate maximum descriptor fifo size */ sps_config->desc.size = SPS_MAX_DESC_FIFO_SIZE - (SPS_MAX_DESC_FIFO_SIZE % SPS_MAX_DESC_LENGTH); sps_config->desc.base = dma_alloc_coherent(mmc_dev(host->mmc), sps_config->desc.size, &sps_config->desc.phys_base, GFP_KERNEL); if (!sps_config->desc.base) { rc = -ENOMEM; pr_err("%s: dma_alloc_coherent() failed!!! 
Can't allocate buffer\n" , mmc_hostname(host->mmc)); goto get_config_err; } memset(sps_config->desc.base, 0x00, sps_config->desc.size); /* Establish connection between peripheral and memory endpoint */ rc = sps_connect(sps_pipe_handle, sps_config); if (rc) { pr_err("%s: sps_connect() failed!!! pipe_handle=0x%x," " rc=%d", mmc_hostname(host->mmc), (u32)sps_pipe_handle, rc); goto sps_connect_err; } sps_event->mode = SPS_TRIGGER_CALLBACK; sps_event->options = SPS_O_EOT; sps_event->callback = msmsdcc_sps_complete_cb; sps_event->xfer_done = NULL; sps_event->user = (void *)host; /* Register callback event for EOT (End of transfer) event. */ rc = sps_register_event(sps_pipe_handle, sps_event); if (rc) { pr_err("%s: sps_connect() failed!!! pipe_handle=0x%x," " rc=%d", mmc_hostname(host->mmc), (u32)sps_pipe_handle, rc); goto reg_event_err; } /* Now save the sps pipe handle */ ep->pipe_handle = sps_pipe_handle; pr_debug("%s: %s, success !!! %s: pipe_handle=0x%x," " desc_fifo.phys_base=0x%x\n", mmc_hostname(host->mmc), __func__, is_producer ? "READ" : "WRITE", (u32)sps_pipe_handle, sps_config->desc.phys_base); goto out; reg_event_err: sps_disconnect(sps_pipe_handle); sps_connect_err: dma_free_coherent(mmc_dev(host->mmc), sps_config->desc.size, sps_config->desc.base, sps_config->desc.phys_base); get_config_err: sps_free_endpoint(sps_pipe_handle); out: return rc; } /** * Disconnect and Deallocate a SDCC peripheral's SPS endpoint * * This function disconnect endpoint and deallocates * endpoint context. * * This function should only be called once typically * during driver remove. 
* * @host - Pointer to sdcc host structure * @ep - Pointer to sps endpoint data structure * */ static void msmsdcc_sps_exit_ep_conn(struct msmsdcc_host *host, struct msmsdcc_sps_ep_conn_data *ep) { struct sps_pipe *sps_pipe_handle = ep->pipe_handle; struct sps_connect *sps_config = &ep->config; struct sps_register_event *sps_event = &ep->event; sps_event->xfer_done = NULL; sps_event->callback = NULL; sps_register_event(sps_pipe_handle, sps_event); sps_disconnect(sps_pipe_handle); dma_free_coherent(mmc_dev(host->mmc), sps_config->desc.size, sps_config->desc.base, sps_config->desc.phys_base); sps_free_endpoint(sps_pipe_handle); } /** * Reset SDCC peripheral's SPS endpoint * * This function disconnects an endpoint. * * This function should be called for reseting * SPS endpoint when data transfer error is * encountered during data transfer. This * can be considered as soft reset to endpoint. * * This function should only be called if * msmsdcc_sps_init() is already called. * * @host - Pointer to sdcc host structure * @ep - Pointer to sps endpoint data structure * * @return - 0 if successful else negative value. */ static int msmsdcc_sps_reset_ep(struct msmsdcc_host *host, struct msmsdcc_sps_ep_conn_data *ep) { int rc = 0; struct sps_pipe *sps_pipe_handle = ep->pipe_handle; rc = sps_disconnect(sps_pipe_handle); if (rc) { pr_err("%s: %s: sps_disconnect() failed!!! pipe_handle=0x%x," " rc=%d", mmc_hostname(host->mmc), __func__, (u32)sps_pipe_handle, rc); goto out; } out: return rc; } /** * Restore SDCC peripheral's SPS endpoint * * This function connects an endpoint. * * This function should be called for restoring * SPS endpoint after data transfer error is * encountered during data transfer. This * can be considered as soft reset to endpoint. * * This function should only be called if * msmsdcc_sps_reset_ep() is called before. * * @host - Pointer to sdcc host structure * @ep - Pointer to sps endpoint data structure * * @return - 0 if successful else negative value. 
*/ static int msmsdcc_sps_restore_ep(struct msmsdcc_host *host, struct msmsdcc_sps_ep_conn_data *ep) { int rc = 0; struct sps_pipe *sps_pipe_handle = ep->pipe_handle; struct sps_connect *sps_config = &ep->config; struct sps_register_event *sps_event = &ep->event; /* Establish connection between peripheral and memory endpoint */ rc = sps_connect(sps_pipe_handle, sps_config); if (rc) { pr_err("%s: %s: sps_connect() failed!!! pipe_handle=0x%x," " rc=%d", mmc_hostname(host->mmc), __func__, (u32)sps_pipe_handle, rc); goto out; } /* Register callback event for EOT (End of transfer) event. */ rc = sps_register_event(sps_pipe_handle, sps_event); if (rc) { pr_err("%s: %s: sps_register_event() failed!!!" " pipe_handle=0x%x, rc=%d", mmc_hostname(host->mmc), __func__, (u32)sps_pipe_handle, rc); goto reg_event_err; } goto out; reg_event_err: sps_disconnect(sps_pipe_handle); out: return rc; } /** * Handle BAM device's global error condition * * This is an error handler for the SDCC bam device * * This function is registered as a callback with SPS-BAM * driver and will called in case there are an errors for * the SDCC BAM deivce. Any error conditions in the BAM * device are global and will be result in this function * being called once per device. * * This function will be called from the sps driver's * interrupt context. * * @sps_cb_case - indicates what error it is * @user - Pointer to sdcc host structure */ static void msmsdcc_sps_bam_global_irq_cb(enum sps_callback_case sps_cb_case, void *user) { struct msmsdcc_host *host = (struct msmsdcc_host *)user; struct mmc_request *mrq; unsigned long flags; int32_t error = 0; BUG_ON(!host); BUG_ON(!is_sps_mode(host)); if (sps_cb_case == SPS_CALLBACK_BAM_ERROR_IRQ) { /* Reset all endpoints along with resetting bam. 
*/ host->sps.reset_bam = true; pr_err("%s: BAM Global ERROR IRQ happened\n", mmc_hostname(host->mmc)); error = EAGAIN; } else if (sps_cb_case == SPS_CALLBACK_BAM_HRESP_ERR_IRQ) { /** * This means that there was an AHB access error and * the address we are trying to read/write is something * we dont have priviliges to do so. */ pr_err("%s: BAM HRESP_ERR_IRQ happened\n", mmc_hostname(host->mmc)); error = EACCES; } else { /** * This should not have happened ideally. If this happens * there is some seriously wrong. */ pr_err("%s: BAM global IRQ callback received, type:%d\n", mmc_hostname(host->mmc), (u32) sps_cb_case); error = EIO; } spin_lock_irqsave(&host->lock, flags); mrq = host->curr.mrq; if (mrq && mrq->cmd) { msmsdcc_dump_sdcc_state(host); if (!mrq->cmd->error) mrq->cmd->error = -error; if (host->curr.data) { if (mrq->data && !mrq->data->error) mrq->data->error = -error; host->curr.data_xfered = 0; if (host->sps.sg && is_sps_mode(host)) { /* Stop current SPS transfer */ msmsdcc_sps_exit_curr_xfer(host); } else { /* this condition should not have happened */ pr_err("%s: something is seriously wrong. "\ "Funtion: %s, line: %d\n", mmc_hostname(host->mmc), __func__, __LINE__); } } else { /* this condition should not have happened */ pr_err("%s: something is seriously wrong. Funtion: "\ "%s, line: %d\n", mmc_hostname(host->mmc), __func__, __LINE__); } } spin_unlock_irqrestore(&host->lock, flags); } /** * Initialize SPS HW connected with SDCC core * * This function register BAM HW resources with * SPS driver and then initialize 2 SPS endpoints * * This function should only be called once typically * during driver probe. * * @host - Pointer to sdcc host structure * * @return - 0 if successful else negative value. * */ static int msmsdcc_sps_init(struct msmsdcc_host *host) { int rc = 0; struct sps_bam_props bam = {0}; host->bam_base = ioremap(host->bam_memres->start, resource_size(host->bam_memres)); if (!host->bam_base) { pr_err("%s: BAM ioremap() failed!!! 
phys_addr=0x%x," " size=0x%x", mmc_hostname(host->mmc), host->bam_memres->start, (host->bam_memres->end - host->bam_memres->start)); rc = -ENOMEM; goto out; } bam.phys_addr = host->bam_memres->start; bam.virt_addr = host->bam_base; /* * This event thresold value is only significant for BAM-to-BAM * transfer. It's ignored for BAM-to-System mode transfer. */ bam.event_threshold = 0x10; /* Pipe event threshold */ /* * This threshold controls when the BAM publish * the descriptor size on the sideband interface. * SPS HW will be used for data transfer size even * less than SDCC FIFO size. So let's set BAM summing * thresold to SPS_MIN_XFER_SIZE bytes. */ bam.summing_threshold = SPS_MIN_XFER_SIZE; /* SPS driver wll handle the SDCC BAM IRQ */ bam.irq = (u32)host->bam_irqres->start; bam.manage = SPS_BAM_MGR_LOCAL; bam.callback = msmsdcc_sps_bam_global_irq_cb; bam.user = (void *)host; pr_info("%s: bam physical base=0x%x\n", mmc_hostname(host->mmc), (u32)bam.phys_addr); pr_info("%s: bam virtual base=0x%x\n", mmc_hostname(host->mmc), (u32)bam.virt_addr); /* Register SDCC Peripheral BAM device to SPS driver */ rc = sps_register_bam_device(&bam, &host->sps.bam_handle); if (rc) { pr_err("%s: sps_register_bam_device() failed!!! err=%d", mmc_hostname(host->mmc), rc); goto reg_bam_err; } pr_info("%s: BAM device registered. 
bam_handle=0x%x", mmc_hostname(host->mmc), host->sps.bam_handle); host->sps.src_pipe_index = SPS_SDCC_PRODUCER_PIPE_INDEX; host->sps.dest_pipe_index = SPS_SDCC_CONSUMER_PIPE_INDEX; rc = msmsdcc_sps_init_ep_conn(host, &host->sps.prod, SPS_PROD_PERIPHERAL); if (rc) goto sps_reset_err; rc = msmsdcc_sps_init_ep_conn(host, &host->sps.cons, SPS_CONS_PERIPHERAL); if (rc) goto cons_conn_err; pr_info("%s: Qualcomm MSM SDCC-BAM at 0x%016llx irq %d\n", mmc_hostname(host->mmc), (unsigned long long)host->bam_memres->start, (unsigned int)host->bam_irqres->start); goto out; cons_conn_err: msmsdcc_sps_exit_ep_conn(host, &host->sps.prod); sps_reset_err: sps_deregister_bam_device(host->sps.bam_handle); reg_bam_err: iounmap(host->bam_base); out: return rc; } /** * De-initialize SPS HW connected with SDCC core * * This function deinitialize SPS endpoints and then * deregisters BAM resources from SPS driver. * * This function should only be called once typically * during driver remove. * * @host - Pointer to sdcc host structure * */ static void msmsdcc_sps_exit(struct msmsdcc_host *host) { msmsdcc_sps_exit_ep_conn(host, &host->sps.cons); msmsdcc_sps_exit_ep_conn(host, &host->sps.prod); sps_deregister_bam_device(host->sps.bam_handle); iounmap(host->bam_base); } #endif /* CONFIG_MMC_MSM_SPS_SUPPORT */ static ssize_t show_polling(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); int poll; unsigned long flags; spin_lock_irqsave(&host->lock, flags); poll = !!(mmc->caps & MMC_CAP_NEEDS_POLL); spin_unlock_irqrestore(&host->lock, flags); return snprintf(buf, PAGE_SIZE, "%d\n", poll); } static ssize_t store_polling(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); int value; unsigned long flags; sscanf(buf, "%d", &value); spin_lock_irqsave(&host->lock, flags); if (value) { 
mmc->caps |= MMC_CAP_NEEDS_POLL; mmc_detect_change(host->mmc, 0); } else { mmc->caps &= ~MMC_CAP_NEEDS_POLL; } #ifdef CONFIG_HAS_EARLYSUSPEND host->polling_enabled = mmc->caps & MMC_CAP_NEEDS_POLL; #endif spin_unlock_irqrestore(&host->lock, flags); return count; } static ssize_t show_sdcc_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); return snprintf(buf, PAGE_SIZE, "%u\n", host->msm_bus_vote.is_max_bw_needed); } static ssize_t store_sdcc_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); uint32_t value; unsigned long flags; if (!kstrtou32(buf, 0, &value)) { spin_lock_irqsave(&host->lock, flags); host->msm_bus_vote.is_max_bw_needed = !!value; spin_unlock_irqrestore(&host->lock, flags); } return count; } static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); return snprintf(buf, PAGE_SIZE, "%u (Min 5 sec)\n", host->idle_tout / 1000); } static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); unsigned int long flags; int timeout; /* in secs */ if (!kstrtou32(buf, 0, &timeout) && (timeout > MSM_MMC_DEFAULT_IDLE_TIMEOUT / 1000)) { spin_lock_irqsave(&host->lock, flags); host->idle_tout = timeout * 1000; spin_unlock_irqrestore(&host->lock, flags); } return count; } static inline void set_auto_cmd_setting(struct device *dev, const char *buf, bool is_cmd19) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); unsigned int long flags; int temp; if (!kstrtou32(buf, 0, &temp)) { spin_lock_irqsave(&host->lock, 
flags); if (is_cmd19) host->en_auto_cmd19 = !!temp; else host->en_auto_cmd21 = !!temp; spin_unlock_irqrestore(&host->lock, flags); } } static ssize_t show_enable_auto_cmd19(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); return snprintf(buf, PAGE_SIZE, "%d\n", host->en_auto_cmd19); } static ssize_t store_enable_auto_cmd19(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { set_auto_cmd_setting(dev, buf, true); return count; } static ssize_t show_enable_auto_cmd21(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); return snprintf(buf, PAGE_SIZE, "%d\n", host->en_auto_cmd21); } static ssize_t store_enable_auto_cmd21(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { set_auto_cmd_setting(dev, buf, false); return count; } #ifdef CONFIG_HAS_EARLYSUSPEND static void msmsdcc_early_suspend(struct early_suspend *h) { struct msmsdcc_host *host = container_of(h, struct msmsdcc_host, early_suspend); unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->polling_enabled = host->mmc->caps & MMC_CAP_NEEDS_POLL; host->mmc->caps &= ~MMC_CAP_NEEDS_POLL; spin_unlock_irqrestore(&host->lock, flags); }; static void msmsdcc_late_resume(struct early_suspend *h) { struct msmsdcc_host *host = container_of(h, struct msmsdcc_host, early_suspend); unsigned long flags; if (host->polling_enabled) { spin_lock_irqsave(&host->lock, flags); host->mmc->caps |= MMC_CAP_NEEDS_POLL; mmc_detect_change(host->mmc, 0); spin_unlock_irqrestore(&host->lock, flags); } }; #endif static void msmsdcc_print_regs(const char *name, void __iomem *base, u32 phys_base, unsigned int no_of_regs) { unsigned int i; if (!base) return; pr_err("===== %s: Register Dumps @phys_base=0x%x, @virt_base=0x%x" " =====\n", name, phys_base, (u32)base); for (i = 
0; i < no_of_regs; i = i + 4) { pr_err("Reg=0x%.2x: 0x%.8x, 0x%.8x, 0x%.8x, 0x%.8x\n", i*4, (u32)readl_relaxed(base + i*4), (u32)readl_relaxed(base + ((i+1)*4)), (u32)readl_relaxed(base + ((i+2)*4)), (u32)readl_relaxed(base + ((i+3)*4))); } } static void msmsdcc_dump_sdcc_state(struct msmsdcc_host *host) { /* Dump current state of SDCC clocks, power and irq */ pr_err("%s: SDCC PWR is %s\n", mmc_hostname(host->mmc), (host->pwr ? "ON" : "OFF")); pr_err("%s: SDCC clks are %s, MCLK rate=%d\n", mmc_hostname(host->mmc), (atomic_read(&host->clks_on) ? "ON" : "OFF"), (u32)clk_get_rate(host->clk)); pr_err("%s: SDCC irq is %s\n", mmc_hostname(host->mmc), (host->sdcc_irq_disabled ? "disabled" : "enabled")); /* Now dump SDCC registers. Don't print FIFO registers */ if (atomic_read(&host->clks_on)) { msmsdcc_print_regs("SDCC-CORE", host->base, host->core_memres->start, 28); msmsdcc_print_regs("SDCC-DML", host->dml_base, host->dml_memres->start, 20); msmsdcc_print_regs("SDCC-BAM", host->bam_base, host->bam_memres->start, 20); pr_err("%s: MCI_TEST_INPUT = 0x%.8x\n", mmc_hostname(host->mmc), readl_relaxed(host->base + MCI_TEST_INPUT)); } if (host->curr.data) { if (!msmsdcc_is_dma_possible(host, host->curr.data)) pr_err("%s: PIO mode\n", mmc_hostname(host->mmc)); else if (is_dma_mode(host)) pr_err("%s: ADM mode: busy=%d, chnl=%d, crci=%d\n", mmc_hostname(host->mmc), host->dma.busy, host->dma.channel, host->dma.crci); else if (is_sps_mode(host)) { if (host->sps.busy && atomic_read(&host->clks_on)) msmsdcc_print_regs("SDCC-DML", host->dml_base, host->dml_memres->start, 16); pr_err("%s: SPS mode: busy=%d\n", mmc_hostname(host->mmc), host->sps.busy); } pr_err("%s: xfer_size=%d, data_xfered=%d, xfer_remain=%d\n", mmc_hostname(host->mmc), host->curr.xfer_size, host->curr.data_xfered, host->curr.xfer_remain); } if (host->sps.reset_bam) pr_err("%s: SPS BAM reset failed: sps reset_bam=%d\n", mmc_hostname(host->mmc), host->sps.reset_bam); pr_err("%s: got_dataend=%d, prog_enable=%d," " 
wait_for_auto_prog_done=%d, got_auto_prog_done=%d," " req_tout_ms=%d\n", mmc_hostname(host->mmc), host->curr.got_dataend, host->prog_enable, host->curr.wait_for_auto_prog_done, host->curr.got_auto_prog_done, host->curr.req_tout_ms); msmsdcc_print_rpm_info(host); } static void msmsdcc_req_tout_timer_hdlr(unsigned long data) { struct msmsdcc_host *host = (struct msmsdcc_host *)data; struct mmc_request *mrq; unsigned long flags; spin_lock_irqsave(&host->lock, flags); if (host->dummy_52_sent) { pr_info("%s: %s: dummy CMD52 timeout\n", mmc_hostname(host->mmc), __func__); host->dummy_52_sent = 0; } mrq = host->curr.mrq; if (mrq && mrq->cmd) { if (!mrq->cmd->ignore_timeout) { pr_info("%s: CMD%d: Request timeout\n", mmc_hostname(host->mmc), mrq->cmd->opcode); msmsdcc_dump_sdcc_state(host); } if (!mrq->cmd->error) mrq->cmd->error = -ETIMEDOUT; host->dummy_52_needed = 0; if (host->curr.data) { if (mrq->data && !mrq->data->error) mrq->data->error = -ETIMEDOUT; host->curr.data_xfered = 0; if (host->dma.sg && is_dma_mode(host)) { msm_dmov_flush(host->dma.channel, 0); } else if (host->sps.sg && is_sps_mode(host)) { /* Stop current SPS transfer */ msmsdcc_sps_exit_curr_xfer(host); } else { msmsdcc_clear_pio_irq_mask(host); msmsdcc_reset_and_restore(host); msmsdcc_stop_data(host); if (mrq->data && mrq->data->stop) msmsdcc_start_command(host, mrq->data->stop, 0); else msmsdcc_request_end(host, mrq); } } else { host->prog_enable = 0; host->curr.wait_for_auto_prog_done = false; msmsdcc_reset_and_restore(host); msmsdcc_request_end(host, mrq); } } spin_unlock_irqrestore(&host->lock, flags); } /* * msmsdcc_dt_get_array - Wrapper fn to read an array of 32 bit integers * * @dev: device node from which the property value is to be read. * @prop_name: name of the property to be searched. * @out_array: filled array returned to caller * @len: filled array size returned to caller * @size: expected size of the array * * If expected "size" doesn't match with "len" an error is returned. 
If * expected size is zero, the length of actual array is returned provided * return value is zero. * * RETURNS: * zero on success, negative error if failed. */ static int msmsdcc_dt_get_array(struct device *dev, const char *prop_name, u32 **out_array, int *len, int size) { int ret = 0; u32 *array = NULL; struct device_node *np = dev->of_node; if (of_get_property(np, prop_name, len)) { size_t sz; sz = *len = *len / sizeof(*array); if (sz > 0 && !(size > 0 && (sz != size))) { array = devm_kzalloc(dev, sz * sizeof(*array), GFP_KERNEL); if (!array) { dev_err(dev, "%s: no memory\n", prop_name); ret = -ENOMEM; goto out; } ret = of_property_read_u32_array(np, prop_name, array, sz); if (ret < 0) { dev_err(dev, "%s: error reading array %d\n", prop_name, ret); goto out; } } else { dev_err(dev, "%s invalid size\n", prop_name); ret = -EINVAL; goto out; } } else { dev_err(dev, "%s not specified\n", prop_name); ret = -EINVAL; goto out; } *out_array = array; out: if (ret) *len = 0; return ret; } static int msmsdcc_dt_get_pad_pull_info(struct device *dev, int id, struct msm_mmc_pad_pull_data **pad_pull_data) { int ret = 0, base = 0, len, i; u32 *tmp; struct msm_mmc_pad_pull_data *pull_data; struct msm_mmc_pad_pull *pull; switch (id) { case 1: base = TLMM_PULL_SDC1_CLK; break; case 2: base = TLMM_PULL_SDC2_CLK; break; case 3: base = TLMM_PULL_SDC3_CLK; break; case 4: base = TLMM_PULL_SDC4_CLK; break; default: dev_err(dev, "%s: Invalid slot id\n", __func__); ret = -EINVAL; goto err; } pull_data = devm_kzalloc(dev, sizeof(struct msm_mmc_pad_pull_data), GFP_KERNEL); if (!pull_data) { dev_err(dev, "No memory msm_mmc_pad_pull_data\n"); ret = -ENOMEM; goto err; } pull_data->size = 3; /* array size for clk, cmd, data */ /* Allocate on, off configs for clk, cmd, data */ pull = devm_kzalloc(dev, 2 * pull_data->size *\ sizeof(struct msm_mmc_pad_pull), GFP_KERNEL); if (!pull) { dev_err(dev, "No memory for msm_mmc_pad_pull\n"); ret = -ENOMEM; goto err; } pull_data->on = pull; pull_data->off = 
pull + pull_data->size; ret = msmsdcc_dt_get_array(dev, "qcom,sdcc-pad-pull-on", &tmp, &len, pull_data->size); if (!ret) { for (i = 0; i < len; i++) { pull_data->on[i].no = base + i; pull_data->on[i].val = tmp[i]; dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__, i, pull_data->on[i].val); } } else { goto err; } ret = msmsdcc_dt_get_array(dev, "qcom,sdcc-pad-pull-off", &tmp, &len, pull_data->size); if (!ret) { for (i = 0; i < len; i++) { pull_data->off[i].no = base + i; pull_data->off[i].val = tmp[i]; dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__, i, pull_data->off[i].val); } } else { goto err; } *pad_pull_data = pull_data; err: return ret; } static int msmsdcc_dt_get_pad_drv_info(struct device *dev, int id, struct msm_mmc_pad_drv_data **pad_drv_data) { int ret = 0, base = 0, len, i; u32 *tmp; struct msm_mmc_pad_drv_data *drv_data; struct msm_mmc_pad_drv *drv; switch (id) { case 1: base = TLMM_HDRV_SDC1_CLK; break; case 2: base = TLMM_HDRV_SDC2_CLK; break; case 3: base = TLMM_HDRV_SDC3_CLK; break; case 4: base = TLMM_HDRV_SDC4_CLK; break; default: dev_err(dev, "%s: Invalid slot id\n", __func__); ret = -EINVAL; goto err; } drv_data = devm_kzalloc(dev, sizeof(struct msm_mmc_pad_drv_data), GFP_KERNEL); if (!drv_data) { dev_err(dev, "No memory for msm_mmc_pad_drv_data\n"); ret = -ENOMEM; goto err; } drv_data->size = 3; /* array size for clk, cmd, data */ /* Allocate on, off configs for clk, cmd, data */ drv = devm_kzalloc(dev, 2 * drv_data->size *\ sizeof(struct msm_mmc_pad_drv), GFP_KERNEL); if (!drv) { dev_err(dev, "No memory msm_mmc_pad_drv\n"); ret = -ENOMEM; goto err; } drv_data->on = drv; drv_data->off = drv + drv_data->size; ret = msmsdcc_dt_get_array(dev, "qcom,sdcc-pad-drv-on", &tmp, &len, drv_data->size); if (!ret) { for (i = 0; i < len; i++) { drv_data->on[i].no = base + i; drv_data->on[i].val = tmp[i]; dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__, i, drv_data->on[i].val); } } else { goto err; } ret = msmsdcc_dt_get_array(dev, "qcom,sdcc-pad-drv-off", &tmp, &len, 
drv_data->size); if (!ret) { for (i = 0; i < len; i++) { drv_data->off[i].no = base + i; drv_data->off[i].val = tmp[i]; dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__, i, drv_data->off[i].val); } } else { goto err; } *pad_drv_data = drv_data; err: return ret; } static void msmsdcc_dt_get_cd_wp_gpio(struct device *dev, struct mmc_platform_data *pdata) { enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW; struct device_node *np = dev->of_node; pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags); if (gpio_is_valid(pdata->status_gpio)) { struct platform_device *pdev = container_of(dev, struct platform_device, dev); pdata->status_irq = platform_get_irq_byname(pdev, "status_irq"); pdata->is_status_gpio_active_low = flags & OF_GPIO_ACTIVE_LOW; } pdata->wpswitch_gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, &flags); if (gpio_is_valid(pdata->wpswitch_gpio)) pdata->is_wpswitch_active_low = flags & OF_GPIO_ACTIVE_LOW; } static int msmsdcc_dt_parse_gpio_info(struct device *dev, struct mmc_platform_data *pdata) { int ret = 0, id = 0, cnt, i; struct msm_mmc_pin_data *pin_data; struct device_node *np = dev->of_node; msmsdcc_dt_get_cd_wp_gpio(dev, pdata); pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL); if (!pin_data) { dev_err(dev, "No memory for pin_data\n"); ret = -ENOMEM; goto err; } cnt = of_gpio_count(np); if (cnt > 0) { pin_data->is_gpio = true; pin_data->gpio_data = devm_kzalloc(dev, sizeof(struct msm_mmc_gpio_data), GFP_KERNEL); if (!pin_data->gpio_data) { dev_err(dev, "No memory for gpio_data\n"); ret = -ENOMEM; goto err; } pin_data->gpio_data->size = cnt; pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt * sizeof(struct msm_mmc_gpio), GFP_KERNEL); if (!pin_data->gpio_data->gpio) { dev_err(dev, "No memory for gpio\n"); ret = -ENOMEM; goto err; } for (i = 0; i < cnt; i++) { const char *name = NULL; char result[32]; pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i); of_property_read_string_index(np, "qcom,sdcc-gpio-names", i, &name); 
snprintf(result, 32, "%s-%s", dev_name(dev), name ? name : "?"); pin_data->gpio_data->gpio[i].name = result; dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__, pin_data->gpio_data->gpio[i].name, pin_data->gpio_data->gpio[i].no); } } else { pin_data->pad_data = devm_kzalloc(dev, sizeof(struct msm_mmc_pad_data), GFP_KERNEL); if (!pin_data->pad_data) { dev_err(dev, "No memory for pin_data->pad_data\n"); ret = -ENOMEM; goto err; } of_property_read_u32(np, "cell-index", &id); ret = msmsdcc_dt_get_pad_pull_info(dev, id, &pin_data->pad_data->pull); if (ret) goto err; ret = msmsdcc_dt_get_pad_drv_info(dev, id, &pin_data->pad_data->drv); if (ret) goto err; } pdata->pin_data = pin_data; err: if (ret) dev_err(dev, "%s failed with err %d\n", __func__, ret); return ret; } #define MAX_PROP_SIZE 32 static int msmsdcc_dt_parse_vreg_info(struct device *dev, struct msm_mmc_reg_data **vreg_data, const char *vreg_name) { int len, ret = 0; const __be32 *prop; char prop_name[MAX_PROP_SIZE]; struct msm_mmc_reg_data *vreg; struct device_node *np = dev->of_node; snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name); if (of_parse_phandle(np, prop_name, 0)) { vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL); if (!vreg) { dev_err(dev, "No memory for vreg: %s\n", vreg_name); ret = -ENOMEM; goto err; } vreg->name = vreg_name; snprintf(prop_name, MAX_PROP_SIZE, "qcom,sdcc-%s-always_on", vreg_name); if (of_get_property(np, prop_name, NULL)) vreg->always_on = true; snprintf(prop_name, MAX_PROP_SIZE, "qcom,sdcc-%s-lpm_sup", vreg_name); if (of_get_property(np, prop_name, NULL)) vreg->lpm_sup = true; snprintf(prop_name, MAX_PROP_SIZE, "qcom,sdcc-%s-voltage_level", vreg_name); prop = of_get_property(np, prop_name, &len); if (!prop || (len != (2 * sizeof(__be32)))) { dev_warn(dev, "%s %s property\n", prop ? 
"invalid format" : "no", prop_name); } else { vreg->low_vol_level = be32_to_cpup(&prop[0]); vreg->high_vol_level = be32_to_cpup(&prop[1]); } snprintf(prop_name, MAX_PROP_SIZE, "qcom,sdcc-%s-current_level", vreg_name); prop = of_get_property(np, prop_name, &len); if (!prop || (len != (2 * sizeof(__be32)))) { dev_warn(dev, "%s %s property\n", prop ? "invalid format" : "no", prop_name); } else { vreg->lpm_uA = be32_to_cpup(&prop[0]); vreg->hpm_uA = be32_to_cpup(&prop[1]); } *vreg_data = vreg; dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n", vreg->name, vreg->always_on ? "always_on," : "", vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level, vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA); } err: return ret; } static struct mmc_platform_data *msmsdcc_populate_pdata(struct device *dev) { int i, ret; struct mmc_platform_data *pdata; struct device_node *np = dev->of_node; u32 bus_width = 0, current_limit = 0; u32 *clk_table, *sup_voltages; int clk_table_len, sup_volt_len, len; pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { dev_err(dev, "could not allocate memory for platform data\n"); goto err; } of_property_read_u32(np, "qcom,sdcc-bus-width", &bus_width); if (bus_width == 8) { pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA; } else if (bus_width == 4) { pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA; } else { dev_notice(dev, "Invalid bus width, default to 1 bit mode\n"); pdata->mmc_bus_width = 0; } ret = msmsdcc_dt_get_array(dev, "qcom,sdcc-sup-voltages", &sup_voltages, &sup_volt_len, 0); if (!ret) { for (i = 0; i < sup_volt_len; i += 2) { u32 mask; mask = mmc_vddrange_to_ocrmask(sup_voltages[i], sup_voltages[i + 1]); if (!mask) dev_err(dev, "Invalide voltage range %d\n", i); pdata->ocr_mask |= mask; } dev_dbg(dev, "OCR mask=0x%x\n", pdata->ocr_mask); } ret = msmsdcc_dt_get_array(dev, "qcom,sdcc-clk-rates", &clk_table, &clk_table_len, 0); if (!ret) { pdata->sup_clk_table = clk_table; pdata->sup_clk_cnt = clk_table_len; } pdata->vreg_data = 
devm_kzalloc(dev, sizeof(struct msm_mmc_slot_reg_data), GFP_KERNEL); if (!pdata->vreg_data) { dev_err(dev, "could not allocate memory for vreg_data\n"); goto err; } if (msmsdcc_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data, "vdd")) goto err; if (msmsdcc_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_io_data, "vdd-io")) goto err; if (msmsdcc_dt_parse_gpio_info(dev, pdata)) goto err; len = of_property_count_strings(np, "qcom,sdcc-bus-speed-mode"); for (i = 0; i < len; i++) { const char *name = NULL; of_property_read_string_index(np, "qcom,sdcc-bus-speed-mode", i, &name); if (!name) continue; if (!strncmp(name, "SDR12", sizeof("SDR12"))) pdata->uhs_caps |= MMC_CAP_UHS_SDR12; else if (!strncmp(name, "SDR25", sizeof("SDR25"))) pdata->uhs_caps |= MMC_CAP_UHS_SDR25; else if (!strncmp(name, "SDR50", sizeof("SDR50"))) pdata->uhs_caps |= MMC_CAP_UHS_SDR50; else if (!strncmp(name, "DDR50", sizeof("DDR50"))) pdata->uhs_caps |= MMC_CAP_UHS_DDR50; else if (!strncmp(name, "SDR104", sizeof("SDR104"))) pdata->uhs_caps |= MMC_CAP_UHS_SDR104; else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v"))) pdata->uhs_caps2 |= MMC_CAP2_HS200_1_8V_SDR; else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v"))) pdata->uhs_caps2 |= MMC_CAP2_HS200_1_2V_SDR; else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v"))) pdata->uhs_caps |= MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50; else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v"))) pdata->uhs_caps |= MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50; } of_property_read_u32(np, "qcom,sdcc-current-limit", &current_limit); if (current_limit == 800) pdata->uhs_caps |= MMC_CAP_MAX_CURRENT_800; else if (current_limit == 600) pdata->uhs_caps |= MMC_CAP_MAX_CURRENT_600; else if (current_limit == 400) pdata->uhs_caps |= MMC_CAP_MAX_CURRENT_400; else if (current_limit == 200) pdata->uhs_caps |= MMC_CAP_MAX_CURRENT_200; if (of_get_property(np, "qcom,sdcc-xpc", NULL)) pdata->xpc_cap = true; if (of_get_property(np, "qcom,sdcc-nonremovable", NULL)) pdata->nonremovable 
= true; if (of_get_property(np, "qcom,sdcc-disable_cmd23", NULL)) pdata->disable_cmd23 = true; return pdata; err: return NULL; } /* SYSFS about SD Card Detection by soonil.lim */ static struct device *t_flash_detect_dev; static ssize_t t_flash_detect_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_host *mmc = dev_get_drvdata(dev); #if defined(CONFIG_MACH_SERRANO) if (mmc->card) { printk(KERN_DEBUG "sdcc3: card inserted.\n"); return sprintf(buf, "Insert\n"); } else { printk(KERN_DEBUG "sdcc3: card removed.\n"); return sprintf(buf, "Remove\n"); } #else struct msmsdcc_host *host = mmc_priv(mmc); unsigned int detect; if (host->plat->status_gpio) detect = gpio_get_value(host->plat->status_gpio); else { pr_info("%s : External SD detect pin Error\n", __func__); return sprintf(buf, "Error\n"); } pr_info("%s : detect = %d.\n", __func__, detect); if (!detect) { printk(KERN_DEBUG "sdcc3: card inserted.\n"); return sprintf(buf, "Insert\n"); } else { printk(KERN_DEBUG "sdcc3: card removed.\n"); return sprintf(buf, "Remove\n"); } #endif } static DEVICE_ATTR(status, 0444, t_flash_detect_show, NULL); static int msmsdcc_probe(struct platform_device *pdev) { struct mmc_platform_data *plat; struct msmsdcc_host *host; struct mmc_host *mmc; unsigned long flags; struct resource *core_irqres = NULL; struct resource *bam_irqres = NULL; struct resource *core_memres = NULL; struct resource *dml_memres = NULL; struct resource *bam_memres = NULL; struct resource *dmares = NULL; struct resource *dma_crci_res = NULL; int ret = 0; if (pdev->dev.of_node) { plat = msmsdcc_populate_pdata(&pdev->dev); of_property_read_u32((&pdev->dev)->of_node, "cell-index", &pdev->id); } else { plat = pdev->dev.platform_data; } /* must have platform data */ if (!plat) { pr_err("%s: Platform data not available\n", __func__); ret = -EINVAL; goto out; } if (pdev->id < 1 || pdev->id > 5) return -EINVAL; if (plat->is_sdio_al_client && !plat->sdiowakeup_irq) { pr_err("%s: No wakeup IRQ for 
sdio_al client\n", __func__); return -EINVAL; } if (pdev->resource == NULL || pdev->num_resources < 2) { pr_err("%s: Invalid resource\n", __func__); return -ENXIO; } core_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_mem"); bam_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bam_mem"); dml_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dml_mem"); core_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "core_irq"); bam_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bam_irq"); dmares = platform_get_resource_byname(pdev, IORESOURCE_DMA, "dma_chnl"); dma_crci_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "dma_crci"); if (!core_irqres || !core_memres) { pr_err("%s: Invalid sdcc core resource\n", __func__); return -ENXIO; } /* * Both BAM and DML memory resource should be preset. * BAM IRQ resource should also be present. */ if ((bam_memres && !dml_memres) || (!bam_memres && dml_memres) || ((bam_memres && dml_memres) && !bam_irqres)) { pr_err("%s: Invalid sdcc BAM/DML resource\n", __func__); return -ENXIO; } /* * Setup our host structure */ mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto out; } host = mmc_priv(mmc); host->pdev_id = pdev->id; host->plat = plat; host->mmc = mmc; host->curr.cmd = NULL; if (!plat->disable_bam && bam_memres && dml_memres && bam_irqres) set_hw_caps(host, MSMSDCC_SPS_BAM_SUP); else if (dmares) set_hw_caps(host, MSMSDCC_DMA_SUP); host->base = ioremap(core_memres->start, resource_size(core_memres)); if (!host->base) { ret = -ENOMEM; goto host_free; } host->core_irqres = core_irqres; host->bam_irqres = bam_irqres; host->core_memres = core_memres; host->dml_memres = dml_memres; host->bam_memres = bam_memres; host->dmares = dmares; host->dma_crci_res = dma_crci_res; spin_lock_init(&host->lock); mutex_init(&host->clk_mutex); #ifdef CONFIG_MMC_EMBEDDED_SDIO if (plat->embedded_sdio) mmc_set_embedded_sdio_data(mmc, 
&plat->embedded_sdio->cis, &plat->embedded_sdio->cccr, plat->embedded_sdio->funcs, plat->embedded_sdio->num_funcs); #endif tasklet_init(&host->dma_tlet, msmsdcc_dma_complete_tlet, (unsigned long)host); tasklet_init(&host->sps.tlet, msmsdcc_sps_complete_tlet, (unsigned long)host); if (is_dma_mode(host)) { /* Setup DMA */ ret = msmsdcc_init_dma(host); if (ret) goto ioremap_free; } else { host->dma.channel = -1; host->dma.crci = -1; } /* * Setup SDCC bus voter clock. */ host->bus_clk = clk_get(&pdev->dev, "bus_clk"); if (!IS_ERR_OR_NULL(host->bus_clk)) { /* Vote for max. clk rate for max. performance */ ret = clk_set_rate(host->bus_clk, MSMSDCC_BUS_VOTE_MAX_RATE); if (ret) goto bus_clk_put; ret = clk_prepare_enable(host->bus_clk); if (ret) goto bus_clk_put; host->bus_clk_rate = MSMSDCC_BUS_VOTE_MAX_RATE; } /* * Setup main peripheral bus clock */ host->pclk = clk_get(&pdev->dev, "iface_clk"); if (!IS_ERR(host->pclk)) { ret = clk_prepare_enable(host->pclk); if (ret) goto pclk_put; host->pclk_rate = clk_get_rate(host->pclk); } /* * Setup SDC MMC clock */ host->clk = clk_get(&pdev->dev, "core_clk"); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); goto pclk_disable; } ret = clk_set_rate(host->clk, msmsdcc_get_min_sup_clk_rate(host)); if (ret) { pr_err("%s: Clock rate set failed (%d)\n", __func__, ret); goto clk_put; } ret = clk_prepare_enable(host->clk); if (ret) goto clk_put; host->clk_rate = clk_get_rate(host->clk); if (!host->clk_rate) dev_err(&pdev->dev, "Failed to read MCLK\n"); set_default_hw_caps(host); host->saved_tuning_phase = INVALID_TUNING_PHASE; /* * Set the register write delay according to min. clock frequency * supported and update later when the host->clk_rate changes. 
*/ host->reg_write_delay = (1 + ((3 * USEC_PER_SEC) / msmsdcc_get_min_sup_clk_rate(host))); atomic_set(&host->clks_on, 1); /* Apply Hard reset to SDCC to put it in power on default state */ msmsdcc_hard_reset(host); #define MSM_MMC_DEFAULT_CPUDMA_LATENCY 200 /* usecs */ /* pm qos request to prevent apps idle power collapse */ if (host->plat->cpu_dma_latency) host->cpu_dma_latency = host->plat->cpu_dma_latency; else host->cpu_dma_latency = MSM_MMC_DEFAULT_CPUDMA_LATENCY; pm_qos_add_request(&host->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); ret = msmsdcc_msm_bus_register(host); if (ret) goto pm_qos_remove; if (host->msm_bus_vote.client_handle) INIT_DELAYED_WORK(&host->msm_bus_vote.vote_work, msmsdcc_msm_bus_work); ret = msmsdcc_vreg_init(host, true); if (ret) { pr_err("%s: msmsdcc_vreg_init() failed (%d)\n", __func__, ret); goto clk_disable; } /* Clocks has to be running before accessing SPS/DML HW blocks */ if (is_sps_mode(host)) { /* Initialize SPS */ ret = msmsdcc_sps_init(host); if (ret) goto vreg_deinit; /* Initialize DML */ ret = msmsdcc_dml_init(host); if (ret) goto sps_exit; } mmc_dev(mmc)->dma_mask = &dma_mask; /* * Setup MMC host structure */ mmc->ops = &msmsdcc_ops; mmc->f_min = msmsdcc_get_min_sup_clk_rate(host); mmc->f_max = msmsdcc_get_max_sup_clk_rate(host); mmc->ocr_avail = plat->ocr_mask; mmc->clkgate_delay = MSM_MMC_CLK_GATE_DELAY; mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ; mmc->caps |= plat->mmc_bus_width; mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE; /* * If we send the CMD23 before multi block write/read command * then we need not to send CMD12 at the end of the transfer. * If we don't send the CMD12 then only way to detect the PROG_DONE * status is to use the AUTO_PROG_DONE status provided by SDCC4 * controller. So let's enable the CMD23 for SDCC4 only. 
*/ if (!plat->disable_cmd23 && is_auto_prog_done(host)) mmc->caps |= MMC_CAP_CMD23; mmc->caps |= plat->uhs_caps; mmc->caps2 |= plat->uhs_caps2; /* * XPC controls the maximum current in the default speed mode of SDXC * card. XPC=0 means 100mA (max.) but speed class is not supported. * XPC=1 means 150mA (max.) and speed class is supported. */ if (plat->xpc_cap) mmc->caps |= (MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 | MMC_CAP_SET_XPC_180); /* packed write */ mmc->caps2 |= plat->packed_write; mmc->caps2 |= (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_DETECT_ON_ERR); /* Disable Sanitize & BKOPS * mmc->caps2 |= MMC_CAP2_SANITIZE; * mmc->caps2 |= MMC_CAP2_INIT_BKOPS; */ mmc->caps2 |= MMC_CAP2_POWEROFF_NOTIFY; if (plat->nonremovable) mmc->caps |= MMC_CAP_NONREMOVABLE; mmc->caps |= MMC_CAP_SDIO_IRQ; if (plat->is_sdio_al_client) mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY; if (plat->built_in) { printk("Set MMC_PM_IGNORE_PM_NOTIFY|MMC_PM_KEEP_POWER\n"); mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY | MMC_PM_KEEP_POWER; } mmc->max_segs = msmsdcc_get_nr_sg(host); mmc->max_blk_size = MMC_MAX_BLK_SIZE; mmc->max_blk_count = MMC_MAX_BLK_CNT; mmc->max_req_size = MMC_MAX_REQ_SIZE; mmc->max_seg_size = mmc->max_req_size; writel_relaxed(0, host->base + MMCIMASK0); writel_relaxed(MCI_CLEAR_STATIC_MASK, host->base + MMCICLEAR); msmsdcc_sync_reg_wr(host); writel_relaxed(MCI_IRQENABLE, host->base + MMCIMASK0); mb(); host->mci_irqenable = MCI_IRQENABLE; ret = request_irq(core_irqres->start, msmsdcc_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); if (ret) goto dml_exit; ret = request_irq(core_irqres->start, msmsdcc_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host); if (ret) goto irq_free; /* * Enable SDCC IRQ only when host is powered on. Otherwise, this * IRQ is un-necessarily being monitored by MPM (Modem power * management block) during idle-power collapse. 
The MPM will be * configured to monitor the DATA1 GPIO line with level-low trigger * and thus depending on the GPIO status, it prevents TCXO shutdown * during idle-power collapse. */ disable_irq(core_irqres->start); host->sdcc_irq_disabled = 1; if (plat->sdiowakeup_irq) { wake_lock_init(&host->sdio_wlock, WAKE_LOCK_SUSPEND, mmc_hostname(mmc)); ret = request_irq(plat->sdiowakeup_irq, msmsdcc_platform_sdiowakeup_irq, IRQF_SHARED | IRQF_TRIGGER_LOW, DRIVER_NAME "sdiowakeup", host); if (ret) { pr_err("Unable to get sdio wakeup IRQ %d (%d)\n", plat->sdiowakeup_irq, ret); goto pio_irq_free; } else { spin_lock_irqsave(&host->lock, flags); if (!host->sdio_wakeupirq_disabled) { disable_irq_nosync(plat->sdiowakeup_irq); host->sdio_wakeupirq_disabled = 1; } spin_unlock_irqrestore(&host->lock, flags); } } if (host->plat->mpm_sdiowakeup_int) { wake_lock_init(&host->sdio_wlock, WAKE_LOCK_SUSPEND, mmc_hostname(mmc)); } wake_lock_init(&host->sdio_suspend_wlock, WAKE_LOCK_SUSPEND, mmc_hostname(mmc)); /* * Setup card detect change */ if (!plat->status_gpio) plat->status_gpio = -ENOENT; if (!plat->wpswitch_gpio) plat->wpswitch_gpio = -ENOENT; if (plat->status || gpio_is_valid(plat->status_gpio)) { if (plat->status) host->oldstat = plat->status(mmc_dev(host->mmc)); else host->oldstat = msmsdcc_slot_status(host); host->eject = !host->oldstat; } if (plat->status_irq) { ret = request_threaded_irq(plat->status_irq, NULL, msmsdcc_platform_status_irq, plat->irq_flags, DRIVER_NAME " (slot)", host); if (ret) { pr_err("Unable to get slot IRQ %d (%d)\n", plat->status_irq, ret); goto sdiowakeup_irq_free; } } else if (plat->register_status_notify) { plat->register_status_notify(msmsdcc_status_notify_cb, host); } else if (!plat->status) pr_err("%s: No card detect facilities available\n", mmc_hostname(mmc)); /* SYSFS about SD Card Detection by soonil.lim */ #if defined(CONFIG_MACH_SERRANO) if (t_flash_detect_dev == NULL && (host->pdev_id == 3)) { #else if (t_flash_detect_dev == NULL && 
gpio_is_valid(plat->status_gpio)) { #endif printk(KERN_DEBUG "%s : Change sysfs Card Detect\n", __func__); t_flash_detect_dev = device_create(sec_class, NULL, 0, NULL, "sdcard"); if (IS_ERR(t_flash_detect_dev)) pr_err("%s : Failed to create device!\n", __func__); if (device_create_file(t_flash_detect_dev, &dev_attr_status) < 0) pr_err("%s : Failed to create device file(%s)!\n", __func__, dev_attr_status.attr.name); dev_set_drvdata(t_flash_detect_dev, mmc); } mmc_set_drvdata(pdev, mmc); ret = pm_runtime_set_active(&(pdev)->dev); if (ret < 0) pr_info("%s: %s: failed with error %d", mmc_hostname(mmc), __func__, ret); /* * There is no notion of suspend/resume for SD/MMC/SDIO * cards. So host can be suspended/resumed with out * worrying about its children. */ pm_suspend_ignore_children(&(pdev)->dev, true); /* * MMC/SD/SDIO bus suspend/resume operations are defined * only for the slots that will be used for non-removable * media or for all slots when CONFIG_MMC_UNSAFE_RESUME is * defined. Otherwise, they simply become card removal and * insertion events during suspend and resume respectively. * Hence, enable run-time PM only for slots for which bus * suspend/resume operations are defined. */ #ifdef CONFIG_MMC_UNSAFE_RESUME /* * If this capability is set, MMC core will enable/disable host * for every claim/release operation on a host. We use this * notification to increment/decrement runtime pm usage count. 
*/ pm_runtime_enable(&(pdev)->dev); #else if (mmc->caps & MMC_CAP_NONREMOVABLE) { pm_runtime_enable(&(pdev)->dev); } #endif host->idle_tout = MSM_MMC_DEFAULT_IDLE_TIMEOUT; setup_timer(&host->req_tout_timer, msmsdcc_req_tout_timer_hdlr, (unsigned long)host); mmc_add_host(mmc); mmc->clk_scaling.up_threshold = 35; mmc->clk_scaling.down_threshold = 5; mmc->clk_scaling.polling_delay_ms = 100; mmc->caps2 |= MMC_CAP2_CLK_SCALE; #ifdef CONFIG_HAS_EARLYSUSPEND host->early_suspend.suspend = msmsdcc_early_suspend; host->early_suspend.resume = msmsdcc_late_resume; host->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; register_early_suspend(&host->early_suspend); #endif pr_info("%s: Qualcomm MSM SDCC-core at 0x%016llx irq %d,%d dma %d" " dmacrcri %d\n", mmc_hostname(mmc), (unsigned long long)core_memres->start, (unsigned int) core_irqres->start, (unsigned int) plat->status_irq, host->dma.channel, host->dma.crci); pr_info("%s: Controller capabilities: 0x%.8x\n", mmc_hostname(mmc), host->hw_caps); pr_info("%s: 8 bit data mode %s\n", mmc_hostname(mmc), (mmc->caps & MMC_CAP_8_BIT_DATA ? "enabled" : "disabled")); pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc), (mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled")); pr_info("%s: polling status mode %s\n", mmc_hostname(mmc), (mmc->caps & MMC_CAP_NEEDS_POLL ? 
"enabled" : "disabled")); pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n", mmc_hostname(mmc), msmsdcc_get_min_sup_clk_rate(host), msmsdcc_get_max_sup_clk_rate(host), host->pclk_rate); pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject); pr_info("%s: Power save feature enable = %d\n", mmc_hostname(mmc), msmsdcc_pwrsave); if (is_dma_mode(host) && host->dma.channel != -1 && host->dma.crci != -1) { pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n", mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr); pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n", mmc_hostname(mmc), host->dma.cmd_busaddr, host->dma.cmdptr_busaddr); } else if (is_sps_mode(host)) { pr_info("%s: SPS-BAM data transfer mode available\n", mmc_hostname(mmc)); } else pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc)); #if defined(CONFIG_DEBUG_FS) msmsdcc_dbg_createhost(host); #endif host->max_bus_bw.show = show_sdcc_to_mem_max_bus_bw; host->max_bus_bw.store = store_sdcc_to_mem_max_bus_bw; sysfs_attr_init(&host->max_bus_bw.attr); host->max_bus_bw.attr.name = "max_bus_bw"; host->max_bus_bw.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(&pdev->dev, &host->max_bus_bw); if (ret) goto platform_irq_free; if (!plat->status_irq) { host->polling.show = show_polling; host->polling.store = store_polling; sysfs_attr_init(&host->polling.attr); host->polling.attr.name = "polling"; host->polling.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(&pdev->dev, &host->polling); if (ret) goto remove_max_bus_bw_file; } host->idle_timeout.show = show_idle_timeout; host->idle_timeout.store = store_idle_timeout; sysfs_attr_init(&host->idle_timeout.attr); host->idle_timeout.attr.name = "idle_timeout"; host->idle_timeout.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(&pdev->dev, &host->idle_timeout); if (ret) goto remove_polling_file; if (!is_auto_cmd19(host)) goto add_auto_cmd21_atrr; /* Sysfs entry for AUTO CMD19 control */ host->auto_cmd19_attr.show = 
show_enable_auto_cmd19; host->auto_cmd19_attr.store = store_enable_auto_cmd19; sysfs_attr_init(&host->auto_cmd19_attr.attr); host->auto_cmd19_attr.attr.name = "enable_auto_cmd19"; host->auto_cmd19_attr.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(&pdev->dev, &host->auto_cmd19_attr); if (ret) goto remove_idle_timeout_file; add_auto_cmd21_atrr: if (!is_auto_cmd21(host)) goto exit; /* Sysfs entry for AUTO CMD21 control */ host->auto_cmd21_attr.show = show_enable_auto_cmd21; host->auto_cmd21_attr.store = store_enable_auto_cmd21; sysfs_attr_init(&host->auto_cmd21_attr.attr); host->auto_cmd21_attr.attr.name = "enable_auto_cmd21"; host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(&pdev->dev, &host->auto_cmd21_attr); if (ret) goto remove_auto_cmd19_attr_file; exit: return 0; remove_auto_cmd19_attr_file: if (is_auto_cmd19(host)) device_remove_file(&pdev->dev, &host->auto_cmd19_attr); remove_idle_timeout_file: device_remove_file(&pdev->dev, &host->idle_timeout); remove_polling_file: if (!plat->status_irq) device_remove_file(&pdev->dev, &host->polling); remove_max_bus_bw_file: device_remove_file(&pdev->dev, &host->max_bus_bw); platform_irq_free: del_timer_sync(&host->req_tout_timer); pm_runtime_disable(&(pdev)->dev); pm_runtime_set_suspended(&(pdev)->dev); if (plat->status_irq) free_irq(plat->status_irq, host); sdiowakeup_irq_free: wake_lock_destroy(&host->sdio_suspend_wlock); if (plat->sdiowakeup_irq) free_irq(plat->sdiowakeup_irq, host); pio_irq_free: if (plat->sdiowakeup_irq) wake_lock_destroy(&host->sdio_wlock); free_irq(core_irqres->start, host); irq_free: free_irq(core_irqres->start, host); dml_exit: if (is_sps_mode(host)) msmsdcc_dml_exit(host); sps_exit: if (is_sps_mode(host)) msmsdcc_sps_exit(host); vreg_deinit: msmsdcc_vreg_init(host, false); clk_disable: clk_disable(host->clk); msmsdcc_msm_bus_unregister(host); pm_qos_remove: if (host->cpu_dma_latency) pm_qos_remove_request(&host->pm_qos_req_dma); clk_put: clk_put(host->clk); 
pclk_disable: if (!IS_ERR(host->pclk)) clk_disable_unprepare(host->pclk); pclk_put: if (!IS_ERR(host->pclk)) clk_put(host->pclk); if (!IS_ERR_OR_NULL(host->bus_clk)) clk_disable_unprepare(host->bus_clk); bus_clk_put: if (!IS_ERR_OR_NULL(host->bus_clk)) clk_put(host->bus_clk); if (is_dma_mode(host)) { if (host->dmares) dma_free_coherent(NULL, sizeof(struct msmsdcc_nc_dmadata), host->dma.nc, host->dma.nc_busaddr); } ioremap_free: iounmap(host->base); host_free: mmc_free_host(mmc); out: return ret; } #ifdef CONFIG_DEBUG_FS static void msmsdcc_remove_debugfs(struct msmsdcc_host *host) { debugfs_remove_recursive(host->debugfs_host_dir); host->debugfs_host_dir = NULL; } #else static void msmsdcc_remove_debugfs(msmsdcc_host *host) {} #endif static int msmsdcc_remove(struct platform_device *pdev) { struct mmc_host *mmc = mmc_get_drvdata(pdev); struct mmc_platform_data *plat; struct msmsdcc_host *host; if (!mmc) return -ENXIO; if (pm_runtime_suspended(&(pdev)->dev)) pm_runtime_resume(&(pdev)->dev); host = mmc_priv(mmc); DBG(host, "Removing SDCC device = %d\n", pdev->id); plat = host->plat; if (is_auto_cmd19(host)) device_remove_file(&pdev->dev, &host->auto_cmd19_attr); if (is_auto_cmd21(host)) device_remove_file(&pdev->dev, &host->auto_cmd21_attr); device_remove_file(&pdev->dev, &host->max_bus_bw); if (!plat->status_irq) device_remove_file(&pdev->dev, &host->polling); device_remove_file(&pdev->dev, &host->idle_timeout); msmsdcc_remove_debugfs(host); del_timer_sync(&host->req_tout_timer); tasklet_kill(&host->dma_tlet); tasklet_kill(&host->sps.tlet); mmc_remove_host(mmc); if (plat->status_irq) free_irq(plat->status_irq, host); wake_lock_destroy(&host->sdio_suspend_wlock); if (plat->sdiowakeup_irq) { wake_lock_destroy(&host->sdio_wlock); irq_set_irq_wake(plat->sdiowakeup_irq, 0); free_irq(plat->sdiowakeup_irq, host); } free_irq(host->core_irqres->start, host); free_irq(host->core_irqres->start, host); clk_put(host->clk); if (!IS_ERR(host->pclk)) clk_put(host->pclk); if 
(!IS_ERR_OR_NULL(host->bus_clk)) clk_put(host->bus_clk); if (host->cpu_dma_latency) pm_qos_remove_request(&host->pm_qos_req_dma); if (host->msm_bus_vote.client_handle) { msmsdcc_msm_bus_cancel_work_and_set_vote(host, NULL); msmsdcc_msm_bus_unregister(host); } msmsdcc_vreg_init(host, false); if (is_dma_mode(host)) { if (host->dmares) dma_free_coherent(NULL, sizeof(struct msmsdcc_nc_dmadata), host->dma.nc, host->dma.nc_busaddr); } if (is_sps_mode(host)) { msmsdcc_dml_exit(host); msmsdcc_sps_exit(host); } iounmap(host->base); mmc_free_host(mmc); #ifdef CONFIG_HAS_EARLYSUSPEND unregister_early_suspend(&host->early_suspend); #endif pm_runtime_disable(&(pdev)->dev); pm_runtime_set_suspended(&(pdev)->dev); return 0; } #ifdef CONFIG_MSM_SDIO_AL int msmsdcc_sdio_al_lpm(struct mmc_host *mmc, bool enable) { struct msmsdcc_host *host = mmc_priv(mmc); unsigned long flags; int rc = 0; mutex_lock(&host->clk_mutex); spin_lock_irqsave(&host->lock, flags); pr_debug("%s: %sabling LPM\n", mmc_hostname(mmc), enable ? 
"En" : "Dis"); if (enable) { if (!host->sdcc_irq_disabled) { writel_relaxed(0, host->base + MMCIMASK0); disable_irq_nosync(host->core_irqres->start); host->sdcc_irq_disabled = 1; } rc = msmsdcc_setup_clocks(host, false); if (rc) goto out; if (host->plat->sdio_lpm_gpio_setup && !host->sdio_gpio_lpm) { spin_unlock_irqrestore(&host->lock, flags); host->plat->sdio_lpm_gpio_setup(mmc_dev(mmc), 0); spin_lock_irqsave(&host->lock, flags); host->sdio_gpio_lpm = 1; } if (host->sdio_wakeupirq_disabled) { msmsdcc_enable_irq_wake(host); enable_irq(host->plat->sdiowakeup_irq); host->sdio_wakeupirq_disabled = 0; } } else { rc = msmsdcc_setup_clocks(host, true); if (rc) goto out; if (!host->sdio_wakeupirq_disabled) { disable_irq_nosync(host->plat->sdiowakeup_irq); host->sdio_wakeupirq_disabled = 1; msmsdcc_disable_irq_wake(host); } if (host->plat->sdio_lpm_gpio_setup && host->sdio_gpio_lpm) { spin_unlock_irqrestore(&host->lock, flags); host->plat->sdio_lpm_gpio_setup(mmc_dev(mmc), 1); spin_lock_irqsave(&host->lock, flags); host->sdio_gpio_lpm = 0; } if (host->sdcc_irq_disabled && atomic_read(&host->clks_on)) { writel_relaxed(host->mci_irqenable, host->base + MMCIMASK0); mb(); enable_irq(host->core_irqres->start); host->sdcc_irq_disabled = 0; } } out: spin_unlock_irqrestore(&host->lock, flags); mutex_unlock(&host->clk_mutex); return rc; } #else int msmsdcc_sdio_al_lpm(struct mmc_host *mmc, bool enable) { return 0; } #endif #ifdef CONFIG_PM #ifdef CONFIG_MMC_CLKGATE static inline void msmsdcc_gate_clock(struct msmsdcc_host *host) { struct mmc_host *mmc = host->mmc; unsigned long flags; if (host->pdev_id == 3) { printk(KERN_INFO "%s: msmsdcc_gate_clock due to mmc_card_keep_power\n", __func__); } mmc_host_clk_hold(mmc); spin_lock_irqsave(&mmc->clk_lock, flags); mmc->clk_old = mmc->ios.clock; mmc->ios.clock = 0; mmc->clk_gated = true; spin_unlock_irqrestore(&mmc->clk_lock, flags); mmc_set_ios(mmc); mmc_host_clk_release(mmc); } static inline void msmsdcc_ungate_clock(struct msmsdcc_host 
*host) { struct mmc_host *mmc = host->mmc; mmc_host_clk_hold(mmc); mmc->ios.clock = host->clk_rate; mmc_set_ios(mmc); mmc_host_clk_release(mmc); } #else static inline void msmsdcc_gate_clock(struct msmsdcc_host *host) { struct mmc_host *mmc = host->mmc; mmc->ios.clock = 0; mmc_set_ios(mmc); } static inline void msmsdcc_ungate_clock(struct msmsdcc_host *host) { struct mmc_host *mmc = host->mmc; mmc->ios.clock = host->clk_rate; mmc_set_ios(mmc); } #endif #if CONFIG_DEBUG_FS static void msmsdcc_print_pm_stats(struct msmsdcc_host *host, ktime_t start, const char *func) { ktime_t diff; if (host->print_pm_stats) { diff = ktime_sub(ktime_get(), start); pr_info("%s: %s: Completed in %llu usec\n", func, mmc_hostname(host->mmc), (u64)ktime_to_us(diff)); } } #else static void msmsdcc_print_pm_stats(struct msmsdcc_host *host, ktime_t start, const char *func) {} #endif static int msmsdcc_runtime_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); int rc = 0; unsigned long flags; ktime_t start = ktime_get(); if (host->plat->is_sdio_al_client) { rc = 0; goto out; } #if defined(CONFIG_BCM4334) || defined(CONFIG_BCM4334_MODULE) if (host->pdev_id == 4) { host->mmc->pm_flags |= MMC_PM_KEEP_POWER; printk(KERN_INFO "%s: Enter WIFI suspend\n", __func__); } #elif defined(CONFIG_BCM4335) || defined(CONFIG_BCM4335_MODULE) if (host->pdev_id == 3) { host->mmc->pm_flags |= MMC_PM_KEEP_POWER; printk(KERN_INFO "%s: Enter WIFI suspend\n", __func__); } #endif pr_debug("%s: %s: start\n", mmc_hostname(mmc), __func__); if (mmc) { host->sdcc_suspending = 1; mmc->suspend_task = current; /* * MMC core thinks that host is disabled by now since * runtime suspend is scheduled after msmsdcc_disable() * is called. Thus, MMC core will try to enable the host * while suspending it. 
This results in a synchronous * runtime resume request while in runtime suspending * context and hence inorder to complete this resume * requet, it will wait for suspend to be complete, * but runtime suspend also can not proceed further * until the host is resumed. Thus, it leads to a hang. * Hence, increase the pm usage count before suspending * the host so that any resume requests after this will * simple become pm usage counter increment operations. */ pm_runtime_get_noresume(dev); /* If there is pending detect work abort runtime suspend */ if (unlikely(work_busy(&mmc->detect.work))) rc = -EAGAIN; else rc = mmc_suspend_host(mmc); pm_runtime_put_noidle(dev); if (!rc) { spin_lock_irqsave(&host->lock, flags); host->sdcc_suspended = true; spin_unlock_irqrestore(&host->lock, flags); if (mmc->card && mmc_card_sdio(mmc->card) && mmc->ios.clock) { /* * If SDIO function driver doesn't want * to power off the card, atleast turn off * clocks to allow deep sleep (TCXO shutdown). */ msmsdcc_gate_clock(host); } } host->sdcc_suspending = 0; mmc->suspend_task = NULL; if (rc && wake_lock_active(&host->sdio_suspend_wlock)) wake_unlock(&host->sdio_suspend_wlock); } pr_debug("%s: %s: ends with err=%d\n", mmc_hostname(mmc), __func__, rc); out: /* set bus bandwidth to 0 immediately */ msmsdcc_msm_bus_cancel_work_and_set_vote(host, NULL); msmsdcc_print_pm_stats(host, start, __func__); return rc; } static int msmsdcc_runtime_resume(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); unsigned long flags; ktime_t start = ktime_get(); if (host->plat->is_sdio_al_client) goto out; #if defined(CONFIG_BCM4334) || defined(CONFIG_BCM4334_MODULE) if (host->pdev_id == 4) { printk(KERN_INFO "%s: Enter WIFI resume\n", __func__); } #elif defined(CONFIG_BCM4335) || defined(CONFIG_BCM4335_MODULE) if (host->pdev_id == 3) { printk(KERN_INFO "%s: Enter WIFI resume\n", __func__); } #endif pr_debug("%s: %s: start\n", mmc_hostname(mmc), __func__); if 
(mmc) { if (mmc->card && mmc_card_sdio(mmc->card) && mmc_card_keep_power(mmc)) { msmsdcc_ungate_clock(host); #if defined(CONFIG_BCM4334) || defined(CONFIG_BCM4334_MODULE) if (host->pdev_id == 4) { printk(KERN_INFO "%s: To check whether skip the WIFI resume in mmc_card_keep_power\n", __func__); } #elif defined(CONFIG_BCM4335) || defined(CONFIG_BCM4335_MODULE) if (host->pdev_id == 3) { printk(KERN_INFO "%s: To check whether skip the WIFI resume in mmc_card_keep_power\n", __func__); } #endif } mmc_resume_host(mmc); /* * FIXME: Clearing of flags must be handled in clients * resume handler. */ spin_lock_irqsave(&host->lock, flags); mmc->pm_flags = 0; host->sdcc_suspended = false; spin_unlock_irqrestore(&host->lock, flags); /* * After resuming the host wait for sometime so that * the SDIO work will be processed. */ if (mmc->card && mmc_card_sdio(mmc->card)) { if ((host->plat->mpm_sdiowakeup_int || host->plat->sdiowakeup_irq) && wake_lock_active(&host->sdio_wlock)) wake_lock_timeout(&host->sdio_wlock, 1); } wake_unlock(&host->sdio_suspend_wlock); } host->pending_resume = false; pr_debug("%s: %s: end\n", mmc_hostname(mmc), __func__); out: msmsdcc_print_pm_stats(host, start, __func__); return 0; } static int msmsdcc_runtime_idle(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); if (host->plat->is_sdio_al_client) return 0; /* Idle timeout is not configurable for now */ /* Disable Runtime PM becasue of potential issues *pm_schedule_suspend(dev, host->idle_tout); */ return -EAGAIN; } static int msmsdcc_pm_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); int rc = 0; ktime_t start = ktime_get(); if (host->plat->is_sdio_al_client) { rc = 0; goto out; } if (host->plat->status_irq) disable_irq(host->plat->status_irq); /* * If system comes out of suspend, msmsdcc_pm_resume() sets the * host->pending_resume flag if the SDCC wasn't runtime suspended. 
* Now if the system again goes to suspend without any SDCC activity * then host->pending_resume flag will remain set which may cause * the SDCC resume to happen first and then suspend. * To avoid this unnecessary resume/suspend, make sure that * pending_resume flag is cleared before calling the * msmsdcc_runtime_suspend(). */ if (!pm_runtime_suspended(dev) && !host->pending_resume) rc = msmsdcc_runtime_suspend(dev); out: msmsdcc_print_pm_stats(host, start, __func__); return rc; } static int msmsdcc_suspend_noirq(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); int rc = 0; /* * After platform suspend there may be active request * which might have enabled clocks. For example, in SDIO * case, ksdioirq thread might have scheduled after sdcc * suspend but before system freeze. In that case abort * suspend and retry instead of keeping the clocks on * during suspend and not allowing TCXO. */ if (atomic_read(&host->clks_on) && !host->plat->is_sdio_al_client) { pr_warn("%s: clocks are on after suspend, aborting system " "suspend\n", mmc_hostname(mmc)); rc = -EAGAIN; } return rc; } static int msmsdcc_pm_resume(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msmsdcc_host *host = mmc_priv(mmc); int rc = 0; ktime_t start = ktime_get(); if (host->plat->is_sdio_al_client) { rc = 0; goto out; } if (mmc->card && mmc_card_sdio(mmc->card)) rc = msmsdcc_runtime_resume(dev); /* * As runtime PM is enabled before calling the device's platform resume * callback, we use the pm_runtime_suspended API to know if SDCC is * really runtime suspended or not and set the pending_resume flag only * if its not runtime suspended. 
*/ else if (!pm_runtime_suspended(dev)) host->pending_resume = true; if (host->plat->status_irq) { msmsdcc_check_status((unsigned long)host); enable_irq(host->plat->status_irq); } out: msmsdcc_print_pm_stats(host, start, __func__); return rc; } #else static int msmsdcc_runtime_suspend(struct device *dev) { return 0; } static int msmsdcc_runtime_idle(struct device *dev) { return 0; } static int msmsdcc_pm_suspend(struct device *dev) { return 0; } static int msmsdcc_pm_resume(struct device *dev) { return 0; } static int msmsdcc_suspend_noirq(struct device *dev) { return 0; } static int msmsdcc_runtime_resume(struct device *dev) { return 0; } #endif static const struct dev_pm_ops msmsdcc_dev_pm_ops = { .runtime_suspend = msmsdcc_runtime_suspend, .runtime_resume = msmsdcc_runtime_resume, .runtime_idle = msmsdcc_runtime_idle, .suspend = msmsdcc_pm_suspend, .resume = msmsdcc_pm_resume, .suspend_noirq = msmsdcc_suspend_noirq, }; static const struct of_device_id msmsdcc_dt_match[] = { {.compatible = "qcom,msm-sdcc"}, }; MODULE_DEVICE_TABLE(of, msmsdcc_dt_match); static struct platform_driver msmsdcc_driver = { .probe = msmsdcc_probe, .remove = msmsdcc_remove, .driver = { .name = "msm_sdcc", .pm = &msmsdcc_dev_pm_ops, .of_match_table = msmsdcc_dt_match, }, }; static int __init msmsdcc_init(void) { #if defined(CONFIG_DEBUG_FS) int ret = 0; ret = msmsdcc_dbg_init(); if (ret) { pr_err("Failed to create debug fs dir \n"); return ret; } #endif return platform_driver_register(&msmsdcc_driver); } static void __exit msmsdcc_exit(void) { platform_driver_unregister(&msmsdcc_driver); #if defined(CONFIG_DEBUG_FS) debugfs_remove(debugfs_dir); #endif } module_init(msmsdcc_init); module_exit(msmsdcc_exit); MODULE_DESCRIPTION("Qualcomm Multimedia Card Interface driver"); MODULE_LICENSE("GPL"); #if defined(CONFIG_DEBUG_FS) static int msmsdcc_dbg_idle_tout_get(void *data, u64 *val) { struct msmsdcc_host *host = data; *val = host->idle_tout / 1000L; return 0; } static int 
msmsdcc_dbg_idle_tout_set(void *data, u64 val) { struct msmsdcc_host *host = data; unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->idle_tout = (u32)val * 1000; spin_unlock_irqrestore(&host->lock, flags); return 0; } DEFINE_SIMPLE_ATTRIBUTE(msmsdcc_dbg_idle_tout_ops, msmsdcc_dbg_idle_tout_get, msmsdcc_dbg_idle_tout_set, "%llu\n"); static int msmsdcc_dbg_pio_mode_get(void *data, u64 *val) { struct msmsdcc_host *host = data; *val = (u64) host->enforce_pio_mode; return 0; } static int msmsdcc_dbg_pio_mode_set(void *data, u64 val) { struct msmsdcc_host *host = data; unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->enforce_pio_mode = !!val; spin_unlock_irqrestore(&host->lock, flags); return 0; } DEFINE_SIMPLE_ATTRIBUTE(msmsdcc_dbg_pio_mode_ops, msmsdcc_dbg_pio_mode_get, msmsdcc_dbg_pio_mode_set, "%llu\n"); static int msmsdcc_dbg_pm_stats_get(void *data, u64 *val) { struct msmsdcc_host *host = data; *val = !!host->print_pm_stats; return 0; } static int msmsdcc_dbg_pm_stats_set(void *data, u64 val) { struct msmsdcc_host *host = data; unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->print_pm_stats = !!val; spin_unlock_irqrestore(&host->lock, flags); return 0; } DEFINE_SIMPLE_ATTRIBUTE(msmsdcc_dbg_pm_stats_ops, msmsdcc_dbg_pm_stats_get, msmsdcc_dbg_pm_stats_set, "%llu\n"); static void msmsdcc_dbg_createhost(struct msmsdcc_host *host) { int err = 0; if (!debugfs_dir) return; host->debugfs_host_dir = debugfs_create_dir( mmc_hostname(host->mmc), debugfs_dir); if (IS_ERR(host->debugfs_host_dir)) { err = PTR_ERR(host->debugfs_host_dir); host->debugfs_host_dir = NULL; pr_err("%s: Failed to create debugfs dir for host with err=%d\n", mmc_hostname(host->mmc), err); return; } host->debugfs_idle_tout = debugfs_create_file("idle_tout", S_IRUSR | S_IWUSR, host->debugfs_host_dir, host, &msmsdcc_dbg_idle_tout_ops); if (IS_ERR(host->debugfs_idle_tout)) { err = PTR_ERR(host->debugfs_idle_tout); host->debugfs_idle_tout = NULL; pr_err("%s: 
Failed to create idle_tout debugfs entry with err=%d\n", mmc_hostname(host->mmc), err); } host->debugfs_pio_mode = debugfs_create_file("pio_mode", S_IRUSR | S_IWUSR, host->debugfs_host_dir, host, &msmsdcc_dbg_pio_mode_ops); if (IS_ERR(host->debugfs_pio_mode)) { err = PTR_ERR(host->debugfs_pio_mode); host->debugfs_pio_mode = NULL; pr_err("%s: Failed to create pio_mode debugfs entry with err=%d\n", mmc_hostname(host->mmc), err); } host->debugfs_pm_stats = debugfs_create_file("pm_stats", S_IRUSR | S_IWUSR, host->debugfs_host_dir, host, &msmsdcc_dbg_pm_stats_ops); if (IS_ERR(host->debugfs_pm_stats)) { err = PTR_ERR(host->debugfs_pm_stats); host->debugfs_pm_stats = NULL; pr_err("%s: Failed to create pm_stats debugfs entry with err=%d\n", mmc_hostname(host->mmc), err); } } static int __init msmsdcc_dbg_init(void) { int err; debugfs_dir = debugfs_create_dir("msm_sdcc", 0); if (IS_ERR(debugfs_dir)) { err = PTR_ERR(debugfs_dir); debugfs_dir = NULL; return err; } return 0; } #endif
gpl-2.0
rprata/boost
libs/log/test/compile/src_logger_assignable.cpp
36
1154
/* * Copyright Andrey Semashev 2007 - 2015. * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) */ /*! * \file src_logger_assignable.cpp * \author Andrey Semashev * \date 16.05.2011 * * \brief This header contains a test for logger assignability. */ #include <boost/log/sources/logger.hpp> #include <boost/log/sources/severity_logger.hpp> #include <boost/log/sources/channel_logger.hpp> #include <boost/log/sources/severity_channel_logger.hpp> template< typename LoggerT > void test() { LoggerT lg1, lg2; // Loggers must be assignable. The assignment operator must be taken // from the composite_logger class and not auto-generated (in which // case it will fail to compile because assignment in basic_logger is private). lg1 = lg2; } int main(int, char*[]) { test< boost::log::sources::logger >(); test< boost::log::sources::severity_logger< > >(); test< boost::log::sources::channel_logger< > >(); test< boost::log::sources::severity_channel_logger< > >(); return 0; }
gpl-2.0
xenord/android_kernel_samsung_trebon
fs/cachefiles/namei.c
292
24259
/* CacheFiles path walking and related routines * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <linux/sched.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/fsnotify.h> #include <linux/quotaops.h> #include <linux/xattr.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/security.h> #include <linux/slab.h> #include "internal.h" #define CACHEFILES_KEYBUF_SIZE 512 /* * dump debugging info about an object */ static noinline void __cachefiles_printk_object(struct cachefiles_object *object, const char *prefix, u8 *keybuf) { struct fscache_cookie *cookie; unsigned keylen, loop; printk(KERN_ERR "%sobject: OBJ%x\n", prefix, object->fscache.debug_id); printk(KERN_ERR "%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n", prefix, fscache_object_states[object->fscache.state], object->fscache.flags, work_busy(&object->fscache.work), object->fscache.events, object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK); printk(KERN_ERR "%sops=%u inp=%u exc=%u\n", prefix, object->fscache.n_ops, object->fscache.n_in_progress, object->fscache.n_exclusive); printk(KERN_ERR "%sparent=%p\n", prefix, object->fscache.parent); spin_lock(&object->fscache.lock); cookie = object->fscache.cookie; if (cookie) { printk(KERN_ERR "%scookie=%p [pr=%p nd=%p fl=%lx]\n", prefix, object->fscache.cookie, object->fscache.cookie->parent, object->fscache.cookie->netfs_data, object->fscache.cookie->flags); if (keybuf) keylen = cookie->def->get_key(cookie->netfs_data, keybuf, CACHEFILES_KEYBUF_SIZE); else keylen = 0; } else { printk(KERN_ERR "%scookie=NULL\n", prefix); keylen = 0; } spin_unlock(&object->fscache.lock); if (keylen) { 
printk(KERN_ERR "%skey=[%u] '", prefix, keylen); for (loop = 0; loop < keylen; loop++) printk("%02x", keybuf[loop]); printk("'\n"); } } /* * dump debugging info about a pair of objects */ static noinline void cachefiles_printk_object(struct cachefiles_object *object, struct cachefiles_object *xobject) { u8 *keybuf; keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO); if (object) __cachefiles_printk_object(object, "", keybuf); if (xobject) __cachefiles_printk_object(xobject, "x", keybuf); kfree(keybuf); } /* * mark the owner of a dentry, if there is one, to indicate that that dentry * has been preemptively deleted * - the caller must hold the i_mutex on the dentry's parent as required to * call vfs_unlink(), vfs_rmdir() or vfs_rename() */ static void cachefiles_mark_object_buried(struct cachefiles_cache *cache, struct dentry *dentry) { struct cachefiles_object *object; struct rb_node *p; _enter(",'%*.*s'", dentry->d_name.len, dentry->d_name.len, dentry->d_name.name); write_lock(&cache->active_lock); p = cache->active_nodes.rb_node; while (p) { object = rb_entry(p, struct cachefiles_object, active_node); if (object->dentry > dentry) p = p->rb_left; else if (object->dentry < dentry) p = p->rb_right; else goto found_dentry; } write_unlock(&cache->active_lock); _leave(" [no owner]"); return; /* found the dentry for */ found_dentry: kdebug("preemptive burial: OBJ%x [%s] %p", object->fscache.debug_id, fscache_object_states[object->fscache.state], dentry); if (object->fscache.state < FSCACHE_OBJECT_DYING) { printk(KERN_ERR "\n"); printk(KERN_ERR "CacheFiles: Error:" " Can't preemptively bury live object\n"); cachefiles_printk_object(object, NULL); } else if (test_and_set_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) { printk(KERN_ERR "CacheFiles: Error:" " Object already preemptively buried\n"); } write_unlock(&cache->active_lock); _leave(" [owner marked]"); } /* * record the fact that an object is now active */ static int cachefiles_mark_object_active(struct 
cachefiles_cache *cache, struct cachefiles_object *object) { struct cachefiles_object *xobject; struct rb_node **_p, *_parent = NULL; struct dentry *dentry; _enter(",%p", object); try_again: write_lock(&cache->active_lock); if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) { printk(KERN_ERR "CacheFiles: Error: Object already active\n"); cachefiles_printk_object(object, NULL); BUG(); } dentry = object->dentry; _p = &cache->active_nodes.rb_node; while (*_p) { _parent = *_p; xobject = rb_entry(_parent, struct cachefiles_object, active_node); ASSERT(xobject != object); if (xobject->dentry > dentry) _p = &(*_p)->rb_left; else if (xobject->dentry < dentry) _p = &(*_p)->rb_right; else goto wait_for_old_object; } rb_link_node(&object->active_node, _parent, _p); rb_insert_color(&object->active_node, &cache->active_nodes); write_unlock(&cache->active_lock); _leave(" = 0"); return 0; /* an old object from a previous incarnation is hogging the slot - we * need to wait for it to be destroyed */ wait_for_old_object: if (xobject->fscache.state < FSCACHE_OBJECT_DYING) { printk(KERN_ERR "\n"); printk(KERN_ERR "CacheFiles: Error:" " Unexpected object collision\n"); cachefiles_printk_object(object, xobject); BUG(); } atomic_inc(&xobject->usage); write_unlock(&cache->active_lock); if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) { wait_queue_head_t *wq; signed long timeout = 60 * HZ; wait_queue_t wait; bool requeue; /* if the object we're waiting for is queued for processing, * then just put ourselves on the queue behind it */ if (work_pending(&xobject->fscache.work)) { _debug("queue OBJ%x behind OBJ%x immediately", object->fscache.debug_id, xobject->fscache.debug_id); goto requeue; } /* otherwise we sleep until either the object we're waiting for * is done, or the fscache_object is congested */ wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE); init_wait(&wait); requeue = false; do { prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); if 
(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) break; requeue = fscache_object_sleep_till_congested(&timeout); } while (timeout > 0 && !requeue); finish_wait(wq, &wait); if (requeue && test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) { _debug("queue OBJ%x behind OBJ%x after wait", object->fscache.debug_id, xobject->fscache.debug_id); goto requeue; } if (timeout <= 0) { printk(KERN_ERR "\n"); printk(KERN_ERR "CacheFiles: Error: Overlong" " wait for old active object to go away\n"); cachefiles_printk_object(object, xobject); goto requeue; } } ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)); cache->cache.ops->put_object(&xobject->fscache); goto try_again; requeue: clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags); cache->cache.ops->put_object(&xobject->fscache); _leave(" = -ETIMEDOUT"); return -ETIMEDOUT; } /* * delete an object representation from the cache * - file backed objects are unlinked * - directory backed objects are stuffed into the graveyard for userspace to * delete * - unlocks the directory mutex */ static int cachefiles_bury_object(struct cachefiles_cache *cache, struct dentry *dir, struct dentry *rep, bool preemptive) { struct dentry *grave, *trap; char nbuffer[8 + 8 + 1]; int ret; _enter(",'%*.*s','%*.*s'", dir->d_name.len, dir->d_name.len, dir->d_name.name, rep->d_name.len, rep->d_name.len, rep->d_name.name); _debug("remove %p from %p", rep, dir); /* non-directories can just be unlinked */ if (!S_ISDIR(rep->d_inode->i_mode)) { _debug("unlink stale object"); ret = vfs_unlink(dir->d_inode, rep); if (preemptive) cachefiles_mark_object_buried(cache, rep); mutex_unlock(&dir->d_inode->i_mutex); if (ret == -EIO) cachefiles_io_error(cache, "Unlink failed"); _leave(" = %d", ret); return ret; } /* directories have to be moved to the graveyard */ _debug("move stale object to graveyard"); mutex_unlock(&dir->d_inode->i_mutex); try_again: /* first step is to make up a grave dentry in the graveyard */ sprintf(nbuffer, "%08x%08x", 
(uint32_t) get_seconds(), (uint32_t) atomic_inc_return(&cache->gravecounter)); /* do the multiway lock magic */ trap = lock_rename(cache->graveyard, dir); /* do some checks before getting the grave dentry */ if (rep->d_parent != dir) { /* the entry was probably culled when we dropped the parent dir * lock */ unlock_rename(cache->graveyard, dir); _leave(" = 0 [culled?]"); return 0; } if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) { unlock_rename(cache->graveyard, dir); cachefiles_io_error(cache, "Graveyard no longer a directory"); return -EIO; } if (trap == rep) { unlock_rename(cache->graveyard, dir); cachefiles_io_error(cache, "May not make directory loop"); return -EIO; } if (d_mountpoint(rep)) { unlock_rename(cache->graveyard, dir); cachefiles_io_error(cache, "Mountpoint in cache"); return -EIO; } grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer)); if (IS_ERR(grave)) { unlock_rename(cache->graveyard, dir); if (PTR_ERR(grave) == -ENOMEM) { _leave(" = -ENOMEM"); return -ENOMEM; } cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave)); return -EIO; } if (grave->d_inode) { unlock_rename(cache->graveyard, dir); dput(grave); grave = NULL; cond_resched(); goto try_again; } if (d_mountpoint(grave)) { unlock_rename(cache->graveyard, dir); dput(grave); cachefiles_io_error(cache, "Mountpoint in graveyard"); return -EIO; } /* target should not be an ancestor of source */ if (trap == grave) { unlock_rename(cache->graveyard, dir); dput(grave); cachefiles_io_error(cache, "May not make directory loop"); return -EIO; } /* attempt the rename */ ret = vfs_rename(dir->d_inode, rep, cache->graveyard->d_inode, grave); if (ret != 0 && ret != -ENOMEM) cachefiles_io_error(cache, "Rename failed with error %d", ret); if (preemptive) cachefiles_mark_object_buried(cache, rep); unlock_rename(cache->graveyard, dir); dput(grave); _leave(" = 0"); return 0; } /* * delete an object representation from the cache */ int cachefiles_delete_object(struct cachefiles_cache 
*cache, struct cachefiles_object *object) { struct dentry *dir; int ret; _enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry); ASSERT(object->dentry); ASSERT(object->dentry->d_inode); ASSERT(object->dentry->d_parent); dir = dget_parent(object->dentry); mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) { /* object allocation for the same key preemptively deleted this * object's file so that it could create its own file */ _debug("object preemptively buried"); mutex_unlock(&dir->d_inode->i_mutex); ret = 0; } else { /* we need to check that our parent is _still_ our parent - it * may have been renamed */ if (dir == object->dentry->d_parent) { ret = cachefiles_bury_object(cache, dir, object->dentry, false); } else { /* it got moved, presumably by cachefilesd culling it, * so it's no longer in the key path and we can ignore * it */ mutex_unlock(&dir->d_inode->i_mutex); ret = 0; } } dput(dir); _leave(" = %d", ret); return ret; } /* * walk from the parent object to the child object through the backing * filesystem, creating directories as we go */ int cachefiles_walk_to_object(struct cachefiles_object *parent, struct cachefiles_object *object, const char *key, struct cachefiles_xattr *auxdata) { struct cachefiles_cache *cache; struct dentry *dir, *next = NULL; unsigned long start; const char *name; int ret, nlen; _enter("OBJ%x{%p},OBJ%x,%s,", parent->fscache.debug_id, parent->dentry, object->fscache.debug_id, key); cache = container_of(parent->fscache.cache, struct cachefiles_cache, cache); ASSERT(parent->dentry); ASSERT(parent->dentry->d_inode); if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) { // TODO: convert file to dir _leave("looking up in none directory"); return -ENOBUFS; } dir = dget(parent->dentry); advance: /* attempt to transit the first directory component */ name = key; nlen = strlen(key); /* key ends in a double NUL */ key = key + nlen + 1; if (!*key) key = NULL; lookup_again: /* 
search the current directory for the element name */ _debug("lookup '%s'", name); mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); start = jiffies; next = lookup_one_len(name, dir, nlen); cachefiles_hist(cachefiles_lookup_histogram, start); if (IS_ERR(next)) goto lookup_error; _debug("next -> %p %s", next, next->d_inode ? "positive" : "negative"); if (!key) object->new = !next->d_inode; /* if this element of the path doesn't exist, then the lookup phase * failed, and we can release any readers in the certain knowledge that * there's nothing for them to actually read */ if (!next->d_inode) fscache_object_lookup_negative(&object->fscache); /* we need to create the object if it's negative */ if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) { /* index objects and intervening tree levels must be subdirs */ if (!next->d_inode) { ret = cachefiles_has_space(cache, 1, 0); if (ret < 0) goto create_error; start = jiffies; ret = vfs_mkdir(dir->d_inode, next, 0); cachefiles_hist(cachefiles_mkdir_histogram, start); if (ret < 0) goto create_error; ASSERT(next->d_inode); _debug("mkdir -> %p{%p{ino=%lu}}", next, next->d_inode, next->d_inode->i_ino); } else if (!S_ISDIR(next->d_inode->i_mode)) { kerror("inode %lu is not a directory", next->d_inode->i_ino); ret = -ENOBUFS; goto error; } } else { /* non-index objects start out life as files */ if (!next->d_inode) { ret = cachefiles_has_space(cache, 1, 0); if (ret < 0) goto create_error; start = jiffies; ret = vfs_create(dir->d_inode, next, S_IFREG, NULL); cachefiles_hist(cachefiles_create_histogram, start); if (ret < 0) goto create_error; ASSERT(next->d_inode); _debug("create -> %p{%p{ino=%lu}}", next, next->d_inode, next->d_inode->i_ino); } else if (!S_ISDIR(next->d_inode->i_mode) && !S_ISREG(next->d_inode->i_mode) ) { kerror("inode %lu is not a file or directory", next->d_inode->i_ino); ret = -ENOBUFS; goto error; } } /* process the next component */ if (key) { _debug("advance"); mutex_unlock(&dir->d_inode->i_mutex); 
dput(dir); dir = next; next = NULL; goto advance; } /* we've found the object we were looking for */ object->dentry = next; /* if we've found that the terminal object exists, then we need to * check its attributes and delete it if it's out of date */ if (!object->new) { _debug("validate '%*.*s'", next->d_name.len, next->d_name.len, next->d_name.name); ret = cachefiles_check_object_xattr(object, auxdata); if (ret == -ESTALE) { /* delete the object (the deleter drops the directory * mutex) */ object->dentry = NULL; ret = cachefiles_bury_object(cache, dir, next, true); dput(next); next = NULL; if (ret < 0) goto delete_error; _debug("redo lookup"); goto lookup_again; } } /* note that we're now using this object */ ret = cachefiles_mark_object_active(cache, object); mutex_unlock(&dir->d_inode->i_mutex); dput(dir); dir = NULL; if (ret == -ETIMEDOUT) goto mark_active_timed_out; _debug("=== OBTAINED_OBJECT ==="); if (object->new) { /* attach data to a newly constructed terminal object */ ret = cachefiles_set_object_xattr(object, auxdata); if (ret < 0) goto check_error; } else { /* always update the atime on an object we've just looked up * (this is used to keep track of culling, and atimes are only * updated by read, write and readdir but not lookup or * open) */ touch_atime(cache->mnt, next); } /* open a file interface onto a data file */ if (object->type != FSCACHE_COOKIE_TYPE_INDEX) { if (S_ISREG(object->dentry->d_inode->i_mode)) { const struct address_space_operations *aops; ret = -EPERM; aops = object->dentry->d_inode->i_mapping->a_ops; if (!aops->bmap) goto check_error; object->backer = object->dentry; } else { BUG(); // TODO: open file in data-class subdir } } object->new = 0; fscache_obtained_object(&object->fscache); _leave(" = 0 [%lu]", object->dentry->d_inode->i_ino); return 0; create_error: _debug("create error %d", ret); if (ret == -EIO) cachefiles_io_error(cache, "Create/mkdir failed"); goto error; mark_active_timed_out: _debug("mark active timed out"); goto 
release_dentry; check_error: _debug("check error %d", ret); write_lock(&cache->active_lock); rb_erase(&object->active_node, &cache->active_nodes); clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags); wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE); write_unlock(&cache->active_lock); release_dentry: dput(object->dentry); object->dentry = NULL; goto error_out; delete_error: _debug("delete error %d", ret); goto error_out2; lookup_error: _debug("lookup error %ld", PTR_ERR(next)); ret = PTR_ERR(next); if (ret == -EIO) cachefiles_io_error(cache, "Lookup failed"); next = NULL; error: mutex_unlock(&dir->d_inode->i_mutex); dput(next); error_out2: dput(dir); error_out: _leave(" = error %d", -ret); return ret; } /* * get a subdirectory */ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, struct dentry *dir, const char *dirname) { struct dentry *subdir; unsigned long start; int ret; _enter(",,%s", dirname); /* search the current directory for the element name */ mutex_lock(&dir->d_inode->i_mutex); start = jiffies; subdir = lookup_one_len(dirname, dir, strlen(dirname)); cachefiles_hist(cachefiles_lookup_histogram, start); if (IS_ERR(subdir)) { if (PTR_ERR(subdir) == -ENOMEM) goto nomem_d_alloc; goto lookup_error; } _debug("subdir -> %p %s", subdir, subdir->d_inode ? 
"positive" : "negative"); /* we need to create the subdir if it doesn't exist yet */ if (!subdir->d_inode) { ret = cachefiles_has_space(cache, 1, 0); if (ret < 0) goto mkdir_error; _debug("attempt mkdir"); ret = vfs_mkdir(dir->d_inode, subdir, 0700); if (ret < 0) goto mkdir_error; ASSERT(subdir->d_inode); _debug("mkdir -> %p{%p{ino=%lu}}", subdir, subdir->d_inode, subdir->d_inode->i_ino); } mutex_unlock(&dir->d_inode->i_mutex); /* we need to make sure the subdir is a directory */ ASSERT(subdir->d_inode); if (!S_ISDIR(subdir->d_inode->i_mode)) { kerror("%s is not a directory", dirname); ret = -EIO; goto check_error; } ret = -EPERM; if (!subdir->d_inode->i_op || !subdir->d_inode->i_op->setxattr || !subdir->d_inode->i_op->getxattr || !subdir->d_inode->i_op->lookup || !subdir->d_inode->i_op->mkdir || !subdir->d_inode->i_op->create || !subdir->d_inode->i_op->rename || !subdir->d_inode->i_op->rmdir || !subdir->d_inode->i_op->unlink) goto check_error; _leave(" = [%lu]", subdir->d_inode->i_ino); return subdir; check_error: dput(subdir); _leave(" = %d [check]", ret); return ERR_PTR(ret); mkdir_error: mutex_unlock(&dir->d_inode->i_mutex); dput(subdir); kerror("mkdir %s failed with error %d", dirname, ret); return ERR_PTR(ret); lookup_error: mutex_unlock(&dir->d_inode->i_mutex); ret = PTR_ERR(subdir); kerror("Lookup %s failed with error %d", dirname, ret); return ERR_PTR(ret); nomem_d_alloc: mutex_unlock(&dir->d_inode->i_mutex); _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } /* * find out if an object is in use or not * - if finds object and it's not in use: * - returns a pointer to the object and a reference on it * - returns with the directory locked */ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache, struct dentry *dir, char *filename) { struct cachefiles_object *object; struct rb_node *_n; struct dentry *victim; unsigned long start; int ret; //_enter(",%*.*s/,%s", // dir->d_name.len, dir->d_name.len, dir->d_name.name, filename); /* look up 
the victim */ mutex_lock_nested(&dir->d_inode->i_mutex, 1); start = jiffies; victim = lookup_one_len(filename, dir, strlen(filename)); cachefiles_hist(cachefiles_lookup_histogram, start); if (IS_ERR(victim)) goto lookup_error; //_debug("victim -> %p %s", // victim, victim->d_inode ? "positive" : "negative"); /* if the object is no longer there then we probably retired the object * at the netfs's request whilst the cull was in progress */ if (!victim->d_inode) { mutex_unlock(&dir->d_inode->i_mutex); dput(victim); _leave(" = -ENOENT [absent]"); return ERR_PTR(-ENOENT); } /* check to see if we're using this object */ read_lock(&cache->active_lock); _n = cache->active_nodes.rb_node; while (_n) { object = rb_entry(_n, struct cachefiles_object, active_node); if (object->dentry > victim) _n = _n->rb_left; else if (object->dentry < victim) _n = _n->rb_right; else goto object_in_use; } read_unlock(&cache->active_lock); //_leave(" = %p", victim); return victim; object_in_use: read_unlock(&cache->active_lock); mutex_unlock(&dir->d_inode->i_mutex); dput(victim); //_leave(" = -EBUSY [in use]"); return ERR_PTR(-EBUSY); lookup_error: mutex_unlock(&dir->d_inode->i_mutex); ret = PTR_ERR(victim); if (ret == -ENOENT) { /* file or dir now absent - probably retired by netfs */ _leave(" = -ESTALE [absent]"); return ERR_PTR(-ESTALE); } if (ret == -EIO) { cachefiles_io_error(cache, "Lookup failed"); } else if (ret != -ENOMEM) { kerror("Internal error: %d", ret); ret = -EIO; } _leave(" = %d", ret); return ERR_PTR(ret); } /* * cull an object if it's not in use * - called only by cache manager daemon */ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir, char *filename) { struct dentry *victim; int ret; _enter(",%*.*s/,%s", dir->d_name.len, dir->d_name.len, dir->d_name.name, filename); victim = cachefiles_check_active(cache, dir, filename); if (IS_ERR(victim)) return PTR_ERR(victim); _debug("victim -> %p %s", victim, victim->d_inode ? "positive" : "negative"); /* okay... 
the victim is not being used so we can cull it * - start by marking it as stale */ _debug("victim is cullable"); ret = cachefiles_remove_object_xattr(cache, victim); if (ret < 0) goto error_unlock; /* actually remove the victim (drops the dir mutex) */ _debug("bury"); ret = cachefiles_bury_object(cache, dir, victim, false); if (ret < 0) goto error; dput(victim); _leave(" = 0"); return 0; error_unlock: mutex_unlock(&dir->d_inode->i_mutex); error: dput(victim); if (ret == -ENOENT) { /* file or dir now absent - probably retired by netfs */ _leave(" = -ESTALE [absent]"); return -ESTALE; } if (ret != -ENOMEM) { kerror("Internal error: %d", ret); ret = -EIO; } _leave(" = %d", ret); return ret; } /* * find out if an object is in use or not * - called only by cache manager daemon * - returns -EBUSY or 0 to indicate whether an object is in use or not */ int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir, char *filename) { struct dentry *victim; //_enter(",%*.*s/,%s", // dir->d_name.len, dir->d_name.len, dir->d_name.name, filename); victim = cachefiles_check_active(cache, dir, filename); if (IS_ERR(victim)) return PTR_ERR(victim); mutex_unlock(&dir->d_inode->i_mutex); dput(victim); //_leave(" = 0"); return 0; }
gpl-2.0
sikarash/linux-pm
net/ax25/ax25_uid.c
804
4916
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/export.h>
#include <net/ip.h>
#include <net/arp.h>

/*
 * Callsign/UID mapper. This is in kernel space for security on multi-amateur
 * machines.
 *
 * The list of uid<->callsign associations is protected by ax25_uid_lock;
 * individual entries are refcounted (ax25_uid_hold/ax25_uid_put).
 */

static HLIST_HEAD(ax25_uid_list);
static DEFINE_RWLOCK(ax25_uid_lock);

int ax25_uid_policy;

EXPORT_SYMBOL(ax25_uid_policy);

/*
 * Look up the uid<->callsign association for @uid.
 * Returns the entry with an extra reference held (caller must
 * ax25_uid_put() it), or NULL if no association exists.
 */
ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
{
	ax25_uid_assoc *ax25_uid, *res = NULL;

	read_lock(&ax25_uid_lock);
	ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
		if (uid_eq(ax25_uid->uid, uid)) {
			/* take a reference before dropping the lock */
			ax25_uid_hold(ax25_uid);
			res = ax25_uid;
			break;
		}
	}
	read_unlock(&ax25_uid_lock);

	return res;
}

EXPORT_SYMBOL(ax25_findbyuid);

/*
 * ioctl backend for the uid<->callsign mapping table.
 *
 * SIOCAX25GETUID: return the (munged) uid mapped to sax->sax25_call,
 *                 or -ENOENT if unmapped.
 * SIOCAX25ADDUID: add a new association (CAP_NET_ADMIN required);
 *                 -EEXIST if the uid is already mapped.
 * SIOCAX25DELUID: remove the association for sax->sax25_call
 *                 (CAP_NET_ADMIN required); -ENOENT if absent.
 */
int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
{
	ax25_uid_assoc *ax25_uid;
	ax25_uid_assoc *user;
	unsigned long res;

	switch (cmd) {
	case SIOCAX25GETUID:
		res = -ENOENT;
		read_lock(&ax25_uid_lock);
		ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
			if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
				res = from_kuid_munged(current_user_ns(),
						       ax25_uid->uid);
				break;
			}
		}
		read_unlock(&ax25_uid_lock);

		return res;

	case SIOCAX25ADDUID:
	{
		kuid_t sax25_kuid;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		sax25_kuid = make_kuid(current_user_ns(), sax->sax25_uid);
		if (!uid_valid(sax25_kuid))
			return -EINVAL;
		user = ax25_findbyuid(sax25_kuid);
		if (user) {
			/* drop the reference ax25_findbyuid() took */
			ax25_uid_put(user);
			return -EEXIST;
		}
		if (sax->sax25_uid == 0)
			return -EINVAL;
		if ((ax25_uid = kmalloc(sizeof(*ax25_uid), GFP_KERNEL)) == NULL)
			return -ENOMEM;

		atomic_set(&ax25_uid->refcount, 1);
		ax25_uid->uid  = sax25_kuid;
		ax25_uid->call = sax->sax25_call;

		write_lock(&ax25_uid_lock);
		hlist_add_head(&ax25_uid->uid_node, &ax25_uid_list);
		write_unlock(&ax25_uid_lock);

		return 0;
	}
	case SIOCAX25DELUID:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		/* NULL here means "not found" after the loop below */
		ax25_uid = NULL;
		write_lock(&ax25_uid_lock);
		ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
			if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0)
				break;
		}
		if (ax25_uid == NULL) {
			write_unlock(&ax25_uid_lock);
			return -ENOENT;
		}
		hlist_del_init(&ax25_uid->uid_node);
		ax25_uid_put(ax25_uid);
		write_unlock(&ax25_uid_lock);

		return 0;

	default:
		return -EINVAL;
	}

	return -EINVAL;	/*NOTREACHED */
}

#ifdef CONFIG_PROC_FS

/* seq_file iterator: takes ax25_uid_lock for the whole traversal */
static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(ax25_uid_lock)
{
	read_lock(&ax25_uid_lock);
	return seq_hlist_start_head(&ax25_uid_list, *pos);
}

static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &ax25_uid_list, pos);
}

static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
	__releases(ax25_uid_lock)
{
	read_unlock(&ax25_uid_lock);
}

/* Print the policy header first, then one "uid callsign" line per entry */
static int ax25_uid_seq_show(struct seq_file *seq, void *v)
{
	char buf[11];

	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "Policy: %d\n", ax25_uid_policy);
	else {
		struct ax25_uid_assoc *pt;

		pt = hlist_entry(v, struct ax25_uid_assoc, uid_node);
		seq_printf(seq, "%6d %s\n",
			from_kuid_munged(seq_user_ns(seq), pt->uid),
			ax2asc(buf, &pt->call));
	}
	return 0;
}

static const struct seq_operations ax25_uid_seqops = {
	.start = ax25_uid_seq_start,
	.next = ax25_uid_seq_next,
	.stop = ax25_uid_seq_stop,
	.show = ax25_uid_seq_show,
};

static int ax25_uid_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ax25_uid_seqops);
}

const struct file_operations ax25_uid_fops = {
	.owner = THIS_MODULE,
	.open = ax25_uid_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#endif

/*
 * Free all memory associated with UID/Callsign structures.
 */
void __exit ax25_uid_free(void)
{
	ax25_uid_assoc *ax25_uid;

	write_lock(&ax25_uid_lock);
again:
	/*
	 * Deleting while iterating invalidates the iterator, so restart
	 * the walk from the head after each removal until the list is empty.
	 */
	ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
		hlist_del_init(&ax25_uid->uid_node);
		ax25_uid_put(ax25_uid);
		goto again;
	}
	write_unlock(&ax25_uid_lock);
}
gpl-2.0
jledet/linux-xlnx
drivers/i2c/busses/i2c-sis630.c
1828
14698
/*
    Copyright (c) 2002,2003 Alexander Malysh <amalysh@web.de>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*/

/*
   Status: beta

   Supports:
	SIS 630
	SIS 730
	SIS 964

   Notable differences between chips:
	+------------------------+--------------------+-------------------+
	|                        |     SIS630/730     |      SIS964       |
	+------------------------+--------------------+-------------------+
	| Clock                  | 14kHz/56kHz        | 55.56kHz/27.78kHz |
	| SMBus registers offset | 0x80               | 0xE0              |
	| SMB_CNT                | Bit 1 = Slave Busy | Bit 1 = Bus probe |
	|   (not used yet)       | Bit 3 is reserved  | Bit 3 = Last byte |
	| SMB_PCOUNT             | Offset + 0x06      | Offset + 0x14     |
	| SMB_COUNT              | 4:0 bits           | 5:0 bits          |
	+------------------------+--------------------+-------------------+
	(Other differences don't affect the functions provided by the driver)

   Note: we assume there can only be one device, with one SMBus interface.
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/io.h>

/* SIS964 id is defined here as we are the only file using it */
#define PCI_DEVICE_ID_SI_964	0x0964

/* SIS630/730/964 SMBus registers */
#define SMB_STS			0x00	/* status */
#define SMB_CNT			0x02	/* control */
#define SMBHOST_CNT		0x03	/* host control */
#define SMB_ADDR		0x04	/* address */
#define SMB_CMD			0x05	/* command */
#define SMB_COUNT		0x07	/* byte count */
#define SMB_BYTE		0x08	/* ~0x8F data byte field */

/* SMB_STS register */
#define BYTE_DONE_STS		0x10	/* Byte Done Status / Block Array */
#define SMBCOL_STS		0x04	/* Collision */
#define SMBERR_STS		0x02	/* Device error */

/* SMB_CNT register */
#define MSTO_EN			0x40	/* Host Master Timeout Enable */
#define SMBCLK_SEL		0x20	/* Host master clock selection */
#define SMB_PROBE		0x02	/* Bus Probe/Slave busy */
#define SMB_HOSTBUSY		0x01	/* Host Busy */

/* SMBHOST_CNT register */
#define SMB_KILL		0x20	/* Kill */
#define SMB_START		0x10	/* Start */

/* register count for request_region
 * As we don't use SMB_PCOUNT, 20 is ok for SiS630 and SiS964
 */
#define SIS630_SMB_IOREGION	20

/* PCI address constants */
/* acpi base address register */
#define SIS630_ACPI_BASE_REG	0x74
/* bios control register */
#define SIS630_BIOS_CTL_REG	0x40

/* Other settings */
#define MAX_TIMEOUT		500

/* SIS630 transaction-size codes written into SMBHOST_CNT */
#define SIS630_QUICK		0x00
#define SIS630_BYTE		0x01
#define SIS630_BYTE_DATA	0x02
#define SIS630_WORD_DATA	0x03
#define SIS630_PCALL		0x04
#define SIS630_BLOCK_DATA	0x05

static struct pci_driver sis630_driver;

/* insmod parameters */
static bool high_clock;
static bool force;
module_param(high_clock, bool, 0);
MODULE_PARM_DESC(high_clock,
	"Set Host Master Clock to 56KHz (default 14KHz) (SIS630/730 only).");
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Forcibly enable the SIS630. DANGEROUS!");

/* SMBus base address (single instance; see note at top of file) */
static unsigned short smbus_base;

/* supported chips */
static int supported[] = {
	PCI_DEVICE_ID_SI_630,
	PCI_DEVICE_ID_SI_730,
	PCI_DEVICE_ID_SI_760,
	0	/* terminates the list */
};

/* Read one SMBus controller register (port I/O, offset from smbus_base) */
static inline u8 sis630_read(u8 reg)
{
	return inb(smbus_base + reg);
}

/* Write one SMBus controller register */
static inline void sis630_write(u8 reg, u8 data)
{
	outb(data, smbus_base + reg);
}

/*
 * Prepare the controller and kick off one transaction of @size.
 * Saves the previous SMB_CNT value into *oldclock so that
 * sis630_transaction_end() can restore the clock selection later.
 * Returns 0 on success or -EBUSY if the host cannot be freed.
 */
static int sis630_transaction_start(struct i2c_adapter *adap, int size,
				    u8 *oldclock)
{
	int temp;

	/* Make sure the SMBus host is ready to start transmitting. */
	temp = sis630_read(SMB_CNT);
	if ((temp & (SMB_PROBE | SMB_HOSTBUSY)) != 0x00) {
		dev_dbg(&adap->dev, "SMBus busy (%02x). Resetting...\n", temp);
		/* kill smbus transaction */
		sis630_write(SMBHOST_CNT, SMB_KILL);

		temp = sis630_read(SMB_CNT);
		if (temp & (SMB_PROBE | SMB_HOSTBUSY)) {
			dev_dbg(&adap->dev, "Failed! (%02x)\n", temp);
			return -EBUSY;
		} else {
			dev_dbg(&adap->dev, "Successful!\n");
		}
	}

	/* save old clock, so we can prevent machine for hung */
	*oldclock = sis630_read(SMB_CNT);

	dev_dbg(&adap->dev, "saved clock 0x%02x\n", *oldclock);

	/* disable timeout interrupt,
	 * set Host Master Clock to 56KHz if requested */
	if (high_clock)
		sis630_write(SMB_CNT, SMBCLK_SEL);
	else
		sis630_write(SMB_CNT, (*oldclock & ~MSTO_EN));

	/* clear all sticky bits */
	temp = sis630_read(SMB_STS);
	sis630_write(SMB_STS, temp & 0x1e);

	/* start the transaction by setting bit 4 and size */
	sis630_write(SMBHOST_CNT, SMB_START | (size & 0x07));

	return 0;
}

/*
 * Poll SMB_STS until the transaction completes (or MAX_TIMEOUT ms
 * elapse). Returns 0, -ETIMEDOUT, -ENXIO (device error) or -EAGAIN
 * (bus collision).
 */
static int sis630_transaction_wait(struct i2c_adapter *adap, int size)
{
	int temp, result = 0, timeout = 0;

	/* We will always wait for a fraction of a second! */
	do {
		msleep(1);
		temp = sis630_read(SMB_STS);
		/* check if block transmitted */
		if (size == SIS630_BLOCK_DATA && (temp & BYTE_DONE_STS))
			break;
	} while (!(temp & 0x0e) && (timeout++ < MAX_TIMEOUT));

	/* If the SMBus is still busy, we give up */
	if (timeout > MAX_TIMEOUT) {
		dev_dbg(&adap->dev, "SMBus Timeout!\n");
		result = -ETIMEDOUT;
	}

	if (temp & SMBERR_STS) {
		dev_dbg(&adap->dev, "Error: Failed bus transaction\n");
		result = -ENXIO;
	}

	if (temp & SMBCOL_STS) {
		dev_err(&adap->dev, "Bus collision!\n");
		result = -EAGAIN;
	}

	return result;
}

/* Clear sticky status bits and restore the clock saved at start time */
static void sis630_transaction_end(struct i2c_adapter *adap, u8 oldclock)
{
	/* clear all status "sticky" bits */
	sis630_write(SMB_STS, 0xFF);

	dev_dbg(&adap->dev,
		"SMB_CNT before clock restore 0x%02x\n", sis630_read(SMB_CNT));

	/*
	 * restore old Host Master Clock if high_clock is set
	 * and oldclock was not 56KHz
	 */
	if (high_clock && !(oldclock & SMBCLK_SEL))
		sis630_write(SMB_CNT, sis630_read(SMB_CNT) & ~SMBCLK_SEL);

	dev_dbg(&adap->dev,
		"SMB_CNT after clock restore 0x%02x\n", sis630_read(SMB_CNT));
}

/* Run one complete non-block transaction: start, wait, end */
static int sis630_transaction(struct i2c_adapter *adap, int size)
{
	int result = 0;
	u8 oldclock = 0;

	result = sis630_transaction_start(adap, size, &oldclock);
	if (!result) {
		result = sis630_transaction_wait(adap, size);
		sis630_transaction_end(adap, oldclock);
	}

	return result;
}

/*
 * SMBus block read/write, transferred through the 8-byte SMB_BYTE
 * window in chunks of up to 8 bytes per hardware transaction.
 * Block length is clamped to [0, 32] in both directions.
 */
static int sis630_block_data(struct i2c_adapter *adap,
			     union i2c_smbus_data *data, int read_write)
{
	int i, len = 0, rc = 0;
	u8 oldclock = 0;

	if (read_write == I2C_SMBUS_WRITE) {
		len = data->block[0];
		if (len < 0)
			len = 0;
		else if (len > 32)
			len = 32;
		sis630_write(SMB_COUNT, len);
		for (i = 1; i <= len; i++) {
			dev_dbg(&adap->dev,
				"set data 0x%02x\n", data->block[i]);
			/* set data into the 8-byte window, wrapping at 8 */
			sis630_write(SMB_BYTE + (i - 1) % 8, data->block[i]);
			if (i == 8 || (len < 8 && i == len)) {
				dev_dbg(&adap->dev,
					"start trans len=%d i=%d\n", len, i);
				/* first transaction */
				rc = sis630_transaction_start(adap,
						SIS630_BLOCK_DATA, &oldclock);
				if (rc)
					return rc;
			} else if ((i - 1) % 8 == 7 || i == len) {
				dev_dbg(&adap->dev,
					"trans_wait len=%d i=%d\n", len, i);
				if (i > 8) {
					dev_dbg(&adap->dev,
						"clear smbary_sts"
						" len=%d i=%d\n", len, i);
					/*
					   If this is not first transaction,
					   we must clear sticky bit.
					   clear SMBARY_STS
					*/
					sis630_write(SMB_STS, BYTE_DONE_STS);
				}
				rc = sis630_transaction_wait(adap,
						SIS630_BLOCK_DATA);
				if (rc) {
					dev_dbg(&adap->dev,
						"trans_wait failed\n");
					break;
				}
			}
		}
	} else {
		/* read request */
		data->block[0] = len = 0;
		rc = sis630_transaction_start(adap,
				SIS630_BLOCK_DATA, &oldclock);
		if (rc)
			return rc;
		do {
			rc = sis630_transaction_wait(adap, SIS630_BLOCK_DATA);
			if (rc) {
				dev_dbg(&adap->dev, "trans_wait failed\n");
				break;
			}
			/* if this first transaction then read byte count */
			if (len == 0)
				data->block[0] = sis630_read(SMB_COUNT);

			/* just to be sure */
			if (data->block[0] > 32)
				data->block[0] = 32;

			dev_dbg(&adap->dev,
				"block data read len=0x%x\n", data->block[0]);

			for (i = 0; i < 8 && len < data->block[0];
			     i++, len++) {
				dev_dbg(&adap->dev,
					"read i=%d len=%d\n", i, len);
				data->block[len + 1] =
					sis630_read(SMB_BYTE + i);
			}

			dev_dbg(&adap->dev,
				"clear smbary_sts len=%d i=%d\n", len, i);

			/* clear SMBARY_STS */
			sis630_write(SMB_STS, BYTE_DONE_STS);
		} while (len < data->block[0]);
	}

	sis630_transaction_end(adap, oldclock);

	return rc;
}

/*
 * smbus_xfer entry point: program address/command/data registers for
 * the requested SMBus protocol, run the transaction, and read results
 * back for read/proc-call transfers.
 * Return negative errno on error.
 */
static s32 sis630_access(struct i2c_adapter *adap, u16 addr,
			 unsigned short flags, char read_write,
			 u8 command, int size, union i2c_smbus_data *data)
{
	int status;

	switch (size) {
	case I2C_SMBUS_QUICK:
		sis630_write(SMB_ADDR,
			     ((addr & 0x7f) << 1) | (read_write & 0x01));
		size = SIS630_QUICK;
		break;
	case I2C_SMBUS_BYTE:
		sis630_write(SMB_ADDR,
			     ((addr & 0x7f) << 1) | (read_write & 0x01));
		if (read_write == I2C_SMBUS_WRITE)
			sis630_write(SMB_CMD, command);
		size = SIS630_BYTE;
		break;
	case I2C_SMBUS_BYTE_DATA:
		sis630_write(SMB_ADDR,
			     ((addr & 0x7f) << 1) | (read_write & 0x01));
		sis630_write(SMB_CMD, command);
		if (read_write == I2C_SMBUS_WRITE)
			sis630_write(SMB_BYTE, data->byte);
		size = SIS630_BYTE_DATA;
		break;
	case I2C_SMBUS_PROC_CALL:
	case I2C_SMBUS_WORD_DATA:
		sis630_write(SMB_ADDR,
			     ((addr & 0x7f) << 1) | (read_write & 0x01));
		sis630_write(SMB_CMD, command);
		if (read_write == I2C_SMBUS_WRITE) {
			sis630_write(SMB_BYTE, data->word & 0xff);
			sis630_write(SMB_BYTE + 1, (data->word & 0xff00) >> 8);
		}
		size = (size == I2C_SMBUS_PROC_CALL ?
			SIS630_PCALL : SIS630_WORD_DATA);
		break;
	case I2C_SMBUS_BLOCK_DATA:
		sis630_write(SMB_ADDR,
			     ((addr & 0x7f) << 1) | (read_write & 0x01));
		sis630_write(SMB_CMD, command);
		size = SIS630_BLOCK_DATA;
		/* block transfers manage start/wait/end themselves */
		return sis630_block_data(adap, data, read_write);
	default:
		dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
		return -EOPNOTSUPP;
	}

	status = sis630_transaction(adap, size);
	if (status)
		return status;

	/* nothing to read back for plain writes and quick commands
	 * (proc-call always returns data) */
	if ((size != SIS630_PCALL) &&
	    ((read_write == I2C_SMBUS_WRITE) || (size == SIS630_QUICK))) {
		return 0;
	}

	switch (size) {
	case SIS630_BYTE:
	case SIS630_BYTE_DATA:
		data->byte = sis630_read(SMB_BYTE);
		break;
	case SIS630_PCALL:
	case SIS630_WORD_DATA:
		data->word = sis630_read(SMB_BYTE) +
			     (sis630_read(SMB_BYTE + 1) << 8);
		break;
	}

	return 0;
}

/* Advertise the SMBus protocols implemented by sis630_access() */
static u32 sis630_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
		I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
		I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_DATA;
}

/*
 * Detect a supported SiS bridge, enable ACPI I/O space, derive the
 * SMBus register base from the ACPI base, and reserve the I/O region.
 * On any failure smbus_base is left at 0.
 */
static int sis630_setup(struct pci_dev *sis630_dev)
{
	unsigned char b;
	struct pci_dev *dummy = NULL;
	int retval, i;
	/* acpi base address */
	unsigned short acpi_base;

	/* check for supported SiS devices */
	for (i = 0; supported[i] > 0; i++) {
		dummy = pci_get_device(PCI_VENDOR_ID_SI, supported[i], dummy);
		if (dummy)
			break; /* found */
	}

	if (dummy) {
		pci_dev_put(dummy);
	} else if (force) {
		dev_err(&sis630_dev->dev,
			"WARNING: Can't detect SIS630 compatible device, but "
			"loading because of force option enabled\n");
	} else {
		return -ENODEV;
	}

	/*
	   Enable ACPI first , so we can accsess reg 74-75
	   in acpi io space and read acpi base addr
	*/
	if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, &b)) {
		dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
		retval = -ENODEV;
		goto exit;
	}
	/* if ACPI already enabled , do nothing */
	if (!(b & 0x80) &&
	    pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) {
		dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n");
		retval = -ENODEV;
		goto exit;
	}

	/* Determine the ACPI base address */
	if (pci_read_config_word(sis630_dev,
				 SIS630_ACPI_BASE_REG, &acpi_base)) {
		dev_err(&sis630_dev->dev,
			"Error: Can't determine ACPI base address\n");
		retval = -ENODEV;
		goto exit;
	}

	dev_dbg(&sis630_dev->dev, "ACPI base at 0x%04hx\n", acpi_base);

	/* SiS964 puts the SMBus registers at offset 0xE0, others at 0x80 */
	if (supported[i] == PCI_DEVICE_ID_SI_760)
		smbus_base = acpi_base + 0xE0;
	else
		smbus_base = acpi_base + 0x80;

	dev_dbg(&sis630_dev->dev, "SMBus base at 0x%04hx\n", smbus_base);

	retval = acpi_check_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION,
				   sis630_driver.name);
	if (retval)
		goto exit;

	/* Everything is happy, let's grab the memory and set things up. */
	if (!request_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION,
			    sis630_driver.name)) {
		dev_err(&sis630_dev->dev,
			"I/O Region 0x%04hx-0x%04hx for SMBus already in use.\n",
			smbus_base + SMB_STS,
			smbus_base + SMB_STS + SIS630_SMB_IOREGION - 1);
		retval = -EBUSY;
		goto exit;
	}

	retval = 0;

exit:
	if (retval)
		smbus_base = 0;
	return retval;
}

static const struct i2c_algorithm smbus_algorithm = {
	.smbus_xfer	= sis630_access,
	.functionality	= sis630_func,
};

static struct i2c_adapter sis630_adapter = {
	.owner		= THIS_MODULE,
	.class		= I2C_CLASS_HWMON | I2C_CLASS_SPD,
	.algo		= &smbus_algorithm,
	.retries	= 3
};

static const struct pci_device_id sis630_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_964) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, sis630_ids);

/* Bind: locate the SMBus registers and register the i2c adapter */
static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	if (sis630_setup(dev)) {
		dev_err(&dev->dev,
			"SIS630 compatible bus not detected, "
			"module not inserted.\n");
		return -ENODEV;
	}

	/* set up the sysfs linkage to our parent device */
	sis630_adapter.dev.parent = &dev->dev;

	snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
		 "SMBus SIS630 adapter at %04hx", smbus_base + SMB_STS);

	return i2c_add_adapter(&sis630_adapter);
}

/* Unbind: tear down the adapter and release the I/O region */
static void sis630_remove(struct pci_dev *dev)
{
	if (smbus_base) {
		i2c_del_adapter(&sis630_adapter);
		release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION);
		smbus_base = 0;
	}
}

static struct pci_driver sis630_driver = {
	.name		= "sis630_smbus",
	.id_table	= sis630_ids,
	.probe		= sis630_probe,
	.remove		= sis630_remove,
};

module_pci_driver(sis630_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Malysh <amalysh@web.de>");
MODULE_DESCRIPTION("SIS630 SMBus driver");
gpl-2.0
HridayHS/Lightning-Kernel
drivers/rtc/rtc-pcap.c
2084
5077
/*
 * pcap rtc code for Motorola EZX phones
 *
 * Copyright (c) 2008 guiming zhuo <gmzhuo@gmail.com>
 * Copyright (c) 2009 Daniel Ribeiro <drwyrm@gmail.com>
 *
 * Based on Motorola's rtc.c Copyright (c) 2003-2005 Motorola
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mfd/ezx-pcap.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

/*
 * Per-device state. The PCAP keeps the time as two registers:
 * a day counter (days since 1/1/1970) and a time-of-day counter
 * (seconds since midnight); alarms use a parallel register pair.
 */
struct pcap_rtc {
	struct pcap_chip *pcap;
	struct rtc_device *rtc;
};

/*
 * Shared handler for the 1Hz tick and time-of-day alarm interrupts;
 * translates the PCAP irq into the corresponding RTC event flags.
 */
static irqreturn_t pcap_rtc_irq(int irq, void *_pcap_rtc)
{
	struct pcap_rtc *pcap_rtc = _pcap_rtc;
	unsigned long rtc_events;

	if (irq == pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ))
		rtc_events = RTC_IRQF | RTC_UF;
	else if (irq == pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA))
		rtc_events = RTC_IRQF | RTC_AF;
	else
		rtc_events = 0;

	rtc_update_irq(pcap_rtc->rtc, 1, rtc_events);
	return IRQ_HANDLED;
}

/* Read the alarm time from the TODA/DAYA register pair */
static int pcap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
	struct rtc_time *tm = &alrm->time;
	unsigned long secs;
	u32 tod;	/* time of day, seconds since midnight */
	u32 days;	/* days since 1/1/1970 */

	ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_TODA, &tod);
	secs = tod & PCAP_RTC_TOD_MASK;

	ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_DAYA, &days);
	secs += (days & PCAP_RTC_DAY_MASK) * SEC_PER_DAY;

	rtc_time_to_tm(secs, tm);

	return 0;
}

/* Program the alarm: split epoch seconds into day count and seconds */
static int pcap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
	struct rtc_time *tm = &alrm->time;
	unsigned long secs;
	u32 tod, days;

	rtc_tm_to_time(tm, &secs);

	tod = secs % SEC_PER_DAY;
	ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_TODA, tod);

	days = secs / SEC_PER_DAY;
	ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_DAYA, days);

	return 0;
}

/* Read the current time from the TOD/DAY register pair */
static int pcap_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
	unsigned long secs;
	u32 tod, days;

	ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_TOD, &tod);
	secs = tod & PCAP_RTC_TOD_MASK;

	ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_DAY, &days);
	secs += (days & PCAP_RTC_DAY_MASK) * SEC_PER_DAY;

	rtc_time_to_tm(secs, tm);

	return rtc_valid_tm(tm);
}

/* Set the current time from epoch seconds */
static int pcap_rtc_set_mmss(struct device *dev, unsigned long secs)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
	u32 tod, days;

	tod = secs % SEC_PER_DAY;
	ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_TOD, tod);

	days = secs / SEC_PER_DAY;
	ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_DAY, days);

	return 0;
}

/* Enable/disable one of the PCAP RTC interrupts at the irq-chip level */
static int pcap_rtc_irq_enable(struct device *dev, int pirq, unsigned int en)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);

	if (en)
		enable_irq(pcap_to_irq(pcap_rtc->pcap, pirq));
	else
		disable_irq(pcap_to_irq(pcap_rtc->pcap, pirq));

	return 0;
}

static int pcap_rtc_alarm_irq_enable(struct device *dev, unsigned int en)
{
	return pcap_rtc_irq_enable(dev, PCAP_IRQ_TODA, en);
}

static const struct rtc_class_ops pcap_rtc_ops = {
	.read_time = pcap_rtc_read_time,
	.read_alarm = pcap_rtc_read_alarm,
	.set_alarm = pcap_rtc_set_alarm,
	.set_mmss = pcap_rtc_set_mmss,
	.alarm_irq_enable = pcap_rtc_alarm_irq_enable,
};

/*
 * Probe: register the RTC device and request the tick and alarm irqs.
 * All resources are devm-managed, so the error path only has to undo
 * the drvdata assignment.
 */
static int __init pcap_rtc_probe(struct platform_device *pdev)
{
	struct pcap_rtc *pcap_rtc;
	int timer_irq, alarm_irq;
	int err = -ENOMEM;

	pcap_rtc = devm_kzalloc(&pdev->dev, sizeof(struct pcap_rtc),
				GFP_KERNEL);
	if (!pcap_rtc)
		return err;

	pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent);

	platform_set_drvdata(pdev, pcap_rtc);

	pcap_rtc->rtc = devm_rtc_device_register(&pdev->dev, "pcap",
				&pcap_rtc_ops, THIS_MODULE);
	if (IS_ERR(pcap_rtc->rtc)) {
		err = PTR_ERR(pcap_rtc->rtc);
		goto fail;
	}

	timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ);
	alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA);

	err = devm_request_irq(&pdev->dev, timer_irq, pcap_rtc_irq, 0,
				"RTC Timer", pcap_rtc);
	if (err)
		goto fail;

	err = devm_request_irq(&pdev->dev, alarm_irq, pcap_rtc_irq, 0,
				"RTC Alarm", pcap_rtc);
	if (err)
		goto fail;

	return 0;
fail:
	platform_set_drvdata(pdev, NULL);
	return err;
}

/* Nothing to do: everything is devm-managed */
static int __exit pcap_rtc_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver pcap_rtc_driver = {
	.remove = __exit_p(pcap_rtc_remove),
	.driver = {
		.name  = "pcap-rtc",
		.owner = THIS_MODULE,
	},
};

module_platform_driver_probe(pcap_rtc_driver, pcap_rtc_probe);

MODULE_DESCRIPTION("Motorola pcap rtc driver");
MODULE_AUTHOR("guiming zhuo <gmzhuo@gmail.com>");
MODULE_LICENSE("GPL");
gpl-2.0
InfinitiveOS-Devices/android_kernel_xiaomi_ferrari
drivers/mfd/ab3100-core.c
2084
22398
/* * Copyright (C) 2007-2010 ST-Ericsson * License terms: GNU General Public License (GPL) version 2 * Low-level core for exclusive access to the AB3100 IC on the I2C bus * and some basic chip-configuration. * Author: Linus Walleij <linus.walleij@stericsson.com> */ #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/notifier.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/random.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/mfd/core.h> #include <linux/mfd/ab3100.h> #include <linux/mfd/abx500.h> /* These are the only registers inside AB3100 used in this main file */ /* Interrupt event registers */ #define AB3100_EVENTA1 0x21 #define AB3100_EVENTA2 0x22 #define AB3100_EVENTA3 0x23 /* AB3100 DAC converter registers */ #define AB3100_DIS 0x00 #define AB3100_D0C 0x01 #define AB3100_D1C 0x02 #define AB3100_D2C 0x03 #define AB3100_D3C 0x04 /* Chip ID register */ #define AB3100_CID 0x20 /* AB3100 interrupt registers */ #define AB3100_IMRA1 0x24 #define AB3100_IMRA2 0x25 #define AB3100_IMRA3 0x26 #define AB3100_IMRB1 0x2B #define AB3100_IMRB2 0x2C #define AB3100_IMRB3 0x2D /* System Power Monitoring and control registers */ #define AB3100_MCA 0x2E #define AB3100_MCB 0x2F /* SIM power up */ #define AB3100_SUP 0x50 /* * I2C communication * * The AB3100 is usually assigned address 0x48 (7-bit) * The chip is defined in the platform i2c_board_data section. 
*/ static int ab3100_get_chip_id(struct device *dev) { struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); return (int)ab3100->chip_id; } static int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval) { u8 regandval[2] = {reg, regval}; int err; err = mutex_lock_interruptible(&ab3100->access_mutex); if (err) return err; /* * A two-byte write message with the first byte containing the register * number and the second byte containing the value to be written * effectively sets a register in the AB3100. */ err = i2c_master_send(ab3100->i2c_client, regandval, 2); if (err < 0) { dev_err(ab3100->dev, "write error (write register): %d\n", err); } else if (err != 2) { dev_err(ab3100->dev, "write error (write register) " "%d bytes transferred (expected 2)\n", err); err = -EIO; } else { /* All is well */ err = 0; } mutex_unlock(&ab3100->access_mutex); return err; } static int set_register_interruptible(struct device *dev, u8 bank, u8 reg, u8 value) { struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); return ab3100_set_register_interruptible(ab3100, reg, value); } /* * The test registers exist at an I2C bus address up one * from the ordinary base. They are not supposed to be used * in production code, but sometimes you have to do that * anyway. It's currently only used from this file so declare * it static and do not export. 
*/ static int ab3100_set_test_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval) { u8 regandval[2] = {reg, regval}; int err; err = mutex_lock_interruptible(&ab3100->access_mutex); if (err) return err; err = i2c_master_send(ab3100->testreg_client, regandval, 2); if (err < 0) { dev_err(ab3100->dev, "write error (write test register): %d\n", err); } else if (err != 2) { dev_err(ab3100->dev, "write error (write test register) " "%d bytes transferred (expected 2)\n", err); err = -EIO; } else { /* All is well */ err = 0; } mutex_unlock(&ab3100->access_mutex); return err; } static int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval) { int err; err = mutex_lock_interruptible(&ab3100->access_mutex); if (err) return err; /* * AB3100 require an I2C "stop" command between each message, else * it will not work. The only way of achieveing this with the * message transport layer is to send the read and write messages * separately. */ err = i2c_master_send(ab3100->i2c_client, &reg, 1); if (err < 0) { dev_err(ab3100->dev, "write error (send register address): %d\n", err); goto get_reg_out_unlock; } else if (err != 1) { dev_err(ab3100->dev, "write error (send register address) " "%d bytes transferred (expected 1)\n", err); err = -EIO; goto get_reg_out_unlock; } else { /* All is well */ err = 0; } err = i2c_master_recv(ab3100->i2c_client, regval, 1); if (err < 0) { dev_err(ab3100->dev, "write error (read register): %d\n", err); goto get_reg_out_unlock; } else if (err != 1) { dev_err(ab3100->dev, "write error (read register) " "%d bytes transferred (expected 1)\n", err); err = -EIO; goto get_reg_out_unlock; } else { /* All is well */ err = 0; } get_reg_out_unlock: mutex_unlock(&ab3100->access_mutex); return err; } static int get_register_interruptible(struct device *dev, u8 bank, u8 reg, u8 *value) { struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); return ab3100_get_register_interruptible(ab3100, reg, value); } static int 
ab3100_get_register_page_interruptible(struct ab3100 *ab3100, u8 first_reg, u8 *regvals, u8 numregs) { int err; if (ab3100->chip_id == 0xa0 || ab3100->chip_id == 0xa1) /* These don't support paged reads */ return -EIO; err = mutex_lock_interruptible(&ab3100->access_mutex); if (err) return err; /* * Paged read also require an I2C "stop" command. */ err = i2c_master_send(ab3100->i2c_client, &first_reg, 1); if (err < 0) { dev_err(ab3100->dev, "write error (send first register address): %d\n", err); goto get_reg_page_out_unlock; } else if (err != 1) { dev_err(ab3100->dev, "write error (send first register address) " "%d bytes transferred (expected 1)\n", err); err = -EIO; goto get_reg_page_out_unlock; } err = i2c_master_recv(ab3100->i2c_client, regvals, numregs); if (err < 0) { dev_err(ab3100->dev, "write error (read register page): %d\n", err); goto get_reg_page_out_unlock; } else if (err != numregs) { dev_err(ab3100->dev, "write error (read register page) " "%d bytes transferred (expected %d)\n", err, numregs); err = -EIO; goto get_reg_page_out_unlock; } /* All is well */ err = 0; get_reg_page_out_unlock: mutex_unlock(&ab3100->access_mutex); return err; } static int get_register_page_interruptible(struct device *dev, u8 bank, u8 first_reg, u8 *regvals, u8 numregs) { struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); return ab3100_get_register_page_interruptible(ab3100, first_reg, regvals, numregs); } static int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 andmask, u8 ormask) { u8 regandval[2] = {reg, 0}; int err; err = mutex_lock_interruptible(&ab3100->access_mutex); if (err) return err; /* First read out the target register */ err = i2c_master_send(ab3100->i2c_client, &reg, 1); if (err < 0) { dev_err(ab3100->dev, "write error (maskset send address): %d\n", err); goto get_maskset_unlock; } else if (err != 1) { dev_err(ab3100->dev, "write error (maskset send address) " "%d bytes transferred (expected 1)\n", err); err = -EIO; goto 
get_maskset_unlock; } err = i2c_master_recv(ab3100->i2c_client, &regandval[1], 1); if (err < 0) { dev_err(ab3100->dev, "write error (maskset read register): %d\n", err); goto get_maskset_unlock; } else if (err != 1) { dev_err(ab3100->dev, "write error (maskset read register) " "%d bytes transferred (expected 1)\n", err); err = -EIO; goto get_maskset_unlock; } /* Modify the register */ regandval[1] &= andmask; regandval[1] |= ormask; /* Write the register */ err = i2c_master_send(ab3100->i2c_client, regandval, 2); if (err < 0) { dev_err(ab3100->dev, "write error (write register): %d\n", err); goto get_maskset_unlock; } else if (err != 2) { dev_err(ab3100->dev, "write error (write register) " "%d bytes transferred (expected 2)\n", err); err = -EIO; goto get_maskset_unlock; } /* All is well */ err = 0; get_maskset_unlock: mutex_unlock(&ab3100->access_mutex); return err; } static int mask_and_set_register_interruptible(struct device *dev, u8 bank, u8 reg, u8 bitmask, u8 bitvalues) { struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); return ab3100_mask_and_set_register_interruptible(ab3100, reg, bitmask, (bitmask & bitvalues)); } /* * Register a simple callback for handling any AB3100 events. */ int ab3100_event_register(struct ab3100 *ab3100, struct notifier_block *nb) { return blocking_notifier_chain_register(&ab3100->event_subscribers, nb); } EXPORT_SYMBOL(ab3100_event_register); /* * Remove a previously registered callback. 
 */
int ab3100_event_unregister(struct ab3100 *ab3100,
			    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&ab3100->event_subscribers,
						  nb);
}
EXPORT_SYMBOL(ab3100_event_unregister);

/*
 * abx500_ops hook: copies the three event registers latched at startup
 * into *event. Returns -EAGAIN until the first IRQ has populated them.
 */
static int ab3100_event_registers_startup_state_get(struct device *dev,
						    u8 *event)
{
	struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
	if (!ab3100->startup_events_read)
		return -EAGAIN; /* Try again later */
	memcpy(event, ab3100->startup_events, 3);
	return 0;
}

/* abx500 framework operations implemented by this driver */
static struct abx500_ops ab3100_ops = {
	.get_chip_id = ab3100_get_chip_id,
	.set_register = set_register_interruptible,
	.get_register = get_register_interruptible,
	.get_register_page = get_register_page_interruptible,
	.set_register_page = NULL,
	.mask_and_set_register = mask_and_set_register_interruptible,
	.event_registers_startup_state_get =
		ab3100_event_registers_startup_state_get,
	.startup_irq_enabled = NULL,
};

/*
 * This is a threaded interrupt handler so we can make some
 * I2C calls etc.
 */
static irqreturn_t ab3100_irq_handler(int irq, void *data)
{
	struct ab3100 *ab3100 = data;
	u8 event_regs[3];
	u32 fatevent;
	int err;

	err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
						     event_regs, 3);
	if (err)
		goto err_event;

	/* Pack EVENTA1..A3 into one 24-bit word for the notifier chain */
	fatevent = (event_regs[0] << 16) |
		(event_regs[1] << 8) |
		event_regs[2];

	/* Latch the very first event state for startup_state_get() */
	if (!ab3100->startup_events_read) {
		ab3100->startup_events[0] = event_regs[0];
		ab3100->startup_events[1] = event_regs[1];
		ab3100->startup_events[2] = event_regs[2];
		ab3100->startup_events_read = true;
	}
	/*
	 * The notified parties will have to mask out the events
	 * they're interested in and react to them. They will be
	 * notified on all events, then they use the fatevent value
	 * to determine if they're interested.
	 */
	blocking_notifier_call_chain(&ab3100->event_subscribers,
				     fatevent, NULL);

	dev_dbg(ab3100->dev,
		"IRQ Event: 0x%08x\n", fatevent);

	return IRQ_HANDLED;

 err_event:
	/* Read failure is only logged; the IRQ is still acked as handled */
	dev_dbg(ab3100->dev, "error reading event status\n");
	return IRQ_HANDLED;
}

#ifdef CONFIG_DEBUG_FS
/*
 * Some debugfs entries only exposed if we're using debug
 */
static int ab3100_registers_print(struct seq_file *s, void *p)
{
	struct ab3100 *ab3100 = s->private;
	u8 value;
	u8 reg;

	seq_printf(s, "AB3100 registers:\n");

	/* NOTE(review): read errors are ignored; a stale/garbage value
	 * would be printed. Dumps 0x00..0xfe (0xff excluded). */
	for (reg = 0; reg < 0xff; reg++) {
		ab3100_get_register_interruptible(ab3100, reg, &value);
		seq_printf(s, "[0x%x]: 0x%x\n", reg, value);
	}
	return 0;
}

static int ab3100_registers_open(struct inode *inode, struct file *file)
{
	return single_open(file, ab3100_registers_print, inode->i_private);
}

static const struct file_operations ab3100_registers_fops = {
	.open = ab3100_registers_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

/*
 * Per-file context for the get_reg/set_reg debugfs files.
 * mode == false: the file reads a register; mode == true: it writes one.
 */
struct ab3100_get_set_reg_priv {
	struct ab3100 *ab3100;
	bool mode;
};

/*
 * debugfs write handler shared by the "get_reg" and "set_reg" files.
 * Parses "0xnn" (read) or "0xaa 0xbb" (write 0xbb to register 0xaa)
 * from userspace and logs the result via dev_info().
 */
static ssize_t ab3100_get_set_reg(struct file *file,
				  const char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	struct ab3100_get_set_reg_priv *priv = file->private_data;
	struct ab3100 *ab3100 = priv->ab3100;
	char buf[32];
	ssize_t buf_size;
	int regp;
	unsigned long user_reg;
	int err;
	int i = 0;

	/* Get userspace string and assure termination */
	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	/*
	 * The idea is here to parse a string which is either
	 * "0xnn" for reading a register, or "0xaa 0xbb" for
	 * writing 0xbb to the register 0xaa. First move past
	 * whitespace and then begin to parse the register.
	 */
	while ((i < buf_size) && (buf[i] == ' '))
		i++;
	regp = i;
	/*
	 * Advance pointer to end of string then terminate
	 * the register string. This is needed to satisfy
	 * the strict_strtoul() function.
	 */
	while ((i < buf_size) && (buf[i] != ' '))
		i++;
	buf[i] = '\0';

	err = strict_strtoul(&buf[regp], 16, &user_reg);
	if (err)
		return err;
	if (user_reg > 0xff)
		return -EINVAL;

	/* Either we read or we write a register here */
	if (!priv->mode) {
		/* Reading */
		u8 reg = (u8) user_reg;
		u8 regvalue;

		ab3100_get_register_interruptible(ab3100, reg, &regvalue);

		dev_info(ab3100->dev,
			 "debug read AB3100 reg[0x%02x]: 0x%02x\n",
			 reg, regvalue);
	} else {
		int valp;
		unsigned long user_value;
		u8 reg = (u8) user_reg;
		u8 value;
		u8 regvalue;

		/*
		 * Writing, we need some value to write to
		 * the register so keep parsing the string
		 * from userspace.
		 */
		i++;
		while ((i < buf_size) && (buf[i] == ' '))
			i++;
		valp = i;
		while ((i < buf_size) && (buf[i] != ' '))
			i++;
		buf[i] = '\0';

		err = strict_strtoul(&buf[valp], 16, &user_value);
		if (err)
			return err;
		/* NOTE(review): re-checks user_reg, not user_value — a value
		 * > 0xff is silently truncated by the (u8) cast below. */
		if (user_reg > 0xff)
			return -EINVAL;

		value = (u8) user_value;
		ab3100_set_register_interruptible(ab3100, reg, value);
		/* Read back so the log shows what actually landed */
		ab3100_get_register_interruptible(ab3100, reg, &regvalue);

		dev_info(ab3100->dev,
			 "debug write reg[0x%02x] with 0x%02x, "
			 "after readback: 0x%02x\n",
			 reg, value, regvalue);
	}
	return buf_size;
}

static const struct file_operations ab3100_get_set_reg_fops = {
	.open = simple_open,
	.write = ab3100_get_set_reg,
	.llseek = noop_llseek,
};

/* debugfs entries; one instance per system (single AB3100 assumed) */
static struct dentry *ab3100_dir;
static struct dentry *ab3100_reg_file;
static struct ab3100_get_set_reg_priv ab3100_get_priv;
static struct dentry *ab3100_get_reg_file;
static struct ab3100_get_set_reg_priv ab3100_set_priv;
static struct dentry *ab3100_set_reg_file;

/*
 * Creates the ab3100 debugfs directory with "registers" (dump),
 * "get_reg" and "set_reg" (parse-and-poke) files. Failures are
 * silently unwound — debugfs is best-effort, so nothing is returned.
 */
static void ab3100_setup_debugfs(struct ab3100 *ab3100)
{
	int err;

	ab3100_dir = debugfs_create_dir("ab3100", NULL);
	if (!ab3100_dir)
		goto exit_no_debugfs;

	ab3100_reg_file = debugfs_create_file("registers",
				S_IRUGO, ab3100_dir, ab3100,
				&ab3100_registers_fops);
	if (!ab3100_reg_file) {
		err = -ENOMEM;
		goto exit_destroy_dir;
	}

	ab3100_get_priv.ab3100 = ab3100;
	ab3100_get_priv.mode = false;
	ab3100_get_reg_file = debugfs_create_file("get_reg",
				S_IWUSR, ab3100_dir, &ab3100_get_priv,
				&ab3100_get_set_reg_fops);
	if (!ab3100_get_reg_file) {
		err = -ENOMEM;
		goto exit_destroy_reg;
	}

	ab3100_set_priv.ab3100 = ab3100;
	ab3100_set_priv.mode = true;
	ab3100_set_reg_file = debugfs_create_file("set_reg",
				S_IWUSR, ab3100_dir, &ab3100_set_priv,
				&ab3100_get_set_reg_fops);
	if (!ab3100_set_reg_file) {
		err = -ENOMEM;
		goto exit_destroy_get_reg;
	}

	return;

 exit_destroy_get_reg:
	debugfs_remove(ab3100_get_reg_file);
 exit_destroy_reg:
	debugfs_remove(ab3100_reg_file);
 exit_destroy_dir:
	debugfs_remove(ab3100_dir);
 exit_no_debugfs:
	return;
}

static inline void ab3100_remove_debugfs(void)
{
	debugfs_remove(ab3100_set_reg_file);
	debugfs_remove(ab3100_get_reg_file);
	debugfs_remove(ab3100_reg_file);
	debugfs_remove(ab3100_dir);
}

#else
/* Stubs when CONFIG_DEBUG_FS is disabled */
static inline void ab3100_setup_debugfs(struct ab3100 *ab3100)
{
}
static inline void ab3100_remove_debugfs(void)
{
}
#endif

/*
 * Basic set-up, datastructure creation/destruction and I2C interface.
 * This sets up a default config in the AB3100 chip so that it
 * will work as expected.
*/ struct ab3100_init_setting { u8 abreg; u8 setting; }; static const struct ab3100_init_setting ab3100_init_settings[] = { { .abreg = AB3100_MCA, .setting = 0x01 }, { .abreg = AB3100_MCB, .setting = 0x30 }, { .abreg = AB3100_IMRA1, .setting = 0x00 }, { .abreg = AB3100_IMRA2, .setting = 0xFF }, { .abreg = AB3100_IMRA3, .setting = 0x01 }, { .abreg = AB3100_IMRB1, .setting = 0xBF }, { .abreg = AB3100_IMRB2, .setting = 0xFF }, { .abreg = AB3100_IMRB3, .setting = 0xFF }, { .abreg = AB3100_SUP, .setting = 0x00 }, { .abreg = AB3100_DIS, .setting = 0xF0 }, { .abreg = AB3100_D0C, .setting = 0x00 }, { .abreg = AB3100_D1C, .setting = 0x00 }, { .abreg = AB3100_D2C, .setting = 0x00 }, { .abreg = AB3100_D3C, .setting = 0x00 }, }; static int ab3100_setup(struct ab3100 *ab3100) { int err = 0; int i; for (i = 0; i < ARRAY_SIZE(ab3100_init_settings); i++) { err = ab3100_set_register_interruptible(ab3100, ab3100_init_settings[i].abreg, ab3100_init_settings[i].setting); if (err) goto exit_no_setup; } /* * Special trick to make the AB3100 use the 32kHz clock (RTC) * bit 3 in test register 0x02 is a special, undocumented test * register bit that only exist in AB3100 P1E */ if (ab3100->chip_id == 0xc4) { dev_warn(ab3100->dev, "AB3100 P1E variant detected, " "forcing chip to 32KHz\n"); err = ab3100_set_test_register_interruptible(ab3100, 0x02, 0x08); } exit_no_setup: return err; } /* The subdevices of the AB3100 */ static struct mfd_cell ab3100_devs[] = { { .name = "ab3100-dac", .id = -1, }, { .name = "ab3100-leds", .id = -1, }, { .name = "ab3100-power", .id = -1, }, { .name = "ab3100-regulators", .of_compatible = "stericsson,ab3100-regulators", .id = -1, }, { .name = "ab3100-sim", .id = -1, }, { .name = "ab3100-uart", .id = -1, }, { .name = "ab3100-rtc", .id = -1, }, { .name = "ab3100-charger", .id = -1, }, { .name = "ab3100-boost", .id = -1, }, { .name = "ab3100-adc", .id = -1, }, { .name = "ab3100-fuelgauge", .id = -1, }, { .name = "ab3100-vibrator", .id = -1, }, { .name = 
"ab3100-otp", .id = -1, }, { .name = "ab3100-codec", .id = -1, }, }; struct ab_family_id { u8 id; char *name; }; static const struct ab_family_id ids[] = { /* AB3100 */ { .id = 0xc0, .name = "P1A" }, { .id = 0xc1, .name = "P1B" }, { .id = 0xc2, .name = "P1C" }, { .id = 0xc3, .name = "P1D" }, { .id = 0xc4, .name = "P1E" }, { .id = 0xc5, .name = "P1F/R1A" }, { .id = 0xc6, .name = "P1G/R1A" }, { .id = 0xc7, .name = "P2A/R2A" }, { .id = 0xc8, .name = "P2B/R2B" }, /* AB3000 variants, not supported */ { .id = 0xa0 }, { .id = 0xa1 }, { .id = 0xa2 }, { .id = 0xa3 }, { .id = 0xa4 }, { .id = 0xa5 }, { .id = 0xa6 }, { .id = 0xa7 }, /* Terminator */ { .id = 0x00, }, }; static int ab3100_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ab3100 *ab3100; struct ab3100_platform_data *ab3100_plf_data = client->dev.platform_data; int err; int i; ab3100 = devm_kzalloc(&client->dev, sizeof(struct ab3100), GFP_KERNEL); if (!ab3100) { dev_err(&client->dev, "could not allocate AB3100 device\n"); return -ENOMEM; } /* Initialize data structure */ mutex_init(&ab3100->access_mutex); BLOCKING_INIT_NOTIFIER_HEAD(&ab3100->event_subscribers); ab3100->i2c_client = client; ab3100->dev = &ab3100->i2c_client->dev; i2c_set_clientdata(client, ab3100); /* Read chip ID register */ err = ab3100_get_register_interruptible(ab3100, AB3100_CID, &ab3100->chip_id); if (err) { dev_err(&client->dev, "could not communicate with the AB3100 analog " "baseband chip\n"); goto exit_no_detect; } for (i = 0; ids[i].id != 0x0; i++) { if (ids[i].id == ab3100->chip_id) { if (ids[i].name != NULL) { snprintf(&ab3100->chip_name[0], sizeof(ab3100->chip_name) - 1, "AB3100 %s", ids[i].name); break; } else { dev_err(&client->dev, "AB3000 is not supported\n"); goto exit_no_detect; } } } if (ids[i].id == 0x0) { dev_err(&client->dev, "unknown analog baseband chip id: 0x%x\n", ab3100->chip_id); dev_err(&client->dev, "accepting it anyway. 
Please update " "the driver.\n"); goto exit_no_detect; } dev_info(&client->dev, "Detected chip: %s\n", &ab3100->chip_name[0]); /* Attach a second dummy i2c_client to the test register address */ ab3100->testreg_client = i2c_new_dummy(client->adapter, client->addr + 1); if (!ab3100->testreg_client) { err = -ENOMEM; goto exit_no_testreg_client; } err = ab3100_setup(ab3100); if (err) goto exit_no_setup; err = devm_request_threaded_irq(&client->dev, client->irq, NULL, ab3100_irq_handler, IRQF_ONESHOT, "ab3100-core", ab3100); if (err) goto exit_no_irq; err = abx500_register_ops(&client->dev, &ab3100_ops); if (err) goto exit_no_ops; /* Set up and register the platform devices. */ for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++) { ab3100_devs[i].platform_data = ab3100_plf_data; ab3100_devs[i].pdata_size = sizeof(struct ab3100_platform_data); } err = mfd_add_devices(&client->dev, 0, ab3100_devs, ARRAY_SIZE(ab3100_devs), NULL, 0, NULL); ab3100_setup_debugfs(ab3100); return 0; exit_no_ops: exit_no_irq: exit_no_setup: i2c_unregister_device(ab3100->testreg_client); exit_no_testreg_client: exit_no_detect: return err; } static int ab3100_remove(struct i2c_client *client) { struct ab3100 *ab3100 = i2c_get_clientdata(client); /* Unregister subdevices */ mfd_remove_devices(&client->dev); ab3100_remove_debugfs(); i2c_unregister_device(ab3100->testreg_client); return 0; } static const struct i2c_device_id ab3100_id[] = { { "ab3100", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ab3100_id); static struct i2c_driver ab3100_driver = { .driver = { .name = "ab3100", .owner = THIS_MODULE, }, .id_table = ab3100_id, .probe = ab3100_probe, .remove = ab3100_remove, }; static int __init ab3100_i2c_init(void) { return i2c_add_driver(&ab3100_driver); } static void __exit ab3100_i2c_exit(void) { i2c_del_driver(&ab3100_driver); } subsys_initcall(ab3100_i2c_init); module_exit(ab3100_i2c_exit); MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>"); MODULE_DESCRIPTION("AB3100 core driver"); 
MODULE_LICENSE("GPL");
gpl-2.0
Nyks45/Veno-M
drivers/pinctrl/pinctrl-sirf.c
2084
48483
/* * pinmux driver for CSR SiRFprimaII * * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. * * Licensed under GPLv2 or later. */ #include <linux/init.h> #include <linux/module.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/irqdomain.h> #include <linux/irqchip/chained_irq.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> #include <linux/pinctrl/consumer.h> #include <linux/pinctrl/machine.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/bitops.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #define DRIVER_NAME "pinmux-sirf" #define SIRFSOC_NUM_PADS 622 #define SIRFSOC_RSC_PIN_MUX 0x4 #define SIRFSOC_GPIO_PAD_EN(g) ((g)*0x100 + 0x84) #define SIRFSOC_GPIO_PAD_EN_CLR(g) ((g)*0x100 + 0x90) #define SIRFSOC_GPIO_CTRL(g, i) ((g)*0x100 + (i)*4) #define SIRFSOC_GPIO_DSP_EN0 (0x80) #define SIRFSOC_GPIO_INT_STATUS(g) ((g)*0x100 + 0x8C) #define SIRFSOC_GPIO_CTL_INTR_LOW_MASK 0x1 #define SIRFSOC_GPIO_CTL_INTR_HIGH_MASK 0x2 #define SIRFSOC_GPIO_CTL_INTR_TYPE_MASK 0x4 #define SIRFSOC_GPIO_CTL_INTR_EN_MASK 0x8 #define SIRFSOC_GPIO_CTL_INTR_STS_MASK 0x10 #define SIRFSOC_GPIO_CTL_OUT_EN_MASK 0x20 #define SIRFSOC_GPIO_CTL_DATAOUT_MASK 0x40 #define SIRFSOC_GPIO_CTL_DATAIN_MASK 0x80 #define SIRFSOC_GPIO_CTL_PULL_MASK 0x100 #define SIRFSOC_GPIO_CTL_PULL_HIGH 0x200 #define SIRFSOC_GPIO_CTL_DSP_INT 0x400 #define SIRFSOC_GPIO_NO_OF_BANKS 5 #define SIRFSOC_GPIO_BANK_SIZE 32 #define SIRFSOC_GPIO_NUM(bank, index) (((bank)*(32)) + (index)) struct sirfsoc_gpio_bank { struct of_mm_gpio_chip chip; struct irq_domain *domain; int id; int parent_irq; spinlock_t lock; bool is_marco; /* for marco, some registers are different with prima2 */ }; static struct sirfsoc_gpio_bank sgpio_bank[SIRFSOC_GPIO_NO_OF_BANKS]; static DEFINE_SPINLOCK(sgpio_lock); /* * pad list for the 
pinmux subsystem * refer to CS-131858-DC-6A.xls */ static const struct pinctrl_pin_desc sirfsoc_pads[] = { PINCTRL_PIN(0, "gpio0-0"), PINCTRL_PIN(1, "gpio0-1"), PINCTRL_PIN(2, "gpio0-2"), PINCTRL_PIN(3, "gpio0-3"), PINCTRL_PIN(4, "pwm0"), PINCTRL_PIN(5, "pwm1"), PINCTRL_PIN(6, "pwm2"), PINCTRL_PIN(7, "pwm3"), PINCTRL_PIN(8, "warm_rst_b"), PINCTRL_PIN(9, "odo_0"), PINCTRL_PIN(10, "odo_1"), PINCTRL_PIN(11, "dr_dir"), PINCTRL_PIN(12, "viprom_fa"), PINCTRL_PIN(13, "scl_1"), PINCTRL_PIN(14, "ntrst"), PINCTRL_PIN(15, "sda_1"), PINCTRL_PIN(16, "x_ldd[16]"), PINCTRL_PIN(17, "x_ldd[17]"), PINCTRL_PIN(18, "x_ldd[18]"), PINCTRL_PIN(19, "x_ldd[19]"), PINCTRL_PIN(20, "x_ldd[20]"), PINCTRL_PIN(21, "x_ldd[21]"), PINCTRL_PIN(22, "x_ldd[22]"), PINCTRL_PIN(23, "x_ldd[23], lcdrom_frdy"), PINCTRL_PIN(24, "gps_sgn"), PINCTRL_PIN(25, "gps_mag"), PINCTRL_PIN(26, "gps_clk"), PINCTRL_PIN(27, "sd_cd_b_1"), PINCTRL_PIN(28, "sd_vcc_on_1"), PINCTRL_PIN(29, "sd_wp_b_1"), PINCTRL_PIN(30, "sd_clk_3"), PINCTRL_PIN(31, "sd_cmd_3"), PINCTRL_PIN(32, "x_sd_dat_3[0]"), PINCTRL_PIN(33, "x_sd_dat_3[1]"), PINCTRL_PIN(34, "x_sd_dat_3[2]"), PINCTRL_PIN(35, "x_sd_dat_3[3]"), PINCTRL_PIN(36, "x_sd_clk_4"), PINCTRL_PIN(37, "x_sd_cmd_4"), PINCTRL_PIN(38, "x_sd_dat_4[0]"), PINCTRL_PIN(39, "x_sd_dat_4[1]"), PINCTRL_PIN(40, "x_sd_dat_4[2]"), PINCTRL_PIN(41, "x_sd_dat_4[3]"), PINCTRL_PIN(42, "x_cko_1"), PINCTRL_PIN(43, "x_ac97_bit_clk"), PINCTRL_PIN(44, "x_ac97_dout"), PINCTRL_PIN(45, "x_ac97_din"), PINCTRL_PIN(46, "x_ac97_sync"), PINCTRL_PIN(47, "x_txd_1"), PINCTRL_PIN(48, "x_txd_2"), PINCTRL_PIN(49, "x_rxd_1"), PINCTRL_PIN(50, "x_rxd_2"), PINCTRL_PIN(51, "x_usclk_0"), PINCTRL_PIN(52, "x_utxd_0"), PINCTRL_PIN(53, "x_urxd_0"), PINCTRL_PIN(54, "x_utfs_0"), PINCTRL_PIN(55, "x_urfs_0"), PINCTRL_PIN(56, "x_usclk_1"), PINCTRL_PIN(57, "x_utxd_1"), PINCTRL_PIN(58, "x_urxd_1"), PINCTRL_PIN(59, "x_utfs_1"), PINCTRL_PIN(60, "x_urfs_1"), PINCTRL_PIN(61, "x_usclk_2"), PINCTRL_PIN(62, "x_utxd_2"), PINCTRL_PIN(63, "x_urxd_2"), 
PINCTRL_PIN(64, "x_utfs_2"), PINCTRL_PIN(65, "x_urfs_2"), PINCTRL_PIN(66, "x_df_we_b"), PINCTRL_PIN(67, "x_df_re_b"), PINCTRL_PIN(68, "x_txd_0"), PINCTRL_PIN(69, "x_rxd_0"), PINCTRL_PIN(78, "x_cko_0"), PINCTRL_PIN(79, "x_vip_pxd[7]"), PINCTRL_PIN(80, "x_vip_pxd[6]"), PINCTRL_PIN(81, "x_vip_pxd[5]"), PINCTRL_PIN(82, "x_vip_pxd[4]"), PINCTRL_PIN(83, "x_vip_pxd[3]"), PINCTRL_PIN(84, "x_vip_pxd[2]"), PINCTRL_PIN(85, "x_vip_pxd[1]"), PINCTRL_PIN(86, "x_vip_pxd[0]"), PINCTRL_PIN(87, "x_vip_vsync"), PINCTRL_PIN(88, "x_vip_hsync"), PINCTRL_PIN(89, "x_vip_pxclk"), PINCTRL_PIN(90, "x_sda_0"), PINCTRL_PIN(91, "x_scl_0"), PINCTRL_PIN(92, "x_df_ry_by"), PINCTRL_PIN(93, "x_df_cs_b[1]"), PINCTRL_PIN(94, "x_df_cs_b[0]"), PINCTRL_PIN(95, "x_l_pclk"), PINCTRL_PIN(96, "x_l_lck"), PINCTRL_PIN(97, "x_l_fck"), PINCTRL_PIN(98, "x_l_de"), PINCTRL_PIN(99, "x_ldd[0]"), PINCTRL_PIN(100, "x_ldd[1]"), PINCTRL_PIN(101, "x_ldd[2]"), PINCTRL_PIN(102, "x_ldd[3]"), PINCTRL_PIN(103, "x_ldd[4]"), PINCTRL_PIN(104, "x_ldd[5]"), PINCTRL_PIN(105, "x_ldd[6]"), PINCTRL_PIN(106, "x_ldd[7]"), PINCTRL_PIN(107, "x_ldd[8]"), PINCTRL_PIN(108, "x_ldd[9]"), PINCTRL_PIN(109, "x_ldd[10]"), PINCTRL_PIN(110, "x_ldd[11]"), PINCTRL_PIN(111, "x_ldd[12]"), PINCTRL_PIN(112, "x_ldd[13]"), PINCTRL_PIN(113, "x_ldd[14]"), PINCTRL_PIN(114, "x_ldd[15]"), }; /** * @dev: a pointer back to containing device * @virtbase: the offset to the controller in virtual memory */ struct sirfsoc_pmx { struct device *dev; struct pinctrl_dev *pmx; void __iomem *gpio_virtbase; void __iomem *rsc_virtbase; bool is_marco; }; /* SIRFSOC_GPIO_PAD_EN set */ struct sirfsoc_muxmask { unsigned long group; unsigned long mask; }; struct sirfsoc_padmux { unsigned long muxmask_counts; const struct sirfsoc_muxmask *muxmask; /* RSC_PIN_MUX set */ unsigned long funcmask; unsigned long funcval; }; /** * struct sirfsoc_pin_group - describes a SiRFprimaII pin group * @name: the name of this specific pin group * @pins: an array of discrete physical pins used in this 
group, taken * from the driver-local pin enumeration space * @num_pins: the number of pins in this group array, i.e. the number of * elements in .pins so we can iterate over that array */ struct sirfsoc_pin_group { const char *name; const unsigned int *pins; const unsigned num_pins; }; static const struct sirfsoc_muxmask lcd_16bits_sirfsoc_muxmask[] = { { .group = 3, .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18), }, { .group = 2, .mask = BIT(31), }, }; static const struct sirfsoc_padmux lcd_16bits_padmux = { .muxmask_counts = ARRAY_SIZE(lcd_16bits_sirfsoc_muxmask), .muxmask = lcd_16bits_sirfsoc_muxmask, .funcmask = BIT(4), .funcval = 0, }; static const unsigned lcd_16bits_pins[] = { 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 }; static const struct sirfsoc_muxmask lcd_18bits_muxmask[] = { { .group = 3, .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18), }, { .group = 2, .mask = BIT(31), }, { .group = 0, .mask = BIT(16) | BIT(17), }, }; static const struct sirfsoc_padmux lcd_18bits_padmux = { .muxmask_counts = ARRAY_SIZE(lcd_18bits_muxmask), .muxmask = lcd_18bits_muxmask, .funcmask = BIT(4), .funcval = 0, }; static const unsigned lcd_18bits_pins[] = { 16, 17, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114}; static const struct sirfsoc_muxmask lcd_24bits_muxmask[] = { { .group = 3, .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18), }, { .group = 2, .mask = BIT(31), }, { .group = 0, .mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23), 
}, }; static const struct sirfsoc_padmux lcd_24bits_padmux = { .muxmask_counts = ARRAY_SIZE(lcd_24bits_muxmask), .muxmask = lcd_24bits_muxmask, .funcmask = BIT(4), .funcval = 0, }; static const unsigned lcd_24bits_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 }; static const struct sirfsoc_muxmask lcdrom_muxmask[] = { { .group = 3, .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18), }, { .group = 2, .mask = BIT(31), }, { .group = 0, .mask = BIT(23), }, }; static const struct sirfsoc_padmux lcdrom_padmux = { .muxmask_counts = ARRAY_SIZE(lcdrom_muxmask), .muxmask = lcdrom_muxmask, .funcmask = BIT(4), .funcval = BIT(4), }; static const unsigned lcdrom_pins[] = { 23, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 }; static const struct sirfsoc_muxmask uart0_muxmask[] = { { .group = 2, .mask = BIT(4) | BIT(5), }, { .group = 1, .mask = BIT(23) | BIT(28), }, }; static const struct sirfsoc_padmux uart0_padmux = { .muxmask_counts = ARRAY_SIZE(uart0_muxmask), .muxmask = uart0_muxmask, .funcmask = BIT(9), .funcval = BIT(9), }; static const unsigned uart0_pins[] = { 55, 60, 68, 69 }; static const struct sirfsoc_muxmask uart0_nostreamctrl_muxmask[] = { { .group = 2, .mask = BIT(4) | BIT(5), }, }; static const struct sirfsoc_padmux uart0_nostreamctrl_padmux = { .muxmask_counts = ARRAY_SIZE(uart0_nostreamctrl_muxmask), .muxmask = uart0_nostreamctrl_muxmask, }; static const unsigned uart0_nostreamctrl_pins[] = { 68, 39 }; static const struct sirfsoc_muxmask uart1_muxmask[] = { { .group = 1, .mask = BIT(15) | BIT(17), }, }; static const struct sirfsoc_padmux uart1_padmux = { .muxmask_counts = ARRAY_SIZE(uart1_muxmask), .muxmask = uart1_muxmask, }; static const unsigned uart1_pins[] = { 47, 49 }; static const 
struct sirfsoc_muxmask uart2_muxmask[] = { { .group = 1, .mask = BIT(16) | BIT(18) | BIT(24) | BIT(27), }, }; static const struct sirfsoc_padmux uart2_padmux = { .muxmask_counts = ARRAY_SIZE(uart2_muxmask), .muxmask = uart2_muxmask, .funcmask = BIT(10), .funcval = BIT(10), }; static const unsigned uart2_pins[] = { 48, 50, 56, 59 }; static const struct sirfsoc_muxmask uart2_nostreamctrl_muxmask[] = { { .group = 1, .mask = BIT(16) | BIT(18), }, }; static const struct sirfsoc_padmux uart2_nostreamctrl_padmux = { .muxmask_counts = ARRAY_SIZE(uart2_nostreamctrl_muxmask), .muxmask = uart2_nostreamctrl_muxmask, }; static const unsigned uart2_nostreamctrl_pins[] = { 48, 50 }; static const struct sirfsoc_muxmask sdmmc3_muxmask[] = { { .group = 0, .mask = BIT(30) | BIT(31), }, { .group = 1, .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), }, }; static const struct sirfsoc_padmux sdmmc3_padmux = { .muxmask_counts = ARRAY_SIZE(sdmmc3_muxmask), .muxmask = sdmmc3_muxmask, .funcmask = BIT(7), .funcval = 0, }; static const unsigned sdmmc3_pins[] = { 30, 31, 32, 33, 34, 35 }; static const struct sirfsoc_muxmask spi0_muxmask[] = { { .group = 1, .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3), }, }; static const struct sirfsoc_padmux spi0_padmux = { .muxmask_counts = ARRAY_SIZE(spi0_muxmask), .muxmask = spi0_muxmask, .funcmask = BIT(7), .funcval = BIT(7), }; static const unsigned spi0_pins[] = { 32, 33, 34, 35 }; static const struct sirfsoc_muxmask sdmmc4_muxmask[] = { { .group = 1, .mask = BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9), }, }; static const struct sirfsoc_padmux sdmmc4_padmux = { .muxmask_counts = ARRAY_SIZE(sdmmc4_muxmask), .muxmask = sdmmc4_muxmask, }; static const unsigned sdmmc4_pins[] = { 36, 37, 38, 39, 40, 41 }; static const struct sirfsoc_muxmask cko1_muxmask[] = { { .group = 1, .mask = BIT(10), }, }; static const struct sirfsoc_padmux cko1_padmux = { .muxmask_counts = ARRAY_SIZE(cko1_muxmask), .muxmask = cko1_muxmask, .funcmask = BIT(3), .funcval = 0, }; static 
const unsigned cko1_pins[] = { 42 }; static const struct sirfsoc_muxmask i2s_muxmask[] = { { .group = 1, .mask = BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(19) | BIT(23) | BIT(28), }, }; static const struct sirfsoc_padmux i2s_padmux = { .muxmask_counts = ARRAY_SIZE(i2s_muxmask), .muxmask = i2s_muxmask, .funcmask = BIT(3) | BIT(9), .funcval = BIT(3), }; static const unsigned i2s_pins[] = { 42, 43, 44, 45, 46, 51, 55, 60 }; static const struct sirfsoc_muxmask ac97_muxmask[] = { { .group = 1, .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14), }, }; static const struct sirfsoc_padmux ac97_padmux = { .muxmask_counts = ARRAY_SIZE(ac97_muxmask), .muxmask = ac97_muxmask, .funcmask = BIT(8), .funcval = 0, }; static const unsigned ac97_pins[] = { 33, 34, 35, 36 }; static const struct sirfsoc_muxmask spi1_muxmask[] = { { .group = 1, .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14), }, }; static const struct sirfsoc_padmux spi1_padmux = { .muxmask_counts = ARRAY_SIZE(spi1_muxmask), .muxmask = spi1_muxmask, .funcmask = BIT(8), .funcval = BIT(8), }; static const unsigned spi1_pins[] = { 43, 44, 45, 46 }; static const struct sirfsoc_muxmask sdmmc1_muxmask[] = { { .group = 0, .mask = BIT(27) | BIT(28) | BIT(29), }, }; static const struct sirfsoc_padmux sdmmc1_padmux = { .muxmask_counts = ARRAY_SIZE(sdmmc1_muxmask), .muxmask = sdmmc1_muxmask, }; static const unsigned sdmmc1_pins[] = { 27, 28, 29 }; static const struct sirfsoc_muxmask gps_muxmask[] = { { .group = 0, .mask = BIT(24) | BIT(25) | BIT(26), }, }; static const struct sirfsoc_padmux gps_padmux = { .muxmask_counts = ARRAY_SIZE(gps_muxmask), .muxmask = gps_muxmask, .funcmask = BIT(12) | BIT(13) | BIT(14), .funcval = BIT(12), }; static const unsigned gps_pins[] = { 24, 25, 26 }; static const struct sirfsoc_muxmask sdmmc5_muxmask[] = { { .group = 0, .mask = BIT(24) | BIT(25) | BIT(26), }, { .group = 1, .mask = BIT(29), }, { .group = 2, .mask = BIT(0) | BIT(1), }, }; static const struct sirfsoc_padmux sdmmc5_padmux = { 
.muxmask_counts = ARRAY_SIZE(sdmmc5_muxmask), .muxmask = sdmmc5_muxmask, .funcmask = BIT(13) | BIT(14), .funcval = BIT(13) | BIT(14), }; static const unsigned sdmmc5_pins[] = { 24, 25, 26, 61, 64, 65 }; static const struct sirfsoc_muxmask usp0_muxmask[] = { { .group = 1, .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23), }, }; static const struct sirfsoc_padmux usp0_padmux = { .muxmask_counts = ARRAY_SIZE(usp0_muxmask), .muxmask = usp0_muxmask, .funcmask = BIT(1) | BIT(2) | BIT(6) | BIT(9), .funcval = 0, }; static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 }; static const struct sirfsoc_muxmask usp1_muxmask[] = { { .group = 1, .mask = BIT(24) | BIT(25) | BIT(26) | BIT(27) | BIT(28), }, }; static const struct sirfsoc_padmux usp1_padmux = { .muxmask_counts = ARRAY_SIZE(usp1_muxmask), .muxmask = usp1_muxmask, .funcmask = BIT(1) | BIT(9) | BIT(10) | BIT(11), .funcval = 0, }; static const unsigned usp1_pins[] = { 56, 57, 58, 59, 60 }; static const struct sirfsoc_muxmask usp2_muxmask[] = { { .group = 1, .mask = BIT(29) | BIT(30) | BIT(31), }, { .group = 2, .mask = BIT(0) | BIT(1), }, }; static const struct sirfsoc_padmux usp2_padmux = { .muxmask_counts = ARRAY_SIZE(usp2_muxmask), .muxmask = usp2_muxmask, .funcmask = BIT(13) | BIT(14), .funcval = 0, }; static const unsigned usp2_pins[] = { 61, 62, 63, 64, 65 }; static const struct sirfsoc_muxmask nand_muxmask[] = { { .group = 2, .mask = BIT(2) | BIT(3) | BIT(28) | BIT(29) | BIT(30), }, }; static const struct sirfsoc_padmux nand_padmux = { .muxmask_counts = ARRAY_SIZE(nand_muxmask), .muxmask = nand_muxmask, .funcmask = BIT(5), .funcval = 0, }; static const unsigned nand_pins[] = { 64, 65, 92, 93, 94 }; static const struct sirfsoc_padmux sdmmc0_padmux = { .muxmask_counts = 0, .funcmask = BIT(5), .funcval = 0, }; static const unsigned sdmmc0_pins[] = { }; static const struct sirfsoc_muxmask sdmmc2_muxmask[] = { { .group = 2, .mask = BIT(2) | BIT(3), }, }; static const struct sirfsoc_padmux sdmmc2_padmux = { 
.muxmask_counts = ARRAY_SIZE(sdmmc2_muxmask), .muxmask = sdmmc2_muxmask, .funcmask = BIT(5), .funcval = BIT(5), }; static const unsigned sdmmc2_pins[] = { 66, 67 }; static const struct sirfsoc_muxmask cko0_muxmask[] = { { .group = 2, .mask = BIT(14), }, }; static const struct sirfsoc_padmux cko0_padmux = { .muxmask_counts = ARRAY_SIZE(cko0_muxmask), .muxmask = cko0_muxmask, }; static const unsigned cko0_pins[] = { 78 }; static const struct sirfsoc_muxmask vip_muxmask[] = { { .group = 2, .mask = BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25), }, }; static const struct sirfsoc_padmux vip_padmux = { .muxmask_counts = ARRAY_SIZE(vip_muxmask), .muxmask = vip_muxmask, .funcmask = BIT(0), .funcval = 0, }; static const unsigned vip_pins[] = { 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89 }; static const struct sirfsoc_muxmask i2c0_muxmask[] = { { .group = 2, .mask = BIT(26) | BIT(27), }, }; static const struct sirfsoc_padmux i2c0_padmux = { .muxmask_counts = ARRAY_SIZE(i2c0_muxmask), .muxmask = i2c0_muxmask, }; static const unsigned i2c0_pins[] = { 90, 91 }; static const struct sirfsoc_muxmask i2c1_muxmask[] = { { .group = 0, .mask = BIT(13) | BIT(15), }, }; static const struct sirfsoc_padmux i2c1_padmux = { .muxmask_counts = ARRAY_SIZE(i2c1_muxmask), .muxmask = i2c1_muxmask, }; static const unsigned i2c1_pins[] = { 13, 15 }; static const struct sirfsoc_muxmask viprom_muxmask[] = { { .group = 2, .mask = BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25), }, { .group = 0, .mask = BIT(12), }, }; static const struct sirfsoc_padmux viprom_padmux = { .muxmask_counts = ARRAY_SIZE(viprom_muxmask), .muxmask = viprom_muxmask, .funcmask = BIT(0), .funcval = BIT(0), }; static const unsigned viprom_pins[] = { 12, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89 }; static const struct sirfsoc_muxmask pwm0_muxmask[] = { { .group = 0, .mask = BIT(4), }, }; static const struct 
sirfsoc_padmux pwm0_padmux = { .muxmask_counts = ARRAY_SIZE(pwm0_muxmask), .muxmask = pwm0_muxmask, .funcmask = BIT(12), .funcval = 0, }; static const unsigned pwm0_pins[] = { 4 }; static const struct sirfsoc_muxmask pwm1_muxmask[] = { { .group = 0, .mask = BIT(5), }, }; static const struct sirfsoc_padmux pwm1_padmux = { .muxmask_counts = ARRAY_SIZE(pwm1_muxmask), .muxmask = pwm1_muxmask, }; static const unsigned pwm1_pins[] = { 5 }; static const struct sirfsoc_muxmask pwm2_muxmask[] = { { .group = 0, .mask = BIT(6), }, }; static const struct sirfsoc_padmux pwm2_padmux = { .muxmask_counts = ARRAY_SIZE(pwm2_muxmask), .muxmask = pwm2_muxmask, }; static const unsigned pwm2_pins[] = { 6 }; static const struct sirfsoc_muxmask pwm3_muxmask[] = { { .group = 0, .mask = BIT(7), }, }; static const struct sirfsoc_padmux pwm3_padmux = { .muxmask_counts = ARRAY_SIZE(pwm3_muxmask), .muxmask = pwm3_muxmask, }; static const unsigned pwm3_pins[] = { 7 }; static const struct sirfsoc_muxmask warm_rst_muxmask[] = { { .group = 0, .mask = BIT(8), }, }; static const struct sirfsoc_padmux warm_rst_padmux = { .muxmask_counts = ARRAY_SIZE(warm_rst_muxmask), .muxmask = warm_rst_muxmask, }; static const unsigned warm_rst_pins[] = { 8 }; static const struct sirfsoc_muxmask usb0_utmi_drvbus_muxmask[] = { { .group = 1, .mask = BIT(22), }, }; static const struct sirfsoc_padmux usb0_utmi_drvbus_padmux = { .muxmask_counts = ARRAY_SIZE(usb0_utmi_drvbus_muxmask), .muxmask = usb0_utmi_drvbus_muxmask, .funcmask = BIT(6), .funcval = BIT(6), /* refer to PAD_UTMI_DRVVBUS0_ENABLE */ }; static const unsigned usb0_utmi_drvbus_pins[] = { 54 }; static const struct sirfsoc_muxmask usb1_utmi_drvbus_muxmask[] = { { .group = 1, .mask = BIT(27), }, }; static const struct sirfsoc_padmux usb1_utmi_drvbus_padmux = { .muxmask_counts = ARRAY_SIZE(usb1_utmi_drvbus_muxmask), .muxmask = usb1_utmi_drvbus_muxmask, .funcmask = BIT(11), .funcval = BIT(11), /* refer to PAD_UTMI_DRVVBUS1_ENABLE */ }; static const unsigned 
usb1_utmi_drvbus_pins[] = { 59 }; static const struct sirfsoc_muxmask pulse_count_muxmask[] = { { .group = 0, .mask = BIT(9) | BIT(10) | BIT(11), }, }; static const struct sirfsoc_padmux pulse_count_padmux = { .muxmask_counts = ARRAY_SIZE(pulse_count_muxmask), .muxmask = pulse_count_muxmask, }; static const unsigned pulse_count_pins[] = { 9, 10, 11 }; #define SIRFSOC_PIN_GROUP(n, p) \ { \ .name = n, \ .pins = p, \ .num_pins = ARRAY_SIZE(p), \ } static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = { SIRFSOC_PIN_GROUP("lcd_16bitsgrp", lcd_16bits_pins), SIRFSOC_PIN_GROUP("lcd_18bitsgrp", lcd_18bits_pins), SIRFSOC_PIN_GROUP("lcd_24bitsgrp", lcd_24bits_pins), SIRFSOC_PIN_GROUP("lcdrom_grp", lcdrom_pins), SIRFSOC_PIN_GROUP("uart0grp", uart0_pins), SIRFSOC_PIN_GROUP("uart1grp", uart1_pins), SIRFSOC_PIN_GROUP("uart2grp", uart2_pins), SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins), SIRFSOC_PIN_GROUP("usp0grp", usp0_pins), SIRFSOC_PIN_GROUP("usp1grp", usp1_pins), SIRFSOC_PIN_GROUP("usp2grp", usp2_pins), SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins), SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins), SIRFSOC_PIN_GROUP("pwm0grp", pwm0_pins), SIRFSOC_PIN_GROUP("pwm1grp", pwm1_pins), SIRFSOC_PIN_GROUP("pwm2grp", pwm2_pins), SIRFSOC_PIN_GROUP("pwm3grp", pwm3_pins), SIRFSOC_PIN_GROUP("vipgrp", vip_pins), SIRFSOC_PIN_GROUP("vipromgrp", viprom_pins), SIRFSOC_PIN_GROUP("warm_rstgrp", warm_rst_pins), SIRFSOC_PIN_GROUP("cko0_rstgrp", cko0_pins), SIRFSOC_PIN_GROUP("cko1_rstgrp", cko1_pins), SIRFSOC_PIN_GROUP("sdmmc0grp", sdmmc0_pins), SIRFSOC_PIN_GROUP("sdmmc1grp", sdmmc1_pins), SIRFSOC_PIN_GROUP("sdmmc2grp", sdmmc2_pins), SIRFSOC_PIN_GROUP("sdmmc3grp", sdmmc3_pins), SIRFSOC_PIN_GROUP("sdmmc4grp", sdmmc4_pins), SIRFSOC_PIN_GROUP("sdmmc5grp", sdmmc5_pins), SIRFSOC_PIN_GROUP("usb0_utmi_drvbusgrp", usb0_utmi_drvbus_pins), SIRFSOC_PIN_GROUP("usb1_utmi_drvbusgrp", usb1_utmi_drvbus_pins), SIRFSOC_PIN_GROUP("pulse_countgrp", pulse_count_pins), SIRFSOC_PIN_GROUP("i2sgrp", 
i2s_pins), SIRFSOC_PIN_GROUP("ac97grp", ac97_pins), SIRFSOC_PIN_GROUP("nandgrp", nand_pins), SIRFSOC_PIN_GROUP("spi0grp", spi0_pins), SIRFSOC_PIN_GROUP("spi1grp", spi1_pins), SIRFSOC_PIN_GROUP("gpsgrp", gps_pins), }; static int sirfsoc_get_groups_count(struct pinctrl_dev *pctldev) { return ARRAY_SIZE(sirfsoc_pin_groups); } static const char *sirfsoc_get_group_name(struct pinctrl_dev *pctldev, unsigned selector) { return sirfsoc_pin_groups[selector].name; } static int sirfsoc_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector, const unsigned **pins, unsigned *num_pins) { *pins = sirfsoc_pin_groups[selector].pins; *num_pins = sirfsoc_pin_groups[selector].num_pins; return 0; } static void sirfsoc_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned offset) { seq_printf(s, " " DRIVER_NAME); } static int sirfsoc_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np_config, struct pinctrl_map **map, unsigned *num_maps) { struct sirfsoc_pmx *spmx = pinctrl_dev_get_drvdata(pctldev); struct device_node *np; struct property *prop; const char *function, *group; int ret, index = 0, count = 0; /* calculate number of maps required */ for_each_child_of_node(np_config, np) { ret = of_property_read_string(np, "sirf,function", &function); if (ret < 0) return ret; ret = of_property_count_strings(np, "sirf,pins"); if (ret < 0) return ret; count += ret; } if (!count) { dev_err(spmx->dev, "No child nodes passed via DT\n"); return -ENODEV; } *map = kzalloc(sizeof(**map) * count, GFP_KERNEL); if (!*map) return -ENOMEM; for_each_child_of_node(np_config, np) { of_property_read_string(np, "sirf,function", &function); of_property_for_each_string(np, "sirf,pins", prop, group) { (*map)[index].type = PIN_MAP_TYPE_MUX_GROUP; (*map)[index].data.mux.group = group; (*map)[index].data.mux.function = function; index++; } } *num_maps = count; return 0; } static void sirfsoc_dt_free_map(struct pinctrl_dev *pctldev, struct pinctrl_map *map, unsigned num_maps) { 
kfree(map); } static const struct pinctrl_ops sirfsoc_pctrl_ops = { .get_groups_count = sirfsoc_get_groups_count, .get_group_name = sirfsoc_get_group_name, .get_group_pins = sirfsoc_get_group_pins, .pin_dbg_show = sirfsoc_pin_dbg_show, .dt_node_to_map = sirfsoc_dt_node_to_map, .dt_free_map = sirfsoc_dt_free_map, }; struct sirfsoc_pmx_func { const char *name; const char * const *groups; const unsigned num_groups; const struct sirfsoc_padmux *padmux; }; static const char * const lcd_16bitsgrp[] = { "lcd_16bitsgrp" }; static const char * const lcd_18bitsgrp[] = { "lcd_18bitsgrp" }; static const char * const lcd_24bitsgrp[] = { "lcd_24bitsgrp" }; static const char * const lcdromgrp[] = { "lcdromgrp" }; static const char * const uart0grp[] = { "uart0grp" }; static const char * const uart1grp[] = { "uart1grp" }; static const char * const uart2grp[] = { "uart2grp" }; static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" }; static const char * const usp0grp[] = { "usp0grp" }; static const char * const usp1grp[] = { "usp1grp" }; static const char * const usp2grp[] = { "usp2grp" }; static const char * const i2c0grp[] = { "i2c0grp" }; static const char * const i2c1grp[] = { "i2c1grp" }; static const char * const pwm0grp[] = { "pwm0grp" }; static const char * const pwm1grp[] = { "pwm1grp" }; static const char * const pwm2grp[] = { "pwm2grp" }; static const char * const pwm3grp[] = { "pwm3grp" }; static const char * const vipgrp[] = { "vipgrp" }; static const char * const vipromgrp[] = { "vipromgrp" }; static const char * const warm_rstgrp[] = { "warm_rstgrp" }; static const char * const cko0grp[] = { "cko0grp" }; static const char * const cko1grp[] = { "cko1grp" }; static const char * const sdmmc0grp[] = { "sdmmc0grp" }; static const char * const sdmmc1grp[] = { "sdmmc1grp" }; static const char * const sdmmc2grp[] = { "sdmmc2grp" }; static const char * const sdmmc3grp[] = { "sdmmc3grp" }; static const char * const sdmmc4grp[] = { "sdmmc4grp" }; static 
const char * const sdmmc5grp[] = { "sdmmc5grp" }; static const char * const usb0_utmi_drvbusgrp[] = { "usb0_utmi_drvbusgrp" }; static const char * const usb1_utmi_drvbusgrp[] = { "usb1_utmi_drvbusgrp" }; static const char * const pulse_countgrp[] = { "pulse_countgrp" }; static const char * const i2sgrp[] = { "i2sgrp" }; static const char * const ac97grp[] = { "ac97grp" }; static const char * const nandgrp[] = { "nandgrp" }; static const char * const spi0grp[] = { "spi0grp" }; static const char * const spi1grp[] = { "spi1grp" }; static const char * const gpsgrp[] = { "gpsgrp" }; #define SIRFSOC_PMX_FUNCTION(n, g, m) \ { \ .name = n, \ .groups = g, \ .num_groups = ARRAY_SIZE(g), \ .padmux = &m, \ } static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = { SIRFSOC_PMX_FUNCTION("lcd_16bits", lcd_16bitsgrp, lcd_16bits_padmux), SIRFSOC_PMX_FUNCTION("lcd_18bits", lcd_18bitsgrp, lcd_18bits_padmux), SIRFSOC_PMX_FUNCTION("lcd_24bits", lcd_24bitsgrp, lcd_24bits_padmux), SIRFSOC_PMX_FUNCTION("lcdrom", lcdromgrp, lcdrom_padmux), SIRFSOC_PMX_FUNCTION("uart0", uart0grp, uart0_padmux), SIRFSOC_PMX_FUNCTION("uart1", uart1grp, uart1_padmux), SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux), SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux), SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux), SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux), SIRFSOC_PMX_FUNCTION("usp2", usp2grp, usp2_padmux), SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux), SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux), SIRFSOC_PMX_FUNCTION("pwm0", pwm0grp, pwm0_padmux), SIRFSOC_PMX_FUNCTION("pwm1", pwm1grp, pwm1_padmux), SIRFSOC_PMX_FUNCTION("pwm2", pwm2grp, pwm2_padmux), SIRFSOC_PMX_FUNCTION("pwm3", pwm3grp, pwm3_padmux), SIRFSOC_PMX_FUNCTION("vip", vipgrp, vip_padmux), SIRFSOC_PMX_FUNCTION("viprom", vipromgrp, viprom_padmux), SIRFSOC_PMX_FUNCTION("warm_rst", warm_rstgrp, warm_rst_padmux), SIRFSOC_PMX_FUNCTION("cko0", cko0grp, cko0_padmux), 
SIRFSOC_PMX_FUNCTION("cko1", cko1grp, cko1_padmux), SIRFSOC_PMX_FUNCTION("sdmmc0", sdmmc0grp, sdmmc0_padmux), SIRFSOC_PMX_FUNCTION("sdmmc1", sdmmc1grp, sdmmc1_padmux), SIRFSOC_PMX_FUNCTION("sdmmc2", sdmmc2grp, sdmmc2_padmux), SIRFSOC_PMX_FUNCTION("sdmmc3", sdmmc3grp, sdmmc3_padmux), SIRFSOC_PMX_FUNCTION("sdmmc4", sdmmc4grp, sdmmc4_padmux), SIRFSOC_PMX_FUNCTION("sdmmc5", sdmmc5grp, sdmmc5_padmux), SIRFSOC_PMX_FUNCTION("usb0_utmi_drvbus", usb0_utmi_drvbusgrp, usb0_utmi_drvbus_padmux), SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus", usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux), SIRFSOC_PMX_FUNCTION("pulse_count", pulse_countgrp, pulse_count_padmux), SIRFSOC_PMX_FUNCTION("i2s", i2sgrp, i2s_padmux), SIRFSOC_PMX_FUNCTION("ac97", ac97grp, ac97_padmux), SIRFSOC_PMX_FUNCTION("nand", nandgrp, nand_padmux), SIRFSOC_PMX_FUNCTION("spi0", spi0grp, spi0_padmux), SIRFSOC_PMX_FUNCTION("spi1", spi1grp, spi1_padmux), SIRFSOC_PMX_FUNCTION("gps", gpsgrp, gps_padmux), }; static void sirfsoc_pinmux_endisable(struct sirfsoc_pmx *spmx, unsigned selector, bool enable) { int i; const struct sirfsoc_padmux *mux = sirfsoc_pmx_functions[selector].padmux; const struct sirfsoc_muxmask *mask = mux->muxmask; for (i = 0; i < mux->muxmask_counts; i++) { u32 muxval; if (!spmx->is_marco) { muxval = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(mask[i].group)); if (enable) muxval = muxval & ~mask[i].mask; else muxval = muxval | mask[i].mask; writel(muxval, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(mask[i].group)); } else { if (enable) writel(mask[i].mask, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN_CLR(mask[i].group)); else writel(mask[i].mask, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(mask[i].group)); } } if (mux->funcmask && enable) { u32 func_en_val; func_en_val = readl(spmx->rsc_virtbase + SIRFSOC_RSC_PIN_MUX); func_en_val = (func_en_val & ~mux->funcmask) | (mux-> funcval); writel(func_en_val, spmx->rsc_virtbase + SIRFSOC_RSC_PIN_MUX); } } static int sirfsoc_pinmux_enable(struct pinctrl_dev *pmxdev, 
unsigned selector, unsigned group) { struct sirfsoc_pmx *spmx; spmx = pinctrl_dev_get_drvdata(pmxdev); sirfsoc_pinmux_endisable(spmx, selector, true); return 0; } static void sirfsoc_pinmux_disable(struct pinctrl_dev *pmxdev, unsigned selector, unsigned group) { struct sirfsoc_pmx *spmx; spmx = pinctrl_dev_get_drvdata(pmxdev); sirfsoc_pinmux_endisable(spmx, selector, false); } static int sirfsoc_pinmux_get_funcs_count(struct pinctrl_dev *pmxdev) { return ARRAY_SIZE(sirfsoc_pmx_functions); } static const char *sirfsoc_pinmux_get_func_name(struct pinctrl_dev *pctldev, unsigned selector) { return sirfsoc_pmx_functions[selector].name; } static int sirfsoc_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned selector, const char * const **groups, unsigned * const num_groups) { *groups = sirfsoc_pmx_functions[selector].groups; *num_groups = sirfsoc_pmx_functions[selector].num_groups; return 0; } static int sirfsoc_pinmux_request_gpio(struct pinctrl_dev *pmxdev, struct pinctrl_gpio_range *range, unsigned offset) { struct sirfsoc_pmx *spmx; int group = range->id; u32 muxval; spmx = pinctrl_dev_get_drvdata(pmxdev); if (!spmx->is_marco) { muxval = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group)); muxval = muxval | (1 << (offset - range->pin_base)); writel(muxval, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group)); } else { writel(1 << (offset - range->pin_base), spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group)); } return 0; } static const struct pinmux_ops sirfsoc_pinmux_ops = { .enable = sirfsoc_pinmux_enable, .disable = sirfsoc_pinmux_disable, .get_functions_count = sirfsoc_pinmux_get_funcs_count, .get_function_name = sirfsoc_pinmux_get_func_name, .get_function_groups = sirfsoc_pinmux_get_groups, .gpio_request_enable = sirfsoc_pinmux_request_gpio, }; static struct pinctrl_desc sirfsoc_pinmux_desc = { .name = DRIVER_NAME, .pins = sirfsoc_pads, .npins = ARRAY_SIZE(sirfsoc_pads), .pctlops = &sirfsoc_pctrl_ops, .pmxops = &sirfsoc_pinmux_ops, .owner = THIS_MODULE, }; 
/* * Todo: bind irq_chip to every pinctrl_gpio_range */ static struct pinctrl_gpio_range sirfsoc_gpio_ranges[] = { { .name = "sirfsoc-gpio*", .id = 0, .base = 0, .pin_base = 0, .npins = 32, }, { .name = "sirfsoc-gpio*", .id = 1, .base = 32, .pin_base = 32, .npins = 32, }, { .name = "sirfsoc-gpio*", .id = 2, .base = 64, .pin_base = 64, .npins = 32, }, { .name = "sirfsoc-gpio*", .id = 3, .base = 96, .pin_base = 96, .npins = 19, }, }; static void __iomem *sirfsoc_rsc_of_iomap(void) { const struct of_device_id rsc_ids[] = { { .compatible = "sirf,prima2-rsc" }, { .compatible = "sirf,marco-rsc" }, {} }; struct device_node *np; np = of_find_matching_node(NULL, rsc_ids); if (!np) panic("unable to find compatible rsc node in dtb\n"); return of_iomap(np, 0); } static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc, const struct of_phandle_args *gpiospec, u32 *flags) { if (gpiospec->args[0] > SIRFSOC_GPIO_NO_OF_BANKS * SIRFSOC_GPIO_BANK_SIZE) return -EINVAL; if (gc != &sgpio_bank[gpiospec->args[0] / SIRFSOC_GPIO_BANK_SIZE].chip.gc) return -EINVAL; if (flags) *flags = gpiospec->args[1]; return gpiospec->args[0] % SIRFSOC_GPIO_BANK_SIZE; } static int sirfsoc_pinmux_probe(struct platform_device *pdev) { int ret; struct sirfsoc_pmx *spmx; struct device_node *np = pdev->dev.of_node; int i; /* Create state holders etc for this driver */ spmx = devm_kzalloc(&pdev->dev, sizeof(*spmx), GFP_KERNEL); if (!spmx) return -ENOMEM; spmx->dev = &pdev->dev; platform_set_drvdata(pdev, spmx); spmx->gpio_virtbase = of_iomap(np, 0); if (!spmx->gpio_virtbase) { ret = -ENOMEM; dev_err(&pdev->dev, "can't map gpio registers\n"); goto out_no_gpio_remap; } spmx->rsc_virtbase = sirfsoc_rsc_of_iomap(); if (!spmx->rsc_virtbase) { ret = -ENOMEM; dev_err(&pdev->dev, "can't map rsc registers\n"); goto out_no_rsc_remap; } if (of_device_is_compatible(np, "sirf,marco-pinctrl")) spmx->is_marco = 1; /* Now register the pin controller and all pins it handles */ spmx->pmx = pinctrl_register(&sirfsoc_pinmux_desc, 
&pdev->dev, spmx); if (!spmx->pmx) { dev_err(&pdev->dev, "could not register SIRFSOC pinmux driver\n"); ret = -EINVAL; goto out_no_pmx; } for (i = 0; i < ARRAY_SIZE(sirfsoc_gpio_ranges); i++) { sirfsoc_gpio_ranges[i].gc = &sgpio_bank[i].chip.gc; pinctrl_add_gpio_range(spmx->pmx, &sirfsoc_gpio_ranges[i]); } dev_info(&pdev->dev, "initialized SIRFSOC pinmux driver\n"); return 0; out_no_pmx: iounmap(spmx->rsc_virtbase); out_no_rsc_remap: iounmap(spmx->gpio_virtbase); out_no_gpio_remap: platform_set_drvdata(pdev, NULL); return ret; } static const struct of_device_id pinmux_ids[] = { { .compatible = "sirf,prima2-pinctrl" }, { .compatible = "sirf,marco-pinctrl" }, {} }; static struct platform_driver sirfsoc_pinmux_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = pinmux_ids, }, .probe = sirfsoc_pinmux_probe, }; static int __init sirfsoc_pinmux_init(void) { return platform_driver_register(&sirfsoc_pinmux_driver); } arch_initcall(sirfsoc_pinmux_init); static inline int sirfsoc_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { struct sirfsoc_gpio_bank *bank = container_of(to_of_mm_gpio_chip(chip), struct sirfsoc_gpio_bank, chip); return irq_create_mapping(bank->domain, offset); } static inline int sirfsoc_gpio_to_offset(unsigned int gpio) { return gpio % SIRFSOC_GPIO_BANK_SIZE; } static inline struct sirfsoc_gpio_bank *sirfsoc_gpio_to_bank(unsigned int gpio) { return &sgpio_bank[gpio / SIRFSOC_GPIO_BANK_SIZE]; } static inline struct sirfsoc_gpio_bank *sirfsoc_irqchip_to_bank(struct gpio_chip *chip) { return container_of(to_of_mm_gpio_chip(chip), struct sirfsoc_gpio_bank, chip); } static void sirfsoc_gpio_irq_ack(struct irq_data *d) { struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); int idx = d->hwirq % SIRFSOC_GPIO_BANK_SIZE; u32 val, offset; unsigned long flags; offset = SIRFSOC_GPIO_CTRL(bank->id, idx); spin_lock_irqsave(&sgpio_lock, flags); val = readl(bank->chip.regs + offset); writel(val, bank->chip.regs + offset); 
spin_unlock_irqrestore(&sgpio_lock, flags); } static void __sirfsoc_gpio_irq_mask(struct sirfsoc_gpio_bank *bank, int idx) { u32 val, offset; unsigned long flags; offset = SIRFSOC_GPIO_CTRL(bank->id, idx); spin_lock_irqsave(&sgpio_lock, flags); val = readl(bank->chip.regs + offset); val &= ~SIRFSOC_GPIO_CTL_INTR_EN_MASK; val &= ~SIRFSOC_GPIO_CTL_INTR_STS_MASK; writel(val, bank->chip.regs + offset); spin_unlock_irqrestore(&sgpio_lock, flags); } static void sirfsoc_gpio_irq_mask(struct irq_data *d) { struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); __sirfsoc_gpio_irq_mask(bank, d->hwirq % SIRFSOC_GPIO_BANK_SIZE); } static void sirfsoc_gpio_irq_unmask(struct irq_data *d) { struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); int idx = d->hwirq % SIRFSOC_GPIO_BANK_SIZE; u32 val, offset; unsigned long flags; offset = SIRFSOC_GPIO_CTRL(bank->id, idx); spin_lock_irqsave(&sgpio_lock, flags); val = readl(bank->chip.regs + offset); val &= ~SIRFSOC_GPIO_CTL_INTR_STS_MASK; val |= SIRFSOC_GPIO_CTL_INTR_EN_MASK; writel(val, bank->chip.regs + offset); spin_unlock_irqrestore(&sgpio_lock, flags); } static int sirfsoc_gpio_irq_type(struct irq_data *d, unsigned type) { struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); int idx = d->hwirq % SIRFSOC_GPIO_BANK_SIZE; u32 val, offset; unsigned long flags; offset = SIRFSOC_GPIO_CTRL(bank->id, idx); spin_lock_irqsave(&sgpio_lock, flags); val = readl(bank->chip.regs + offset); val &= ~SIRFSOC_GPIO_CTL_INTR_STS_MASK; switch (type) { case IRQ_TYPE_NONE: break; case IRQ_TYPE_EDGE_RISING: val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK; val &= ~SIRFSOC_GPIO_CTL_INTR_LOW_MASK; break; case IRQ_TYPE_EDGE_FALLING: val &= ~SIRFSOC_GPIO_CTL_INTR_HIGH_MASK; val |= SIRFSOC_GPIO_CTL_INTR_LOW_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK; break; case IRQ_TYPE_EDGE_BOTH: val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_LOW_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK; break; case 
IRQ_TYPE_LEVEL_LOW: val &= ~(SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK); val |= SIRFSOC_GPIO_CTL_INTR_LOW_MASK; break; case IRQ_TYPE_LEVEL_HIGH: val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK; val &= ~(SIRFSOC_GPIO_CTL_INTR_LOW_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK); break; } writel(val, bank->chip.regs + offset); spin_unlock_irqrestore(&sgpio_lock, flags); return 0; } static struct irq_chip sirfsoc_irq_chip = { .name = "sirf-gpio-irq", .irq_ack = sirfsoc_gpio_irq_ack, .irq_mask = sirfsoc_gpio_irq_mask, .irq_unmask = sirfsoc_gpio_irq_unmask, .irq_set_type = sirfsoc_gpio_irq_type, }; static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc) { struct sirfsoc_gpio_bank *bank = irq_get_handler_data(irq); u32 status, ctrl; int idx = 0; struct irq_chip *chip = irq_get_chip(irq); chained_irq_enter(chip, desc); status = readl(bank->chip.regs + SIRFSOC_GPIO_INT_STATUS(bank->id)); if (!status) { printk(KERN_WARNING "%s: gpio id %d status %#x no interrupt is flaged\n", __func__, bank->id, status); handle_bad_irq(irq, desc); return; } while (status) { ctrl = readl(bank->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, idx)); /* * Here we must check whether the corresponding GPIO's interrupt * has been enabled, otherwise just skip it */ if ((status & 0x1) && (ctrl & SIRFSOC_GPIO_CTL_INTR_EN_MASK)) { pr_debug("%s: gpio id %d idx %d happens\n", __func__, bank->id, idx); generic_handle_irq(irq_find_mapping(bank->domain, idx)); } idx++; status = status >> 1; } chained_irq_exit(chip, desc); } static inline void sirfsoc_gpio_set_input(struct sirfsoc_gpio_bank *bank, unsigned ctrl_offset) { u32 val; val = readl(bank->chip.regs + ctrl_offset); val &= ~SIRFSOC_GPIO_CTL_OUT_EN_MASK; writel(val, bank->chip.regs + ctrl_offset); } static int sirfsoc_gpio_request(struct gpio_chip *chip, unsigned offset) { struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip); unsigned long flags; if (pinctrl_request_gpio(chip->base + offset)) return -ENODEV; 
spin_lock_irqsave(&bank->lock, flags); /* * default status: * set direction as input and mask irq */ sirfsoc_gpio_set_input(bank, SIRFSOC_GPIO_CTRL(bank->id, offset)); __sirfsoc_gpio_irq_mask(bank, offset); spin_unlock_irqrestore(&bank->lock, flags); return 0; } static void sirfsoc_gpio_free(struct gpio_chip *chip, unsigned offset) { struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip); unsigned long flags; spin_lock_irqsave(&bank->lock, flags); __sirfsoc_gpio_irq_mask(bank, offset); sirfsoc_gpio_set_input(bank, SIRFSOC_GPIO_CTRL(bank->id, offset)); spin_unlock_irqrestore(&bank->lock, flags); pinctrl_free_gpio(chip->base + offset); } static int sirfsoc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) { struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip); int idx = sirfsoc_gpio_to_offset(gpio); unsigned long flags; unsigned offset; offset = SIRFSOC_GPIO_CTRL(bank->id, idx); spin_lock_irqsave(&bank->lock, flags); sirfsoc_gpio_set_input(bank, offset); spin_unlock_irqrestore(&bank->lock, flags); return 0; } static inline void sirfsoc_gpio_set_output(struct sirfsoc_gpio_bank *bank, unsigned offset, int value) { u32 out_ctrl; unsigned long flags; spin_lock_irqsave(&bank->lock, flags); out_ctrl = readl(bank->chip.regs + offset); if (value) out_ctrl |= SIRFSOC_GPIO_CTL_DATAOUT_MASK; else out_ctrl &= ~SIRFSOC_GPIO_CTL_DATAOUT_MASK; out_ctrl &= ~SIRFSOC_GPIO_CTL_INTR_EN_MASK; out_ctrl |= SIRFSOC_GPIO_CTL_OUT_EN_MASK; writel(out_ctrl, bank->chip.regs + offset); spin_unlock_irqrestore(&bank->lock, flags); } static int sirfsoc_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int value) { struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip); int idx = sirfsoc_gpio_to_offset(gpio); u32 offset; unsigned long flags; offset = SIRFSOC_GPIO_CTRL(bank->id, idx); spin_lock_irqsave(&sgpio_lock, flags); sirfsoc_gpio_set_output(bank, offset, value); spin_unlock_irqrestore(&sgpio_lock, flags); return 0; } static int 
sirfsoc_gpio_get_value(struct gpio_chip *chip, unsigned offset) { struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip); u32 val; unsigned long flags; spin_lock_irqsave(&bank->lock, flags); val = readl(bank->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, offset)); spin_unlock_irqrestore(&bank->lock, flags); return !!(val & SIRFSOC_GPIO_CTL_DATAIN_MASK); } static void sirfsoc_gpio_set_value(struct gpio_chip *chip, unsigned offset, int value) { struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip); u32 ctrl; unsigned long flags; spin_lock_irqsave(&bank->lock, flags); ctrl = readl(bank->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, offset)); if (value) ctrl |= SIRFSOC_GPIO_CTL_DATAOUT_MASK; else ctrl &= ~SIRFSOC_GPIO_CTL_DATAOUT_MASK; writel(ctrl, bank->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, offset)); spin_unlock_irqrestore(&bank->lock, flags); } static int sirfsoc_gpio_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { struct sirfsoc_gpio_bank *bank = d->host_data; if (!bank) return -EINVAL; irq_set_chip(irq, &sirfsoc_irq_chip); irq_set_handler(irq, handle_level_irq); irq_set_chip_data(irq, bank); set_irq_flags(irq, IRQF_VALID); return 0; } const struct irq_domain_ops sirfsoc_gpio_irq_simple_ops = { .map = sirfsoc_gpio_irq_map, .xlate = irq_domain_xlate_twocell, }; static void sirfsoc_gpio_set_pullup(const u32 *pullups) { int i, n; const unsigned long *p = (const unsigned long *)pullups; for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) { for_each_set_bit(n, p + i, BITS_PER_LONG) { u32 offset = SIRFSOC_GPIO_CTRL(i, n); u32 val = readl(sgpio_bank[i].chip.regs + offset); val |= SIRFSOC_GPIO_CTL_PULL_MASK; val |= SIRFSOC_GPIO_CTL_PULL_HIGH; writel(val, sgpio_bank[i].chip.regs + offset); } } } static void sirfsoc_gpio_set_pulldown(const u32 *pulldowns) { int i, n; const unsigned long *p = (const unsigned long *)pulldowns; for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) { for_each_set_bit(n, p + i, BITS_PER_LONG) { u32 offset = SIRFSOC_GPIO_CTRL(i, 
n); u32 val = readl(sgpio_bank[i].chip.regs + offset); val |= SIRFSOC_GPIO_CTL_PULL_MASK; val &= ~SIRFSOC_GPIO_CTL_PULL_HIGH; writel(val, sgpio_bank[i].chip.regs + offset); } } } static int sirfsoc_gpio_probe(struct device_node *np) { int i, err = 0; struct sirfsoc_gpio_bank *bank; void *regs; struct platform_device *pdev; bool is_marco = false; u32 pullups[SIRFSOC_GPIO_NO_OF_BANKS], pulldowns[SIRFSOC_GPIO_NO_OF_BANKS]; pdev = of_find_device_by_node(np); if (!pdev) return -ENODEV; regs = of_iomap(np, 0); if (!regs) return -ENOMEM; if (of_device_is_compatible(np, "sirf,marco-pinctrl")) is_marco = 1; for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) { bank = &sgpio_bank[i]; spin_lock_init(&bank->lock); bank->chip.gc.request = sirfsoc_gpio_request; bank->chip.gc.free = sirfsoc_gpio_free; bank->chip.gc.direction_input = sirfsoc_gpio_direction_input; bank->chip.gc.get = sirfsoc_gpio_get_value; bank->chip.gc.direction_output = sirfsoc_gpio_direction_output; bank->chip.gc.set = sirfsoc_gpio_set_value; bank->chip.gc.to_irq = sirfsoc_gpio_to_irq; bank->chip.gc.base = i * SIRFSOC_GPIO_BANK_SIZE; bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE; bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL); bank->chip.gc.of_node = np; bank->chip.gc.of_xlate = sirfsoc_gpio_of_xlate; bank->chip.gc.of_gpio_n_cells = 2; bank->chip.regs = regs; bank->id = i; bank->is_marco = is_marco; bank->parent_irq = platform_get_irq(pdev, i); if (bank->parent_irq < 0) { err = bank->parent_irq; goto out; } err = gpiochip_add(&bank->chip.gc); if (err) { pr_err("%s: error in probe function with status %d\n", np->full_name, err); goto out; } bank->domain = irq_domain_add_linear(np, SIRFSOC_GPIO_BANK_SIZE, &sirfsoc_gpio_irq_simple_ops, bank); if (!bank->domain) { pr_err("%s: Failed to create irqdomain\n", np->full_name); err = -ENOSYS; goto out; } irq_set_chained_handler(bank->parent_irq, sirfsoc_gpio_handle_irq); irq_set_handler_data(bank->parent_irq, bank); } if (!of_property_read_u32_array(np, 
"sirf,pullups", pullups, SIRFSOC_GPIO_NO_OF_BANKS)) sirfsoc_gpio_set_pullup(pullups); if (!of_property_read_u32_array(np, "sirf,pulldowns", pulldowns, SIRFSOC_GPIO_NO_OF_BANKS)) sirfsoc_gpio_set_pulldown(pulldowns); return 0; out: iounmap(regs); return err; } static int __init sirfsoc_gpio_init(void) { struct device_node *np; np = of_find_matching_node(NULL, pinmux_ids); if (!np) return -ENODEV; return sirfsoc_gpio_probe(np); } subsys_initcall(sirfsoc_gpio_init); MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, " "Yuping Luo <yuping.luo@csr.com>, " "Barry Song <baohua.song@csr.com>"); MODULE_DESCRIPTION("SIRFSOC pin control driver"); MODULE_LICENSE("GPL");
gpl-2.0
xapp-le/kernel
fs/xfs/xfs_ialloc.c
2084
44645
/* * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_types.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_alloc_btree.h" #include "xfs_ialloc_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" #include "xfs_ialloc.h" #include "xfs_alloc.h" #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_bmap.h" #include "xfs_cksum.h" #include "xfs_buf_item.h" /* * Allocation group level functions. */ static inline int xfs_ialloc_cluster_alignment( xfs_alloc_arg_t *args) { if (xfs_sb_version_hasalign(&args->mp->m_sb) && args->mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(args->mp, XFS_INODE_CLUSTER_SIZE(args->mp))) return args->mp->m_sb.sb_inoalignmt; return 1; } /* * Lookup a record by ino in the btree given by cur. */ int /* error */ xfs_inobt_lookup( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agino_t ino, /* starting inode of chunk */ xfs_lookup_t dir, /* <=, >=, == */ int *stat) /* success/failure */ { cur->bc_rec.i.ir_startino = ino; cur->bc_rec.i.ir_freecount = 0; cur->bc_rec.i.ir_free = 0; return xfs_btree_lookup(cur, dir, stat); } /* * Update the record referred to by cur to the value given. 
* This either works (return 0) or gets an EFSCORRUPTED error. */ STATIC int /* error */ xfs_inobt_update( struct xfs_btree_cur *cur, /* btree cursor */ xfs_inobt_rec_incore_t *irec) /* btree record */ { union xfs_btree_rec rec; rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino); rec.inobt.ir_freecount = cpu_to_be32(irec->ir_freecount); rec.inobt.ir_free = cpu_to_be64(irec->ir_free); return xfs_btree_update(cur, &rec); } /* * Get the data from the pointed-to record. */ int /* error */ xfs_inobt_get_rec( struct xfs_btree_cur *cur, /* btree cursor */ xfs_inobt_rec_incore_t *irec, /* btree record */ int *stat) /* output: success/failure */ { union xfs_btree_rec *rec; int error; error = xfs_btree_get_rec(cur, &rec, stat); if (!error && *stat == 1) { irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino); irec->ir_freecount = be32_to_cpu(rec->inobt.ir_freecount); irec->ir_free = be64_to_cpu(rec->inobt.ir_free); } return error; } /* * Verify that the number of free inodes in the AGI is correct. */ #ifdef DEBUG STATIC int xfs_check_agi_freecount( struct xfs_btree_cur *cur, struct xfs_agi *agi) { if (cur->bc_nlevels == 1) { xfs_inobt_rec_incore_t rec; int freecount = 0; int error; int i; error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); if (error) return error; do { error = xfs_inobt_get_rec(cur, &rec, &i); if (error) return error; if (i) { freecount += rec.ir_freecount; error = xfs_btree_increment(cur, 0, &i); if (error) return error; } } while (i == 1); if (!XFS_FORCED_SHUTDOWN(cur->bc_mp)) ASSERT(freecount == be32_to_cpu(agi->agi_freecount)); } return 0; } #else #define xfs_check_agi_freecount(cur, agi) 0 #endif /* * Initialise a new set of inodes. 
 *
 * Stamps every inode in the newly-allocated chunk [agbno, agbno+length)
 * of AG @agno with a free on-disk inode core carrying generation @gen,
 * and logs the initialised regions in transaction @tp.
 * Returns 0 or an errno (ENOMEM if a cluster buffer cannot be obtained).
 */
STATIC int
xfs_ialloc_inode_init(
	struct xfs_mount	*mp,		/* file system mount structure */
	struct xfs_trans	*tp,		/* transaction pointer */
	xfs_agnumber_t		agno,		/* AG being initialised */
	xfs_agblock_t		agbno,		/* first block of new chunk */
	xfs_agblock_t		length,		/* length of chunk in blocks */
	unsigned int		gen)		/* generation number to stamp */
{
	struct xfs_buf		*fbuf;
	struct xfs_dinode	*free;
	int			blks_per_cluster, nbufs, ninodes;
	int			version;
	int			i, j;
	xfs_daddr_t		d;
	xfs_ino_t		ino = 0;

	/*
	 * Loop over the new block(s), filling in the inodes.
	 * For small block sizes, manipulate the inodes in buffers
	 * which are multiples of the blocks size.
	 */
	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
		blks_per_cluster = 1;
		nbufs = length;
		ninodes = mp->m_sb.sb_inopblock;
	} else {
		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
				   mp->m_sb.sb_blocksize;
		nbufs = length / blks_per_cluster;
		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
	}

	/*
	 * Figure out what version number to use in the inodes we create.  If
	 * the superblock version has caught up to the one that supports the new
	 * inode format, then use the new inode version.  Otherwise use the old
	 * version so that old kernels will continue to be able to use the file
	 * system.
	 *
	 * For v3 inodes, we also need to write the inode number into the inode,
	 * so calculate the first inode number of the chunk here as
	 * XFS_OFFBNO_TO_AGINO() only works within a filesystem block, not
	 * across multiple filesystem blocks (such as a cluster) and so cannot
	 * be used in the cluster buffer loop below.
	 *
	 * Further, because we are writing the inode directly into the buffer
	 * and calculating a CRC on the entire inode, we have to log the entire
	 * inode so that the entire range the CRC covers is present in the log.
	 * That means for v3 inode we log the entire buffer rather than just the
	 * inode cores.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		version = 3;
		ino = XFS_AGINO_TO_INO(mp, agno,
				       XFS_OFFBNO_TO_AGINO(mp, agbno, 0));
	} else if (xfs_sb_version_hasnlink(&mp->m_sb))
		version = 2;
	else
		version = 1;

	for (j = 0; j < nbufs; j++) {
		/*
		 * Get the block.
		 */
		d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
		fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
					 mp->m_bsize * blks_per_cluster,
					 XBF_UNMAPPED);
		if (!fbuf)
			return ENOMEM;

		/*
		 * Initialize all inodes in this buffer and then log them.
		 *
		 * XXX: It would be much better if we had just one transaction
		 *	to log a whole cluster of inodes instead of all the
		 *	individual transactions causing a lot of log traffic.
		 */
		fbuf->b_ops = &xfs_inode_buf_ops;
		xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
		for (i = 0; i < ninodes; i++) {
			int	ioffset = i << mp->m_sb.sb_inodelog;
			uint	isize = xfs_dinode_size(version);

			free = xfs_make_iptr(mp, fbuf, i);
			free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
			free->di_version = version;
			free->di_gen = cpu_to_be32(gen);
			free->di_next_unlinked = cpu_to_be32(NULLAGINO);

			if (version == 3) {
				/*
				 * v3: stamp inode number and uuid, then CRC
				 * the whole core.  Logging is deferred to the
				 * whole-buffer log below (see comment above).
				 */
				free->di_ino = cpu_to_be64(ino);
				ino++;
				uuid_copy(&free->di_uuid, &mp->m_sb.sb_uuid);
				xfs_dinode_calc_crc(mp, free);
			} else {
				/* just log the inode core */
				xfs_trans_log_buf(tp, fbuf, ioffset,
						  ioffset + isize - 1);
			}
		}
		if (version == 3) {
			/* need to log the entire buffer */
			xfs_trans_log_buf(tp, fbuf, 0,
					  BBTOB(fbuf->b_length) - 1);
		}
		/* Mark as inode-allocation buffer for log recovery. */
		xfs_trans_inode_alloc_buf(tp, fbuf);
	}
	return 0;
}

/*
 * Allocate new inodes in the allocation group specified by agbp.
 * Return 0 for success, else error code.
*/ STATIC int /* error code or 0 */ xfs_ialloc_ag_alloc( xfs_trans_t *tp, /* transaction pointer */ xfs_buf_t *agbp, /* alloc group buffer */ int *alloc) { xfs_agi_t *agi; /* allocation group header */ xfs_alloc_arg_t args; /* allocation argument structure */ xfs_btree_cur_t *cur; /* inode btree cursor */ xfs_agnumber_t agno; int error; int i; xfs_agino_t newino; /* new first inode's number */ xfs_agino_t newlen; /* new number of inodes */ xfs_agino_t thisino; /* current inode number, for loop */ int isaligned = 0; /* inode allocation at stripe unit */ /* boundary */ struct xfs_perag *pag; memset(&args, 0, sizeof(args)); args.tp = tp; args.mp = tp->t_mountp; /* * Locking will ensure that we don't have two callers in here * at one time. */ newlen = XFS_IALLOC_INODES(args.mp); if (args.mp->m_maxicount && args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount) return XFS_ERROR(ENOSPC); args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp); /* * First try to allocate inodes contiguous with the last-allocated * chunk of inodes. If the filesystem is striped, this will fill * an entire stripe unit with inodes. */ agi = XFS_BUF_TO_AGI(agbp); newino = be32_to_cpu(agi->agi_newino); agno = be32_to_cpu(agi->agi_seqno); args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) + XFS_IALLOC_BLOCKS(args.mp); if (likely(newino != NULLAGINO && (args.agbno < be32_to_cpu(agi->agi_length)))) { args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); args.type = XFS_ALLOCTYPE_THIS_BNO; args.prod = 1; /* * We need to take into account alignment here to ensure that * we don't modify the free list if we fail to have an exact * block. If we don't have an exact match, and every oher * attempt allocation attempt fails, we'll end up cancelling * a dirty transaction and shutting down. * * For an exact allocation, alignment must be 1, * however we need to take cluster alignment into account when * fixing up the freelist. 
Use the minalignslop field to * indicate that extra blocks might be required for alignment, * but not to use them in the actual exact allocation. */ args.alignment = 1; args.minalignslop = xfs_ialloc_cluster_alignment(&args) - 1; /* Allow space for the inode btree to split. */ args.minleft = args.mp->m_in_maxlevels - 1; if ((error = xfs_alloc_vextent(&args))) return error; } else args.fsbno = NULLFSBLOCK; if (unlikely(args.fsbno == NULLFSBLOCK)) { /* * Set the alignment for the allocation. * If stripe alignment is turned on then align at stripe unit * boundary. * If the cluster size is smaller than a filesystem block * then we're doing I/O for inodes in filesystem block size * pieces, so don't need alignment anyway. */ isaligned = 0; if (args.mp->m_sinoalign) { ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN)); args.alignment = args.mp->m_dalign; isaligned = 1; } else args.alignment = xfs_ialloc_cluster_alignment(&args); /* * Need to figure out where to allocate the inode blocks. * Ideally they should be spaced out through the a.g. * For now, just allocate blocks up front. */ args.agbno = be32_to_cpu(agi->agi_root); args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); /* * Allocate a fixed-size extent of inodes. */ args.type = XFS_ALLOCTYPE_NEAR_BNO; args.prod = 1; /* * Allow space for the inode btree to split. */ args.minleft = args.mp->m_in_maxlevels - 1; if ((error = xfs_alloc_vextent(&args))) return error; } /* * If stripe alignment is turned on, then try again with cluster * alignment. */ if (isaligned && args.fsbno == NULLFSBLOCK) { args.type = XFS_ALLOCTYPE_NEAR_BNO; args.agbno = be32_to_cpu(agi->agi_root); args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); args.alignment = xfs_ialloc_cluster_alignment(&args); if ((error = xfs_alloc_vextent(&args))) return error; } if (args.fsbno == NULLFSBLOCK) { *alloc = 0; return 0; } ASSERT(args.len == args.minlen); /* * Stamp and write the inode buffers. 
* * Seed the new inode cluster with a random generation number. This * prevents short-term reuse of generation numbers if a chunk is * freed and then immediately reallocated. We use random numbers * rather than a linear progression to prevent the next generation * number from being easily guessable. */ error = xfs_ialloc_inode_init(args.mp, tp, agno, args.agbno, args.len, prandom_u32()); if (error) return error; /* * Convert the results. */ newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0); be32_add_cpu(&agi->agi_count, newlen); be32_add_cpu(&agi->agi_freecount, newlen); pag = xfs_perag_get(args.mp, agno); pag->pagi_freecount += newlen; xfs_perag_put(pag); agi->agi_newino = cpu_to_be32(newino); /* * Insert records describing the new inode chunk into the btree. */ cur = xfs_inobt_init_cursor(args.mp, tp, agbp, agno); for (thisino = newino; thisino < newino + newlen; thisino += XFS_INODES_PER_CHUNK) { cur->bc_rec.i.ir_startino = thisino; cur->bc_rec.i.ir_freecount = XFS_INODES_PER_CHUNK; cur->bc_rec.i.ir_free = XFS_INOBT_ALL_FREE; error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &i); if (error) { xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); return error; } ASSERT(i == 0); error = xfs_btree_insert(cur, &i); if (error) { xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); return error; } ASSERT(i == 1); } xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); /* * Log allocation group header fields */ xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO); /* * Modify/log superblock values for inode count and inode free count. 
*/ xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen); xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen); *alloc = 1; return 0; } STATIC xfs_agnumber_t xfs_ialloc_next_ag( xfs_mount_t *mp) { xfs_agnumber_t agno; spin_lock(&mp->m_agirotor_lock); agno = mp->m_agirotor; if (++mp->m_agirotor >= mp->m_maxagi) mp->m_agirotor = 0; spin_unlock(&mp->m_agirotor_lock); return agno; } /* * Select an allocation group to look for a free inode in, based on the parent * inode and then mode. Return the allocation group buffer. */ STATIC xfs_agnumber_t xfs_ialloc_ag_select( xfs_trans_t *tp, /* transaction pointer */ xfs_ino_t parent, /* parent directory inode number */ umode_t mode, /* bits set to indicate file type */ int okalloc) /* ok to allocate more space */ { xfs_agnumber_t agcount; /* number of ag's in the filesystem */ xfs_agnumber_t agno; /* current ag number */ int flags; /* alloc buffer locking flags */ xfs_extlen_t ineed; /* blocks needed for inode allocation */ xfs_extlen_t longest = 0; /* longest extent available */ xfs_mount_t *mp; /* mount point structure */ int needspace; /* file mode implies space allocated */ xfs_perag_t *pag; /* per allocation group data */ xfs_agnumber_t pagno; /* parent (starting) ag number */ int error; /* * Files of these types need at least one block if length > 0 * (and they won't fit in the inode, but that's hard to figure out). */ needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode); mp = tp->t_mountp; agcount = mp->m_maxagi; if (S_ISDIR(mode)) pagno = xfs_ialloc_next_ag(mp); else { pagno = XFS_INO_TO_AGNO(mp, parent); if (pagno >= agcount) pagno = 0; } ASSERT(pagno < agcount); /* * Loop through allocation groups, looking for one with a little * free space in it. Note we don't look for free inodes, exactly. * Instead, we include whether there is a need to allocate inodes * to mean that blocks must be allocated for them, * if none are currently free. 
*/ agno = pagno; flags = XFS_ALLOC_FLAG_TRYLOCK; for (;;) { pag = xfs_perag_get(mp, agno); if (!pag->pagi_inodeok) { xfs_ialloc_next_ag(mp); goto nextag; } if (!pag->pagi_init) { error = xfs_ialloc_pagi_init(mp, tp, agno); if (error) goto nextag; } if (pag->pagi_freecount) { xfs_perag_put(pag); return agno; } if (!okalloc) goto nextag; if (!pag->pagf_init) { error = xfs_alloc_pagf_init(mp, tp, agno, flags); if (error) goto nextag; } /* * Is there enough free space for the file plus a block of * inodes? (if we need to allocate some)? */ ineed = XFS_IALLOC_BLOCKS(mp); longest = pag->pagf_longest; if (!longest) longest = pag->pagf_flcount > 0; if (pag->pagf_freeblks >= needspace + ineed && longest >= ineed) { xfs_perag_put(pag); return agno; } nextag: xfs_perag_put(pag); /* * No point in iterating over the rest, if we're shutting * down. */ if (XFS_FORCED_SHUTDOWN(mp)) return NULLAGNUMBER; agno++; if (agno >= agcount) agno = 0; if (agno == pagno) { if (flags == 0) return NULLAGNUMBER; flags = 0; } } } /* * Try to retrieve the next record to the left/right from the current one. */ STATIC int xfs_ialloc_next_rec( struct xfs_btree_cur *cur, xfs_inobt_rec_incore_t *rec, int *done, int left) { int error; int i; if (left) error = xfs_btree_decrement(cur, 0, &i); else error = xfs_btree_increment(cur, 0, &i); if (error) return error; *done = !i; if (i) { error = xfs_inobt_get_rec(cur, rec, &i); if (error) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); } return 0; } STATIC int xfs_ialloc_get_rec( struct xfs_btree_cur *cur, xfs_agino_t agino, xfs_inobt_rec_incore_t *rec, int *done, int left) { int error; int i; error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i); if (error) return error; *done = !i; if (i) { error = xfs_inobt_get_rec(cur, rec, &i); if (error) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); } return 0; } /* * Allocate an inode. * * The caller selected an AG for us, and made sure that free inodes are * available. 
*/ STATIC int xfs_dialloc_ag( struct xfs_trans *tp, struct xfs_buf *agbp, xfs_ino_t parent, xfs_ino_t *inop) { struct xfs_mount *mp = tp->t_mountp; struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp); xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno); xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); struct xfs_perag *pag; struct xfs_btree_cur *cur, *tcur; struct xfs_inobt_rec_incore rec, trec; xfs_ino_t ino; int error; int offset; int i, j; pag = xfs_perag_get(mp, agno); ASSERT(pag->pagi_init); ASSERT(pag->pagi_inodeok); ASSERT(pag->pagi_freecount > 0); restart_pagno: cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); /* * If pagino is 0 (this is the root inode allocation) use newino. * This must work because we've just allocated some. */ if (!pagino) pagino = be32_to_cpu(agi->agi_newino); error = xfs_check_agi_freecount(cur, agi); if (error) goto error0; /* * If in the same AG as the parent, try to get near the parent. */ if (pagno == agno) { int doneleft; /* done, to the left */ int doneright; /* done, to the right */ int searchdistance = 10; error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i); if (error) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); error = xfs_inobt_get_rec(cur, &rec, &j); if (error) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); if (rec.ir_freecount > 0) { /* * Found a free inode in the same chunk * as the parent, done. */ goto alloc_inode; } /* * In the same AG as parent, but parent's chunk is full. */ /* duplicate the cursor, search left & right simultaneously */ error = xfs_btree_dup_cursor(cur, &tcur); if (error) goto error0; /* * Skip to last blocks looked up if same parent inode. 
*/ if (pagino != NULLAGINO && pag->pagl_pagino == pagino && pag->pagl_leftrec != NULLAGINO && pag->pagl_rightrec != NULLAGINO) { error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec, &trec, &doneleft, 1); if (error) goto error1; error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec, &rec, &doneright, 0); if (error) goto error1; } else { /* search left with tcur, back up 1 record */ error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1); if (error) goto error1; /* search right with cur, go forward 1 record. */ error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0); if (error) goto error1; } /* * Loop until we find an inode chunk with a free inode. */ while (!doneleft || !doneright) { int useleft; /* using left inode chunk this time */ if (!--searchdistance) { /* * Not in range - save last search * location and allocate a new inode */ xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); pag->pagl_leftrec = trec.ir_startino; pag->pagl_rightrec = rec.ir_startino; pag->pagl_pagino = pagino; goto newino; } /* figure out the closer block if both are valid. */ if (!doneleft && !doneright) { useleft = pagino - (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) < rec.ir_startino - pagino; } else { useleft = !doneleft; } /* free inodes to the left? */ if (useleft && trec.ir_freecount) { rec = trec; xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); cur = tcur; pag->pagl_leftrec = trec.ir_startino; pag->pagl_rightrec = rec.ir_startino; pag->pagl_pagino = pagino; goto alloc_inode; } /* free inodes to the right? */ if (!useleft && rec.ir_freecount) { xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); pag->pagl_leftrec = trec.ir_startino; pag->pagl_rightrec = rec.ir_startino; pag->pagl_pagino = pagino; goto alloc_inode; } /* get next record to check */ if (useleft) { error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1); } else { error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0); } if (error) goto error1; } /* * We've reached the end of the btree. 
because * we are only searching a small chunk of the * btree each search, there is obviously free * inodes closer to the parent inode than we * are now. restart the search again. */ pag->pagl_pagino = NULLAGINO; pag->pagl_leftrec = NULLAGINO; pag->pagl_rightrec = NULLAGINO; xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); goto restart_pagno; } /* * In a different AG from the parent. * See if the most recently allocated block has any free. */ newino: if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), XFS_LOOKUP_EQ, &i); if (error) goto error0; if (i == 1) { error = xfs_inobt_get_rec(cur, &rec, &j); if (error) goto error0; if (j == 1 && rec.ir_freecount > 0) { /* * The last chunk allocated in the group * still has a free inode. */ goto alloc_inode; } } } /* * None left in the last group, search the whole AG */ error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); if (error) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); for (;;) { error = xfs_inobt_get_rec(cur, &rec, &i); if (error) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); if (rec.ir_freecount > 0) break; error = xfs_btree_increment(cur, 0, &i); if (error) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); } alloc_inode: offset = xfs_lowbit64(rec.ir_free); ASSERT(offset >= 0); ASSERT(offset < XFS_INODES_PER_CHUNK); ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % XFS_INODES_PER_CHUNK) == 0); ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset); rec.ir_free &= ~XFS_INOBT_MASK(offset); rec.ir_freecount--; error = xfs_inobt_update(cur, &rec); if (error) goto error0; be32_add_cpu(&agi->agi_freecount, -1); xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); pag->pagi_freecount--; error = xfs_check_agi_freecount(cur, agi); if (error) goto error0; xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); xfs_perag_put(pag); *inop = ino; return 0; error1: 
xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); error0: xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); xfs_perag_put(pag); return error; } /* * Allocate an inode on disk. * * Mode is used to tell whether the new inode will need space, and whether it * is a directory. * * This function is designed to be called twice if it has to do an allocation * to make more free inodes. On the first call, *IO_agbp should be set to NULL. * If an inode is available without having to performn an allocation, an inode * number is returned. In this case, *IO_agbp is set to NULL. If an allocation * needs to be done, xfs_dialloc returns the current AGI buffer in *IO_agbp. * The caller should then commit the current transaction, allocate a * new transaction, and call xfs_dialloc() again, passing in the previous value * of *IO_agbp. IO_agbp should be held across the transactions. Since the AGI * buffer is locked across the two calls, the second call is guaranteed to have * a free inode available. * * Once we successfully pick an inode its number is returned and the on-disk * data structures are updated. The inode itself is not read in, since doing so * would break ordering constraints with xfs_reclaim. */ int xfs_dialloc( struct xfs_trans *tp, xfs_ino_t parent, umode_t mode, int okalloc, struct xfs_buf **IO_agbp, xfs_ino_t *inop) { struct xfs_mount *mp = tp->t_mountp; struct xfs_buf *agbp; xfs_agnumber_t agno; int error; int ialloced; int noroom = 0; xfs_agnumber_t start_agno; struct xfs_perag *pag; if (*IO_agbp) { /* * If the caller passes in a pointer to the AGI buffer, * continue where we left off before. In this case, we * know that the allocation group has free inodes. */ agbp = *IO_agbp; goto out_alloc; } /* * We do not have an agbp, so select an initial allocation * group for inode allocation. 
*/ start_agno = xfs_ialloc_ag_select(tp, parent, mode, okalloc); if (start_agno == NULLAGNUMBER) { *inop = NULLFSINO; return 0; } /* * If we have already hit the ceiling of inode blocks then clear * okalloc so we scan all available agi structures for a free * inode. */ if (mp->m_maxicount && mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) { noroom = 1; okalloc = 0; } /* * Loop until we find an allocation group that either has free inodes * or in which we can allocate some inodes. Iterate through the * allocation groups upward, wrapping at the end. */ agno = start_agno; for (;;) { pag = xfs_perag_get(mp, agno); if (!pag->pagi_inodeok) { xfs_ialloc_next_ag(mp); goto nextag; } if (!pag->pagi_init) { error = xfs_ialloc_pagi_init(mp, tp, agno); if (error) goto out_error; } /* * Do a first racy fast path check if this AG is usable. */ if (!pag->pagi_freecount && !okalloc) goto nextag; /* * Then read in the AGI buffer and recheck with the AGI buffer * lock held. */ error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); if (error) goto out_error; if (pag->pagi_freecount) { xfs_perag_put(pag); goto out_alloc; } if (!okalloc) goto nextag_relse_buffer; error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced); if (error) { xfs_trans_brelse(tp, agbp); if (error != ENOSPC) goto out_error; xfs_perag_put(pag); *inop = NULLFSINO; return 0; } if (ialloced) { /* * We successfully allocated some inodes, return * the current context to the caller so that it * can commit the current transaction and call * us again where we left off. */ ASSERT(pag->pagi_freecount > 0); xfs_perag_put(pag); *IO_agbp = agbp; *inop = NULLFSINO; return 0; } nextag_relse_buffer: xfs_trans_brelse(tp, agbp); nextag: xfs_perag_put(pag); if (++agno == mp->m_sb.sb_agcount) agno = 0; if (agno == start_agno) { *inop = NULLFSINO; return noroom ? ENOSPC : 0; } } out_alloc: *IO_agbp = NULL; return xfs_dialloc_ag(tp, agbp, parent, inop); out_error: xfs_perag_put(pag); return XFS_ERROR(error); } /* * Free disk inode. 
 * Carefully avoids touching the incore inode, all
 * manipulations incore are the caller's responsibility.
 * The on-disk inode is not changed by this operation, only the
 * btree (free inode mask) is changed.
 */
int
xfs_difree(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	inode,		/* inode to be freed */
	xfs_bmap_free_t	*flist,		/* extents to free */
	int		*delete,	/* set if inode cluster was deleted */
	xfs_ino_t	*first_ino)	/* first inode in deleted cluster */
{
	/* REFERENCED */
	xfs_agblock_t	agbno;	/* block number containing inode */
	xfs_buf_t	*agbp;	/* buffer containing allocation group header */
	xfs_agino_t	agino;	/* inode number relative to allocation group */
	xfs_agnumber_t	agno;	/* allocation group number */
	xfs_agi_t	*agi;	/* allocation group header */
	xfs_btree_cur_t	*cur;	/* inode btree cursor */
	int		error;	/* error return value */
	int		i;	/* result code */
	int		ilen;	/* inodes in an inode cluster */
	xfs_mount_t	*mp;	/* mount structure for filesystem */
	int		off;	/* offset of inode in inode chunk */
	xfs_inobt_rec_incore_t rec;	/* btree record */
	struct xfs_perag *pag;

	mp = tp->t_mountp;

	/*
	 * Break up inode number into its components; each part is
	 * range-checked so a corrupt inode number fails loudly here.
	 */
	agno = XFS_INO_TO_AGNO(mp, inode);
	if (agno >= mp->m_sb.sb_agcount) {
		xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).",
			__func__, agno, mp->m_sb.sb_agcount);
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
	agino = XFS_INO_TO_AGINO(mp, inode);
	if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) {
		xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
			__func__, (unsigned long long)inode,
			(unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino));
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (agbno >= mp->m_sb.sb_agblocks) {
		xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
			__func__, agbno, mp->m_sb.sb_agblocks);
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
	/*
	 * Get the allocation group header.
	 */
	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
	if (error) {
		xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
			__func__, error);
		return error;
	}
	agi = XFS_BUF_TO_AGI(agbp);
	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
	ASSERT(agbno < be32_to_cpu(agi->agi_length));
	/*
	 * Initialize the cursor.
	 */
	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	/*
	 * Look for the entry describing this inode.
	 */
	if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
		xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
			__func__, error);
		goto error0;
	}
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	error = xfs_inobt_get_rec(cur, &rec, &i);
	if (error) {
		xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
			__func__, error);
		goto error0;
	}
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	/*
	 * Get the offset in the inode chunk.
	 */
	off = agino - rec.ir_startino;
	ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
	/* The inode must currently be allocated (bit clear in the mask). */
	ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
	/*
	 * Mark the inode free & increment the count.
	 */
	rec.ir_free |= XFS_INOBT_MASK(off);
	rec.ir_freecount++;

	/*
	 * When an inode cluster is free, it becomes eligible for removal
	 * (unless the "ikeep" mount option says to keep empty clusters).
	 */
	if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
	    (rec.ir_freecount == XFS_IALLOC_INODES(mp))) {

		*delete = 1;
		*first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);

		/*
		 * Remove the inode cluster from the AGI B+Tree, adjust the
		 * AGI and Superblock inode counts, and mark the disk space
		 * to be freed when the transaction is committed.
		 *
		 * Note the counters are adjusted by ilen - 1: the inode
		 * being freed here was never counted as free.
		 */
		ilen = XFS_IALLOC_INODES(mp);
		be32_add_cpu(&agi->agi_count, -ilen);
		be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
		pag = xfs_perag_get(mp, agno);
		pag->pagi_freecount -= ilen - 1;
		xfs_perag_put(pag);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));

		if ((error = xfs_btree_delete(cur, &i))) {
			xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
				__func__, error);
			goto error0;
		}

		xfs_bmap_add_free(XFS_AGB_TO_FSB(mp, agno,
				XFS_INO_TO_AGBNO(mp, rec.ir_startino)),
				XFS_IALLOC_BLOCKS(mp), flist, mp);
	} else {
		*delete = 0;

		error = xfs_inobt_update(cur, &rec);
		if (error) {
			xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
				__func__, error);
			goto error0;
		}

		/*
		 * Change the inode free counts and log the ag/sb changes.
		 */
		be32_add_cpu(&agi->agi_freecount, 1);
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
		pag = xfs_perag_get(mp, agno);
		pag->pagi_freecount++;
		xfs_perag_put(pag);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
	}

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;

error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Look up the inode chunk that contains @agino via the inode btree and
 * return the chunk's first block and the inode's offset from it.
 * Used when the inode location cannot be computed arithmetically.
 */
STATIC int
xfs_imap_lookup(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino,
	xfs_agblock_t		agbno,
	xfs_agblock_t		*chunk_agbno,
	xfs_agblock_t		*offset_agbno,
	int			flags)
{
	struct xfs_inobt_rec_incore rec;
	struct xfs_btree_cur	*cur;
	struct xfs_buf		*agbp;
	int			error;
	int			i;

	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
	if (error) {
		xfs_alert(mp,
			"%s: xfs_ialloc_read_agi() returned error %d, agno %d",
			__func__, error, agno);
		return error;
	}

	/*
	 * Lookup the inode record for the given agino. If the record cannot be
	 * found, then it's an invalid inode number and we should abort. Once
	 * we have a record, we need to ensure it contains the inode number
	 * we are looking up.
	 */
	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
	if (!error) {
		if (i)
			error = xfs_inobt_get_rec(cur, &rec, &i);
		if (!error && i == 0)
			error = EINVAL;
	}

	xfs_trans_brelse(tp, agbp);
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	if (error)
		return error;

	/* check that the returned record contains the required inode */
	if (rec.ir_startino > agino ||
	    rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino)
		return EINVAL;

	/* for untrusted inodes check it is allocated first */
	if ((flags & XFS_IGET_UNTRUSTED) &&
	    (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
		return EINVAL;

	*chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
	*offset_agbno = agbno - *chunk_agbno;
	return 0;
}

/*
 * Return the location of the inode in imap, for mapping it into a buffer.
 */
int
xfs_imap(
	xfs_mount_t	 *mp,	/* file system mount structure */
	xfs_trans_t	 *tp,	/* transaction pointer */
	xfs_ino_t	ino,	/* inode to locate */
	struct xfs_imap	*imap,	/* location map structure */
	uint		flags)	/* flags for inode btree lookup */
{
	xfs_agblock_t	agbno;	/* block number of inode in the alloc group */
	xfs_agino_t	agino;	/* inode number within alloc group */
	xfs_agnumber_t	agno;	/* allocation group number */
	int		blks_per_cluster; /* num blocks per inode cluster */
	xfs_agblock_t	chunk_agbno;	/* first block in inode chunk */
	xfs_agblock_t	cluster_agbno;	/* first block in inode cluster */
	int		error;	/* error code */
	int		offset;	/* index of inode in its buffer */
	int		offset_agbno;	/* blks from chunk start to inode */

	ASSERT(ino != NULLFSINO);

	/*
	 * Split up the inode number into its parts.
	 */
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
#ifdef DEBUG
		/*
		 * Don't output diagnostic information for untrusted inodes
		 * as they can be invalid without implying corruption.
		 */
		if (flags & XFS_IGET_UNTRUSTED)
			return XFS_ERROR(EINVAL);
		if (agno >= mp->m_sb.sb_agcount) {
			xfs_alert(mp,
				"%s: agno (%d) >= mp->m_sb.sb_agcount (%d)",
				__func__, agno, mp->m_sb.sb_agcount);
		}
		if (agbno >= mp->m_sb.sb_agblocks) {
			xfs_alert(mp,
		"%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
				__func__, (unsigned long long)agbno,
				(unsigned long)mp->m_sb.sb_agblocks);
		}
		if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
			xfs_alert(mp,
		"%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
				__func__, ino,
				XFS_AGINO_TO_INO(mp, agno, agino));
		}
		xfs_stack_trace();
#endif /* DEBUG */
		return XFS_ERROR(EINVAL);
	}

	blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;

	/*
	 * For bulkstat and handle lookups, we have an untrusted inode number
	 * that we have to verify is valid. We cannot do this just by reading
	 * the inode buffer as it may have been unlinked and removed leaving
	 * inodes in stale state on disk. Hence we have to do a btree lookup
	 * in all cases where an untrusted inode number is passed.
	 */
	if (flags & XFS_IGET_UNTRUSTED) {
		error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
					&chunk_agbno, &offset_agbno, flags);
		if (error)
			return error;
		goto out_map;
	}

	/*
	 * If the inode cluster size is the same as the blocksize or
	 * smaller we get to the buffer by simple arithmetics.
	 */
	if (XFS_INODE_CLUSTER_SIZE(mp) <= mp->m_sb.sb_blocksize) {
		offset = XFS_INO_TO_OFFSET(mp, ino);
		ASSERT(offset < mp->m_sb.sb_inopblock);

		imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
		imap->im_len = XFS_FSB_TO_BB(mp, 1);
		imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);
		return 0;
	}

	/*
	 * If the inode chunks are aligned then use simple maths to
	 * find the location.  Otherwise we have to do a btree
	 * lookup to find the location.
	 */
	if (mp->m_inoalign_mask) {
		offset_agbno = agbno & mp->m_inoalign_mask;
		chunk_agbno = agbno - offset_agbno;
	} else {
		error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
					&chunk_agbno, &offset_agbno, flags);
		if (error)
			return error;
	}

out_map:
	ASSERT(agbno >= chunk_agbno);
	cluster_agbno = chunk_agbno +
		((offset_agbno / blks_per_cluster) * blks_per_cluster);
	offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
		XFS_INO_TO_OFFSET(mp, ino);

	imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
	imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
	imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);

	/*
	 * If the inode number maps to a block outside the bounds
	 * of the file system then return NULL rather than calling
	 * read_buf and panicing when we get an error from the
	 * driver.
	 */
	if ((imap->im_blkno + imap->im_len) >
	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
		xfs_alert(mp,
	"%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
			__func__, (unsigned long long) imap->im_blkno,
			(unsigned long long) imap->im_len,
			XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
		return XFS_ERROR(EINVAL);
	}
	return 0;
}

/*
 * Compute and fill in value of m_in_maxlevels.
*/ void xfs_ialloc_compute_maxlevels( xfs_mount_t *mp) /* file system mount structure */ { int level; uint maxblocks; uint maxleafents; int minleafrecs; int minnoderecs; maxleafents = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG; minleafrecs = mp->m_alloc_mnr[0]; minnoderecs = mp->m_alloc_mnr[1]; maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs; for (level = 1; maxblocks > 1; level++) maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs; mp->m_in_maxlevels = level; } /* * Log specified fields for the ag hdr (inode section) */ void xfs_ialloc_log_agi( xfs_trans_t *tp, /* transaction pointer */ xfs_buf_t *bp, /* allocation group header buffer */ int fields) /* bitmask of fields to log */ { int first; /* first byte number */ int last; /* last byte number */ static const short offsets[] = { /* field starting offsets */ /* keep in sync with bit definitions */ offsetof(xfs_agi_t, agi_magicnum), offsetof(xfs_agi_t, agi_versionnum), offsetof(xfs_agi_t, agi_seqno), offsetof(xfs_agi_t, agi_length), offsetof(xfs_agi_t, agi_count), offsetof(xfs_agi_t, agi_root), offsetof(xfs_agi_t, agi_level), offsetof(xfs_agi_t, agi_freecount), offsetof(xfs_agi_t, agi_newino), offsetof(xfs_agi_t, agi_dirino), offsetof(xfs_agi_t, agi_unlinked), sizeof(xfs_agi_t) }; #ifdef DEBUG xfs_agi_t *agi; /* allocation group header */ agi = XFS_BUF_TO_AGI(bp); ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); #endif /* * Compute byte offsets for the first and last fields. */ xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS, &first, &last); /* * Log the allocation group inode header buffer. 
*/ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGI_BUF); xfs_trans_log_buf(tp, bp, first, last); } #ifdef DEBUG STATIC void xfs_check_agi_unlinked( struct xfs_agi *agi) { int i; for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) ASSERT(agi->agi_unlinked[i]); } #else #define xfs_check_agi_unlinked(agi) #endif static bool xfs_agi_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; struct xfs_agi *agi = XFS_BUF_TO_AGI(bp); if (xfs_sb_version_hascrc(&mp->m_sb) && !uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_uuid)) return false; /* * Validate the magic number of the agi block. */ if (agi->agi_magicnum != cpu_to_be32(XFS_AGI_MAGIC)) return false; if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum))) return false; /* * during growfs operations, the perag is not fully initialised, * so we can't use it for any useful checking. growfs ensures we can't * use it by using uncached buffers that don't have the perag attached * so we can detect and avoid this problem. */ if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno) return false; xfs_check_agi_unlinked(agi); return true; } static void xfs_agi_read_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; int agi_ok = 1; if (xfs_sb_version_hascrc(&mp->m_sb)) agi_ok = xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length), offsetof(struct xfs_agi, agi_crc)); agi_ok = agi_ok && xfs_agi_verify(bp); if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI, XFS_RANDOM_IALLOC_READ_AGI))) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr); xfs_buf_ioerror(bp, EFSCORRUPTED); } } static void xfs_agi_write_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; struct xfs_buf_log_item *bip = bp->b_fspriv; if (!xfs_agi_verify(bp)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr); xfs_buf_ioerror(bp, EFSCORRUPTED); return; } if (!xfs_sb_version_hascrc(&mp->m_sb)) return; if (bip) XFS_BUF_TO_AGI(bp)->agi_lsn = 
cpu_to_be64(bip->bli_item.li_lsn); xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length), offsetof(struct xfs_agi, agi_crc)); } const struct xfs_buf_ops xfs_agi_buf_ops = { .verify_read = xfs_agi_read_verify, .verify_write = xfs_agi_write_verify, }; /* * Read in the allocation group header (inode allocation section) */ int xfs_read_agi( struct xfs_mount *mp, /* file system mount structure */ struct xfs_trans *tp, /* transaction pointer */ xfs_agnumber_t agno, /* allocation group number */ struct xfs_buf **bpp) /* allocation group hdr buf */ { int error; ASSERT(agno != NULLAGNUMBER); error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops); if (error) return error; ASSERT(!xfs_buf_geterror(*bpp)); xfs_buf_set_ref(*bpp, XFS_AGI_REF); return 0; } int xfs_ialloc_read_agi( struct xfs_mount *mp, /* file system mount structure */ struct xfs_trans *tp, /* transaction pointer */ xfs_agnumber_t agno, /* allocation group number */ struct xfs_buf **bpp) /* allocation group hdr buf */ { struct xfs_agi *agi; /* allocation group header */ struct xfs_perag *pag; /* per allocation group data */ int error; error = xfs_read_agi(mp, tp, agno, bpp); if (error) return error; agi = XFS_BUF_TO_AGI(*bpp); pag = xfs_perag_get(mp, agno); if (!pag->pagi_init) { pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); pag->pagi_count = be32_to_cpu(agi->agi_count); pag->pagi_init = 1; } /* * It's possible for these to be out of sync if * we are in the middle of a forced shutdown. 
*/ ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) || XFS_FORCED_SHUTDOWN(mp)); xfs_perag_put(pag); return 0; } /* * Read in the agi to initialise the per-ag data in the mount structure */ int xfs_ialloc_pagi_init( xfs_mount_t *mp, /* file system mount structure */ xfs_trans_t *tp, /* transaction pointer */ xfs_agnumber_t agno) /* allocation group number */ { xfs_buf_t *bp = NULL; int error; error = xfs_ialloc_read_agi(mp, tp, agno, &bp); if (error) return error; if (bp) xfs_trans_brelse(tp, bp); return 0; }
gpl-2.0
AndroidDeveloperAlliance/ZenKernel_TUNA
drivers/char/tpm/tpm_nsc.c
3620
10298
/* * Copyright (C) 2004 IBM Corporation * * Authors: * Leendert van Doorn <leendert@watson.ibm.com> * Dave Safford <safford@watson.ibm.com> * Reiner Sailer <sailer@watson.ibm.com> * Kylene Hall <kjhall@us.ibm.com> * * Maintained by: <tpmdd-devel@lists.sourceforge.net> * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. * */ #include <linux/platform_device.h> #include <linux/slab.h> #include "tpm.h" /* National definitions */ enum tpm_nsc_addr{ TPM_NSC_IRQ = 0x07, TPM_NSC_BASE0_HI = 0x60, TPM_NSC_BASE0_LO = 0x61, TPM_NSC_BASE1_HI = 0x62, TPM_NSC_BASE1_LO = 0x63 }; enum tpm_nsc_index { NSC_LDN_INDEX = 0x07, NSC_SID_INDEX = 0x20, NSC_LDC_INDEX = 0x30, NSC_DIO_INDEX = 0x60, NSC_CIO_INDEX = 0x62, NSC_IRQ_INDEX = 0x70, NSC_ITS_INDEX = 0x71 }; enum tpm_nsc_status_loc { NSC_STATUS = 0x01, NSC_COMMAND = 0x01, NSC_DATA = 0x00 }; /* status bits */ enum tpm_nsc_status { NSC_STATUS_OBF = 0x01, /* output buffer full */ NSC_STATUS_IBF = 0x02, /* input buffer full */ NSC_STATUS_F0 = 0x04, /* F0 */ NSC_STATUS_A2 = 0x08, /* A2 */ NSC_STATUS_RDY = 0x10, /* ready to receive command */ NSC_STATUS_IBR = 0x20 /* ready to receive data */ }; /* command bits */ enum tpm_nsc_cmd_mode { NSC_COMMAND_NORMAL = 0x01, /* normal mode */ NSC_COMMAND_EOC = 0x03, NSC_COMMAND_CANCEL = 0x22 }; /* * Wait for a certain status to appear */ static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data) { unsigned long stop; /* status immediately available check */ *data = inb(chip->vendor.base + NSC_STATUS); if ((*data & mask) == val) return 0; /* wait for status */ stop = jiffies + 10 * HZ; do { msleep(TPM_TIMEOUT); *data = inb(chip->vendor.base + 1); if ((*data & mask) == val) return 0; } while (time_before(jiffies, stop)); return 
-EBUSY; } static int nsc_wait_for_ready(struct tpm_chip *chip) { int status; unsigned long stop; /* status immediately available check */ status = inb(chip->vendor.base + NSC_STATUS); if (status & NSC_STATUS_OBF) status = inb(chip->vendor.base + NSC_DATA); if (status & NSC_STATUS_RDY) return 0; /* wait for status */ stop = jiffies + 100; do { msleep(TPM_TIMEOUT); status = inb(chip->vendor.base + NSC_STATUS); if (status & NSC_STATUS_OBF) status = inb(chip->vendor.base + NSC_DATA); if (status & NSC_STATUS_RDY) return 0; } while (time_before(jiffies, stop)); dev_info(chip->dev, "wait for ready failed\n"); return -EBUSY; } static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) { u8 *buffer = buf; u8 data, *p; u32 size; __be32 *native_size; if (count < 6) return -EIO; if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) { dev_err(chip->dev, "F0 timeout\n"); return -EIO; } if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) { dev_err(chip->dev, "not in normal mode (0x%x)\n", data); return -EIO; } /* read the whole packet */ for (p = buffer; p < &buffer[count]; p++) { if (wait_for_stat (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) { dev_err(chip->dev, "OBF timeout (while reading data)\n"); return -EIO; } if (data & NSC_STATUS_F0) break; *p = inb(chip->vendor.base + NSC_DATA); } if ((data & NSC_STATUS_F0) == 0 && (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) { dev_err(chip->dev, "F0 not set\n"); return -EIO; } if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) { dev_err(chip->dev, "expected end of command(0x%x)\n", data); return -EIO; } native_size = (__force __be32 *) (buf + 2); size = be32_to_cpu(*native_size); if (count < size) return -EIO; return size; } static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) { u8 data; int i; /* * If we hit the chip with back to back commands it locks up * and never set IBF. Hitting it with this "hammer" seems to * fix it. 
Not sure why this is needed, we followed the flow * chart in the manual to the letter. */ outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND); if (nsc_wait_for_ready(chip) != 0) return -EIO; if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { dev_err(chip->dev, "IBF timeout\n"); return -EIO; } outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND); if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) { dev_err(chip->dev, "IBR timeout\n"); return -EIO; } for (i = 0; i < count; i++) { if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { dev_err(chip->dev, "IBF timeout (while writing data)\n"); return -EIO; } outb(buf[i], chip->vendor.base + NSC_DATA); } if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { dev_err(chip->dev, "IBF timeout\n"); return -EIO; } outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND); return count; } static void tpm_nsc_cancel(struct tpm_chip *chip) { outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND); } static u8 tpm_nsc_status(struct tpm_chip *chip) { return inb(chip->vendor.base + NSC_STATUS); } static const struct file_operations nsc_ops = { .owner = THIS_MODULE, .llseek = no_llseek, .open = tpm_open, .read = tpm_read, .write = tpm_write, .release = tpm_release, }; static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL); static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL); static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR|S_IWGRP, NULL, tpm_store_cancel); static struct attribute * nsc_attrs[] = { &dev_attr_pubek.attr, &dev_attr_pcrs.attr, &dev_attr_caps.attr, &dev_attr_cancel.attr, NULL, }; static struct attribute_group nsc_attr_grp = { .attrs = nsc_attrs }; static const struct tpm_vendor_specific tpm_nsc = { .recv = tpm_nsc_recv, .send = tpm_nsc_send, .cancel = tpm_nsc_cancel, .status = tpm_nsc_status, .req_complete_mask = NSC_STATUS_OBF, .req_complete_val = NSC_STATUS_OBF, .req_canceled = NSC_STATUS_RDY, .attr_group = &nsc_attr_grp, .miscdev = { 
.fops = &nsc_ops, }, }; static struct platform_device *pdev = NULL; static void tpm_nsc_remove(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); if ( chip ) { release_region(chip->vendor.base, 2); tpm_remove_hardware(chip->dev); } } static int tpm_nsc_suspend(struct platform_device *dev, pm_message_t msg) { return tpm_pm_suspend(&dev->dev, msg); } static int tpm_nsc_resume(struct platform_device *dev) { return tpm_pm_resume(&dev->dev); } static struct platform_driver nsc_drv = { .suspend = tpm_nsc_suspend, .resume = tpm_nsc_resume, .driver = { .name = "tpm_nsc", .owner = THIS_MODULE, }, }; static int __init init_nsc(void) { int rc = 0; int lo, hi, err; int nscAddrBase = TPM_ADDR; struct tpm_chip *chip; unsigned long base; /* verify that it is a National part (SID) */ if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) { nscAddrBase = (tpm_read_index(TPM_SUPERIO_ADDR, 0x2C)<<8)| (tpm_read_index(TPM_SUPERIO_ADDR, 0x2B)&0xFE); if (tpm_read_index(nscAddrBase, NSC_SID_INDEX) != 0xF6) return -ENODEV; } err = platform_driver_register(&nsc_drv); if (err) return err; hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI); lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO); base = (hi<<8) | lo; /* enable the DPM module */ tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01); pdev = platform_device_alloc("tpm_nscl0", -1); if (!pdev) { rc = -ENOMEM; goto err_unreg_drv; } pdev->num_resources = 0; pdev->dev.driver = &nsc_drv.driver; pdev->dev.release = tpm_nsc_remove; if ((rc = platform_device_register(pdev)) < 0) goto err_free_dev; if (request_region(base, 2, "tpm_nsc0") == NULL ) { rc = -EBUSY; goto err_unreg_dev; } if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_nsc))) { rc = -ENODEV; goto err_rel_reg; } dev_dbg(&pdev->dev, "NSC TPM detected\n"); dev_dbg(&pdev->dev, "NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n", tpm_read_index(nscAddrBase,0x07), tpm_read_index(nscAddrBase,0x20), tpm_read_index(nscAddrBase,0x27)); dev_dbg(&pdev->dev, "NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 
0x%x SIOCF8 0x%x\n", tpm_read_index(nscAddrBase,0x21), tpm_read_index(nscAddrBase,0x25), tpm_read_index(nscAddrBase,0x26), tpm_read_index(nscAddrBase,0x28)); dev_dbg(&pdev->dev, "NSC IO Base0 0x%x\n", (tpm_read_index(nscAddrBase,0x60) << 8) | tpm_read_index(nscAddrBase,0x61)); dev_dbg(&pdev->dev, "NSC IO Base1 0x%x\n", (tpm_read_index(nscAddrBase,0x62) << 8) | tpm_read_index(nscAddrBase,0x63)); dev_dbg(&pdev->dev, "NSC Interrupt number and wakeup 0x%x\n", tpm_read_index(nscAddrBase,0x70)); dev_dbg(&pdev->dev, "NSC IRQ type select 0x%x\n", tpm_read_index(nscAddrBase,0x71)); dev_dbg(&pdev->dev, "NSC DMA channel select0 0x%x, select1 0x%x\n", tpm_read_index(nscAddrBase,0x74), tpm_read_index(nscAddrBase,0x75)); dev_dbg(&pdev->dev, "NSC Config " "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", tpm_read_index(nscAddrBase,0xF0), tpm_read_index(nscAddrBase,0xF1), tpm_read_index(nscAddrBase,0xF2), tpm_read_index(nscAddrBase,0xF3), tpm_read_index(nscAddrBase,0xF4), tpm_read_index(nscAddrBase,0xF5), tpm_read_index(nscAddrBase,0xF6), tpm_read_index(nscAddrBase,0xF7), tpm_read_index(nscAddrBase,0xF8), tpm_read_index(nscAddrBase,0xF9)); dev_info(&pdev->dev, "NSC TPM revision %d\n", tpm_read_index(nscAddrBase, 0x27) & 0x1F); chip->vendor.base = base; return 0; err_rel_reg: release_region(base, 2); err_unreg_dev: platform_device_unregister(pdev); err_free_dev: kfree(pdev); err_unreg_drv: platform_driver_unregister(&nsc_drv); return rc; } static void __exit cleanup_nsc(void) { if (pdev) { tpm_nsc_remove(&pdev->dev); platform_device_unregister(pdev); kfree(pdev); pdev = NULL; } platform_driver_unregister(&nsc_drv); } module_init(init_nsc); module_exit(cleanup_nsc); MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); MODULE_DESCRIPTION("TPM Driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
gpl-2.0
free-z4u/android_kernel_htc_msm7x30
drivers/leds/leds-hp6xx.c
4388
2496
/*
 * LED driver for the HP Jornada 620/660/680/690 handhelds
 *
 * Copyright 2008 Kristoffer Ericson <kristoffer.ericson@gmail.com>
 * this driver is based on leds-spitz.c by Richard Purdie.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <asm/hd64461.h>
#include <mach/hp6xx.h>

/*
 * Green LED lives on the SH7709 PKDR port register.  The read-modify-write
 * below clears the bit for a nonzero brightness and sets it otherwise, so
 * the line appears to be active-low -- NOTE(review): confirm against the
 * hp6xx board documentation.
 */
static void hp6xxled_green_set(struct led_classdev *led_cdev,
			       enum led_brightness value)
{
	u8 v8;

	v8 = inb(PKDR);
	if (value)
		outb(v8 & (~PKDR_LED_GREEN), PKDR);
	else
		outb(v8 | PKDR_LED_GREEN, PKDR);
}

/*
 * Red LED is wired to the HD64461 companion chip GPIO port B data
 * register; same clear-to-light convention as the green LED above.
 */
static void hp6xxled_red_set(struct led_classdev *led_cdev,
			     enum led_brightness value)
{
	u16 v16;

	v16 = inw(HD64461_GPBDR);
	if (value)
		outw(v16 & (~HD64461_GPBDR_LED_RED), HD64461_GPBDR);
	else
		outw(v16 | HD64461_GPBDR_LED_RED, HD64461_GPBDR);
}

static struct led_classdev hp6xx_red_led = {
	.name			= "hp6xx:red",
	.default_trigger	= "hp6xx-charge",
	.brightness_set		= hp6xxled_red_set,
	.flags			= LED_CORE_SUSPENDRESUME,
};

static struct led_classdev hp6xx_green_led = {
	.name			= "hp6xx:green",
	.default_trigger	= "ide-disk",
	.brightness_set		= hp6xxled_green_set,
	.flags			= LED_CORE_SUSPENDRESUME,
};

/*
 * Register both LEDs; if the second registration fails, roll back the
 * first so no half-initialised state is left behind.
 */
static int hp6xxled_probe(struct platform_device *pdev)
{
	int ret;

	ret = led_classdev_register(&pdev->dev, &hp6xx_red_led);
	if (ret < 0)
		return ret;

	ret = led_classdev_register(&pdev->dev, &hp6xx_green_led);
	if (ret < 0)
		led_classdev_unregister(&hp6xx_red_led);

	return ret;
}

static int hp6xxled_remove(struct platform_device *pdev)
{
	led_classdev_unregister(&hp6xx_red_led);
	led_classdev_unregister(&hp6xx_green_led);

	return 0;
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:hp6xx-led");

static struct platform_driver hp6xxled_driver = {
	.probe		= hp6xxled_probe,
	.remove		= hp6xxled_remove,
	.driver		= {
		.name		= "hp6xx-led",
		.owner		= THIS_MODULE,
	},
};

static int __init hp6xxled_init(void)
{
	return platform_driver_register(&hp6xxled_driver);
}

static void __exit hp6xxled_exit(void)
{
	platform_driver_unregister(&hp6xxled_driver);
}

module_init(hp6xxled_init);
module_exit(hp6xxled_exit);

MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 6xx LED driver");
MODULE_LICENSE("GPL");
gpl-2.0
somcom3x/android_kernel_samsung_msm8660-caf
drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
4900
8952
/* * Copyright(c) 2009 - 2009 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/slab.h> #include "atl1c.h" static int atl1c_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; ecmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP); if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M) ecmd->supported |= SUPPORTED_1000baseT_Full; ecmd->advertising = ADVERTISED_TP; ecmd->advertising |= hw->autoneg_advertised; ecmd->port = PORT_TP; ecmd->phy_address = 0; ecmd->transceiver = XCVR_INTERNAL; if (adapter->link_speed != SPEED_0) { ethtool_cmd_speed_set(ecmd, adapter->link_speed); if (adapter->link_duplex == FULL_DUPLEX) ecmd->duplex = DUPLEX_FULL; else ecmd->duplex = DUPLEX_HALF; } else { ethtool_cmd_speed_set(ecmd, -1); ecmd->duplex = -1; } ecmd->autoneg = AUTONEG_ENABLE; return 0; } static int atl1c_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct atl1c_adapter *adapter = netdev_priv(netdev); 
struct atl1c_hw *hw = &adapter->hw; u16 autoneg_advertised; while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) msleep(1); if (ecmd->autoneg == AUTONEG_ENABLE) { autoneg_advertised = ADVERTISED_Autoneg; } else { u32 speed = ethtool_cmd_speed(ecmd); if (speed == SPEED_1000) { if (ecmd->duplex != DUPLEX_FULL) { if (netif_msg_link(adapter)) dev_warn(&adapter->pdev->dev, "1000M half is invalid\n"); clear_bit(__AT_RESETTING, &adapter->flags); return -EINVAL; } autoneg_advertised = ADVERTISED_1000baseT_Full; } else if (speed == SPEED_100) { if (ecmd->duplex == DUPLEX_FULL) autoneg_advertised = ADVERTISED_100baseT_Full; else autoneg_advertised = ADVERTISED_100baseT_Half; } else { if (ecmd->duplex == DUPLEX_FULL) autoneg_advertised = ADVERTISED_10baseT_Full; else autoneg_advertised = ADVERTISED_10baseT_Half; } } if (hw->autoneg_advertised != autoneg_advertised) { hw->autoneg_advertised = autoneg_advertised; if (atl1c_restart_autoneg(hw) != 0) { if (netif_msg_link(adapter)) dev_warn(&adapter->pdev->dev, "ethtool speed/duplex setting failed\n"); clear_bit(__AT_RESETTING, &adapter->flags); return -EINVAL; } } clear_bit(__AT_RESETTING, &adapter->flags); return 0; } static u32 atl1c_get_msglevel(struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); return adapter->msg_enable; } static void atl1c_set_msglevel(struct net_device *netdev, u32 data) { struct atl1c_adapter *adapter = netdev_priv(netdev); adapter->msg_enable = data; } static int atl1c_get_regs_len(struct net_device *netdev) { return AT_REGS_LEN; } static void atl1c_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; u32 *regs_buff = p; u16 phy_data; memset(p, 0, AT_REGS_LEN); regs->version = 0; AT_READ_REG(hw, REG_VPD_CAP, p++); AT_READ_REG(hw, REG_PM_CTRL, p++); AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++); AT_READ_REG(hw, REG_TWSI_CTRL, p++); AT_READ_REG(hw, 
REG_PCIE_DEV_MISC_CTRL, p++); AT_READ_REG(hw, REG_MASTER_CTRL, p++); AT_READ_REG(hw, REG_MANUAL_TIMER_INIT, p++); AT_READ_REG(hw, REG_IRQ_MODRT_TIMER_INIT, p++); AT_READ_REG(hw, REG_GPHY_CTRL, p++); AT_READ_REG(hw, REG_LINK_CTRL, p++); AT_READ_REG(hw, REG_IDLE_STATUS, p++); AT_READ_REG(hw, REG_MDIO_CTRL, p++); AT_READ_REG(hw, REG_SERDES_LOCK, p++); AT_READ_REG(hw, REG_MAC_CTRL, p++); AT_READ_REG(hw, REG_MAC_IPG_IFG, p++); AT_READ_REG(hw, REG_MAC_STA_ADDR, p++); AT_READ_REG(hw, REG_MAC_STA_ADDR+4, p++); AT_READ_REG(hw, REG_RX_HASH_TABLE, p++); AT_READ_REG(hw, REG_RX_HASH_TABLE+4, p++); AT_READ_REG(hw, REG_RXQ_CTRL, p++); AT_READ_REG(hw, REG_TXQ_CTRL, p++); AT_READ_REG(hw, REG_MTU, p++); AT_READ_REG(hw, REG_WOL_CTRL, p++); atl1c_read_phy_reg(hw, MII_BMCR, &phy_data); regs_buff[73] = (u32) phy_data; atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); regs_buff[74] = (u32) phy_data; } static int atl1c_get_eeprom_len(struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); if (atl1c_check_eeprom_exist(&adapter->hw)) return AT_EEPROM_LEN; else return 0; } static int atl1c_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; u32 *eeprom_buff; int first_dword, last_dword; int ret_val = 0; int i; if (eeprom->len == 0) return -EINVAL; if (!atl1c_check_eeprom_exist(hw)) /* not exist */ return -EINVAL; eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16); first_dword = eeprom->offset >> 2; last_dword = (eeprom->offset + eeprom->len - 1) >> 2; eeprom_buff = kmalloc(sizeof(u32) * (last_dword - first_dword + 1), GFP_KERNEL); if (eeprom_buff == NULL) return -ENOMEM; for (i = first_dword; i < last_dword; i++) { if (!atl1c_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) { kfree(eeprom_buff); return -EIO; } } memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), eeprom->len); kfree(eeprom_buff); return ret_val; return 0; } 
static void atl1c_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct atl1c_adapter *adapter = netdev_priv(netdev); strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, atl1c_driver_version, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_stats = 0; drvinfo->testinfo_len = 0; drvinfo->regdump_len = atl1c_get_regs_len(netdev); drvinfo->eedump_len = atl1c_get_eeprom_len(netdev); } static void atl1c_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct atl1c_adapter *adapter = netdev_priv(netdev); wol->supported = WAKE_MAGIC | WAKE_PHY; wol->wolopts = 0; if (adapter->wol & AT_WUFC_EX) wol->wolopts |= WAKE_UCAST; if (adapter->wol & AT_WUFC_MC) wol->wolopts |= WAKE_MCAST; if (adapter->wol & AT_WUFC_BC) wol->wolopts |= WAKE_BCAST; if (adapter->wol & AT_WUFC_MAG) wol->wolopts |= WAKE_MAGIC; if (adapter->wol & AT_WUFC_LNKC) wol->wolopts |= WAKE_PHY; } static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct atl1c_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) return -EOPNOTSUPP; /* these settings will always override what we currently have */ adapter->wol = 0; if (wol->wolopts & WAKE_MAGIC) adapter->wol |= AT_WUFC_MAG; if (wol->wolopts & WAKE_PHY) adapter->wol |= AT_WUFC_LNKC; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); return 0; } static int atl1c_nway_reset(struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) atl1c_reinit_locked(adapter); return 0; } static const struct ethtool_ops atl1c_ethtool_ops = { .get_settings = atl1c_get_settings, .set_settings = atl1c_set_settings, .get_drvinfo = atl1c_get_drvinfo, .get_regs_len = atl1c_get_regs_len, .get_regs = atl1c_get_regs, .get_wol = atl1c_get_wol, .set_wol = atl1c_set_wol, 
.get_msglevel = atl1c_get_msglevel, .set_msglevel = atl1c_set_msglevel, .nway_reset = atl1c_nway_reset, .get_link = ethtool_op_get_link, .get_eeprom_len = atl1c_get_eeprom_len, .get_eeprom = atl1c_get_eeprom, }; void atl1c_set_ethtool_ops(struct net_device *netdev) { SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops); }
gpl-2.0
CyanogenMod-E1/cafkernel
drivers/target/iscsi/iscsi_target_stat.c
5156
28566
/******************************************************************************* * Modern ConfigFS group context specific iSCSI statistics based on original * iscsi_target_mib.c code * * Copyright (c) 2011 Rising Tide Systems * * Licensed to the Linux Foundation under the General Public License (GPL) version 2. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. ******************************************************************************/ #include <linux/configfs.h> #include <linux/export.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> #include <target/configfs_macros.h> #include "iscsi_target_core.h" #include "iscsi_target_parameters.h" #include "iscsi_target_device.h" #include "iscsi_target_tpg.h" #include "iscsi_target_util.h" #include "iscsi_target_stat.h" #ifndef INITIAL_JIFFIES #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) #endif /* Instance Attributes Table */ #define ISCSI_INST_NUM_NODES 1 #define ISCSI_INST_DESCR "Storage Engine Target" #define ISCSI_INST_LAST_FAILURE_TYPE 0 #define ISCSI_DISCONTINUITY_TIME 0 #define ISCSI_NODE_INDEX 1 #define ISPRINT(a) ((a >= ' ') && (a <= '~')) /**************************************************************************** * iSCSI MIB Tables ****************************************************************************/ /* * Instance Attributes Table */ CONFIGFS_EATTR_STRUCT(iscsi_stat_instance, iscsi_wwn_stat_grps); #define ISCSI_STAT_INSTANCE_ATTR(_name, _mode) \ static struct 
iscsi_stat_instance_attribute \ iscsi_stat_instance_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ iscsi_stat_instance_show_attr_##_name, \ iscsi_stat_instance_store_attr_##_name); #define ISCSI_STAT_INSTANCE_ATTR_RO(_name) \ static struct iscsi_stat_instance_attribute \ iscsi_stat_instance_##_name = \ __CONFIGFS_EATTR_RO(_name, \ iscsi_stat_instance_show_attr_##_name); static ssize_t iscsi_stat_instance_show_attr_inst( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); } ISCSI_STAT_INSTANCE_ATTR_RO(inst); static ssize_t iscsi_stat_instance_show_attr_min_ver( struct iscsi_wwn_stat_grps *igrps, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION); } ISCSI_STAT_INSTANCE_ATTR_RO(min_ver); static ssize_t iscsi_stat_instance_show_attr_max_ver( struct iscsi_wwn_stat_grps *igrps, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION); } ISCSI_STAT_INSTANCE_ATTR_RO(max_ver); static ssize_t iscsi_stat_instance_show_attr_portals( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_num_tpg_nps); } ISCSI_STAT_INSTANCE_ATTR_RO(portals); static ssize_t iscsi_stat_instance_show_attr_nodes( struct iscsi_wwn_stat_grps *igrps, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES); } ISCSI_STAT_INSTANCE_ATTR_RO(nodes); static ssize_t iscsi_stat_instance_show_attr_sessions( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_nsessions); } ISCSI_STAT_INSTANCE_ATTR_RO(sessions); static ssize_t iscsi_stat_instance_show_attr_fail_sess( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = 
container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; u32 sess_err_count; spin_lock_bh(&sess_err->lock); sess_err_count = (sess_err->digest_errors + sess_err->cxn_timeout_errors + sess_err->pdu_format_errors); spin_unlock_bh(&sess_err->lock); return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count); } ISCSI_STAT_INSTANCE_ATTR_RO(fail_sess); static ssize_t iscsi_stat_instance_show_attr_fail_type( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; return snprintf(page, PAGE_SIZE, "%u\n", sess_err->last_sess_failure_type); } ISCSI_STAT_INSTANCE_ATTR_RO(fail_type); static ssize_t iscsi_stat_instance_show_attr_fail_rem_name( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; return snprintf(page, PAGE_SIZE, "%s\n", sess_err->last_sess_fail_rem_name[0] ? 
sess_err->last_sess_fail_rem_name : NONE); } ISCSI_STAT_INSTANCE_ATTR_RO(fail_rem_name); static ssize_t iscsi_stat_instance_show_attr_disc_time( struct iscsi_wwn_stat_grps *igrps, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME); } ISCSI_STAT_INSTANCE_ATTR_RO(disc_time); static ssize_t iscsi_stat_instance_show_attr_description( struct iscsi_wwn_stat_grps *igrps, char *page) { return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR); } ISCSI_STAT_INSTANCE_ATTR_RO(description); static ssize_t iscsi_stat_instance_show_attr_vendor( struct iscsi_wwn_stat_grps *igrps, char *page) { return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n"); } ISCSI_STAT_INSTANCE_ATTR_RO(vendor); static ssize_t iscsi_stat_instance_show_attr_version( struct iscsi_wwn_stat_grps *igrps, char *page) { return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION); } ISCSI_STAT_INSTANCE_ATTR_RO(version); CONFIGFS_EATTR_OPS(iscsi_stat_instance, iscsi_wwn_stat_grps, iscsi_instance_group); static struct configfs_attribute *iscsi_stat_instance_attrs[] = { &iscsi_stat_instance_inst.attr, &iscsi_stat_instance_min_ver.attr, &iscsi_stat_instance_max_ver.attr, &iscsi_stat_instance_portals.attr, &iscsi_stat_instance_nodes.attr, &iscsi_stat_instance_sessions.attr, &iscsi_stat_instance_fail_sess.attr, &iscsi_stat_instance_fail_type.attr, &iscsi_stat_instance_fail_rem_name.attr, &iscsi_stat_instance_disc_time.attr, &iscsi_stat_instance_description.attr, &iscsi_stat_instance_vendor.attr, &iscsi_stat_instance_version.attr, NULL, }; static struct configfs_item_operations iscsi_stat_instance_item_ops = { .show_attribute = iscsi_stat_instance_attr_show, .store_attribute = iscsi_stat_instance_attr_store, }; struct config_item_type iscsi_stat_instance_cit = { .ct_item_ops = &iscsi_stat_instance_item_ops, .ct_attrs = iscsi_stat_instance_attrs, .ct_owner = THIS_MODULE, }; /* * Instance Session Failure Stats Table */ CONFIGFS_EATTR_STRUCT(iscsi_stat_sess_err, 
iscsi_wwn_stat_grps); #define ISCSI_STAT_SESS_ERR_ATTR(_name, _mode) \ static struct iscsi_stat_sess_err_attribute \ iscsi_stat_sess_err_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ iscsi_stat_sess_err_show_attr_##_name, \ iscsi_stat_sess_err_store_attr_##_name); #define ISCSI_STAT_SESS_ERR_ATTR_RO(_name) \ static struct iscsi_stat_sess_err_attribute \ iscsi_stat_sess_err_##_name = \ __CONFIGFS_EATTR_RO(_name, \ iscsi_stat_sess_err_show_attr_##_name); static ssize_t iscsi_stat_sess_err_show_attr_inst( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); } ISCSI_STAT_SESS_ERR_ATTR_RO(inst); static ssize_t iscsi_stat_sess_err_show_attr_digest_errors( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors); } ISCSI_STAT_SESS_ERR_ATTR_RO(digest_errors); static ssize_t iscsi_stat_sess_err_show_attr_cxn_errors( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors); } ISCSI_STAT_SESS_ERR_ATTR_RO(cxn_errors); static ssize_t iscsi_stat_sess_err_show_attr_format_errors( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors); } ISCSI_STAT_SESS_ERR_ATTR_RO(format_errors); CONFIGFS_EATTR_OPS(iscsi_stat_sess_err, iscsi_wwn_stat_grps, iscsi_sess_err_group); static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = { 
&iscsi_stat_sess_err_inst.attr, &iscsi_stat_sess_err_digest_errors.attr, &iscsi_stat_sess_err_cxn_errors.attr, &iscsi_stat_sess_err_format_errors.attr, NULL, }; static struct configfs_item_operations iscsi_stat_sess_err_item_ops = { .show_attribute = iscsi_stat_sess_err_attr_show, .store_attribute = iscsi_stat_sess_err_attr_store, }; struct config_item_type iscsi_stat_sess_err_cit = { .ct_item_ops = &iscsi_stat_sess_err_item_ops, .ct_attrs = iscsi_stat_sess_err_attrs, .ct_owner = THIS_MODULE, }; /* * Target Attributes Table */ CONFIGFS_EATTR_STRUCT(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps); #define ISCSI_STAT_TGT_ATTR(_name, _mode) \ static struct iscsi_stat_tgt_attr_attribute \ iscsi_stat_tgt_attr_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ iscsi_stat_tgt-attr_show_attr_##_name, \ iscsi_stat_tgt_attr_store_attr_##_name); #define ISCSI_STAT_TGT_ATTR_RO(_name) \ static struct iscsi_stat_tgt_attr_attribute \ iscsi_stat_tgt_attr_##_name = \ __CONFIGFS_EATTR_RO(_name, \ iscsi_stat_tgt_attr_show_attr_##_name); static ssize_t iscsi_stat_tgt_attr_show_attr_inst( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); } ISCSI_STAT_TGT_ATTR_RO(inst); static ssize_t iscsi_stat_tgt_attr_show_attr_indx( struct iscsi_wwn_stat_grps *igrps, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX); } ISCSI_STAT_TGT_ATTR_RO(indx); static ssize_t iscsi_stat_tgt_attr_show_attr_login_fails( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_login_stats *lstat = &tiqn->login_stats; u32 fail_count; spin_lock(&lstat->lock); fail_count = (lstat->redirects + lstat->authorize_fails + lstat->authenticate_fails + lstat->negotiate_fails + lstat->other_fails); spin_unlock(&lstat->lock); return snprintf(page, PAGE_SIZE, "%u\n", fail_count); } 
ISCSI_STAT_TGT_ATTR_RO(login_fails); static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_time( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_login_stats *lstat = &tiqn->login_stats; u32 last_fail_time; spin_lock(&lstat->lock); last_fail_time = lstat->last_fail_time ? (u32)(((u32)lstat->last_fail_time - INITIAL_JIFFIES) * 100 / HZ) : 0; spin_unlock(&lstat->lock); return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time); } ISCSI_STAT_TGT_ATTR_RO(last_fail_time); static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_type( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_login_stats *lstat = &tiqn->login_stats; u32 last_fail_type; spin_lock(&lstat->lock); last_fail_type = lstat->last_fail_type; spin_unlock(&lstat->lock); return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type); } ISCSI_STAT_TGT_ATTR_RO(last_fail_type); static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_name( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_login_stats *lstat = &tiqn->login_stats; unsigned char buf[224]; spin_lock(&lstat->lock); snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ? lstat->last_intr_fail_name : NONE); spin_unlock(&lstat->lock); return snprintf(page, PAGE_SIZE, "%s\n", buf); } ISCSI_STAT_TGT_ATTR_RO(fail_intr_name); static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_login_stats *lstat = &tiqn->login_stats; unsigned char buf[8]; spin_lock(&lstat->lock); snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_addr != NULL) ? 
"ipv6" : "ipv4"); spin_unlock(&lstat->lock); return snprintf(page, PAGE_SIZE, "%s\n", buf); } ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type); static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_login_stats *lstat = &tiqn->login_stats; unsigned char buf[32]; spin_lock(&lstat->lock); if (lstat->last_intr_fail_ip_family == AF_INET6) snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr); else snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr); spin_unlock(&lstat->lock); return snprintf(page, PAGE_SIZE, "%s\n", buf); } ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr); CONFIGFS_EATTR_OPS(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps, iscsi_tgt_attr_group); static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = { &iscsi_stat_tgt_attr_inst.attr, &iscsi_stat_tgt_attr_indx.attr, &iscsi_stat_tgt_attr_login_fails.attr, &iscsi_stat_tgt_attr_last_fail_time.attr, &iscsi_stat_tgt_attr_last_fail_type.attr, &iscsi_stat_tgt_attr_fail_intr_name.attr, &iscsi_stat_tgt_attr_fail_intr_addr_type.attr, &iscsi_stat_tgt_attr_fail_intr_addr.attr, NULL, }; static struct configfs_item_operations iscsi_stat_tgt_attr_item_ops = { .show_attribute = iscsi_stat_tgt_attr_attr_show, .store_attribute = iscsi_stat_tgt_attr_attr_store, }; struct config_item_type iscsi_stat_tgt_attr_cit = { .ct_item_ops = &iscsi_stat_tgt_attr_item_ops, .ct_attrs = iscsi_stat_tgt_attr_attrs, .ct_owner = THIS_MODULE, }; /* * Target Login Stats Table */ CONFIGFS_EATTR_STRUCT(iscsi_stat_login, iscsi_wwn_stat_grps); #define ISCSI_STAT_LOGIN(_name, _mode) \ static struct iscsi_stat_login_attribute \ iscsi_stat_login_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ iscsi_stat_login_show_attr_##_name, \ iscsi_stat_login_store_attr_##_name); #define ISCSI_STAT_LOGIN_RO(_name) \ static struct iscsi_stat_login_attribute \ iscsi_stat_login_##_name = \ __CONFIGFS_EATTR_RO(_name, \ 
iscsi_stat_login_show_attr_##_name); static ssize_t iscsi_stat_login_show_attr_inst( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); } ISCSI_STAT_LOGIN_RO(inst); static ssize_t iscsi_stat_login_show_attr_indx( struct iscsi_wwn_stat_grps *igrps, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX); } ISCSI_STAT_LOGIN_RO(indx); static ssize_t iscsi_stat_login_show_attr_accepts( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_login_stats *lstat = &tiqn->login_stats; ssize_t ret; spin_lock(&lstat->lock); ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts); spin_unlock(&lstat->lock); return ret; } ISCSI_STAT_LOGIN_RO(accepts); static ssize_t iscsi_stat_login_show_attr_other_fails( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_login_stats *lstat = &tiqn->login_stats; ssize_t ret; spin_lock(&lstat->lock); ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails); spin_unlock(&lstat->lock); return ret; } ISCSI_STAT_LOGIN_RO(other_fails); static ssize_t iscsi_stat_login_show_attr_redirects( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_login_stats *lstat = &tiqn->login_stats; ssize_t ret; spin_lock(&lstat->lock); ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects); spin_unlock(&lstat->lock); return ret; } ISCSI_STAT_LOGIN_RO(redirects); static ssize_t iscsi_stat_login_show_attr_authorize_fails( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_login_stats *lstat = &tiqn->login_stats; ssize_t ret; spin_lock(&lstat->lock); 
ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails); spin_unlock(&lstat->lock); return ret; } ISCSI_STAT_LOGIN_RO(authorize_fails); static ssize_t iscsi_stat_login_show_attr_authenticate_fails( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_login_stats *lstat = &tiqn->login_stats; ssize_t ret; spin_lock(&lstat->lock); ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails); spin_unlock(&lstat->lock); return ret; } ISCSI_STAT_LOGIN_RO(authenticate_fails); static ssize_t iscsi_stat_login_show_attr_negotiate_fails( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_login_stats *lstat = &tiqn->login_stats; ssize_t ret; spin_lock(&lstat->lock); ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails); spin_unlock(&lstat->lock); return ret; } ISCSI_STAT_LOGIN_RO(negotiate_fails); CONFIGFS_EATTR_OPS(iscsi_stat_login, iscsi_wwn_stat_grps, iscsi_login_stats_group); static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = { &iscsi_stat_login_inst.attr, &iscsi_stat_login_indx.attr, &iscsi_stat_login_accepts.attr, &iscsi_stat_login_other_fails.attr, &iscsi_stat_login_redirects.attr, &iscsi_stat_login_authorize_fails.attr, &iscsi_stat_login_authenticate_fails.attr, &iscsi_stat_login_negotiate_fails.attr, NULL, }; static struct configfs_item_operations iscsi_stat_login_stats_item_ops = { .show_attribute = iscsi_stat_login_attr_show, .store_attribute = iscsi_stat_login_attr_store, }; struct config_item_type iscsi_stat_login_cit = { .ct_item_ops = &iscsi_stat_login_stats_item_ops, .ct_attrs = iscsi_stat_login_stats_attrs, .ct_owner = THIS_MODULE, }; /* * Target Logout Stats Table */ CONFIGFS_EATTR_STRUCT(iscsi_stat_logout, iscsi_wwn_stat_grps); #define ISCSI_STAT_LOGOUT(_name, _mode) \ static struct iscsi_stat_logout_attribute \ iscsi_stat_logout_##_name 
= \ __CONFIGFS_EATTR(_name, _mode, \ iscsi_stat_logout_show_attr_##_name, \ iscsi_stat_logout_store_attr_##_name); #define ISCSI_STAT_LOGOUT_RO(_name) \ static struct iscsi_stat_logout_attribute \ iscsi_stat_logout_##_name = \ __CONFIGFS_EATTR_RO(_name, \ iscsi_stat_logout_show_attr_##_name); static ssize_t iscsi_stat_logout_show_attr_inst( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); } ISCSI_STAT_LOGOUT_RO(inst); static ssize_t iscsi_stat_logout_show_attr_indx( struct iscsi_wwn_stat_grps *igrps, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX); } ISCSI_STAT_LOGOUT_RO(indx); static ssize_t iscsi_stat_logout_show_attr_normal_logouts( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_logout_stats *lstats = &tiqn->logout_stats; return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts); } ISCSI_STAT_LOGOUT_RO(normal_logouts); static ssize_t iscsi_stat_logout_show_attr_abnormal_logouts( struct iscsi_wwn_stat_grps *igrps, char *page) { struct iscsi_tiqn *tiqn = container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps); struct iscsi_logout_stats *lstats = &tiqn->logout_stats; return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts); } ISCSI_STAT_LOGOUT_RO(abnormal_logouts); CONFIGFS_EATTR_OPS(iscsi_stat_logout, iscsi_wwn_stat_grps, iscsi_logout_stats_group); static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = { &iscsi_stat_logout_inst.attr, &iscsi_stat_logout_indx.attr, &iscsi_stat_logout_normal_logouts.attr, &iscsi_stat_logout_abnormal_logouts.attr, NULL, }; static struct configfs_item_operations iscsi_stat_logout_stats_item_ops = { .show_attribute = iscsi_stat_logout_attr_show, .store_attribute = iscsi_stat_logout_attr_store, }; struct config_item_type iscsi_stat_logout_cit = { 
.ct_item_ops = &iscsi_stat_logout_stats_item_ops, .ct_attrs = iscsi_stat_logout_stats_attrs, .ct_owner = THIS_MODULE, }; /* * Session Stats Table */ CONFIGFS_EATTR_STRUCT(iscsi_stat_sess, iscsi_node_stat_grps); #define ISCSI_STAT_SESS(_name, _mode) \ static struct iscsi_stat_sess_attribute \ iscsi_stat_sess_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ iscsi_stat_sess_show_attr_##_name, \ iscsi_stat_sess_store_attr_##_name); #define ISCSI_STAT_SESS_RO(_name) \ static struct iscsi_stat_sess_attribute \ iscsi_stat_sess_##_name = \ __CONFIGFS_EATTR_RO(_name, \ iscsi_stat_sess_show_attr_##_name); static ssize_t iscsi_stat_sess_show_attr_inst( struct iscsi_node_stat_grps *igrps, char *page) { struct iscsi_node_acl *acl = container_of(igrps, struct iscsi_node_acl, node_stat_grps); struct se_wwn *wwn = acl->se_node_acl.se_tpg->se_tpg_wwn; struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn); return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); } ISCSI_STAT_SESS_RO(inst); static ssize_t iscsi_stat_sess_show_attr_node( struct iscsi_node_stat_grps *igrps, char *page) { struct iscsi_node_acl *acl = container_of(igrps, struct iscsi_node_acl, node_stat_grps); struct se_node_acl *se_nacl = &acl->se_node_acl; struct iscsi_session *sess; struct se_session *se_sess; ssize_t ret = 0; spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%u\n", sess->sess_ops->SessionType ? 
0 : ISCSI_NODE_INDEX); } spin_unlock_bh(&se_nacl->nacl_sess_lock); return ret; } ISCSI_STAT_SESS_RO(node); static ssize_t iscsi_stat_sess_show_attr_indx( struct iscsi_node_stat_grps *igrps, char *page) { struct iscsi_node_acl *acl = container_of(igrps, struct iscsi_node_acl, node_stat_grps); struct se_node_acl *se_nacl = &acl->se_node_acl; struct iscsi_session *sess; struct se_session *se_sess; ssize_t ret = 0; spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%u\n", sess->session_index); } spin_unlock_bh(&se_nacl->nacl_sess_lock); return ret; } ISCSI_STAT_SESS_RO(indx); static ssize_t iscsi_stat_sess_show_attr_cmd_pdus( struct iscsi_node_stat_grps *igrps, char *page) { struct iscsi_node_acl *acl = container_of(igrps, struct iscsi_node_acl, node_stat_grps); struct se_node_acl *se_nacl = &acl->se_node_acl; struct iscsi_session *sess; struct se_session *se_sess; ssize_t ret = 0; spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus); } spin_unlock_bh(&se_nacl->nacl_sess_lock); return ret; } ISCSI_STAT_SESS_RO(cmd_pdus); static ssize_t iscsi_stat_sess_show_attr_rsp_pdus( struct iscsi_node_stat_grps *igrps, char *page) { struct iscsi_node_acl *acl = container_of(igrps, struct iscsi_node_acl, node_stat_grps); struct se_node_acl *se_nacl = &acl->se_node_acl; struct iscsi_session *sess; struct se_session *se_sess; ssize_t ret = 0; spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus); } spin_unlock_bh(&se_nacl->nacl_sess_lock); return ret; } ISCSI_STAT_SESS_RO(rsp_pdus); static ssize_t iscsi_stat_sess_show_attr_txdata_octs( struct iscsi_node_stat_grps *igrps, char *page) { struct iscsi_node_acl *acl = 
container_of(igrps, struct iscsi_node_acl, node_stat_grps); struct se_node_acl *se_nacl = &acl->se_node_acl; struct iscsi_session *sess; struct se_session *se_sess; ssize_t ret = 0; spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)sess->tx_data_octets); } spin_unlock_bh(&se_nacl->nacl_sess_lock); return ret; } ISCSI_STAT_SESS_RO(txdata_octs); static ssize_t iscsi_stat_sess_show_attr_rxdata_octs( struct iscsi_node_stat_grps *igrps, char *page) { struct iscsi_node_acl *acl = container_of(igrps, struct iscsi_node_acl, node_stat_grps); struct se_node_acl *se_nacl = &acl->se_node_acl; struct iscsi_session *sess; struct se_session *se_sess; ssize_t ret = 0; spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)sess->rx_data_octets); } spin_unlock_bh(&se_nacl->nacl_sess_lock); return ret; } ISCSI_STAT_SESS_RO(rxdata_octs); static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors( struct iscsi_node_stat_grps *igrps, char *page) { struct iscsi_node_acl *acl = container_of(igrps, struct iscsi_node_acl, node_stat_grps); struct se_node_acl *se_nacl = &acl->se_node_acl; struct iscsi_session *sess; struct se_session *se_sess; ssize_t ret = 0; spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%u\n", sess->conn_digest_errors); } spin_unlock_bh(&se_nacl->nacl_sess_lock); return ret; } ISCSI_STAT_SESS_RO(conn_digest_errors); static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors( struct iscsi_node_stat_grps *igrps, char *page) { struct iscsi_node_acl *acl = container_of(igrps, struct iscsi_node_acl, node_stat_grps); struct se_node_acl *se_nacl = &acl->se_node_acl; struct iscsi_session *sess; 
struct se_session *se_sess; ssize_t ret = 0; spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%u\n", sess->conn_timeout_errors); } spin_unlock_bh(&se_nacl->nacl_sess_lock); return ret; } ISCSI_STAT_SESS_RO(conn_timeout_errors); CONFIGFS_EATTR_OPS(iscsi_stat_sess, iscsi_node_stat_grps, iscsi_sess_stats_group); static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = { &iscsi_stat_sess_inst.attr, &iscsi_stat_sess_node.attr, &iscsi_stat_sess_indx.attr, &iscsi_stat_sess_cmd_pdus.attr, &iscsi_stat_sess_rsp_pdus.attr, &iscsi_stat_sess_txdata_octs.attr, &iscsi_stat_sess_rxdata_octs.attr, &iscsi_stat_sess_conn_digest_errors.attr, &iscsi_stat_sess_conn_timeout_errors.attr, NULL, }; static struct configfs_item_operations iscsi_stat_sess_stats_item_ops = { .show_attribute = iscsi_stat_sess_attr_show, .store_attribute = iscsi_stat_sess_attr_store, }; struct config_item_type iscsi_stat_sess_cit = { .ct_item_ops = &iscsi_stat_sess_stats_item_ops, .ct_attrs = iscsi_stat_sess_stats_attrs, .ct_owner = THIS_MODULE, };
gpl-2.0
matyushov/vs311
drivers/media/i2c/msp3400-kthreads.c
9508
33535
/* * Programming the mspx4xx sound processor family * * (c) 1997-2001 Gerd Knorr <kraxel@bytesex.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/freezer.h> #include <linux/videodev2.h> #include <media/v4l2-common.h> #include <media/msp3400.h> #include <linux/kthread.h> #include <linux/suspend.h> #include "msp3400-driver.h" /* this one uses the automatic sound standard detection of newer msp34xx chip versions */ static struct { int retval; int main, second; char *name; v4l2_std_id std; } msp_stdlist[] = { { 0x0000, 0, 0, "could not detect sound standard", V4L2_STD_ALL }, { 0x0001, 0, 0, "autodetect start", V4L2_STD_ALL }, { 0x0002, MSP_CARRIER(4.5), MSP_CARRIER(4.72), "4.5/4.72 M Dual FM-Stereo", V4L2_STD_MN }, { 0x0003, MSP_CARRIER(5.5), MSP_CARRIER(5.7421875), "5.5/5.74 B/G Dual FM-Stereo", V4L2_STD_BG }, { 0x0004, MSP_CARRIER(6.5), MSP_CARRIER(6.2578125), "6.5/6.25 D/K1 Dual FM-Stereo", V4L2_STD_DK }, { 0x0005, MSP_CARRIER(6.5), MSP_CARRIER(6.7421875), "6.5/6.74 D/K2 Dual FM-Stereo", V4L2_STD_DK }, { 0x0006, MSP_CARRIER(6.5), MSP_CARRIER(6.5), "6.5 D/K FM-Mono (HDEV3)", V4L2_STD_DK }, { 0x0007, MSP_CARRIER(6.5), MSP_CARRIER(5.7421875), "6.5/5.74 D/K3 Dual FM-Stereo", V4L2_STD_DK }, { 0x0008, MSP_CARRIER(5.5), 
MSP_CARRIER(5.85), "5.5/5.85 B/G NICAM FM", V4L2_STD_BG }, { 0x0009, MSP_CARRIER(6.5), MSP_CARRIER(5.85), "6.5/5.85 L NICAM AM", V4L2_STD_L }, { 0x000a, MSP_CARRIER(6.0), MSP_CARRIER(6.55), "6.0/6.55 I NICAM FM", V4L2_STD_PAL_I }, { 0x000b, MSP_CARRIER(6.5), MSP_CARRIER(5.85), "6.5/5.85 D/K NICAM FM", V4L2_STD_DK }, { 0x000c, MSP_CARRIER(6.5), MSP_CARRIER(5.85), "6.5/5.85 D/K NICAM FM (HDEV2)", V4L2_STD_DK }, { 0x000d, MSP_CARRIER(6.5), MSP_CARRIER(5.85), "6.5/5.85 D/K NICAM FM (HDEV3)", V4L2_STD_DK }, { 0x0020, MSP_CARRIER(4.5), MSP_CARRIER(4.5), "4.5 M BTSC-Stereo", V4L2_STD_MTS }, { 0x0021, MSP_CARRIER(4.5), MSP_CARRIER(4.5), "4.5 M BTSC-Mono + SAP", V4L2_STD_MTS }, { 0x0030, MSP_CARRIER(4.5), MSP_CARRIER(4.5), "4.5 M EIA-J Japan Stereo", V4L2_STD_NTSC_M_JP }, { 0x0040, MSP_CARRIER(10.7), MSP_CARRIER(10.7), "10.7 FM-Stereo Radio", V4L2_STD_ALL }, { 0x0050, MSP_CARRIER(6.5), MSP_CARRIER(6.5), "6.5 SAT-Mono", V4L2_STD_ALL }, { 0x0051, MSP_CARRIER(7.02), MSP_CARRIER(7.20), "7.02/7.20 SAT-Stereo", V4L2_STD_ALL }, { 0x0060, MSP_CARRIER(7.2), MSP_CARRIER(7.2), "7.2 SAT ADR", V4L2_STD_ALL }, { -1, 0, 0, NULL, 0 }, /* EOF */ }; static struct msp3400c_init_data_dem { int fir1[6]; int fir2[6]; int cdo1; int cdo2; int ad_cv; int mode_reg; int dsp_src; int dsp_matrix; } msp3400c_init_data[] = { { /* AM (for carrier detect / msp3400) */ {75, 19, 36, 35, 39, 40}, {75, 19, 36, 35, 39, 40}, MSP_CARRIER(5.5), MSP_CARRIER(5.5), 0x00d0, 0x0500, 0x0020, 0x3000 }, { /* AM (for carrier detect / msp3410) */ {-1, -1, -8, 2, 59, 126}, {-1, -1, -8, 2, 59, 126}, MSP_CARRIER(5.5), MSP_CARRIER(5.5), 0x00d0, 0x0100, 0x0020, 0x3000 }, { /* FM Radio */ {-8, -8, 4, 6, 78, 107}, {-8, -8, 4, 6, 78, 107}, MSP_CARRIER(10.7), MSP_CARRIER(10.7), 0x00d0, 0x0480, 0x0020, 0x3000 }, { /* Terrestrial FM-mono + FM-stereo */ {3, 18, 27, 48, 66, 72}, {3, 18, 27, 48, 66, 72}, MSP_CARRIER(5.5), MSP_CARRIER(5.5), 0x00d0, 0x0480, 0x0030, 0x3000 }, { /* Sat FM-mono */ { 1, 9, 14, 24, 33, 37}, { 3, 18, 27, 48, 66, 
72}, MSP_CARRIER(6.5), MSP_CARRIER(6.5), 0x00c6, 0x0480, 0x0000, 0x3000 }, { /* NICAM/FM -- B/G (5.5/5.85), D/K (6.5/5.85) */ {-2, -8, -10, 10, 50, 86}, {3, 18, 27, 48, 66, 72}, MSP_CARRIER(5.5), MSP_CARRIER(5.5), 0x00d0, 0x0040, 0x0120, 0x3000 }, { /* NICAM/FM -- I (6.0/6.552) */ {2, 4, -6, -4, 40, 94}, {3, 18, 27, 48, 66, 72}, MSP_CARRIER(6.0), MSP_CARRIER(6.0), 0x00d0, 0x0040, 0x0120, 0x3000 }, { /* NICAM/AM -- L (6.5/5.85) */ {-2, -8, -10, 10, 50, 86}, {-4, -12, -9, 23, 79, 126}, MSP_CARRIER(6.5), MSP_CARRIER(6.5), 0x00c6, 0x0140, 0x0120, 0x7c00 }, }; struct msp3400c_carrier_detect { int cdo; char *name; }; static struct msp3400c_carrier_detect msp3400c_carrier_detect_main[] = { /* main carrier */ { MSP_CARRIER(4.5), "4.5 NTSC" }, { MSP_CARRIER(5.5), "5.5 PAL B/G" }, { MSP_CARRIER(6.0), "6.0 PAL I" }, { MSP_CARRIER(6.5), "6.5 PAL D/K + SAT + SECAM" } }; static struct msp3400c_carrier_detect msp3400c_carrier_detect_55[] = { /* PAL B/G */ { MSP_CARRIER(5.7421875), "5.742 PAL B/G FM-stereo" }, { MSP_CARRIER(5.85), "5.85 PAL B/G NICAM" } }; static struct msp3400c_carrier_detect msp3400c_carrier_detect_65[] = { /* PAL SAT / SECAM */ { MSP_CARRIER(5.85), "5.85 PAL D/K + SECAM NICAM" }, { MSP_CARRIER(6.2578125), "6.25 PAL D/K1 FM-stereo" }, { MSP_CARRIER(6.7421875), "6.74 PAL D/K2 FM-stereo" }, { MSP_CARRIER(7.02), "7.02 PAL SAT FM-stereo s/b" }, { MSP_CARRIER(7.20), "7.20 PAL SAT FM-stereo s" }, { MSP_CARRIER(7.38), "7.38 PAL SAT FM-stereo b" }, }; /* ------------------------------------------------------------------------ */ const char *msp_standard_std_name(int std) { int i; for (i = 0; msp_stdlist[i].name != NULL; i++) if (msp_stdlist[i].retval == std) return msp_stdlist[i].name; return "unknown"; } static v4l2_std_id msp_standard_std(int std) { int i; for (i = 0; msp_stdlist[i].name != NULL; i++) if (msp_stdlist[i].retval == std) return msp_stdlist[i].std; return V4L2_STD_ALL; } static void msp_set_source(struct i2c_client *client, u16 src) { struct msp_state 
*state = to_state(i2c_get_clientdata(client)); if (msp_dolby) { msp_write_dsp(client, 0x0008, 0x0520); /* I2S1 */ msp_write_dsp(client, 0x0009, 0x0620); /* I2S2 */ } else { msp_write_dsp(client, 0x0008, src); msp_write_dsp(client, 0x0009, src); } msp_write_dsp(client, 0x000a, src); msp_write_dsp(client, 0x000b, src); msp_write_dsp(client, 0x000c, src); if (state->has_scart2_out) msp_write_dsp(client, 0x0041, src); } void msp3400c_set_carrier(struct i2c_client *client, int cdo1, int cdo2) { msp_write_dem(client, 0x0093, cdo1 & 0xfff); msp_write_dem(client, 0x009b, cdo1 >> 12); msp_write_dem(client, 0x00a3, cdo2 & 0xfff); msp_write_dem(client, 0x00ab, cdo2 >> 12); msp_write_dem(client, 0x0056, 0); /* LOAD_REG_1/2 */ } void msp3400c_set_mode(struct i2c_client *client, int mode) { struct msp_state *state = to_state(i2c_get_clientdata(client)); struct msp3400c_init_data_dem *data = &msp3400c_init_data[mode]; int tuner = (state->route_in >> 3) & 1; int i; v4l_dbg(1, msp_debug, client, "set_mode: %d\n", mode); state->mode = mode; state->rxsubchans = V4L2_TUNER_SUB_MONO; msp_write_dem(client, 0x00bb, data->ad_cv | (tuner ? 0x100 : 0)); for (i = 5; i >= 0; i--) /* fir 1 */ msp_write_dem(client, 0x0001, data->fir1[i]); msp_write_dem(client, 0x0005, 0x0004); /* fir 2 */ msp_write_dem(client, 0x0005, 0x0040); msp_write_dem(client, 0x0005, 0x0000); for (i = 5; i >= 0; i--) msp_write_dem(client, 0x0005, data->fir2[i]); msp_write_dem(client, 0x0083, data->mode_reg); msp3400c_set_carrier(client, data->cdo1, data->cdo2); msp_set_source(client, data->dsp_src); /* set prescales */ /* volume prescale for SCART (AM mono input) */ msp_write_dsp(client, 0x000d, 0x1900); msp_write_dsp(client, 0x000e, data->dsp_matrix); if (state->has_nicam) /* nicam prescale */ msp_write_dsp(client, 0x0010, 0x5a00); } /* Set audio mode. Note that the pre-'G' models do not support BTSC+SAP, nor do they support stereo BTSC. 
*/ static void msp3400c_set_audmode(struct i2c_client *client) { static char *strmode[] = { "mono", "stereo", "lang2", "lang1", "lang1+lang2" }; struct msp_state *state = to_state(i2c_get_clientdata(client)); char *modestr = (state->audmode >= 0 && state->audmode < 5) ? strmode[state->audmode] : "unknown"; int src = 0; /* channel source: FM/AM, nicam or SCART */ int audmode = state->audmode; if (state->opmode == OPMODE_AUTOSELECT) { /* this method would break everything, let's make sure * it's never called */ v4l_dbg(1, msp_debug, client, "set_audmode called with mode=%d instead of set_source (ignored)\n", state->audmode); return; } /* Note: for the C and D revs no NTSC stereo + SAP is possible as the hardware does not support SAP. So the rxsubchans combination of STEREO | LANG2 does not occur. */ if (state->mode != MSP_MODE_EXTERN) { /* switch to mono if only mono is available */ if (state->rxsubchans == V4L2_TUNER_SUB_MONO) audmode = V4L2_TUNER_MODE_MONO; /* if bilingual */ else if (state->rxsubchans & V4L2_TUNER_SUB_LANG2) { /* and mono or stereo, then fallback to lang1 */ if (audmode == V4L2_TUNER_MODE_MONO || audmode == V4L2_TUNER_MODE_STEREO) audmode = V4L2_TUNER_MODE_LANG1; } /* if stereo, and audmode is not mono, then switch to stereo */ else if (audmode != V4L2_TUNER_MODE_MONO) audmode = V4L2_TUNER_MODE_STEREO; } /* switch demodulator */ switch (state->mode) { case MSP_MODE_FM_TERRA: v4l_dbg(1, msp_debug, client, "FM set_audmode: %s\n", modestr); switch (audmode) { case V4L2_TUNER_MODE_STEREO: msp_write_dsp(client, 0x000e, 0x3001); break; case V4L2_TUNER_MODE_MONO: case V4L2_TUNER_MODE_LANG1: case V4L2_TUNER_MODE_LANG2: case V4L2_TUNER_MODE_LANG1_LANG2: msp_write_dsp(client, 0x000e, 0x3000); break; } break; case MSP_MODE_FM_SAT: v4l_dbg(1, msp_debug, client, "SAT set_audmode: %s\n", modestr); switch (audmode) { case V4L2_TUNER_MODE_MONO: msp3400c_set_carrier(client, MSP_CARRIER(6.5), MSP_CARRIER(6.5)); break; case V4L2_TUNER_MODE_STEREO: case 
V4L2_TUNER_MODE_LANG1_LANG2: msp3400c_set_carrier(client, MSP_CARRIER(7.2), MSP_CARRIER(7.02)); break; case V4L2_TUNER_MODE_LANG1: msp3400c_set_carrier(client, MSP_CARRIER(7.38), MSP_CARRIER(7.02)); break; case V4L2_TUNER_MODE_LANG2: msp3400c_set_carrier(client, MSP_CARRIER(7.38), MSP_CARRIER(7.02)); break; } break; case MSP_MODE_FM_NICAM1: case MSP_MODE_FM_NICAM2: case MSP_MODE_AM_NICAM: v4l_dbg(1, msp_debug, client, "NICAM set_audmode: %s\n", modestr); if (state->nicam_on) src = 0x0100; /* NICAM */ break; case MSP_MODE_BTSC: v4l_dbg(1, msp_debug, client, "BTSC set_audmode: %s\n", modestr); break; case MSP_MODE_EXTERN: v4l_dbg(1, msp_debug, client, "extern set_audmode: %s\n", modestr); src = 0x0200; /* SCART */ break; case MSP_MODE_FM_RADIO: v4l_dbg(1, msp_debug, client, "FM-Radio set_audmode: %s\n", modestr); break; default: v4l_dbg(1, msp_debug, client, "mono set_audmode\n"); return; } /* switch audio */ v4l_dbg(1, msp_debug, client, "set audmode %d\n", audmode); switch (audmode) { case V4L2_TUNER_MODE_STEREO: case V4L2_TUNER_MODE_LANG1_LANG2: src |= 0x0020; break; case V4L2_TUNER_MODE_MONO: if (state->mode == MSP_MODE_AM_NICAM) { v4l_dbg(1, msp_debug, client, "switching to AM mono\n"); /* AM mono decoding is handled by tuner, not MSP chip */ /* SCART switching control register */ msp_set_scart(client, SCART_MONO, 0); src = 0x0200; break; } if (state->rxsubchans & V4L2_TUNER_SUB_STEREO) src = 0x0030; break; case V4L2_TUNER_MODE_LANG1: break; case V4L2_TUNER_MODE_LANG2: src |= 0x0010; break; } v4l_dbg(1, msp_debug, client, "set_audmode final source/matrix = 0x%x\n", src); msp_set_source(client, src); } static void msp3400c_print_mode(struct i2c_client *client) { struct msp_state *state = to_state(i2c_get_clientdata(client)); if (state->main == state->second) v4l_dbg(1, msp_debug, client, "mono sound carrier: %d.%03d MHz\n", state->main / 910000, (state->main / 910) % 1000); else v4l_dbg(1, msp_debug, client, "main sound carrier: %d.%03d MHz\n", state->main / 
910000, (state->main / 910) % 1000); if (state->mode == MSP_MODE_FM_NICAM1 || state->mode == MSP_MODE_FM_NICAM2) v4l_dbg(1, msp_debug, client, "NICAM/FM carrier : %d.%03d MHz\n", state->second / 910000, (state->second/910) % 1000); if (state->mode == MSP_MODE_AM_NICAM) v4l_dbg(1, msp_debug, client, "NICAM/AM carrier : %d.%03d MHz\n", state->second / 910000, (state->second / 910) % 1000); if (state->mode == MSP_MODE_FM_TERRA && state->main != state->second) { v4l_dbg(1, msp_debug, client, "FM-stereo carrier : %d.%03d MHz\n", state->second / 910000, (state->second / 910) % 1000); } } /* ----------------------------------------------------------------------- */ static int msp3400c_detect_stereo(struct i2c_client *client) { struct msp_state *state = to_state(i2c_get_clientdata(client)); int val; int rxsubchans = state->rxsubchans; int newnicam = state->nicam_on; int update = 0; switch (state->mode) { case MSP_MODE_FM_TERRA: val = msp_read_dsp(client, 0x18); if (val > 32767) val -= 65536; v4l_dbg(2, msp_debug, client, "stereo detect register: %d\n", val); if (val > 8192) { rxsubchans = V4L2_TUNER_SUB_STEREO; } else if (val < -4096) { rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; } else { rxsubchans = V4L2_TUNER_SUB_MONO; } newnicam = 0; break; case MSP_MODE_FM_NICAM1: case MSP_MODE_FM_NICAM2: case MSP_MODE_AM_NICAM: val = msp_read_dem(client, 0x23); v4l_dbg(2, msp_debug, client, "nicam sync=%d, mode=%d\n", val & 1, (val & 0x1e) >> 1); if (val & 1) { /* nicam synced */ switch ((val & 0x1e) >> 1) { case 0: case 8: rxsubchans = V4L2_TUNER_SUB_STEREO; break; case 1: case 9: rxsubchans = V4L2_TUNER_SUB_MONO; break; case 2: case 10: rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; break; default: rxsubchans = V4L2_TUNER_SUB_MONO; break; } newnicam = 1; } else { newnicam = 0; rxsubchans = V4L2_TUNER_SUB_MONO; } break; } if (rxsubchans != state->rxsubchans) { update = 1; v4l_dbg(1, msp_debug, client, "watch: rxsubchans %02x => %02x\n", state->rxsubchans, 
rxsubchans); state->rxsubchans = rxsubchans; } if (newnicam != state->nicam_on) { update = 1; v4l_dbg(1, msp_debug, client, "watch: nicam %d => %d\n", state->nicam_on, newnicam); state->nicam_on = newnicam; } return update; } /* * A kernel thread for msp3400 control -- we don't want to block the * in the ioctl while doing the sound carrier & stereo detect */ /* stereo/multilang monitoring */ static void watch_stereo(struct i2c_client *client) { struct msp_state *state = to_state(i2c_get_clientdata(client)); if (msp_detect_stereo(client)) msp_set_audmode(client); if (msp_once) state->watch_stereo = 0; } int msp3400c_thread(void *data) { struct i2c_client *client = data; struct msp_state *state = to_state(i2c_get_clientdata(client)); struct msp3400c_carrier_detect *cd; int count, max1, max2, val1, val2, val, i; v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n"); state->detected_std = V4L2_STD_ALL; set_freezable(); for (;;) { v4l_dbg(2, msp_debug, client, "msp3400 thread: sleep\n"); msp_sleep(state, -1); v4l_dbg(2, msp_debug, client, "msp3400 thread: wakeup\n"); restart: v4l_dbg(2, msp_debug, client, "thread: restart scan\n"); state->restart = 0; if (kthread_should_stop()) break; if (state->radio || MSP_MODE_EXTERN == state->mode) { /* no carrier scan, just unmute */ v4l_dbg(1, msp_debug, client, "thread: no carrier scan\n"); state->scan_in_progress = 0; msp_update_volume(state); continue; } /* mute audio */ state->scan_in_progress = 1; msp_update_volume(state); msp3400c_set_mode(client, MSP_MODE_AM_DETECT); val1 = val2 = 0; max1 = max2 = -1; state->watch_stereo = 0; state->nicam_on = 0; /* wait for tuner to settle down after a channel change */ if (msp_sleep(state, 200)) goto restart; /* carrier detect pass #1 -- main carrier */ cd = msp3400c_carrier_detect_main; count = ARRAY_SIZE(msp3400c_carrier_detect_main); if (msp_amsound && (state->v4l2_std & V4L2_STD_SECAM)) { /* autodetect doesn't work well with AM ... 
*/ max1 = 3; count = 0; v4l_dbg(1, msp_debug, client, "AM sound override\n"); } for (i = 0; i < count; i++) { msp3400c_set_carrier(client, cd[i].cdo, cd[i].cdo); if (msp_sleep(state, 100)) goto restart; val = msp_read_dsp(client, 0x1b); if (val > 32767) val -= 65536; if (val1 < val) val1 = val, max1 = i; v4l_dbg(1, msp_debug, client, "carrier1 val: %5d / %s\n", val, cd[i].name); } /* carrier detect pass #2 -- second (stereo) carrier */ switch (max1) { case 1: /* 5.5 */ cd = msp3400c_carrier_detect_55; count = ARRAY_SIZE(msp3400c_carrier_detect_55); break; case 3: /* 6.5 */ cd = msp3400c_carrier_detect_65; count = ARRAY_SIZE(msp3400c_carrier_detect_65); break; case 0: /* 4.5 */ case 2: /* 6.0 */ default: cd = NULL; count = 0; break; } if (msp_amsound && (state->v4l2_std & V4L2_STD_SECAM)) { /* autodetect doesn't work well with AM ... */ cd = NULL; count = 0; max2 = 0; } for (i = 0; i < count; i++) { msp3400c_set_carrier(client, cd[i].cdo, cd[i].cdo); if (msp_sleep(state, 100)) goto restart; val = msp_read_dsp(client, 0x1b); if (val > 32767) val -= 65536; if (val2 < val) val2 = val, max2 = i; v4l_dbg(1, msp_debug, client, "carrier2 val: %5d / %s\n", val, cd[i].name); } /* program the msp3400 according to the results */ state->main = msp3400c_carrier_detect_main[max1].cdo; switch (max1) { case 1: /* 5.5 */ state->detected_std = V4L2_STD_BG | V4L2_STD_PAL_H; if (max2 == 0) { /* B/G FM-stereo */ state->second = msp3400c_carrier_detect_55[max2].cdo; msp3400c_set_mode(client, MSP_MODE_FM_TERRA); state->watch_stereo = 1; } else if (max2 == 1 && state->has_nicam) { /* B/G NICAM */ state->second = msp3400c_carrier_detect_55[max2].cdo; msp3400c_set_mode(client, MSP_MODE_FM_NICAM1); state->nicam_on = 1; state->watch_stereo = 1; } else { goto no_second; } break; case 2: /* 6.0 */ /* PAL I NICAM */ state->detected_std = V4L2_STD_PAL_I; state->second = MSP_CARRIER(6.552); msp3400c_set_mode(client, MSP_MODE_FM_NICAM2); state->nicam_on = 1; state->watch_stereo = 1; break; case 3: 
/* 6.5 */ if (max2 == 1 || max2 == 2) { /* D/K FM-stereo */ state->second = msp3400c_carrier_detect_65[max2].cdo; msp3400c_set_mode(client, MSP_MODE_FM_TERRA); state->watch_stereo = 1; state->detected_std = V4L2_STD_DK; } else if (max2 == 0 && (state->v4l2_std & V4L2_STD_SECAM)) { /* L NICAM or AM-mono */ state->second = msp3400c_carrier_detect_65[max2].cdo; msp3400c_set_mode(client, MSP_MODE_AM_NICAM); state->watch_stereo = 1; state->detected_std = V4L2_STD_L; } else if (max2 == 0 && state->has_nicam) { /* D/K NICAM */ state->second = msp3400c_carrier_detect_65[max2].cdo; msp3400c_set_mode(client, MSP_MODE_FM_NICAM1); state->nicam_on = 1; state->watch_stereo = 1; state->detected_std = V4L2_STD_DK; } else { goto no_second; } break; case 0: /* 4.5 */ state->detected_std = V4L2_STD_MN; default: no_second: state->second = msp3400c_carrier_detect_main[max1].cdo; msp3400c_set_mode(client, MSP_MODE_FM_TERRA); break; } msp3400c_set_carrier(client, state->second, state->main); /* unmute */ state->scan_in_progress = 0; msp3400c_set_audmode(client); msp_update_volume(state); if (msp_debug) msp3400c_print_mode(client); /* monitor tv audio mode, the first time don't wait so long to get a quick stereo/bilingual result */ count = 3; while (state->watch_stereo) { if (msp_sleep(state, count ? 
1000 : 5000)) goto restart; if (count) count--; watch_stereo(client); } } v4l_dbg(1, msp_debug, client, "thread: exit\n"); return 0; } int msp3410d_thread(void *data) { struct i2c_client *client = data; struct msp_state *state = to_state(i2c_get_clientdata(client)); int val, i, std, count; v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n"); state->detected_std = V4L2_STD_ALL; set_freezable(); for (;;) { v4l_dbg(2, msp_debug, client, "msp3410 thread: sleep\n"); msp_sleep(state, -1); v4l_dbg(2, msp_debug, client, "msp3410 thread: wakeup\n"); restart: v4l_dbg(2, msp_debug, client, "thread: restart scan\n"); state->restart = 0; if (kthread_should_stop()) break; if (state->mode == MSP_MODE_EXTERN) { /* no carrier scan needed, just unmute */ v4l_dbg(1, msp_debug, client, "thread: no carrier scan\n"); state->scan_in_progress = 0; msp_update_volume(state); continue; } /* mute audio */ state->scan_in_progress = 1; msp_update_volume(state); /* start autodetect. Note: autodetect is not supported for NTSC-M and radio, hence we force the standard in those cases. */ if (state->radio) std = 0x40; else std = (state->v4l2_std & V4L2_STD_NTSC) ? 
0x20 : 1; state->watch_stereo = 0; state->nicam_on = 0; /* wait for tuner to settle down after a channel change */ if (msp_sleep(state, 200)) goto restart; if (msp_debug) v4l_dbg(2, msp_debug, client, "setting standard: %s (0x%04x)\n", msp_standard_std_name(std), std); if (std != 1) { /* programmed some specific mode */ val = std; } else { /* triggered autodetect */ msp_write_dem(client, 0x20, std); for (;;) { if (msp_sleep(state, 100)) goto restart; /* check results */ val = msp_read_dem(client, 0x7e); if (val < 0x07ff) break; v4l_dbg(2, msp_debug, client, "detection still in progress\n"); } } for (i = 0; msp_stdlist[i].name != NULL; i++) if (msp_stdlist[i].retval == val) break; v4l_dbg(1, msp_debug, client, "current standard: %s (0x%04x)\n", msp_standard_std_name(val), val); state->main = msp_stdlist[i].main; state->second = msp_stdlist[i].second; state->std = val; state->rxsubchans = V4L2_TUNER_SUB_MONO; if (msp_amsound && !state->radio && (state->v4l2_std & V4L2_STD_SECAM) && (val != 0x0009)) { /* autodetection has failed, let backup */ v4l_dbg(1, msp_debug, client, "autodetection failed," " switching to backup standard: %s (0x%04x)\n", msp_stdlist[8].name ? 
msp_stdlist[8].name : "unknown", val); state->std = val = 0x0009; msp_write_dem(client, 0x20, val); } else { state->detected_std = msp_standard_std(state->std); } /* set stereo */ switch (val) { case 0x0008: /* B/G NICAM */ case 0x000a: /* I NICAM */ case 0x000b: /* D/K NICAM */ if (val == 0x000a) state->mode = MSP_MODE_FM_NICAM2; else state->mode = MSP_MODE_FM_NICAM1; /* just turn on stereo */ state->nicam_on = 1; state->watch_stereo = 1; break; case 0x0009: state->mode = MSP_MODE_AM_NICAM; state->nicam_on = 1; state->watch_stereo = 1; break; case 0x0020: /* BTSC */ /* The pre-'G' models only have BTSC-mono */ state->mode = MSP_MODE_BTSC; break; case 0x0040: /* FM radio */ state->mode = MSP_MODE_FM_RADIO; state->rxsubchans = V4L2_TUNER_SUB_STEREO; /* not needed in theory if we have radio, but short programming enables carrier mute */ msp3400c_set_mode(client, MSP_MODE_FM_RADIO); msp3400c_set_carrier(client, MSP_CARRIER(10.7), MSP_CARRIER(10.7)); break; case 0x0002: case 0x0003: case 0x0004: case 0x0005: state->mode = MSP_MODE_FM_TERRA; state->watch_stereo = 1; break; } /* set various prescales */ msp_write_dsp(client, 0x0d, 0x1900); /* scart */ msp_write_dsp(client, 0x0e, 0x3000); /* FM */ if (state->has_nicam) msp_write_dsp(client, 0x10, 0x5a00); /* nicam */ if (state->has_i2s_conf) msp_write_dem(client, 0x40, state->i2s_mode); /* unmute */ msp3400c_set_audmode(client); state->scan_in_progress = 0; msp_update_volume(state); /* monitor tv audio mode, the first time don't wait so long to get a quick stereo/bilingual result */ count = 3; while (state->watch_stereo) { if (msp_sleep(state, count ? 
1000 : 5000)) goto restart; if (count) count--; watch_stereo(client); } } v4l_dbg(1, msp_debug, client, "thread: exit\n"); return 0; } /* ----------------------------------------------------------------------- */ /* msp34xxG + (autoselect no-thread) * this one uses both automatic standard detection and automatic sound * select which are available in the newer G versions * struct msp: only norm, acb and source are really used in this mode */ static int msp34xxg_modus(struct i2c_client *client) { struct msp_state *state = to_state(i2c_get_clientdata(client)); if (state->radio) { v4l_dbg(1, msp_debug, client, "selected radio modus\n"); return 0x0001; } if (state->v4l2_std == V4L2_STD_NTSC_M_JP) { v4l_dbg(1, msp_debug, client, "selected M (EIA-J) modus\n"); return 0x4001; } if (state->v4l2_std == V4L2_STD_NTSC_M_KR) { v4l_dbg(1, msp_debug, client, "selected M (A2) modus\n"); return 0x0001; } if (state->v4l2_std == V4L2_STD_SECAM_L) { v4l_dbg(1, msp_debug, client, "selected SECAM-L modus\n"); return 0x6001; } if (state->v4l2_std & V4L2_STD_MN) { v4l_dbg(1, msp_debug, client, "selected M (BTSC) modus\n"); return 0x2001; } return 0x7001; } static void msp34xxg_set_source(struct i2c_client *client, u16 reg, int in) { struct msp_state *state = to_state(i2c_get_clientdata(client)); int source, matrix; switch (state->audmode) { case V4L2_TUNER_MODE_MONO: source = 0; /* mono only */ matrix = 0x30; break; case V4L2_TUNER_MODE_LANG2: source = 4; /* stereo or B */ matrix = 0x10; break; case V4L2_TUNER_MODE_LANG1_LANG2: source = 1; /* stereo or A|B */ matrix = 0x20; break; case V4L2_TUNER_MODE_LANG1: source = 3; /* stereo or A */ matrix = 0x00; break; case V4L2_TUNER_MODE_STEREO: default: source = 3; /* stereo or A */ matrix = 0x20; break; } if (in == MSP_DSP_IN_TUNER) source = (source << 8) | 0x20; /* the msp34x2g puts the MAIN_AVC, MAIN and AUX sources in 12, 13, 14 instead of 11, 12, 13. So we add one for that msp version. 
*/ else if (in >= MSP_DSP_IN_MAIN_AVC && state->has_dolby_pro_logic) source = ((in + 1) << 8) | matrix; else source = (in << 8) | matrix; v4l_dbg(1, msp_debug, client, "set source to %d (0x%x) for output %02x\n", in, source, reg); msp_write_dsp(client, reg, source); } static void msp34xxg_set_sources(struct i2c_client *client) { struct msp_state *state = to_state(i2c_get_clientdata(client)); u32 in = state->route_in; msp34xxg_set_source(client, 0x0008, (in >> 4) & 0xf); /* quasi-peak detector is set to same input as the loudspeaker (MAIN) */ msp34xxg_set_source(client, 0x000c, (in >> 4) & 0xf); msp34xxg_set_source(client, 0x0009, (in >> 8) & 0xf); msp34xxg_set_source(client, 0x000a, (in >> 12) & 0xf); if (state->has_scart2_out) msp34xxg_set_source(client, 0x0041, (in >> 16) & 0xf); msp34xxg_set_source(client, 0x000b, (in >> 20) & 0xf); } /* (re-)initialize the msp34xxg */ static void msp34xxg_reset(struct i2c_client *client) { struct msp_state *state = to_state(i2c_get_clientdata(client)); int tuner = (state->route_in >> 3) & 1; int modus; /* initialize std to 1 (autodetect) to signal that no standard is selected yet. */ state->std = 1; msp_reset(client); if (state->has_i2s_conf) msp_write_dem(client, 0x40, state->i2s_mode); /* step-by-step initialisation, as described in the manual */ modus = msp34xxg_modus(client); modus |= tuner ? 0x100 : 0; msp_write_dem(client, 0x30, modus); /* write the dsps that may have an influence on standard/audio autodetection right now */ msp34xxg_set_sources(client); msp_write_dsp(client, 0x0d, 0x1900); /* scart */ msp_write_dsp(client, 0x0e, 0x3000); /* FM */ if (state->has_nicam) msp_write_dsp(client, 0x10, 0x5a00); /* nicam */ /* set identification threshold. Personally, I * I set it to a higher value than the default * of 0x190 to ignore noisy stereo signals. * this needs tuning. (recommended range 0x00a0-0x03c0) * 0x7f0 = forced mono mode * * a2 threshold for stereo/bilingual. 
* Note: this register is part of the Manual/Compatibility mode. * It is supported by all 'G'-family chips. */ msp_write_dem(client, 0x22, msp_stereo_thresh); } int msp34xxg_thread(void *data) { struct i2c_client *client = data; struct msp_state *state = to_state(i2c_get_clientdata(client)); int val, i; v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n"); state->detected_std = V4L2_STD_ALL; set_freezable(); for (;;) { v4l_dbg(2, msp_debug, client, "msp34xxg thread: sleep\n"); msp_sleep(state, -1); v4l_dbg(2, msp_debug, client, "msp34xxg thread: wakeup\n"); restart: v4l_dbg(1, msp_debug, client, "thread: restart scan\n"); state->restart = 0; if (kthread_should_stop()) break; if (state->mode == MSP_MODE_EXTERN) { /* no carrier scan needed, just unmute */ v4l_dbg(1, msp_debug, client, "thread: no carrier scan\n"); state->scan_in_progress = 0; msp_update_volume(state); continue; } /* setup the chip*/ msp34xxg_reset(client); state->std = state->radio ? 0x40 : (state->force_btsc && msp_standard == 1) ? 32 : msp_standard; msp_write_dem(client, 0x20, state->std); /* start autodetect */ if (state->std != 1) goto unmute; /* watch autodetect */ v4l_dbg(1, msp_debug, client, "started autodetect, waiting for result\n"); for (i = 0; i < 10; i++) { if (msp_sleep(state, 100)) goto restart; /* check results */ val = msp_read_dem(client, 0x7e); if (val < 0x07ff) { state->std = val; break; } v4l_dbg(2, msp_debug, client, "detection still in progress\n"); } if (state->std == 1) { v4l_dbg(1, msp_debug, client, "detection still in progress after 10 tries. 
giving up.\n"); continue; } unmute: v4l_dbg(1, msp_debug, client, "detected standard: %s (0x%04x)\n", msp_standard_std_name(state->std), state->std); state->detected_std = msp_standard_std(state->std); if (state->std == 9) { /* AM NICAM mode */ msp_write_dsp(client, 0x0e, 0x7c00); } /* unmute: dispatch sound to scart output, set scart volume */ msp_update_volume(state); /* restore ACB */ if (msp_write_dsp(client, 0x13, state->acb)) return -1; /* the periodic stereo/SAP check is only relevant for the 0x20 standard (BTSC) */ if (state->std != 0x20) continue; state->watch_stereo = 1; /* monitor tv audio mode, the first time don't wait in order to get a quick stereo/SAP update */ watch_stereo(client); while (state->watch_stereo) { watch_stereo(client); if (msp_sleep(state, 5000)) goto restart; } } v4l_dbg(1, msp_debug, client, "thread: exit\n"); return 0; } static int msp34xxg_detect_stereo(struct i2c_client *client) { struct msp_state *state = to_state(i2c_get_clientdata(client)); int status = msp_read_dem(client, 0x0200); int is_bilingual = status & 0x100; int is_stereo = status & 0x40; int oldrx = state->rxsubchans; if (state->mode == MSP_MODE_EXTERN) return 0; state->rxsubchans = 0; if (is_stereo) state->rxsubchans = V4L2_TUNER_SUB_STEREO; else state->rxsubchans = V4L2_TUNER_SUB_MONO; if (is_bilingual) { if (state->std == 0x20) state->rxsubchans |= V4L2_TUNER_SUB_SAP; else state->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; } v4l_dbg(1, msp_debug, client, "status=0x%x, stereo=%d, bilingual=%d -> rxsubchans=%d\n", status, is_stereo, is_bilingual, state->rxsubchans); return (oldrx != state->rxsubchans); } static void msp34xxg_set_audmode(struct i2c_client *client) { struct msp_state *state = to_state(i2c_get_clientdata(client)); if (state->std == 0x20) { if ((state->rxsubchans & V4L2_TUNER_SUB_SAP) && (state->audmode == V4L2_TUNER_MODE_LANG1_LANG2 || state->audmode == V4L2_TUNER_MODE_LANG2)) { msp_write_dem(client, 0x20, 0x21); } else { 
msp_write_dem(client, 0x20, 0x20); } } msp34xxg_set_sources(client); } void msp_set_audmode(struct i2c_client *client) { struct msp_state *state = to_state(i2c_get_clientdata(client)); switch (state->opmode) { case OPMODE_MANUAL: case OPMODE_AUTODETECT: msp3400c_set_audmode(client); break; case OPMODE_AUTOSELECT: msp34xxg_set_audmode(client); break; } } int msp_detect_stereo(struct i2c_client *client) { struct msp_state *state = to_state(i2c_get_clientdata(client)); switch (state->opmode) { case OPMODE_MANUAL: case OPMODE_AUTODETECT: return msp3400c_detect_stereo(client); case OPMODE_AUTOSELECT: return msp34xxg_detect_stereo(client); } return 0; }
gpl-2.0
DecimalMan/dkp
kernel/rtmutex.c
37
26859
/* * RT-Mutexes: simple blocking mutual exclusion locks with PI support * * started by Ingo Molnar and Thomas Gleixner. * * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt * Copyright (C) 2006 Esben Nielsen * * See Documentation/rt-mutex-design.txt for details. */ #include <linux/spinlock.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/timer.h> #include "rtmutex_common.h" /* * lock->owner state tracking: * * lock->owner holds the task_struct pointer of the owner. Bit 0 * is used to keep track of the "lock has waiters" state. * * owner bit0 * NULL 0 lock is free (fast acquire possible) * NULL 1 lock is free and has waiters and the top waiter * is going to take the lock* * taskpointer 0 lock is held (fast release possible) * taskpointer 1 lock is held and has waiters** * * The fast atomic compare exchange based acquire and release is only * possible when bit 0 of lock->owner is 0. * * (*) It also can be a transitional state when grabbing the lock * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock, * we need to set the bit0 before looking at the lock, and the owner may be * NULL in this small time, hence this can be a transitional state. * * (**) There is a small time when bit 0 is set but there are no * waiters. This can happen when grabbing the lock in the slow path. * To prevent a cmpxchg of the owner releasing the lock, we need to * set this bit before looking at the lock. 
*/ static void rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner) { unsigned long val = (unsigned long)owner; if (rt_mutex_has_waiters(lock)) val |= RT_MUTEX_HAS_WAITERS; lock->owner = (struct task_struct *)val; } static inline void clear_rt_mutex_waiters(struct rt_mutex *lock) { lock->owner = (struct task_struct *) ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); } static void fixup_rt_mutex_waiters(struct rt_mutex *lock) { if (!rt_mutex_has_waiters(lock)) clear_rt_mutex_waiters(lock); } /* * We can speed up the acquire/release, if there's no debugging state to be * set up. */ #ifndef CONFIG_DEBUG_RT_MUTEXES # define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c) static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) { unsigned long owner, *p = (unsigned long *) &lock->owner; do { owner = *p; } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner); } #else # define rt_mutex_cmpxchg(l,c,n) (0) static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) { lock->owner = (struct task_struct *) ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); } #endif /* * Calculate task priority from the waiter list priority * * Return task->normal_prio when the waiter list is empty or when * the waiter is not allowed to do priority boosting */ int rt_mutex_getprio(struct task_struct *task) { if (likely(!task_has_pi_waiters(task))) return task->normal_prio; return min(task_top_pi_waiter(task)->pi_list_entry.prio, task->normal_prio); } /* * Adjust the priority of a task, after its pi_waiters got modified. * * This can be both boosting and unboosting. task->pi_lock must be held. */ static void __rt_mutex_adjust_prio(struct task_struct *task) { int prio = rt_mutex_getprio(task); if (task->prio != prio) rt_mutex_setprio(task, prio); } /* * Adjust task priority (undo boosting). Called from the exit path of * rt_mutex_slowunlock() and rt_mutex_slowlock(). 
* * (Note: We do this outside of the protection of lock->wait_lock to * allow the lock to be taken while or before we readjust the priority * of task. We do not use the spin_xx_mutex() variants here as we are * outside of the debug path.) */ static void rt_mutex_adjust_prio(struct task_struct *task) { unsigned long flags; raw_spin_lock_irqsave(&task->pi_lock, flags); __rt_mutex_adjust_prio(task); raw_spin_unlock_irqrestore(&task->pi_lock, flags); } /* * Max number of times we'll walk the boosting chain: */ int max_lock_depth = 1024; /* * Adjust the priority chain. Also used for deadlock detection. * Decreases task's usage by one - may thus free the task. * Returns 0 or -EDEADLK. */ static int rt_mutex_adjust_prio_chain(struct task_struct *task, int deadlock_detect, struct rt_mutex *orig_lock, struct rt_mutex_waiter *orig_waiter, struct task_struct *top_task) { struct rt_mutex *lock; struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; int detect_deadlock, ret = 0, depth = 0; unsigned long flags; detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter, deadlock_detect); /* * The (de)boosting is a step by step approach with a lot of * pitfalls. We want this to be preemptible and we want hold a * maximum of two locks per step. So we have to check * carefully whether things change under us. */ again: if (++depth > max_lock_depth) { static int prev_max; /* * Print this only once. If the admin changes the limit, * print a new message when reaching the limit again. */ if (prev_max != max_lock_depth) { prev_max = max_lock_depth; printk(KERN_WARNING "Maximum lock depth %d reached " "task: %s (%d)\n", max_lock_depth, top_task->comm, task_pid_nr(top_task)); } put_task_struct(task); return deadlock_detect ? -EDEADLK : 0; } retry: /* * Task can not go away as we did a get_task() before ! 
*/ raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; /* * Check whether the end of the boosting chain has been * reached or the state of the chain has changed while we * dropped the locks. */ if (!waiter) goto out_unlock_pi; /* * Check the orig_waiter state. After we dropped the locks, * the previous owner of the lock might have released the lock. */ if (orig_waiter && !rt_mutex_owner(orig_lock)) goto out_unlock_pi; /* * Drop out, when the task has no waiters. Note, * top_waiter can be NULL, when we are in the deboosting * mode! */ if (top_waiter && (!task_has_pi_waiters(task) || top_waiter != task_top_pi_waiter(task))) goto out_unlock_pi; /* * When deadlock detection is off then we check, if further * priority adjustment is necessary. */ if (!detect_deadlock && waiter->list_entry.prio == task->prio) goto out_unlock_pi; lock = waiter->lock; if (!raw_spin_trylock(&lock->wait_lock)) { raw_spin_unlock_irqrestore(&task->pi_lock, flags); cpu_relax(); goto retry; } /* Deadlock detection */ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock); raw_spin_unlock(&lock->wait_lock); ret = deadlock_detect ? -EDEADLK : 0; goto out_unlock_pi; } top_waiter = rt_mutex_top_waiter(lock); /* Requeue the waiter */ plist_del(&waiter->list_entry, &lock->wait_list); waiter->list_entry.prio = task->prio; plist_add(&waiter->list_entry, &lock->wait_list); /* Release the task */ raw_spin_unlock_irqrestore(&task->pi_lock, flags); if (!rt_mutex_owner(lock)) { /* * If the requeue above changed the top waiter, then we need * to wake the new top waiter up to try to get the lock. 
*/ if (top_waiter != rt_mutex_top_waiter(lock)) wake_up_process(rt_mutex_top_waiter(lock)->task); raw_spin_unlock(&lock->wait_lock); goto out_put_task; } put_task_struct(task); /* Grab the next task */ task = rt_mutex_owner(lock); get_task_struct(task); raw_spin_lock_irqsave(&task->pi_lock, flags); if (waiter == rt_mutex_top_waiter(lock)) { /* Boost the owner */ plist_del(&top_waiter->pi_list_entry, &task->pi_waiters); waiter->pi_list_entry.prio = waiter->list_entry.prio; plist_add(&waiter->pi_list_entry, &task->pi_waiters); __rt_mutex_adjust_prio(task); } else if (top_waiter == waiter) { /* Deboost the owner */ plist_del(&waiter->pi_list_entry, &task->pi_waiters); waiter = rt_mutex_top_waiter(lock); waiter->pi_list_entry.prio = waiter->list_entry.prio; plist_add(&waiter->pi_list_entry, &task->pi_waiters); __rt_mutex_adjust_prio(task); } raw_spin_unlock_irqrestore(&task->pi_lock, flags); top_waiter = rt_mutex_top_waiter(lock); raw_spin_unlock(&lock->wait_lock); if (!detect_deadlock && waiter != top_waiter) goto out_put_task; goto again; out_unlock_pi: raw_spin_unlock_irqrestore(&task->pi_lock, flags); out_put_task: put_task_struct(task); return ret; } /* * Try to take an rt-mutex * * Must be called with lock->wait_lock held. * * @lock: the lock to be acquired. * @task: the task which wants to acquire the lock * @waiter: the waiter that is queued to the lock's wait list. (could be NULL) */ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, struct rt_mutex_waiter *waiter) { /* * We have to be careful here if the atomic speedups are * enabled, such that, when * - no other waiter is on the lock * - the lock has been released since we did the cmpxchg * the lock can be released or taken while we are doing the * checks and marking the lock with RT_MUTEX_HAS_WAITERS. * * The atomic acquire/release aware variant of * mark_rt_mutex_waiters uses a cmpxchg loop. 
After setting * the WAITERS bit, the atomic release / acquire can not * happen anymore and lock->wait_lock protects us from the * non-atomic case. * * Note, that this might set lock->owner = * RT_MUTEX_HAS_WAITERS in the case the lock is not contended * any more. This is fixed up when we take the ownership. * This is the transitional state explained at the top of this file. */ mark_rt_mutex_waiters(lock); if (rt_mutex_owner(lock)) return 0; /* * It will get the lock because of one of these conditions: * 1) there is no waiter * 2) higher priority than waiters * 3) it is top waiter */ if (rt_mutex_has_waiters(lock)) { if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) { if (!waiter || waiter != rt_mutex_top_waiter(lock)) return 0; } } if (waiter || rt_mutex_has_waiters(lock)) { unsigned long flags; struct rt_mutex_waiter *top; raw_spin_lock_irqsave(&task->pi_lock, flags); /* remove the queued waiter. */ if (waiter) { plist_del(&waiter->list_entry, &lock->wait_list); task->pi_blocked_on = NULL; } /* * We have to enqueue the top waiter(if it exists) into * task->pi_waiters list. */ if (rt_mutex_has_waiters(lock)) { top = rt_mutex_top_waiter(lock); top->pi_list_entry.prio = top->list_entry.prio; plist_add(&top->pi_list_entry, &task->pi_waiters); } raw_spin_unlock_irqrestore(&task->pi_lock, flags); } /* We got the lock. */ debug_rt_mutex_lock(lock); rt_mutex_set_owner(lock, task); rt_mutex_deadlock_account_lock(lock, task); return 1; } /* * Task blocks on lock. * * Prepare waiter and propagate pi chain * * This must be called with lock->wait_lock held. 
*/ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct task_struct *task, int detect_deadlock) { struct task_struct *owner = rt_mutex_owner(lock); struct rt_mutex_waiter *top_waiter = waiter; unsigned long flags; int chain_walk = 0, res; raw_spin_lock_irqsave(&task->pi_lock, flags); __rt_mutex_adjust_prio(task); waiter->task = task; waiter->lock = lock; plist_node_init(&waiter->list_entry, task->prio); plist_node_init(&waiter->pi_list_entry, task->prio); /* Get the top priority waiter on the lock */ if (rt_mutex_has_waiters(lock)) top_waiter = rt_mutex_top_waiter(lock); plist_add(&waiter->list_entry, &lock->wait_list); task->pi_blocked_on = waiter; raw_spin_unlock_irqrestore(&task->pi_lock, flags); if (!owner) return 0; if (waiter == rt_mutex_top_waiter(lock)) { raw_spin_lock_irqsave(&owner->pi_lock, flags); plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters); plist_add(&waiter->pi_list_entry, &owner->pi_waiters); __rt_mutex_adjust_prio(owner); if (owner->pi_blocked_on) chain_walk = 1; raw_spin_unlock_irqrestore(&owner->pi_lock, flags); } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) chain_walk = 1; if (!chain_walk) return 0; /* * The owner can't disappear while holding a lock, * so the owner struct is protected by wait_lock. * Gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(owner); raw_spin_unlock(&lock->wait_lock); res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, task); raw_spin_lock(&lock->wait_lock); return res; } /* * Wake up the next waiter on the lock. * * Remove the top waiter from the current tasks waiter list and wake it up. * * Called with lock->wait_lock held. */ static void wakeup_next_waiter(struct rt_mutex *lock) { struct rt_mutex_waiter *waiter; unsigned long flags; raw_spin_lock_irqsave(&current->pi_lock, flags); waiter = rt_mutex_top_waiter(lock); /* * Remove it from current->pi_waiters. 
We do not adjust a * possible priority boost right now. We execute wakeup in the * boosted mode and go back to normal after releasing * lock->wait_lock. */ plist_del(&waiter->pi_list_entry, &current->pi_waiters); rt_mutex_set_owner(lock, NULL); raw_spin_unlock_irqrestore(&current->pi_lock, flags); wake_up_process(waiter->task); } /* * Remove a waiter from a lock and give up * * Must be called with lock->wait_lock held and * have just failed to try_to_take_rt_mutex(). */ static void remove_waiter(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) { int first = (waiter == rt_mutex_top_waiter(lock)); struct task_struct *owner = rt_mutex_owner(lock); unsigned long flags; int chain_walk = 0; raw_spin_lock_irqsave(&current->pi_lock, flags); plist_del(&waiter->list_entry, &lock->wait_list); current->pi_blocked_on = NULL; raw_spin_unlock_irqrestore(&current->pi_lock, flags); if (!owner) return; if (first) { raw_spin_lock_irqsave(&owner->pi_lock, flags); plist_del(&waiter->pi_list_entry, &owner->pi_waiters); if (rt_mutex_has_waiters(lock)) { struct rt_mutex_waiter *next; next = rt_mutex_top_waiter(lock); plist_add(&next->pi_list_entry, &owner->pi_waiters); } __rt_mutex_adjust_prio(owner); if (owner->pi_blocked_on) chain_walk = 1; raw_spin_unlock_irqrestore(&owner->pi_lock, flags); } WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); if (!chain_walk) return; /* gets dropped in rt_mutex_adjust_prio_chain()! 
*/ get_task_struct(owner); raw_spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); raw_spin_lock(&lock->wait_lock); } /* * Recheck the pi chain, in case we got a priority setting * * Called from sched_setscheduler */ void rt_mutex_adjust_pi(struct task_struct *task) { struct rt_mutex_waiter *waiter; unsigned long flags; raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; if (!waiter || waiter->list_entry.prio == task->prio) { raw_spin_unlock_irqrestore(&task->pi_lock, flags); return; } raw_spin_unlock_irqrestore(&task->pi_lock, flags); /* gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(task); rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); } /** * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop * @lock: the rt_mutex to take * @state: the state the task should block in (TASK_INTERRUPTIBLE * or TASK_UNINTERRUPTIBLE) * @timeout: the pre-initialized and started timer, or NULL for none * @waiter: the pre-initialized rt_mutex_waiter * * lock->wait_lock must be held by the caller. */ static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, struct rt_mutex_waiter *waiter) { int ret = 0; for (;;) { /* Try to acquire the lock: */ if (try_to_take_rt_mutex(lock, current, waiter)) break; /* * TASK_INTERRUPTIBLE checks for signals and * timeout. Ignored otherwise. */ if (unlikely(state == TASK_INTERRUPTIBLE)) { /* Signal pending? 
*/ if (signal_pending(current)) ret = -EINTR; if (timeout && !timeout->task) ret = -ETIMEDOUT; if (ret) break; } raw_spin_unlock(&lock->wait_lock); debug_rt_mutex_print_deadlock(waiter); schedule_rt_mutex(lock); raw_spin_lock(&lock->wait_lock); set_current_state(state); } return ret; } /* * Slow path lock function: */ static int __sched rt_mutex_slowlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, int detect_deadlock) { struct rt_mutex_waiter waiter; int ret = 0; debug_rt_mutex_init_waiter(&waiter); raw_spin_lock(&lock->wait_lock); /* Try to acquire the lock again: */ if (try_to_take_rt_mutex(lock, current, NULL)) { raw_spin_unlock(&lock->wait_lock); return 0; } set_current_state(state); /* Setup the timer, when timeout != NULL */ if (unlikely(timeout)) { hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); if (!hrtimer_active(&timeout->timer)) timeout->task = NULL; } ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock); if (likely(!ret)) ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); set_current_state(TASK_RUNNING); if (unlikely(ret)) remove_waiter(lock, &waiter); /* * try_to_take_rt_mutex() sets the waiter bit * unconditionally. We might have to fix that up. */ fixup_rt_mutex_waiters(lock); raw_spin_unlock(&lock->wait_lock); /* Remove pending timer: */ if (unlikely(timeout)) hrtimer_cancel(&timeout->timer); debug_rt_mutex_free_waiter(&waiter); return ret; } /* * Slow path try-lock function: */ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) { int ret = 0; raw_spin_lock(&lock->wait_lock); if (likely(rt_mutex_owner(lock) != current)) { ret = try_to_take_rt_mutex(lock, current, NULL); /* * try_to_take_rt_mutex() sets the lock waiters * bit unconditionally. Clean this up. 
 */
		fixup_rt_mutex_waiters(lock);
	}

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched rt_mutex_slowunlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	/* No waiters queued: just clear the owner and we are done. */
	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		raw_spin_unlock(&lock->wait_lock);
		return;
	}

	/* Hand the lock off to the top (highest priority) waiter. */
	wakeup_next_waiter(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock,trylock,unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	/*
	 * Fast path: uncontended cmpxchg of NULL -> current. Deadlock
	 * detection forces the slow path so the full PI chain is walked.
	 */
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout,
			int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	/* Same as rt_mutex_fastlock(), but passes the caller's timeout on. */
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	/* Returns 1 on success, 0 on contention (trylock semantics). */
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	/* Fast path only succeeds when there are no waiters queued. */
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}

/**
 *
rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	/* Uninterruptible sleep, no deadlock detection on this path. */
	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
						 int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
 *			the timeout structure is provided
 *			by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a
mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	/* Poison the debug magic so later use trips the debug checks. */
	lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 * @name: lockdep/debug name for the lock (may be NULL)
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	/* Priority-sorted list of blocked waiters. */
	plist_head_init(&lock->wait_list);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be locked
 *
 * No locking.
Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	/* Lock may be free: take it immediately on @task's behalf. */
	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 1;
	}

	/* Enqueue @task as a waiter and propagate its priority. */
	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	/* Top waiter == highest priority blocked task. */
	return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, null if none. hrtimer should already have
 *			been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *			(NOTE: currently unused here; detection happened in
 *			rt_mutex_start_proxy_lock())
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter,
			       int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	set_current_state(TASK_INTERRUPTIBLE);

	/* Wait/wake/try-to-take loop; @waiter was enqueued by the starter. */
	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}
gpl-2.0
Desch/CataclysmV9
src/server/scripts/Northrend/Naxxramas/boss_thaddius.cpp
37
17912
/* * Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "SpellScript.h" #include "Player.h" #include "naxxramas.h" //Stalagg enum StalaggYells { SAY_STAL_AGGRO = 0, SAY_STAL_SLAY = 1, SAY_STAL_DEATH = 2 }; enum StalagSpells { SPELL_POWERSURGE = 28134, H_SPELL_POWERSURGE = 54529, SPELL_MAGNETIC_PULL = 28338, SPELL_STALAGG_TESLA = 28097 }; //Feugen enum FeugenYells { SAY_FEUG_AGGRO = 0, SAY_FEUG_SLAY = 1, SAY_FEUG_DEATH = 2 }; enum FeugenSpells { SPELL_STATICFIELD = 28135, H_SPELL_STATICFIELD = 54528, SPELL_FEUGEN_TESLA = 28109 }; // Thaddius DoAction enum ThaddiusActions { ACTION_FEUGEN_RESET, ACTION_FEUGEN_DIED, ACTION_STALAGG_RESET, ACTION_STALAGG_DIED }; //generic #define C_TESLA_COIL 16218 //the coils (emotes "Tesla Coil overloads!") //Thaddius enum ThaddiusYells { SAY_GREET = 0, SAY_AGGRO = 1, SAY_SLAY = 2, SAY_ELECT = 3, SAY_DEATH = 4, SAY_SCREAM = 5 }; enum ThaddiusSpells { SPELL_POLARITY_SHIFT = 28089, SPELL_BALL_LIGHTNING = 28299, SPELL_CHAIN_LIGHTNING = 28167, H_SPELL_CHAIN_LIGHTNING = 54531, SPELL_BERSERK = 27680, SPELL_POSITIVE_CHARGE = 28062, SPELL_POSITIVE_CHARGE_STACK = 29659, SPELL_NEGATIVE_CHARGE = 28085, SPELL_NEGATIVE_CHARGE_STACK = 29660, SPELL_POSITIVE_POLARITY = 28059, SPELL_NEGATIVE_POLARITY = 28084, }; enum Events { EVENT_NONE, EVENT_SHIFT, 
EVENT_CHAIN, EVENT_BERSERK, }; enum Achievement { DATA_POLARITY_SWITCH = 76047605, }; class boss_thaddius : public CreatureScript { public: boss_thaddius() : CreatureScript("boss_thaddius") { } CreatureAI* GetAI(Creature* creature) const { return new boss_thaddiusAI (creature); } struct boss_thaddiusAI : public BossAI { boss_thaddiusAI(Creature* creature) : BossAI(creature, BOSS_THADDIUS) { // init is a bit tricky because thaddius shall track the life of both adds, but not if there was a wipe // and, in particular, if there was a crash after both adds were killed (should not respawn) // Moreover, the adds may not yet be spawn. So just track down the status if mob is spawn // and each mob will send its status at reset (meaning that it is alive) checkFeugenAlive = false; if (Creature* pFeugen = me->GetCreature(*me, instance->GetData64(DATA_FEUGEN))) checkFeugenAlive = pFeugen->isAlive(); checkStalaggAlive = false; if (Creature* pStalagg = me->GetCreature(*me, instance->GetData64(DATA_STALAGG))) checkStalaggAlive = pStalagg->isAlive(); if (!checkFeugenAlive && !checkStalaggAlive) { me->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_IMMUNE_TO_PC | UNIT_FLAG_NOT_SELECTABLE | UNIT_FLAG_STUNNED); me->SetReactState(REACT_AGGRESSIVE); } else { me->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_IMMUNE_TO_PC | UNIT_FLAG_NOT_SELECTABLE | UNIT_FLAG_STUNNED); me->SetReactState(REACT_PASSIVE); } } bool checkStalaggAlive; bool checkFeugenAlive; bool polaritySwitch; uint32 uiAddsTimer; void KilledUnit(Unit* /*victim*/) { if (!(rand()%5)) Talk(SAY_SLAY); } void JustDied(Unit* /*killer*/) { _JustDied(); Talk(SAY_DEATH); } void DoAction(const int32 action) { switch (action) { case ACTION_FEUGEN_RESET: checkFeugenAlive = true; break; case ACTION_FEUGEN_DIED: checkFeugenAlive = false; break; case ACTION_STALAGG_RESET: checkStalaggAlive = true; break; case ACTION_STALAGG_DIED: checkStalaggAlive = false; break; } if (!checkFeugenAlive && !checkStalaggAlive) { me->RemoveFlag(UNIT_FIELD_FLAGS, 
UNIT_FLAG_IMMUNE_TO_PC | UNIT_FLAG_NOT_SELECTABLE | UNIT_FLAG_STUNNED); // REACT_AGGRESSIVE only reset when he takes damage. DoZoneInCombat(); } else { me->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_IMMUNE_TO_PC | UNIT_FLAG_NOT_SELECTABLE | UNIT_FLAG_STUNNED); me->SetReactState(REACT_PASSIVE); } } void EnterCombat(Unit* /*who*/) { _EnterCombat(); Talk(SAY_AGGRO); events.ScheduleEvent(EVENT_SHIFT, 30000); events.ScheduleEvent(EVENT_CHAIN, urand(10000, 20000)); events.ScheduleEvent(EVENT_BERSERK, 360000); } void DamageTaken(Unit* /*pDoneBy*/, uint32 & /*uiDamage*/) { me->SetReactState(REACT_AGGRESSIVE); } void SetData(uint32 id, uint32 data) { if (id == DATA_POLARITY_SWITCH) polaritySwitch = data ? true : false; } uint32 GetData(uint32 id) const { if (id != DATA_POLARITY_SWITCH) return 0; return uint32(polaritySwitch); } void UpdateAI(const uint32 diff) { if (checkFeugenAlive && checkStalaggAlive) uiAddsTimer = 0; if (checkStalaggAlive != checkFeugenAlive) { uiAddsTimer += diff; if (uiAddsTimer > 5000) { if (!checkStalaggAlive) { if (instance) if (Creature* pStalagg = me->GetCreature(*me, instance->GetData64(DATA_STALAGG))) pStalagg->Respawn(); } else { if (instance) if (Creature* pFeugen = me->GetCreature(*me, instance->GetData64(DATA_FEUGEN))) pFeugen->Respawn(); } } } if (!UpdateVictim()) return; events.Update(diff); if (me->HasUnitState(UNIT_STATE_CASTING)) return; while (uint32 eventId = events.ExecuteEvent()) { switch (eventId) { case EVENT_SHIFT: DoCastAOE(SPELL_POLARITY_SHIFT); events.ScheduleEvent(EVENT_SHIFT, 30000); return; case EVENT_CHAIN: DoCastVictim(RAID_MODE(SPELL_CHAIN_LIGHTNING, H_SPELL_CHAIN_LIGHTNING)); events.ScheduleEvent(EVENT_CHAIN, urand(10000, 20000)); return; case EVENT_BERSERK: DoCast(me, SPELL_BERSERK); return; } } if (events.GetTimer() > 15000 && !me->IsWithinMeleeRange(me->GetVictim())) DoCastVictim(SPELL_BALL_LIGHTNING); else DoMeleeAttackIfReady(); } }; }; class mob_stalagg : public CreatureScript { public: mob_stalagg() : 
CreatureScript("mob_stalagg") { } CreatureAI* GetAI(Creature* creature) const { return new mob_stalaggAI(creature); } struct mob_stalaggAI : public ScriptedAI { mob_stalaggAI(Creature* creature) : ScriptedAI(creature) { instance = creature->GetInstanceScript(); } InstanceScript* instance; uint32 powerSurgeTimer; uint32 magneticPullTimer; void Reset() { if (instance) if (Creature* pThaddius = me->GetCreature(*me, instance->GetData64(DATA_THADDIUS))) if (pThaddius->AI()) pThaddius->AI()->DoAction(ACTION_STALAGG_RESET); powerSurgeTimer = urand(20000, 25000); magneticPullTimer = 20000; } void KilledUnit(Unit* /*victim*/) { if (!(rand()%5)) Talk(SAY_STAL_SLAY); } void EnterCombat(Unit* /*who*/) { Talk(SAY_STAL_AGGRO); DoCast(SPELL_STALAGG_TESLA); } void JustDied(Unit* /*killer*/) { Talk(SAY_STAL_DEATH); if (instance) if (Creature* pThaddius = me->GetCreature(*me, instance->GetData64(DATA_THADDIUS))) if (pThaddius->AI()) pThaddius->AI()->DoAction(ACTION_STALAGG_DIED); } void UpdateAI(const uint32 uiDiff) { if (!UpdateVictim()) return; if (magneticPullTimer <= uiDiff) { if (Creature* pFeugen = me->GetCreature(*me, instance->GetData64(DATA_FEUGEN))) { Unit* pStalaggVictim = me->GetVictim(); Unit* pFeugenVictim = pFeugen->GetVictim(); if (pFeugenVictim && pStalaggVictim) { // magnetic pull is not working. So just jump. 
// reset aggro to be sure that feugen will not follow the jump pFeugen->getThreatManager().modifyThreatPercent(pFeugenVictim, -100); pFeugenVictim->JumpTo(me, 0.3f); me->getThreatManager().modifyThreatPercent(pStalaggVictim, -100); pStalaggVictim->JumpTo(pFeugen, 0.3f); } } magneticPullTimer = 20000; } else magneticPullTimer -= uiDiff; if (powerSurgeTimer <= uiDiff) { DoCast(me, RAID_MODE(SPELL_POWERSURGE, H_SPELL_POWERSURGE)); powerSurgeTimer = urand(15000, 20000); } else powerSurgeTimer -= uiDiff; DoMeleeAttackIfReady(); } }; }; class mob_feugen : public CreatureScript { public: mob_feugen() : CreatureScript("mob_feugen") { } CreatureAI* GetAI(Creature* creature) const { return new mob_feugenAI(creature); } struct mob_feugenAI : public ScriptedAI { mob_feugenAI(Creature* creature) : ScriptedAI(creature) { instance = creature->GetInstanceScript(); } InstanceScript* instance; uint32 staticFieldTimer; void Reset() { if (instance) if (Creature* pThaddius = me->GetCreature(*me, instance->GetData64(DATA_THADDIUS))) if (pThaddius->AI()) pThaddius->AI()->DoAction(ACTION_FEUGEN_RESET); staticFieldTimer = 5000; } void KilledUnit(Unit* /*victim*/) { if (!(rand()%5)) Talk(SAY_FEUG_SLAY); } void EnterCombat(Unit* /*who*/) { Talk(SAY_FEUG_AGGRO); DoCast(SPELL_FEUGEN_TESLA); } void JustDied(Unit* /*killer*/) { Talk(SAY_FEUG_DEATH); if (instance) if (Creature* pThaddius = me->GetCreature(*me, instance->GetData64(DATA_THADDIUS))) if (pThaddius->AI()) pThaddius->AI()->DoAction(ACTION_FEUGEN_DIED); } void UpdateAI(const uint32 uiDiff) { if (!UpdateVictim()) return; if (staticFieldTimer <= uiDiff) { DoCast(me, RAID_MODE(SPELL_STATICFIELD, H_SPELL_STATICFIELD)); staticFieldTimer = 5000; } else staticFieldTimer -= uiDiff; DoMeleeAttackIfReady(); } }; }; class spell_thaddius_pos_neg_charge : public SpellScriptLoader { public: spell_thaddius_pos_neg_charge() : SpellScriptLoader("spell_thaddius_pos_neg_charge") { } class spell_thaddius_pos_neg_charge_SpellScript : public SpellScript { 
PrepareSpellScript(spell_thaddius_pos_neg_charge_SpellScript); bool Validate(SpellInfo const* /*spell*/) { if (!sSpellMgr->GetSpellInfo(SPELL_POSITIVE_CHARGE)) return false; if (!sSpellMgr->GetSpellInfo(SPELL_POSITIVE_CHARGE_STACK)) return false; if (!sSpellMgr->GetSpellInfo(SPELL_NEGATIVE_CHARGE)) return false; if (!sSpellMgr->GetSpellInfo(SPELL_NEGATIVE_CHARGE_STACK)) return false; return true; } bool Load() { return GetCaster()->GetTypeId() == TYPEID_UNIT; } void HandleTargets(std::list<WorldObject*>& targets) { uint8 count = 0; for (std::list<WorldObject*>::iterator ihit = targets.begin(); ihit != targets.end(); ++ihit) if ((*ihit)->GetGUID() != GetCaster()->GetGUID()) if (Player* target = (*ihit)->ToPlayer()) if (target->HasAura(GetTriggeringSpell()->Id)) ++count; if (count) { uint32 spellId = 0; if (GetSpellInfo()->Id == SPELL_POSITIVE_CHARGE) spellId = SPELL_POSITIVE_CHARGE_STACK; else // if (GetSpellInfo()->Id == SPELL_NEGATIVE_CHARGE) spellId = SPELL_NEGATIVE_CHARGE_STACK; GetCaster()->SetAuraStack(spellId, GetCaster(), count); } } void HandleDamage(SpellEffIndex /*effIndex*/) { if (!GetTriggeringSpell()) return; Unit* target = GetHitUnit(); Unit* caster = GetCaster(); if (target->HasAura(GetTriggeringSpell()->Id)) SetHitDamage(0); else { if (target->GetTypeId() == TYPEID_PLAYER && caster->IsAIEnabled) caster->ToCreature()->AI()->SetData(DATA_POLARITY_SWITCH, 1); } } void Register() { OnEffectHitTarget += SpellEffectFn(spell_thaddius_pos_neg_charge_SpellScript::HandleDamage, EFFECT_0, SPELL_EFFECT_SCHOOL_DAMAGE); OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_thaddius_pos_neg_charge_SpellScript::HandleTargets, EFFECT_0, TARGET_UNIT_SRC_AREA_ALLY); } }; SpellScript* GetSpellScript() const { return new spell_thaddius_pos_neg_charge_SpellScript(); } }; class spell_thaddius_polarity_shift : public SpellScriptLoader { public: spell_thaddius_polarity_shift() : SpellScriptLoader("spell_thaddius_polarity_shift") { } class 
spell_thaddius_polarity_shift_SpellScript : public SpellScript { PrepareSpellScript(spell_thaddius_polarity_shift_SpellScript); bool Validate(SpellInfo const* /*spell*/) { if (!sSpellMgr->GetSpellInfo(SPELL_POSITIVE_POLARITY) || !sSpellMgr->GetSpellInfo(SPELL_NEGATIVE_POLARITY)) return false; return true; } void HandleDummy(SpellEffIndex /* effIndex */) { Unit* caster = GetCaster(); if (Unit* target = GetHitUnit()) target->CastSpell(target, roll_chance_i(50) ? SPELL_POSITIVE_POLARITY : SPELL_NEGATIVE_POLARITY, true, NULL, NULL, caster->GetGUID()); } void Register() { OnEffectHitTarget += SpellEffectFn(spell_thaddius_polarity_shift_SpellScript::HandleDummy, EFFECT_0, SPELL_EFFECT_DUMMY); } }; SpellScript* GetSpellScript() const { return new spell_thaddius_polarity_shift_SpellScript(); } }; class achievement_polarity_switch : public AchievementCriteriaScript { public: achievement_polarity_switch() : AchievementCriteriaScript("achievement_polarity_switch") { } bool OnCheck(Player* /*source*/, Unit* target) { return target && target->GetAI()->GetData(DATA_POLARITY_SWITCH); } }; void AddSC_boss_thaddius() { new boss_thaddius(); new mob_stalagg(); new mob_feugen(); new spell_thaddius_pos_neg_charge(); new spell_thaddius_polarity_shift(); new achievement_polarity_switch(); }
gpl-2.0
HarveyHunt/linux
drivers/pwm/pwm-atmel-tcb.c
37
13587
/* * Copyright (C) Overkiz SAS 2012 * * Author: Boris BREZILLON <b.brezillon@overkiz.com> * License terms: GNU General Public License (GPL) version 2 */ #include <linux/module.h> #include <linux/init.h> #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/atmel_tc.h> #include <linux/pwm.h> #include <linux/of_device.h> #include <linux/slab.h> #define NPWM 6 #define ATMEL_TC_ACMR_MASK (ATMEL_TC_ACPA | ATMEL_TC_ACPC | \ ATMEL_TC_AEEVT | ATMEL_TC_ASWTRG) #define ATMEL_TC_BCMR_MASK (ATMEL_TC_BCPB | ATMEL_TC_BCPC | \ ATMEL_TC_BEEVT | ATMEL_TC_BSWTRG) struct atmel_tcb_pwm_device { enum pwm_polarity polarity; /* PWM polarity */ unsigned div; /* PWM clock divider */ unsigned duty; /* PWM duty expressed in clk cycles */ unsigned period; /* PWM period expressed in clk cycles */ }; struct atmel_tcb_channel { u32 enabled; u32 cmr; u32 ra; u32 rb; u32 rc; }; struct atmel_tcb_pwm_chip { struct pwm_chip chip; spinlock_t lock; struct atmel_tc *tc; struct atmel_tcb_pwm_device *pwms[NPWM]; struct atmel_tcb_channel bkup[NPWM / 2]; }; static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip) { return container_of(chip, struct atmel_tcb_pwm_chip, chip); } static int atmel_tcb_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm, enum pwm_polarity polarity) { struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm); tcbpwm->polarity = polarity; return 0; } static int atmel_tcb_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) { struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); struct atmel_tcb_pwm_device *tcbpwm; struct atmel_tc *tc = tcbpwmc->tc; void __iomem *regs = tc->regs; unsigned group = pwm->hwpwm / 2; unsigned index = pwm->hwpwm % 2; unsigned cmr; int ret; tcbpwm = devm_kzalloc(chip->dev, sizeof(*tcbpwm), GFP_KERNEL); if (!tcbpwm) 
return -ENOMEM; ret = clk_prepare_enable(tc->clk[group]); if (ret) { devm_kfree(chip->dev, tcbpwm); return ret; } pwm_set_chip_data(pwm, tcbpwm); tcbpwm->polarity = PWM_POLARITY_NORMAL; tcbpwm->duty = 0; tcbpwm->period = 0; tcbpwm->div = 0; spin_lock(&tcbpwmc->lock); cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR)); /* * Get init config from Timer Counter registers if * Timer Counter is already configured as a PWM generator. */ if (cmr & ATMEL_TC_WAVE) { if (index == 0) tcbpwm->duty = __raw_readl(regs + ATMEL_TC_REG(group, RA)); else tcbpwm->duty = __raw_readl(regs + ATMEL_TC_REG(group, RB)); tcbpwm->div = cmr & ATMEL_TC_TCCLKS; tcbpwm->period = __raw_readl(regs + ATMEL_TC_REG(group, RC)); cmr &= (ATMEL_TC_TCCLKS | ATMEL_TC_ACMR_MASK | ATMEL_TC_BCMR_MASK); } else cmr = 0; cmr |= ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO | ATMEL_TC_EEVT_XC0; __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR)); spin_unlock(&tcbpwmc->lock); tcbpwmc->pwms[pwm->hwpwm] = tcbpwm; return 0; } static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm); struct atmel_tc *tc = tcbpwmc->tc; clk_disable_unprepare(tc->clk[pwm->hwpwm / 2]); tcbpwmc->pwms[pwm->hwpwm] = NULL; devm_kfree(chip->dev, tcbpwm); } static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) { struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm); struct atmel_tc *tc = tcbpwmc->tc; void __iomem *regs = tc->regs; unsigned group = pwm->hwpwm / 2; unsigned index = pwm->hwpwm % 2; unsigned cmr; enum pwm_polarity polarity = tcbpwm->polarity; /* * If duty is 0 the timer will be stopped and we have to * configure the output correctly on software trigger: * - set output to high if PWM_POLARITY_INVERSED * - set output to low if PWM_POLARITY_NORMAL * * This is why we're reverting polarity in this case. 
*/ if (tcbpwm->duty == 0) polarity = !polarity; spin_lock(&tcbpwmc->lock); cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR)); /* flush old setting and set the new one */ if (index == 0) { cmr &= ~ATMEL_TC_ACMR_MASK; if (polarity == PWM_POLARITY_INVERSED) cmr |= ATMEL_TC_ASWTRG_CLEAR; else cmr |= ATMEL_TC_ASWTRG_SET; } else { cmr &= ~ATMEL_TC_BCMR_MASK; if (polarity == PWM_POLARITY_INVERSED) cmr |= ATMEL_TC_BSWTRG_CLEAR; else cmr |= ATMEL_TC_BSWTRG_SET; } __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR)); /* * Use software trigger to apply the new setting. * If both PWM devices in this group are disabled we stop the clock. */ if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC))) { __raw_writel(ATMEL_TC_SWTRG | ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(group, CCR)); tcbpwmc->bkup[group].enabled = 1; } else { __raw_writel(ATMEL_TC_SWTRG, regs + ATMEL_TC_REG(group, CCR)); tcbpwmc->bkup[group].enabled = 0; } spin_unlock(&tcbpwmc->lock); } static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) { struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm); struct atmel_tc *tc = tcbpwmc->tc; void __iomem *regs = tc->regs; unsigned group = pwm->hwpwm / 2; unsigned index = pwm->hwpwm % 2; u32 cmr; enum pwm_polarity polarity = tcbpwm->polarity; /* * If duty is 0 the timer will be stopped and we have to * configure the output correctly on software trigger: * - set output to high if PWM_POLARITY_INVERSED * - set output to low if PWM_POLARITY_NORMAL * * This is why we're reverting polarity in this case. 
*/ if (tcbpwm->duty == 0) polarity = !polarity; spin_lock(&tcbpwmc->lock); cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR)); /* flush old setting and set the new one */ cmr &= ~ATMEL_TC_TCCLKS; if (index == 0) { cmr &= ~ATMEL_TC_ACMR_MASK; /* Set CMR flags according to given polarity */ if (polarity == PWM_POLARITY_INVERSED) cmr |= ATMEL_TC_ASWTRG_CLEAR; else cmr |= ATMEL_TC_ASWTRG_SET; } else { cmr &= ~ATMEL_TC_BCMR_MASK; if (polarity == PWM_POLARITY_INVERSED) cmr |= ATMEL_TC_BSWTRG_CLEAR; else cmr |= ATMEL_TC_BSWTRG_SET; } /* * If duty is 0 or equal to period there's no need to register * a specific action on RA/RB and RC compare. * The output will be configured on software trigger and keep * this config till next config call. */ if (tcbpwm->duty != tcbpwm->period && tcbpwm->duty > 0) { if (index == 0) { if (polarity == PWM_POLARITY_INVERSED) cmr |= ATMEL_TC_ACPA_SET | ATMEL_TC_ACPC_CLEAR; else cmr |= ATMEL_TC_ACPA_CLEAR | ATMEL_TC_ACPC_SET; } else { if (polarity == PWM_POLARITY_INVERSED) cmr |= ATMEL_TC_BCPB_SET | ATMEL_TC_BCPC_CLEAR; else cmr |= ATMEL_TC_BCPB_CLEAR | ATMEL_TC_BCPC_SET; } } cmr |= (tcbpwm->div & ATMEL_TC_TCCLKS); __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR)); if (index == 0) __raw_writel(tcbpwm->duty, regs + ATMEL_TC_REG(group, RA)); else __raw_writel(tcbpwm->duty, regs + ATMEL_TC_REG(group, RB)); __raw_writel(tcbpwm->period, regs + ATMEL_TC_REG(group, RC)); /* Use software trigger to apply the new setting */ __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs + ATMEL_TC_REG(group, CCR)); tcbpwmc->bkup[group].enabled = 1; spin_unlock(&tcbpwmc->lock); return 0; } static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm); unsigned group = pwm->hwpwm / 2; unsigned index = pwm->hwpwm % 2; struct atmel_tcb_pwm_device *atcbpwm = NULL; struct atmel_tc *tc = tcbpwmc->tc; int i; int 
slowclk = 0; unsigned period; unsigned duty; unsigned rate = clk_get_rate(tc->clk[group]); unsigned long long min; unsigned long long max; /* * Find best clk divisor: * the smallest divisor which can fulfill the period_ns requirements. */ for (i = 0; i < 5; ++i) { if (atmel_tc_divisors[i] == 0) { slowclk = i; continue; } min = div_u64((u64)NSEC_PER_SEC * atmel_tc_divisors[i], rate); max = min << tc->tcb_config->counter_width; if (max >= period_ns) break; } /* * If none of the divisor are small enough to represent period_ns * take slow clock (32KHz). */ if (i == 5) { i = slowclk; rate = clk_get_rate(tc->slow_clk); min = div_u64(NSEC_PER_SEC, rate); max = min << tc->tcb_config->counter_width; /* If period is too big return ERANGE error */ if (max < period_ns) return -ERANGE; } duty = div_u64(duty_ns, min); period = div_u64(period_ns, min); if (index == 0) atcbpwm = tcbpwmc->pwms[pwm->hwpwm + 1]; else atcbpwm = tcbpwmc->pwms[pwm->hwpwm - 1]; /* * PWM devices provided by TCB driver are grouped by 2: * - group 0: PWM 0 & 1 * - group 1: PWM 2 & 3 * - group 2: PWM 4 & 5 * * PWM devices in a given group must be configured with the * same period_ns. * * We're checking the period value of the second PWM device * in this group before applying the new config. 
*/ if ((atcbpwm && atcbpwm->duty > 0 && atcbpwm->duty != atcbpwm->period) && (atcbpwm->div != i || atcbpwm->period != period)) { dev_err(chip->dev, "failed to configure period_ns: PWM group already configured with a different value\n"); return -EINVAL; } tcbpwm->period = period; tcbpwm->div = i; tcbpwm->duty = duty; /* If the PWM is enabled, call enable to apply the new conf */ if (pwm_is_enabled(pwm)) atmel_tcb_pwm_enable(chip, pwm); return 0; } static const struct pwm_ops atmel_tcb_pwm_ops = { .request = atmel_tcb_pwm_request, .free = atmel_tcb_pwm_free, .config = atmel_tcb_pwm_config, .set_polarity = atmel_tcb_pwm_set_polarity, .enable = atmel_tcb_pwm_enable, .disable = atmel_tcb_pwm_disable, .owner = THIS_MODULE, }; static int atmel_tcb_pwm_probe(struct platform_device *pdev) { struct atmel_tcb_pwm_chip *tcbpwm; struct device_node *np = pdev->dev.of_node; struct atmel_tc *tc; int err; int tcblock; err = of_property_read_u32(np, "tc-block", &tcblock); if (err < 0) { dev_err(&pdev->dev, "failed to get Timer Counter Block number from device tree (error: %d)\n", err); return err; } tc = atmel_tc_alloc(tcblock); if (tc == NULL) { dev_err(&pdev->dev, "failed to allocate Timer Counter Block\n"); return -ENOMEM; } tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL); if (tcbpwm == NULL) { err = -ENOMEM; dev_err(&pdev->dev, "failed to allocate memory\n"); goto err_free_tc; } tcbpwm->chip.dev = &pdev->dev; tcbpwm->chip.ops = &atmel_tcb_pwm_ops; tcbpwm->chip.of_xlate = of_pwm_xlate_with_flags; tcbpwm->chip.of_pwm_n_cells = 3; tcbpwm->chip.base = -1; tcbpwm->chip.npwm = NPWM; tcbpwm->tc = tc; err = clk_prepare_enable(tc->slow_clk); if (err) goto err_free_tc; spin_lock_init(&tcbpwm->lock); err = pwmchip_add(&tcbpwm->chip); if (err < 0) goto err_disable_clk; platform_set_drvdata(pdev, tcbpwm); return 0; err_disable_clk: clk_disable_unprepare(tcbpwm->tc->slow_clk); err_free_tc: atmel_tc_free(tc); return err; } static int atmel_tcb_pwm_remove(struct platform_device 
*pdev)
{
	struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
	int err;

	/*
	 * NOTE(review): the slow clock is disabled before pwmchip_remove();
	 * if pwmchip_remove() fails we return with the clock already off
	 * while the chip is still registered - confirm this ordering is
	 * intentional.
	 */
	clk_disable_unprepare(tcbpwm->tc->slow_clk);
	err = pwmchip_remove(&tcbpwm->chip);
	if (err < 0)
		return err;

	/* Release the Timer Counter block allocated in probe. */
	atmel_tc_free(tcbpwm->tc);

	return 0;
}

static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
	{ .compatible = "atmel,tcb-pwm", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);

#ifdef CONFIG_PM_SLEEP
/* Save CMR/RA/RB/RC of each TC channel so resume can restore them. */
static int atmel_tcb_pwm_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
	void __iomem *base = tcbpwm->tc->regs;
	int i;

	/* One backup slot per channel; two PWM devices share a channel. */
	for (i = 0; i < (NPWM / 2); i++) {
		struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];

		chan->cmr = readl(base + ATMEL_TC_REG(i, CMR));
		chan->ra = readl(base + ATMEL_TC_REG(i, RA));
		chan->rb = readl(base + ATMEL_TC_REG(i, RB));
		chan->rc = readl(base + ATMEL_TC_REG(i, RC));
	}
	return 0;
}

/* Restore channel registers and re-trigger channels that were running. */
static int atmel_tcb_pwm_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
	void __iomem *base = tcbpwm->tc->regs;
	int i;

	for (i = 0; i < (NPWM / 2); i++) {
		struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];

		writel(chan->cmr, base + ATMEL_TC_REG(i, CMR));
		writel(chan->ra, base + ATMEL_TC_REG(i, RA));
		writel(chan->rb, base + ATMEL_TC_REG(i, RB));
		writel(chan->rc, base + ATMEL_TC_REG(i, RC));
		if (chan->enabled) {
			/* Software trigger restarts the counter with clock on. */
			writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
				base + ATMEL_TC_REG(i, CCR));
		}
	}
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(atmel_tcb_pwm_pm_ops, atmel_tcb_pwm_suspend,
			 atmel_tcb_pwm_resume);

static struct platform_driver atmel_tcb_pwm_driver = {
	.driver = {
		.name = "atmel-tcb-pwm",
		.of_match_table = atmel_tcb_pwm_dt_ids,
		.pm = &atmel_tcb_pwm_pm_ops,
	},
	.probe = atmel_tcb_pwm_probe,
	.remove = atmel_tcb_pwm_remove,
};
module_platform_driver(atmel_tcb_pwm_driver);

MODULE_AUTHOR("Boris BREZILLON <b.brezillon@overkiz.com>");
MODULE_DESCRIPTION("Atmel Timer Counter Pulse Width Modulation Driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
val2k/linux
drivers/hwtracing/coresight/coresight-replicator.c
293
4343
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * Description: CoreSight Replicator driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/clk.h> #include <linux/of.h> #include <linux/coresight.h> #include "coresight-priv.h" /** * struct replicator_drvdata - specifics associated to a replicator component * @dev: the device entity associated with this component * @atclk: optional clock for the core parts of the replicator. 
* @csdev: component vitals needed by the framework */ struct replicator_drvdata { struct device *dev; struct clk *atclk; struct coresight_device *csdev; }; static int replicator_enable(struct coresight_device *csdev, int inport, int outport) { struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); dev_info(drvdata->dev, "REPLICATOR enabled\n"); return 0; } static void replicator_disable(struct coresight_device *csdev, int inport, int outport) { struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); dev_info(drvdata->dev, "REPLICATOR disabled\n"); } static const struct coresight_ops_link replicator_link_ops = { .enable = replicator_enable, .disable = replicator_disable, }; static const struct coresight_ops replicator_cs_ops = { .link_ops = &replicator_link_ops, }; static int replicator_probe(struct platform_device *pdev) { int ret; struct device *dev = &pdev->dev; struct coresight_platform_data *pdata = NULL; struct replicator_drvdata *drvdata; struct coresight_desc desc = { 0 }; struct device_node *np = pdev->dev.of_node; if (np) { pdata = of_get_coresight_platform_data(dev, np); if (IS_ERR(pdata)) return PTR_ERR(pdata); pdev->dev.platform_data = pdata; } drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; drvdata->dev = &pdev->dev; drvdata->atclk = devm_clk_get(&pdev->dev, "atclk"); /* optional */ if (!IS_ERR(drvdata->atclk)) { ret = clk_prepare_enable(drvdata->atclk); if (ret) return ret; } pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); platform_set_drvdata(pdev, drvdata); desc.type = CORESIGHT_DEV_TYPE_LINK; desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT; desc.ops = &replicator_cs_ops; desc.pdata = pdev->dev.platform_data; desc.dev = &pdev->dev; drvdata->csdev = coresight_register(&desc); if (IS_ERR(drvdata->csdev)) { ret = PTR_ERR(drvdata->csdev); goto out_disable_pm; } pm_runtime_put(&pdev->dev); return 0; 
out_disable_pm: if (!IS_ERR(drvdata->atclk)) clk_disable_unprepare(drvdata->atclk); pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); return ret; } #ifdef CONFIG_PM static int replicator_runtime_suspend(struct device *dev) { struct replicator_drvdata *drvdata = dev_get_drvdata(dev); if (drvdata && !IS_ERR(drvdata->atclk)) clk_disable_unprepare(drvdata->atclk); return 0; } static int replicator_runtime_resume(struct device *dev) { struct replicator_drvdata *drvdata = dev_get_drvdata(dev); if (drvdata && !IS_ERR(drvdata->atclk)) clk_prepare_enable(drvdata->atclk); return 0; } #endif static const struct dev_pm_ops replicator_dev_pm_ops = { SET_RUNTIME_PM_OPS(replicator_runtime_suspend, replicator_runtime_resume, NULL) }; static const struct of_device_id replicator_match[] = { {.compatible = "arm,coresight-replicator"}, {} }; static struct platform_driver replicator_driver = { .probe = replicator_probe, .driver = { .name = "coresight-replicator", .of_match_table = replicator_match, .pm = &replicator_dev_pm_ops, .suppress_bind_attrs = true, }, }; builtin_platform_driver(replicator_driver);
gpl-2.0
vijay03/optfs
drivers/net/ethernet/dnet.c
549
25845
/* * Dave DNET Ethernet Controller driver * * Copyright (C) 2008 Dave S.r.l. <www.dave.eu> * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/io.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/phy.h> #include "dnet.h" #undef DEBUG /* function for reading internal MAC register */ static u16 dnet_readw_mac(struct dnet *bp, u16 reg) { u16 data_read; /* issue a read */ dnet_writel(bp, reg, MACREG_ADDR); /* since a read/write op to the MAC is very slow, * we must wait before reading the data */ ndelay(500); /* read data read from the MAC register */ data_read = dnet_readl(bp, MACREG_DATA); /* all done */ return data_read; } /* function for writing internal MAC register */ static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val) { /* load data to write */ dnet_writel(bp, val, MACREG_DATA); /* issue a write */ dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR); /* since a read/write op to the MAC is very slow, * we must wait before exiting */ ndelay(500); } static void __dnet_set_hwaddr(struct dnet *bp) { u16 tmp; tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp); tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2)); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp); tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4)); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp); } static void __devinit dnet_get_hwaddr(struct dnet *bp) { u16 tmp; u8 addr[6]; /* * from MAC docs: * 
"Note that the MAC address is stored in the registers in Hexadecimal * form. For example, to set the MAC Address to: AC-DE-48-00-00-80 * would require writing 0xAC (octet 0) to address 0x0B (high byte of * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of * Mac_addr[15:0]), and 0x80 (octet 5) to address * 0x0E (Low byte of * Mac_addr[15:0]). */ tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG); *((__be16 *)addr) = cpu_to_be16(tmp); tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG); *((__be16 *)(addr + 2)) = cpu_to_be16(tmp); tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG); *((__be16 *)(addr + 4)) = cpu_to_be16(tmp); if (is_valid_ether_addr(addr)) memcpy(bp->dev->dev_addr, addr, sizeof(addr)); } static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct dnet *bp = bus->priv; u16 value; while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) & DNET_INTERNAL_GMII_MNG_CMD_FIN)) cpu_relax(); /* only 5 bits allowed for phy-addr and reg_offset */ mii_id &= 0x1f; regnum &= 0x1f; /* prepare reg_value for a read */ value = (mii_id << 8); value |= regnum; /* write control word */ dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value); /* wait for end of transfer */ while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) & DNET_INTERNAL_GMII_MNG_CMD_FIN)) cpu_relax(); value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG); pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value); return value; } static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct dnet *bp = bus->priv; u16 tmp; pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value); while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) & DNET_INTERNAL_GMII_MNG_CMD_FIN)) cpu_relax(); /* prepare for a write 
operation */ tmp = (1 << 13); /* only 5 bits allowed for phy-addr and reg_offset */ mii_id &= 0x1f; regnum &= 0x1f; /* only 16 bits on data */ value &= 0xffff; /* prepare reg_value for a write */ tmp |= (mii_id << 8); tmp |= regnum; /* write data to write first */ dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value); /* write control word */ dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp); while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) & DNET_INTERNAL_GMII_MNG_CMD_FIN)) cpu_relax(); return 0; } static int dnet_mdio_reset(struct mii_bus *bus) { return 0; } static void dnet_handle_link_change(struct net_device *dev) { struct dnet *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; unsigned long flags; u32 mode_reg, ctl_reg; int status_change = 0; spin_lock_irqsave(&bp->lock, flags); mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG); ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); if (phydev->link) { if (bp->duplex != phydev->duplex) { if (phydev->duplex) ctl_reg &= ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP); else ctl_reg |= DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP; bp->duplex = phydev->duplex; status_change = 1; } if (bp->speed != phydev->speed) { status_change = 1; switch (phydev->speed) { case 1000: mode_reg |= DNET_INTERNAL_MODE_GBITEN; break; case 100: case 10: mode_reg &= ~DNET_INTERNAL_MODE_GBITEN; break; default: printk(KERN_WARNING "%s: Ack! 
Speed (%d) is not " "10/100/1000!\n", dev->name, phydev->speed); break; } bp->speed = phydev->speed; } } if (phydev->link != bp->link) { if (phydev->link) { mode_reg |= (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN); } else { mode_reg &= ~(DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN); bp->speed = 0; bp->duplex = -1; } bp->link = phydev->link; status_change = 1; } if (status_change) { dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg); dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg); } spin_unlock_irqrestore(&bp->lock, flags); if (status_change) { if (phydev->link) printk(KERN_INFO "%s: link up (%d/%s)\n", dev->name, phydev->speed, DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); else printk(KERN_INFO "%s: link down\n", dev->name); } } static int dnet_mii_probe(struct net_device *dev) { struct dnet *bp = netdev_priv(dev); struct phy_device *phydev = NULL; int phy_addr; /* find the first phy */ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { if (bp->mii_bus->phy_map[phy_addr]) { phydev = bp->mii_bus->phy_map[phy_addr]; break; } } if (!phydev) { printk(KERN_ERR "%s: no PHY found\n", dev->name); return -ENODEV; } /* TODO : add pin_irq */ /* attach the mac to the phy */ if (bp->capabilities & DNET_HAS_RMII) { phydev = phy_connect(dev, dev_name(&phydev->dev), &dnet_handle_link_change, 0, PHY_INTERFACE_MODE_RMII); } else { phydev = phy_connect(dev, dev_name(&phydev->dev), &dnet_handle_link_change, 0, PHY_INTERFACE_MODE_MII); } if (IS_ERR(phydev)) { printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); return PTR_ERR(phydev); } /* mask with MAC supported features */ if (bp->capabilities & DNET_HAS_GIGABIT) phydev->supported &= PHY_GBIT_FEATURES; else phydev->supported &= PHY_BASIC_FEATURES; phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; phydev->advertising = phydev->supported; bp->link = 0; bp->speed = 0; bp->duplex = -1; bp->phy_dev = phydev; return 0; } static int dnet_mii_init(struct dnet *bp) { int err, i; 
bp->mii_bus = mdiobus_alloc(); if (bp->mii_bus == NULL) return -ENOMEM; bp->mii_bus->name = "dnet_mii_bus"; bp->mii_bus->read = &dnet_mdio_read; bp->mii_bus->write = &dnet_mdio_write; bp->mii_bus->reset = &dnet_mdio_reset; snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0); bp->mii_bus->priv = bp; bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!bp->mii_bus->irq) { err = -ENOMEM; goto err_out; } for (i = 0; i < PHY_MAX_ADDR; i++) bp->mii_bus->irq[i] = PHY_POLL; if (mdiobus_register(bp->mii_bus)) { err = -ENXIO; goto err_out_free_mdio_irq; } if (dnet_mii_probe(bp->dev) != 0) { err = -ENXIO; goto err_out_unregister_bus; } return 0; err_out_unregister_bus: mdiobus_unregister(bp->mii_bus); err_out_free_mdio_irq: kfree(bp->mii_bus->irq); err_out: mdiobus_free(bp->mii_bus); return err; } /* For Neptune board: LINK1000 as Link LED and TX as activity LED */ static int dnet_phy_marvell_fixup(struct phy_device *phydev) { return phy_write(phydev, 0x18, 0x4148); } static void dnet_update_stats(struct dnet *bp) { u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT; u32 *p = &bp->hw_stats.rx_pkt_ignr; u32 *end = &bp->hw_stats.rx_byte + 1; WARN_ON((unsigned long)(end - p - 1) != (DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4); for (; p < end; p++, reg++) *p += readl(reg); reg = bp->regs + DNET_TX_UNICAST_CNT; p = &bp->hw_stats.tx_unicast; end = &bp->hw_stats.tx_byte + 1; WARN_ON((unsigned long)(end - p - 1) != (DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4); for (; p < end; p++, reg++) *p += readl(reg); } static int dnet_poll(struct napi_struct *napi, int budget) { struct dnet *bp = container_of(napi, struct dnet, napi); struct net_device *dev = bp->dev; int npackets = 0; unsigned int pkt_len; struct sk_buff *skb; unsigned int *data_ptr; u32 int_enable; u32 cmd_word; int i; while (npackets < budget) { /* * break out of while loop if there are no more * packets waiting */ if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) { napi_complete(napi); int_enable = 
dnet_readl(bp, INTR_ENB); int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; dnet_writel(bp, int_enable, INTR_ENB); return 0; } cmd_word = dnet_readl(bp, RX_LEN_FIFO); pkt_len = cmd_word & 0xFFFF; if (cmd_word & 0xDF180000) printk(KERN_ERR "%s packet receive error %x\n", __func__, cmd_word); skb = dev_alloc_skb(pkt_len + 5); if (skb != NULL) { /* Align IP on 16 byte boundaries */ skb_reserve(skb, 2); /* * 'skb_put()' points to the start of sk_buff * data area. */ data_ptr = (unsigned int *)skb_put(skb, pkt_len); for (i = 0; i < (pkt_len + 3) >> 2; i++) *data_ptr++ = dnet_readl(bp, RX_DATA_FIFO); skb->protocol = eth_type_trans(skb, dev); netif_receive_skb(skb); npackets++; } else printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of " "size %u.\n", dev->name, pkt_len); } budget -= npackets; if (npackets < budget) { /* We processed all packets available. Tell NAPI it can * stop polling then re-enable rx interrupts */ napi_complete(napi); int_enable = dnet_readl(bp, INTR_ENB); int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; dnet_writel(bp, int_enable, INTR_ENB); return 0; } /* There are still packets waiting */ return 1; } static irqreturn_t dnet_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct dnet *bp = netdev_priv(dev); u32 int_src, int_enable, int_current; unsigned long flags; unsigned int handled = 0; spin_lock_irqsave(&bp->lock, flags); /* read and clear the DNET irq (clear on read) */ int_src = dnet_readl(bp, INTR_SRC); int_enable = dnet_readl(bp, INTR_ENB); int_current = int_src & int_enable; /* restart the queue if we had stopped it for TX fifo almost full */ if (int_current & DNET_INTR_SRC_TX_FIFOAE) { int_enable = dnet_readl(bp, INTR_ENB); int_enable &= ~DNET_INTR_ENB_TX_FIFOAE; dnet_writel(bp, int_enable, INTR_ENB); netif_wake_queue(dev); handled = 1; } /* RX FIFO error checking */ if (int_current & (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) { printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__, dnet_readl(bp, 
RX_STATUS), int_current); /* we can only flush the RX FIFOs */ dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL); ndelay(500); dnet_writel(bp, 0, SYS_CTL); handled = 1; } /* TX FIFO error checking */ if (int_current & (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) { printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__, dnet_readl(bp, TX_STATUS), int_current); /* we can only flush the TX FIFOs */ dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL); ndelay(500); dnet_writel(bp, 0, SYS_CTL); handled = 1; } if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) { if (napi_schedule_prep(&bp->napi)) { /* * There's no point taking any more interrupts * until we have processed the buffers */ /* Disable Rx interrupts and schedule NAPI poll */ int_enable = dnet_readl(bp, INTR_ENB); int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF; dnet_writel(bp, int_enable, INTR_ENB); __napi_schedule(&bp->napi); } handled = 1; } if (!handled) pr_debug("%s: irq %x remains\n", __func__, int_current); spin_unlock_irqrestore(&bp->lock, flags); return IRQ_RETVAL(handled); } #ifdef DEBUG static inline void dnet_print_skb(struct sk_buff *skb) { int k; printk(KERN_DEBUG PFX "data:"); for (k = 0; k < skb->len; k++) printk(" %02x", (unsigned int)skb->data[k]); printk("\n"); } #else #define dnet_print_skb(skb) do {} while (0) #endif static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct dnet *bp = netdev_priv(dev); u32 tx_status, irq_enable; unsigned int len, i, tx_cmd, wrsz; unsigned long flags; unsigned int *bufp; tx_status = dnet_readl(bp, TX_STATUS); pr_debug("start_xmit: len %u head %p data %p\n", skb->len, skb->head, skb->data); dnet_print_skb(skb); /* frame size (words) */ len = (skb->len + 3) >> 2; spin_lock_irqsave(&bp->lock, flags); tx_status = dnet_readl(bp, TX_STATUS); bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL); wrsz = (u32) skb->len + 3; wrsz += ((unsigned long) skb->data) & 0x3; wrsz >>= 2; tx_cmd = ((((unsigned long)(skb->data)) & 
0x03) << 16) | (u32) skb->len; /* check if there is enough room for the current frame */ if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) { for (i = 0; i < wrsz; i++) dnet_writel(bp, *bufp++, TX_DATA_FIFO); /* * inform MAC that a packet's written and ready to be * shipped out */ dnet_writel(bp, tx_cmd, TX_LEN_FIFO); } if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) { netif_stop_queue(dev); tx_status = dnet_readl(bp, INTR_SRC); irq_enable = dnet_readl(bp, INTR_ENB); irq_enable |= DNET_INTR_ENB_TX_FIFOAE; dnet_writel(bp, irq_enable, INTR_ENB); } skb_tx_timestamp(skb); /* free the buffer */ dev_kfree_skb(skb); spin_unlock_irqrestore(&bp->lock, flags); return NETDEV_TX_OK; } static void dnet_reset_hw(struct dnet *bp) { /* put ts_mac in IDLE state i.e. disable rx/tx */ dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN); /* * RX FIFO almost full threshold: only cmd FIFO almost full is * implemented for RX side */ dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH); /* * TX FIFO almost empty threshold: only data FIFO almost empty * is implemented for TX side */ dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH); /* flush rx/tx fifos */ dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL); msleep(1); dnet_writel(bp, 0, SYS_CTL); } static void dnet_init_hw(struct dnet *bp) { u32 config; dnet_reset_hw(bp); __dnet_set_hwaddr(bp); config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); if (bp->dev->flags & IFF_PROMISC) /* Copy All Frames */ config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC; if (!(bp->dev->flags & IFF_BROADCAST)) /* No BroadCast */ config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST; config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE | DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST | DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL | DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS; dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config); /* clear irq before enabling them */ config = dnet_readl(bp, INTR_SRC); /* enable RX/TX 
interrupt, recv packet ready interrupt */ dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY | DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR | DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL | DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM | DNET_INTR_ENB_RX_PKTRDY, INTR_ENB); } static int dnet_open(struct net_device *dev) { struct dnet *bp = netdev_priv(dev); /* if the phy is not yet register, retry later */ if (!bp->phy_dev) return -EAGAIN; if (!is_valid_ether_addr(dev->dev_addr)) return -EADDRNOTAVAIL; napi_enable(&bp->napi); dnet_init_hw(bp); phy_start_aneg(bp->phy_dev); /* schedule a link state check */ phy_start(bp->phy_dev); netif_start_queue(dev); return 0; } static int dnet_close(struct net_device *dev) { struct dnet *bp = netdev_priv(dev); netif_stop_queue(dev); napi_disable(&bp->napi); if (bp->phy_dev) phy_stop(bp->phy_dev); dnet_reset_hw(bp); netif_carrier_off(dev); return 0; } static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat) { pr_debug("%s\n", __func__); pr_debug("----------------------------- RX statistics " "-------------------------------\n"); pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr); pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err); pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm); pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm); pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol); pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err); pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt); pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm); pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm); pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast); pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast); pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag); pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink); pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib); pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd); 
pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte); pr_debug("----------------------------- TX statistics " "-------------------------------\n"); pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast); pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm); pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast); pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast); pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag); pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs); pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo); pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte); } static struct net_device_stats *dnet_get_stats(struct net_device *dev) { struct dnet *bp = netdev_priv(dev); struct net_device_stats *nstat = &dev->stats; struct dnet_stats *hwstat = &bp->hw_stats; /* read stats from hardware */ dnet_update_stats(bp); /* Convert HW stats into netdevice stats */ nstat->rx_errors = (hwstat->rx_len_chk_err + hwstat->rx_lng_frm + hwstat->rx_shrt_frm + /* ignore IGP violation error hwstat->rx_ipg_viol + */ hwstat->rx_crc_err + hwstat->rx_pre_shrink + hwstat->rx_drib_nib + hwstat->rx_unsup_opcd); nstat->tx_errors = hwstat->tx_bad_fcs; nstat->rx_length_errors = (hwstat->rx_len_chk_err + hwstat->rx_lng_frm + hwstat->rx_shrt_frm + hwstat->rx_pre_shrink); nstat->rx_crc_errors = hwstat->rx_crc_err; nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib; nstat->rx_packets = hwstat->rx_ok_pkt; nstat->tx_packets = (hwstat->tx_unicast + hwstat->tx_multicast + hwstat->tx_brdcast); nstat->rx_bytes = hwstat->rx_byte; nstat->tx_bytes = hwstat->tx_byte; nstat->multicast = hwstat->rx_multicast; nstat->rx_missed_errors = hwstat->rx_pkt_ignr; dnet_print_pretty_hwstats(hwstat); return nstat; } static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct dnet *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; if (!phydev) return -ENODEV; return phy_ethtool_gset(phydev, cmd); } static int dnet_set_settings(struct net_device *dev, 
struct ethtool_cmd *cmd) { struct dnet *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; if (!phydev) return -ENODEV; return phy_ethtool_sset(phydev, cmd); } static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct dnet *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; if (!netif_running(dev)) return -EINVAL; if (!phydev) return -ENODEV; return phy_mii_ioctl(phydev, rq, cmd); } static void dnet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, DRV_NAME); strcpy(info->version, DRV_VERSION); strcpy(info->bus_info, "0"); } static const struct ethtool_ops dnet_ethtool_ops = { .get_settings = dnet_get_settings, .set_settings = dnet_set_settings, .get_drvinfo = dnet_get_drvinfo, .get_link = ethtool_op_get_link, }; static const struct net_device_ops dnet_netdev_ops = { .ndo_open = dnet_open, .ndo_stop = dnet_close, .ndo_get_stats = dnet_get_stats, .ndo_start_xmit = dnet_start_xmit, .ndo_do_ioctl = dnet_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, }; static int __devinit dnet_probe(struct platform_device *pdev) { struct resource *res; struct net_device *dev; struct dnet *bp; struct phy_device *phydev; int err = -ENXIO; unsigned int mem_base, mem_size, irq; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "no mmio resource defined\n"); goto err_out; } mem_base = res->start; mem_size = resource_size(res); irq = platform_get_irq(pdev, 0); if (!request_mem_region(mem_base, mem_size, DRV_NAME)) { dev_err(&pdev->dev, "no memory region available\n"); err = -EBUSY; goto err_out; } err = -ENOMEM; dev = alloc_etherdev(sizeof(*bp)); if (!dev) { dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n"); goto err_out_release_mem; } /* TODO: Actually, we have some interesting features... 
*/ dev->features |= 0; bp = netdev_priv(dev); bp->dev = dev; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); spin_lock_init(&bp->lock); bp->regs = ioremap(mem_base, mem_size); if (!bp->regs) { dev_err(&pdev->dev, "failed to map registers, aborting.\n"); err = -ENOMEM; goto err_out_free_dev; } dev->irq = irq; err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev); if (err) { dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n", irq, err); goto err_out_iounmap; } dev->netdev_ops = &dnet_netdev_ops; netif_napi_add(dev, &bp->napi, dnet_poll, 64); dev->ethtool_ops = &dnet_ethtool_ops; dev->base_addr = (unsigned long)bp->regs; bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK; dnet_get_hwaddr(bp); if (!is_valid_ether_addr(dev->dev_addr)) { /* choose a random ethernet address */ random_ether_addr(dev->dev_addr); __dnet_set_hwaddr(bp); } err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); goto err_out_free_irq; } /* register the PHY board fixup (for Marvell 88E1111) */ err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0, dnet_phy_marvell_fixup); /* we can live without it, so just issue a warning */ if (err) dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n"); err = dnet_mii_init(bp); if (err) goto err_out_unregister_netdev; dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n", bp->regs, mem_base, dev->irq, dev->dev_addr); dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n", (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ", (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", (bp->capabilities & DNET_HAS_DMA) ? 
"" : "no "); phydev = bp->phy_dev; dev_info(&pdev->dev, "attached PHY driver [%s] " "(mii_bus:phy_addr=%s, irq=%d)\n", phydev->drv->name, dev_name(&phydev->dev), phydev->irq); return 0; err_out_unregister_netdev: unregister_netdev(dev); err_out_free_irq: free_irq(dev->irq, dev); err_out_iounmap: iounmap(bp->regs); err_out_free_dev: free_netdev(dev); err_out_release_mem: release_mem_region(mem_base, mem_size); err_out: return err; } static int __devexit dnet_remove(struct platform_device *pdev) { struct net_device *dev; struct dnet *bp; dev = platform_get_drvdata(pdev); if (dev) { bp = netdev_priv(dev); if (bp->phy_dev) phy_disconnect(bp->phy_dev); mdiobus_unregister(bp->mii_bus); kfree(bp->mii_bus->irq); mdiobus_free(bp->mii_bus); unregister_netdev(dev); free_irq(dev->irq, dev); iounmap(bp->regs); free_netdev(dev); } return 0; } static struct platform_driver dnet_driver = { .probe = dnet_probe, .remove = __devexit_p(dnet_remove), .driver = { .name = "dnet", }, }; static int __init dnet_init(void) { return platform_driver_register(&dnet_driver); } static void __exit dnet_exit(void) { platform_driver_unregister(&dnet_driver); } module_init(dnet_init); module_exit(dnet_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Dave DNET Ethernet driver"); MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, " "Matteo Vit <matteo.vit@dave.eu>");
gpl-2.0
hduffddybz/rt-thread
bsp/stm32f10x/Libraries/STM32F10x_StdPeriph_Driver/src/stm32f10x_usart.c
549
37250
/** ****************************************************************************** * @file stm32f10x_usart.c * @author MCD Application Team * @version V3.5.0 * @date 11-March-2011 * @brief This file provides all the USART firmware functions. ****************************************************************************** * @attention * * THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE * TIME. AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING * FROM THE CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS. * * <h2><center>&copy; COPYRIGHT 2011 STMicroelectronics</center></h2> ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ #include "stm32f10x_usart.h" #include "stm32f10x_rcc.h" /** @addtogroup STM32F10x_StdPeriph_Driver * @{ */ /** @defgroup USART * @brief USART driver modules * @{ */ /** @defgroup USART_Private_TypesDefinitions * @{ */ /** * @} */ /** @defgroup USART_Private_Defines * @{ */ #define CR1_UE_Set ((uint16_t)0x2000) /*!< USART Enable Mask */ #define CR1_UE_Reset ((uint16_t)0xDFFF) /*!< USART Disable Mask */ #define CR1_WAKE_Mask ((uint16_t)0xF7FF) /*!< USART WakeUp Method Mask */ #define CR1_RWU_Set ((uint16_t)0x0002) /*!< USART mute mode Enable Mask */ #define CR1_RWU_Reset ((uint16_t)0xFFFD) /*!< USART mute mode Enable Mask */ #define CR1_SBK_Set ((uint16_t)0x0001) /*!< USART Break Character send Mask */ #define CR1_CLEAR_Mask ((uint16_t)0xE9F3) /*!< USART CR1 Mask */ #define CR2_Address_Mask ((uint16_t)0xFFF0) /*!< USART address Mask */ #define CR2_LINEN_Set ((uint16_t)0x4000) /*!< USART LIN Enable Mask */ #define CR2_LINEN_Reset ((uint16_t)0xBFFF) /*!< USART 
LIN Disable Mask */ #define CR2_LBDL_Mask ((uint16_t)0xFFDF) /*!< USART LIN Break detection Mask */ #define CR2_STOP_CLEAR_Mask ((uint16_t)0xCFFF) /*!< USART CR2 STOP Bits Mask */ #define CR2_CLOCK_CLEAR_Mask ((uint16_t)0xF0FF) /*!< USART CR2 Clock Mask */ #define CR3_SCEN_Set ((uint16_t)0x0020) /*!< USART SC Enable Mask */ #define CR3_SCEN_Reset ((uint16_t)0xFFDF) /*!< USART SC Disable Mask */ #define CR3_NACK_Set ((uint16_t)0x0010) /*!< USART SC NACK Enable Mask */ #define CR3_NACK_Reset ((uint16_t)0xFFEF) /*!< USART SC NACK Disable Mask */ #define CR3_HDSEL_Set ((uint16_t)0x0008) /*!< USART Half-Duplex Enable Mask */ #define CR3_HDSEL_Reset ((uint16_t)0xFFF7) /*!< USART Half-Duplex Disable Mask */ #define CR3_IRLP_Mask ((uint16_t)0xFFFB) /*!< USART IrDA LowPower mode Mask */ #define CR3_CLEAR_Mask ((uint16_t)0xFCFF) /*!< USART CR3 Mask */ #define CR3_IREN_Set ((uint16_t)0x0002) /*!< USART IrDA Enable Mask */ #define CR3_IREN_Reset ((uint16_t)0xFFFD) /*!< USART IrDA Disable Mask */ #define GTPR_LSB_Mask ((uint16_t)0x00FF) /*!< Guard Time Register LSB Mask */ #define GTPR_MSB_Mask ((uint16_t)0xFF00) /*!< Guard Time Register MSB Mask */ #define IT_Mask ((uint16_t)0x001F) /*!< USART Interrupt Mask */ /* USART OverSampling-8 Mask */ #define CR1_OVER8_Set ((u16)0x8000) /* USART OVER8 mode Enable Mask */ #define CR1_OVER8_Reset ((u16)0x7FFF) /* USART OVER8 mode Disable Mask */ /* USART One Bit Sampling Mask */ #define CR3_ONEBITE_Set ((u16)0x0800) /* USART ONEBITE mode Enable Mask */ #define CR3_ONEBITE_Reset ((u16)0xF7FF) /* USART ONEBITE mode Disable Mask */ /** * @} */ /** @defgroup USART_Private_Macros * @{ */ /** * @} */ /** @defgroup USART_Private_Variables * @{ */ /** * @} */ /** @defgroup USART_Private_FunctionPrototypes * @{ */ /** * @} */ /** @defgroup USART_Private_Functions * @{ */ /** * @brief Deinitializes the USARTx peripheral registers to their default reset values. * @param USARTx: Select the USART or the UART peripheral. 
* This parameter can be one of the following values:
  *   USART1, USART2, USART3, UART4 or UART5.
  * @retval None
  */
void USART_DeInit(USART_TypeDef* USARTx)
{
  /* Check the parameters */
  assert_param(IS_USART_ALL_PERIPH(USARTx));

  /* Pulse the matching APB reset line: assert it, then release it. */
  if (USARTx == USART1)
  {
    RCC_APB2PeriphResetCmd(RCC_APB2Periph_USART1, ENABLE);
    RCC_APB2PeriphResetCmd(RCC_APB2Periph_USART1, DISABLE);
  }
  else if (USARTx == USART2)
  {
    RCC_APB1PeriphResetCmd(RCC_APB1Periph_USART2, ENABLE);
    RCC_APB1PeriphResetCmd(RCC_APB1Periph_USART2, DISABLE);
  }
  else if (USARTx == USART3)
  {
    RCC_APB1PeriphResetCmd(RCC_APB1Periph_USART3, ENABLE);
    RCC_APB1PeriphResetCmd(RCC_APB1Periph_USART3, DISABLE);
  }
  else if (USARTx == UART4)
  {
    RCC_APB1PeriphResetCmd(RCC_APB1Periph_UART4, ENABLE);
    RCC_APB1PeriphResetCmd(RCC_APB1Periph_UART4, DISABLE);
  }
  else if (USARTx == UART5)
  {
    RCC_APB1PeriphResetCmd(RCC_APB1Periph_UART5, ENABLE);
    RCC_APB1PeriphResetCmd(RCC_APB1Periph_UART5, DISABLE);
  }
}

/**
  * @brief  Initializes the USARTx peripheral according to the specified
  *         parameters in the USART_InitStruct .
  * @param  USARTx: Select the USART or the UART peripheral.
  *   This parameter can be one of the following values:
  *   USART1, USART2, USART3, UART4 or UART5.
  * @param  USART_InitStruct: pointer to a USART_InitTypeDef structure
  *         that contains the configuration information for the specified USART
  *         peripheral.
* @retval None
  */
void USART_Init(USART_TypeDef* USARTx, USART_InitTypeDef* USART_InitStruct)
{
  uint32_t reg = 0x00, pclk = 0x00;
  uint32_t divider = 0x00;
  uint32_t frac = 0x00;
  uint32_t base = 0;
  RCC_ClocksTypeDef clocks;

  /* Check the parameters */
  assert_param(IS_USART_ALL_PERIPH(USARTx));
  assert_param(IS_USART_BAUDRATE(USART_InitStruct->USART_BaudRate));
  assert_param(IS_USART_WORD_LENGTH(USART_InitStruct->USART_WordLength));
  assert_param(IS_USART_STOPBITS(USART_InitStruct->USART_StopBits));
  assert_param(IS_USART_PARITY(USART_InitStruct->USART_Parity));
  assert_param(IS_USART_MODE(USART_InitStruct->USART_Mode));
  assert_param(IS_USART_HARDWARE_FLOW_CONTROL(USART_InitStruct->USART_HardwareFlowControl));
  /* The hardware flow control is available only for USART1, USART2 and USART3 */
  if (USART_InitStruct->USART_HardwareFlowControl != USART_HardwareFlowControl_None)
  {
    assert_param(IS_USART_123_PERIPH(USARTx));
  }

  base = (uint32_t)USARTx;

  /*---------------------------- USART CR2 Configuration -----------------------*/
  /* Program the STOP[13:12] bits, leaving every other CR2 field untouched. */
  reg = USARTx->CR2;
  reg &= CR2_STOP_CLEAR_Mask;
  reg |= (uint32_t)USART_InitStruct->USART_StopBits;
  /* Write to USART CR2 */
  USARTx->CR2 = (uint16_t)reg;

  /*---------------------------- USART CR1 Configuration -----------------------*/
  /* Program word length (M), parity (PCE/PS) and direction (TE/RE) bits. */
  reg = USARTx->CR1;
  reg &= CR1_CLEAR_Mask;
  reg |= (uint32_t)USART_InitStruct->USART_WordLength |
         USART_InitStruct->USART_Parity |
         USART_InitStruct->USART_Mode;
  /* Write to USART CR1 */
  USARTx->CR1 = (uint16_t)reg;

  /*---------------------------- USART CR3 Configuration -----------------------*/
  /* Program the hardware flow control bits (CTSE/RTSE). */
  reg = USARTx->CR3;
  reg &= CR3_CLEAR_Mask;
  reg |= USART_InitStruct->USART_HardwareFlowControl;
  /* Write to USART CR3 */
  USARTx->CR3 = (uint16_t)reg;

  /*---------------------------- USART BRR Configuration -----------------------*/
  /* USART1 is clocked from PCLK2, every other U(S)ART from PCLK1. */
  RCC_GetClocksFreq(&clocks);
  pclk = (base == USART1_BASE) ? clocks.PCLK2_Frequency : clocks.PCLK1_Frequency;

  /* Integer part, scaled by 100 so two decimal digits survive the division. */
  if ((USARTx->CR1 & CR1_OVER8_Set) != 0)
  {
    /* Oversampling mode is 8 Samples */
    divider = ((25 * pclk) / (2 * (USART_InitStruct->USART_BaudRate)));
  }
  else
  {
    /* Oversampling mode is 16 Samples */
    divider = ((25 * pclk) / (4 * (USART_InitStruct->USART_BaudRate)));
  }
  reg = (divider / 100) << 4;

  /* Fractional part, rounded to the nearest step:
     3 significant bits with OVER8, 4 bits otherwise. */
  frac = divider - (100 * (reg >> 4));
  if ((USARTx->CR1 & CR1_OVER8_Set) != 0)
  {
    reg |= ((((frac * 8) + 50) / 100)) & ((uint8_t)0x07);
  }
  else
  {
    reg |= ((((frac * 16) + 50) / 100)) & ((uint8_t)0x0F);
  }

  /* Write to USART BRR */
  USARTx->BRR = (uint16_t)reg;
}

/**
  * @brief  Fills each USART_InitStruct member with its default value.
  * @param  USART_InitStruct: pointer to a USART_InitTypeDef structure
  *         which will be initialized.
* @retval None */ void USART_StructInit(USART_InitTypeDef* USART_InitStruct) { /* USART_InitStruct members default value */ USART_InitStruct->USART_BaudRate = 9600; USART_InitStruct->USART_WordLength = USART_WordLength_8b; USART_InitStruct->USART_StopBits = USART_StopBits_1; USART_InitStruct->USART_Parity = USART_Parity_No ; USART_InitStruct->USART_Mode = USART_Mode_Rx | USART_Mode_Tx; USART_InitStruct->USART_HardwareFlowControl = USART_HardwareFlowControl_None; } /** * @brief Initializes the USARTx peripheral Clock according to the * specified parameters in the USART_ClockInitStruct . * @param USARTx: where x can be 1, 2, 3 to select the USART peripheral. * @param USART_ClockInitStruct: pointer to a USART_ClockInitTypeDef * structure that contains the configuration information for the specified * USART peripheral. * @note The Smart Card and Synchronous modes are not available for UART4 and UART5. * @retval None */ void USART_ClockInit(USART_TypeDef* USARTx, USART_ClockInitTypeDef* USART_ClockInitStruct) { uint32_t tmpreg = 0x00; /* Check the parameters */ assert_param(IS_USART_123_PERIPH(USARTx)); assert_param(IS_USART_CLOCK(USART_ClockInitStruct->USART_Clock)); assert_param(IS_USART_CPOL(USART_ClockInitStruct->USART_CPOL)); assert_param(IS_USART_CPHA(USART_ClockInitStruct->USART_CPHA)); assert_param(IS_USART_LASTBIT(USART_ClockInitStruct->USART_LastBit)); /*---------------------------- USART CR2 Configuration -----------------------*/ tmpreg = USARTx->CR2; /* Clear CLKEN, CPOL, CPHA and LBCL bits */ tmpreg &= CR2_CLOCK_CLEAR_Mask; /* Configure the USART Clock, CPOL, CPHA and LastBit ------------*/ /* Set CLKEN bit according to USART_Clock value */ /* Set CPOL bit according to USART_CPOL value */ /* Set CPHA bit according to USART_CPHA value */ /* Set LBCL bit according to USART_LastBit value */ tmpreg |= (uint32_t)USART_ClockInitStruct->USART_Clock | USART_ClockInitStruct->USART_CPOL | USART_ClockInitStruct->USART_CPHA | USART_ClockInitStruct->USART_LastBit; /* 
Write to USART CR2 */ USARTx->CR2 = (uint16_t)tmpreg; } /** * @brief Fills each USART_ClockInitStruct member with its default value. * @param USART_ClockInitStruct: pointer to a USART_ClockInitTypeDef * structure which will be initialized. * @retval None */ void USART_ClockStructInit(USART_ClockInitTypeDef* USART_ClockInitStruct) { /* USART_ClockInitStruct members default value */ USART_ClockInitStruct->USART_Clock = USART_Clock_Disable; USART_ClockInitStruct->USART_CPOL = USART_CPOL_Low; USART_ClockInitStruct->USART_CPHA = USART_CPHA_1Edge; USART_ClockInitStruct->USART_LastBit = USART_LastBit_Disable; } /** * @brief Enables or disables the specified USART peripheral. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param NewState: new state of the USARTx peripheral. * This parameter can be: ENABLE or DISABLE. * @retval None */ void USART_Cmd(USART_TypeDef* USARTx, FunctionalState NewState) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_FUNCTIONAL_STATE(NewState)); if (NewState != DISABLE) { /* Enable the selected USART by setting the UE bit in the CR1 register */ USARTx->CR1 |= CR1_UE_Set; } else { /* Disable the selected USART by clearing the UE bit in the CR1 register */ USARTx->CR1 &= CR1_UE_Reset; } } /** * @brief Enables or disables the specified USART interrupts. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param USART_IT: specifies the USART interrupt sources to be enabled or disabled. 
* This parameter can be one of the following values: * @arg USART_IT_CTS: CTS change interrupt (not available for UART4 and UART5) * @arg USART_IT_LBD: LIN Break detection interrupt * @arg USART_IT_TXE: Transmit Data Register empty interrupt * @arg USART_IT_TC: Transmission complete interrupt * @arg USART_IT_RXNE: Receive Data register not empty interrupt * @arg USART_IT_IDLE: Idle line detection interrupt * @arg USART_IT_PE: Parity Error interrupt * @arg USART_IT_ERR: Error interrupt(Frame error, noise error, overrun error) * @param NewState: new state of the specified USARTx interrupts. * This parameter can be: ENABLE or DISABLE. * @retval None */ void USART_ITConfig(USART_TypeDef* USARTx, uint16_t USART_IT, FunctionalState NewState) { uint32_t usartreg = 0x00, itpos = 0x00, itmask = 0x00; uint32_t usartxbase = 0x00; /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_USART_CONFIG_IT(USART_IT)); assert_param(IS_FUNCTIONAL_STATE(NewState)); /* The CTS interrupt is not available for UART4 and UART5 */ if (USART_IT == USART_IT_CTS) { assert_param(IS_USART_123_PERIPH(USARTx)); } usartxbase = (uint32_t)USARTx; /* Get the USART register index */ usartreg = (((uint8_t)USART_IT) >> 0x05); /* Get the interrupt position */ itpos = USART_IT & IT_Mask; itmask = (((uint32_t)0x01) << itpos); if (usartreg == 0x01) /* The IT is in CR1 register */ { usartxbase += 0x0C; } else if (usartreg == 0x02) /* The IT is in CR2 register */ { usartxbase += 0x10; } else /* The IT is in CR3 register */ { usartxbase += 0x14; } if (NewState != DISABLE) { *(__IO uint32_t*)usartxbase |= itmask; } else { *(__IO uint32_t*)usartxbase &= ~itmask; } } /** * @brief Enables or disables the USART’s DMA interface. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param USART_DMAReq: specifies the DMA request. 
* This parameter can be any combination of the following values: * @arg USART_DMAReq_Tx: USART DMA transmit request * @arg USART_DMAReq_Rx: USART DMA receive request * @param NewState: new state of the DMA Request sources. * This parameter can be: ENABLE or DISABLE. * @note The DMA mode is not available for UART5 except in the STM32 * High density value line devices(STM32F10X_HD_VL). * @retval None */ void USART_DMACmd(USART_TypeDef* USARTx, uint16_t USART_DMAReq, FunctionalState NewState) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_USART_DMAREQ(USART_DMAReq)); assert_param(IS_FUNCTIONAL_STATE(NewState)); if (NewState != DISABLE) { /* Enable the DMA transfer for selected requests by setting the DMAT and/or DMAR bits in the USART CR3 register */ USARTx->CR3 |= USART_DMAReq; } else { /* Disable the DMA transfer for selected requests by clearing the DMAT and/or DMAR bits in the USART CR3 register */ USARTx->CR3 &= (uint16_t)~USART_DMAReq; } } /** * @brief Sets the address of the USART node. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param USART_Address: Indicates the address of the USART node. * @retval None */ void USART_SetAddress(USART_TypeDef* USARTx, uint8_t USART_Address) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_USART_ADDRESS(USART_Address)); /* Clear the USART address */ USARTx->CR2 &= CR2_Address_Mask; /* Set the USART address node */ USARTx->CR2 |= USART_Address; } /** * @brief Selects the USART WakeUp method. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param USART_WakeUp: specifies the USART wakeup method. 
* This parameter can be one of the following values: * @arg USART_WakeUp_IdleLine: WakeUp by an idle line detection * @arg USART_WakeUp_AddressMark: WakeUp by an address mark * @retval None */ void USART_WakeUpConfig(USART_TypeDef* USARTx, uint16_t USART_WakeUp) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_USART_WAKEUP(USART_WakeUp)); USARTx->CR1 &= CR1_WAKE_Mask; USARTx->CR1 |= USART_WakeUp; } /** * @brief Determines if the USART is in mute mode or not. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param NewState: new state of the USART mute mode. * This parameter can be: ENABLE or DISABLE. * @retval None */ void USART_ReceiverWakeUpCmd(USART_TypeDef* USARTx, FunctionalState NewState) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_FUNCTIONAL_STATE(NewState)); if (NewState != DISABLE) { /* Enable the USART mute mode by setting the RWU bit in the CR1 register */ USARTx->CR1 |= CR1_RWU_Set; } else { /* Disable the USART mute mode by clearing the RWU bit in the CR1 register */ USARTx->CR1 &= CR1_RWU_Reset; } } /** * @brief Sets the USART LIN Break detection length. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param USART_LINBreakDetectLength: specifies the LIN break detection length. 
* This parameter can be one of the following values: * @arg USART_LINBreakDetectLength_10b: 10-bit break detection * @arg USART_LINBreakDetectLength_11b: 11-bit break detection * @retval None */ void USART_LINBreakDetectLengthConfig(USART_TypeDef* USARTx, uint16_t USART_LINBreakDetectLength) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_USART_LIN_BREAK_DETECT_LENGTH(USART_LINBreakDetectLength)); USARTx->CR2 &= CR2_LBDL_Mask; USARTx->CR2 |= USART_LINBreakDetectLength; } /** * @brief Enables or disables the USART’s LIN mode. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param NewState: new state of the USART LIN mode. * This parameter can be: ENABLE or DISABLE. * @retval None */ void USART_LINCmd(USART_TypeDef* USARTx, FunctionalState NewState) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_FUNCTIONAL_STATE(NewState)); if (NewState != DISABLE) { /* Enable the LIN mode by setting the LINEN bit in the CR2 register */ USARTx->CR2 |= CR2_LINEN_Set; } else { /* Disable the LIN mode by clearing the LINEN bit in the CR2 register */ USARTx->CR2 &= CR2_LINEN_Reset; } } /** * @brief Transmits single data through the USARTx peripheral. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param Data: the data to transmit. * @retval None */ void USART_SendData(USART_TypeDef* USARTx, uint16_t Data) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_USART_DATA(Data)); /* Transmit Data */ USARTx->DR = (Data & (uint16_t)0x01FF); } /** * @brief Returns the most recent received data by the USARTx peripheral. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. 
* @retval The received data. */ uint16_t USART_ReceiveData(USART_TypeDef* USARTx) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); /* Receive Data */ return (uint16_t)(USARTx->DR & (uint16_t)0x01FF); } /** * @brief Transmits break characters. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @retval None */ void USART_SendBreak(USART_TypeDef* USARTx) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); /* Send break characters */ USARTx->CR1 |= CR1_SBK_Set; } /** * @brief Sets the specified USART guard time. * @param USARTx: where x can be 1, 2 or 3 to select the USART peripheral. * @param USART_GuardTime: specifies the guard time. * @note The guard time bits are not available for UART4 and UART5. * @retval None */ void USART_SetGuardTime(USART_TypeDef* USARTx, uint8_t USART_GuardTime) { /* Check the parameters */ assert_param(IS_USART_123_PERIPH(USARTx)); /* Clear the USART Guard time */ USARTx->GTPR &= GTPR_LSB_Mask; /* Set the USART guard time */ USARTx->GTPR |= (uint16_t)((uint16_t)USART_GuardTime << 0x08); } /** * @brief Sets the system clock prescaler. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param USART_Prescaler: specifies the prescaler clock. * @note The function is used for IrDA mode with UART4 and UART5. * @retval None */ void USART_SetPrescaler(USART_TypeDef* USARTx, uint8_t USART_Prescaler) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); /* Clear the USART prescaler */ USARTx->GTPR &= GTPR_MSB_Mask; /* Set the USART prescaler */ USARTx->GTPR |= USART_Prescaler; } /** * @brief Enables or disables the USART’s Smart Card mode. * @param USARTx: where x can be 1, 2 or 3 to select the USART peripheral. * @param NewState: new state of the Smart Card mode. 
* This parameter can be: ENABLE or DISABLE. * @note The Smart Card mode is not available for UART4 and UART5. * @retval None */ void USART_SmartCardCmd(USART_TypeDef* USARTx, FunctionalState NewState) { /* Check the parameters */ assert_param(IS_USART_123_PERIPH(USARTx)); assert_param(IS_FUNCTIONAL_STATE(NewState)); if (NewState != DISABLE) { /* Enable the SC mode by setting the SCEN bit in the CR3 register */ USARTx->CR3 |= CR3_SCEN_Set; } else { /* Disable the SC mode by clearing the SCEN bit in the CR3 register */ USARTx->CR3 &= CR3_SCEN_Reset; } } /** * @brief Enables or disables NACK transmission. * @param USARTx: where x can be 1, 2 or 3 to select the USART peripheral. * @param NewState: new state of the NACK transmission. * This parameter can be: ENABLE or DISABLE. * @note The Smart Card mode is not available for UART4 and UART5. * @retval None */ void USART_SmartCardNACKCmd(USART_TypeDef* USARTx, FunctionalState NewState) { /* Check the parameters */ assert_param(IS_USART_123_PERIPH(USARTx)); assert_param(IS_FUNCTIONAL_STATE(NewState)); if (NewState != DISABLE) { /* Enable the NACK transmission by setting the NACK bit in the CR3 register */ USARTx->CR3 |= CR3_NACK_Set; } else { /* Disable the NACK transmission by clearing the NACK bit in the CR3 register */ USARTx->CR3 &= CR3_NACK_Reset; } } /** * @brief Enables or disables the USART’s Half Duplex communication. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param NewState: new state of the USART Communication. * This parameter can be: ENABLE or DISABLE. 
* @retval None */ void USART_HalfDuplexCmd(USART_TypeDef* USARTx, FunctionalState NewState) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_FUNCTIONAL_STATE(NewState)); if (NewState != DISABLE) { /* Enable the Half-Duplex mode by setting the HDSEL bit in the CR3 register */ USARTx->CR3 |= CR3_HDSEL_Set; } else { /* Disable the Half-Duplex mode by clearing the HDSEL bit in the CR3 register */ USARTx->CR3 &= CR3_HDSEL_Reset; } } /** * @brief Enables or disables the USART's 8x oversampling mode. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param NewState: new state of the USART one bit sampling method. * This parameter can be: ENABLE or DISABLE. * @note * This function has to be called before calling USART_Init() * function in order to have correct baudrate Divider value. * @retval None */ void USART_OverSampling8Cmd(USART_TypeDef* USARTx, FunctionalState NewState) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_FUNCTIONAL_STATE(NewState)); if (NewState != DISABLE) { /* Enable the 8x Oversampling mode by setting the OVER8 bit in the CR1 register */ USARTx->CR1 |= CR1_OVER8_Set; } else { /* Disable the 8x Oversampling mode by clearing the OVER8 bit in the CR1 register */ USARTx->CR1 &= CR1_OVER8_Reset; } } /** * @brief Enables or disables the USART's one bit sampling method. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param NewState: new state of the USART one bit sampling method. * This parameter can be: ENABLE or DISABLE. 
* @retval None */ void USART_OneBitMethodCmd(USART_TypeDef* USARTx, FunctionalState NewState) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_FUNCTIONAL_STATE(NewState)); if (NewState != DISABLE) { /* Enable the one bit method by setting the ONEBITE bit in the CR3 register */ USARTx->CR3 |= CR3_ONEBITE_Set; } else { /* Disable tthe one bit method by clearing the ONEBITE bit in the CR3 register */ USARTx->CR3 &= CR3_ONEBITE_Reset; } } /** * @brief Configures the USART's IrDA interface. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param USART_IrDAMode: specifies the IrDA mode. * This parameter can be one of the following values: * @arg USART_IrDAMode_LowPower * @arg USART_IrDAMode_Normal * @retval None */ void USART_IrDAConfig(USART_TypeDef* USARTx, uint16_t USART_IrDAMode) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_USART_IRDA_MODE(USART_IrDAMode)); USARTx->CR3 &= CR3_IRLP_Mask; USARTx->CR3 |= USART_IrDAMode; } /** * @brief Enables or disables the USART's IrDA interface. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param NewState: new state of the IrDA mode. * This parameter can be: ENABLE or DISABLE. * @retval None */ void USART_IrDACmd(USART_TypeDef* USARTx, FunctionalState NewState) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_FUNCTIONAL_STATE(NewState)); if (NewState != DISABLE) { /* Enable the IrDA mode by setting the IREN bit in the CR3 register */ USARTx->CR3 |= CR3_IREN_Set; } else { /* Disable the IrDA mode by clearing the IREN bit in the CR3 register */ USARTx->CR3 &= CR3_IREN_Reset; } } /** * @brief Checks whether the specified USART flag is set or not. * @param USARTx: Select the USART or the UART peripheral. 
* This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param USART_FLAG: specifies the flag to check. * This parameter can be one of the following values: * @arg USART_FLAG_CTS: CTS Change flag (not available for UART4 and UART5) * @arg USART_FLAG_LBD: LIN Break detection flag * @arg USART_FLAG_TXE: Transmit data register empty flag * @arg USART_FLAG_TC: Transmission Complete flag * @arg USART_FLAG_RXNE: Receive data register not empty flag * @arg USART_FLAG_IDLE: Idle Line detection flag * @arg USART_FLAG_ORE: OverRun Error flag * @arg USART_FLAG_NE: Noise Error flag * @arg USART_FLAG_FE: Framing Error flag * @arg USART_FLAG_PE: Parity Error flag * @retval The new state of USART_FLAG (SET or RESET). */ FlagStatus USART_GetFlagStatus(USART_TypeDef* USARTx, uint16_t USART_FLAG) { FlagStatus bitstatus = RESET; /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_USART_FLAG(USART_FLAG)); /* The CTS flag is not available for UART4 and UART5 */ if (USART_FLAG == USART_FLAG_CTS) { assert_param(IS_USART_123_PERIPH(USARTx)); } if ((USARTx->SR & USART_FLAG) != (uint16_t)RESET) { bitstatus = SET; } else { bitstatus = RESET; } return bitstatus; } /** * @brief Clears the USARTx's pending flags. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param USART_FLAG: specifies the flag to clear. * This parameter can be any combination of the following values: * @arg USART_FLAG_CTS: CTS Change flag (not available for UART4 and UART5). * @arg USART_FLAG_LBD: LIN Break detection flag. * @arg USART_FLAG_TC: Transmission Complete flag. * @arg USART_FLAG_RXNE: Receive data register not empty flag. 
* * @note * - PE (Parity error), FE (Framing error), NE (Noise error), ORE (OverRun * error) and IDLE (Idle line detected) flags are cleared by software * sequence: a read operation to USART_SR register (USART_GetFlagStatus()) * followed by a read operation to USART_DR register (USART_ReceiveData()). * - RXNE flag can be also cleared by a read to the USART_DR register * (USART_ReceiveData()). * - TC flag can be also cleared by software sequence: a read operation to * USART_SR register (USART_GetFlagStatus()) followed by a write operation * to USART_DR register (USART_SendData()). * - TXE flag is cleared only by a write to the USART_DR register * (USART_SendData()). * @retval None */ void USART_ClearFlag(USART_TypeDef* USARTx, uint16_t USART_FLAG) { /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_USART_CLEAR_FLAG(USART_FLAG)); /* The CTS flag is not available for UART4 and UART5 */ if ((USART_FLAG & USART_FLAG_CTS) == USART_FLAG_CTS) { assert_param(IS_USART_123_PERIPH(USARTx)); } USARTx->SR = (uint16_t)~USART_FLAG; } /** * @brief Checks whether the specified USART interrupt has occurred or not. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param USART_IT: specifies the USART interrupt source to check. 
* This parameter can be one of the following values: * @arg USART_IT_CTS: CTS change interrupt (not available for UART4 and UART5) * @arg USART_IT_LBD: LIN Break detection interrupt * @arg USART_IT_TXE: Tansmit Data Register empty interrupt * @arg USART_IT_TC: Transmission complete interrupt * @arg USART_IT_RXNE: Receive Data register not empty interrupt * @arg USART_IT_IDLE: Idle line detection interrupt * @arg USART_IT_ORE: OverRun Error interrupt * @arg USART_IT_NE: Noise Error interrupt * @arg USART_IT_FE: Framing Error interrupt * @arg USART_IT_PE: Parity Error interrupt * @retval The new state of USART_IT (SET or RESET). */ ITStatus USART_GetITStatus(USART_TypeDef* USARTx, uint16_t USART_IT) { uint32_t bitpos = 0x00, itmask = 0x00, usartreg = 0x00; ITStatus bitstatus = RESET; /* Check the parameters */ assert_param(IS_USART_ALL_PERIPH(USARTx)); assert_param(IS_USART_GET_IT(USART_IT)); /* The CTS interrupt is not available for UART4 and UART5 */ if (USART_IT == USART_IT_CTS) { assert_param(IS_USART_123_PERIPH(USARTx)); } /* Get the USART register index */ usartreg = (((uint8_t)USART_IT) >> 0x05); /* Get the interrupt position */ itmask = USART_IT & IT_Mask; itmask = (uint32_t)0x01 << itmask; if (usartreg == 0x01) /* The IT is in CR1 register */ { itmask &= USARTx->CR1; } else if (usartreg == 0x02) /* The IT is in CR2 register */ { itmask &= USARTx->CR2; } else /* The IT is in CR3 register */ { itmask &= USARTx->CR3; } bitpos = USART_IT >> 0x08; bitpos = (uint32_t)0x01 << bitpos; bitpos &= USARTx->SR; if ((itmask != (uint16_t)RESET)&&(bitpos != (uint16_t)RESET)) { bitstatus = SET; } else { bitstatus = RESET; } return bitstatus; } /** * @brief Clears the USARTx's interrupt pending bits. * @param USARTx: Select the USART or the UART peripheral. * This parameter can be one of the following values: * USART1, USART2, USART3, UART4 or UART5. * @param USART_IT: specifies the interrupt pending bit to clear. 
* This parameter can be one of the following values:
  *   @arg USART_IT_CTS:  CTS change interrupt (not available for UART4 and UART5)
  *   @arg USART_IT_LBD:  LIN Break detection interrupt
  *   @arg USART_IT_TC:   Transmission complete interrupt.
  *   @arg USART_IT_RXNE: Receive Data register not empty interrupt.
  *
  * @note
  *   - PE (Parity error), FE (Framing error), NE (Noise error), ORE (OverRun
  *     error) and IDLE (Idle line detected) pending bits are cleared by
  *     software sequence: a read operation to USART_SR register
  *     (USART_GetITStatus()) followed by a read operation to USART_DR register
  *     (USART_ReceiveData()).
  *   - RXNE pending bit can be also cleared by a read to the USART_DR register
  *     (USART_ReceiveData()).
  *   - TC pending bit can be also cleared by software sequence: a read
  *     operation to USART_SR register (USART_GetITStatus()) followed by a write
  *     operation to USART_DR register (USART_SendData()).
  *   - TXE pending bit is cleared only by a write to the USART_DR register
  *     (USART_SendData()).
  * @retval None
  */
void USART_ClearITPendingBit(USART_TypeDef* USARTx, uint16_t USART_IT)
{
  uint16_t flagpos = 0x00, flagmask = 0x00;

  /* Check the parameters */
  assert_param(IS_USART_ALL_PERIPH(USARTx));
  assert_param(IS_USART_CLEAR_IT(USART_IT));
  /* The CTS interrupt is not available for UART4 and UART5 */
  if (USART_IT == USART_IT_CTS)
  {
    assert_param(IS_USART_123_PERIPH(USARTx));
  }

  /* Bits [15:8] of USART_IT hold the SR bit position of the pending flag */
  flagpos = USART_IT >> 0x08;
  flagmask = ((uint16_t)0x01 << (uint16_t)flagpos);
  /* Writing 0 to the flag position clears it; all other bits stay set */
  USARTx->SR = (uint16_t)~flagmask;
}

/**
  * @}
  */

/**
  * @}
  */

/**
  * @}
  */

/******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE****/
gpl-2.0
doungni/linux
drivers/video/fbdev/imsttfb.c
805
44352
/*
 *  drivers/video/imsttfb.c -- frame buffer device for IMS TwinTurbo
 *
 *  This file is derived from the powermac console "imstt" driver:
 *	Copyright (C) 1997 Sigurdur Asgeirsson
 *	With additional hacking by Jeffrey Kuskin (jsk@mojave.stanford.edu)
 *	Modified by Danilo Beuche 1998
 *	Some register values added by Damien Doligez, INRIA Rocquencourt
 *	Various cleanups by Paul Mundt (lethal@chaoticdreams.org)
 *
 *  This file was written by Ryan Nielsen (ran@krazynet.com)
 *  Most of the frame buffer device stuff was copied from atyfb.c
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License. See the file COPYING in the main directory of this archive for
 *  more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <linux/uaccess.h>

#if defined(CONFIG_PPC)
#include <linux/nvram.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include "macmodes.h"
#endif

/*
 * eieio() is a PowerPC barrier that keeps MMIO accesses in program order.
 * On non-PowerPC builds the register writes below don't need it, so it
 * compiles away to nothing.
 */
#ifndef __powerpc__
#define eieio()		/* Enforce In-order Execution of I/O */
#endif

/* TwinTurbo (Cosmo) registers -- word indices into the dc_regs MMIO block */
enum {
	S1SA	=  0, /* 0x00 */
	S2SA	=  1, /* 0x04 */
	SP	=  2, /* 0x08 */
	DSA	=  3, /* 0x0C */
	CNT	=  4, /* 0x10 */
	DP_OCTL	=  5, /* 0x14 */
	CLR	=  6, /* 0x18 */
	BI	=  8, /* 0x20 */
	MBC	=  9, /* 0x24 */
	BLTCTL	= 10, /* 0x28 */

	/* Scan Timing Generator Registers */
	HES	= 12, /* 0x30 */
	HEB	= 13, /* 0x34 */
	HSB	= 14, /* 0x38 */
	HT	= 15, /* 0x3C */
	VES	= 16, /* 0x40 */
	VEB	= 17, /* 0x44 */
	VSB	= 18, /* 0x48 */
	VT	= 19, /* 0x4C */
	HCIV	= 20, /* 0x50 */
	VCIV	= 21, /* 0x54 */
	TCDR	= 22, /* 0x58 */
	VIL	= 23, /* 0x5C */
	STGCTL	= 24, /* 0x60 */

	/* Screen Refresh Generator Registers */
	SSR	= 25, /* 0x64 */
	HRIR	= 26, /* 0x68 */
	SPR	= 27, /* 0x6C */
	CMR	= 28, /* 0x70 */
	SRGCTL	= 29, /* 0x74 */

	/* RAM Refresh Generator Registers */
	RRCIV	= 30, /* 0x78 */
	RRSC	= 31, /* 0x7C */
	RRCR	= 34, /* 0x88 */

	/* System Registers */
	GIOE	= 32, /* 0x80 */
	GIO	= 33, /* 0x84 */
	SCR	= 35, /* 0x8C */
	SSTATUS	= 36, /* 0x90 */
	PRC	= 37, /* 0x94 */

#if 0
	/* PCI Registers */
	DVID	= 0x00000000L,
	SC	= 0x00000004L,
	CCR	= 0x00000008L,
	OG	= 0x0000000CL,
	BARM	= 0x00000010L,
	BARER	= 0x00000030L,
#endif
};

/* IBM 624 RAMDAC Direct Registers -- byte offsets into cmap_regs */
enum {
	PADDRW	= 0x00,
	PDATA	= 0x04,
	PPMASK	= 0x08,
	PADDRR	= 0x0c,
	PIDXLO	= 0x10,
	PIDXHI	= 0x14,
	PIDXDATA= 0x18,
	PIDXCTL	= 0x1c
};

/*
 * IBM 624 RAMDAC Indirect Registers, reached by writing the index to
 * PIDXHI/PIDXLO and then the value to PIDXDATA.  Parenthesized values
 * are the power-on defaults noted by the original authors.
 */
enum {
	CLKCTL		= 0x02,	/* (0x01)	Miscellaneous Clock Control */
	SYNCCTL		= 0x03,	/* (0x00)	Sync Control */
	HSYNCPOS	= 0x04,	/* (0x00)	Horizontal Sync Position */
	PWRMNGMT	= 0x05,	/* (0x00)	Power Management */
	DACOP		= 0x06,	/* (0x02)	DAC Operation */
	PALETCTL	= 0x07,	/* (0x00)	Palette Control */
	SYSCLKCTL	= 0x08,	/* (0x01)	System Clock Control */
	PIXFMT		= 0x0a,	/* ()		Pixel Format  [bpp >> 3 + 2] */
	BPP8		= 0x0b,	/* ()		8 Bits/Pixel Control */
	BPP16		= 0x0c,	/* ()		16 Bits/Pixel Control [bit 1=1 for 565] */
	BPP24		= 0x0d,	/* ()		24 Bits/Pixel Control */
	BPP32		= 0x0e,	/* ()		32 Bits/Pixel Control */
	PIXCTL1		= 0x10,	/* (0x05)	Pixel PLL Control 1 */
	PIXCTL2		= 0x11,	/* (0x00)	Pixel PLL Control 2 */
	SYSCLKN		= 0x15,	/* ()		System Clock N (System PLL Reference Divider) */
	SYSCLKM		= 0x16,	/* ()		System Clock M (System PLL VCO Divider) */
	SYSCLKP		= 0x17,	/* ()		System Clock P */
	SYSCLKC		= 0x18,	/* ()		System Clock C */
	/*
	 * Dot clock rate is 20MHz * (m + 1) / ((n + 1) * (p ? 2 * p : 1)
	 * c is charge pump bias which depends on the VCO frequency
	 */
	PIXM0		= 0x20,	/* ()		Pixel M 0 */
	PIXN0		= 0x21,	/* ()		Pixel N 0 */
	PIXP0		= 0x22,	/* ()		Pixel P 0 */
	PIXC0		= 0x23,	/* ()		Pixel C 0 */
	CURSCTL		= 0x30,	/* (0x00)	Cursor Control */
	CURSXLO		= 0x31,	/* ()		Cursor X position, low 8 bits */
	CURSXHI		= 0x32,	/* ()		Cursor X position, high 8 bits */
	CURSYLO		= 0x33,	/* ()		Cursor Y position, low 8 bits */
	CURSYHI		= 0x34,	/* ()		Cursor Y position, high 8 bits */
	CURSHOTX	= 0x35,	/* ()		Cursor Hot Spot X */
	CURSHOTY	= 0x36,	/* ()		Cursor Hot Spot Y */
	CURSACCTL	= 0x37,	/* ()		Advanced Cursor Control Enable */
	CURSACATTR	= 0x38,	/* ()		Advanced Cursor Attribute */
	CURS1R		= 0x40,	/* ()		Cursor 1 Red */
	CURS1G		= 0x41,	/* ()		Cursor 1 Green */
	CURS1B		= 0x42,	/* ()		Cursor 1 Blue */
	CURS2R		= 0x43,	/* ()		Cursor 2 Red */
	CURS2G		= 0x44,	/* ()		Cursor 2 Green */
	CURS2B		= 0x45,	/* ()		Cursor 2 Blue */
	CURS3R		= 0x46,	/* ()		Cursor 3 Red */
	CURS3G		= 0x47,	/* ()		Cursor 3 Green */
	CURS3B		= 0x48,	/* ()		Cursor 3 Blue */
	BORDR		= 0x60,	/* ()		Border Color Red */
	BORDG		= 0x61,	/* ()		Border Color Green */
	BORDB		= 0x62,	/* ()		Border Color Blue */
	MISCTL1		= 0x70,	/* (0x00)	Miscellaneous Control 1 */
	MISCTL2		= 0x71,	/* (0x00)	Miscellaneous Control 2 */
	MISCTL3		= 0x72,	/* (0x00)	Miscellaneous Control 3 */
	KEYCTL		= 0x78	/* (0x00)	Key Control/DB Operation */
};

/* TI TVP 3030 RAMDAC Direct Registers -- byte offsets into cmap_regs */
enum {
	TVPADDRW = 0x00,	/*  0  Palette/Cursor RAM Write Address/Index */
	TVPPDATA = 0x04,	/*  1  Palette Data RAM Data */
	TVPPMASK = 0x08,	/*  2  Pixel Read-Mask */
	TVPPADRR = 0x0c,	/*  3  Palette/Cursor RAM Read Address */
	TVPCADRW = 0x10,	/*  4  Cursor/Overscan Color Write Address */
	TVPCDATA = 0x14,	/*  5  Cursor/Overscan Color Data */
				/*  6  reserved */
	TVPCADRR = 0x1c,	/*  7  Cursor/Overscan Color Read Address */
				/*  8  reserved */
	TVPDCCTL = 0x24,	/*  9  Direct Cursor Control */
	TVPIDATA = 0x28,	/* 10  Index Data */
	TVPCRDAT = 0x2c,	/* 11  Cursor RAM Data */
	TVPCXPOL = 0x30,	/* 12  Cursor-Position X LSB */
	TVPCXPOH = 0x34,	/* 13  Cursor-Position X MSB */
	TVPCYPOL = 0x38,	/* 14  Cursor-Position Y LSB */
	TVPCYPOH = 0x3c,	/* 15  Cursor-Position Y MSB */
};

/*
 * TI TVP 3030 RAMDAC Indirect Registers, reached by writing the index
 * to TVPADDRW and the value to TVPIDATA.  Parenthesized values are the
 * expected defaults.
 */
enum {
	TVPIRREV = 0x01,	/* Silicon Revision [RO] */
	TVPIRICC = 0x06,	/* Indirect Cursor Control	(0x00) */
	TVPIRBRC = 0x07,	/* Byte Router Control	(0xe4) */
	TVPIRLAC = 0x0f,	/* Latch Control		(0x06) */
	TVPIRTCC = 0x18,	/* True Color Control	(0x80) */
	TVPIRMXC = 0x19,	/* Multiplex Control		(0x98) */
	TVPIRCLS = 0x1a,	/* Clock Selection		(0x07) */
	TVPIRPPG = 0x1c,	/* Palette Page		(0x00) */
	TVPIRGEC = 0x1d,	/* General Control		(0x00) */
	TVPIRMIC = 0x1e,	/* Miscellaneous Control	(0x00) */
	TVPIRPLA = 0x2c,	/* PLL Address */
	TVPIRPPD = 0x2d,	/* Pixel Clock PLL Data */
	TVPIRMPD = 0x2e,	/* Memory Clock PLL Data */
	TVPIRLPD = 0x2f,	/* Loop Clock PLL Data */
	TVPIRCKL = 0x30,	/* Color-Key Overlay Low */
	TVPIRCKH = 0x31,	/* Color-Key Overlay High */
	TVPIRCRL = 0x32,	/* Color-Key Red Low */
	TVPIRCRH = 0x33,	/* Color-Key Red High */
	TVPIRCGL = 0x34,	/* Color-Key Green Low */
	TVPIRCGH = 0x35,	/* Color-Key Green High */
	TVPIRCBL = 0x36,	/* Color-Key Blue Low */
	TVPIRCBH = 0x37,	/* Color-Key Blue High */
	TVPIRCKC = 0x38,	/* Color-Key Control		(0x00) */
	TVPIRMLC = 0x39,	/* MCLK/Loop Clock Control	(0x18) */
	TVPIRSEN = 0x3a,	/* Sense Test		(0x00) */
	TVPIRTMD = 0x3b,	/* Test Mode Data */
	TVPIRRML = 0x3c,	/* CRC Remainder LSB [RO] */
	TVPIRRMM = 0x3d,	/* CRC Remainder MSB [RO] */
	TVPIRRMS = 0x3e,	/* CRC Bit Select [WO] */
	TVPIRDID = 0x3f,	/* Device ID [RO]		(0x30) */
	TVPIRRES = 0xff		/* Software Reset [WO] */
};

/* One (indirect register, value) pair for the DAC init tables below */
struct initvalues {
	__u8 addr, value;
};

/* Power-up programming sequence for the IBM 624 RAMDAC */
static struct initvalues ibm_initregs[] = {
	{ CLKCTL,	0x21 },
	{ SYNCCTL,	0x00 },
	{ HSYNCPOS,	0x00 },
	{ PWRMNGMT,	0x00 },
	{ DACOP,	0x02 },
	{ PALETCTL,	0x00 },
	{ SYSCLKCTL,	0x01 },

	/*
	 * Note that colors in X are correct only if all video data is
	 * passed through the palette in the DAC.  That is, "indirect
	 * color" must be configured.  This is the case for the IBM DAC
	 * used in the 2MB and 4MB cards, at least.
	 */
	{ BPP8,		0x00 },
	{ BPP16,	0x01 },
	{ BPP24,	0x00 },
	{ BPP32,	0x00 },

	{ PIXCTL1,	0x05 },
	{ PIXCTL2,	0x00 },
	{ SYSCLKN,	0x08 },
	{ SYSCLKM,	0x4f },
	{ SYSCLKP,	0x00 },
	{ SYSCLKC,	0x00 },
	{ CURSCTL,	0x00 },
	{ CURSACCTL,	0x01 },
	{ CURSACATTR,	0xa8 },
	{ CURS1R,	0xff },
	{ CURS1G,	0xff },
	{ CURS1B,	0xff },
	{ CURS2R,	0xff },
	{ CURS2G,	0xff },
	{ CURS2B,	0xff },
	{ CURS3R,	0xff },
	{ CURS3G,	0xff },
	{ CURS3B,	0xff },
	{ BORDR,	0xff },
	{ BORDG,	0xff },
	{ BORDB,	0xff },
	{ MISCTL1,	0x01 },
	{ MISCTL2,	0x45 },
	{ MISCTL3,	0x00 },
	{ KEYCTL,	0x00 }
};

/*
 * Power-up programming sequence for the TVP 3030 RAMDAC.  Repeated
 * TVPIRPPD/TVPIRMPD/TVPIRLPD entries are intentional: each PLL takes
 * three successive data writes after the TVPIRPLA address write.
 */
static struct initvalues tvp_initregs[] = {
	{ TVPIRICC,	0x00 },
	{ TVPIRBRC,	0xe4 },
	{ TVPIRLAC,	0x06 },
	{ TVPIRTCC,	0x80 },
	{ TVPIRMXC,	0x4d },
	{ TVPIRCLS,	0x05 },
	{ TVPIRPPG,	0x00 },
	{ TVPIRGEC,	0x00 },
	{ TVPIRMIC,	0x08 },
	{ TVPIRCKL,	0xff },
	{ TVPIRCKH,	0xff },
	{ TVPIRCRL,	0xff },
	{ TVPIRCRH,	0xff },
	{ TVPIRCGL,	0xff },
	{ TVPIRCGH,	0xff },
	{ TVPIRCBL,	0xff },
	{ TVPIRCBH,	0xff },
	{ TVPIRCKC,	0x00 },
	{ TVPIRPLA,	0x00 },
	{ TVPIRPPD,	0xc0 },
	{ TVPIRPPD,	0xd5 },
	{ TVPIRPPD,	0xea },
	{ TVPIRPLA,	0x00 },
	{ TVPIRMPD,	0xb9 },
	{ TVPIRMPD,	0x3a },
	{ TVPIRMPD,	0xb1 },
	{ TVPIRPLA,	0x00 },
	{ TVPIRLPD,	0xc1 },
	{ TVPIRLPD,	0x3d },
	{ TVPIRLPD,	0xf3 },
};

/* Precomputed CRTC timings and pixel-clock PLL dividers for one mode */
struct imstt_regvals {
	__u32 pitch;
	__u16 hes, heb, hsb, ht, ves, veb, vsb, vt, vil;
	__u8 pclk_m, pclk_n, pclk_p;
	/* Values of the tvp which change depending on colormode x resolution */
	__u8 mlc[3];	/* Memory Loop Config 0x39 */
	__u8 lckl_p[3];	/* P value of LCKL PLL */
};

/* Per-device driver state, hung off fb_info->par */
struct imstt_par {
	struct imstt_regvals init;	/* active mode timings */
	__u32 __iomem *dc_regs;		/* TwinTurbo display controller MMIO */
	unsigned long cmap_regs_phys;	/* physical address of RAMDAC regs */
	__u8 *cmap_regs;		/* mapped RAMDAC registers */
	__u32 ramdac;			/* IBM or TVP, set from the PCI id */
	__u32 palette[16];		/* pseudo-palette for fbcon */
};

enum {
	IBM = 0,
	TVP = 1
};

#define USE_NV_MODES		1
#define INIT_BPP		8
#define INIT_XRES		640
#define INIT_YRES		480

static int inverse = 0;
static char fontname[40] __initdata = { 0 };
#if defined(CONFIG_PPC)
static signed char init_vmode = -1, init_cmode = -1;
#endif

/*
 * Canned TVP mode tables, one per supported resolution.  The "_N"
 * suffix follows the Mac video-mode numbering used by macmodes.h.
 */
static struct imstt_regvals tvp_reg_init_2 = {
	512,
	0x0002, 0x0006, 0x0026, 0x0028, 0x0003, 0x0016, 0x0196, 0x0197, 0x0196,
	0xec, 0x2a, 0xf3,
	{ 0x3c, 0x3b, 0x39 }, { 0xf3, 0xf3, 0xf3 }
};

static struct imstt_regvals tvp_reg_init_6 = {
	640,
	0x0004, 0x0009, 0x0031, 0x0036, 0x0003, 0x002a, 0x020a, 0x020d, 0x020a,
	0xef, 0x2e, 0xb2,
	{ 0x39, 0x39, 0x38 }, { 0xf3, 0xf3, 0xf3 }
};

static struct imstt_regvals tvp_reg_init_12 = {
	800,
	0x0005, 0x000e, 0x0040, 0x0042, 0x0003, 0x018, 0x270, 0x271, 0x270,
	0xf6, 0x2e, 0xf2,
	{ 0x3a, 0x39, 0x38 }, { 0xf3, 0xf3, 0xf3 }
};

static struct imstt_regvals tvp_reg_init_13 = {
	832,
	0x0004, 0x0011, 0x0045, 0x0048, 0x0003, 0x002a, 0x029a, 0x029b, 0x0000,
	0xfe, 0x3e, 0xf1,
	{ 0x39, 0x38, 0x38 }, { 0xf3, 0xf3, 0xf2 }
};

static struct imstt_regvals tvp_reg_init_17 = {
	1024,
	0x0006, 0x0210, 0x0250, 0x0053, 0x1003, 0x0021, 0x0321, 0x0324, 0x0000,
	0xfc, 0x3a, 0xf1,
	{ 0x39, 0x38, 0x38 }, { 0xf3, 0xf3, 0xf2 }
};

static struct imstt_regvals tvp_reg_init_18 = {
	1152,
	0x0009, 0x0011, 0x059, 0x5b, 0x0003, 0x0031, 0x0397, 0x039a, 0x0000,
	0xfd, 0x3a, 0xf1,
	{ 0x39, 0x38, 0x38 }, { 0xf3, 0xf3, 0xf2 }
};

static struct imstt_regvals tvp_reg_init_19 = {
	1280,
	0x0009, 0x0016, 0x0066, 0x0069, 0x0003, 0x0027, 0x03e7, 0x03e8, 0x03e7,
	0xf7, 0x36, 0xf0,
	{ 0x38, 0x38, 0x38 }, { 0xf3, 0xf2, 0xf1 }
};

static struct imstt_regvals tvp_reg_init_20 = {
	1280,
	0x0009, 0x0018, 0x0068, 0x006a, 0x0003, 0x0029, 0x0429, 0x042a, 0x0000,
	0xf0, 0x2d, 0xf0,
	{ 0x38, 0x38, 0x38 }, { 0xf3, 0xf2, 0xf1 }
};

/*
 * PCI driver prototypes
 */
static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void imsttfb_remove(struct pci_dev *pdev);

/*
 * Register access -- the chip's registers are little-endian, so on
 * big-endian PowerPC the in_le32/out_le32 accessors are used instead
 * of plain readl/writel.
 */
static inline u32 read_reg_le32(volatile u32 __iomem *base, int regindex)
{
#ifdef __powerpc__
	return in_le32(base + regindex);
#else
	return readl(base + regindex);
#endif
}

static inline void write_reg_le32(volatile u32 __iomem *base, int regindex, u32 val)
{
#ifdef __powerpc__
	out_le32(base + regindex, val);
#else
	writel(val, base + regindex);
#endif
}

/*
 * getclkMHz - derive the current dot clock (in MHz) from the PLL
 * dividers stored in par->init, using the formula documented next to
 * the PIXM0/PIXN0/PIXP0 register definitions above.
 */
static __u32
getclkMHz(struct imstt_par *par)
{
	__u32 clk_m, clk_n, clk_p;

	clk_m = par->init.pclk_m;
	clk_n = par->init.pclk_n;
	clk_p = par->init.pclk_p;

	return 20 * (clk_m + 1) / ((clk_n + 1) * (clk_p ? 2 * clk_p : 1));
}

/*
 * setclkMHz - search for PLL dividers (m, n; p fixed at 0) whose
 * resulting clock equals the requested MHz and store them in par->init.
 *
 * NOTE(review): the loop only terminates when an exact match exists;
 * callers pass known-achievable frequencies from the mode tables.
 */
static void
setclkMHz(struct imstt_par *par, __u32 MHz)
{
	__u32 clk_m, clk_n, x, stage, spilled;

	clk_m = clk_n = 0;
	stage = spilled = 0;
	for (;;) {
		switch (stage) {
			case 0:
				clk_m++;
				break;
			case 1:
				clk_n++;
				break;
		}
		x = 20 * (clk_m + 1) / (clk_n + 1);
		if (x == MHz)
			break;
		if (x > MHz) {
			spilled = 1;
			stage = 1;
		} else if (spilled && x < MHz) {
			stage = 0;
		}
	}

	par->init.pclk_m = clk_m;
	par->init.pclk_n = clk_n;
	par->init.pclk_p = 0;
}

/*
 * compute_imstt_regvals_ibm - fill par->init with CRTC timings for the
 * given resolution on IBM-DAC cards.  Timings are synthesized from a
 * per-resolution table of porch/blank constants; returns NULL for
 * unsupported widths.
 */
static struct imstt_regvals *
compute_imstt_regvals_ibm(struct imstt_par *par, int xres, int yres)
{
	struct imstt_regvals *init = &par->init;
	__u32 MHz, hes, heb, veb, htp, vtp;

	switch (xres) {
		case 640:
			hes = 0x0008; heb = 0x0012; veb = 0x002a; htp = 10; vtp = 2;
			MHz = 30 /* .25 */ ;
			break;
		case 832:
			hes = 0x0005; heb = 0x0020; veb = 0x0028; htp = 8; vtp = 3;
			MHz = 57 /* .27_ */ ;
			break;
		case 1024:
			hes = 0x000a; heb = 0x001c; veb = 0x0020; htp = 8; vtp = 3;
			MHz = 80;
			break;
		case 1152:
			hes = 0x0012; heb = 0x0022; veb = 0x0031; htp = 4; vtp = 3;
			MHz = 101 /* .6_ */ ;
			break;
		case 1280:
			hes = 0x0012; heb = 0x002f; veb = 0x0029; htp = 4; vtp = 1;
			MHz = yres == 960 ? 126 : 135;
			break;
		case 1600:
			hes = 0x0018; heb = 0x0040; veb = 0x002a; htp = 4; vtp = 3;
			MHz = 200;
			break;
		default:
			return NULL;
	}

	setclkMHz(par, MHz);

	init->hes = hes;
	init->heb = heb;
	init->hsb = init->heb + (xres >> 3);
	init->ht = init->hsb + htp;
	init->ves = 0x0003;
	init->veb = veb;
	init->vsb = init->veb + yres;
	init->vt = init->vsb + vtp;
	init->vil = init->vsb;

	init->pitch = xres;
	return init;
}

/*
 * compute_imstt_regvals_tvp - copy the canned mode table matching the
 * resolution into par->init for TVP-DAC cards; NULL if unsupported.
 */
static struct imstt_regvals *
compute_imstt_regvals_tvp(struct imstt_par *par, int xres, int yres)
{
	struct imstt_regvals *init;

	switch (xres) {
		case 512:
			init = &tvp_reg_init_2;
			break;
		case 640:
			init = &tvp_reg_init_6;
			break;
		case 800:
			init = &tvp_reg_init_12;
			break;
		case 832:
			init = &tvp_reg_init_13;
			break;
		case 1024:
			init = &tvp_reg_init_17;
			break;
		case 1152:
			init = &tvp_reg_init_18;
			break;
		case 1280:
			init = yres == 960 ? &tvp_reg_init_19 : &tvp_reg_init_20;
			break;
		default:
			return NULL;
	}
	par->init = *init;
	return init;
}

/* Dispatch to the IBM or TVP timing generator based on the fitted DAC */
static struct imstt_regvals *
compute_imstt_regvals (struct imstt_par *par, u_int xres, u_int yres)
{
	if (par->ramdac == IBM)
		return compute_imstt_regvals_ibm(par, xres, yres);
	else
		return compute_imstt_regvals_tvp(par, xres, yres);
}

/*
 * set_imstt_regvals_ibm - program the IBM DAC pixel PLL and pixel
 * format via the indexed register interface.  Each index write must be
 * ordered before the data write, hence the eieio() after every access.
 */
static void
set_imstt_regvals_ibm (struct imstt_par *par, u_int bpp)
{
	struct imstt_regvals *init = &par->init;
	__u8 pformat = (bpp >> 3) + 2;	/* PIXFMT encoding: 8bpp->3 ... 32bpp->6 */

	par->cmap_regs[PIDXHI] = 0;		eieio();
	par->cmap_regs[PIDXLO] = PIXM0;		eieio();
	par->cmap_regs[PIDXDATA] = init->pclk_m;eieio();
	par->cmap_regs[PIDXLO] = PIXN0;		eieio();
	par->cmap_regs[PIDXDATA] = init->pclk_n;eieio();
	par->cmap_regs[PIDXLO] = PIXP0;		eieio();
	par->cmap_regs[PIDXDATA] = init->pclk_p;eieio();
	par->cmap_regs[PIDXLO] = PIXC0;		eieio();
	par->cmap_regs[PIDXDATA] = 0x02;	eieio();

	par->cmap_regs[PIDXLO] = PIXFMT;	eieio();
	par->cmap_regs[PIDXDATA] = pformat;	eieio();
}

/*
 * set_imstt_regvals_tvp - program the TVP DAC true-color/multiplex
 * controls and the pixel + loop clock PLLs for the given depth.  The
 * per-depth constants and the mlc/lckl_p values come from the mode
 * table in par->init.
 */
static void
set_imstt_regvals_tvp (struct imstt_par *par, u_int bpp)
{
	struct imstt_regvals *init = &par->init;
	__u8 tcc, mxc, lckl_n, mic;
	__u8 mlc, lckl_p;

	switch (bpp) {
		default:
		case 8:
			tcc = 0x80;
			mxc = 0x4d;
			lckl_n = 0xc1;
			mlc = init->mlc[0];
			lckl_p = init->lckl_p[0];
			break;
		case 16:
			tcc = 0x44;
			mxc = 0x55;
			lckl_n = 0xe1;
			mlc = init->mlc[1];
			lckl_p = init->lckl_p[1];
			break;
		case 24:
			tcc = 0x5e;
			mxc = 0x5d;
			lckl_n = 0xf1;
			mlc = init->mlc[2];
			lckl_p = init->lckl_p[2];
			break;
		case 32:
			tcc = 0x46;
			mxc = 0x5d;
			lckl_n = 0xf1;
			mlc = init->mlc[2];
			lckl_p = init->lckl_p[2];
			break;
	}
	mic = 0x08;

	/* pixel clock PLL: address, then m/n/p data writes */
	par->cmap_regs[TVPADDRW] = TVPIRPLA;	eieio();
	par->cmap_regs[TVPIDATA] = 0x00;	eieio();
	par->cmap_regs[TVPADDRW] = TVPIRPPD;	eieio();
	par->cmap_regs[TVPIDATA] = init->pclk_m;	eieio();
	par->cmap_regs[TVPADDRW] = TVPIRPPD;	eieio();
	par->cmap_regs[TVPIDATA] = init->pclk_n;	eieio();
	par->cmap_regs[TVPADDRW] = TVPIRPPD;	eieio();
	par->cmap_regs[TVPIDATA] = init->pclk_p;	eieio();

	par->cmap_regs[TVPADDRW] = TVPIRTCC;	eieio();
	par->cmap_regs[TVPIDATA] = tcc;		eieio();
	par->cmap_regs[TVPADDRW] = TVPIRMXC;	eieio();
	par->cmap_regs[TVPIDATA] = mxc;		eieio();
	par->cmap_regs[TVPADDRW] = TVPIRMIC;	eieio();
	par->cmap_regs[TVPIDATA] = mic;		eieio();

	/* loop clock PLL */
	par->cmap_regs[TVPADDRW] = TVPIRPLA;	eieio();
	par->cmap_regs[TVPIDATA] = 0x00;	eieio();
	par->cmap_regs[TVPADDRW] = TVPIRLPD;	eieio();
	par->cmap_regs[TVPIDATA] = lckl_n;	eieio();

	par->cmap_regs[TVPADDRW] = TVPIRPLA;	eieio();
	par->cmap_regs[TVPIDATA] = 0x15;	eieio();
	par->cmap_regs[TVPADDRW] = TVPIRMLC;	eieio();
	par->cmap_regs[TVPIDATA] = mlc;		eieio();

	par->cmap_regs[TVPADDRW] = TVPIRPLA;	eieio();
	par->cmap_regs[TVPIDATA] = 0x2a;	eieio();
	par->cmap_regs[TVPADDRW] = TVPIRLPD;	eieio();
	par->cmap_regs[TVPIDATA] = lckl_p;	eieio();
}

/*
 * set_imstt_regvals - program the DAC (via the helpers above) and the
 * TwinTurbo scan/refresh generators for the current mode and depth.
 */
static void
set_imstt_regvals (struct fb_info *info, u_int bpp)
{
	struct imstt_par *par = info->par;
	struct imstt_regvals *init = &par->init;
	__u32 ctl, pitch, byteswap, scr;

	if (par->ramdac == IBM)
		set_imstt_regvals_ibm(par, bpp);
	else
		set_imstt_regvals_tvp(par, bpp);

  /*
   * From what I (jsk) can gather poking around with MacsBug,
   * bits 8 and 9 in the SCR register control endianness
   * correction (byte swapping).
   * These bits must be set according to the color depth as follows:
   *     Color depth    Bit 9   Bit 8
   *     ==========     =====   =====
   *        8bpp          0       0
   *       16bpp          0       1
   *       32bpp          1       1
   */
	switch (bpp) {
		default:
		case 8:
			ctl = 0x17b1;
			pitch = init->pitch >> 2;
			byteswap = 0x000;
			break;
		case 16:
			ctl = 0x17b3;
			pitch = init->pitch >> 1;
			byteswap = 0x100;
			break;
		case 24:
			ctl = 0x17b9;
			pitch = init->pitch - (init->pitch >> 2);
			byteswap = 0x200;
			break;
		case 32:
			ctl = 0x17b5;
			pitch = init->pitch;
			byteswap = 0x300;
			break;
	}
	if (par->ramdac == TVP)
		ctl -= 0x30;

	write_reg_le32(par->dc_regs, HES, init->hes);
	write_reg_le32(par->dc_regs, HEB, init->heb);
	write_reg_le32(par->dc_regs, HSB, init->hsb);
	write_reg_le32(par->dc_regs, HT, init->ht);
	write_reg_le32(par->dc_regs, VES, init->ves);
	write_reg_le32(par->dc_regs, VEB, init->veb);
	write_reg_le32(par->dc_regs, VSB, init->vsb);
	write_reg_le32(par->dc_regs, VT, init->vt);
	write_reg_le32(par->dc_regs, VIL, init->vil);
	write_reg_le32(par->dc_regs, HCIV, 1);
	write_reg_le32(par->dc_regs, VCIV, 1);
	write_reg_le32(par->dc_regs, TCDR, 4);
	write_reg_le32(par->dc_regs, RRCIV, 1);
	write_reg_le32(par->dc_regs, RRSC, 0x980);
	write_reg_le32(par->dc_regs, RRCR, 0x11);

	if (par->ramdac == IBM) {
		write_reg_le32(par->dc_regs, HRIR, 0x0100);
		write_reg_le32(par->dc_regs, CMR, 0x00ff);
		write_reg_le32(par->dc_regs, SRGCTL, 0x0073);
	} else {
		write_reg_le32(par->dc_regs, HRIR, 0x0200);
		write_reg_le32(par->dc_regs, CMR, 0x01ff);
		write_reg_le32(par->dc_regs, SRGCTL, 0x0003);
	}

	switch (info->fix.smem_len) {
		case 0x200000:
			scr = 0x059d | byteswap;
			break;
		/* case 0x400000:
		   case 0x800000: */
		default:
			pitch >>= 1;
			scr = 0x150dd | byteswap;
			break;
	}

	write_reg_le32(par->dc_regs, SCR, scr);
	write_reg_le32(par->dc_regs, SPR, pitch);

	write_reg_le32(par->dc_regs, STGCTL, ctl);
}

/*
 * set_offset - program the screen start offset (SSR) from the pan
 * offsets; the hardware register is in 8-byte units, hence the >> 3s.
 */
static inline void
set_offset (struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct imstt_par *par = info->par;
	__u32 off = var->yoffset * (info->fix.line_length >> 3)
		    + ((var->xoffset * (info->var.bits_per_pixel >> 3)) >> 3);
	write_reg_le32(par->dc_regs, SSR, off);
}

/* Switch the DAC to RGB 555 16bpp layout */
static inline void
set_555 (struct imstt_par *par)
{
	if (par->ramdac == IBM) {
		par->cmap_regs[PIDXHI] = 0;		eieio();
		par->cmap_regs[PIDXLO] = BPP16;		eieio();
		par->cmap_regs[PIDXDATA] = 0x01;	eieio();
	} else {
		par->cmap_regs[TVPADDRW] = TVPIRTCC;	eieio();
		par->cmap_regs[TVPIDATA] = 0x44;	eieio();
	}
}

/* Switch the DAC to RGB 565 16bpp layout */
static inline void
set_565 (struct imstt_par *par)
{
	if (par->ramdac == IBM) {
		par->cmap_regs[PIDXHI] = 0;		eieio();
		par->cmap_regs[PIDXLO] = BPP16;		eieio();
		par->cmap_regs[PIDXDATA] = 0x03;	eieio();
	} else {
		par->cmap_regs[TVPADDRW] = TVPIRTCC;	eieio();
		par->cmap_regs[TVPIDATA] = 0x45;	eieio();
	}
}

/*
 * imsttfb_check_var - fbdev .fb_check_var hook.  Validates the
 * requested mode against supported depths and available VRAM, and
 * normalizes the color-field layout for the accepted depth.  16bpp may
 * be either 555 or 565; an incoming green.length of 6 selects 565.
 */
static int
imsttfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	if ((var->bits_per_pixel != 8 && var->bits_per_pixel != 16
	    && var->bits_per_pixel != 24 && var->bits_per_pixel != 32)
	    || var->xres_virtual < var->xres || var->yres_virtual < var->yres
	    || var->nonstd
	    || (var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED)
		return -EINVAL;

	if ((var->xres * var->yres) * (var->bits_per_pixel >> 3) > info->fix.smem_len
	    || (var->xres_virtual * var->yres_virtual) * (var->bits_per_pixel >> 3) > info->fix.smem_len)
		return -EINVAL;

	switch (var->bits_per_pixel) {
		case 8:
			var->red.offset = 0;
			var->red.length = 8;
			var->green.offset = 0;
			var->green.length = 8;
			var->blue.offset = 0;
			var->blue.length = 8;
			var->transp.offset = 0;
			var->transp.length = 0;
			break;
		case 16:	/* RGB 555 or 565 */
			if (var->green.length != 6)
				var->red.offset = 10;
			var->red.length = 5;
			var->green.offset = 5;
			if (var->green.length != 6)
				var->green.length = 5;
			var->blue.offset = 0;
			var->blue.length = 5;
			var->transp.offset = 0;
			var->transp.length = 0;
			break;
		case 24:	/* RGB 888 */
			var->red.offset = 16;
			var->red.length = 8;
			var->green.offset = 8;
			var->green.length = 8;
			var->blue.offset = 0;
			var->blue.length = 8;
			var->transp.offset = 0;
			var->transp.length = 0;
			break;
		case 32:	/* RGBA 8888 */
			var->red.offset = 16;
			var->red.length = 8;
			var->green.offset = 8;
			var->green.length = 8;
			var->blue.offset = 0;
			var->blue.length = 8;
			var->transp.offset = 24;
			var->transp.length = 8;
			break;
	}

	/* grow yres_virtual to use all remaining VRAM (minus 4 pages) */
	if (var->yres == var->yres_virtual) {
		__u32 vram = (info->fix.smem_len - (PAGE_SIZE << 2));
		var->yres_virtual = ((vram << 3) / var->bits_per_pixel) / var->xres_virtual;
		if (var->yres_virtual < var->yres)
			var->yres_virtual = var->yres;
	}

	var->red.msb_right = 0;
	var->green.msb_right = 0;
	var->blue.msb_right = 0;
	var->transp.msb_right = 0;
	var->height = -1;
	var->width = -1;
	var->vmode = FB_VMODE_NONINTERLACED;
	var->left_margin = var->right_margin = 16;
	var->upper_margin = var->lower_margin = 16;
	var->hsync_len = var->vsync_len = 8;
	return 0;
}

/*
 * imsttfb_set_par - fbdev .fb_set_par hook: recompute mode timings,
 * select 555/565 layout, program the hardware, and report the
 * resulting pixel clock back through var->pixclock.
 */
static int
imsttfb_set_par(struct fb_info *info) 
{
	struct imstt_par *par = info->par;

	if (!compute_imstt_regvals(par, info->var.xres, info->var.yres))
		return -EINVAL;

	if (info->var.green.length == 6)
		set_565(par);
	else
		set_555(par);
	set_imstt_regvals(info, info->var.bits_per_pixel);
	info->var.pixclock = 1000000 / getclkMHz(par);
	return 0;
}

/*
 * imsttfb_setcolreg - fbdev .fb_setcolreg hook: write one palette
 * entry to the DAC (truncating 16-bit fb color components to 8 bits)
 * and mirror the first 16 entries into the software pseudo-palette
 * used by the console for >8bpp modes.
 */
static int
imsttfb_setcolreg (u_int regno, u_int red, u_int green, u_int blue,
		   u_int transp, struct fb_info *info)
{
	struct imstt_par *par = info->par;
	u_int bpp = info->var.bits_per_pixel;

	if (regno > 255)
		return 1;

	red >>= 8;
	green >>= 8;
	blue >>= 8;

	/* PADDRW/PDATA are the same as TVPPADDRW/TVPPDATA */
	if (0 && bpp == 16)	/* screws up X */
		par->cmap_regs[PADDRW] = regno << 3;
	else
		par->cmap_regs[PADDRW] = regno;
	eieio();

	par->cmap_regs[PDATA] = red;	eieio();
	par->cmap_regs[PDATA] = green;	eieio();
	par->cmap_regs[PDATA] = blue;	eieio();

	if (regno < 16)
		switch (bpp) {
			case 16:
				par->palette[regno] =
					(regno << (info->var.green.length == 5 ? 10 : 11)) | (regno << 5) | regno;
				break;
			case 24:
				par->palette[regno] =
					(regno << 16) | (regno << 8) | regno;
				break;
			case 32: {
				int i = (regno << 8) | regno;
				par->palette[regno] = (i << 16) |i;
				break;
			}
		}
	return 0;
}

/*
 * imsttfb_pan_display - fbdev .fb_pan_display hook: bounds-check the
 * requested offsets and reprogram the screen start register.
 */
static int
imsttfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
	if (var->xoffset + info->var.xres > info->var.xres_virtual
	    || var->yoffset + info->var.yres > info->var.yres_virtual)
		return -EINVAL;

	info->var.xoffset = var->xoffset;
	info->var.yoffset = var->yoffset;
	set_offset(var, info);
	return 0;
}

/*
 * imsttfb_blank - fbdev .fb_blank hook.  Blank levels gate the video
 * output via STGCTL bits; for full blank/powerdown on IBM-DAC cards
 * the DAC itself is also put into its low-power state, and the inverse
 * sequence restores it on unblank.
 */
static int
imsttfb_blank(int blank, struct fb_info *info)
{
	struct imstt_par *par = info->par;
	__u32 ctrl;

	ctrl = read_reg_le32(par->dc_regs, STGCTL);
	if (blank > 0) {
		switch (blank) {
			case FB_BLANK_NORMAL:
			case FB_BLANK_POWERDOWN:
				ctrl &= ~0x00000380;
				if (par->ramdac == IBM) {
					par->cmap_regs[PIDXHI] = 0;		eieio();
					par->cmap_regs[PIDXLO] = MISCTL2;	eieio();
					par->cmap_regs[PIDXDATA] = 0x55;	eieio();
					par->cmap_regs[PIDXLO] = MISCTL1;	eieio();
					par->cmap_regs[PIDXDATA] = 0x11;	eieio();
					par->cmap_regs[PIDXLO] = SYNCCTL;	eieio();
					par->cmap_regs[PIDXDATA] = 0x0f;	eieio();
					par->cmap_regs[PIDXLO] = PWRMNGMT;	eieio();
					par->cmap_regs[PIDXDATA] = 0x1f;	eieio();
					par->cmap_regs[PIDXLO] = CLKCTL;	eieio();
					par->cmap_regs[PIDXDATA] = 0xc0;
				}
				break;
			case FB_BLANK_VSYNC_SUSPEND:
				ctrl &= ~0x00000020;
				break;
			case FB_BLANK_HSYNC_SUSPEND:
				ctrl &= ~0x00000010;
				break;
		}
	} else {
		if (par->ramdac == IBM) {
			ctrl |= 0x000017b0;
			par->cmap_regs[PIDXHI] = 0;		eieio();
			par->cmap_regs[PIDXLO] = CLKCTL;	eieio();
			par->cmap_regs[PIDXDATA] = 0x01;	eieio();
			par->cmap_regs[PIDXLO] = PWRMNGMT;	eieio();
			par->cmap_regs[PIDXDATA] = 0x00;	eieio();
			par->cmap_regs[PIDXLO] = SYNCCTL;	eieio();
			par->cmap_regs[PIDXDATA] = 0x00;	eieio();
			par->cmap_regs[PIDXLO] = MISCTL1;	eieio();
			par->cmap_regs[PIDXDATA] = 0x01;	eieio();
			par->cmap_regs[PIDXLO] = MISCTL2;	eieio();
			par->cmap_regs[PIDXDATA] = 0x45;	eieio();
		} else
			ctrl |= 0x00001780;
	}
	write_reg_le32(par->dc_regs, STGCTL, ctrl);
	return 0;
}
/*
 * imsttfb_fillrect - fbdev .fb_fillrect hook using the TwinTurbo
 * blitter.  CNT holds (height-1)<<16 | (width-1) in bytes.  ROP_COPY
 * fills with the replicated color through CLR; any other rop falls
 * back to a source==dest blit (self-copy), preserving the original
 * driver's behavior.
 */
static void
imsttfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct imstt_par *par = info->par;
	__u32 Bpp, line_pitch, bgc, dx, dy, width, height;

	/* replicate the 8-bit color into all four bytes of the fill word */
	bgc = rect->color;
	bgc |= (bgc << 8);
	bgc |= (bgc << 16);

	Bpp = info->var.bits_per_pixel >> 3,
	line_pitch = info->fix.line_length;

	dy = rect->dy * line_pitch;
	dx = rect->dx * Bpp;
	height = rect->height;
	height--;
	width = rect->width * Bpp;
	width--;

	if (rect->rop == ROP_COPY) {
		/* busy-wait for the engine, then issue the fill */
		while(read_reg_le32(par->dc_regs, SSTATUS) & 0x80);
		write_reg_le32(par->dc_regs, DSA, dy + dx);
		write_reg_le32(par->dc_regs, CNT, (height << 16) | width);
		write_reg_le32(par->dc_regs, DP_OCTL, line_pitch);
		write_reg_le32(par->dc_regs, BI, 0xffffffff);
		write_reg_le32(par->dc_regs, MBC, 0xffffffff);
		write_reg_le32(par->dc_regs, CLR, bgc);
		write_reg_le32(par->dc_regs, BLTCTL, 0x840); /* 0x200000 */
		while(read_reg_le32(par->dc_regs, SSTATUS) & 0x80);
		while(read_reg_le32(par->dc_regs, SSTATUS) & 0x40);
	} else {
		while(read_reg_le32(par->dc_regs, SSTATUS) & 0x80);
		write_reg_le32(par->dc_regs, DSA, dy + dx);
		write_reg_le32(par->dc_regs, S1SA, dy + dx);
		write_reg_le32(par->dc_regs, CNT, (height << 16) | width);
		write_reg_le32(par->dc_regs, DP_OCTL, line_pitch);
		write_reg_le32(par->dc_regs, SP, line_pitch);
		write_reg_le32(par->dc_regs, BLTCTL, 0x40005);
		while(read_reg_le32(par->dc_regs, SSTATUS) & 0x80);
		while(read_reg_le32(par->dc_regs, SSTATUS) & 0x40);
	}
}

/*
 * imsttfb_copyarea - fbdev .fb_copyarea hook.  Handles overlapping
 * copies by reversing the scan direction: bottom-up when the
 * destination is below the source (negative pitch in SP/DP_OCTL),
 * right-to-left when it is to the right (BLTCTL bit 0x80 plus a
 * negative width in CNT).
 */
static void
imsttfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
	struct imstt_par *par = info->par;
	__u32 Bpp, line_pitch, fb_offset_old, fb_offset_new, sp, dp_octl;
	__u32 cnt, bltctl, sx, sy, dx, dy, height, width;

	Bpp = info->var.bits_per_pixel >> 3,

	sx = area->sx * Bpp;
	sy = area->sy;
	dx = area->dx * Bpp;
	dy = area->dy;
	height = area->height;
	height--;
	width = area->width * Bpp;
	width--;

	line_pitch = info->fix.line_length;
	bltctl = 0x05;
	sp = line_pitch << 16;
	cnt = height << 16;

	if (sy < dy) {
		/* copy bottom-up to avoid clobbering unread source rows */
		sy += height;
		dy += height;
		sp |= -(line_pitch) & 0xffff;
		dp_octl = -(line_pitch) & 0xffff;
	} else {
		sp |= line_pitch;
		dp_octl = line_pitch;
	}
	if (sx < dx) {
		/* copy right-to-left for horizontally overlapping regions */
		sx += width;
		dx += width;
		bltctl |= 0x80;
		cnt |= -(width) & 0xffff;
	} else {
		cnt |= width;
	}
	fb_offset_old = sy * line_pitch + sx;
	fb_offset_new = dy * line_pitch + dx;

	while(read_reg_le32(par->dc_regs, SSTATUS) & 0x80);
	write_reg_le32(par->dc_regs, S1SA, fb_offset_old);
	write_reg_le32(par->dc_regs, SP, sp);
	write_reg_le32(par->dc_regs, DSA, fb_offset_new);
	write_reg_le32(par->dc_regs, CNT, cnt);
	write_reg_le32(par->dc_regs, DP_OCTL, dp_octl);
	write_reg_le32(par->dc_regs, BLTCTL, bltctl);
	while(read_reg_le32(par->dc_regs, SSTATUS) & 0x80);
	while(read_reg_le32(par->dc_regs, SSTATUS) & 0x40);
}

/*
 * Disabled hardware-cursor support.  NOTE(review): imsttfb_cursor()
 * below references identifiers (i, j, s_idx, d_idx, data, mask,
 * byte_rev, MAX_CURS, fgc) that are never declared, and passes `info`
 * where imstt_set_cursor() expects a struct imstt_par * -- this block
 * does not compile as-is and would need rework before re-enabling.
 */
#if 0
static int
imsttfb_load_cursor_image(struct imstt_par *par, int width, int height, __u8 fgc)
{
	u_int x, y;

	if (width > 32 || height > 32)
		return -EINVAL;

	if (par->ramdac == IBM) {
		par->cmap_regs[PIDXHI] = 1;	eieio();
		for (x = 0; x < 0x100; x++) {
			par->cmap_regs[PIDXLO] = x;		eieio();
			par->cmap_regs[PIDXDATA] = 0x00;	eieio();
		}
		par->cmap_regs[PIDXHI] = 1;	eieio();
		for (y = 0; y < height; y++)
			for (x = 0; x < width >> 2; x++) {
				par->cmap_regs[PIDXLO] = x + y * 8;	eieio();
				par->cmap_regs[PIDXDATA] = 0xff;	eieio();
			}
		par->cmap_regs[PIDXHI] = 0;	eieio();
		par->cmap_regs[PIDXLO] = CURS1R;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;		eieio();
		par->cmap_regs[PIDXLO] = CURS1G;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;		eieio();
		par->cmap_regs[PIDXLO] = CURS1B;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;		eieio();
		par->cmap_regs[PIDXLO] = CURS2R;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;		eieio();
		par->cmap_regs[PIDXLO] = CURS2G;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;		eieio();
		par->cmap_regs[PIDXLO] = CURS2B;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;		eieio();
		par->cmap_regs[PIDXLO] = CURS3R;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;		eieio();
		par->cmap_regs[PIDXLO] = CURS3G;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;		eieio();
		par->cmap_regs[PIDXLO] = CURS3B;	eieio();
		par->cmap_regs[PIDXDATA] = fgc;		eieio();
	} else {
		par->cmap_regs[TVPADDRW] = TVPIRICC;	eieio();
		par->cmap_regs[TVPIDATA] &= 0x03;	eieio();
		par->cmap_regs[TVPADDRW] = 0;	eieio();
		for (x = 0; x < 0x200; x++) {
			par->cmap_regs[TVPCRDAT] = 0x00;	eieio();
		}
		for (x = 0; x < 0x200; x++) {
			par->cmap_regs[TVPCRDAT] = 0xff;	eieio();
		}
		par->cmap_regs[TVPADDRW] = TVPIRICC;	eieio();
		par->cmap_regs[TVPIDATA] &= 0x03;	eieio();
		for (y = 0; y < height; y++)
			for (x = 0; x < width >> 3; x++) {
				par->cmap_regs[TVPADDRW] = x + y * 8;	eieio();
				par->cmap_regs[TVPCRDAT] = 0xff;	eieio();
			}
		par->cmap_regs[TVPADDRW] = TVPIRICC;	eieio();
		par->cmap_regs[TVPIDATA] |= 0x08;	eieio();
		for (y = 0; y < height; y++)
			for (x = 0; x < width >> 3; x++) {
				par->cmap_regs[TVPADDRW] = x + y * 8;	eieio();
				par->cmap_regs[TVPCRDAT] = 0xff;	eieio();
			}
		par->cmap_regs[TVPCADRW] = 0x00;	eieio();
		for (x = 0; x < 12; x++) {
			par->cmap_regs[TVPCDATA] = fgc;	eieio();
		}
	}
	return 1;
}

static void
imstt_set_cursor(struct imstt_par *par, struct fb_image *d, int on)
{
	if (par->ramdac == IBM) {
		par->cmap_regs[PIDXHI] = 0;	eieio();
		if (!on) {
			par->cmap_regs[PIDXLO] = CURSCTL;	eieio();
			par->cmap_regs[PIDXDATA] = 0x00;	eieio();
		} else {
			par->cmap_regs[PIDXLO] = CURSXHI;	eieio();
			par->cmap_regs[PIDXDATA] = d->dx >> 8;	eieio();
			par->cmap_regs[PIDXLO] = CURSXLO;	eieio();
			par->cmap_regs[PIDXDATA] = d->dx & 0xff;eieio();
			par->cmap_regs[PIDXLO] = CURSYHI;	eieio();
			par->cmap_regs[PIDXDATA] = d->dy >> 8;	eieio();
			par->cmap_regs[PIDXLO] = CURSYLO;	eieio();
			par->cmap_regs[PIDXDATA] = d->dy & 0xff;eieio();
			par->cmap_regs[PIDXLO] = CURSCTL;	eieio();
			par->cmap_regs[PIDXDATA] = 0x02;	eieio();
		}
	} else {
		if (!on) {
			par->cmap_regs[TVPADDRW] = TVPIRICC;	eieio();
			par->cmap_regs[TVPIDATA] = 0x00;	eieio();
		} else {
			__u16 x = d->dx + 0x40, y = d->dy + 0x40;

			par->cmap_regs[TVPCXPOH] = x >> 8;	eieio();
			par->cmap_regs[TVPCXPOL] = x & 0xff;	eieio();
			par->cmap_regs[TVPCYPOH] = y >> 8;	eieio();
			par->cmap_regs[TVPCYPOL] = y & 0xff;	eieio();
			par->cmap_regs[TVPADDRW] = TVPIRICC;	eieio();
			par->cmap_regs[TVPIDATA] = 0x02;	eieio();
		}
	}
}

static int
imsttfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	struct imstt_par *par = info->par;
        u32 flags = cursor->set, fg, bg, xx, yy;

	if (cursor->dest == NULL && cursor->rop == ROP_XOR)
		return 1;
	
	imstt_set_cursor(info, cursor, 0);

	if (flags & FB_CUR_SETPOS) {
		xx = cursor->image.dx - info->var.xoffset;
		yy = cursor->image.dy - info->var.yoffset;
	}

	if (flags & FB_CUR_SETSIZE) {
        }

        if (flags & (FB_CUR_SETSHAPE | FB_CUR_SETCMAP)) {
		int fg_idx = cursor->image.fg_color;
		int width = (cursor->image.width+7)/8;
		u8 *dat = (u8 *) cursor->image.data;
		u8 *dst = (u8 *) cursor->dest;
		u8 *msk = (u8 *) cursor->mask;

		switch (cursor->rop) {
		case ROP_XOR:
			for (i = 0; i < cursor->image.height; i++) {
				for (j = 0; j < width; j++) {
					d_idx = i * MAX_CURS/8  + j;
					data[d_idx] =  byte_rev[dat[s_idx] ^
								dst[s_idx]];
					mask[d_idx] = byte_rev[msk[s_idx]];
					s_idx++;
				}
			}
			break;
		case ROP_COPY:
		default:
			for (i = 0; i < cursor->image.height; i++) {
				for (j = 0; j < width; j++) {
					d_idx = i * MAX_CURS/8 + j;
					data[d_idx] = byte_rev[dat[s_idx]];
					mask[d_idx] = byte_rev[msk[s_idx]];
					s_idx++;
				}
			}
			break;
		}

		fg = ((info->cmap.red[fg_idx] & 0xf8) << 7) |
		     ((info->cmap.green[fg_idx] & 0xf8) << 2) |
		     ((info->cmap.blue[fg_idx] & 0xf8) >> 3) | 1 << 15;

		imsttfb_load_cursor_image(par, xx, yy, fgc);
	}
	if (cursor->enable)
		imstt_set_cursor(info, cursor, 1);
	return 0;
}
#endif

/* Driver-private ioctls for raw register access (debug/tuning tools) */
#define FBIMSTT_SETREG		0x545401
#define FBIMSTT_GETREG		0x545402
#define FBIMSTT_SETCMAPREG	0x545403
#define FBIMSTT_GETCMAPREG	0x545404
#define FBIMSTT_SETIDXREG	0x545405
#define FBIMSTT_GETIDXREG	0x545406

/*
 * imsttfb_ioctl - fbdev .fb_ioctl hook.  reg[0] is the register index,
 * reg[1] the value; indices are bounds-checked against the 0x1000-byte
 * register window before being used.  SETIDXREG/GETIDXREG go through
 * the IBM DAC's indexed interface (idx[0] = index, idx[1] = value).
 */
static int
imsttfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
{
	struct imstt_par *par = info->par;
	void __user *argp = (void __user *)arg;
	__u32 reg[2];
	__u8 idx[2];

	switch (cmd) {
		case FBIMSTT_SETREG:
			if (copy_from_user(reg, argp, 8) ||
			    reg[0] > (0x1000 - sizeof(reg[0])) / sizeof(reg[0]))
				return -EFAULT;
			write_reg_le32(par->dc_regs, reg[0], reg[1]);
			return 0;
		case FBIMSTT_GETREG:
			if (copy_from_user(reg, argp, 4) ||
			    reg[0] > (0x1000 - sizeof(reg[0])) / sizeof(reg[0]))
				return -EFAULT;
			reg[1] = read_reg_le32(par->dc_regs, reg[0]);
			if (copy_to_user((void __user *)(arg + 4), &reg[1], 4))
				return -EFAULT;
			return 0;
		case FBIMSTT_SETCMAPREG:
			if (copy_from_user(reg, argp, 8) ||
			    reg[0] > (0x1000 - sizeof(reg[0])) / sizeof(reg[0]))
				return -EFAULT;
			write_reg_le32(((u_int __iomem *)par->cmap_regs), reg[0], reg[1]);
			return 0;
		case FBIMSTT_GETCMAPREG:
			if (copy_from_user(reg, argp, 4) ||
			    reg[0] > (0x1000 - sizeof(reg[0])) / sizeof(reg[0]))
				return -EFAULT;
			reg[1] = read_reg_le32(((u_int __iomem *)par->cmap_regs), reg[0]);
			if (copy_to_user((void __user *)(arg + 4), &reg[1], 4))
				return -EFAULT;
			return 0;
		case FBIMSTT_SETIDXREG:
			if (copy_from_user(idx, argp, 2))
				return -EFAULT;
			par->cmap_regs[PIDXHI] = 0;		eieio();
			par->cmap_regs[PIDXLO] = idx[0];	eieio();
			par->cmap_regs[PIDXDATA] = idx[1];	eieio();
			return 0;
		case FBIMSTT_GETIDXREG:
			if (copy_from_user(idx, argp, 1))
				return -EFAULT;
			par->cmap_regs[PIDXHI] = 0;		eieio();
			par->cmap_regs[PIDXLO] = idx[0];	eieio();
			idx[1] = par->cmap_regs[PIDXDATA];
			if (copy_to_user((void __user *)(arg + 1), &idx[1], 1))
				return -EFAULT;
			return 0;
		default:
			return -ENOIOCTLCMD;
	}
}

/* PCI ids; driver_data selects the fitted RAMDAC (IBM on TT128, TVP on TT3D) */
static struct pci_device_id imsttfb_pci_tbl[] = {
	{ PCI_VENDOR_ID_IMS, PCI_DEVICE_ID_IMS_TT128,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, IBM },
	{ PCI_VENDOR_ID_IMS, PCI_DEVICE_ID_IMS_TT3D,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TVP },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, imsttfb_pci_tbl);

static struct pci_driver imsttfb_pci_driver = {
	.name =		"imsttfb",
	.id_table =	imsttfb_pci_tbl,
	.probe =	imsttfb_probe,
	.remove =	imsttfb_remove,
};

static struct fb_ops imsttfb_ops = {
	.owner 		= THIS_MODULE,
	.fb_check_var	= imsttfb_check_var,
	.fb_set_par 	= imsttfb_set_par,
	.fb_setcolreg	= imsttfb_setcolreg,
	.fb_pan_display = imsttfb_pan_display,
	.fb_blank	= imsttfb_blank,
	.fb_fillrect	= imsttfb_fillrect,
	.fb_copyarea	= imsttfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
	.fb_ioctl 	= imsttfb_ioctl,
};
static void init_imstt(struct fb_info *info) { struct imstt_par *par = info->par; __u32 i, tmp, *ip, *end; tmp = read_reg_le32(par->dc_regs, PRC); if (par->ramdac == IBM) info->fix.smem_len = (tmp & 0x0004) ? 0x400000 : 0x200000; else info->fix.smem_len = 0x800000; ip = (__u32 *)info->screen_base; end = (__u32 *)(info->screen_base + info->fix.smem_len); while (ip < end) *ip++ = 0; /* initialize the card */ tmp = read_reg_le32(par->dc_regs, STGCTL); write_reg_le32(par->dc_regs, STGCTL, tmp & ~0x1); write_reg_le32(par->dc_regs, SSR, 0); /* set default values for DAC registers */ if (par->ramdac == IBM) { par->cmap_regs[PPMASK] = 0xff; eieio(); par->cmap_regs[PIDXHI] = 0; eieio(); for (i = 0; i < ARRAY_SIZE(ibm_initregs); i++) { par->cmap_regs[PIDXLO] = ibm_initregs[i].addr; eieio(); par->cmap_regs[PIDXDATA] = ibm_initregs[i].value; eieio(); } } else { for (i = 0; i < ARRAY_SIZE(tvp_initregs); i++) { par->cmap_regs[TVPADDRW] = tvp_initregs[i].addr; eieio(); par->cmap_regs[TVPIDATA] = tvp_initregs[i].value; eieio(); } } #if USE_NV_MODES && defined(CONFIG_PPC32) { int vmode = init_vmode, cmode = init_cmode; if (vmode == -1) { vmode = nvram_read_byte(NV_VMODE); if (vmode <= 0 || vmode > VMODE_MAX) vmode = VMODE_640_480_67; } if (cmode == -1) { cmode = nvram_read_byte(NV_CMODE); if (cmode < CMODE_8 || cmode > CMODE_32) cmode = CMODE_8; } if (mac_vmode_to_var(vmode, cmode, &info->var)) { info->var.xres = info->var.xres_virtual = INIT_XRES; info->var.yres = info->var.yres_virtual = INIT_YRES; info->var.bits_per_pixel = INIT_BPP; } } #else info->var.xres = info->var.xres_virtual = INIT_XRES; info->var.yres = info->var.yres_virtual = INIT_YRES; info->var.bits_per_pixel = INIT_BPP; #endif if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) { printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel); framebuffer_release(info); 
return; } sprintf(info->fix.id, "IMS TT (%s)", par->ramdac == IBM ? "IBM" : "TVP"); info->fix.mmio_len = 0x1000; info->fix.accel = FB_ACCEL_IMS_TWINTURBO; info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = info->var.bits_per_pixel == 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; info->fix.line_length = info->var.xres * (info->var.bits_per_pixel >> 3); info->fix.xpanstep = 8; info->fix.ypanstep = 1; info->fix.ywrapstep = 0; info->var.accel_flags = FB_ACCELF_TEXT; // if (par->ramdac == IBM) // imstt_cursor_init(info); if (info->var.green.length == 6) set_565(par); else set_555(par); set_imstt_regvals(info, info->var.bits_per_pixel); info->var.pixclock = 1000000 / getclkMHz(par); info->fbops = &imsttfb_ops; info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_YPAN; fb_alloc_cmap(&info->cmap, 0, 0); if (register_framebuffer(info) < 0) { framebuffer_release(info); return; } tmp = (read_reg_le32(par->dc_regs, SSTATUS) & 0x0f00) >> 8; fb_info(info, "%s frame buffer; %uMB vram; chip version %u\n", info->fix.id, info->fix.smem_len >> 20, tmp); } static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned long addr, size; struct imstt_par *par; struct fb_info *info; struct device_node *dp; dp = pci_device_to_OF_node(pdev); if(dp) printk(KERN_INFO "%s: OF name %s\n",__func__, dp->name); else if (IS_ENABLED(CONFIG_OF)) printk(KERN_ERR "imsttfb: no OF node for pci device\n"); info = framebuffer_alloc(sizeof(struct imstt_par), &pdev->dev); if (!info) { printk(KERN_ERR "imsttfb: Can't allocate memory\n"); return -ENOMEM; } par = info->par; addr = pci_resource_start (pdev, 0); size = pci_resource_len (pdev, 0); if (!request_mem_region(addr, size, "imsttfb")) { printk(KERN_ERR "imsttfb: Can't reserve memory region\n"); framebuffer_release(info); return -ENODEV; } switch (pdev->device) { case PCI_DEVICE_ID_IMS_TT128: /* IMS,tt128mbA */ par->ramdac = IBM; if (dp && ((strcmp(dp->name, 
"IMS,tt128mb8") == 0) || (strcmp(dp->name, "IMS,tt128mb8A") == 0))) par->ramdac = TVP; break; case PCI_DEVICE_ID_IMS_TT3D: /* IMS,tt3d */ par->ramdac = TVP; break; default: printk(KERN_INFO "imsttfb: Device 0x%x unknown, " "contact maintainer.\n", pdev->device); release_mem_region(addr, size); framebuffer_release(info); return -ENODEV; } info->fix.smem_start = addr; info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ? 0x400000 : 0x800000); info->fix.mmio_start = addr + 0x800000; par->dc_regs = ioremap(addr + 0x800000, 0x1000); par->cmap_regs_phys = addr + 0x840000; par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000); info->pseudo_palette = par->palette; init_imstt(info); pci_set_drvdata(pdev, info); return 0; } static void imsttfb_remove(struct pci_dev *pdev) { struct fb_info *info = pci_get_drvdata(pdev); struct imstt_par *par = info->par; int size = pci_resource_len(pdev, 0); unregister_framebuffer(info); iounmap(par->cmap_regs); iounmap(par->dc_regs); iounmap(info->screen_base); release_mem_region(info->fix.smem_start, size); framebuffer_release(info); } #ifndef MODULE static int __init imsttfb_setup(char *options) { char *this_opt; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!strncmp(this_opt, "font:", 5)) { char *p; int i; p = this_opt + 5; for (i = 0; i < sizeof(fontname) - 1; i++) if (!*p || *p == ' ' || *p == ',') break; memcpy(fontname, this_opt + 5, i); fontname[i] = 0; } else if (!strncmp(this_opt, "inverse", 7)) { inverse = 1; fb_invert_cmaps(); } #if defined(CONFIG_PPC) else if (!strncmp(this_opt, "vmode:", 6)) { int vmode = simple_strtoul(this_opt+6, NULL, 0); if (vmode > 0 && vmode <= VMODE_MAX) init_vmode = vmode; } else if (!strncmp(this_opt, "cmode:", 6)) { int cmode = simple_strtoul(this_opt+6, NULL, 0); switch (cmode) { case CMODE_8: case 8: init_cmode = CMODE_8; break; case CMODE_16: case 15: case 16: init_cmode = CMODE_16; break; case CMODE_32: case 24: case 32: init_cmode = 
CMODE_32; break; } } #endif } return 0; } #endif /* MODULE */ static int __init imsttfb_init(void) { #ifndef MODULE char *option = NULL; if (fb_get_options("imsttfb", &option)) return -ENODEV; imsttfb_setup(option); #endif return pci_register_driver(&imsttfb_pci_driver); } static void __exit imsttfb_exit(void) { pci_unregister_driver(&imsttfb_pci_driver); } MODULE_LICENSE("GPL"); module_init(imsttfb_init); module_exit(imsttfb_exit);
gpl-2.0
Kshitij-Jain/android_kernel_cyanogen_msm8916
security/selinux/ss/services.c
805
76760
/* * Implementation of the security services. * * Authors : Stephen Smalley, <sds@epoch.ncsc.mil> * James Morris <jmorris@redhat.com> * * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com> * * Support for enhanced MLS infrastructure. * Support for context based audit filters. * * Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com> * * Added conditional policy language extensions * * Updated: Hewlett-Packard <paul@paul-moore.com> * * Added support for NetLabel * Added support for the policy capability bitmap * * Updated: Chad Sellers <csellers@tresys.com> * * Added validation of kernel classes and permissions * * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com> * * Added support for bounds domain and audit messaged on masked permissions * * Updated: Guido Trentalancia <guido@trentalancia.com> * * Added support for runtime switching of the policy type * * Copyright (C) 2008, 2009 NEC Corporation * Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P. * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc. * Copyright (C) 2003 - 2004, 2006 Tresys Technology, LLC * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com> * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2. 
*/ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/sched.h> #include <linux/audit.h> #include <linux/mutex.h> #include <linux/selinux.h> #include <linux/flex_array.h> #include <linux/vmalloc.h> #include <net/netlabel.h> #include "flask.h" #include "avc.h" #include "avc_ss.h" #include "security.h" #include "context.h" #include "policydb.h" #include "sidtab.h" #include "services.h" #include "conditional.h" #include "mls.h" #include "objsec.h" #include "netlabel.h" #include "xfrm.h" #include "ebitmap.h" #include "audit.h" int selinux_policycap_netpeer; int selinux_policycap_openperm; static DEFINE_RWLOCK(policy_rwlock); static struct sidtab sidtab; struct policydb policydb; int ss_initialized; /* * The largest sequence number that has been used when * providing an access decision to the access vector cache. * The sequence number only changes when a policy change * occurs. */ static u32 latest_granting; /* Forward declaration. 
*/ static int context_struct_to_string(struct context *context, char **scontext, u32 *scontext_len); static void context_struct_compute_av(struct context *scontext, struct context *tcontext, u16 tclass, struct av_decision *avd); struct selinux_mapping { u16 value; /* policy value */ unsigned num_perms; u32 perms[sizeof(u32) * 8]; }; static struct selinux_mapping *current_mapping; static u16 current_mapping_size; static int selinux_set_mapping(struct policydb *pol, struct security_class_mapping *map, struct selinux_mapping **out_map_p, u16 *out_map_size) { struct selinux_mapping *out_map = NULL; size_t size = sizeof(struct selinux_mapping); u16 i, j; unsigned k; bool print_unknown_handle = false; /* Find number of classes in the input mapping */ if (!map) return -EINVAL; i = 0; while (map[i].name) i++; /* Allocate space for the class records, plus one for class zero */ out_map = kcalloc(++i, size, GFP_ATOMIC); if (!out_map) return -ENOMEM; /* Store the raw class and permission values */ j = 0; while (map[j].name) { struct security_class_mapping *p_in = map + (j++); struct selinux_mapping *p_out = out_map + j; /* An empty class string skips ahead */ if (!strcmp(p_in->name, "")) { p_out->num_perms = 0; continue; } p_out->value = string_to_security_class(pol, p_in->name); if (!p_out->value) { printk(KERN_INFO "SELinux: Class %s not defined in policy.\n", p_in->name); if (pol->reject_unknown) goto err; p_out->num_perms = 0; print_unknown_handle = true; continue; } k = 0; while (p_in->perms && p_in->perms[k]) { /* An empty permission string skips ahead */ if (!*p_in->perms[k]) { k++; continue; } p_out->perms[k] = string_to_av_perm(pol, p_out->value, p_in->perms[k]); if (!p_out->perms[k]) { printk(KERN_INFO "SELinux: Permission %s in class %s not defined in policy.\n", p_in->perms[k], p_in->name); if (pol->reject_unknown) goto err; print_unknown_handle = true; } k++; } p_out->num_perms = k; } if (print_unknown_handle) printk(KERN_INFO "SELinux: the above unknown classes 
and permissions will be %s\n", pol->allow_unknown ? "allowed" : "denied"); *out_map_p = out_map; *out_map_size = i; return 0; err: kfree(out_map); return -EINVAL; } /* * Get real, policy values from mapped values */ static u16 unmap_class(u16 tclass) { if (tclass < current_mapping_size) return current_mapping[tclass].value; return tclass; } /* * Get kernel value for class from its policy value */ static u16 map_class(u16 pol_value) { u16 i; for (i = 1; i < current_mapping_size; i++) { if (current_mapping[i].value == pol_value) return i; } return SECCLASS_NULL; } static void map_decision(u16 tclass, struct av_decision *avd, int allow_unknown) { if (tclass < current_mapping_size) { unsigned i, n = current_mapping[tclass].num_perms; u32 result; for (i = 0, result = 0; i < n; i++) { if (avd->allowed & current_mapping[tclass].perms[i]) result |= 1<<i; if (allow_unknown && !current_mapping[tclass].perms[i]) result |= 1<<i; } avd->allowed = result; for (i = 0, result = 0; i < n; i++) if (avd->auditallow & current_mapping[tclass].perms[i]) result |= 1<<i; avd->auditallow = result; for (i = 0, result = 0; i < n; i++) { if (avd->auditdeny & current_mapping[tclass].perms[i]) result |= 1<<i; if (!allow_unknown && !current_mapping[tclass].perms[i]) result |= 1<<i; } /* * In case the kernel has a bug and requests a permission * between num_perms and the maximum permission number, we * should audit that denial */ for (; i < (sizeof(u32)*8); i++) result |= 1<<i; avd->auditdeny = result; } } int security_mls_enabled(void) { return policydb.mls_enabled; } /* * Return the boolean value of a constraint expression * when it is applied to the specified source and target * security contexts. * * xcontext is a special beast... It is used by the validatetrans rules * only. For these rules, scontext is the context before the transition, * tcontext is the context after the transition, and xcontext is the context * of the process performing the transition. 
All other callers of * constraint_expr_eval should pass in NULL for xcontext. */ static int constraint_expr_eval(struct context *scontext, struct context *tcontext, struct context *xcontext, struct constraint_expr *cexpr) { u32 val1, val2; struct context *c; struct role_datum *r1, *r2; struct mls_level *l1, *l2; struct constraint_expr *e; int s[CEXPR_MAXDEPTH]; int sp = -1; for (e = cexpr; e; e = e->next) { switch (e->expr_type) { case CEXPR_NOT: BUG_ON(sp < 0); s[sp] = !s[sp]; break; case CEXPR_AND: BUG_ON(sp < 1); sp--; s[sp] &= s[sp + 1]; break; case CEXPR_OR: BUG_ON(sp < 1); sp--; s[sp] |= s[sp + 1]; break; case CEXPR_ATTR: if (sp == (CEXPR_MAXDEPTH - 1)) return 0; switch (e->attr) { case CEXPR_USER: val1 = scontext->user; val2 = tcontext->user; break; case CEXPR_TYPE: val1 = scontext->type; val2 = tcontext->type; break; case CEXPR_ROLE: val1 = scontext->role; val2 = tcontext->role; r1 = policydb.role_val_to_struct[val1 - 1]; r2 = policydb.role_val_to_struct[val2 - 1]; switch (e->op) { case CEXPR_DOM: s[++sp] = ebitmap_get_bit(&r1->dominates, val2 - 1); continue; case CEXPR_DOMBY: s[++sp] = ebitmap_get_bit(&r2->dominates, val1 - 1); continue; case CEXPR_INCOMP: s[++sp] = (!ebitmap_get_bit(&r1->dominates, val2 - 1) && !ebitmap_get_bit(&r2->dominates, val1 - 1)); continue; default: break; } break; case CEXPR_L1L2: l1 = &(scontext->range.level[0]); l2 = &(tcontext->range.level[0]); goto mls_ops; case CEXPR_L1H2: l1 = &(scontext->range.level[0]); l2 = &(tcontext->range.level[1]); goto mls_ops; case CEXPR_H1L2: l1 = &(scontext->range.level[1]); l2 = &(tcontext->range.level[0]); goto mls_ops; case CEXPR_H1H2: l1 = &(scontext->range.level[1]); l2 = &(tcontext->range.level[1]); goto mls_ops; case CEXPR_L1H1: l1 = &(scontext->range.level[0]); l2 = &(scontext->range.level[1]); goto mls_ops; case CEXPR_L2H2: l1 = &(tcontext->range.level[0]); l2 = &(tcontext->range.level[1]); goto mls_ops; mls_ops: switch (e->op) { case CEXPR_EQ: s[++sp] = mls_level_eq(l1, l2); continue; 
case CEXPR_NEQ: s[++sp] = !mls_level_eq(l1, l2); continue; case CEXPR_DOM: s[++sp] = mls_level_dom(l1, l2); continue; case CEXPR_DOMBY: s[++sp] = mls_level_dom(l2, l1); continue; case CEXPR_INCOMP: s[++sp] = mls_level_incomp(l2, l1); continue; default: BUG(); return 0; } break; default: BUG(); return 0; } switch (e->op) { case CEXPR_EQ: s[++sp] = (val1 == val2); break; case CEXPR_NEQ: s[++sp] = (val1 != val2); break; default: BUG(); return 0; } break; case CEXPR_NAMES: if (sp == (CEXPR_MAXDEPTH-1)) return 0; c = scontext; if (e->attr & CEXPR_TARGET) c = tcontext; else if (e->attr & CEXPR_XTARGET) { c = xcontext; if (!c) { BUG(); return 0; } } if (e->attr & CEXPR_USER) val1 = c->user; else if (e->attr & CEXPR_ROLE) val1 = c->role; else if (e->attr & CEXPR_TYPE) val1 = c->type; else { BUG(); return 0; } switch (e->op) { case CEXPR_EQ: s[++sp] = ebitmap_get_bit(&e->names, val1 - 1); break; case CEXPR_NEQ: s[++sp] = !ebitmap_get_bit(&e->names, val1 - 1); break; default: BUG(); return 0; } break; default: BUG(); return 0; } } BUG_ON(sp != 0); return s[0]; } /* * security_dump_masked_av - dumps masked permissions during * security_compute_av due to RBAC, MLS/Constraint and Type bounds. 
*/ static int dump_masked_av_helper(void *k, void *d, void *args) { struct perm_datum *pdatum = d; char **permission_names = args; BUG_ON(pdatum->value < 1 || pdatum->value > 32); permission_names[pdatum->value - 1] = (char *)k; return 0; } static void security_dump_masked_av(struct context *scontext, struct context *tcontext, u16 tclass, u32 permissions, const char *reason) { struct common_datum *common_dat; struct class_datum *tclass_dat; struct audit_buffer *ab; char *tclass_name; char *scontext_name = NULL; char *tcontext_name = NULL; char *permission_names[32]; int index; u32 length; bool need_comma = false; if (!permissions) return; tclass_name = sym_name(&policydb, SYM_CLASSES, tclass - 1); tclass_dat = policydb.class_val_to_struct[tclass - 1]; common_dat = tclass_dat->comdatum; /* init permission_names */ if (common_dat && hashtab_map(common_dat->permissions.table, dump_masked_av_helper, permission_names) < 0) goto out; if (hashtab_map(tclass_dat->permissions.table, dump_masked_av_helper, permission_names) < 0) goto out; /* get scontext/tcontext in text form */ if (context_struct_to_string(scontext, &scontext_name, &length) < 0) goto out; if (context_struct_to_string(tcontext, &tcontext_name, &length) < 0) goto out; /* audit a message */ ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR); if (!ab) goto out; audit_log_format(ab, "op=security_compute_av reason=%s " "scontext=%s tcontext=%s tclass=%s perms=", reason, scontext_name, tcontext_name, tclass_name); for (index = 0; index < 32; index++) { u32 mask = (1 << index); if ((mask & permissions) == 0) continue; audit_log_format(ab, "%s%s", need_comma ? "," : "", permission_names[index] ? permission_names[index] : "????"); need_comma = true; } audit_log_end(ab); out: /* release scontext/tcontext */ kfree(tcontext_name); kfree(scontext_name); return; } /* * security_boundary_permission - drops violated permissions * on boundary constraint. 
*/ static void type_attribute_bounds_av(struct context *scontext, struct context *tcontext, u16 tclass, struct av_decision *avd) { struct context lo_scontext; struct context lo_tcontext; struct av_decision lo_avd; struct type_datum *source; struct type_datum *target; u32 masked = 0; source = flex_array_get_ptr(policydb.type_val_to_struct_array, scontext->type - 1); BUG_ON(!source); target = flex_array_get_ptr(policydb.type_val_to_struct_array, tcontext->type - 1); BUG_ON(!target); if (source->bounds) { memset(&lo_avd, 0, sizeof(lo_avd)); memcpy(&lo_scontext, scontext, sizeof(lo_scontext)); lo_scontext.type = source->bounds; context_struct_compute_av(&lo_scontext, tcontext, tclass, &lo_avd); if ((lo_avd.allowed & avd->allowed) == avd->allowed) return; /* no masked permission */ masked = ~lo_avd.allowed & avd->allowed; } if (target->bounds) { memset(&lo_avd, 0, sizeof(lo_avd)); memcpy(&lo_tcontext, tcontext, sizeof(lo_tcontext)); lo_tcontext.type = target->bounds; context_struct_compute_av(scontext, &lo_tcontext, tclass, &lo_avd); if ((lo_avd.allowed & avd->allowed) == avd->allowed) return; /* no masked permission */ masked = ~lo_avd.allowed & avd->allowed; } if (source->bounds && target->bounds) { memset(&lo_avd, 0, sizeof(lo_avd)); /* * lo_scontext and lo_tcontext are already * set up. */ context_struct_compute_av(&lo_scontext, &lo_tcontext, tclass, &lo_avd); if ((lo_avd.allowed & avd->allowed) == avd->allowed) return; /* no masked permission */ masked = ~lo_avd.allowed & avd->allowed; } if (masked) { /* mask violated permissions */ avd->allowed &= ~masked; /* audit masked permissions */ security_dump_masked_av(scontext, tcontext, tclass, masked, "bounds"); } } /* * Compute access vectors based on a context structure pair for * the permissions in a particular class. 
*/ static void context_struct_compute_av(struct context *scontext, struct context *tcontext, u16 tclass, struct av_decision *avd) { struct constraint_node *constraint; struct role_allow *ra; struct avtab_key avkey; struct avtab_node *node; struct class_datum *tclass_datum; struct ebitmap *sattr, *tattr; struct ebitmap_node *snode, *tnode; unsigned int i, j; avd->allowed = 0; avd->auditallow = 0; avd->auditdeny = 0xffffffff; if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) { if (printk_ratelimit()) printk(KERN_WARNING "SELinux: Invalid class %hu\n", tclass); return; } tclass_datum = policydb.class_val_to_struct[tclass - 1]; /* * If a specific type enforcement rule was defined for * this permission check, then use it. */ avkey.target_class = tclass; avkey.specified = AVTAB_AV; sattr = flex_array_get(policydb.type_attr_map_array, scontext->type - 1); BUG_ON(!sattr); tattr = flex_array_get(policydb.type_attr_map_array, tcontext->type - 1); BUG_ON(!tattr); ebitmap_for_each_positive_bit(sattr, snode, i) { ebitmap_for_each_positive_bit(tattr, tnode, j) { avkey.source_type = i + 1; avkey.target_type = j + 1; for (node = avtab_search_node(&policydb.te_avtab, &avkey); node; node = avtab_search_node_next(node, avkey.specified)) { if (node->key.specified == AVTAB_ALLOWED) avd->allowed |= node->datum.data; else if (node->key.specified == AVTAB_AUDITALLOW) avd->auditallow |= node->datum.data; else if (node->key.specified == AVTAB_AUDITDENY) avd->auditdeny &= node->datum.data; } /* Check conditional av table for additional permissions */ cond_compute_av(&policydb.te_cond_avtab, &avkey, avd); } } /* * Remove any permissions prohibited by a constraint (this includes * the MLS policy). 
*/ constraint = tclass_datum->constraints; while (constraint) { if ((constraint->permissions & (avd->allowed)) && !constraint_expr_eval(scontext, tcontext, NULL, constraint->expr)) { avd->allowed &= ~(constraint->permissions); } constraint = constraint->next; } /* * If checking process transition permission and the * role is changing, then check the (current_role, new_role) * pair. */ if (tclass == policydb.process_class && (avd->allowed & policydb.process_trans_perms) && scontext->role != tcontext->role) { for (ra = policydb.role_allow; ra; ra = ra->next) { if (scontext->role == ra->role && tcontext->role == ra->new_role) break; } if (!ra) avd->allowed &= ~policydb.process_trans_perms; } /* * If the given source and target types have boundary * constraint, lazy checks have to mask any violated * permission and notice it to userspace via audit. */ type_attribute_bounds_av(scontext, tcontext, tclass, avd); } static int security_validtrans_handle_fail(struct context *ocontext, struct context *ncontext, struct context *tcontext, u16 tclass) { char *o = NULL, *n = NULL, *t = NULL; u32 olen, nlen, tlen; if (context_struct_to_string(ocontext, &o, &olen)) goto out; if (context_struct_to_string(ncontext, &n, &nlen)) goto out; if (context_struct_to_string(tcontext, &t, &tlen)) goto out; audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR, "security_validate_transition: denied for" " oldcontext=%s newcontext=%s taskcontext=%s tclass=%s", o, n, t, sym_name(&policydb, SYM_CLASSES, tclass-1)); out: kfree(o); kfree(n); kfree(t); if (!selinux_enforcing) return 0; return -EPERM; } int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, u16 orig_tclass) { struct context *ocontext; struct context *ncontext; struct context *tcontext; struct class_datum *tclass_datum; struct constraint_node *constraint; u16 tclass; int rc = 0; if (!ss_initialized) return 0; read_lock(&policy_rwlock); tclass = unmap_class(orig_tclass); if (!tclass || tclass > 
policydb.p_classes.nprim) { printk(KERN_ERR "SELinux: %s: unrecognized class %d\n", __func__, tclass); rc = -EINVAL; goto out; } tclass_datum = policydb.class_val_to_struct[tclass - 1]; ocontext = sidtab_search(&sidtab, oldsid); if (!ocontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, oldsid); rc = -EINVAL; goto out; } ncontext = sidtab_search(&sidtab, newsid); if (!ncontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, newsid); rc = -EINVAL; goto out; } tcontext = sidtab_search(&sidtab, tasksid); if (!tcontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, tasksid); rc = -EINVAL; goto out; } constraint = tclass_datum->validatetrans; while (constraint) { if (!constraint_expr_eval(ocontext, ncontext, tcontext, constraint->expr)) { rc = security_validtrans_handle_fail(ocontext, ncontext, tcontext, tclass); goto out; } constraint = constraint->next; } out: read_unlock(&policy_rwlock); return rc; } /* * security_bounded_transition - check whether the given * transition is directed to bounded, or not. * It returns 0, if @newsid is bounded by @oldsid. * Otherwise, it returns error code. 
* * @oldsid : current security identifier * @newsid : destinated security identifier */ int security_bounded_transition(u32 old_sid, u32 new_sid) { struct context *old_context, *new_context; struct type_datum *type; int index; int rc; read_lock(&policy_rwlock); rc = -EINVAL; old_context = sidtab_search(&sidtab, old_sid); if (!old_context) { printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n", __func__, old_sid); goto out; } rc = -EINVAL; new_context = sidtab_search(&sidtab, new_sid); if (!new_context) { printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n", __func__, new_sid); goto out; } rc = 0; /* type/domain unchanged */ if (old_context->type == new_context->type) goto out; index = new_context->type; while (true) { type = flex_array_get_ptr(policydb.type_val_to_struct_array, index - 1); BUG_ON(!type); /* not bounded anymore */ rc = -EPERM; if (!type->bounds) break; /* @newsid is bounded by @oldsid */ rc = 0; if (type->bounds == old_context->type) break; index = type->bounds; } if (rc) { char *old_name = NULL; char *new_name = NULL; u32 length; if (!context_struct_to_string(old_context, &old_name, &length) && !context_struct_to_string(new_context, &new_name, &length)) { audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR, "op=security_bounded_transition " "result=denied " "oldcontext=%s newcontext=%s", old_name, new_name); } kfree(new_name); kfree(old_name); } out: read_unlock(&policy_rwlock); return rc; } static void avd_init(struct av_decision *avd) { avd->allowed = 0; avd->auditallow = 0; avd->auditdeny = 0xffffffff; avd->seqno = latest_granting; avd->flags = 0; } /** * security_compute_av - Compute access vector decisions. * @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * @avd: access vector decisions * * Compute a set of access vector decisions based on the * SID pair (@ssid, @tsid) for the permissions in @tclass. 
*/ void security_compute_av(u32 ssid, u32 tsid, u16 orig_tclass, struct av_decision *avd) { u16 tclass; struct context *scontext = NULL, *tcontext = NULL; read_lock(&policy_rwlock); avd_init(avd); if (!ss_initialized) goto allow; scontext = sidtab_search(&sidtab, ssid); if (!scontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, ssid); goto out; } /* permissive domain? */ if (ebitmap_get_bit(&policydb.permissive_map, scontext->type)) avd->flags |= AVD_FLAGS_PERMISSIVE; tcontext = sidtab_search(&sidtab, tsid); if (!tcontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, tsid); goto out; } tclass = unmap_class(orig_tclass); if (unlikely(orig_tclass && !tclass)) { if (policydb.allow_unknown) goto allow; goto out; } context_struct_compute_av(scontext, tcontext, tclass, avd); map_decision(orig_tclass, avd, policydb.allow_unknown); out: read_unlock(&policy_rwlock); return; allow: avd->allowed = 0xffffffff; goto out; } void security_compute_av_user(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd) { struct context *scontext = NULL, *tcontext = NULL; read_lock(&policy_rwlock); avd_init(avd); if (!ss_initialized) goto allow; scontext = sidtab_search(&sidtab, ssid); if (!scontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, ssid); goto out; } /* permissive domain? */ if (ebitmap_get_bit(&policydb.permissive_map, scontext->type)) avd->flags |= AVD_FLAGS_PERMISSIVE; tcontext = sidtab_search(&sidtab, tsid); if (!tcontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, tsid); goto out; } if (unlikely(!tclass)) { if (policydb.allow_unknown) goto allow; goto out; } context_struct_compute_av(scontext, tcontext, tclass, avd); out: read_unlock(&policy_rwlock); return; allow: avd->allowed = 0xffffffff; goto out; } /* * Write the security context string representation of * the context structure `context' into a dynamically * allocated string of the correct size. 
Set `*scontext' * to point to this string and set `*scontext_len' to * the length of the string. */ static int context_struct_to_string(struct context *context, char **scontext, u32 *scontext_len) { char *scontextp; if (scontext) *scontext = NULL; *scontext_len = 0; if (context->len) { *scontext_len = context->len; if (scontext) { *scontext = kstrdup(context->str, GFP_ATOMIC); if (!(*scontext)) return -ENOMEM; } return 0; } /* Compute the size of the context. */ *scontext_len += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) + 1; *scontext_len += strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) + 1; *scontext_len += strlen(sym_name(&policydb, SYM_TYPES, context->type - 1)) + 1; *scontext_len += mls_compute_context_len(context); if (!scontext) return 0; /* Allocate space for the context; caller must free this space. */ scontextp = kmalloc(*scontext_len, GFP_ATOMIC); if (!scontextp) return -ENOMEM; *scontext = scontextp; /* * Copy the user name, role name and type name into the context. 
*/ sprintf(scontextp, "%s:%s:%s", sym_name(&policydb, SYM_USERS, context->user - 1), sym_name(&policydb, SYM_ROLES, context->role - 1), sym_name(&policydb, SYM_TYPES, context->type - 1)); scontextp += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) + 1 + strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) + 1 + strlen(sym_name(&policydb, SYM_TYPES, context->type - 1)); mls_sid_to_context(context, &scontextp); *scontextp = 0; return 0; } #include "initial_sid_to_string.h" const char *security_get_initial_sid_context(u32 sid) { if (unlikely(sid > SECINITSID_NUM)) return NULL; return initial_sid_to_string[sid]; } static int security_sid_to_context_core(u32 sid, char **scontext, u32 *scontext_len, int force) { struct context *context; int rc = 0; if (scontext) *scontext = NULL; *scontext_len = 0; if (!ss_initialized) { if (sid <= SECINITSID_NUM) { char *scontextp; *scontext_len = strlen(initial_sid_to_string[sid]) + 1; if (!scontext) goto out; scontextp = kmalloc(*scontext_len, GFP_ATOMIC); if (!scontextp) { rc = -ENOMEM; goto out; } strcpy(scontextp, initial_sid_to_string[sid]); *scontext = scontextp; goto out; } printk(KERN_ERR "SELinux: %s: called before initial " "load_policy on unknown SID %d\n", __func__, sid); rc = -EINVAL; goto out; } read_lock(&policy_rwlock); if (force) context = sidtab_search_force(&sidtab, sid); else context = sidtab_search(&sidtab, sid); if (!context) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, sid); rc = -EINVAL; goto out_unlock; } rc = context_struct_to_string(context, scontext, scontext_len); out_unlock: read_unlock(&policy_rwlock); out: return rc; } /** * security_sid_to_context - Obtain a context for a given SID. * @sid: security identifier, SID * @scontext: security context * @scontext_len: length in bytes * * Write the string representation of the context associated with @sid * into a dynamically allocated string of the correct size. 
Set @scontext * to point to this string and set @scontext_len to the length of the string. */ int security_sid_to_context(u32 sid, char **scontext, u32 *scontext_len) { return security_sid_to_context_core(sid, scontext, scontext_len, 0); } int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len) { return security_sid_to_context_core(sid, scontext, scontext_len, 1); } /* * Caveat: Mutates scontext. */ static int string_to_context_struct(struct policydb *pol, struct sidtab *sidtabp, char *scontext, u32 scontext_len, struct context *ctx, u32 def_sid) { struct role_datum *role; struct type_datum *typdatum; struct user_datum *usrdatum; char *scontextp, *p, oldc; int rc = 0; context_init(ctx); /* Parse the security context. */ rc = -EINVAL; scontextp = (char *) scontext; /* Extract the user. */ p = scontextp; while (*p && *p != ':') p++; if (*p == 0) goto out; *p++ = 0; usrdatum = hashtab_search(pol->p_users.table, scontextp); if (!usrdatum) goto out; ctx->user = usrdatum->value; /* Extract role. */ scontextp = p; while (*p && *p != ':') p++; if (*p == 0) goto out; *p++ = 0; role = hashtab_search(pol->p_roles.table, scontextp); if (!role) goto out; ctx->role = role->value; /* Extract type. */ scontextp = p; while (*p && *p != ':') p++; oldc = *p; *p++ = 0; typdatum = hashtab_search(pol->p_types.table, scontextp); if (!typdatum || typdatum->attribute) goto out; ctx->type = typdatum->value; rc = mls_context_to_sid(pol, oldc, &p, ctx, sidtabp, def_sid); if (rc) goto out; rc = -EINVAL; if ((p - scontext) < scontext_len) goto out; /* Check the validity of the new context. */ if (!policydb_context_isvalid(pol, ctx)) goto out; rc = 0; out: if (rc) context_destroy(ctx); return rc; } static int security_context_to_sid_core(const char *scontext, u32 scontext_len, u32 *sid, u32 def_sid, gfp_t gfp_flags, int force) { char *scontext2, *str = NULL; struct context context; int rc = 0; /* An empty security context is never valid. 
*/ if (!scontext_len) return -EINVAL; if (!ss_initialized) { int i; for (i = 1; i < SECINITSID_NUM; i++) { if (!strcmp(initial_sid_to_string[i], scontext)) { *sid = i; return 0; } } *sid = SECINITSID_KERNEL; return 0; } *sid = SECSID_NULL; /* Copy the string so that we can modify the copy as we parse it. */ scontext2 = kmalloc(scontext_len + 1, gfp_flags); if (!scontext2) return -ENOMEM; memcpy(scontext2, scontext, scontext_len); scontext2[scontext_len] = 0; if (force) { /* Save another copy for storing in uninterpreted form */ rc = -ENOMEM; str = kstrdup(scontext2, gfp_flags); if (!str) goto out; } read_lock(&policy_rwlock); rc = string_to_context_struct(&policydb, &sidtab, scontext2, scontext_len, &context, def_sid); if (rc == -EINVAL && force) { context.str = str; context.len = scontext_len; str = NULL; } else if (rc) goto out_unlock; rc = sidtab_context_to_sid(&sidtab, &context, sid); context_destroy(&context); out_unlock: read_unlock(&policy_rwlock); out: kfree(scontext2); kfree(str); return rc; } /** * security_context_to_sid - Obtain a SID for a given security context. * @scontext: security context * @scontext_len: length in bytes * @sid: security identifier, SID * * Obtains a SID associated with the security context that * has the string representation specified by @scontext. * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient * memory is available, or 0 on success. */ int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid) { return security_context_to_sid_core(scontext, scontext_len, sid, SECSID_NULL, GFP_KERNEL, 0); } /** * security_context_to_sid_default - Obtain a SID for a given security context, * falling back to specified default if needed. * * @scontext: security context * @scontext_len: length in bytes * @sid: security identifier, SID * @def_sid: default SID to assign on error * * Obtains a SID associated with the security context that * has the string representation specified by @scontext. 
* The default SID is passed to the MLS layer to be used to allow * kernel labeling of the MLS field if the MLS field is not present * (for upgrading to MLS without full relabel). * Implicitly forces adding of the context even if it cannot be mapped yet. * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient * memory is available, or 0 on success. */ int security_context_to_sid_default(const char *scontext, u32 scontext_len, u32 *sid, u32 def_sid, gfp_t gfp_flags) { return security_context_to_sid_core(scontext, scontext_len, sid, def_sid, gfp_flags, 1); } int security_context_to_sid_force(const char *scontext, u32 scontext_len, u32 *sid) { return security_context_to_sid_core(scontext, scontext_len, sid, SECSID_NULL, GFP_KERNEL, 1); } static int compute_sid_handle_invalid_context( struct context *scontext, struct context *tcontext, u16 tclass, struct context *newcontext) { char *s = NULL, *t = NULL, *n = NULL; u32 slen, tlen, nlen; if (context_struct_to_string(scontext, &s, &slen)) goto out; if (context_struct_to_string(tcontext, &t, &tlen)) goto out; if (context_struct_to_string(newcontext, &n, &nlen)) goto out; audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR, "security_compute_sid: invalid context %s" " for scontext=%s" " tcontext=%s" " tclass=%s", n, s, t, sym_name(&policydb, SYM_CLASSES, tclass-1)); out: kfree(s); kfree(t); kfree(n); if (!selinux_enforcing) return 0; return -EACCES; } static void filename_compute_type(struct policydb *p, struct context *newcontext, u32 stype, u32 ttype, u16 tclass, const char *objname) { struct filename_trans ft; struct filename_trans_datum *otype; /* * Most filename trans rules are going to live in specific directories * like /dev or /var/run. This bitmap will quickly skip rule searches * if the ttype does not contain any rules. 
*/ if (!ebitmap_get_bit(&p->filename_trans_ttypes, ttype)) return; ft.stype = stype; ft.ttype = ttype; ft.tclass = tclass; ft.name = objname; otype = hashtab_search(p->filename_trans, &ft); if (otype) newcontext->type = otype->otype; } static int security_compute_sid(u32 ssid, u32 tsid, u16 orig_tclass, u32 specified, const char *objname, u32 *out_sid, bool kern) { struct class_datum *cladatum = NULL; struct context *scontext = NULL, *tcontext = NULL, newcontext; struct role_trans *roletr = NULL; struct avtab_key avkey; struct avtab_datum *avdatum; struct avtab_node *node; u16 tclass; int rc = 0; bool sock; if (!ss_initialized) { switch (orig_tclass) { case SECCLASS_PROCESS: /* kernel value */ *out_sid = ssid; break; default: *out_sid = tsid; break; } goto out; } context_init(&newcontext); read_lock(&policy_rwlock); if (kern) { tclass = unmap_class(orig_tclass); sock = security_is_socket_class(orig_tclass); } else { tclass = orig_tclass; sock = security_is_socket_class(map_class(tclass)); } scontext = sidtab_search(&sidtab, ssid); if (!scontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, ssid); rc = -EINVAL; goto out_unlock; } tcontext = sidtab_search(&sidtab, tsid); if (!tcontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, tsid); rc = -EINVAL; goto out_unlock; } if (tclass && tclass <= policydb.p_classes.nprim) cladatum = policydb.class_val_to_struct[tclass - 1]; /* Set the user identity. */ switch (specified) { case AVTAB_TRANSITION: case AVTAB_CHANGE: if (cladatum && cladatum->default_user == DEFAULT_TARGET) { newcontext.user = tcontext->user; } else { /* notice this gets both DEFAULT_SOURCE and unset */ /* Use the process user identity. */ newcontext.user = scontext->user; } break; case AVTAB_MEMBER: /* Use the related object owner. */ newcontext.user = tcontext->user; break; } /* Set the role to default values. 
*/ if (cladatum && cladatum->default_role == DEFAULT_SOURCE) { newcontext.role = scontext->role; } else if (cladatum && cladatum->default_role == DEFAULT_TARGET) { newcontext.role = tcontext->role; } else { if ((tclass == policydb.process_class) || (sock == true)) newcontext.role = scontext->role; else newcontext.role = OBJECT_R_VAL; } /* Set the type to default values. */ if (cladatum && cladatum->default_type == DEFAULT_SOURCE) { newcontext.type = scontext->type; } else if (cladatum && cladatum->default_type == DEFAULT_TARGET) { newcontext.type = tcontext->type; } else { if ((tclass == policydb.process_class) || (sock == true)) { /* Use the type of process. */ newcontext.type = scontext->type; } else { /* Use the type of the related object. */ newcontext.type = tcontext->type; } } /* Look for a type transition/member/change rule. */ avkey.source_type = scontext->type; avkey.target_type = tcontext->type; avkey.target_class = tclass; avkey.specified = specified; avdatum = avtab_search(&policydb.te_avtab, &avkey); /* If no permanent rule, also check for enabled conditional rules */ if (!avdatum) { node = avtab_search_node(&policydb.te_cond_avtab, &avkey); for (; node; node = avtab_search_node_next(node, specified)) { if (node->key.specified & AVTAB_ENABLED) { avdatum = &node->datum; break; } } } if (avdatum) { /* Use the type from the type transition/member/change rule. */ newcontext.type = avdatum->data; } /* if we have a objname this is a file trans check so check those rules */ if (objname) filename_compute_type(&policydb, &newcontext, scontext->type, tcontext->type, tclass, objname); /* Check for class-specific changes. */ if (specified & AVTAB_TRANSITION) { /* Look for a role transition rule. */ for (roletr = policydb.role_tr; roletr; roletr = roletr->next) { if ((roletr->role == scontext->role) && (roletr->type == tcontext->type) && (roletr->tclass == tclass)) { /* Use the role transition rule. 
 */
				newcontext.role = roletr->new_role;
				break;
			}
		}
	}

	/* Set the MLS attributes. This is done last because it may allocate memory. */
	rc = mls_compute_sid(scontext, tcontext, tclass, specified,
			     &newcontext, sock);
	if (rc)
		goto out_unlock;

	/* Check the validity of the context. */
	if (!policydb_context_isvalid(&policydb, &newcontext)) {
		/* Invalid result: audit it, and fail only when enforcing. */
		rc = compute_sid_handle_invalid_context(scontext, tcontext,
							tclass, &newcontext);
		if (rc)
			goto out_unlock;
	}
	/* Obtain the sid for the context. */
	rc = sidtab_context_to_sid(&sidtab, &newcontext, out_sid);
out_unlock:
	read_unlock(&policy_rwlock);
	context_destroy(&newcontext);
out:
	return rc;
}

/**
 * security_transition_sid - Compute the SID for a new subject/object.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @qstr: object name for filename transition rules, or NULL
 * @out_sid: security identifier for new subject/object
 *
 * Compute a SID to use for labeling a new subject or object in the
 * class @tclass based on a SID pair (@ssid, @tsid).
 * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
 * if insufficient memory is available, or %0 if the new SID was
 * computed successfully.
 */
int security_transition_sid(u32 ssid, u32 tsid, u16 tclass,
			    const struct qstr *qstr, u32 *out_sid)
{
	/* kern=true: @tclass is unmapped via unmap_class() before the
	 * policy lookup inside security_compute_sid(). */
	return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
				    qstr ? qstr->name : NULL, out_sid, true);
}

/* Userspace variant: @tclass is already a policy class value (kern=false). */
int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass,
				 const char *objname, u32 *out_sid)
{
	return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
				    objname, out_sid, false);
}

/**
 * security_member_sid - Compute the SID for member selection.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @out_sid: security identifier for selected member
 *
 * Compute a SID to use when selecting a member of a polyinstantiated
 * object of class @tclass based on a SID pair (@ssid, @tsid).
 * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
 * if insufficient memory is available, or %0 if the SID was
 * computed successfully.
 */
int security_member_sid(u32 ssid,
			u32 tsid,
			u16 tclass,
			u32 *out_sid)
{
	return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, NULL,
				    out_sid, false);
}

/**
 * security_change_sid - Compute the SID for object relabeling.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @out_sid: security identifier for selected member
 *
 * Compute a SID to use for relabeling an object of class @tclass
 * based on a SID pair (@ssid, @tsid).
 * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
 * if insufficient memory is available, or %0 if the SID was
 * computed successfully.
 */
int security_change_sid(u32 ssid,
			u32 tsid,
			u16 tclass,
			u32 *out_sid)
{
	return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, NULL,
				    out_sid, false);
}

/* Clone the SID into the new SID table. */
static int clone_sid(u32 sid, struct context *context, void *arg)
{
	struct sidtab *s = arg;

	/* Initial SIDs (<= SECINITSID_NUM) are regenerated from the new
	 * policy itself; only dynamically allocated SIDs are carried over. */
	if (sid > SECINITSID_NUM)
		return sidtab_insert(s, sid, context);
	else
		return 0;
}

/*
 * Decide what to do with a context that fails validation under a new
 * policy: hard error when enforcing, warn-and-accept when permissive.
 */
static inline int convert_context_handle_invalid_context(struct context *context)
{
	char *s;
	u32 len;

	if (selinux_enforcing)
		return -EINVAL;

	if (!context_struct_to_string(context, &s, &len)) {
		printk(KERN_WARNING "SELinux: Context %s would be invalid if enforcing\n", s);
		kfree(s);
	}
	return 0;
}

/* Old/new policydb pair passed through sidtab_map() to convert_context(). */
struct convert_context_args {
	struct policydb *oldp;
	struct policydb *newp;
};

/*
 * Convert the values in the security context
 * structure `c' from the values specified
 * in the policy `p->oldp' to the values specified
 * in the policy `p->newp'. Verify that the
 * context is valid under the new policy.
*/ static int convert_context(u32 key, struct context *c, void *p) { struct convert_context_args *args; struct context oldc; struct ocontext *oc; struct mls_range *range; struct role_datum *role; struct type_datum *typdatum; struct user_datum *usrdatum; char *s; u32 len; int rc = 0; if (key <= SECINITSID_NUM) goto out; args = p; if (c->str) { struct context ctx; rc = -ENOMEM; s = kstrdup(c->str, GFP_KERNEL); if (!s) goto out; rc = string_to_context_struct(args->newp, NULL, s, c->len, &ctx, SECSID_NULL); kfree(s); if (!rc) { printk(KERN_INFO "SELinux: Context %s became valid (mapped).\n", c->str); /* Replace string with mapped representation. */ kfree(c->str); memcpy(c, &ctx, sizeof(*c)); goto out; } else if (rc == -EINVAL) { /* Retain string representation for later mapping. */ rc = 0; goto out; } else { /* Other error condition, e.g. ENOMEM. */ printk(KERN_ERR "SELinux: Unable to map context %s, rc = %d.\n", c->str, -rc); goto out; } } rc = context_cpy(&oldc, c); if (rc) goto out; /* Convert the user. */ rc = -EINVAL; usrdatum = hashtab_search(args->newp->p_users.table, sym_name(args->oldp, SYM_USERS, c->user - 1)); if (!usrdatum) goto bad; c->user = usrdatum->value; /* Convert the role. */ rc = -EINVAL; role = hashtab_search(args->newp->p_roles.table, sym_name(args->oldp, SYM_ROLES, c->role - 1)); if (!role) goto bad; c->role = role->value; /* Convert the type. */ rc = -EINVAL; typdatum = hashtab_search(args->newp->p_types.table, sym_name(args->oldp, SYM_TYPES, c->type - 1)); if (!typdatum) goto bad; c->type = typdatum->value; /* Convert the MLS fields if dealing with MLS policies */ if (args->oldp->mls_enabled && args->newp->mls_enabled) { rc = mls_convert_context(args->oldp, args->newp, c); if (rc) goto bad; } else if (args->oldp->mls_enabled && !args->newp->mls_enabled) { /* * Switching between MLS and non-MLS policy: * free any storage used by the MLS fields in the * context for all existing entries in the sidtab. 
*/ mls_context_destroy(c); } else if (!args->oldp->mls_enabled && args->newp->mls_enabled) { /* * Switching between non-MLS and MLS policy: * ensure that the MLS fields of the context for all * existing entries in the sidtab are filled in with a * suitable default value, likely taken from one of the * initial SIDs. */ oc = args->newp->ocontexts[OCON_ISID]; while (oc && oc->sid[0] != SECINITSID_UNLABELED) oc = oc->next; rc = -EINVAL; if (!oc) { printk(KERN_ERR "SELinux: unable to look up" " the initial SIDs list\n"); goto bad; } range = &oc->context[0].range; rc = mls_range_set(c, range); if (rc) goto bad; } /* Check the validity of the new context. */ if (!policydb_context_isvalid(args->newp, c)) { rc = convert_context_handle_invalid_context(&oldc); if (rc) goto bad; } context_destroy(&oldc); rc = 0; out: return rc; bad: /* Map old representation to string and save it. */ rc = context_struct_to_string(&oldc, &s, &len); if (rc) return rc; context_destroy(&oldc); context_destroy(c); c->str = s; c->len = len; printk(KERN_INFO "SELinux: Context %s became invalid (unmapped).\n", c->str); rc = 0; goto out; } static void security_load_policycaps(void) { selinux_policycap_netpeer = ebitmap_get_bit(&policydb.policycaps, POLICYDB_CAPABILITY_NETPEER); selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps, POLICYDB_CAPABILITY_OPENPERM); } static int security_preserve_bools(struct policydb *p); /** * security_load_policy - Load a security policy configuration. * @data: binary policy data * @len: length of data in bytes * * Load a new set of security policy configuration data, * validate it and convert the SID table as necessary. * This function will flush the access vector cache after * loading the new policy. 
*/ int security_load_policy(void *data, size_t len) { struct policydb oldpolicydb, newpolicydb; struct sidtab oldsidtab, newsidtab; struct selinux_mapping *oldmap, *map = NULL; struct convert_context_args args; u32 seqno; u16 map_size; int rc = 0; struct policy_file file = { data, len }, *fp = &file; if (!ss_initialized) { avtab_cache_init(); rc = policydb_read(&policydb, fp); if (rc) { avtab_cache_destroy(); return rc; } policydb.len = len; rc = selinux_set_mapping(&policydb, secclass_map, &current_mapping, &current_mapping_size); if (rc) { policydb_destroy(&policydb); avtab_cache_destroy(); return rc; } rc = policydb_load_isids(&policydb, &sidtab); if (rc) { policydb_destroy(&policydb); avtab_cache_destroy(); return rc; } security_load_policycaps(); ss_initialized = 1; seqno = ++latest_granting; selinux_complete_init(); avc_ss_reset(seqno); selnl_notify_policyload(seqno); selinux_status_update_policyload(seqno); selinux_netlbl_cache_invalidate(); selinux_xfrm_notify_policyload(); return 0; } #if 0 sidtab_hash_eval(&sidtab, "sids"); #endif rc = policydb_read(&newpolicydb, fp); if (rc) return rc; newpolicydb.len = len; /* If switching between different policy types, log MLS status */ if (policydb.mls_enabled && !newpolicydb.mls_enabled) printk(KERN_INFO "SELinux: Disabling MLS support...\n"); else if (!policydb.mls_enabled && newpolicydb.mls_enabled) printk(KERN_INFO "SELinux: Enabling MLS support...\n"); rc = policydb_load_isids(&newpolicydb, &newsidtab); if (rc) { printk(KERN_ERR "SELinux: unable to load the initial SIDs\n"); policydb_destroy(&newpolicydb); return rc; } rc = selinux_set_mapping(&newpolicydb, secclass_map, &map, &map_size); if (rc) goto err; rc = security_preserve_bools(&newpolicydb); if (rc) { printk(KERN_ERR "SELinux: unable to preserve booleans\n"); goto err; } /* Clone the SID table. 
*/ sidtab_shutdown(&sidtab); rc = sidtab_map(&sidtab, clone_sid, &newsidtab); if (rc) goto err; /* * Convert the internal representations of contexts * in the new SID table. */ args.oldp = &policydb; args.newp = &newpolicydb; rc = sidtab_map(&newsidtab, convert_context, &args); if (rc) { printk(KERN_ERR "SELinux: unable to convert the internal" " representation of contexts in the new SID" " table\n"); goto err; } /* Save the old policydb and SID table to free later. */ memcpy(&oldpolicydb, &policydb, sizeof policydb); sidtab_set(&oldsidtab, &sidtab); /* Install the new policydb and SID table. */ write_lock_irq(&policy_rwlock); memcpy(&policydb, &newpolicydb, sizeof policydb); sidtab_set(&sidtab, &newsidtab); security_load_policycaps(); oldmap = current_mapping; current_mapping = map; current_mapping_size = map_size; seqno = ++latest_granting; write_unlock_irq(&policy_rwlock); /* Free the old policydb and SID table. */ policydb_destroy(&oldpolicydb); sidtab_destroy(&oldsidtab); kfree(oldmap); avc_ss_reset(seqno); selnl_notify_policyload(seqno); selinux_status_update_policyload(seqno); selinux_netlbl_cache_invalidate(); selinux_xfrm_notify_policyload(); return 0; err: kfree(map); sidtab_destroy(&newsidtab); policydb_destroy(&newpolicydb); return rc; } size_t security_policydb_len(void) { size_t len; read_lock(&policy_rwlock); len = policydb.len; read_unlock(&policy_rwlock); return len; } /** * security_port_sid - Obtain the SID for a port. 
 * @protocol: protocol number
 * @port: port number
 * @out_sid: security identifier
 */
int security_port_sid(u8 protocol, u16 port, u32 *out_sid)
{
	struct ocontext *c;
	int rc = 0;

	read_lock(&policy_rwlock);

	/* Walk the port ocontext list for a matching protocol/port range. */
	c = policydb.ocontexts[OCON_PORT];
	while (c) {
		if (c->u.port.protocol == protocol &&
		    c->u.port.low_port <= port &&
		    c->u.port.high_port >= port)
			break;
		c = c->next;
	}

	if (c) {
		/* Lazily compute and cache the SID for this entry. */
		if (!c->sid[0]) {
			rc = sidtab_context_to_sid(&sidtab,
						   &c->context[0],
						   &c->sid[0]);
			if (rc)
				goto out;
		}
		*out_sid = c->sid[0];
	} else {
		/* No matching entry: use the generic port initial SID. */
		*out_sid = SECINITSID_PORT;
	}

out:
	read_unlock(&policy_rwlock);
	return rc;
}

/**
 * security_netif_sid - Obtain the SID for a network interface.
 * @name: interface name
 * @if_sid: interface SID
 */
int security_netif_sid(char *name, u32 *if_sid)
{
	int rc = 0;
	struct ocontext *c;

	read_lock(&policy_rwlock);

	/* Find the ocontext entry whose name matches exactly. */
	c = policydb.ocontexts[OCON_NETIF];
	while (c) {
		if (strcmp(name, c->u.name) == 0)
			break;
		c = c->next;
	}

	if (c) {
		/* Each netif entry carries two contexts; map both lazily. */
		if (!c->sid[0] || !c->sid[1]) {
			rc = sidtab_context_to_sid(&sidtab,
						  &c->context[0],
						  &c->sid[0]);
			if (rc)
				goto out;
			rc = sidtab_context_to_sid(&sidtab,
						   &c->context[1],
						   &c->sid[1]);
			if (rc)
				goto out;
		}
		*if_sid = c->sid[0];
	} else
		*if_sid = SECINITSID_NETIF;

out:
	read_unlock(&policy_rwlock);
	return rc;
}

/*
 * Return nonzero iff (@input & @mask) equals @addr for all four 32-bit
 * words of an IPv6 address.
 */
static int match_ipv6_addrmask(u32 *input, u32 *addr, u32 *mask)
{
	int i, fail = 0;

	for (i = 0; i < 4; i++)
		if (addr[i] != (input[i] & mask[i])) {
			fail = 1;
			break;
		}

	return !fail;
}

/**
 * security_node_sid - Obtain the SID for a node (host).
 * @domain: communication domain aka address family
 * @addrp: address
 * @addrlen: address length in bytes
 * @out_sid: security identifier
 */
int security_node_sid(u16 domain,
		      void *addrp,
		      u32 addrlen,
		      u32 *out_sid)
{
	int rc;
	struct ocontext *c;

	read_lock(&policy_rwlock);

	switch (domain) {
	case AF_INET: {
		u32 addr;

		rc = -EINVAL;
		if (addrlen != sizeof(u32))
			goto out;

		addr = *((u32 *)addrp);

		/* Match masked IPv4 address against the node ocontexts. */
		c = policydb.ocontexts[OCON_NODE];
		while (c) {
			if (c->u.node.addr == (addr & c->u.node.mask))
				break;
			c = c->next;
		}
		break;
	}

	case AF_INET6:
		rc = -EINVAL;
		if (addrlen != sizeof(u64) * 2)
			goto out;
		c = policydb.ocontexts[OCON_NODE6];
		while (c) {
			if (match_ipv6_addrmask(addrp, c->u.node6.addr,
						c->u.node6.mask))
				break;
			c = c->next;
		}
		break;

	default:
		/* Unknown family: fall back to the generic node SID. */
		rc = 0;
		*out_sid = SECINITSID_NODE;
		goto out;
	}

	if (c) {
		/* Lazily compute and cache the SID for this entry. */
		if (!c->sid[0]) {
			rc = sidtab_context_to_sid(&sidtab,
						   &c->context[0],
						   &c->sid[0]);
			if (rc)
				goto out;
		}
		*out_sid = c->sid[0];
	} else {
		*out_sid = SECINITSID_NODE;
	}

	rc = 0;
out:
	read_unlock(&policy_rwlock);
	return rc;
}

/* Initial (and growth increment) capacity of the SID array below. */
#define SIDS_NEL 25

/**
 * security_get_user_sids - Obtain reachable SIDs for a user.
 * @fromsid: starting SID
 * @username: username
 * @sids: array of reachable SIDs for user
 * @nel: number of elements in @sids
 *
 * Generate the set of SIDs for legal security contexts
 * for a given user that can be reached by @fromsid.
 * Set *@sids to point to a dynamically allocated
 * array containing the set of SIDs. Set *@nel to the
 * number of elements in the array.
 */
int security_get_user_sids(u32 fromsid,
			   char *username,
			   u32 **sids,
			   u32 *nel)
{
	struct context *fromcon, usercon;
	u32 *mysids = NULL, *mysids2, sid;
	u32 mynel = 0, maxnel = SIDS_NEL;
	struct user_datum *user;
	struct role_datum *role;
	struct ebitmap_node *rnode, *tnode;
	int rc = 0, i, j;

	*sids = NULL;
	*nel = 0;

	if (!ss_initialized)
		goto out;

	read_lock(&policy_rwlock);

	context_init(&usercon);

	rc = -EINVAL;
	fromcon = sidtab_search(&sidtab, fromsid);
	if (!fromcon)
		goto out_unlock;

	rc = -EINVAL;
	user = hashtab_search(policydb.p_users.table, username);
	if (!user)
		goto out_unlock;

	usercon.user = user->value;

	rc = -ENOMEM;
	mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC);
	if (!mysids)
		goto out_unlock;

	/* Enumerate every (role, type) pair allowed for this user and
	 * derive a candidate context/SID for each. */
	ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
		role = policydb.role_val_to_struct[i];
		usercon.role = i + 1;
		ebitmap_for_each_positive_bit(&role->types, tnode, j) {
			usercon.type = j + 1;

			/* Skip combinations whose MLS range can't be set up. */
			if (mls_setup_user_range(fromcon, user, &usercon))
				continue;

			rc = sidtab_context_to_sid(&sidtab, &usercon, &sid);
			if (rc)
				goto out_unlock;
			if (mynel < maxnel) {
				mysids[mynel++] = sid;
			} else {
				/* Array full: grow by SIDS_NEL entries. */
				rc = -ENOMEM;
				maxnel += SIDS_NEL;
				mysids2 = kcalloc(maxnel, sizeof(*mysids2), GFP_ATOMIC);
				if (!mysids2)
					goto out_unlock;
				memcpy(mysids2, mysids, mynel * sizeof(*mysids2));
				kfree(mysids);
				mysids = mysids2;
				mysids[mynel++] = sid;
			}
		}
	}
	rc = 0;
out_unlock:
	read_unlock(&policy_rwlock);
	if (rc || !mynel) {
		kfree(mysids);
		goto out;
	}

	/* Second pass (outside the policy lock): keep only the SIDs that
	 * @fromsid is actually allowed to transition to. */
	rc = -ENOMEM;
	mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL);
	if (!mysids2) {
		kfree(mysids);
		goto out;
	}
	for (i = 0, j = 0; i < mynel; i++) {
		struct av_decision dummy_avd;
		rc = avc_has_perm_noaudit(fromsid, mysids[i],
					  SECCLASS_PROCESS, /* kernel value */
					  PROCESS__TRANSITION, AVC_STRICT,
					  &dummy_avd);
		if (!rc)
			mysids2[j++] = mysids[i];
		cond_resched();
	}
	rc = 0;
	kfree(mysids);
	*sids = mysids2;
	*nel = j;
out:
	return rc;
}

/**
 * security_genfs_sid - Obtain a SID for a file in a filesystem
 * @fstype: filesystem type
 * @path: path from root of mount
 * @sclass:
 *	file security class
 * @sid: SID for path
 *
 * Obtain a SID to use for a file in a filesystem that
 * cannot support xattr or use a fixed labeling behavior like
 * transition SIDs or task SIDs.
 */
int security_genfs_sid(const char *fstype,
		       char *path,
		       u16 orig_sclass,
		       u32 *sid)
{
	int len;
	u16 sclass;
	struct genfs *genfs;
	struct ocontext *c;
	int rc, cmp = 0;

	/* Collapse leading duplicate slashes in the path. */
	while (path[0] == '/' && path[1] == '/')
		path++;

	read_lock(&policy_rwlock);

	sclass = unmap_class(orig_sclass);
	*sid = SECINITSID_UNLABELED;

	/* The genfs list is kept sorted by fstype: stop at the first
	 * entry not lexically smaller than @fstype. */
	for (genfs = policydb.genfs; genfs; genfs = genfs->next) {
		cmp = strcmp(fstype, genfs->fstype);
		if (cmp <= 0)
			break;
	}

	rc = -ENOENT;
	if (!genfs || cmp)
		goto out;

	/* First entry whose path is a prefix of @path wins, optionally
	 * restricted to a specific security class. */
	for (c = genfs->head; c; c = c->next) {
		len = strlen(c->u.name);
		if ((!c->v.sclass || sclass == c->v.sclass) &&
		    (strncmp(c->u.name, path, len) == 0))
			break;
	}

	rc = -ENOENT;
	if (!c)
		goto out;

	/* Lazily compute and cache the SID for the matching entry. */
	if (!c->sid[0]) {
		rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]);
		if (rc)
			goto out;
	}

	*sid = c->sid[0];
	rc = 0;
out:
	read_unlock(&policy_rwlock);
	return rc;
}

/**
 * security_fs_use - Determine how to handle labeling for a filesystem.
* @fstype: filesystem type * @behavior: labeling behavior * @sid: SID for filesystem (superblock) */ int security_fs_use( const char *fstype, unsigned int *behavior, u32 *sid) { int rc = 0; struct ocontext *c; read_lock(&policy_rwlock); c = policydb.ocontexts[OCON_FSUSE]; while (c) { if (strcmp(fstype, c->u.name) == 0) break; c = c->next; } if (c) { *behavior = c->v.behavior; if (!c->sid[0]) { rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]); if (rc) goto out; } *sid = c->sid[0]; } else { rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, sid); if (rc) { *behavior = SECURITY_FS_USE_NONE; rc = 0; } else { *behavior = SECURITY_FS_USE_GENFS; } } out: read_unlock(&policy_rwlock); return rc; } int security_get_bools(int *len, char ***names, int **values) { int i, rc; read_lock(&policy_rwlock); *names = NULL; *values = NULL; rc = 0; *len = policydb.p_bools.nprim; if (!*len) goto out; rc = -ENOMEM; *names = kcalloc(*len, sizeof(char *), GFP_ATOMIC); if (!*names) goto err; rc = -ENOMEM; *values = kcalloc(*len, sizeof(int), GFP_ATOMIC); if (!*values) goto err; for (i = 0; i < *len; i++) { size_t name_len; (*values)[i] = policydb.bool_val_to_struct[i]->state; name_len = strlen(sym_name(&policydb, SYM_BOOLS, i)) + 1; rc = -ENOMEM; (*names)[i] = kmalloc(sizeof(char) * name_len, GFP_ATOMIC); if (!(*names)[i]) goto err; strncpy((*names)[i], sym_name(&policydb, SYM_BOOLS, i), name_len); (*names)[i][name_len - 1] = 0; } rc = 0; out: read_unlock(&policy_rwlock); return rc; err: if (*names) { for (i = 0; i < *len; i++) kfree((*names)[i]); } kfree(*values); goto out; } int security_set_bools(int len, int *values) { int i, rc; int lenp, seqno = 0; struct cond_node *cur; write_lock_irq(&policy_rwlock); rc = -EFAULT; lenp = policydb.p_bools.nprim; if (len != lenp) goto out; for (i = 0; i < len; i++) { if (!!values[i] != policydb.bool_val_to_struct[i]->state) { audit_log(current->audit_context, GFP_ATOMIC, AUDIT_MAC_CONFIG_CHANGE, "bool=%s val=%d old_val=%d auid=%u 
ses=%u", sym_name(&policydb, SYM_BOOLS, i), !!values[i], policydb.bool_val_to_struct[i]->state, from_kuid(&init_user_ns, audit_get_loginuid(current)), audit_get_sessionid(current)); } if (values[i]) policydb.bool_val_to_struct[i]->state = 1; else policydb.bool_val_to_struct[i]->state = 0; } for (cur = policydb.cond_list; cur; cur = cur->next) { rc = evaluate_cond_node(&policydb, cur); if (rc) goto out; } seqno = ++latest_granting; rc = 0; out: write_unlock_irq(&policy_rwlock); if (!rc) { avc_ss_reset(seqno); selnl_notify_policyload(seqno); selinux_status_update_policyload(seqno); selinux_xfrm_notify_policyload(); } return rc; } int security_get_bool_value(int bool) { int rc; int len; read_lock(&policy_rwlock); rc = -EFAULT; len = policydb.p_bools.nprim; if (bool >= len) goto out; rc = policydb.bool_val_to_struct[bool]->state; out: read_unlock(&policy_rwlock); return rc; } static int security_preserve_bools(struct policydb *p) { int rc, nbools = 0, *bvalues = NULL, i; char **bnames = NULL; struct cond_bool_datum *booldatum; struct cond_node *cur; rc = security_get_bools(&nbools, &bnames, &bvalues); if (rc) goto out; for (i = 0; i < nbools; i++) { booldatum = hashtab_search(p->p_bools.table, bnames[i]); if (booldatum) booldatum->state = bvalues[i]; } for (cur = p->cond_list; cur; cur = cur->next) { rc = evaluate_cond_node(p, cur); if (rc) goto out; } out: if (bnames) { for (i = 0; i < nbools; i++) kfree(bnames[i]); } kfree(bnames); kfree(bvalues); return rc; } /* * security_sid_mls_copy() - computes a new sid based on the given * sid and the mls portion of mls_sid. 
 */
int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
{
	struct context *context1;
	struct context *context2;
	struct context newcon;
	char *s;
	u32 len;
	int rc;

	rc = 0;
	/* Without MLS there is nothing to merge: the result is @sid. */
	if (!ss_initialized || !policydb.mls_enabled) {
		*new_sid = sid;
		goto out;
	}

	context_init(&newcon);

	read_lock(&policy_rwlock);

	rc = -EINVAL;
	context1 = sidtab_search(&sidtab, sid);
	if (!context1) {
		printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
			__func__, sid);
		goto out_unlock;
	}

	rc = -EINVAL;
	context2 = sidtab_search(&sidtab, mls_sid);
	if (!context2) {
		printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
			__func__, mls_sid);
		goto out_unlock;
	}

	/* user/role/type come from @sid, the MLS range from @mls_sid. */
	newcon.user = context1->user;
	newcon.role = context1->role;
	newcon.type = context1->type;
	rc = mls_context_cpy(&newcon, context2);
	if (rc)
		goto out_unlock;

	/* Check the validity of the new context. */
	if (!policydb_context_isvalid(&policydb, &newcon)) {
		rc = convert_context_handle_invalid_context(&newcon);
		if (rc) {
			if (!context_struct_to_string(&newcon, &s, &len)) {
				audit_log(current->audit_context,
					  GFP_ATOMIC, AUDIT_SELINUX_ERR,
					  "security_sid_mls_copy: invalid context %s", s);
				kfree(s);
			}
			goto out_unlock;
		}
	}

	rc = sidtab_context_to_sid(&sidtab, &newcon, new_sid);
out_unlock:
	read_unlock(&policy_rwlock);
	context_destroy(&newcon);
out:
	return rc;
}

/**
 * security_net_peersid_resolve - Compare and resolve two network peer SIDs
 * @nlbl_sid: NetLabel SID
 * @nlbl_type: NetLabel labeling protocol type
 * @xfrm_sid: XFRM SID
 *
 * Description:
 * Compare the @nlbl_sid and @xfrm_sid values and if the two SIDs can be
 * resolved into a single SID it is returned via @peer_sid and the function
 * returns zero. Otherwise @peer_sid is set to SECSID_NULL and the function
 * returns a negative value.
 * A table summarizing the behavior is below:
 *
 *                                 | function return |      @sid
 *   ------------------------------+-----------------+-----------------
 *   no peer labels                |        0        |    SECSID_NULL
 *   single peer label             |        0        |    <peer_label>
 *   multiple, consistent labels   |        0        |    <peer_label>
 *   multiple, inconsistent labels |    -<errno>     |    SECSID_NULL
 *
 */
int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
				 u32 xfrm_sid,
				 u32 *peer_sid)
{
	int rc;
	struct context *nlbl_ctx;
	struct context *xfrm_ctx;

	/* default: no usable peer label */
	*peer_sid = SECSID_NULL;

	/* handle the common (which also happens to be the set of easy) cases
	 * right away, these two if statements catch everything involving a
	 * single or absent peer SID/label */
	if (xfrm_sid == SECSID_NULL) {
		*peer_sid = nlbl_sid;
		return 0;
	}
	/* NOTE: an nlbl_type == NETLBL_NLTYPE_UNLABELED is a "fallback" label
	 * and is treated as if nlbl_sid == SECSID_NULL when a XFRM SID/label
	 * is present */
	if (nlbl_sid == SECSID_NULL || nlbl_type == NETLBL_NLTYPE_UNLABELED) {
		*peer_sid = xfrm_sid;
		return 0;
	}

	/* we don't need to check ss_initialized here since the only way both
	 * nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the
	 * security server was initialized and ss_initialized was true */
	if (!policydb.mls_enabled)
		return 0;

	read_lock(&policy_rwlock);

	rc = -EINVAL;
	nlbl_ctx = sidtab_search(&sidtab, nlbl_sid);
	if (!nlbl_ctx) {
		printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
		       __func__, nlbl_sid);
		goto out;
	}
	rc = -EINVAL;
	xfrm_ctx = sidtab_search(&sidtab, xfrm_sid);
	if (!xfrm_ctx) {
		printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
		       __func__, xfrm_sid);
		goto out;
	}
	/* labels are "consistent" when their MLS components compare equal */
	rc = (mls_context_cmp(nlbl_ctx, xfrm_ctx) ? 0 : -EACCES);
	if (rc)
		goto out;

	/* at present NetLabel SIDs/labels really only carry MLS
	 * information so if the MLS portion of the NetLabel SID
	 * matches the MLS portion of the labeled XFRM SID/label
	 * then pass along the XFRM SID as it is the most
	 * expressive */
	*peer_sid = xfrm_sid;
out:
	read_unlock(&policy_rwlock);
	return rc;
}

/* hashtab_map() callback: copy one class name into the caller-supplied
 * array, indexed by the class's (1-based) policy value. */
static int get_classes_callback(void *k, void *d, void *args)
{
	struct class_datum *datum = d;
	char *name = k, **classes = args;
	int value = datum->value - 1;

	classes[value] = kstrdup(name, GFP_ATOMIC);
	if (!classes[value])
		return -ENOMEM;
	return 0;
}

/* Return a newly-allocated array of all class names in the loaded policy.
 * On success the caller owns *classes and each element; on failure the
 * partially-built array is freed here. */
int security_get_classes(char ***classes, int *nclasses)
{
	int rc;

	read_lock(&policy_rwlock);
	rc = -ENOMEM;
	*nclasses = policydb.p_classes.nprim;
	*classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC);
	if (!*classes)
		goto out;
	rc = hashtab_map(policydb.p_classes.table, get_classes_callback,
			 *classes);
	if (rc) {
		int i;
		for (i = 0; i < *nclasses; i++)
			kfree((*classes)[i]);
		kfree(*classes);
	}
out:
	read_unlock(&policy_rwlock);
	return rc;
}

/* hashtab_map() callback: copy one permission name into the caller-supplied
 * array, indexed by the permission's (1-based) policy value. */
static int get_permissions_callback(void *k, void *d, void *args)
{
	struct perm_datum *datum = d;
	char *name = k, **perms = args;
	int value = datum->value - 1;

	perms[value] = kstrdup(name, GFP_ATOMIC);
	if (!perms[value])
		return -ENOMEM;
	return 0;
}

/* Return a newly-allocated array of permission names for @class, including
 * permissions inherited from the class's common (comdatum), if any. */
int security_get_permissions(char *class, char ***perms, int *nperms)
{
	int rc, i;
	struct class_datum *match;

	read_lock(&policy_rwlock);
	rc = -EINVAL;
	match = hashtab_search(policydb.p_classes.table, class);
	if (!match) {
		printk(KERN_ERR "SELinux: %s: unrecognized class %s\n",
			__func__, class);
		goto out;
	}

	rc = -ENOMEM;
	*nperms = match->permissions.nprim;
	*perms = kcalloc(*nperms, sizeof(**perms), GFP_ATOMIC);
	if (!*perms)
		goto out;

	/* permissions inherited from the class's common definition */
	if (match->comdatum) {
		rc = hashtab_map(match->comdatum->permissions.table,
				 get_permissions_callback, *perms);
		if (rc)
			goto err;
	}

	/* class-specific permissions */
	rc = hashtab_map(match->permissions.table, get_permissions_callback,
			 *perms);
	if (rc)
		goto err;

out:
	read_unlock(&policy_rwlock);
	return rc;

err:
	read_unlock(&policy_rwlock);
	for (i = 0; i < *nperms; i++)
		kfree((*perms)[i]);
	kfree(*perms);
	return rc;
}

/* Policy's handling of permission checks against unknown classes/perms. */
int security_get_reject_unknown(void)
{
	return policydb.reject_unknown;
}

int security_get_allow_unknown(void)
{
	return policydb.allow_unknown;
}

/**
 * security_policycap_supported - Check for a specific policy capability
 * @req_cap: capability
 *
 * Description:
 * This function queries the currently loaded policy to see if it supports the
 * capability specified by @req_cap.  Returns true (1) if the capability is
 * supported, false (0) if it isn't supported.
 *
 */
int security_policycap_supported(unsigned int req_cap)
{
	int rc;

	read_lock(&policy_rwlock);
	rc = ebitmap_get_bit(&policydb.policycaps, req_cap);
	read_unlock(&policy_rwlock);

	return rc;
}

/* One compiled audit-filter rule: the policy sequence number it was built
 * against plus the (partial) security context to compare with. */
struct selinux_audit_rule {
	u32 au_seqno;
	struct context au_ctxt;
};

void selinux_audit_rule_free(void *vrule)
{
	struct selinux_audit_rule *rule = vrule;

	if (rule) {
		context_destroy(&rule->au_ctxt);
		kfree(rule);
	}
}

/* Compile an audit rule string (@rulestr) for @field/@op into a
 * selinux_audit_rule, resolving names against the loaded policy. */
int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
{
	struct selinux_audit_rule *tmprule;
	struct role_datum *roledatum;
	struct type_datum *typedatum;
	struct user_datum *userdatum;
	struct selinux_audit_rule **rule = (struct selinux_audit_rule **)vrule;
	int rc = 0;

	*rule = NULL;

	if (!ss_initialized)
		return -EOPNOTSUPP;

	/* validate the field/operator combination up front */
	switch (field) {
	case AUDIT_SUBJ_USER:
	case AUDIT_SUBJ_ROLE:
	case AUDIT_SUBJ_TYPE:
	case AUDIT_OBJ_USER:
	case AUDIT_OBJ_ROLE:
	case AUDIT_OBJ_TYPE:
		/* only 'equals' and 'not equals' fit user, role, and type */
		if (op != Audit_equal && op != Audit_not_equal)
			return -EINVAL;
		break;
	case AUDIT_SUBJ_SEN:
	case AUDIT_SUBJ_CLR:
	case AUDIT_OBJ_LEV_LOW:
	case AUDIT_OBJ_LEV_HIGH:
		/* we do not allow a range, indicated by the presence of '-' */
		if (strchr(rulestr, '-'))
			return -EINVAL;
		break;
	default:
		/* only the above fields are valid */
		return -EINVAL;
	}

	tmprule = kzalloc(sizeof(struct selinux_audit_rule), GFP_KERNEL);
	if (!tmprule)
		return -ENOMEM;

	context_init(&tmprule->au_ctxt);

	read_lock(&policy_rwlock);

	/* remember which policy generation this rule was compiled against */
	tmprule->au_seqno = latest_granting;

	switch (field) {
	case AUDIT_SUBJ_USER:
	case AUDIT_OBJ_USER:
		rc = -EINVAL;
		userdatum = hashtab_search(policydb.p_users.table, rulestr);
		if (!userdatum)
			goto out;
		tmprule->au_ctxt.user = userdatum->value;
		break;
	case AUDIT_SUBJ_ROLE:
	case AUDIT_OBJ_ROLE:
		rc = -EINVAL;
		roledatum = hashtab_search(policydb.p_roles.table, rulestr);
		if (!roledatum)
			goto out;
		tmprule->au_ctxt.role = roledatum->value;
		break;
	case AUDIT_SUBJ_TYPE:
	case AUDIT_OBJ_TYPE:
		rc = -EINVAL;
		typedatum = hashtab_search(policydb.p_types.table, rulestr);
		if (!typedatum)
			goto out;
		tmprule->au_ctxt.type = typedatum->value;
		break;
	case AUDIT_SUBJ_SEN:
	case AUDIT_SUBJ_CLR:
	case AUDIT_OBJ_LEV_LOW:
	case AUDIT_OBJ_LEV_HIGH:
		rc = mls_from_string(rulestr, &tmprule->au_ctxt, GFP_ATOMIC);
		if (rc)
			goto out;
		break;
	}
	rc = 0;
out:
	read_unlock(&policy_rwlock);

	if (rc) {
		selinux_audit_rule_free(tmprule);
		tmprule = NULL;
	}

	*rule = tmprule;

	return rc;
}

/* Check to see if the rule contains any selinux fields */
int selinux_audit_rule_known(struct audit_krule *rule)
{
	int i;

	for (i = 0; i < rule->field_count; i++) {
		struct audit_field *f = &rule->fields[i];
		switch (f->type) {
		case AUDIT_SUBJ_USER:
		case AUDIT_SUBJ_ROLE:
		case AUDIT_SUBJ_TYPE:
		case AUDIT_SUBJ_SEN:
		case AUDIT_SUBJ_CLR:
		case AUDIT_OBJ_USER:
		case AUDIT_OBJ_ROLE:
		case AUDIT_OBJ_TYPE:
		case AUDIT_OBJ_LEV_LOW:
		case AUDIT_OBJ_LEV_HIGH:
			return 1;
		}
	}

	return 0;
}

/* Evaluate a compiled rule against the context of @sid.  Returns 1 on
 * match, 0 on no match, negative errno on error (missing/stale rule or
 * unknown SID). */
int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule,
			     struct audit_context *actx)
{
	struct context *ctxt;
	struct mls_level *level;
	struct selinux_audit_rule *rule = vrule;
	int match = 0;

	if (!rule) {
		audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
			  "selinux_audit_rule_match: missing rule\n");
		return -ENOENT;
	}

	read_lock(&policy_rwlock);

	/* a rule compiled against an older policy load is unusable */
	if (rule->au_seqno < latest_granting) {
		audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
			  "selinux_audit_rule_match: stale rule\n");
		match = -ESTALE;
		goto out;
	}

	ctxt = sidtab_search(&sidtab, sid);
	if (!ctxt) {
		audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
			  "selinux_audit_rule_match: unrecognized SID %d\n",
			  sid);
		match = -ENOENT;
		goto out;
	}

	/* a field/op pair that is not caught here will simply fall through
	   without a match */
	switch (field) {
	case AUDIT_SUBJ_USER:
	case AUDIT_OBJ_USER:
		switch (op) {
		case Audit_equal:
			match = (ctxt->user == rule->au_ctxt.user);
			break;
		case Audit_not_equal:
			match = (ctxt->user != rule->au_ctxt.user);
			break;
		}
		break;
	case AUDIT_SUBJ_ROLE:
	case AUDIT_OBJ_ROLE:
		switch (op) {
		case Audit_equal:
			match = (ctxt->role == rule->au_ctxt.role);
			break;
		case Audit_not_equal:
			match = (ctxt->role != rule->au_ctxt.role);
			break;
		}
		break;
	case AUDIT_SUBJ_TYPE:
	case AUDIT_OBJ_TYPE:
		switch (op) {
		case Audit_equal:
			match = (ctxt->type == rule->au_ctxt.type);
			break;
		case Audit_not_equal:
			match = (ctxt->type != rule->au_ctxt.type);
			break;
		}
		break;
	case AUDIT_SUBJ_SEN:
	case AUDIT_SUBJ_CLR:
	case AUDIT_OBJ_LEV_LOW:
	case AUDIT_OBJ_LEV_HIGH:
		/* SEN/LEV_LOW compare the low end of the context's MLS
		 * range, CLR/LEV_HIGH the high end */
		level = ((field == AUDIT_SUBJ_SEN ||
			  field == AUDIT_OBJ_LEV_LOW) ?
			 &ctxt->range.level[0] : &ctxt->range.level[1]);
		switch (op) {
		case Audit_equal:
			match = mls_level_eq(&rule->au_ctxt.range.level[0],
					     level);
			break;
		case Audit_not_equal:
			match = !mls_level_eq(&rule->au_ctxt.range.level[0],
					      level);
			break;
		case Audit_lt:
			match = (mls_level_dom(&rule->au_ctxt.range.level[0],
					       level) &&
				 !mls_level_eq(&rule->au_ctxt.range.level[0],
					       level));
			break;
		case Audit_le:
			match = mls_level_dom(&rule->au_ctxt.range.level[0],
					      level);
			break;
		case Audit_gt:
			match = (mls_level_dom(level,
					       &rule->au_ctxt.range.level[0]) &&
				 !mls_level_eq(level,
					       &rule->au_ctxt.range.level[0]));
			break;
		case Audit_ge:
			match = mls_level_dom(level,
					      &rule->au_ctxt.range.level[0]);
			break;
		}
	}

out:
	read_unlock(&policy_rwlock);
	return match;
}

/* Hook used to tell the audit subsystem to recompile its LSM rules after a
 * policy (re)load; invoked from the AVC reset callback below. */
static int (*aurule_callback)(void) = audit_update_lsm_rules;

static int aurule_avc_callback(u32 event)
{
	int err = 0;

	if (event == AVC_CALLBACK_RESET && aurule_callback)
		err = aurule_callback();
	return err;
}

static int __init aurule_init(void)
{
	int err;

	err = avc_add_callback(aurule_avc_callback, AVC_CALLBACK_RESET);
	if (err)
		panic("avc_add_callback() failed, error %d\n", err);

	return err;
}
__initcall(aurule_init);

#ifdef CONFIG_NETLABEL
/**
 * security_netlbl_cache_add - Add an entry to the NetLabel cache
 * @secattr: the NetLabel packet security attributes
 * @sid: the SELinux SID
 *
 * Description:
 * Attempt to cache the context in @ctx, which was derived from the packet in
 * @skb, in the NetLabel subsystem cache.  This function assumes @secattr has
 * already been initialized.
 *
 */
static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr,
				      u32 sid)
{
	u32 *sid_cache;

	sid_cache = kmalloc(sizeof(*sid_cache), GFP_ATOMIC);
	if (sid_cache == NULL)
		return;
	secattr->cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
	if (secattr->cache == NULL) {
		kfree(sid_cache);
		return;
	}

	*sid_cache = sid;
	secattr->cache->free = kfree;	/* cache owns sid_cache from here on */
	secattr->cache->data = sid_cache;
	secattr->flags |= NETLBL_SECATTR_CACHE;
}

/**
 * security_netlbl_secattr_to_sid - Convert a NetLabel secattr to a SELinux SID
 * @secattr: the NetLabel packet security attributes
 * @sid: the SELinux SID
 *
 * Description:
 * Convert the given NetLabel security attributes in @secattr into a
 * SELinux SID.  If the @secattr field does not contain a full SELinux
 * SID/context then use SECINITSID_NETMSG as the foundation.  If possible the
 * 'cache' field of @secattr is set and the CACHE flag is set; this is to
 * allow the @secattr to be used by NetLabel to cache the secattr to SID
 * conversion for future lookups.  Returns zero on success, negative values on
 * failure.
 *
 */
int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
				   u32 *sid)
{
	int rc;
	struct context *ctx;
	struct context ctx_new;

	if (!ss_initialized) {
		*sid = SECSID_NULL;
		return 0;
	}

	read_lock(&policy_rwlock);

	if (secattr->flags & NETLBL_SECATTR_CACHE)
		*sid = *(u32 *)secattr->cache->data;
	else if (secattr->flags & NETLBL_SECATTR_SECID)
		*sid = secattr->attr.secid;
	else if (secattr->flags & NETLBL_SECATTR_MLS_LVL) {
		rc = -EIDRM;
		ctx = sidtab_search(&sidtab, SECINITSID_NETMSG);
		if (ctx == NULL)
			goto out;

		/* build a context on top of SECINITSID_NETMSG, importing the
		 * MLS level/categories carried by the packet */
		context_init(&ctx_new);
		ctx_new.user = ctx->user;
		ctx_new.role = ctx->role;
		ctx_new.type = ctx->type;
		mls_import_netlbl_lvl(&ctx_new, secattr);
		if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
			rc = ebitmap_netlbl_import(&ctx_new.range.level[0].cat,
						   secattr->attr.mls.cat);
			if (rc)
				goto out;
			memcpy(&ctx_new.range.level[1].cat,
			       &ctx_new.range.level[0].cat,
			       sizeof(ctx_new.range.level[0].cat));
		}
		rc = -EIDRM;
		if (!mls_context_isvalid(&policydb, &ctx_new))
			goto out_free;

		rc = sidtab_context_to_sid(&sidtab, &ctx_new, sid);
		if (rc)
			goto out_free;

		security_netlbl_cache_add(secattr, *sid);

		ebitmap_destroy(&ctx_new.range.level[0].cat);
	} else
		*sid = SECSID_NULL;

	read_unlock(&policy_rwlock);
	return 0;
out_free:
	ebitmap_destroy(&ctx_new.range.level[0].cat);
out:
	read_unlock(&policy_rwlock);
	return rc;
}

/**
 * security_netlbl_sid_to_secattr - Convert a SELinux SID to a NetLabel secattr
 * @sid: the SELinux SID
 * @secattr: the NetLabel packet security attributes
 *
 * Description:
 * Convert the given SELinux SID in @sid into a NetLabel security attribute.
 * Returns zero on success, negative values on failure.
 *
 */
int security_netlbl_sid_to_secattr(u32 sid, struct netlbl_lsm_secattr *secattr)
{
	int rc;
	struct context *ctx;

	if (!ss_initialized)
		return 0;

	read_lock(&policy_rwlock);

	rc = -ENOENT;
	ctx = sidtab_search(&sidtab, sid);
	if (ctx == NULL)
		goto out;

	rc = -ENOMEM;
	secattr->domain = kstrdup(sym_name(&policydb, SYM_TYPES, ctx->type - 1),
				  GFP_ATOMIC);
	if (secattr->domain == NULL)
		goto out;

	secattr->attr.secid = sid;
	secattr->flags |= NETLBL_SECATTR_DOMAIN_CPY | NETLBL_SECATTR_SECID;
	mls_export_netlbl_lvl(ctx, secattr);
	rc = mls_export_netlbl_cat(ctx, secattr);
out:
	read_unlock(&policy_rwlock);
	return rc;
}
#endif /* CONFIG_NETLABEL */

/**
 * security_read_policy - read the policy.
 * @data: binary policy data
 * @len: length of data in bytes
 *
 */
int security_read_policy(void **data, size_t *len)
{
	int rc;
	struct policy_file fp;

	if (!ss_initialized)
		return -EINVAL;

	*len = security_policydb_len();

	*data = vmalloc_user(*len);
	if (!*data)
		return -ENOMEM;

	fp.data = *data;
	fp.len = *len;

	read_lock(&policy_rwlock);
	rc = policydb_write(&policydb, &fp);
	read_unlock(&policy_rwlock);

	if (rc)
		return rc;

	/* policydb_write() advanced fp.data; the difference is the number of
	 * bytes actually written */
	*len = (unsigned long)fp.data - (unsigned long)*data;
	return 0;

}
gpl-2.0
gq213/linux-3.10.72
drivers/staging/ozwpan/ozusbsvc1.c
1573
13769
/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 *
 * This file implements the protocol specific parts of the USB service for a PD.
 * -----------------------------------------------------------------------------
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <asm/unaligned.h>
#include "ozconfig.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozusbif.h"
#include "ozhcd.h"
#include "oztrace.h"
#include "ozusbsvc.h"
#include "ozevent.h"
/*------------------------------------------------------------------------------
 */
/* Maximum payload bytes that fit in one fixed-size isochronous element body. */
#define MAX_ISOC_FIXED_DATA	(253-sizeof(struct oz_isoc_fixed))
/*------------------------------------------------------------------------------
 * Queue a filled-in element for transmission to the PD.  Assigns the
 * per-context tx sequence number (non-isoc only; zero is reserved and
 * skipped) and frees @ei on queueing failure.  Returns 0 on success.
 * Context: softirq
 */
static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
	struct oz_usb_ctx *usb_ctx, u8 strid, u8 isoc)
{
	int ret;
	struct oz_elt *elt = (struct oz_elt *)ei->data;
	struct oz_app_hdr *app_hdr = (struct oz_app_hdr *)(elt+1);
	elt->type = OZ_ELT_APP_DATA;
	ei->app_id = OZ_APPID_USB;
	ei->length = elt->length + sizeof(struct oz_elt);
	app_hdr->app_id = OZ_APPID_USB;
	spin_lock_bh(&eb->lock);
	if (isoc == 0) {
		app_hdr->elt_seq_num = usb_ctx->tx_seq_num++;
		if (usb_ctx->tx_seq_num == 0)
			usb_ctx->tx_seq_num = 1;
	}
	ret = oz_queue_elt_info(eb, isoc, strid, ei);
	if (ret)
		oz_elt_info_free(eb, ei);
	spin_unlock_bh(&eb->lock);
	return ret;
}
/*------------------------------------------------------------------------------
 * Build and queue a GET_DESCRIPTOR request element.  The requested length
 * is clamped to 200 bytes (element payload limit).  Returns 0 on success,
 * -1 on allocation failure.
 * Context: softirq
 */
int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
	u8 index, u16 windex, int offset, int len)
{
	struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
	struct oz_pd *pd = usb_ctx->pd;
	struct oz_elt *elt;
	struct oz_get_desc_req *body;
	struct oz_elt_buf *eb = &pd->elt_buff;
	struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
	oz_trace(" req_type = 0x%x\n", req_type);
	oz_trace(" desc_type = 0x%x\n", desc_type);
	oz_trace(" index = 0x%x\n", index);
	oz_trace(" windex = 0x%x\n", windex);
	oz_trace(" offset = 0x%x\n", offset);
	oz_trace(" len = 0x%x\n", len);
	if (len > 200)
		len = 200;
	if (ei == NULL)
		return -1;
	elt = (struct oz_elt *)ei->data;
	elt->length = sizeof(struct oz_get_desc_req);
	body = (struct oz_get_desc_req *)(elt+1);
	body->type = OZ_GET_DESC_REQ;
	body->req_id = req_id;
	put_unaligned(cpu_to_le16(offset), &body->offset);
	put_unaligned(cpu_to_le16(len), &body->size);
	body->req_type = req_type;
	body->desc_type = desc_type;
	/* NOTE(review): stored without byte-order conversion, unlike
	 * offset/size above — callers pass the wire-order value; confirm
	 * against oz_usb_control_req */
	body->w_index = windex;
	body->index = index;
	return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*------------------------------------------------------------------------------
 * Build and queue a SET_CONFIGURATION request element.
 * Context: tasklet
 */
static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
{
	struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
	struct oz_pd *pd = usb_ctx->pd;
	struct oz_elt *elt;
	struct oz_elt_buf *eb = &pd->elt_buff;
	struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
	struct oz_set_config_req *body;
	if (ei == NULL)
		return -1;
	elt = (struct oz_elt *)ei->data;
	elt->length = sizeof(struct oz_set_config_req);
	body = (struct oz_set_config_req *)(elt+1);
	body->type = OZ_SET_CONFIG_REQ;
	body->req_id = req_id;
	body->index = index;
	return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*------------------------------------------------------------------------------
 * Build and queue a SET_INTERFACE request element.
 * Context: tasklet
 */
static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
{
	struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
	struct oz_pd *pd = usb_ctx->pd;
	struct oz_elt *elt;
	struct oz_elt_buf *eb = &pd->elt_buff;
	struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
	struct oz_set_interface_req *body;
	if (ei == NULL)
		return -1;
	elt = (struct oz_elt *)ei->data;
	elt->length = sizeof(struct oz_set_interface_req);
	body = (struct oz_set_interface_req *)(elt+1);
	body->type = OZ_SET_INTERFACE_REQ;
	body->req_id = req_id;
	body->index = index;
	body->alternative = alt;
	return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*------------------------------------------------------------------------------
 * Build and queue a SET_FEATURE or CLEAR_FEATURE request element; @type
 * selects which (OZ_SET_FEATURE_REQ / OZ_CLEAR_FEATURE_REQ).
 * Context: tasklet
 */
static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
			u8 recipient, u8 index, __le16 feature)
{
	struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
	struct oz_pd *pd = usb_ctx->pd;
	struct oz_elt *elt;
	struct oz_elt_buf *eb = &pd->elt_buff;
	struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
	struct oz_feature_req *body;
	if (ei == NULL)
		return -1;
	elt = (struct oz_elt *)ei->data;
	elt->length = sizeof(struct oz_feature_req);
	body = (struct oz_feature_req *)(elt+1);
	body->type = type;
	body->req_id = req_id;
	body->recipient = recipient;
	body->index = index;
	put_unaligned(feature, &body->feature);
	return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*------------------------------------------------------------------------------
 * Build and queue a vendor/class control request element, appending
 * @data_len bytes of request payload after the fixed body.
 * Context: tasklet
 */
static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
	u8 request, __le16 value, __le16 index, const u8 *data, int data_len)
{
	struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
	struct oz_pd *pd = usb_ctx->pd;
	struct oz_elt *elt;
	struct oz_elt_buf *eb = &pd->elt_buff;
	struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
	struct oz_vendor_class_req *body;
	if (ei == NULL)
		return -1;
	elt = (struct oz_elt *)ei->data;
	/* -1 because struct oz_vendor_class_req already includes the first
	 * payload byte */
	elt->length = sizeof(struct oz_vendor_class_req) - 1 + data_len;
	body = (struct oz_vendor_class_req *)(elt+1);
	body->type = OZ_VENDOR_CLASS_REQ;
	body->req_id = req_id;
	body->req_type = req_type;
	body->request = request;
	put_unaligned(value, &body->value);
	put_unaligned(index, &body->index);
	if (data_len)
		memcpy(body->data, data, data_len);
	return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
/*------------------------------------------------------------------------------ * Context: tasklet */ int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup, const u8 *data, int data_len) { unsigned wvalue = le16_to_cpu(setup->wValue); unsigned windex = le16_to_cpu(setup->wIndex); unsigned wlength = le16_to_cpu(setup->wLength); int rc = 0; oz_event_log(OZ_EVT_CTRL_REQ, setup->bRequest, req_id, (void *)(((unsigned long)(setup->wValue))<<16 | ((unsigned long)setup->wIndex)), setup->bRequestType); if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { switch (setup->bRequest) { case USB_REQ_GET_DESCRIPTOR: rc = oz_usb_get_desc_req(hpd, req_id, setup->bRequestType, (u8)(wvalue>>8), (u8)wvalue, setup->wIndex, 0, wlength); break; case USB_REQ_SET_CONFIGURATION: rc = oz_usb_set_config_req(hpd, req_id, (u8)wvalue); break; case USB_REQ_SET_INTERFACE: { u8 if_num = (u8)windex; u8 alt = (u8)wvalue; rc = oz_usb_set_interface_req(hpd, req_id, if_num, alt); } break; case USB_REQ_SET_FEATURE: rc = oz_usb_set_clear_feature_req(hpd, req_id, OZ_SET_FEATURE_REQ, setup->bRequestType & 0xf, (u8)windex, setup->wValue); break; case USB_REQ_CLEAR_FEATURE: rc = oz_usb_set_clear_feature_req(hpd, req_id, OZ_CLEAR_FEATURE_REQ, setup->bRequestType & 0xf, (u8)windex, setup->wValue); break; } } else { rc = oz_usb_vendor_class_req(hpd, req_id, setup->bRequestType, setup->bRequest, setup->wValue, setup->wIndex, data, data_len); } return rc; } /*------------------------------------------------------------------------------ * Context: softirq */ int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb) { struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd; struct oz_pd *pd = usb_ctx->pd; struct oz_elt_buf *eb; int i; int hdr_size; u8 *data; struct usb_iso_packet_descriptor *desc; if (pd->mode & OZ_F_ISOC_NO_ELTS) { for (i = 0; i < urb->number_of_packets; i++) { u8 *data; desc = &urb->iso_frame_desc[i]; data = ((u8 *)urb->transfer_buffer)+desc->offset; 
oz_send_isoc_unit(pd, ep_num, data, desc->length); } return 0; } hdr_size = sizeof(struct oz_isoc_fixed) - 1; eb = &pd->elt_buff; i = 0; while (i < urb->number_of_packets) { struct oz_elt_info *ei = oz_elt_info_alloc(eb); struct oz_elt *elt; struct oz_isoc_fixed *body; int unit_count; int unit_size; int rem; if (ei == NULL) return -1; rem = MAX_ISOC_FIXED_DATA; elt = (struct oz_elt *)ei->data; body = (struct oz_isoc_fixed *)(elt + 1); body->type = OZ_USB_ENDPOINT_DATA; body->endpoint = ep_num; body->format = OZ_DATA_F_ISOC_FIXED; unit_size = urb->iso_frame_desc[i].length; body->unit_size = (u8)unit_size; data = ((u8 *)(elt+1)) + hdr_size; unit_count = 0; while (i < urb->number_of_packets) { desc = &urb->iso_frame_desc[i]; if ((unit_size == desc->length) && (desc->length <= rem)) { memcpy(data, ((u8 *)urb->transfer_buffer) + desc->offset, unit_size); data += unit_size; rem -= unit_size; unit_count++; desc->status = 0; desc->actual_length = desc->length; i++; } else { break; } } elt->length = hdr_size + MAX_ISOC_FIXED_DATA - rem; /* Store the number of units in body->frame_number for the * moment. This field will be correctly determined before * the element is sent. 
*/ body->frame_number = (u8)unit_count; oz_usb_submit_elt(eb, ei, usb_ctx, ep_num, pd->mode & OZ_F_ISOC_ANYTIME); } return 0; } /*------------------------------------------------------------------------------ * Context: softirq-serialized */ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx, struct oz_usb_hdr *usb_hdr, int len) { struct oz_data *data_hdr = (struct oz_data *)usb_hdr; switch (data_hdr->format) { case OZ_DATA_F_MULTIPLE_FIXED: { struct oz_multiple_fixed *body = (struct oz_multiple_fixed *)data_hdr; u8 *data = body->data; int n = (len - sizeof(struct oz_multiple_fixed)+1) / body->unit_size; while (n--) { oz_hcd_data_ind(usb_ctx->hport, body->endpoint, data, body->unit_size); data += body->unit_size; } } break; case OZ_DATA_F_ISOC_FIXED: { struct oz_isoc_fixed *body = (struct oz_isoc_fixed *)data_hdr; int data_len = len-sizeof(struct oz_isoc_fixed)+1; int unit_size = body->unit_size; u8 *data = body->data; int count; int i; if (!unit_size) break; count = data_len/unit_size; for (i = 0; i < count; i++) { oz_hcd_data_ind(usb_ctx->hport, body->endpoint, data, unit_size); data += unit_size; } } break; } } /*------------------------------------------------------------------------------ * This is called when the PD has received a USB element. The type of element * is determined and is then passed to an appropriate handler function. * Context: softirq-serialized */ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt) { struct oz_usb_hdr *usb_hdr = (struct oz_usb_hdr *)(elt + 1); struct oz_usb_ctx *usb_ctx; spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]); usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1]; if (usb_ctx) oz_usb_get(usb_ctx); spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]); if (usb_ctx == NULL) return; /* Context has gone so nothing to do. */ if (usb_ctx->stopped) goto done; /* If sequence number is non-zero then check it is not a duplicate. * Zero sequence numbers are always accepted. 
*/ if (usb_hdr->elt_seq_num != 0) { if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0) /* Reject duplicate element. */ goto done; } usb_ctx->rx_seq_num = usb_hdr->elt_seq_num; switch (usb_hdr->type) { case OZ_GET_DESC_RSP: { struct oz_get_desc_rsp *body = (struct oz_get_desc_rsp *)usb_hdr; int data_len = elt->length - sizeof(struct oz_get_desc_rsp) + 1; u16 offs = le16_to_cpu(get_unaligned(&body->offset)); u16 total_size = le16_to_cpu(get_unaligned(&body->total_size)); oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n"); oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id, body->rcode, body->data, data_len, offs, total_size); } break; case OZ_SET_CONFIG_RSP: { struct oz_set_config_rsp *body = (struct oz_set_config_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, NULL, 0); } break; case OZ_SET_INTERFACE_RSP: { struct oz_set_interface_rsp *body = (struct oz_set_interface_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, NULL, 0); } break; case OZ_VENDOR_CLASS_RSP: { struct oz_vendor_class_rsp *body = (struct oz_vendor_class_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, body->data, elt->length- sizeof(struct oz_vendor_class_rsp)+1); } break; case OZ_USB_ENDPOINT_DATA: oz_usb_handle_ep_data(usb_ctx, usb_hdr, elt->length); break; } done: oz_usb_put(usb_ctx); } /*------------------------------------------------------------------------------ * Context: softirq, process */ void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len) { struct oz_usb_ctx *usb_ctx; spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]); usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1]; if (usb_ctx) oz_usb_get(usb_ctx); spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]); if (usb_ctx == NULL) return; /* Context has gone so nothing to do. */ if (!usb_ctx->stopped) { oz_trace("Farewell indicated ep = 0x%x\n", ep_num); oz_hcd_data_ind(usb_ctx->hport, ep_num, data, len); } oz_usb_put(usb_ctx); }
gpl-2.0
amatus/linux
drivers/watchdog/pc87413_wdt.c
1829
14364
/* * NS pc87413-wdt Watchdog Timer driver for Linux 2.6.x.x * * This code is based on wdt.c with original copyright. * * (C) Copyright 2006 Sven Anders, <anders@anduras.de> * and Marcus Junker, <junker@anduras.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Neither Sven Anders, Marcus Junker nor ANDURAS AG * admit liability nor provide warranty for any of this software. * This material is provided "AS-IS" and at no charge. * * Release 1.1 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/notifier.h> #include <linux/fs.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/moduleparam.h> #include <linux/io.h> #include <linux/uaccess.h> /* #define DEBUG 1 */ #define DEFAULT_TIMEOUT 1 /* 1 minute */ #define MAX_TIMEOUT 255 #define VERSION "1.1" #define MODNAME "pc87413 WDT" #define DPFX MODNAME " - DEBUG: " #define WDT_INDEX_IO_PORT (io+0) /* I/O port base (index register) */ #define WDT_DATA_IO_PORT (WDT_INDEX_IO_PORT+1) #define SWC_LDN 0x04 #define SIOCFG2 0x22 /* Serial IO register */ #define WDCTL 0x10 /* Watchdog-Timer-Control-Register */ #define WDTO 0x11 /* Watchdog timeout register */ #define WDCFG 0x12 /* Watchdog config register */ #define IO_DEFAULT 0x2E /* Address used on Portwell Boards */ static int io = IO_DEFAULT; static int swc_base_addr = -1; static int timeout = DEFAULT_TIMEOUT; /* timeout value */ static unsigned long timer_enabled; /* is the timer enabled? */ static char expect_close; /* is the close expected? 
*/ static DEFINE_SPINLOCK(io_lock); /* to guard us from io races */ static bool nowayout = WATCHDOG_NOWAYOUT; /* -- Low level function ----------------------------------------*/ /* Select pins for Watchdog output */ static inline void pc87413_select_wdt_out(void) { unsigned int cr_data = 0; /* Step 1: Select multiple pin,pin55,as WDT output */ outb_p(SIOCFG2, WDT_INDEX_IO_PORT); cr_data = inb(WDT_DATA_IO_PORT); cr_data |= 0x80; /* Set Bit7 to 1*/ outb_p(SIOCFG2, WDT_INDEX_IO_PORT); outb_p(cr_data, WDT_DATA_IO_PORT); #ifdef DEBUG pr_info(DPFX "Select multiple pin,pin55,as WDT output: Bit7 to 1: %d\n", cr_data); #endif } /* Enable SWC functions */ static inline void pc87413_enable_swc(void) { unsigned int cr_data = 0; /* Step 2: Enable SWC functions */ outb_p(0x07, WDT_INDEX_IO_PORT); /* Point SWC_LDN (LDN=4) */ outb_p(SWC_LDN, WDT_DATA_IO_PORT); outb_p(0x30, WDT_INDEX_IO_PORT); /* Read Index 0x30 First */ cr_data = inb(WDT_DATA_IO_PORT); cr_data |= 0x01; /* Set Bit0 to 1 */ outb_p(0x30, WDT_INDEX_IO_PORT); outb_p(cr_data, WDT_DATA_IO_PORT); /* Index0x30_bit0P1 */ #ifdef DEBUG pr_info(DPFX "pc87413 - Enable SWC functions\n"); #endif } /* Read SWC I/O base address */ static void pc87413_get_swc_base_addr(void) { unsigned char addr_l, addr_h = 0; /* Step 3: Read SWC I/O Base Address */ outb_p(0x60, WDT_INDEX_IO_PORT); /* Read Index 0x60 */ addr_h = inb(WDT_DATA_IO_PORT); outb_p(0x61, WDT_INDEX_IO_PORT); /* Read Index 0x61 */ addr_l = inb(WDT_DATA_IO_PORT); swc_base_addr = (addr_h << 8) + addr_l; #ifdef DEBUG pr_info(DPFX "Read SWC I/O Base Address: low %d, high %d, res %d\n", addr_l, addr_h, swc_base_addr); #endif } /* Select Bank 3 of SWC */ static inline void pc87413_swc_bank3(void) { /* Step 4: Select Bank3 of SWC */ outb_p(inb(swc_base_addr + 0x0f) | 0x03, swc_base_addr + 0x0f); #ifdef DEBUG pr_info(DPFX "Select Bank3 of SWC\n"); #endif } /* Set watchdog timeout to x minutes */ static inline void pc87413_programm_wdto(char pc87413_time) { /* Step 5: Programm WDTO, 
Twd. */ outb_p(pc87413_time, swc_base_addr + WDTO); #ifdef DEBUG pr_info(DPFX "Set WDTO to %d minutes\n", pc87413_time); #endif } /* Enable WDEN */ static inline void pc87413_enable_wden(void) { /* Step 6: Enable WDEN */ outb_p(inb(swc_base_addr + WDCTL) | 0x01, swc_base_addr + WDCTL); #ifdef DEBUG pr_info(DPFX "Enable WDEN\n"); #endif } /* Enable SW_WD_TREN */ static inline void pc87413_enable_sw_wd_tren(void) { /* Enable SW_WD_TREN */ outb_p(inb(swc_base_addr + WDCFG) | 0x80, swc_base_addr + WDCFG); #ifdef DEBUG pr_info(DPFX "Enable SW_WD_TREN\n"); #endif } /* Disable SW_WD_TREN */ static inline void pc87413_disable_sw_wd_tren(void) { /* Disable SW_WD_TREN */ outb_p(inb(swc_base_addr + WDCFG) & 0x7f, swc_base_addr + WDCFG); #ifdef DEBUG pr_info(DPFX "pc87413 - Disable SW_WD_TREN\n"); #endif } /* Enable SW_WD_TRG */ static inline void pc87413_enable_sw_wd_trg(void) { /* Enable SW_WD_TRG */ outb_p(inb(swc_base_addr + WDCTL) | 0x80, swc_base_addr + WDCTL); #ifdef DEBUG pr_info(DPFX "pc87413 - Enable SW_WD_TRG\n"); #endif } /* Disable SW_WD_TRG */ static inline void pc87413_disable_sw_wd_trg(void) { /* Disable SW_WD_TRG */ outb_p(inb(swc_base_addr + WDCTL) & 0x7f, swc_base_addr + WDCTL); #ifdef DEBUG pr_info(DPFX "Disable SW_WD_TRG\n"); #endif } /* -- Higher level functions ------------------------------------*/ /* Enable the watchdog */ static void pc87413_enable(void) { spin_lock(&io_lock); pc87413_swc_bank3(); pc87413_programm_wdto(timeout); pc87413_enable_wden(); pc87413_enable_sw_wd_tren(); pc87413_enable_sw_wd_trg(); spin_unlock(&io_lock); } /* Disable the watchdog */ static void pc87413_disable(void) { spin_lock(&io_lock); pc87413_swc_bank3(); pc87413_disable_sw_wd_tren(); pc87413_disable_sw_wd_trg(); pc87413_programm_wdto(0); spin_unlock(&io_lock); } /* Refresh the watchdog */ static void pc87413_refresh(void) { spin_lock(&io_lock); pc87413_swc_bank3(); pc87413_disable_sw_wd_tren(); pc87413_disable_sw_wd_trg(); pc87413_programm_wdto(timeout); 
pc87413_enable_wden(); pc87413_enable_sw_wd_tren(); pc87413_enable_sw_wd_trg(); spin_unlock(&io_lock); } /* -- File operations -------------------------------------------*/ /** * pc87413_open: * @inode: inode of device * @file: file handle to device * */ static int pc87413_open(struct inode *inode, struct file *file) { /* /dev/watchdog can only be opened once */ if (test_and_set_bit(0, &timer_enabled)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); /* Reload and activate timer */ pc87413_refresh(); pr_info("Watchdog enabled. Timeout set to %d minute(s).\n", timeout); return nonseekable_open(inode, file); } /** * pc87413_release: * @inode: inode to board * @file: file handle to board * * The watchdog has a configurable API. There is a religious dispute * between people who want their watchdog to be able to shut down and * those who want to be sure if the watchdog manager dies the machine * reboots. In the former case we disable the counters, in the latter * case you have to open it again very soon. */ static int pc87413_release(struct inode *inode, struct file *file) { /* Shut off the timer. */ if (expect_close == 42) { pc87413_disable(); pr_info("Watchdog disabled, sleeping again...\n"); } else { pr_crit("Unexpected close, not stopping watchdog!\n"); pc87413_refresh(); } clear_bit(0, &timer_enabled); expect_close = 0; return 0; } /** * pc87413_status: * * return, if the watchdog is enabled (timeout is set...) */ static int pc87413_status(void) { return 0; /* currently not supported */ } /** * pc87413_write: * @file: file handle to the watchdog * @data: data buffer to write * @len: length in bytes * @ppos: pointer to the position to write. No seeks allowed * * A write to a watchdog device is defined as a keepalive signal. Any * write of data will do, as we we don't define content meaning. 
*/ static ssize_t pc87413_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (len) { if (!nowayout) { size_t i; /* reset expect flag */ expect_close = 0; /* scan to see whether or not we got the magic character */ for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } /* someone wrote to us, we should reload the timer */ pc87413_refresh(); } return len; } /** * pc87413_ioctl: * @file: file handle to the device * @cmd: watchdog command * @arg: argument pointer * * The watchdog API defines a common set of functions for all watchdogs * according to their available features. We only actually usefully support * querying capabilities and current status. */ static long pc87413_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int new_timeout; union { struct watchdog_info __user *ident; int __user *i; } uarg; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = "PC87413(HF/F) watchdog", }; uarg.i = (int __user *)arg; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(uarg.ident, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: return put_user(pc87413_status(), uarg.i); case WDIOC_GETBOOTSTATUS: return put_user(0, uarg.i); case WDIOC_SETOPTIONS: { int options, retval = -EINVAL; if (get_user(options, uarg.i)) return -EFAULT; if (options & WDIOS_DISABLECARD) { pc87413_disable(); retval = 0; } if (options & WDIOS_ENABLECARD) { pc87413_enable(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: pc87413_refresh(); #ifdef DEBUG pr_info(DPFX "keepalive\n"); #endif return 0; case WDIOC_SETTIMEOUT: if (get_user(new_timeout, uarg.i)) return -EFAULT; /* the API states this is given in secs */ new_timeout /= 60; if (new_timeout < 0 || new_timeout > MAX_TIMEOUT) return -EINVAL; timeout = new_timeout; pc87413_refresh(); /* fall through and return the new timeout... */ case WDIOC_GETTIMEOUT: new_timeout = timeout * 60; return put_user(new_timeout, uarg.i); default: return -ENOTTY; } } /* -- Notifier funtions -----------------------------------------*/ /** * notify_sys: * @this: our notifier block * @code: the event being reported * @unused: unused * * Our notifier is called on system shutdowns. We want to turn the card * off at reboot otherwise the machine will reboot again during memory * test or worse yet during the following fsck. This would suck, in fact * trust me - if it happens it does suck. 
*/
static int pc87413_notify_sys(struct notifier_block *this,
			      unsigned long code,
			      void *unused)
{
	/* Only react to shutdown/halt events; everything else passes through. */
	if (code == SYS_DOWN || code == SYS_HALT)
		/* Turn the card off */
		pc87413_disable();
	return NOTIFY_DONE;
}

/* -- Module's structures ---------------------------------------*/

/* /dev/watchdog file operations: write = keepalive, ioctl = watchdog API */
static const struct file_operations pc87413_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= pc87413_write,
	.unlocked_ioctl	= pc87413_ioctl,
	.open		= pc87413_open,
	.release	= pc87413_release,
};

/* Reboot notifier so the card is stopped before the system goes down. */
static struct notifier_block pc87413_notifier = {
	.notifier_call  = pc87413_notify_sys,
};

/* Registers the standard watchdog misc device (minor WATCHDOG_MINOR). */
static struct miscdevice pc87413_miscdev = {
	.minor = WATCHDOG_MINOR,
	.name  = "watchdog",
	.fops  = &pc87413_fops,
};

/* -- Module init functions -------------------------------------*/

/**
 *	pc87413_init: module's "constructor"
 *
 *	Set up the WDT watchdog board. All we have to do is grab the
 *	resources we require and bitch if anyone beat us to them.
 *	The open() function will actually kick the board off.
 */
static int __init pc87413_init(void)
{
	int ret;

	pr_info("Version " VERSION " at io 0x%X\n",
							WDT_INDEX_IO_PORT);

	/* Superio config ports are shared; take the muxed region first. */
	if (!request_muxed_region(io, 2, MODNAME))
		return -EBUSY;

	/*
	 * A failed reboot-notifier registration is only logged, not fatal:
	 * the watchdog still works, it just won't be stopped on shutdown.
	 */
	ret = register_reboot_notifier(&pc87413_notifier);
	if (ret != 0)
		pr_err("cannot register reboot notifier (err=%d)\n", ret);

	ret = misc_register(&pc87413_miscdev);
	if (ret != 0) {
		pr_err("cannot register miscdev on minor=%d (err=%d)\n",
		       WATCHDOG_MINOR, ret);
		goto reboot_unreg;
	}

	pr_info("initialized. timeout=%d min\n", timeout);

	/* Route the WDT output and discover the SWC base address. */
	pc87413_select_wdt_out();
	pc87413_enable_swc();
	pc87413_get_swc_base_addr();

	if (!request_region(swc_base_addr, 0x20, MODNAME)) {
		pr_err("cannot request SWC region at 0x%x\n",
		       swc_base_addr);
		ret = -EBUSY;
		goto misc_unreg;
	}

	pc87413_enable();

	/* The muxed config region is only needed during setup. */
	release_region(io, 2);
	return 0;

	/* goto-based unwinding in reverse order of acquisition. */
misc_unreg:
	misc_deregister(&pc87413_miscdev);
reboot_unreg:
	unregister_reboot_notifier(&pc87413_notifier);
	release_region(io, 2);
	return ret;
}

/**
 *	pc87413_exit: module's "destructor"
 *
 *	Unload the watchdog. You cannot do this with any file handles open.
 *	If your watchdog is set to continue ticking on close and you unload
 *	it, well it keeps ticking. We won't get the interrupt but the board
 *	will not touch PC memory so all is fine. You just have to load a new
 *	module in 60 seconds or reboot.
 */
static void __exit pc87413_exit(void)
{
	/* Stop the timer before we leave */
	if (!nowayout) {
		pc87413_disable();
		pr_info("Watchdog disabled\n");
	}

	misc_deregister(&pc87413_miscdev);
	unregister_reboot_notifier(&pc87413_notifier);
	/* SWC region was held for the whole module lifetime. */
	release_region(swc_base_addr, 0x20);

	pr_info("watchdog component driver removed\n");
}

module_init(pc87413_init);
module_exit(pc87413_exit);

MODULE_AUTHOR("Sven Anders <anders@anduras.de>");
MODULE_AUTHOR("Marcus Junker <junker@anduras.de>");
MODULE_DESCRIPTION("PC87413 WDT driver");
MODULE_LICENSE("GPL");

module_param(io, int, 0);
MODULE_PARM_DESC(io, MODNAME " I/O port (default: "
					__MODULE_STRING(IO_DEFAULT) ").");

module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
		"Watchdog timeout in minutes (default="
				__MODULE_STRING(DEFAULT_TIMEOUT) ").");

module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
		"Watchdog cannot be stopped once started (default="
				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
gpl-2.0
JoseDuque/linux.4.1.7
tools/perf/util/path.c
1829
3416
/*
 * I'm tired of doing "vsnprintf()" etc just to open a
 * file, so here's a "return static buffer with printf"
 * interface for paths.
 *
 * It's obviously not thread-safe. Sue me. But it's quite
 * useful for doing things like
 *
 *   f = open(mkpath("%s/%s.perf", base, name), O_RDONLY);
 *
 * which is what it's designed for.
 */
#include "cache.h"

/* Sentinel returned whenever a formatted path would not fit the buffer. */
static char bad_path[] = "/bad-path/";
/*
 * Two hacks:
 */

/* Base directory prepended by perf_vsnpath()/perf_path(); currently ".". */
static const char *get_perf_dir(void)
{
	return ".";
}

/*
 * If libc has strlcpy() then that version will override this
 * implementation:
 */
/* Bounded copy that always NUL-terminates (when size > 0) and returns
 * strlen(src), i.e. the length the full copy would have needed. */
size_t __weak strlcpy(char *dest, const char *src, size_t size)
{
	size_t ret = strlen(src);

	if (size) {
		size_t len = (ret >= size) ? size - 1 : ret;
		memcpy(dest, src, len);
		dest[len] = '\0';
	}
	return ret;
}

/* Rotate through four static PATH_MAX buffers so up to four results from
 * mkpath()/perf_path() can be alive at once (e.g. in one printf call). */
static char *get_pathname(void)
{
	static char pathname_array[4][PATH_MAX];
	static int idx;

	return pathname_array[3 & ++idx];
}

/* Strip a leading "./" (and any following slashes) from @path in place;
 * returns a pointer into the same buffer. */
static char *cleanup_path(char *path)
{
	/* Clean it up */
	if (!memcmp(path, "./", 2)) {
		path += 2;
		while (*path == '/')
			path++;
	}
	return path;
}

/* Format "<perf_dir>/<fmt...>" into @buf (size @n); on overflow the buffer
 * is filled with bad_path and returned as-is. */
static char *perf_vsnpath(char *buf, size_t n, const char *fmt, va_list args)
{
	const char *perf_dir = get_perf_dir();
	size_t len;

	len = strlen(perf_dir);
	if (n < len + 1)
		goto bad;
	memcpy(buf, perf_dir, len);
	if (len && !is_dir_sep(perf_dir[len-1]))
		buf[len++] = '/';
	len += vsnprintf(buf + len, n - len, fmt, args);
	if (len >= n)
		goto bad;
	return cleanup_path(buf);
bad:
	strlcpy(buf, bad_path, n);
	return buf;
}

/* Heap-allocated variant of perf_vsnpath(); caller must free the result. */
char *perf_pathdup(const char *fmt, ...)
{
	char path[PATH_MAX];
	va_list args;

	va_start(args, fmt);
	(void)perf_vsnpath(path, sizeof(path), fmt, args);
	va_end(args);

	return xstrdup(path);
}

/* printf into one of the rotating static buffers (no perf_dir prefix). */
char *mkpath(const char *fmt, ...)
{
	va_list args;
	unsigned len;
	char *pathname = get_pathname();

	va_start(args, fmt);
	len = vsnprintf(pathname, PATH_MAX, fmt, args);
	va_end(args);
	if (len >= PATH_MAX)
		return bad_path;
	return cleanup_path(pathname);
}

/* Like mkpath() but prefixed with the perf base directory. */
char *perf_path(const char *fmt, ...)
{
	const char *perf_dir = get_perf_dir();
	char *pathname = get_pathname();
	va_list args;
	unsigned len;

	len = strlen(perf_dir);
	/* Leave headroom for the formatted tail; bail out early if the
	 * prefix alone is already close to PATH_MAX. */
	if (len > PATH_MAX-100)
		return bad_path;
	memcpy(pathname, perf_dir, len);
	if (len && perf_dir[len-1] != '/')
		pathname[len++] = '/';
	va_start(args, fmt);
	len += vsnprintf(pathname + len, PATH_MAX - len, fmt, args);
	va_end(args);
	if (len >= PATH_MAX)
		return bad_path;
	return cleanup_path(pathname);
}

/* strip arbitrary amount of directory separators at end of path */
static inline int chomp_trailing_dir_sep(const char *path, int len)
{
	while (len && is_dir_sep(path[len - 1]))
		len--;
	return len;
}

/*
 * If path ends with suffix (complete path components), returns the
 * part before suffix (sans trailing directory separators).
 * Otherwise returns NULL.
 */
char *strip_path_suffix(const char *path, const char *suffix)
{
	int path_len = strlen(path), suffix_len = strlen(suffix);

	/* Walk both strings backwards, collapsing runs of separators so
	 * "a//b" still matches the suffix "b". */
	while (suffix_len) {
		if (!path_len)
			return NULL;

		if (is_dir_sep(path[path_len - 1])) {
			if (!is_dir_sep(suffix[suffix_len - 1]))
				return NULL;
			path_len = chomp_trailing_dir_sep(path, path_len);
			suffix_len = chomp_trailing_dir_sep(suffix, suffix_len);
		}
		else if (path[--path_len] != suffix[--suffix_len])
			return NULL;
	}

	/* The match must start at a component boundary. */
	if (path_len && !is_dir_sep(path[path_len - 1]))
		return NULL;
	return strndup(path, chomp_trailing_dir_sep(path, path_len));
}
gpl-2.0
Jeongduckho/E250_KITKAT
drivers/acpi/sbs.c
2597
28843
/* * sbs.c - ACPI Smart Battery System Driver ($Revision: 2.0 $) * * Copyright (c) 2007 Alexey Starikovskiy <astarikovskiy@suse.de> * Copyright (c) 2005-2007 Vladimir Lebedev <vladimir.p.lebedev@intel.com> * Copyright (c) 2005 Rich Townsend <rhdt@bartol.udel.edu> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
* * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #ifdef CONFIG_ACPI_PROCFS_POWER #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/uaccess.h> #endif #include <linux/acpi.h> #include <linux/timer.h> #include <linux/jiffies.h> #include <linux/delay.h> #include <linux/power_supply.h> #include "sbshc.h" #define PREFIX "ACPI: " #define ACPI_SBS_CLASS "sbs" #define ACPI_AC_CLASS "ac_adapter" #define ACPI_BATTERY_CLASS "battery" #define ACPI_SBS_DEVICE_NAME "Smart Battery System" #define ACPI_SBS_FILE_INFO "info" #define ACPI_SBS_FILE_STATE "state" #define ACPI_SBS_FILE_ALARM "alarm" #define ACPI_BATTERY_DIR_NAME "BAT%i" #define ACPI_AC_DIR_NAME "AC0" #define ACPI_SBS_NOTIFY_STATUS 0x80 #define ACPI_SBS_NOTIFY_INFO 0x81 MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>"); MODULE_DESCRIPTION("Smart Battery System ACPI interface driver"); MODULE_LICENSE("GPL"); static unsigned int cache_time = 1000; module_param(cache_time, uint, 0644); MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); extern struct proc_dir_entry *acpi_lock_ac_dir(void); extern struct proc_dir_entry *acpi_lock_battery_dir(void); extern void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir); extern void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir); #define MAX_SBS_BAT 4 #define ACPI_SBS_BLOCK_MAX 32 static const struct acpi_device_id sbs_device_ids[] = { {"ACPI0002", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, sbs_device_ids); struct acpi_battery { struct power_supply bat; struct acpi_sbs *sbs; #ifdef CONFIG_ACPI_PROCFS_POWER struct proc_dir_entry *proc_entry; #endif unsigned long update_time; char name[8]; char manufacturer_name[ACPI_SBS_BLOCK_MAX]; char device_name[ACPI_SBS_BLOCK_MAX]; char device_chemistry[ACPI_SBS_BLOCK_MAX]; u16 alarm_capacity; u16 full_charge_capacity; u16 
design_capacity; u16 design_voltage; u16 serial_number; u16 cycle_count; u16 temp_now; u16 voltage_now; s16 rate_now; s16 rate_avg; u16 capacity_now; u16 state_of_charge; u16 state; u16 mode; u16 spec; u8 id; u8 present:1; u8 have_sysfs_alarm:1; }; #define to_acpi_battery(x) container_of(x, struct acpi_battery, bat); struct acpi_sbs { struct power_supply charger; struct acpi_device *device; struct acpi_smb_hc *hc; struct mutex lock; #ifdef CONFIG_ACPI_PROCFS_POWER struct proc_dir_entry *charger_entry; #endif struct acpi_battery battery[MAX_SBS_BAT]; u8 batteries_supported:4; u8 manager_present:1; u8 charger_present:1; }; #define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger) static inline int battery_scale(int log) { int scale = 1; while (log--) scale *= 10; return scale; } static inline int acpi_battery_vscale(struct acpi_battery *battery) { return battery_scale((battery->spec & 0x0f00) >> 8); } static inline int acpi_battery_ipscale(struct acpi_battery *battery) { return battery_scale((battery->spec & 0xf000) >> 12); } static inline int acpi_battery_mode(struct acpi_battery *battery) { return (battery->mode & 0x8000); } static inline int acpi_battery_scale(struct acpi_battery *battery) { return (acpi_battery_mode(battery) ? 
10 : 1) * acpi_battery_ipscale(battery); } static int sbs_get_ac_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct acpi_sbs *sbs = to_acpi_sbs(psy); switch (psp) { case POWER_SUPPLY_PROP_ONLINE: val->intval = sbs->charger_present; break; default: return -EINVAL; } return 0; } static int acpi_battery_technology(struct acpi_battery *battery) { if (!strcasecmp("NiCd", battery->device_chemistry)) return POWER_SUPPLY_TECHNOLOGY_NiCd; if (!strcasecmp("NiMH", battery->device_chemistry)) return POWER_SUPPLY_TECHNOLOGY_NiMH; if (!strcasecmp("LION", battery->device_chemistry)) return POWER_SUPPLY_TECHNOLOGY_LION; if (!strcasecmp("LiP", battery->device_chemistry)) return POWER_SUPPLY_TECHNOLOGY_LIPO; return POWER_SUPPLY_TECHNOLOGY_UNKNOWN; } static int acpi_sbs_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct acpi_battery *battery = to_acpi_battery(psy); if ((!battery->present) && psp != POWER_SUPPLY_PROP_PRESENT) return -ENODEV; switch (psp) { case POWER_SUPPLY_PROP_STATUS: if (battery->rate_now < 0) val->intval = POWER_SUPPLY_STATUS_DISCHARGING; else if (battery->rate_now > 0) val->intval = POWER_SUPPLY_STATUS_CHARGING; else val->intval = POWER_SUPPLY_STATUS_FULL; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = battery->present; break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = acpi_battery_technology(battery); break; case POWER_SUPPLY_PROP_CYCLE_COUNT: val->intval = battery->cycle_count; break; case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: val->intval = battery->design_voltage * acpi_battery_vscale(battery) * 1000; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: val->intval = battery->voltage_now * acpi_battery_vscale(battery) * 1000; break; case POWER_SUPPLY_PROP_CURRENT_NOW: case POWER_SUPPLY_PROP_POWER_NOW: val->intval = abs(battery->rate_now) * acpi_battery_ipscale(battery) * 1000; break; case POWER_SUPPLY_PROP_CURRENT_AVG: case 
POWER_SUPPLY_PROP_POWER_AVG: val->intval = abs(battery->rate_avg) * acpi_battery_ipscale(battery) * 1000; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = battery->state_of_charge; break; case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: val->intval = battery->design_capacity * acpi_battery_scale(battery) * 1000; break; case POWER_SUPPLY_PROP_CHARGE_FULL: case POWER_SUPPLY_PROP_ENERGY_FULL: val->intval = battery->full_charge_capacity * acpi_battery_scale(battery) * 1000; break; case POWER_SUPPLY_PROP_CHARGE_NOW: case POWER_SUPPLY_PROP_ENERGY_NOW: val->intval = battery->capacity_now * acpi_battery_scale(battery) * 1000; break; case POWER_SUPPLY_PROP_TEMP: val->intval = battery->temp_now - 2730; // dK -> dC break; case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = battery->device_name; break; case POWER_SUPPLY_PROP_MANUFACTURER: val->strval = battery->manufacturer_name; break; default: return -EINVAL; } return 0; } static enum power_supply_property sbs_ac_props[] = { POWER_SUPPLY_PROP_ONLINE, }; static enum power_supply_property sbs_charge_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_CYCLE_COUNT, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_CHARGE_NOW, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, }; static enum power_supply_property sbs_energy_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_POWER_NOW, POWER_SUPPLY_PROP_POWER_AVG, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 
POWER_SUPPLY_PROP_ENERGY_FULL, POWER_SUPPLY_PROP_ENERGY_NOW, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, }; /* -------------------------------------------------------------------------- Smart Battery System Management -------------------------------------------------------------------------- */ struct acpi_battery_reader { u8 command; /* command for battery */ u8 mode; /* word or block? */ size_t offset; /* offset inside struct acpi_sbs_battery */ }; static struct acpi_battery_reader info_readers[] = { {0x01, SMBUS_READ_WORD, offsetof(struct acpi_battery, alarm_capacity)}, {0x03, SMBUS_READ_WORD, offsetof(struct acpi_battery, mode)}, {0x10, SMBUS_READ_WORD, offsetof(struct acpi_battery, full_charge_capacity)}, {0x17, SMBUS_READ_WORD, offsetof(struct acpi_battery, cycle_count)}, {0x18, SMBUS_READ_WORD, offsetof(struct acpi_battery, design_capacity)}, {0x19, SMBUS_READ_WORD, offsetof(struct acpi_battery, design_voltage)}, {0x1a, SMBUS_READ_WORD, offsetof(struct acpi_battery, spec)}, {0x1c, SMBUS_READ_WORD, offsetof(struct acpi_battery, serial_number)}, {0x20, SMBUS_READ_BLOCK, offsetof(struct acpi_battery, manufacturer_name)}, {0x21, SMBUS_READ_BLOCK, offsetof(struct acpi_battery, device_name)}, {0x22, SMBUS_READ_BLOCK, offsetof(struct acpi_battery, device_chemistry)}, }; static struct acpi_battery_reader state_readers[] = { {0x08, SMBUS_READ_WORD, offsetof(struct acpi_battery, temp_now)}, {0x09, SMBUS_READ_WORD, offsetof(struct acpi_battery, voltage_now)}, {0x0a, SMBUS_READ_WORD, offsetof(struct acpi_battery, rate_now)}, {0x0b, SMBUS_READ_WORD, offsetof(struct acpi_battery, rate_avg)}, {0x0f, SMBUS_READ_WORD, offsetof(struct acpi_battery, capacity_now)}, {0x0e, SMBUS_READ_WORD, offsetof(struct acpi_battery, state_of_charge)}, {0x16, SMBUS_READ_WORD, offsetof(struct acpi_battery, state)}, }; static int acpi_manager_get_info(struct acpi_sbs *sbs) { int result = 0; u16 battery_system_info; result = acpi_smbus_read(sbs->hc, 
SMBUS_READ_WORD, ACPI_SBS_MANAGER, 0x04, (u8 *)&battery_system_info); if (!result) sbs->batteries_supported = battery_system_info & 0x000f; return result; } static int acpi_battery_get_info(struct acpi_battery *battery) { int i, result = 0; for (i = 0; i < ARRAY_SIZE(info_readers); ++i) { result = acpi_smbus_read(battery->sbs->hc, info_readers[i].mode, ACPI_SBS_BATTERY, info_readers[i].command, (u8 *) battery + info_readers[i].offset); if (result) break; } return result; } static int acpi_battery_get_state(struct acpi_battery *battery) { int i, result = 0; if (battery->update_time && time_before(jiffies, battery->update_time + msecs_to_jiffies(cache_time))) return 0; for (i = 0; i < ARRAY_SIZE(state_readers); ++i) { result = acpi_smbus_read(battery->sbs->hc, state_readers[i].mode, ACPI_SBS_BATTERY, state_readers[i].command, (u8 *)battery + state_readers[i].offset); if (result) goto end; } end: battery->update_time = jiffies; return result; } static int acpi_battery_get_alarm(struct acpi_battery *battery) { return acpi_smbus_read(battery->sbs->hc, SMBUS_READ_WORD, ACPI_SBS_BATTERY, 0x01, (u8 *)&battery->alarm_capacity); } static int acpi_battery_set_alarm(struct acpi_battery *battery) { struct acpi_sbs *sbs = battery->sbs; u16 value, sel = 1 << (battery->id + 12); int ret; if (sbs->manager_present) { ret = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_MANAGER, 0x01, (u8 *)&value); if (ret) goto end; if ((value & 0xf000) != sel) { value &= 0x0fff; value |= sel; ret = acpi_smbus_write(sbs->hc, SMBUS_WRITE_WORD, ACPI_SBS_MANAGER, 0x01, (u8 *)&value, 2); if (ret) goto end; } } ret = acpi_smbus_write(sbs->hc, SMBUS_WRITE_WORD, ACPI_SBS_BATTERY, 0x01, (u8 *)&battery->alarm_capacity, 2); end: return ret; } static int acpi_ac_get_present(struct acpi_sbs *sbs) { int result; u16 status; result = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_CHARGER, 0x13, (u8 *) & status); if (!result) sbs->charger_present = (status >> 15) & 0x1; return result; } static ssize_t 
acpi_battery_alarm_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
	/* sysfs "alarm" read: refresh from the battery, report in uAh/uWh. */
	acpi_battery_get_alarm(battery);
	return sprintf(buf, "%d\n", battery->alarm_capacity *
				acpi_battery_scale(battery) *
				1000);
}

/* sysfs "alarm" write: parse a uAh/uWh value, scale it back to battery
 * units, and push it to the hardware if the battery is present. */
static ssize_t acpi_battery_alarm_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	unsigned long x;
	struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
	/* Malformed input is silently ignored; count is returned either way. */
	if (sscanf(buf, "%ld\n", &x) == 1)
		battery->alarm_capacity = x /
			(1000 * acpi_battery_scale(battery));
	if (battery->present)
		acpi_battery_set_alarm(battery);
	return count;
}

/* Per-battery sysfs attribute, registered from acpi_battery_add(). */
static struct device_attribute alarm_attr = {
	.attr = {.name = "alarm", .mode = 0644},
	.show = acpi_battery_alarm_show,
	.store = acpi_battery_alarm_store,
};

/* --------------------------------------------------------------------------
                              FS Interface (/proc/acpi)
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI_PROCFS_POWER
/* Generic Routines */
/*
 * Create the (deprecated) procfs directory @dir_name under @parent_dir and
 * populate it with whichever of the "info"/"state"/"alarm" entries the
 * caller supplied fops for.  @data becomes the proc entries' private data.
 * Returns 0 or -ENODEV if the directory cannot be created.
 */
static int
acpi_sbs_add_fs(struct proc_dir_entry **dir,
		struct proc_dir_entry *parent_dir,
		char *dir_name,
		const struct file_operations *info_fops,
		const struct file_operations *state_fops,
		const struct file_operations *alarm_fops, void *data)
{
	printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded,"
			" please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
	if (!*dir) {
		*dir = proc_mkdir(dir_name, parent_dir);
		if (!*dir) {
			return -ENODEV;
		}
	}

	/* 'info' [R] */
	if (info_fops)
		proc_create_data(ACPI_SBS_FILE_INFO, S_IRUGO, *dir,
				 info_fops, data);

	/* 'state' [R] */
	if (state_fops)
		proc_create_data(ACPI_SBS_FILE_STATE, S_IRUGO, *dir,
				 state_fops, data);

	/* 'alarm' [R/W] */
	if (alarm_fops)
		proc_create_data(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir,
				 alarm_fops, data);
	return 0;
}

/* Tear down the entries created by acpi_sbs_add_fs() and the directory. */
static void
acpi_sbs_remove_fs(struct proc_dir_entry **dir,
			   struct proc_dir_entry *parent_dir)
{
	if (*dir) {
		remove_proc_entry(ACPI_SBS_FILE_INFO,
*dir); remove_proc_entry(ACPI_SBS_FILE_STATE, *dir); remove_proc_entry(ACPI_SBS_FILE_ALARM, *dir); remove_proc_entry((*dir)->name, parent_dir); *dir = NULL; } } /* Smart Battery Interface */ static struct proc_dir_entry *acpi_battery_dir = NULL; static inline char *acpi_battery_units(struct acpi_battery *battery) { return acpi_battery_mode(battery) ? " mW" : " mA"; } static int acpi_battery_read_info(struct seq_file *seq, void *offset) { struct acpi_battery *battery = seq->private; struct acpi_sbs *sbs = battery->sbs; int result = 0; mutex_lock(&sbs->lock); seq_printf(seq, "present: %s\n", (battery->present) ? "yes" : "no"); if (!battery->present) goto end; seq_printf(seq, "design capacity: %i%sh\n", battery->design_capacity * acpi_battery_scale(battery), acpi_battery_units(battery)); seq_printf(seq, "last full capacity: %i%sh\n", battery->full_charge_capacity * acpi_battery_scale(battery), acpi_battery_units(battery)); seq_printf(seq, "battery technology: rechargeable\n"); seq_printf(seq, "design voltage: %i mV\n", battery->design_voltage * acpi_battery_vscale(battery)); seq_printf(seq, "design capacity warning: unknown\n"); seq_printf(seq, "design capacity low: unknown\n"); seq_printf(seq, "cycle count: %i\n", battery->cycle_count); seq_printf(seq, "capacity granularity 1: unknown\n"); seq_printf(seq, "capacity granularity 2: unknown\n"); seq_printf(seq, "model number: %s\n", battery->device_name); seq_printf(seq, "serial number: %i\n", battery->serial_number); seq_printf(seq, "battery type: %s\n", battery->device_chemistry); seq_printf(seq, "OEM info: %s\n", battery->manufacturer_name); end: mutex_unlock(&sbs->lock); return result; } static int acpi_battery_info_open_fs(struct inode *inode, struct file *file) { return single_open(file, acpi_battery_read_info, PDE(inode)->data); } static int acpi_battery_read_state(struct seq_file *seq, void *offset) { struct acpi_battery *battery = seq->private; struct acpi_sbs *sbs = battery->sbs; int rate; 
mutex_lock(&sbs->lock); seq_printf(seq, "present: %s\n", (battery->present) ? "yes" : "no"); if (!battery->present) goto end; acpi_battery_get_state(battery); seq_printf(seq, "capacity state: %s\n", (battery->state & 0x0010) ? "critical" : "ok"); seq_printf(seq, "charging state: %s\n", (battery->rate_now < 0) ? "discharging" : ((battery->rate_now > 0) ? "charging" : "charged")); rate = abs(battery->rate_now) * acpi_battery_ipscale(battery); rate *= (acpi_battery_mode(battery))?(battery->voltage_now * acpi_battery_vscale(battery)/1000):1; seq_printf(seq, "present rate: %d%s\n", rate, acpi_battery_units(battery)); seq_printf(seq, "remaining capacity: %i%sh\n", battery->capacity_now * acpi_battery_scale(battery), acpi_battery_units(battery)); seq_printf(seq, "present voltage: %i mV\n", battery->voltage_now * acpi_battery_vscale(battery)); end: mutex_unlock(&sbs->lock); return 0; } static int acpi_battery_state_open_fs(struct inode *inode, struct file *file) { return single_open(file, acpi_battery_read_state, PDE(inode)->data); } static int acpi_battery_read_alarm(struct seq_file *seq, void *offset) { struct acpi_battery *battery = seq->private; struct acpi_sbs *sbs = battery->sbs; int result = 0; mutex_lock(&sbs->lock); if (!battery->present) { seq_printf(seq, "present: no\n"); goto end; } acpi_battery_get_alarm(battery); seq_printf(seq, "alarm: "); if (battery->alarm_capacity) seq_printf(seq, "%i%sh\n", battery->alarm_capacity * acpi_battery_scale(battery), acpi_battery_units(battery)); else seq_printf(seq, "disabled\n"); end: mutex_unlock(&sbs->lock); return result; } static ssize_t acpi_battery_write_alarm(struct file *file, const char __user * buffer, size_t count, loff_t * ppos) { struct seq_file *seq = file->private_data; struct acpi_battery *battery = seq->private; struct acpi_sbs *sbs = battery->sbs; char alarm_string[12] = { '\0' }; int result = 0; mutex_lock(&sbs->lock); if (!battery->present) { result = -ENODEV; goto end; } if (count > sizeof(alarm_string) 
- 1) { result = -EINVAL; goto end; } if (copy_from_user(alarm_string, buffer, count)) { result = -EFAULT; goto end; } alarm_string[count] = 0; battery->alarm_capacity = simple_strtoul(alarm_string, NULL, 0) / acpi_battery_scale(battery); acpi_battery_set_alarm(battery); end: mutex_unlock(&sbs->lock); if (result) return result; return count; } static int acpi_battery_alarm_open_fs(struct inode *inode, struct file *file) { return single_open(file, acpi_battery_read_alarm, PDE(inode)->data); } static const struct file_operations acpi_battery_info_fops = { .open = acpi_battery_info_open_fs, .read = seq_read, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static const struct file_operations acpi_battery_state_fops = { .open = acpi_battery_state_open_fs, .read = seq_read, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; static const struct file_operations acpi_battery_alarm_fops = { .open = acpi_battery_alarm_open_fs, .read = seq_read, .write = acpi_battery_write_alarm, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; /* Legacy AC Adapter Interface */ static struct proc_dir_entry *acpi_ac_dir = NULL; static int acpi_ac_read_state(struct seq_file *seq, void *offset) { struct acpi_sbs *sbs = seq->private; mutex_lock(&sbs->lock); seq_printf(seq, "state: %s\n", sbs->charger_present ? 
"on-line" : "off-line"); mutex_unlock(&sbs->lock); return 0; } static int acpi_ac_state_open_fs(struct inode *inode, struct file *file) { return single_open(file, acpi_ac_read_state, PDE(inode)->data); } static const struct file_operations acpi_ac_state_fops = { .open = acpi_ac_state_open_fs, .read = seq_read, .llseek = seq_lseek, .release = single_release, .owner = THIS_MODULE, }; #endif /* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ static int acpi_battery_read(struct acpi_battery *battery) { int result = 0, saved_present = battery->present; u16 state; if (battery->sbs->manager_present) { result = acpi_smbus_read(battery->sbs->hc, SMBUS_READ_WORD, ACPI_SBS_MANAGER, 0x01, (u8 *)&state); if (!result) battery->present = state & (1 << battery->id); state &= 0x0fff; state |= 1 << (battery->id + 12); acpi_smbus_write(battery->sbs->hc, SMBUS_WRITE_WORD, ACPI_SBS_MANAGER, 0x01, (u8 *)&state, 2); } else if (battery->id == 0) battery->present = 1; if (result || !battery->present) return result; if (saved_present != battery->present) { battery->update_time = 0; result = acpi_battery_get_info(battery); if (result) return result; } result = acpi_battery_get_state(battery); return result; } /* Smart Battery */ static int acpi_battery_add(struct acpi_sbs *sbs, int id) { struct acpi_battery *battery = &sbs->battery[id]; int result; battery->id = id; battery->sbs = sbs; result = acpi_battery_read(battery); if (result) return result; sprintf(battery->name, ACPI_BATTERY_DIR_NAME, id); #ifdef CONFIG_ACPI_PROCFS_POWER acpi_sbs_add_fs(&battery->proc_entry, acpi_battery_dir, battery->name, &acpi_battery_info_fops, &acpi_battery_state_fops, &acpi_battery_alarm_fops, battery); #endif battery->bat.name = battery->name; battery->bat.type = POWER_SUPPLY_TYPE_BATTERY; if (!acpi_battery_mode(battery)) { battery->bat.properties = sbs_charge_battery_props; 
battery->bat.num_properties = ARRAY_SIZE(sbs_charge_battery_props); } else { battery->bat.properties = sbs_energy_battery_props; battery->bat.num_properties = ARRAY_SIZE(sbs_energy_battery_props); } battery->bat.get_property = acpi_sbs_battery_get_property; result = power_supply_register(&sbs->device->dev, &battery->bat); if (result) goto end; result = device_create_file(battery->bat.dev, &alarm_attr); if (result) goto end; battery->have_sysfs_alarm = 1; end: printk(KERN_INFO PREFIX "%s [%s]: Battery Slot [%s] (battery %s)\n", ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device), battery->name, battery->present ? "present" : "absent"); return result; } static void acpi_battery_remove(struct acpi_sbs *sbs, int id) { struct acpi_battery *battery = &sbs->battery[id]; if (battery->bat.dev) { if (battery->have_sysfs_alarm) device_remove_file(battery->bat.dev, &alarm_attr); power_supply_unregister(&battery->bat); } #ifdef CONFIG_ACPI_PROCFS_POWER if (battery->proc_entry) acpi_sbs_remove_fs(&battery->proc_entry, acpi_battery_dir); #endif } static int acpi_charger_add(struct acpi_sbs *sbs) { int result; result = acpi_ac_get_present(sbs); if (result) goto end; #ifdef CONFIG_ACPI_PROCFS_POWER result = acpi_sbs_add_fs(&sbs->charger_entry, acpi_ac_dir, ACPI_AC_DIR_NAME, NULL, &acpi_ac_state_fops, NULL, sbs); if (result) goto end; #endif sbs->charger.name = "sbs-charger"; sbs->charger.type = POWER_SUPPLY_TYPE_MAINS; sbs->charger.properties = sbs_ac_props; sbs->charger.num_properties = ARRAY_SIZE(sbs_ac_props); sbs->charger.get_property = sbs_get_ac_property; power_supply_register(&sbs->device->dev, &sbs->charger); printk(KERN_INFO PREFIX "%s [%s]: AC Adapter [%s] (%s)\n", ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device), ACPI_AC_DIR_NAME, sbs->charger_present ? 
"on-line" : "off-line"); end: return result; } static void acpi_charger_remove(struct acpi_sbs *sbs) { if (sbs->charger.dev) power_supply_unregister(&sbs->charger); #ifdef CONFIG_ACPI_PROCFS_POWER if (sbs->charger_entry) acpi_sbs_remove_fs(&sbs->charger_entry, acpi_ac_dir); #endif } static void acpi_sbs_callback(void *context) { int id; struct acpi_sbs *sbs = context; struct acpi_battery *bat; u8 saved_charger_state = sbs->charger_present; u8 saved_battery_state; acpi_ac_get_present(sbs); if (sbs->charger_present != saved_charger_state) { #ifdef CONFIG_ACPI_PROC_EVENT acpi_bus_generate_proc_event4(ACPI_AC_CLASS, ACPI_AC_DIR_NAME, ACPI_SBS_NOTIFY_STATUS, sbs->charger_present); #endif kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE); } if (sbs->manager_present) { for (id = 0; id < MAX_SBS_BAT; ++id) { if (!(sbs->batteries_supported & (1 << id))) continue; bat = &sbs->battery[id]; saved_battery_state = bat->present; acpi_battery_read(bat); if (saved_battery_state == bat->present) continue; #ifdef CONFIG_ACPI_PROC_EVENT acpi_bus_generate_proc_event4(ACPI_BATTERY_CLASS, bat->name, ACPI_SBS_NOTIFY_STATUS, bat->present); #endif kobject_uevent(&bat->bat.dev->kobj, KOBJ_CHANGE); } } } static int acpi_sbs_remove(struct acpi_device *device, int type); static int acpi_sbs_add(struct acpi_device *device) { struct acpi_sbs *sbs; int result = 0; int id; sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL); if (!sbs) { result = -ENOMEM; goto end; } mutex_init(&sbs->lock); sbs->hc = acpi_driver_data(device->parent); sbs->device = device; strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_SBS_CLASS); device->driver_data = sbs; result = acpi_charger_add(sbs); if (result) goto end; result = acpi_manager_get_info(sbs); if (!result) { sbs->manager_present = 1; for (id = 0; id < MAX_SBS_BAT; ++id) if ((sbs->batteries_supported & (1 << id))) acpi_battery_add(sbs, id); } else acpi_battery_add(sbs, 0); acpi_smbus_register_callback(sbs->hc, 
acpi_sbs_callback, sbs); end: if (result) acpi_sbs_remove(device, 0); return result; } static int acpi_sbs_remove(struct acpi_device *device, int type) { struct acpi_sbs *sbs; int id; if (!device) return -EINVAL; sbs = acpi_driver_data(device); if (!sbs) return -EINVAL; mutex_lock(&sbs->lock); acpi_smbus_unregister_callback(sbs->hc); for (id = 0; id < MAX_SBS_BAT; ++id) acpi_battery_remove(sbs, id); acpi_charger_remove(sbs); mutex_unlock(&sbs->lock); mutex_destroy(&sbs->lock); kfree(sbs); return 0; } static void acpi_sbs_rmdirs(void) { #ifdef CONFIG_ACPI_PROCFS_POWER if (acpi_ac_dir) { acpi_unlock_ac_dir(acpi_ac_dir); acpi_ac_dir = NULL; } if (acpi_battery_dir) { acpi_unlock_battery_dir(acpi_battery_dir); acpi_battery_dir = NULL; } #endif } static int acpi_sbs_resume(struct acpi_device *device) { struct acpi_sbs *sbs; if (!device) return -EINVAL; sbs = device->driver_data; acpi_sbs_callback(sbs); return 0; } static struct acpi_driver acpi_sbs_driver = { .name = "sbs", .class = ACPI_SBS_CLASS, .ids = sbs_device_ids, .ops = { .add = acpi_sbs_add, .remove = acpi_sbs_remove, .resume = acpi_sbs_resume, }, }; static int __init acpi_sbs_init(void) { int result = 0; if (acpi_disabled) return -ENODEV; #ifdef CONFIG_ACPI_PROCFS_POWER acpi_ac_dir = acpi_lock_ac_dir(); if (!acpi_ac_dir) return -ENODEV; acpi_battery_dir = acpi_lock_battery_dir(); if (!acpi_battery_dir) { acpi_sbs_rmdirs(); return -ENODEV; } #endif result = acpi_bus_register_driver(&acpi_sbs_driver); if (result < 0) { acpi_sbs_rmdirs(); return -ENODEV; } return 0; } static void __exit acpi_sbs_exit(void) { acpi_bus_unregister_driver(&acpi_sbs_driver); acpi_sbs_rmdirs(); return; } module_init(acpi_sbs_init); module_exit(acpi_sbs_exit);
gpl-2.0
1119553797/linux-sunxi
drivers/media/rc/keymaps/rc-rc6-mce.c
4901
3279
/* rc-rc6-mce.c - Keytable for Windows Media Center RC-6 remotes for use * with the Media Center Edition eHome Infrared Transceiver. * * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com> * * See http://mediacenterguides.com/book/export/html/31 for details on * key mappings. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <media/rc-map.h> #include <linux/module.h> static struct rc_map_table rc6_mce[] = { { 0x800f0400, KEY_NUMERIC_0 }, { 0x800f0401, KEY_NUMERIC_1 }, { 0x800f0402, KEY_NUMERIC_2 }, { 0x800f0403, KEY_NUMERIC_3 }, { 0x800f0404, KEY_NUMERIC_4 }, { 0x800f0405, KEY_NUMERIC_5 }, { 0x800f0406, KEY_NUMERIC_6 }, { 0x800f0407, KEY_NUMERIC_7 }, { 0x800f0408, KEY_NUMERIC_8 }, { 0x800f0409, KEY_NUMERIC_9 }, { 0x800f040a, KEY_DELETE }, { 0x800f040b, KEY_ENTER }, { 0x800f040c, KEY_SLEEP }, /* Formerly PC Power */ { 0x800f040d, KEY_MEDIA }, /* Windows MCE button */ { 0x800f040e, KEY_MUTE }, { 0x800f040f, KEY_INFO }, { 0x800f0410, KEY_VOLUMEUP }, { 0x800f0411, KEY_VOLUMEDOWN }, { 0x800f0412, KEY_CHANNELUP }, { 0x800f0413, KEY_CHANNELDOWN }, { 0x800f0414, KEY_FASTFORWARD }, { 0x800f0415, KEY_REWIND }, { 0x800f0416, KEY_PLAY }, { 0x800f0417, KEY_RECORD }, { 0x800f0418, KEY_PAUSE }, { 0x800f0419, KEY_STOP }, { 0x800f041a, KEY_NEXT }, { 0x800f041b, KEY_PREVIOUS }, { 0x800f041c, KEY_NUMERIC_POUND }, { 0x800f041d, KEY_NUMERIC_STAR }, { 0x800f041e, KEY_UP }, { 0x800f041f, KEY_DOWN }, { 0x800f0420, KEY_LEFT }, { 0x800f0421, KEY_RIGHT }, { 0x800f0422, KEY_OK }, { 0x800f0423, KEY_EXIT }, { 0x800f0424, KEY_DVD }, { 0x800f0425, KEY_TUNER }, /* LiveTV */ { 0x800f0426, KEY_EPG }, /* Guide */ { 0x800f0427, KEY_ZOOM }, /* Aspect */ { 0x800f0432, KEY_MODE }, /* Visualization */ { 0x800f0433, KEY_PRESENTATION }, /* Slide Show */ { 0x800f0434, KEY_EJECTCD }, { 0x800f043a, 
KEY_BRIGHTNESSUP }, { 0x800f0446, KEY_TV }, { 0x800f0447, KEY_AUDIO }, /* My Music */ { 0x800f0448, KEY_PVR }, /* RecordedTV */ { 0x800f0449, KEY_CAMERA }, { 0x800f044a, KEY_VIDEO }, { 0x800f044c, KEY_LANGUAGE }, { 0x800f044d, KEY_TITLE }, { 0x800f044e, KEY_PRINT }, /* Print - HP OEM version of remote */ { 0x800f0450, KEY_RADIO }, { 0x800f045a, KEY_SUBTITLE }, /* Caption/Teletext */ { 0x800f045b, KEY_RED }, { 0x800f045c, KEY_GREEN }, { 0x800f045d, KEY_YELLOW }, { 0x800f045e, KEY_BLUE }, { 0x800f0465, KEY_POWER2 }, /* TV Power */ { 0x800f046e, KEY_PLAYPAUSE }, { 0x800f046f, KEY_PLAYER }, /* Start media application (NEW) */ { 0x800f0480, KEY_BRIGHTNESSDOWN }, { 0x800f0481, KEY_PLAYPAUSE }, }; static struct rc_map_list rc6_mce_map = { .map = { .scan = rc6_mce, .size = ARRAY_SIZE(rc6_mce), .rc_type = RC_TYPE_RC6, .name = RC_MAP_RC6_MCE, } }; static int __init init_rc_map_rc6_mce(void) { return rc_map_register(&rc6_mce_map); } static void __exit exit_rc_map_rc6_mce(void) { rc_map_unregister(&rc6_mce_map); } module_init(init_rc_map_rc6_mce) module_exit(exit_rc_map_rc6_mce) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
gpl-2.0
finnq/android_kernel_lge_g3
drivers/gpu/drm/radeon/r600_blit.c
5669
22406
/* * Copyright 2009 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Alex Deucher <alexander.deucher@amd.com> */ #include "drmP.h" #include "drm.h" #include "radeon_drm.h" #include "radeon_drv.h" #include "r600_blit_shaders.h" #define DI_PT_RECTLIST 0x11 #define DI_INDEX_SIZE_16_BIT 0x0 #define DI_SRC_SEL_AUTO_INDEX 0x2 #define FMT_8 0x1 #define FMT_5_6_5 0x8 #define FMT_8_8_8_8 0x1a #define COLOR_8 0x1 #define COLOR_5_6_5 0x8 #define COLOR_8_8_8_8 0x1a static void set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr) { u32 cb_color_info; int pitch, slice; RING_LOCALS; DRM_DEBUG("\n"); h = ALIGN(h, 8); if (h < 8) h = 8; cb_color_info = ((format << 2) | (1 << 27)); pitch = (w / 8) - 1; slice = ((w * h) / 64) - 1; if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) && ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) { BEGIN_RING(21 + 2); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(gpu_addr >> 8); OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0)); OUT_RING(2 << 0); } else { BEGIN_RING(21); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(gpu_addr >> 8); } OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_SIZE - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING((pitch << 0) | (slice << 10)); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_VIEW - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_INFO - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(cb_color_info); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_TILE - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_FRAG - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_CB_COLOR0_MASK 
- R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); ADVANCE_RING(); } static void cp_set_surface_sync(drm_radeon_private_t *dev_priv, u32 sync_type, u32 size, u64 mc_addr) { u32 cp_coher_size; RING_LOCALS; DRM_DEBUG("\n"); if (size == 0xffffffff) cp_coher_size = 0xffffffff; else cp_coher_size = ((size + 255) >> 8); BEGIN_RING(5); OUT_RING(CP_PACKET3(R600_IT_SURFACE_SYNC, 3)); OUT_RING(sync_type); OUT_RING(cp_coher_size); OUT_RING((mc_addr >> 8)); OUT_RING(10); /* poll interval */ ADVANCE_RING(); } static void set_shaders(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; u64 gpu_addr; int i; u32 *vs, *ps; uint32_t sq_pgm_resources; RING_LOCALS; DRM_DEBUG("\n"); /* load shaders */ vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset); ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); for (i = 0; i < r6xx_vs_size; i++) vs[i] = cpu_to_le32(r6xx_vs[i]); for (i = 0; i < r6xx_ps_size; i++) ps[i] = cpu_to_le32(r6xx_ps[i]); dev_priv->blit_vb->used = 512; gpu_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset; /* setup shader regs */ sq_pgm_resources = (1 << 0); BEGIN_RING(9 + 12); /* VS */ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_START_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(gpu_addr >> 8); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_RESOURCES_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(sq_pgm_resources); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_CF_OFFSET_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); /* PS */ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_START_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING((gpu_addr + 256) >> 8); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_RESOURCES_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(sq_pgm_resources | (1 << 28)); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 
1)); OUT_RING((R600_SQ_PGM_EXPORTS_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(2); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1)); OUT_RING((R600_SQ_PGM_CF_OFFSET_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING(0); ADVANCE_RING(); cp_set_surface_sync(dev_priv, R600_SH_ACTION_ENA, 512, gpu_addr); } static void set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr) { uint32_t sq_vtx_constant_word2; RING_LOCALS; DRM_DEBUG("\n"); sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8)); #ifdef __BIG_ENDIAN sq_vtx_constant_word2 |= (2 << 30); #endif BEGIN_RING(9); OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); OUT_RING(0x460); OUT_RING(gpu_addr & 0xffffffff); OUT_RING(48 - 1); OUT_RING(sq_vtx_constant_word2); OUT_RING(1 << 0); OUT_RING(0); OUT_RING(0); OUT_RING(R600_SQ_TEX_VTX_VALID_BUFFER << 30); ADVANCE_RING(); if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)) cp_set_surface_sync(dev_priv, R600_TC_ACTION_ENA, 48, gpu_addr); else cp_set_surface_sync(dev_priv, R600_VC_ACTION_ENA, 48, gpu_addr); } static void set_tex_resource(drm_radeon_private_t *dev_priv, int format, int w, int h, int pitch, u64 gpu_addr) { uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4; RING_LOCALS; DRM_DEBUG("\n"); if (h < 1) h = 1; sq_tex_resource_word0 = (1 << 0); sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | ((w - 1) << 19)); sq_tex_resource_word1 = (format << 26); sq_tex_resource_word1 |= ((h - 1) << 0); sq_tex_resource_word4 = ((1 << 14) | (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25)); BEGIN_RING(9); OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); OUT_RING(0); OUT_RING(sq_tex_resource_word0); OUT_RING(sq_tex_resource_word1); OUT_RING(gpu_addr >> 8); OUT_RING(gpu_addr >> 8); 
OUT_RING(sq_tex_resource_word4); OUT_RING(0); OUT_RING(R600_SQ_TEX_VTX_VALID_TEXTURE << 30); ADVANCE_RING(); } static void set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2) { RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(12); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2)); OUT_RING((R600_PA_SC_SCREEN_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING((x1 << 0) | (y1 << 16)); OUT_RING((x2 << 0) | (y2 << 16)); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2)); OUT_RING((R600_PA_SC_GENERIC_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31)); OUT_RING((x2 << 0) | (y2 << 16)); OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2)); OUT_RING((R600_PA_SC_WINDOW_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2); OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31)); OUT_RING((x2 << 0) | (y2 << 16)); ADVANCE_RING(); } static void draw_auto(drm_radeon_private_t *dev_priv) { RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(10); OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); OUT_RING((R600_VGT_PRIMITIVE_TYPE - R600_SET_CONFIG_REG_OFFSET) >> 2); OUT_RING(DI_PT_RECTLIST); OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0)); #ifdef __BIG_ENDIAN OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT); #else OUT_RING(DI_INDEX_SIZE_16_BIT); #endif OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0)); OUT_RING(1); OUT_RING(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1)); OUT_RING(3); OUT_RING(DI_SRC_SEL_AUTO_INDEX); ADVANCE_RING(); COMMIT_RING(); } static void set_default_state(drm_radeon_private_t *dev_priv) { int i; u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2; u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2; int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs; int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; RING_LOCALS; switch ((dev_priv->flags & RADEON_FAMILY_MASK)) { case CHIP_R600: 
num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV630: case CHIP_RV635: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 144; num_vs_threads = 40; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV610: case CHIP_RV620: case CHIP_RS780: case CHIP_RS880: default: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV670: num_ps_gprs = 144; num_vs_gprs = 40; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV770: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 188; num_vs_threads = 60; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 256; num_vs_stack_entries = 256; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV730: case CHIP_RV740: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 188; num_vs_threads = 60; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV710: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; 
num_es_gprs = 0; num_ps_threads = 144; num_vs_threads = 48; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; } if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)) sq_config = 0; else sq_config = R600_VC_ENABLE; sq_config |= (R600_DX9_CONSTS | R600_ALU_INST_PREFER_VECTOR | R600_PS_PRIO(0) | R600_VS_PRIO(1) | R600_GS_PRIO(2) | R600_ES_PRIO(3)); sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(num_ps_gprs) | R600_NUM_VS_GPRS(num_vs_gprs) | R600_NUM_CLAUSE_TEMP_GPRS(num_temp_gprs)); sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(num_gs_gprs) | R600_NUM_ES_GPRS(num_es_gprs)); sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(num_ps_threads) | R600_NUM_VS_THREADS(num_vs_threads) | R600_NUM_GS_THREADS(num_gs_threads) | R600_NUM_ES_THREADS(num_es_threads)); sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(num_ps_stack_entries) | R600_NUM_VS_STACK_ENTRIES(num_vs_stack_entries)); sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(num_gs_stack_entries) | R600_NUM_ES_STACK_ENTRIES(num_es_stack_entries)); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) { BEGIN_RING(r7xx_default_size + 10); for (i = 0; i < r7xx_default_size; i++) OUT_RING(r7xx_default_state[i]); } else { BEGIN_RING(r6xx_default_size + 10); for (i = 0; i < r6xx_default_size; i++) OUT_RING(r6xx_default_state[i]); } OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0)); OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT); /* SQ config */ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 6)); OUT_RING((R600_SQ_CONFIG - R600_SET_CONFIG_REG_OFFSET) >> 2); OUT_RING(sq_config); OUT_RING(sq_gpr_resource_mgmt_1); OUT_RING(sq_gpr_resource_mgmt_2); OUT_RING(sq_thread_resource_mgmt); 
OUT_RING(sq_stack_resource_mgmt_1); OUT_RING(sq_stack_resource_mgmt_2); ADVANCE_RING(); } static uint32_t i2f(uint32_t input) { u32 result, i, exponent, fraction; if ((input & 0x3fff) == 0) result = 0; /* 0 is a special case */ else { exponent = 140; /* exponent biased by 127; */ fraction = (input & 0x3fff) << 10; /* cheat and only handle numbers below 2^^15 */ for (i = 0; i < 14; i++) { if (fraction & 0x800000) break; else { fraction = fraction << 1; /* keep shifting left until top bit = 1 */ exponent = exponent - 1; } } result = exponent << 23 | (fraction & 0x7fffff); /* mask off top bit; assumed 1 */ } return result; } static int r600_nomm_get_vb(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; dev_priv->blit_vb = radeon_freelist_get(dev); if (!dev_priv->blit_vb) { DRM_ERROR("Unable to allocate vertex buffer for blit\n"); return -EAGAIN; } return 0; } static void r600_nomm_put_vb(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; dev_priv->blit_vb->used = 0; radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb); } static void *r600_nomm_get_vb_ptr(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; return (((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + dev_priv->blit_vb->used)); } int r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; int ret; DRM_DEBUG("\n"); ret = r600_nomm_get_vb(dev); if (ret) return ret; dev_priv->blit_vb->file_priv = file_priv; set_default_state(dev_priv); set_shaders(dev); return 0; } void r600_done_blit_copy(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(5); OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0)); OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT); /* wait for 3D idle clean */ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); OUT_RING((R600_WAIT_UNTIL - 
R600_SET_CONFIG_REG_OFFSET) >> 2); OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN); ADVANCE_RING(); COMMIT_RING(); r600_nomm_put_vb(dev); } void r600_blit_copy(struct drm_device *dev, uint64_t src_gpu_addr, uint64_t dst_gpu_addr, int size_bytes) { drm_radeon_private_t *dev_priv = dev->dev_private; int max_bytes; u64 vb_addr; u32 *vb; vb = r600_nomm_get_vb_ptr(dev); if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { max_bytes = 8192; while (size_bytes) { int cur_size = size_bytes; int src_x = src_gpu_addr & 255; int dst_x = dst_gpu_addr & 255; int h = 1; src_gpu_addr = src_gpu_addr & ~255; dst_gpu_addr = dst_gpu_addr & ~255; if (!src_x && !dst_x) { h = (cur_size / max_bytes); if (h > 8192) h = 8192; if (h == 0) h = 1; else cur_size = max_bytes; } else { if (cur_size > max_bytes) cur_size = max_bytes; if (cur_size > (max_bytes - dst_x)) cur_size = (max_bytes - dst_x); if (cur_size > (max_bytes - src_x)) cur_size = (max_bytes - src_x); } if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { r600_nomm_put_vb(dev); r600_nomm_get_vb(dev); if (!dev_priv->blit_vb) return; set_shaders(dev); vb = r600_nomm_get_vb_ptr(dev); } vb[0] = i2f(dst_x); vb[1] = 0; vb[2] = i2f(src_x); vb[3] = 0; vb[4] = i2f(dst_x); vb[5] = i2f(h); vb[6] = i2f(src_x); vb[7] = i2f(h); vb[8] = i2f(dst_x + cur_size); vb[9] = i2f(h); vb[10] = i2f(src_x + cur_size); vb[11] = i2f(h); /* src */ set_tex_resource(dev_priv, FMT_8, src_x + cur_size, h, src_x + cur_size, src_gpu_addr); cp_set_surface_sync(dev_priv, R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); /* dst */ set_render_target(dev_priv, COLOR_8, dst_x + cur_size, h, dst_gpu_addr); /* scissors */ set_scissors(dev_priv, dst_x, 0, dst_x + cur_size, h); /* Vertex buffer setup */ vb_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset + dev_priv->blit_vb->used; set_vtx_resource(dev_priv, vb_addr); /* draw */ draw_auto(dev_priv); cp_set_surface_sync(dev_priv, R600_CB_ACTION_ENA | 
R600_CB0_DEST_BASE_ENA, cur_size * h, dst_gpu_addr); vb += 12; dev_priv->blit_vb->used += 12 * 4; src_gpu_addr += cur_size * h; dst_gpu_addr += cur_size * h; size_bytes -= cur_size * h; } } else { max_bytes = 8192 * 4; while (size_bytes) { int cur_size = size_bytes; int src_x = (src_gpu_addr & 255); int dst_x = (dst_gpu_addr & 255); int h = 1; src_gpu_addr = src_gpu_addr & ~255; dst_gpu_addr = dst_gpu_addr & ~255; if (!src_x && !dst_x) { h = (cur_size / max_bytes); if (h > 8192) h = 8192; if (h == 0) h = 1; else cur_size = max_bytes; } else { if (cur_size > max_bytes) cur_size = max_bytes; if (cur_size > (max_bytes - dst_x)) cur_size = (max_bytes - dst_x); if (cur_size > (max_bytes - src_x)) cur_size = (max_bytes - src_x); } if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { r600_nomm_put_vb(dev); r600_nomm_get_vb(dev); if (!dev_priv->blit_vb) return; set_shaders(dev); vb = r600_nomm_get_vb_ptr(dev); } vb[0] = i2f(dst_x / 4); vb[1] = 0; vb[2] = i2f(src_x / 4); vb[3] = 0; vb[4] = i2f(dst_x / 4); vb[5] = i2f(h); vb[6] = i2f(src_x / 4); vb[7] = i2f(h); vb[8] = i2f((dst_x + cur_size) / 4); vb[9] = i2f(h); vb[10] = i2f((src_x + cur_size) / 4); vb[11] = i2f(h); /* src */ set_tex_resource(dev_priv, FMT_8_8_8_8, (src_x + cur_size) / 4, h, (src_x + cur_size) / 4, src_gpu_addr); cp_set_surface_sync(dev_priv, R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); /* dst */ set_render_target(dev_priv, COLOR_8_8_8_8, (dst_x + cur_size) / 4, h, dst_gpu_addr); /* scissors */ set_scissors(dev_priv, (dst_x / 4), 0, (dst_x + cur_size / 4), h); /* Vertex buffer setup */ vb_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset + dev_priv->blit_vb->used; set_vtx_resource(dev_priv, vb_addr); /* draw */ draw_auto(dev_priv); cp_set_surface_sync(dev_priv, R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA, cur_size * h, dst_gpu_addr); vb += 12; dev_priv->blit_vb->used += 12 * 4; src_gpu_addr += cur_size * h; dst_gpu_addr += cur_size * h; size_bytes -= cur_size * h; } 
} } void r600_blit_swap(struct drm_device *dev, uint64_t src_gpu_addr, uint64_t dst_gpu_addr, int sx, int sy, int dx, int dy, int w, int h, int src_pitch, int dst_pitch, int cpp) { drm_radeon_private_t *dev_priv = dev->dev_private; int cb_format, tex_format; int sx2, sy2, dx2, dy2; u64 vb_addr; u32 *vb; if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { r600_nomm_put_vb(dev); r600_nomm_get_vb(dev); if (!dev_priv->blit_vb) return; set_shaders(dev); } vb = r600_nomm_get_vb_ptr(dev); sx2 = sx + w; sy2 = sy + h; dx2 = dx + w; dy2 = dy + h; vb[0] = i2f(dx); vb[1] = i2f(dy); vb[2] = i2f(sx); vb[3] = i2f(sy); vb[4] = i2f(dx); vb[5] = i2f(dy2); vb[6] = i2f(sx); vb[7] = i2f(sy2); vb[8] = i2f(dx2); vb[9] = i2f(dy2); vb[10] = i2f(sx2); vb[11] = i2f(sy2); switch(cpp) { case 4: cb_format = COLOR_8_8_8_8; tex_format = FMT_8_8_8_8; break; case 2: cb_format = COLOR_5_6_5; tex_format = FMT_5_6_5; break; default: cb_format = COLOR_8; tex_format = FMT_8; break; } /* src */ set_tex_resource(dev_priv, tex_format, src_pitch / cpp, sy2, src_pitch / cpp, src_gpu_addr); cp_set_surface_sync(dev_priv, R600_TC_ACTION_ENA, src_pitch * sy2, src_gpu_addr); /* dst */ set_render_target(dev_priv, cb_format, dst_pitch / cpp, dy2, dst_gpu_addr); /* scissors */ set_scissors(dev_priv, dx, dy, dx2, dy2); /* Vertex buffer setup */ vb_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset + dev_priv->blit_vb->used; set_vtx_resource(dev_priv, vb_addr); /* draw */ draw_auto(dev_priv); cp_set_surface_sync(dev_priv, R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA, dst_pitch * dy2, dst_gpu_addr); dev_priv->blit_vb->used += 12 * 4; }
gpl-2.0
ch33kybutt/D3v1l-kernel
sound/usb/mixer_maps.c
7717
12114
/* * Additional mixer mapping * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ struct usbmix_dB_map { u32 min; u32 max; }; struct usbmix_name_map { int id; const char *name; int control; struct usbmix_dB_map *dB; }; struct usbmix_selector_map { int id; int count; const char **names; }; struct usbmix_ctl_map { u32 id; const struct usbmix_name_map *map; const struct usbmix_selector_map *selector_map; int ignore_ctl_error; }; /* * USB control mappers for SB Exitigy */ /* * Topology of SB Extigy (see on the wide screen :) USB_IN[1] --->FU[2]------------------------------+->MU[16]-->PU[17]-+->FU[18]--+->EU[27]--+->EU[21]-->FU[22]--+->FU[23] > Dig_OUT[24] ^ | | | | USB_IN[3] -+->SU[5]-->FU[6]--+->MU[14] ->PU[15]->+ | | | +->FU[25] > Dig_OUT[26] ^ ^ | | | | Dig_IN[4] -+ | | | | +->FU[28]---------------------> Spk_OUT[19] | | | | Lin-IN[7] -+-->FU[8]---------+ | | +----------------------------------------> Hph_OUT[20] | | | Mic-IN[9] --+->FU[10]----------------------------+ | || | || +----------------------------------------------------+ VV V ++--+->SU[11]-->FU[12] --------------------------------------------------------------------------------------> USB_OUT[13] */ static struct usbmix_name_map extigy_map[] = { /* 1: IT pcm */ { 2, "PCM Playback" }, /* FU */ /* 3: IT pcm */ 
/* 4: IT digital in */ { 5, NULL }, /* DISABLED: this seems to be bogus on some firmware */ { 6, "Digital In" }, /* FU */ /* 7: IT line */ { 8, "Line Playback" }, /* FU */ /* 9: IT mic */ { 10, "Mic Playback" }, /* FU */ { 11, "Capture Source" }, /* SU */ { 12, "Capture" }, /* FU */ /* 13: OT pcm capture */ /* 14: MU (w/o controls) */ /* 15: PU (3D enh) */ /* 16: MU (w/o controls) */ { 17, NULL, 1 }, /* DISABLED: PU-switch (any effect?) */ { 17, "Channel Routing", 2 }, /* PU: mode select */ { 18, "Tone Control - Bass", UAC_FU_BASS }, /* FU */ { 18, "Tone Control - Treble", UAC_FU_TREBLE }, /* FU */ { 18, "Master Playback" }, /* FU; others */ /* 19: OT speaker */ /* 20: OT headphone */ { 21, NULL }, /* DISABLED: EU (for what?) */ { 22, "Digital Out Playback" }, /* FU */ { 23, "Digital Out1 Playback" }, /* FU */ /* FIXME: corresponds to 24 */ /* 24: OT digital out */ { 25, "IEC958 Optical Playback" }, /* FU */ { 26, "IEC958 Optical Playback" }, /* OT */ { 27, NULL }, /* DISABLED: EU (for what?) */ /* 28: FU speaker (mute) */ { 29, NULL }, /* Digital Input Playback Source? */ { 0 } /* terminator */ }; /* Sound Blaster MP3+ controls mapping * The default mixer channels have totally misleading names, * e.g. 
no Master and fake PCM volume
 *			Pavel Mihaylov <bin@bash.info>
 */

/* dB ranges below are guesses, not measured against real hardware. */
static struct usbmix_dB_map mp3plus_dB_1 = {-4781, 0};	/* just a guess */
static struct usbmix_dB_map mp3plus_dB_2 = {-1781, 618};	/* just a guess */

/* Sound Blaster MP3+: rename the misleading default control names. */
static struct usbmix_name_map mp3plus_map[] = {
	/* 1: IT pcm */
	/* 2: IT mic */
	/* 3: IT line */
	/* 4: IT digital in */
	/* 5: OT digital out */
	/* 6: OT speaker */
	/* 7: OT pcm capture */
	{ 8, "Capture Source" }, /* FU, default PCM Capture Source */
		/* (Mic, Input 1 = Line input, Input 2 = Optical input) */
	{ 9, "Master Playback" }, /* FU, default Speaker 1 */
	/* { 10, "Mic Capture", 1 }, */ /* FU, Mic Capture */
	{ 10, /* "Mic Capture", */ NULL, 2, .dB = &mp3plus_dB_2 },
		/* FU, Mic Capture */
	{ 10, "Mic Boost", 7 }, /* FU, default Auto Gain Input */
	{ 11, "Line Capture", .dB = &mp3plus_dB_2 }, /* FU, default PCM Capture */
	{ 12, "Digital In Playback" }, /* FU, default PCM 1 */
	{ 13, /* "Mic Playback", */ .dB = &mp3plus_dB_1 },
		/* FU, default Mic Playback */
	{ 14, "Line Playback", .dB = &mp3plus_dB_1 }, /* FU, default Speaker */
	/* 15: MU */
	{ 0 } /* terminator */
};

/* Topology of SB Audigy 2 NX
 * (unit numbers in brackets; IT/OT = input/output terminal, FU = feature
 *  unit, MU = mixer unit, SU = selector unit, EU = effect unit)

	  +----------------------------->EU[27]--+
	  |                                      v
	  | +----------------------------------->SU[29]---->FU[22]-->Dig_OUT[24]
	  | |                                    ^
USB_IN[1]-+------------+              +->EU[17]->+->FU[11]-+
	  |            v              |          v         |
Dig_IN[4]---+->FU[6]-->MU[16]->FU[18]-+->EU[21]->SU[31]----->FU[30]->Hph_OUT[20]
	  |            ^              |                    |
Lin_IN[7]-+--->FU[8]---+              +->EU[23]->FU[28]------------->Spk_OUT[19]
   |                                                       v
   +--->FU[12]------------------------------------->SU[14]--->USB_OUT[15]
   |                                                ^
   +->FU[13]--------------------------------------+
*/
static struct usbmix_name_map audigy2nx_map[] = {
	/* 1: IT pcm playback */
	/* 4: IT digital in */
	{ 6, "Digital In Playback" }, /* FU */
	/* 7: IT line in */
	{ 8, "Line Playback" }, /* FU */
	{ 11, "What-U-Hear Capture" }, /* FU */
	{ 12, "Line Capture" }, /* FU */
	{ 13, "Digital In Capture" }, /* FU */
	{ 14, "Capture Source" }, /* SU */
	/* 15: OT pcm capture */
	/* 16: MU w/o controls */
	{ 17, NULL }, /* DISABLED: EU (for what?) */
	{ 18, "Master Playback" }, /* FU */
	/* 19: OT speaker */
	/* 20: OT headphone */
	{ 21, NULL }, /* DISABLED: EU (for what?) */
	{ 22, "Digital Out Playback" }, /* FU */
	{ 23, NULL }, /* DISABLED: EU (for what?) */
	/* 24: OT digital out */
	{ 27, NULL }, /* DISABLED: EU (for what?) */
	{ 28, "Speaker Playback" }, /* FU */
	{ 29, "Digital Out Source" }, /* SU */
	{ 30, "Headphone Playback" }, /* FU */
	{ 31, "Headphone Source" }, /* SU */
	{ 0 } /* terminator */
};

/* Names for the selector-unit choices of the Audigy 2 NX above. */
static struct usbmix_selector_map audigy2nx_selectors[] = {
	{
		.id = 14, /* Capture Source */
		.count = 3,
		.names = (const char*[]) {"Line", "Digital In", "What-U-Hear"}
	},
	{
		.id = 29, /* Digital Out Source */
		.count = 3,
		.names = (const char*[]) {"Front", "PCM", "Digital In"}
	},
	{
		.id = 31, /* Headphone Source */
		.count = 2,
		.names = (const char*[]) {"Front", "Side"}
	},
	{ 0 } /* terminator */
};

/* Creative SoundBlaster Live! 24-bit External */
static struct usbmix_name_map live24ext_map[] = {
	/* 2: PCM Playback Volume */
	{ 5, "Mic Capture" }, /* FU, default PCM Capture Volume */
	{ 0 } /* terminator */
};

/* LineX FM Transmitter entry - needed to bypass controls bug */
static struct usbmix_name_map linex_map[] = {
	/* 1: IT pcm */
	/* 2: OT Speaker */
	{ 3, "Master" }, /* FU: master volume - left / right / mute */
	{ 0 } /* terminator */
};

static struct usbmix_name_map maya44_map[] = {
	/* 1: IT line */
	{ 2, "Line Playback" }, /* FU */
	/* 3: IT line */
	{ 4, "Line Playback" }, /* FU */
	/* 5: IT pcm playback */
	/* 6: MU */
	{ 7, "Master Playback" }, /* FU */
	/* 8: OT speaker */
	/* 9: IT line */
	{ 10, "Line Capture" }, /* FU */
	/* 11: MU */
	/* 12: OT pcm capture */
	{ }
};

/* Section "justlink_map" below added by James Courtier-Dutton
 * <James@superbug.demon.co.uk>
 * sourced from Maplin Electronics (http://www.maplin.co.uk), part number A56AK
 * Part has 2 connectors that act as a single output. (TOSLINK Optical for
 * digital out, and 3.5mm Jack for Analogue out.)
 * The USB Mixer publishes a Microphone and extra Volume controls for it,
 * but none exist on the device, so this map removes all unwanted sliders
 * from alsamixer.
 */
static struct usbmix_name_map justlink_map[] = {
	/* 1: IT pcm playback */
	/* 2: Not present */
	{ 3, NULL}, /* IT mic (No mic input on device) */
	/* 4: Not present */
	/* 5: OT speaker */
	/* 6: OT pcm capture */
	{ 7, "Master Playback" }, /* Mute/volume for speaker */
	{ 8, NULL }, /* Capture Switch (No capture inputs on device) */
	{ 9, NULL }, /* Capture Mute/volume (No capture inputs on device) */
	/* 0xa: Not present */
	/* 0xb: MU (w/o controls) */
	{ 0xc, NULL }, /* Mic feedback Mute/volume (No capture inputs on device) */
	{ 0 } /* terminator */
};

/* TerraTec Aureon 5.1 MkII USB */
static struct usbmix_name_map aureon_51_2_map[] = {
	/* 1: IT USB */
	/* 2: IT Mic */
	/* 3: IT Line */
	/* 4: IT SPDIF */
	/* 5: OT SPDIF */
	/* 6: OT Speaker */
	/* 7: OT USB */
	{ 8, "Capture Source" }, /* SU */
	{ 9, "Master Playback" }, /* FU */
	{ 10, "Mic Capture" }, /* FU */
	{ 11, "Line Capture" }, /* FU */
	{ 12, "IEC958 In Capture" }, /* FU */
	{ 13, "Mic Playback" }, /* FU */
	{ 14, "Line Playback" }, /* FU */
	/* 15: MU */
	{} /* terminator */
};

static struct usbmix_name_map scratch_live_map[] = {
	/* 1: IT Line 1 (USB streaming) */
	/* 2: OT Line 1 (Speaker) */
	/* 3: IT Line 1 (Line connector) */
	{ 4, "Line 1 In" }, /* FU */
	/* 5: OT Line 1 (USB streaming) */
	/* 6: IT Line 2 (USB streaming) */
	/* 7: OT Line 2 (Speaker) */
	/* 8: IT Line 2 (Line connector) */
	{ 9, "Line 2 In" }, /* FU */
	/* 10: OT Line 2 (USB streaming) */
	/* 11: IT Mic (Line connector) */
	/* 12: OT Mic (USB streaming) */
	{ 0 } /* terminator */
};

/* "Gamesurround Muse Pocket LT" looks the same as "Sound Blaster MP3+";
 * the most important difference is SU[8]: it should be set to
 * "Capture Source" to make alsamixer and PA work properly.
 * FIXME: or mp3plus_map should use "Capture Source" too,
 * so these maps can be merged.
 */
static struct usbmix_name_map hercules_usb51_map[] = {
	{ 8, "Capture Source" },	/* SU, default "PCM Capture Source" */
	{ 9, "Master Playback" },	/* FU, default "Speaker Playback" */
	{ 10, "Mic Boost", 7 },		/* FU, default "Auto Gain Input" */
	{ 11, "Line Capture" },		/* FU, default "PCM Capture" */
	{ 13, "Mic Bypass Playback" },	/* FU, default "Mic Playback" */
	{ 14, "Line Bypass Playback" },	/* FU, default "Line Playback" */
	{ 0 }				/* terminator */
};

/*
 * Control map entries: match a device by USB vendor/product ID and attach
 * the per-device rename/selector maps above.  ignore_ctl_error suppresses
 * errors from devices with broken control requests.
 * (extigy_map is defined earlier in this file, outside this excerpt.)
 */
static struct usbmix_ctl_map usbmix_ctl_maps[] = {
	{
		.id = USB_ID(0x041e, 0x3000),
		.map = extigy_map,
		.ignore_ctl_error = 1,
	},
	{
		.id = USB_ID(0x041e, 0x3010),
		.map = mp3plus_map,
	},
	{
		.id = USB_ID(0x041e, 0x3020),
		.map = audigy2nx_map,
		.selector_map = audigy2nx_selectors,
	},
	{
		.id = USB_ID(0x041e, 0x3040),
		.map = live24ext_map,
	},
	{
		.id = USB_ID(0x041e, 0x3048),
		.map = audigy2nx_map,
		.selector_map = audigy2nx_selectors,
	},
	{	/* Hercules DJ Console (Windows Edition) */
		.id = USB_ID(0x06f8, 0xb000),
		.ignore_ctl_error = 1,
	},
	{	/* Hercules DJ Console (Macintosh Edition) */
		.id = USB_ID(0x06f8, 0xd002),
		.ignore_ctl_error = 1,
	},
	{
		/* Hercules Gamesurround Muse Pocket LT
		 * (USB 5.1 Channel Audio Adapter)
		 */
		.id = USB_ID(0x06f8, 0xc000),
		.map = hercules_usb51_map,
	},
	{
		.id = USB_ID(0x08bb, 0x2702),
		.map = linex_map,
		.ignore_ctl_error = 1,
	},
	{
		.id = USB_ID(0x0a92, 0x0091),
		.map = maya44_map,
	},
	{
		.id = USB_ID(0x0c45, 0x1158),
		.map = justlink_map,
	},
	{
		.id = USB_ID(0x0ccd, 0x0028),
		.map = aureon_51_2_map,
	},
	{
		.id = USB_ID(0x13e5, 0x0001),
		.map = scratch_live_map,
		.ignore_ctl_error = 1,
	},
	{ 0 } /* terminator */
};
gpl-2.0
slz/arco-samsung-kernel-msm7x30
arch/c6x/platforms/cache.c
7973
10140
/*
 *  Copyright (C) 2011 Texas Instruments Incorporated
 *  Author: Mark Salter <msalter@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>

#include <asm/cache.h>
#include <asm/soc.h>

/*
 * Internal Memory Control Registers for caches
 * (offsets into the memory-mapped cache controller region; the base
 * address comes from the "ti,c64x+cache" device-tree node, see
 * c6x_cache_init() below)
 */
#define IMCR_CCFG	  0x0000
#define IMCR_L1PCFG	  0x0020
#define IMCR_L1PCC	  0x0024
#define IMCR_L1DCFG	  0x0040
#define IMCR_L1DCC	  0x0044
#define IMCR_L2ALLOC0	  0x2000
#define IMCR_L2ALLOC1	  0x2004
#define IMCR_L2ALLOC2	  0x2008
#define IMCR_L2ALLOC3	  0x200c
#define IMCR_L2WBAR	  0x4000
#define IMCR_L2WWC	  0x4004
#define IMCR_L2WIBAR	  0x4010
#define IMCR_L2WIWC	  0x4014
#define IMCR_L2IBAR	  0x4018
#define IMCR_L2IWC	  0x401c
#define IMCR_L1PIBAR	  0x4020
#define IMCR_L1PIWC	  0x4024
#define IMCR_L1DWIBAR	  0x4030
#define IMCR_L1DWIWC	  0x4034
#define IMCR_L1DWBAR	  0x4040
#define IMCR_L1DWWC	  0x4044
#define IMCR_L1DIBAR	  0x4048
#define IMCR_L1DIWC	  0x404c
#define IMCR_L2WB	  0x5000
#define IMCR_L2WBINV	  0x5004
#define IMCR_L2INV	  0x5008
#define IMCR_L1PINV	  0x5028
#define IMCR_L1DWB	  0x5040
#define IMCR_L1DWBINV	  0x5044
#define IMCR_L1DINV	  0x5048
#define IMCR_MAR_BASE	  0x8000
#define IMCR_MAR96_111	  0x8180
#define IMCR_MAR128_191	  0x8200
#define IMCR_MAR224_239	  0x8380
#define IMCR_L2MPFAR	  0xa000
#define IMCR_L2MPFSR	  0xa004
#define IMCR_L2MPFCR	  0xa008
#define IMCR_L2MPLK0	  0xa100
#define IMCR_L2MPLK1	  0xa104
#define IMCR_L2MPLK2	  0xa108
#define IMCR_L2MPLK3	  0xa10c
#define IMCR_L2MPLKCMD	  0xa110
#define IMCR_L2MPLKSTAT	  0xa114
#define IMCR_L2MPPA_BASE  0xa200
#define IMCR_L1PMPFAR	  0xa400
#define IMCR_L1PMPFSR	  0xa404
#define IMCR_L1PMPFCR	  0xa408
#define IMCR_L1PMPLK0	  0xa500
#define IMCR_L1PMPLK1	  0xa504
#define IMCR_L1PMPLK2	  0xa508
#define IMCR_L1PMPLK3	  0xa50c
#define IMCR_L1PMPLKCMD	  0xa510
#define IMCR_L1PMPLKSTAT  0xa514
#define IMCR_L1PMPPA_BASE 0xa600
#define IMCR_L1DMPFAR	  0xac00
#define IMCR_L1DMPFSR	  0xac04
#define IMCR_L1DMPFCR	  0xac08
#define IMCR_L1DMPLK0	  0xad00
#define IMCR_L1DMPLK1	  0xad04
#define IMCR_L1DMPLK2	  0xad08
#define IMCR_L1DMPLK3	  0xad0c
#define IMCR_L1DMPLKCMD	  0xad10
#define IMCR_L1DMPLKSTAT  0xad14
#define IMCR_L1DMPPA_BASE 0xae00
#define IMCR_L2PDWAKE0	  0xc040
#define IMCR_L2PDWAKE1	  0xc044
#define IMCR_L2PDSLEEP0	  0xc050
#define IMCR_L2PDSLEEP1	  0xc054
#define IMCR_L2PDSTAT0	  0xc060
#define IMCR_L2PDSTAT1	  0xc064

/*
 * CCFG register values and bits
 */
#define L2MODE_0K_CACHE   0x0
#define L2MODE_32K_CACHE  0x1
#define L2MODE_64K_CACHE  0x2
#define L2MODE_128K_CACHE 0x3
#define L2MODE_256K_CACHE 0x7

#define L2PRIO_URGENT     0x0
#define L2PRIO_HIGH       0x1
#define L2PRIO_MEDIUM     0x2
#define L2PRIO_LOW        0x3

/* NOTE(review): these two comments look swapped relative to the macro
 * names (ID vs IP) — verify against the C64x+ megamodule reference guide.
 * Neither macro is used in this file. */
#define CCFG_ID           0x100	/* Invalidate L1P bit */
#define CCFG_IP           0x200	/* Invalidate L1D bit */

/* Mapped base of the cache controller registers, set by c6x_cache_init(). */
static void __iomem *cache_base;

/*
 * L1 & L2 caches generic functions
 *
 * imcr_set() writes a register and immediately reads it back; the
 * read-back forces the posted write to complete before we continue.
 */
#define imcr_get(reg) soc_readl(cache_base + (reg))
#define imcr_set(reg, value)					\
	do {							\
		soc_writel((value), cache_base + (reg));	\
		soc_readl(cache_base + (reg));			\
	} while (0)

/* Spin until the given word-count register drains to zero, i.e. the
 * block operation previously kicked off on it has finished. */
static void cache_block_operation_wait(unsigned int wc_reg)
{
	/* Wait for completion */
	while (imcr_get(wc_reg))
		cpu_relax();
}

/* Serializes programming of the BAR/WC register pairs below. */
static DEFINE_SPINLOCK(cache_lock);

/*
 * Generic function to perform a block cache operation as
 * invalidate or writeback/invalidate.
 *
 * The hardware takes a base address (bar_reg) and a word count
 * (wc_reg, at most 0xffff words per programming), so large ranges are
 * processed in 0xffff-word chunks.  Writing the word count starts the
 * operation; this variant waits for each chunk to complete.
 */
static void cache_block_operation(unsigned int *start,
				  unsigned int *end,
				  unsigned int bar_reg,
				  unsigned int wc_reg)
{
	unsigned long flags;
	/* Word count of the L2-line-aligned range covering [start, end). */
	unsigned int wcnt =
		(L2_CACHE_ALIGN_CNT((unsigned int) end)
		 - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2;
	unsigned int wc = 0;

	for (; wcnt; wcnt -= wc, start += wc) {
loop:
		spin_lock_irqsave(&cache_lock, flags);

		/*
		 * If another cache operation is occurring
		 */
		if (unlikely(imcr_get(wc_reg))) {
			spin_unlock_irqrestore(&cache_lock, flags);

			/* Wait for previous operation completion */
			cache_block_operation_wait(wc_reg);

			/* Try again */
			goto loop;
		}

		imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start));

		if (wcnt > 0xffff)
			wc = 0xffff;
		else
			wc = wcnt;

		/* Set word count value in the WC register */
		imcr_set(wc_reg, wc & 0xffff);

		spin_unlock_irqrestore(&cache_lock, flags);

		/* Wait for completion */
		cache_block_operation_wait(wc_reg);
	}
}

/*
 * Same as cache_block_operation() but does not wait for the final chunk
 * to finish (intermediate chunks must still complete before the next
 * chunk can be programmed).  Note this variant does not retry if the
 * hardware is already busy when a chunk is programmed.
 */
static void cache_block_operation_nowait(unsigned int *start,
					 unsigned int *end,
					 unsigned int bar_reg,
					 unsigned int wc_reg)
{
	unsigned long flags;
	unsigned int wcnt =
		(L2_CACHE_ALIGN_CNT((unsigned int) end)
		 - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2;
	unsigned int wc = 0;

	for (; wcnt; wcnt -= wc, start += wc) {
		spin_lock_irqsave(&cache_lock, flags);

		imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start));

		if (wcnt > 0xffff)
			wc = 0xffff;
		else
			wc = wcnt;

		/* Set word count value in the WC register */
		imcr_set(wc_reg, wc & 0xffff);

		spin_unlock_irqrestore(&cache_lock, flags);

		/* Don't wait for completion on last cache operation */
		if (wcnt > 0xffff)
			cache_block_operation_wait(wc_reg);
	}
}

/*
 * L1 caches management
 */

/*
 * Disable L1 caches
 *
 * The extra read after each write is redundant with the read-back
 * already done inside imcr_set(), but kept as in the original code.
 */
void L1_cache_off(void)
{
	unsigned int dummy;

	imcr_set(IMCR_L1PCFG, 0);
	dummy = imcr_get(IMCR_L1PCFG);

	imcr_set(IMCR_L1DCFG, 0);
	dummy = imcr_get(IMCR_L1DCFG);
}

/*
 * Enable L1 caches (mode 7 = maximum cache size for both L1P and L1D)
 */
void L1_cache_on(void)
{
	unsigned int dummy;

	imcr_set(IMCR_L1PCFG, 7);
	dummy = imcr_get(IMCR_L1PCFG);

	imcr_set(IMCR_L1DCFG, 7);
	dummy = imcr_get(IMCR_L1DCFG);
}

/*
 * L1P global-invalidate all
 */
void L1P_cache_global_invalidate(void)
{
	unsigned int set = 1;
	imcr_set(IMCR_L1PINV, set);
	while (imcr_get(IMCR_L1PINV) & 1)
		cpu_relax();
}

/*
 * L1D global-invalidate all
 *
 * Warning: this operation causes all updated data in L1D to
 * be discarded rather than written back to the lower levels of
 * memory
 */
void L1D_cache_global_invalidate(void)
{
	unsigned int set = 1;
	imcr_set(IMCR_L1DINV, set);
	while (imcr_get(IMCR_L1DINV) & 1)
		cpu_relax();
}

/*
 * L1D global writeback: flush all dirty L1D lines to lower levels,
 * keeping them valid in the cache.
 */
void L1D_cache_global_writeback(void)
{
	unsigned int set = 1;
	imcr_set(IMCR_L1DWB, set);
	while (imcr_get(IMCR_L1DWB) & 1)
		cpu_relax();
}

/*
 * L1D global writeback + invalidate: flush all dirty lines and then
 * invalidate the whole L1D.
 */
void L1D_cache_global_writeback_invalidate(void)
{
	unsigned int set = 1;
	imcr_set(IMCR_L1DWBINV, set);
	while (imcr_get(IMCR_L1DWBINV) & 1)
		cpu_relax();
}

/*
 * L2 caches management
 */

/*
 * Set L2 operation mode (one of the L2MODE_* values: how much of the
 * L2 memory is configured as cache vs. addressable SRAM).
 */
void L2_cache_set_mode(unsigned int mode)
{
	unsigned int ccfg = imcr_get(IMCR_CCFG);

	/* Clear and set the L2MODE bits in CCFG */
	ccfg &= ~7;
	ccfg |= (mode & 7);
	imcr_set(IMCR_CCFG, ccfg);
	ccfg = imcr_get(IMCR_CCFG);
}

/*
 * L2 global-writeback and global-invalidate all
 */
void L2_cache_global_writeback_invalidate(void)
{
	imcr_set(IMCR_L2WBINV, 1);
	while (imcr_get(IMCR_L2WBINV))
		cpu_relax();
}

/*
 * L2 global-writeback all
 */
void L2_cache_global_writeback(void)
{
	imcr_set(IMCR_L2WB, 1);
	while (imcr_get(IMCR_L2WB))
		cpu_relax();
}

/*
 * Cacheability controls
 *
 * Each MAR register controls cacheability of one 16MB region, hence
 * the (addr >> 24) indexing; bit 0 enables caching for the region.
 */
void enable_caching(unsigned long start, unsigned long end)
{
	unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2);
	unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2);

	for (; mar <= mar_e; mar += 4)
		imcr_set(mar, imcr_get(mar) | 1);
}

void disable_caching(unsigned long start, unsigned long end)
{
	unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2);
	unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2);

	for (; mar <= mar_e; mar += 4)
		imcr_set(mar, imcr_get(mar) & ~1);
}

/*
 * L1 block operations (ranges are L2-line aligned internally)
 */
void L1P_cache_block_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L1PIBAR, IMCR_L1PIWC);
}

void L1D_cache_block_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L1DIBAR, IMCR_L1DIWC);
}

void L1D_cache_block_writeback_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L1DWIBAR, IMCR_L1DWIWC);
}

void L1D_cache_block_writeback(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L1DWBAR, IMCR_L1DWWC);
}

/*
 * L2 block operations
 */
void L2_cache_block_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L2IBAR, IMCR_L2IWC);
}

void L2_cache_block_writeback(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L2WBAR, IMCR_L2WWC);
}

void L2_cache_block_writeback_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L2WIBAR, IMCR_L2WIWC);
}

void L2_cache_block_invalidate_nowait(unsigned int start, unsigned int end)
{
	cache_block_operation_nowait((unsigned int *) start,
				     (unsigned int *) end,
				     IMCR_L2IBAR, IMCR_L2IWC);
}

void L2_cache_block_writeback_nowait(unsigned int start, unsigned int end)
{
	cache_block_operation_nowait((unsigned int *) start,
				     (unsigned int *) end,
				     IMCR_L2WBAR, IMCR_L2WWC);
}

void L2_cache_block_writeback_invalidate_nowait(unsigned int start,
						unsigned int end)
{
	cache_block_operation_nowait((unsigned int *) start,
				     (unsigned int *) end,
				     IMCR_L2WIBAR, IMCR_L2WIWC);
}

/*
 * L1 and L2 caches configuration
 *
 * Maps the cache controller registers from the device tree and enables
 * the caches.  Silently does nothing if the DT node or mapping is
 * missing.  (L2MODE_SIZE is defined outside this file — presumably in
 * <asm/cache.h>.)
 */
void __init c6x_cache_init(void)
{
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, "ti,c64x+cache");
	if (!node)
		return;

	cache_base = of_iomap(node, 0);

	of_node_put(node);

	if (!cache_base)
		return;

	/* Set L2 cache on the whole L2 SRAM memory */
	L2_cache_set_mode(L2MODE_SIZE);

	/* Enable L1 */
	L1_cache_on();
}
gpl-2.0
drewis/android_kernel_htc_ruby
drivers/staging/line6/control.c
8229
44512
/*
 * Line6 Linux USB driver - 0.9.1beta
 *
 * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation, version 2.
 *
 */

#include <linux/usb.h>

#include "control.h"
#include "driver.h"
#include "pod.h"
#include "usbdefs.h"
#include "variax.h"

/* Like DEVICE_ATTR but lets the attribute variable name (_name1) differ
 * from the sysfs file name (_name2) — used for the "__bass" variants. */
#define DEVICE_ATTR2(_name1, _name2, _mode, _show, _store) \
struct device_attribute dev_attr_##_name1 = __ATTR(_name2, _mode, _show, _store)

/* Generate a sysfs "show" callback prefix##_get_##param that forwards to
 * prefix##_get_param_##type() with the PREFIX##_##param control index. */
#define LINE6_PARAM_R(PREFIX, prefix, type, param) \
static ssize_t prefix##_get_##param(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	return prefix##_get_param_##type(dev, buf, PREFIX##_##param); \
}

/* Same as LINE6_PARAM_R plus the matching "store" callback. */
#define LINE6_PARAM_RW(PREFIX, prefix, type, param) \
LINE6_PARAM_R(PREFIX, prefix, type, param); \
static ssize_t prefix##_set_##param(struct device *dev, \
			struct device_attribute *attr, const char *buf, \
			size_t count) \
{ \
	return prefix##_set_param_##type(dev, buf, count, PREFIX##_##param); \
}

#define POD_PARAM_R(type, param) LINE6_PARAM_R(POD, pod, type, param)
#define POD_PARAM_RW(type, param) LINE6_PARAM_RW(POD, pod, type, param)
#define VARIAX_PARAM_R(type, param) LINE6_PARAM_R(VARIAX, variax, type, param)
#define VARIAX_PARAM_RW(type, param) LINE6_PARAM_RW(VARIAX, variax, type, param)

/*
 * Read one POD control value as a decimal integer.
 * Waits (interruptibly) until the current program dump is available, then
 * prints prog_data.control[param] into the sysfs buffer.
 */
static ssize_t pod_get_param_int(struct device *dev, char *buf, int param)
{
	struct usb_interface *interface = to_usb_interface(dev);
	struct usb_line6_pod *pod = usb_get_intfdata(interface);
	int retval = line6_dump_wait_interruptible(&pod->dumpreq);
	if (retval < 0)
		return retval;
	return sprintf(buf, "%d\n", pod->prog_data.control[param]);
}

/*
 * Parse a decimal value from the sysfs buffer and send it to the device.
 * NOTE(review): strict_strtoul() is the pre-kstrtoul() API of this kernel
 * era; the parsed value is passed on without range checking — presumably
 * line6_pod_transmit_parameter() clamps it, verify.
 */
static ssize_t pod_set_param_int(struct device *dev, const char *buf,
				 size_t count, int param)
{
	struct usb_interface *interface = to_usb_interface(dev);
	struct usb_line6_pod *pod = usb_get_intfdata(interface);
	unsigned long value;
	int retval;

	retval = strict_strtoul(buf, 10, &value);
	if (retval)
		return retval;

	line6_pod_transmit_parameter(pod, param, value);
	return count;
}

/*
 * Read one Variax control value as a decimal integer (see
 * pod_get_param_int for the dump-wait semantics).
 */
static ssize_t variax_get_param_int(struct device *dev, char *buf, int param)
{
	struct usb_interface *interface = to_usb_interface(dev);
	struct usb_line6_variax *variax = usb_get_intfdata(interface);
	int retval = line6_dump_wait_interruptible(&variax->dumpreq);
	if (retval < 0)
		return retval;
	return sprintf(buf, "%d\n", variax->model_data.control[param]);
}

/*
 * Read one Variax control value stored as a 3-byte custom floating-point
 * number and print it as a fixed-point decimal ("[-]int.frac") with three
 * fractional digits.
 *
 * Encoding (as decoded below): byte 0 bit 7 = sign, the remaining bits of
 * bytes 0-1 hold a biased exponent, bytes 1-2 the mantissa with an
 * implicit leading 1 (the 0x8000 OR).  All-zero bytes mean exactly 0.
 */
static ssize_t variax_get_param_float(struct device *dev, char *buf, int param)
{
	/*
	   We do our own floating point handling here since at the time
	   this code was written (Jan 2006) it was highly discouraged to
	   use floating point arithmetic in the kernel. If you think that
	   this no longer applies, feel free to replace this by generic
	   floating point code.
	 */

	static const int BIAS = 0x7f;
	static const int OFFSET = 0xf;
	static const int PRECISION = 1000;

	int len = 0;
	unsigned part_int, part_frac;
	struct usb_interface *interface = to_usb_interface(dev);
	struct usb_line6_variax *variax = usb_get_intfdata(interface);
	const unsigned char *p = variax->model_data.control + param;
	int retval = line6_dump_wait_interruptible(&variax->dumpreq);
	if (retval < 0)
		return retval;

	if ((p[0] == 0) && (p[1] == 0) && (p[2] == 0))
		part_int = part_frac = 0;
	else {
		int exponent = (((p[0] & 0x7f) << 1) | (p[1] >> 7)) - BIAS;
		unsigned mantissa = (p[1] << 8) | p[2] | 0x8000;
		exponent -= OFFSET;

		/* NOTE(review): for |exponent| >= 32 these shifts are
		 * undefined behavior in C — presumably the device never
		 * produces such extreme exponents; verify. */
		if (exponent >= 0) {
			part_int = mantissa << exponent;
			part_frac = 0;
		} else {
			part_int = mantissa >> -exponent;
			part_frac = (mantissa << (32 + exponent)) & 0xffffffff;
		}

		/* Scale the fractional bits to PRECISION (rounded). */
		part_frac =
		    (part_frac / ((1UL << 31) / (PRECISION / 2 * 10)) + 5) / 10;
	}

	/* NOTE(review): "%d" is used for unsigned part_int/part_frac —
	 * harmless for in-range values but strictly a mismatch. */
	len += sprintf(buf + len, "%s%d.%03d\n",
		       ((p[0] & 0x80) ? "-" : ""), part_int, part_frac);
	return len;
}

/* POD sysfs accessors generated from the POD_* control indices;
 * each line expands to pod_get_<param>() and pod_set_<param>(). */
POD_PARAM_RW(int, tweak);
POD_PARAM_RW(int, wah_position);
POD_PARAM_RW(int, compression_gain);
POD_PARAM_RW(int, vol_pedal_position);
POD_PARAM_RW(int, compression_threshold);
POD_PARAM_RW(int, pan);
POD_PARAM_RW(int, amp_model_setup);
POD_PARAM_RW(int, amp_model);
POD_PARAM_RW(int, drive);
POD_PARAM_RW(int, bass);
POD_PARAM_RW(int, mid);
POD_PARAM_RW(int, lowmid);
POD_PARAM_RW(int, treble);
POD_PARAM_RW(int, highmid);
POD_PARAM_RW(int, chan_vol);
POD_PARAM_RW(int, reverb_mix);
POD_PARAM_RW(int, effect_setup);
POD_PARAM_RW(int, band_1_frequency);
POD_PARAM_RW(int, presence);
POD_PARAM_RW(int, treble__bass);
POD_PARAM_RW(int, noise_gate_enable);
POD_PARAM_RW(int, gate_threshold);
POD_PARAM_RW(int, gate_decay_time);
POD_PARAM_RW(int, stomp_enable);
POD_PARAM_RW(int, comp_enable);
POD_PARAM_RW(int, stomp_time);
POD_PARAM_RW(int, delay_enable);
POD_PARAM_RW(int, mod_param_1);
POD_PARAM_RW(int, delay_param_1);
POD_PARAM_RW(int, delay_param_1_note_value);
POD_PARAM_RW(int, band_2_frequency__bass);
POD_PARAM_RW(int, delay_param_2);
POD_PARAM_RW(int, delay_volume_mix);
POD_PARAM_RW(int, delay_param_3);
POD_PARAM_RW(int, reverb_enable);
POD_PARAM_RW(int, reverb_type);
POD_PARAM_RW(int, reverb_decay);
POD_PARAM_RW(int, reverb_tone);
POD_PARAM_RW(int, reverb_pre_delay);
POD_PARAM_RW(int, reverb_pre_post);
POD_PARAM_RW(int, band_2_frequency);
POD_PARAM_RW(int, band_3_frequency__bass);
POD_PARAM_RW(int, wah_enable);
POD_PARAM_RW(int, modulation_lo_cut);
POD_PARAM_RW(int, delay_reverb_lo_cut);
POD_PARAM_RW(int, volume_pedal_minimum);
POD_PARAM_RW(int, eq_pre_post);
POD_PARAM_RW(int, volume_pre_post);
POD_PARAM_RW(int, di_model);
POD_PARAM_RW(int, di_delay);
POD_PARAM_RW(int, mod_enable);
POD_PARAM_RW(int, mod_param_1_note_value);
POD_PARAM_RW(int, mod_param_2);
POD_PARAM_RW(int, mod_param_3);
POD_PARAM_RW(int, mod_param_4);
POD_PARAM_RW(int, mod_param_5);
POD_PARAM_RW(int, mod_volume_mix);
POD_PARAM_RW(int, mod_pre_post);
/* Remaining POD sysfs accessors (see POD_PARAM_RW above). */
POD_PARAM_RW(int, modulation_model);
POD_PARAM_RW(int, band_3_frequency);
POD_PARAM_RW(int, band_4_frequency__bass);
POD_PARAM_RW(int, mod_param_1_double_precision);
POD_PARAM_RW(int, delay_param_1_double_precision);
POD_PARAM_RW(int, eq_enable);
POD_PARAM_RW(int, tap);
POD_PARAM_RW(int, volume_tweak_pedal_assign);
POD_PARAM_RW(int, band_5_frequency);
POD_PARAM_RW(int, tuner);
POD_PARAM_RW(int, mic_selection);
POD_PARAM_RW(int, cabinet_model);
POD_PARAM_RW(int, stomp_model);
POD_PARAM_RW(int, roomlevel);
POD_PARAM_RW(int, band_4_frequency);
POD_PARAM_RW(int, band_6_frequency);
POD_PARAM_RW(int, stomp_param_1_note_value);
POD_PARAM_RW(int, stomp_param_2);
POD_PARAM_RW(int, stomp_param_3);
POD_PARAM_RW(int, stomp_param_4);
POD_PARAM_RW(int, stomp_param_5);
POD_PARAM_RW(int, stomp_param_6);
POD_PARAM_RW(int, amp_switch_select);
POD_PARAM_RW(int, delay_param_4);
POD_PARAM_RW(int, delay_param_5);
POD_PARAM_RW(int, delay_pre_post);
POD_PARAM_RW(int, delay_model);
POD_PARAM_RW(int, delay_verb_model);
POD_PARAM_RW(int, tempo_msb);
POD_PARAM_RW(int, tempo_lsb);
POD_PARAM_RW(int, wah_model);
POD_PARAM_RW(int, bypass_volume);
POD_PARAM_RW(int, fx_loop_on_off);
POD_PARAM_RW(int, tweak_param_select);
POD_PARAM_RW(int, amp1_engage);
POD_PARAM_RW(int, band_1_gain);
POD_PARAM_RW(int, band_2_gain__bass);
POD_PARAM_RW(int, band_2_gain);
POD_PARAM_RW(int, band_3_gain__bass);
POD_PARAM_RW(int, band_3_gain);
POD_PARAM_RW(int, band_4_gain__bass);
POD_PARAM_RW(int, band_5_gain__bass);
POD_PARAM_RW(int, band_4_gain);
POD_PARAM_RW(int, band_6_gain__bass);

/* Variax sysfs accessors — read-only (only a "get" is generated). */
VARIAX_PARAM_R(int, body);
VARIAX_PARAM_R(int, pickup1_enable);
VARIAX_PARAM_R(int, pickup1_type);
VARIAX_PARAM_R(float, pickup1_position);
VARIAX_PARAM_R(float, pickup1_angle);
VARIAX_PARAM_R(float, pickup1_level);
VARIAX_PARAM_R(int, pickup2_enable);
VARIAX_PARAM_R(int, pickup2_type);
VARIAX_PARAM_R(float, pickup2_position);
VARIAX_PARAM_R(float, pickup2_angle);
VARIAX_PARAM_R(float, pickup2_level);
VARIAX_PARAM_R(int, pickup_phase);
VARIAX_PARAM_R(float, capacitance);
VARIAX_PARAM_R(float, tone_resistance);
VARIAX_PARAM_R(float, volume_resistance);
VARIAX_PARAM_R(int, taper);
VARIAX_PARAM_R(float, tone_dump);
VARIAX_PARAM_R(int, save_tone);
VARIAX_PARAM_R(float, volume_dump);
VARIAX_PARAM_R(int, tuning_enable);
VARIAX_PARAM_R(int, tuning6);
VARIAX_PARAM_R(int, tuning5);
VARIAX_PARAM_R(int, tuning4);
VARIAX_PARAM_R(int, tuning3);
VARIAX_PARAM_R(int, tuning2);
VARIAX_PARAM_R(int, tuning1);
VARIAX_PARAM_R(float, detune6);
VARIAX_PARAM_R(float, detune5);
VARIAX_PARAM_R(float, detune4);
VARIAX_PARAM_R(float, detune3);
VARIAX_PARAM_R(float, detune2);
VARIAX_PARAM_R(float, detune1);
VARIAX_PARAM_R(float, mix6);
VARIAX_PARAM_R(float, mix5);
VARIAX_PARAM_R(float, mix4);
VARIAX_PARAM_R(float, mix3);
VARIAX_PARAM_R(float, mix2);
VARIAX_PARAM_R(float, mix1);
VARIAX_PARAM_R(int, pickup_wiring);

/* POD sysfs attribute definitions (read/write), wiring the accessors
 * generated above into struct device_attribute objects. */
static DEVICE_ATTR(tweak, S_IWUSR | S_IRUGO, pod_get_tweak, pod_set_tweak);
static DEVICE_ATTR(wah_position, S_IWUSR | S_IRUGO,
		   pod_get_wah_position, pod_set_wah_position);
static DEVICE_ATTR(compression_gain, S_IWUSR | S_IRUGO,
		   pod_get_compression_gain, pod_set_compression_gain);
static DEVICE_ATTR(vol_pedal_position, S_IWUSR | S_IRUGO,
		   pod_get_vol_pedal_position, pod_set_vol_pedal_position);
static DEVICE_ATTR(compression_threshold, S_IWUSR | S_IRUGO,
		   pod_get_compression_threshold,
		   pod_set_compression_threshold);
static DEVICE_ATTR(pan, S_IWUSR | S_IRUGO, pod_get_pan, pod_set_pan);
static DEVICE_ATTR(amp_model_setup, S_IWUSR | S_IRUGO,
		   pod_get_amp_model_setup, pod_set_amp_model_setup);
static DEVICE_ATTR(amp_model, S_IWUSR | S_IRUGO,
		   pod_get_amp_model, pod_set_amp_model);
static DEVICE_ATTR(drive, S_IWUSR | S_IRUGO, pod_get_drive, pod_set_drive);
static DEVICE_ATTR(bass, S_IWUSR | S_IRUGO, pod_get_bass, pod_set_bass);
static DEVICE_ATTR(mid, S_IWUSR | S_IRUGO, pod_get_mid, pod_set_mid);
static DEVICE_ATTR(lowmid, S_IWUSR | S_IRUGO, pod_get_lowmid, pod_set_lowmid);
static DEVICE_ATTR(treble, S_IWUSR | S_IRUGO,
		   pod_get_treble, pod_set_treble);
static DEVICE_ATTR(highmid, S_IWUSR | S_IRUGO,
		   pod_get_highmid, pod_set_highmid);
static DEVICE_ATTR(chan_vol, S_IWUSR | S_IRUGO,
		   pod_get_chan_vol, pod_set_chan_vol);
static DEVICE_ATTR(reverb_mix, S_IWUSR | S_IRUGO,
		   pod_get_reverb_mix, pod_set_reverb_mix);
static DEVICE_ATTR(effect_setup, S_IWUSR | S_IRUGO,
		   pod_get_effect_setup, pod_set_effect_setup);
static DEVICE_ATTR(band_1_frequency, S_IWUSR | S_IRUGO,
		   pod_get_band_1_frequency, pod_set_band_1_frequency);
static DEVICE_ATTR(presence, S_IWUSR | S_IRUGO,
		   pod_get_presence, pod_set_presence);
/* DEVICE_ATTR2: sysfs name "treble", variable dev_attr_treble__bass
 * (bass-model variant sharing the file name of the regular control). */
static DEVICE_ATTR2(treble__bass, treble, S_IWUSR | S_IRUGO,
		    pod_get_treble__bass, pod_set_treble__bass);
static DEVICE_ATTR(noise_gate_enable, S_IWUSR | S_IRUGO,
		   pod_get_noise_gate_enable, pod_set_noise_gate_enable);
static DEVICE_ATTR(gate_threshold, S_IWUSR | S_IRUGO,
		   pod_get_gate_threshold, pod_set_gate_threshold);
static DEVICE_ATTR(gate_decay_time, S_IWUSR | S_IRUGO,
		   pod_get_gate_decay_time, pod_set_gate_decay_time);
static DEVICE_ATTR(stomp_enable, S_IWUSR | S_IRUGO,
		   pod_get_stomp_enable, pod_set_stomp_enable);
static DEVICE_ATTR(comp_enable, S_IWUSR | S_IRUGO,
		   pod_get_comp_enable, pod_set_comp_enable);
static DEVICE_ATTR(stomp_time, S_IWUSR | S_IRUGO,
		   pod_get_stomp_time, pod_set_stomp_time);
static DEVICE_ATTR(delay_enable, S_IWUSR | S_IRUGO,
		   pod_get_delay_enable, pod_set_delay_enable);
static DEVICE_ATTR(mod_param_1, S_IWUSR | S_IRUGO,
		   pod_get_mod_param_1, pod_set_mod_param_1);
static DEVICE_ATTR(delay_param_1, S_IWUSR | S_IRUGO,
		   pod_get_delay_param_1, pod_set_delay_param_1);
static DEVICE_ATTR(delay_param_1_note_value, S_IWUSR | S_IRUGO,
		   pod_get_delay_param_1_note_value,
		   pod_set_delay_param_1_note_value);
static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency,
		    S_IWUSR | S_IRUGO, pod_get_band_2_frequency__bass,
		    pod_set_band_2_frequency__bass);
static DEVICE_ATTR(delay_param_2, S_IWUSR | S_IRUGO,
		   pod_get_delay_param_2, pod_set_delay_param_2);
static DEVICE_ATTR(delay_volume_mix, S_IWUSR | S_IRUGO,
		   pod_get_delay_volume_mix, pod_set_delay_volume_mix);
static DEVICE_ATTR(delay_param_3, S_IWUSR | S_IRUGO,
		   pod_get_delay_param_3, pod_set_delay_param_3);
static DEVICE_ATTR(reverb_enable, S_IWUSR | S_IRUGO,
		   pod_get_reverb_enable, pod_set_reverb_enable);
static DEVICE_ATTR(reverb_type, S_IWUSR | S_IRUGO,
		   pod_get_reverb_type, pod_set_reverb_type);
static DEVICE_ATTR(reverb_decay, S_IWUSR | S_IRUGO,
		   pod_get_reverb_decay, pod_set_reverb_decay);
static DEVICE_ATTR(reverb_tone, S_IWUSR | S_IRUGO,
		   pod_get_reverb_tone, pod_set_reverb_tone);
static DEVICE_ATTR(reverb_pre_delay, S_IWUSR | S_IRUGO,
		   pod_get_reverb_pre_delay, pod_set_reverb_pre_delay);
static DEVICE_ATTR(reverb_pre_post, S_IWUSR | S_IRUGO,
		   pod_get_reverb_pre_post, pod_set_reverb_pre_post);
static DEVICE_ATTR(band_2_frequency, S_IWUSR | S_IRUGO,
		   pod_get_band_2_frequency, pod_set_band_2_frequency);
static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency,
		    S_IWUSR | S_IRUGO, pod_get_band_3_frequency__bass,
		    pod_set_band_3_frequency__bass);
static DEVICE_ATTR(wah_enable, S_IWUSR | S_IRUGO,
		   pod_get_wah_enable, pod_set_wah_enable);
static DEVICE_ATTR(modulation_lo_cut, S_IWUSR | S_IRUGO,
		   pod_get_modulation_lo_cut, pod_set_modulation_lo_cut);
static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUSR | S_IRUGO,
		   pod_get_delay_reverb_lo_cut, pod_set_delay_reverb_lo_cut);
static DEVICE_ATTR(volume_pedal_minimum, S_IWUSR | S_IRUGO,
		   pod_get_volume_pedal_minimum, pod_set_volume_pedal_minimum);
static DEVICE_ATTR(eq_pre_post, S_IWUSR | S_IRUGO,
		   pod_get_eq_pre_post, pod_set_eq_pre_post);
static DEVICE_ATTR(volume_pre_post, S_IWUSR | S_IRUGO,
		   pod_get_volume_pre_post, pod_set_volume_pre_post);
static DEVICE_ATTR(di_model, S_IWUSR | S_IRUGO,
		   pod_get_di_model, pod_set_di_model);
static DEVICE_ATTR(di_delay, S_IWUSR | S_IRUGO,
		   pod_get_di_delay, pod_set_di_delay);
static DEVICE_ATTR(mod_enable, S_IWUSR | S_IRUGO,
		   pod_get_mod_enable, pod_set_mod_enable);
static DEVICE_ATTR(mod_param_1_note_value, S_IWUSR | S_IRUGO,
		   pod_get_mod_param_1_note_value,
		   pod_set_mod_param_1_note_value);
static DEVICE_ATTR(mod_param_2, S_IWUSR | S_IRUGO,
		   pod_get_mod_param_2, pod_set_mod_param_2);
static DEVICE_ATTR(mod_param_3, S_IWUSR | S_IRUGO,
		   pod_get_mod_param_3, pod_set_mod_param_3);
static DEVICE_ATTR(mod_param_4, S_IWUSR | S_IRUGO,
		   pod_get_mod_param_4, pod_set_mod_param_4);
static DEVICE_ATTR(mod_param_5, S_IWUSR | S_IRUGO,
		   pod_get_mod_param_5, pod_set_mod_param_5);
static DEVICE_ATTR(mod_volume_mix, S_IWUSR | S_IRUGO,
		   pod_get_mod_volume_mix, pod_set_mod_volume_mix);
static DEVICE_ATTR(mod_pre_post, S_IWUSR | S_IRUGO,
		   pod_get_mod_pre_post, pod_set_mod_pre_post);
static DEVICE_ATTR(modulation_model, S_IWUSR | S_IRUGO,
		   pod_get_modulation_model, pod_set_modulation_model);
static DEVICE_ATTR(band_3_frequency, S_IWUSR | S_IRUGO,
		   pod_get_band_3_frequency, pod_set_band_3_frequency);
static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency,
		    S_IWUSR | S_IRUGO, pod_get_band_4_frequency__bass,
		    pod_set_band_4_frequency__bass);
static DEVICE_ATTR(mod_param_1_double_precision, S_IWUSR | S_IRUGO,
		   pod_get_mod_param_1_double_precision,
		   pod_set_mod_param_1_double_precision);
static DEVICE_ATTR(delay_param_1_double_precision, S_IWUSR | S_IRUGO,
		   pod_get_delay_param_1_double_precision,
		   pod_set_delay_param_1_double_precision);
static DEVICE_ATTR(eq_enable, S_IWUSR | S_IRUGO,
		   pod_get_eq_enable, pod_set_eq_enable);
static DEVICE_ATTR(tap, S_IWUSR | S_IRUGO, pod_get_tap, pod_set_tap);
static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUSR | S_IRUGO,
		   pod_get_volume_tweak_pedal_assign,
		   pod_set_volume_tweak_pedal_assign);
static DEVICE_ATTR(band_5_frequency, S_IWUSR | S_IRUGO,
		   pod_get_band_5_frequency, pod_set_band_5_frequency);
static DEVICE_ATTR(tuner, S_IWUSR | S_IRUGO, pod_get_tuner, pod_set_tuner);
static DEVICE_ATTR(mic_selection, S_IWUSR | S_IRUGO,
		   pod_get_mic_selection, pod_set_mic_selection);
static DEVICE_ATTR(cabinet_model, S_IWUSR | S_IRUGO,
		   pod_get_cabinet_model, pod_set_cabinet_model);
static DEVICE_ATTR(stomp_model, S_IWUSR | S_IRUGO,
		   pod_get_stomp_model, pod_set_stomp_model);
static DEVICE_ATTR(roomlevel, S_IWUSR | S_IRUGO,
		   pod_get_roomlevel, pod_set_roomlevel);
static DEVICE_ATTR(band_4_frequency, S_IWUSR | S_IRUGO,
		   pod_get_band_4_frequency, pod_set_band_4_frequency);
static DEVICE_ATTR(band_6_frequency, S_IWUSR | S_IRUGO,
		   pod_get_band_6_frequency, pod_set_band_6_frequency);
static DEVICE_ATTR(stomp_param_1_note_value, S_IWUSR | S_IRUGO,
		   pod_get_stomp_param_1_note_value,
		   pod_set_stomp_param_1_note_value);
static DEVICE_ATTR(stomp_param_2, S_IWUSR | S_IRUGO,
		   pod_get_stomp_param_2, pod_set_stomp_param_2);
static DEVICE_ATTR(stomp_param_3, S_IWUSR | S_IRUGO,
		   pod_get_stomp_param_3, pod_set_stomp_param_3);
static DEVICE_ATTR(stomp_param_4, S_IWUSR | S_IRUGO,
		   pod_get_stomp_param_4, pod_set_stomp_param_4);
static DEVICE_ATTR(stomp_param_5, S_IWUSR | S_IRUGO,
		   pod_get_stomp_param_5, pod_set_stomp_param_5);
static DEVICE_ATTR(stomp_param_6, S_IWUSR | S_IRUGO,
		   pod_get_stomp_param_6, pod_set_stomp_param_6);
static DEVICE_ATTR(amp_switch_select, S_IWUSR | S_IRUGO,
		   pod_get_amp_switch_select, pod_set_amp_switch_select);
static DEVICE_ATTR(delay_param_4, S_IWUSR | S_IRUGO,
		   pod_get_delay_param_4, pod_set_delay_param_4);
static DEVICE_ATTR(delay_param_5, S_IWUSR | S_IRUGO,
		   pod_get_delay_param_5, pod_set_delay_param_5);
static DEVICE_ATTR(delay_pre_post, S_IWUSR | S_IRUGO,
		   pod_get_delay_pre_post, pod_set_delay_pre_post);
static DEVICE_ATTR(delay_model, S_IWUSR | S_IRUGO,
		   pod_get_delay_model, pod_set_delay_model);
static DEVICE_ATTR(delay_verb_model, S_IWUSR | S_IRUGO,
		   pod_get_delay_verb_model, pod_set_delay_verb_model);
static DEVICE_ATTR(tempo_msb, S_IWUSR | S_IRUGO,
		   pod_get_tempo_msb, pod_set_tempo_msb);
static DEVICE_ATTR(tempo_lsb, S_IWUSR | S_IRUGO,
		   pod_get_tempo_lsb, pod_set_tempo_lsb);
static DEVICE_ATTR(wah_model, S_IWUSR | S_IRUGO,
		   pod_get_wah_model, pod_set_wah_model);
static DEVICE_ATTR(bypass_volume, S_IWUSR | S_IRUGO,
		   pod_get_bypass_volume, pod_set_bypass_volume);
static DEVICE_ATTR(fx_loop_on_off, S_IWUSR | S_IRUGO,
		   pod_get_fx_loop_on_off, pod_set_fx_loop_on_off);
static DEVICE_ATTR(tweak_param_select, S_IWUSR | S_IRUGO,
		   pod_get_tweak_param_select, pod_set_tweak_param_select);
static DEVICE_ATTR(amp1_engage, S_IWUSR | S_IRUGO,
		   pod_get_amp1_engage, pod_set_amp1_engage);
static DEVICE_ATTR(band_1_gain, S_IWUSR | S_IRUGO,
		   pod_get_band_1_gain, pod_set_band_1_gain);
static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUSR | S_IRUGO,
		    pod_get_band_2_gain__bass, pod_set_band_2_gain__bass);
static DEVICE_ATTR(band_2_gain, S_IWUSR | S_IRUGO,
		   pod_get_band_2_gain, pod_set_band_2_gain);
static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUSR | S_IRUGO,
		    pod_get_band_3_gain__bass, pod_set_band_3_gain__bass);
static DEVICE_ATTR(band_3_gain, S_IWUSR | S_IRUGO,
		   pod_get_band_3_gain, pod_set_band_3_gain);
static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUSR | S_IRUGO,
		    pod_get_band_4_gain__bass, pod_set_band_4_gain__bass);
static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUSR | S_IRUGO,
		    pod_get_band_5_gain__bass, pod_set_band_5_gain__bass);
static DEVICE_ATTR(band_4_gain, S_IWUSR | S_IRUGO,
		   pod_get_band_4_gain, pod_set_band_4_gain);
static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUSR | S_IRUGO,
		    pod_get_band_6_gain__bass, pod_set_band_6_gain__bass);

/* Variax sysfs attribute definitions — read-only; writes go to the
 * line6_nop_write() stub. */
static DEVICE_ATTR(body, S_IRUGO, variax_get_body, line6_nop_write);
static DEVICE_ATTR(pickup1_enable, S_IRUGO,
		   variax_get_pickup1_enable, line6_nop_write);
static DEVICE_ATTR(pickup1_type, S_IRUGO,
		   variax_get_pickup1_type, line6_nop_write);
static DEVICE_ATTR(pickup1_position, S_IRUGO,
		   variax_get_pickup1_position, line6_nop_write);
static DEVICE_ATTR(pickup1_angle, S_IRUGO,
		   variax_get_pickup1_angle, line6_nop_write);
static DEVICE_ATTR(pickup1_level, S_IRUGO,
		   variax_get_pickup1_level, line6_nop_write);
static DEVICE_ATTR(pickup2_enable, S_IRUGO,
		   variax_get_pickup2_enable, line6_nop_write);
static DEVICE_ATTR(pickup2_type, S_IRUGO,
		   variax_get_pickup2_type, line6_nop_write);
static DEVICE_ATTR(pickup2_position, S_IRUGO,
		   variax_get_pickup2_position, line6_nop_write);
static DEVICE_ATTR(pickup2_angle, S_IRUGO,
		   variax_get_pickup2_angle, line6_nop_write);
static DEVICE_ATTR(pickup2_level, S_IRUGO,
		   variax_get_pickup2_level, line6_nop_write);
static DEVICE_ATTR(pickup_phase, S_IRUGO,
		   variax_get_pickup_phase, line6_nop_write);
static DEVICE_ATTR(capacitance, S_IRUGO,
		   variax_get_capacitance, line6_nop_write);
static DEVICE_ATTR(tone_resistance, S_IRUGO,
		   variax_get_tone_resistance, line6_nop_write);
static DEVICE_ATTR(volume_resistance, S_IRUGO,
		   variax_get_volume_resistance, line6_nop_write);
static DEVICE_ATTR(taper, S_IRUGO, variax_get_taper, line6_nop_write);
static DEVICE_ATTR(tone_dump, S_IRUGO, variax_get_tone_dump, line6_nop_write);
static DEVICE_ATTR(save_tone, S_IRUGO, variax_get_save_tone, line6_nop_write);
static DEVICE_ATTR(volume_dump, S_IRUGO,
		   variax_get_volume_dump, line6_nop_write);
static DEVICE_ATTR(tuning_enable, S_IRUGO,
		   variax_get_tuning_enable, line6_nop_write);
static DEVICE_ATTR(tuning6, S_IRUGO, variax_get_tuning6, line6_nop_write);
static DEVICE_ATTR(tuning5, S_IRUGO, variax_get_tuning5, line6_nop_write);
static DEVICE_ATTR(tuning4, S_IRUGO, variax_get_tuning4, line6_nop_write);
static DEVICE_ATTR(tuning3, S_IRUGO, variax_get_tuning3, line6_nop_write);
static DEVICE_ATTR(tuning2, S_IRUGO, variax_get_tuning2, line6_nop_write);
static DEVICE_ATTR(tuning1, S_IRUGO, variax_get_tuning1, line6_nop_write);
static DEVICE_ATTR(detune6, S_IRUGO, variax_get_detune6, line6_nop_write);
static DEVICE_ATTR(detune5, S_IRUGO, variax_get_detune5, line6_nop_write);
static DEVICE_ATTR(detune4, S_IRUGO, variax_get_detune4, line6_nop_write);
static DEVICE_ATTR(detune3, S_IRUGO, variax_get_detune3, line6_nop_write);
static DEVICE_ATTR(detune2, S_IRUGO, variax_get_detune2, line6_nop_write);
static DEVICE_ATTR(detune1, S_IRUGO, variax_get_detune1, line6_nop_write);
static DEVICE_ATTR(mix6, S_IRUGO,
variax_get_mix6, line6_nop_write); static DEVICE_ATTR(mix5, S_IRUGO, variax_get_mix5, line6_nop_write); static DEVICE_ATTR(mix4, S_IRUGO, variax_get_mix4, line6_nop_write); static DEVICE_ATTR(mix3, S_IRUGO, variax_get_mix3, line6_nop_write); static DEVICE_ATTR(mix2, S_IRUGO, variax_get_mix2, line6_nop_write); static DEVICE_ATTR(mix1, S_IRUGO, variax_get_mix1, line6_nop_write); static DEVICE_ATTR(pickup_wiring, S_IRUGO, variax_get_pickup_wiring, line6_nop_write); int line6_pod_create_files(int firmware, int type, struct device *dev) { int err; CHECK_RETURN(device_create_file(dev, &dev_attr_tweak)); CHECK_RETURN(device_create_file(dev, &dev_attr_wah_position)); if ((type & (LINE6_BITS_PODXTALL)) != 0) CHECK_RETURN(device_create_file (dev, &dev_attr_compression_gain)); CHECK_RETURN(device_create_file(dev, &dev_attr_vol_pedal_position)); CHECK_RETURN(device_create_file(dev, &dev_attr_compression_threshold)); CHECK_RETURN(device_create_file(dev, &dev_attr_pan)); CHECK_RETURN(device_create_file(dev, &dev_attr_amp_model_setup)); if (firmware >= 200) CHECK_RETURN(device_create_file(dev, &dev_attr_amp_model)); CHECK_RETURN(device_create_file(dev, &dev_attr_drive)); CHECK_RETURN(device_create_file(dev, &dev_attr_bass)); if ((type & (LINE6_BITS_PODXTALL)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_mid)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_lowmid)); if ((type & (LINE6_BITS_PODXTALL)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_treble)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_highmid)); CHECK_RETURN(device_create_file(dev, &dev_attr_chan_vol)); if ((type & (LINE6_BITS_PODXTALL)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_reverb_mix)); CHECK_RETURN(device_create_file(dev, &dev_attr_effect_setup)); if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_1_frequency)); if ((type & (LINE6_BITS_PODXTALL)) != 0) 
CHECK_RETURN(device_create_file(dev, &dev_attr_presence)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_treble__bass)); CHECK_RETURN(device_create_file(dev, &dev_attr_noise_gate_enable)); CHECK_RETURN(device_create_file(dev, &dev_attr_gate_threshold)); CHECK_RETURN(device_create_file(dev, &dev_attr_gate_decay_time)); CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_enable)); CHECK_RETURN(device_create_file(dev, &dev_attr_comp_enable)); CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_time)); CHECK_RETURN(device_create_file(dev, &dev_attr_delay_enable)); CHECK_RETURN(device_create_file(dev, &dev_attr_mod_param_1)); CHECK_RETURN(device_create_file(dev, &dev_attr_delay_param_1)); CHECK_RETURN(device_create_file (dev, &dev_attr_delay_param_1_note_value)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_2_frequency__bass)); CHECK_RETURN(device_create_file(dev, &dev_attr_delay_param_2)); CHECK_RETURN(device_create_file(dev, &dev_attr_delay_volume_mix)); CHECK_RETURN(device_create_file(dev, &dev_attr_delay_param_3)); if ((type & (LINE6_BITS_PODXTALL)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_reverb_enable)); if ((type & (LINE6_BITS_PODXTALL)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_reverb_type)); if ((type & (LINE6_BITS_PODXTALL)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_reverb_decay)); if ((type & (LINE6_BITS_PODXTALL)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_reverb_tone)); if ((type & (LINE6_BITS_PODXTALL)) != 0) CHECK_RETURN(device_create_file (dev, &dev_attr_reverb_pre_delay)); if ((type & (LINE6_BITS_PODXTALL)) != 0) CHECK_RETURN(device_create_file (dev, &dev_attr_reverb_pre_post)); if ((type & (LINE6_BITS_PODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_2_frequency)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) 
CHECK_RETURN(device_create_file (dev, &dev_attr_band_3_frequency__bass)); CHECK_RETURN(device_create_file(dev, &dev_attr_wah_enable)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) CHECK_RETURN(device_create_file (dev, &dev_attr_modulation_lo_cut)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) CHECK_RETURN(device_create_file (dev, &dev_attr_delay_reverb_lo_cut)); if ((type & (LINE6_BITS_PODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_volume_pedal_minimum)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_eq_pre_post)); CHECK_RETURN(device_create_file(dev, &dev_attr_volume_pre_post)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_di_model)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_di_delay)); CHECK_RETURN(device_create_file(dev, &dev_attr_mod_enable)); CHECK_RETURN(device_create_file(dev, &dev_attr_mod_param_1_note_value)); CHECK_RETURN(device_create_file(dev, &dev_attr_mod_param_2)); CHECK_RETURN(device_create_file(dev, &dev_attr_mod_param_3)); CHECK_RETURN(device_create_file(dev, &dev_attr_mod_param_4)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_mod_param_5)); CHECK_RETURN(device_create_file(dev, &dev_attr_mod_volume_mix)); CHECK_RETURN(device_create_file(dev, &dev_attr_mod_pre_post)); CHECK_RETURN(device_create_file(dev, &dev_attr_modulation_model)); if ((type & (LINE6_BITS_PODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_3_frequency)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_4_frequency__bass)); CHECK_RETURN(device_create_file (dev, &dev_attr_mod_param_1_double_precision)); CHECK_RETURN(device_create_file (dev, &dev_attr_delay_param_1_double_precision)); if (firmware >= 200) 
CHECK_RETURN(device_create_file(dev, &dev_attr_eq_enable)); CHECK_RETURN(device_create_file(dev, &dev_attr_tap)); CHECK_RETURN(device_create_file (dev, &dev_attr_volume_tweak_pedal_assign)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_5_frequency)); CHECK_RETURN(device_create_file(dev, &dev_attr_tuner)); CHECK_RETURN(device_create_file(dev, &dev_attr_mic_selection)); CHECK_RETURN(device_create_file(dev, &dev_attr_cabinet_model)); CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_model)); CHECK_RETURN(device_create_file(dev, &dev_attr_roomlevel)); if ((type & (LINE6_BITS_PODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_4_frequency)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_6_frequency)); CHECK_RETURN(device_create_file (dev, &dev_attr_stomp_param_1_note_value)); CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_param_2)); CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_param_3)); CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_param_4)); CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_param_5)); CHECK_RETURN(device_create_file(dev, &dev_attr_stomp_param_6)); if ((type & (LINE6_BITS_LIVE)) != 0) CHECK_RETURN(device_create_file (dev, &dev_attr_amp_switch_select)); CHECK_RETURN(device_create_file(dev, &dev_attr_delay_param_4)); CHECK_RETURN(device_create_file(dev, &dev_attr_delay_param_5)); CHECK_RETURN(device_create_file(dev, &dev_attr_delay_pre_post)); if ((type & (LINE6_BITS_PODXTALL)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_delay_model)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) CHECK_RETURN(device_create_file (dev, &dev_attr_delay_verb_model)); CHECK_RETURN(device_create_file(dev, &dev_attr_tempo_msb)); CHECK_RETURN(device_create_file(dev, &dev_attr_tempo_lsb)); if (firmware >= 300) CHECK_RETURN(device_create_file(dev, 
&dev_attr_wah_model)); if (firmware >= 214) CHECK_RETURN(device_create_file(dev, &dev_attr_bypass_volume)); if ((type & (LINE6_BITS_PRO)) != 0) CHECK_RETURN(device_create_file(dev, &dev_attr_fx_loop_on_off)); CHECK_RETURN(device_create_file(dev, &dev_attr_tweak_param_select)); CHECK_RETURN(device_create_file(dev, &dev_attr_amp1_engage)); if (firmware >= 200) CHECK_RETURN(device_create_file(dev, &dev_attr_band_1_gain)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_2_gain__bass)); if ((type & (LINE6_BITS_PODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_2_gain)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_3_gain__bass)); if ((type & (LINE6_BITS_PODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_3_gain)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_4_gain__bass)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_5_gain__bass)); if ((type & (LINE6_BITS_PODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_4_gain)); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) CHECK_RETURN(device_create_file (dev, &dev_attr_band_6_gain__bass)); return 0; } void line6_pod_remove_files(int firmware, int type, struct device *dev) { device_remove_file(dev, &dev_attr_tweak); device_remove_file(dev, &dev_attr_wah_position); if ((type & (LINE6_BITS_PODXTALL)) != 0) device_remove_file(dev, &dev_attr_compression_gain); device_remove_file(dev, &dev_attr_vol_pedal_position); device_remove_file(dev, &dev_attr_compression_threshold); device_remove_file(dev, &dev_attr_pan); device_remove_file(dev, &dev_attr_amp_model_setup); if (firmware >= 200) device_remove_file(dev, 
&dev_attr_amp_model); device_remove_file(dev, &dev_attr_drive); device_remove_file(dev, &dev_attr_bass); if ((type & (LINE6_BITS_PODXTALL)) != 0) device_remove_file(dev, &dev_attr_mid); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) device_remove_file(dev, &dev_attr_lowmid); if ((type & (LINE6_BITS_PODXTALL)) != 0) device_remove_file(dev, &dev_attr_treble); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) device_remove_file(dev, &dev_attr_highmid); device_remove_file(dev, &dev_attr_chan_vol); if ((type & (LINE6_BITS_PODXTALL)) != 0) device_remove_file(dev, &dev_attr_reverb_mix); device_remove_file(dev, &dev_attr_effect_setup); if (firmware >= 200) device_remove_file(dev, &dev_attr_band_1_frequency); if ((type & (LINE6_BITS_PODXTALL)) != 0) device_remove_file(dev, &dev_attr_presence); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) device_remove_file(dev, &dev_attr_treble__bass); device_remove_file(dev, &dev_attr_noise_gate_enable); device_remove_file(dev, &dev_attr_gate_threshold); device_remove_file(dev, &dev_attr_gate_decay_time); device_remove_file(dev, &dev_attr_stomp_enable); device_remove_file(dev, &dev_attr_comp_enable); device_remove_file(dev, &dev_attr_stomp_time); device_remove_file(dev, &dev_attr_delay_enable); device_remove_file(dev, &dev_attr_mod_param_1); device_remove_file(dev, &dev_attr_delay_param_1); device_remove_file(dev, &dev_attr_delay_param_1_note_value); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_2_frequency__bass); device_remove_file(dev, &dev_attr_delay_param_2); device_remove_file(dev, &dev_attr_delay_volume_mix); device_remove_file(dev, &dev_attr_delay_param_3); if ((type & (LINE6_BITS_PODXTALL)) != 0) device_remove_file(dev, &dev_attr_reverb_enable); if ((type & (LINE6_BITS_PODXTALL)) != 0) device_remove_file(dev, &dev_attr_reverb_type); if ((type & (LINE6_BITS_PODXTALL)) != 0) device_remove_file(dev, &dev_attr_reverb_decay); if ((type & (LINE6_BITS_PODXTALL)) != 0) 
device_remove_file(dev, &dev_attr_reverb_tone); if ((type & (LINE6_BITS_PODXTALL)) != 0) device_remove_file(dev, &dev_attr_reverb_pre_delay); if ((type & (LINE6_BITS_PODXTALL)) != 0) device_remove_file(dev, &dev_attr_reverb_pre_post); if ((type & (LINE6_BITS_PODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_2_frequency); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_3_frequency__bass); device_remove_file(dev, &dev_attr_wah_enable); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) device_remove_file(dev, &dev_attr_modulation_lo_cut); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) device_remove_file(dev, &dev_attr_delay_reverb_lo_cut); if ((type & (LINE6_BITS_PODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_volume_pedal_minimum); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_eq_pre_post); device_remove_file(dev, &dev_attr_volume_pre_post); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) device_remove_file(dev, &dev_attr_di_model); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) device_remove_file(dev, &dev_attr_di_delay); device_remove_file(dev, &dev_attr_mod_enable); device_remove_file(dev, &dev_attr_mod_param_1_note_value); device_remove_file(dev, &dev_attr_mod_param_2); device_remove_file(dev, &dev_attr_mod_param_3); device_remove_file(dev, &dev_attr_mod_param_4); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) device_remove_file(dev, &dev_attr_mod_param_5); device_remove_file(dev, &dev_attr_mod_volume_mix); device_remove_file(dev, &dev_attr_mod_pre_post); device_remove_file(dev, &dev_attr_modulation_model); if ((type & (LINE6_BITS_PODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_3_frequency); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_4_frequency__bass); device_remove_file(dev, &dev_attr_mod_param_1_double_precision); 
device_remove_file(dev, &dev_attr_delay_param_1_double_precision); if (firmware >= 200) device_remove_file(dev, &dev_attr_eq_enable); device_remove_file(dev, &dev_attr_tap); device_remove_file(dev, &dev_attr_volume_tweak_pedal_assign); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_5_frequency); device_remove_file(dev, &dev_attr_tuner); device_remove_file(dev, &dev_attr_mic_selection); device_remove_file(dev, &dev_attr_cabinet_model); device_remove_file(dev, &dev_attr_stomp_model); device_remove_file(dev, &dev_attr_roomlevel); if ((type & (LINE6_BITS_PODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_4_frequency); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_6_frequency); device_remove_file(dev, &dev_attr_stomp_param_1_note_value); device_remove_file(dev, &dev_attr_stomp_param_2); device_remove_file(dev, &dev_attr_stomp_param_3); device_remove_file(dev, &dev_attr_stomp_param_4); device_remove_file(dev, &dev_attr_stomp_param_5); device_remove_file(dev, &dev_attr_stomp_param_6); if ((type & (LINE6_BITS_LIVE)) != 0) device_remove_file(dev, &dev_attr_amp_switch_select); device_remove_file(dev, &dev_attr_delay_param_4); device_remove_file(dev, &dev_attr_delay_param_5); device_remove_file(dev, &dev_attr_delay_pre_post); if ((type & (LINE6_BITS_PODXTALL)) != 0) device_remove_file(dev, &dev_attr_delay_model); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) device_remove_file(dev, &dev_attr_delay_verb_model); device_remove_file(dev, &dev_attr_tempo_msb); device_remove_file(dev, &dev_attr_tempo_lsb); if (firmware >= 300) device_remove_file(dev, &dev_attr_wah_model); if (firmware >= 214) device_remove_file(dev, &dev_attr_bypass_volume); if ((type & (LINE6_BITS_PRO)) != 0) device_remove_file(dev, &dev_attr_fx_loop_on_off); device_remove_file(dev, &dev_attr_tweak_param_select); device_remove_file(dev, &dev_attr_amp1_engage); if (firmware 
>= 200) device_remove_file(dev, &dev_attr_band_1_gain); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_2_gain__bass); if ((type & (LINE6_BITS_PODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_2_gain); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_3_gain__bass); if ((type & (LINE6_BITS_PODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_3_gain); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_4_gain__bass); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_5_gain__bass); if ((type & (LINE6_BITS_PODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_4_gain); if ((type & (LINE6_BITS_BASSPODXTALL)) != 0) if (firmware >= 200) device_remove_file(dev, &dev_attr_band_6_gain__bass); } int line6_variax_create_files(int firmware, int type, struct device *dev) { int err; CHECK_RETURN(device_create_file(dev, &dev_attr_body)); CHECK_RETURN(device_create_file(dev, &dev_attr_pickup1_enable)); CHECK_RETURN(device_create_file(dev, &dev_attr_pickup1_type)); CHECK_RETURN(device_create_file(dev, &dev_attr_pickup1_position)); CHECK_RETURN(device_create_file(dev, &dev_attr_pickup1_angle)); CHECK_RETURN(device_create_file(dev, &dev_attr_pickup1_level)); CHECK_RETURN(device_create_file(dev, &dev_attr_pickup2_enable)); CHECK_RETURN(device_create_file(dev, &dev_attr_pickup2_type)); CHECK_RETURN(device_create_file(dev, &dev_attr_pickup2_position)); CHECK_RETURN(device_create_file(dev, &dev_attr_pickup2_angle)); CHECK_RETURN(device_create_file(dev, &dev_attr_pickup2_level)); CHECK_RETURN(device_create_file(dev, &dev_attr_pickup_phase)); CHECK_RETURN(device_create_file(dev, &dev_attr_capacitance)); CHECK_RETURN(device_create_file(dev, &dev_attr_tone_resistance)); 
CHECK_RETURN(device_create_file(dev, &dev_attr_volume_resistance)); CHECK_RETURN(device_create_file(dev, &dev_attr_taper)); CHECK_RETURN(device_create_file(dev, &dev_attr_tone_dump)); CHECK_RETURN(device_create_file(dev, &dev_attr_save_tone)); CHECK_RETURN(device_create_file(dev, &dev_attr_volume_dump)); CHECK_RETURN(device_create_file(dev, &dev_attr_tuning_enable)); CHECK_RETURN(device_create_file(dev, &dev_attr_tuning6)); CHECK_RETURN(device_create_file(dev, &dev_attr_tuning5)); CHECK_RETURN(device_create_file(dev, &dev_attr_tuning4)); CHECK_RETURN(device_create_file(dev, &dev_attr_tuning3)); CHECK_RETURN(device_create_file(dev, &dev_attr_tuning2)); CHECK_RETURN(device_create_file(dev, &dev_attr_tuning1)); CHECK_RETURN(device_create_file(dev, &dev_attr_detune6)); CHECK_RETURN(device_create_file(dev, &dev_attr_detune5)); CHECK_RETURN(device_create_file(dev, &dev_attr_detune4)); CHECK_RETURN(device_create_file(dev, &dev_attr_detune3)); CHECK_RETURN(device_create_file(dev, &dev_attr_detune2)); CHECK_RETURN(device_create_file(dev, &dev_attr_detune1)); CHECK_RETURN(device_create_file(dev, &dev_attr_mix6)); CHECK_RETURN(device_create_file(dev, &dev_attr_mix5)); CHECK_RETURN(device_create_file(dev, &dev_attr_mix4)); CHECK_RETURN(device_create_file(dev, &dev_attr_mix3)); CHECK_RETURN(device_create_file(dev, &dev_attr_mix2)); CHECK_RETURN(device_create_file(dev, &dev_attr_mix1)); CHECK_RETURN(device_create_file(dev, &dev_attr_pickup_wiring)); return 0; } void line6_variax_remove_files(int firmware, int type, struct device *dev) { device_remove_file(dev, &dev_attr_body); device_remove_file(dev, &dev_attr_pickup1_enable); device_remove_file(dev, &dev_attr_pickup1_type); device_remove_file(dev, &dev_attr_pickup1_position); device_remove_file(dev, &dev_attr_pickup1_angle); device_remove_file(dev, &dev_attr_pickup1_level); device_remove_file(dev, &dev_attr_pickup2_enable); device_remove_file(dev, &dev_attr_pickup2_type); device_remove_file(dev, &dev_attr_pickup2_position); 
device_remove_file(dev, &dev_attr_pickup2_angle); device_remove_file(dev, &dev_attr_pickup2_level); device_remove_file(dev, &dev_attr_pickup_phase); device_remove_file(dev, &dev_attr_capacitance); device_remove_file(dev, &dev_attr_tone_resistance); device_remove_file(dev, &dev_attr_volume_resistance); device_remove_file(dev, &dev_attr_taper); device_remove_file(dev, &dev_attr_tone_dump); device_remove_file(dev, &dev_attr_save_tone); device_remove_file(dev, &dev_attr_volume_dump); device_remove_file(dev, &dev_attr_tuning_enable); device_remove_file(dev, &dev_attr_tuning6); device_remove_file(dev, &dev_attr_tuning5); device_remove_file(dev, &dev_attr_tuning4); device_remove_file(dev, &dev_attr_tuning3); device_remove_file(dev, &dev_attr_tuning2); device_remove_file(dev, &dev_attr_tuning1); device_remove_file(dev, &dev_attr_detune6); device_remove_file(dev, &dev_attr_detune5); device_remove_file(dev, &dev_attr_detune4); device_remove_file(dev, &dev_attr_detune3); device_remove_file(dev, &dev_attr_detune2); device_remove_file(dev, &dev_attr_detune1); device_remove_file(dev, &dev_attr_mix6); device_remove_file(dev, &dev_attr_mix5); device_remove_file(dev, &dev_attr_mix4); device_remove_file(dev, &dev_attr_mix3); device_remove_file(dev, &dev_attr_mix2); device_remove_file(dev, &dev_attr_mix1); device_remove_file(dev, &dev_attr_pickup_wiring); }
gpl-2.0
Pillar1989/linux-a80-3.4
net/irda/irlmp.c
8485
55641
/********************************************************************* * * Filename: irlmp.c * Version: 1.0 * Description: IrDA Link Management Protocol (LMP) layer * Status: Stable. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sun Aug 17 20:54:32 1997 * Modified at: Wed Jan 5 11:26:03 2000 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>, * All Rights Reserved. * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/skbuff.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/random.h> #include <linux/seq_file.h> #include <net/irda/irda.h> #include <net/irda/timer.h> #include <net/irda/qos.h> #include <net/irda/irlap.h> #include <net/irda/iriap.h> #include <net/irda/irlmp.h> #include <net/irda/irlmp_frame.h> #include <asm/unaligned.h> static __u8 irlmp_find_free_slsap(void); static int irlmp_slsap_inuse(__u8 slsap_sel); /* Master structure */ struct irlmp_cb *irlmp = NULL; /* These can be altered by the sysctl interface */ int sysctl_discovery = 0; int sysctl_discovery_timeout = 3; /* 3 seconds by default */ int sysctl_discovery_slots = 6; /* 6 slots by default */ int sysctl_lap_keepalive_time = LM_IDLE_TIMEOUT * 1000 / HZ; char sysctl_devname[65]; const char *irlmp_reasons[] = { "ERROR, NOT USED", "LM_USER_REQUEST", "LM_LAP_DISCONNECT", "LM_CONNECT_FAILURE", 
"LM_LAP_RESET", "LM_INIT_DISCONNECT", "ERROR, NOT USED", }; /* * Function irlmp_init (void) * * Create (allocate) the main IrLMP structure * */ int __init irlmp_init(void) { IRDA_DEBUG(1, "%s()\n", __func__); /* Initialize the irlmp structure. */ irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL); if (irlmp == NULL) return -ENOMEM; irlmp->magic = LMP_MAGIC; irlmp->clients = hashbin_new(HB_LOCK); irlmp->services = hashbin_new(HB_LOCK); irlmp->links = hashbin_new(HB_LOCK); irlmp->unconnected_lsaps = hashbin_new(HB_LOCK); irlmp->cachelog = hashbin_new(HB_NOLOCK); if ((irlmp->clients == NULL) || (irlmp->services == NULL) || (irlmp->links == NULL) || (irlmp->unconnected_lsaps == NULL) || (irlmp->cachelog == NULL)) { return -ENOMEM; } spin_lock_init(&irlmp->cachelog->hb_spinlock); irlmp->last_lsap_sel = 0x0f; /* Reserved 0x00-0x0f */ strcpy(sysctl_devname, "Linux"); init_timer(&irlmp->discovery_timer); /* Do discovery every 3 seconds, conditionally */ if (sysctl_discovery) irlmp_start_discovery_timer(irlmp, sysctl_discovery_timeout*HZ); return 0; } /* * Function irlmp_cleanup (void) * * Remove IrLMP layer * */ void irlmp_cleanup(void) { /* Check for main structure */ IRDA_ASSERT(irlmp != NULL, return;); IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return;); del_timer(&irlmp->discovery_timer); hashbin_delete(irlmp->links, (FREE_FUNC) kfree); hashbin_delete(irlmp->unconnected_lsaps, (FREE_FUNC) kfree); hashbin_delete(irlmp->clients, (FREE_FUNC) kfree); hashbin_delete(irlmp->services, (FREE_FUNC) kfree); hashbin_delete(irlmp->cachelog, (FREE_FUNC) kfree); /* De-allocate main structure */ kfree(irlmp); irlmp = NULL; } /* * Function irlmp_open_lsap (slsap, notify) * * Register with IrLMP and create a local LSAP, * returns handle to LSAP. 
*/ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid) { struct lsap_cb *self; IRDA_ASSERT(notify != NULL, return NULL;); IRDA_ASSERT(irlmp != NULL, return NULL;); IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return NULL;); IRDA_ASSERT(notify->instance != NULL, return NULL;); /* Does the client care which Source LSAP selector it gets? */ if (slsap_sel == LSAP_ANY) { slsap_sel = irlmp_find_free_slsap(); if (!slsap_sel) return NULL; } else if (irlmp_slsap_inuse(slsap_sel)) return NULL; /* Allocate new instance of a LSAP connection */ self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC); if (self == NULL) { IRDA_ERROR("%s: can't allocate memory\n", __func__); return NULL; } self->magic = LMP_LSAP_MAGIC; self->slsap_sel = slsap_sel; /* Fix connectionless LSAP's */ if (slsap_sel == LSAP_CONNLESS) { #ifdef CONFIG_IRDA_ULTRA self->dlsap_sel = LSAP_CONNLESS; self->pid = pid; #endif /* CONFIG_IRDA_ULTRA */ } else self->dlsap_sel = LSAP_ANY; /* self->connected = FALSE; -> already NULL via memset() */ init_timer(&self->watchdog_timer); self->notify = *notify; self->lsap_state = LSAP_DISCONNECTED; /* Insert into queue of unconnected LSAPs */ hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) self, (long) self, NULL); return self; } EXPORT_SYMBOL(irlmp_open_lsap); /* * Function __irlmp_close_lsap (self) * * Remove an instance of LSAP */ static void __irlmp_close_lsap(struct lsap_cb *self) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); /* * Set some of the variables to preset values */ self->magic = 0; del_timer(&self->watchdog_timer); /* Important! 
*/ if (self->conn_skb) dev_kfree_skb(self->conn_skb); kfree(self); } /* * Function irlmp_close_lsap (self) * * Close and remove LSAP * */ void irlmp_close_lsap(struct lsap_cb *self) { struct lap_cb *lap; struct lsap_cb *lsap = NULL; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); /* * Find out if we should remove this LSAP from a link or from the * list of unconnected lsaps (not associated with a link) */ lap = self->lap; if (lap) { IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;); /* We might close a LSAP before it has completed the * connection setup. In those case, higher layers won't * send a proper disconnect request. Harmless, except * that we will forget to close LAP... - Jean II */ if(self->lsap_state != LSAP_DISCONNECTED) { self->lsap_state = LSAP_DISCONNECTED; irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); } /* Now, remove from the link */ lsap = hashbin_remove(lap->lsaps, (long) self, NULL); #ifdef CONFIG_IRDA_CACHE_LAST_LSAP lap->cache.valid = FALSE; #endif } self->lap = NULL; /* Check if we found the LSAP! If not then try the unconnected lsaps */ if (!lsap) { lsap = hashbin_remove(irlmp->unconnected_lsaps, (long) self, NULL); } if (!lsap) { IRDA_DEBUG(0, "%s(), Looks like somebody has removed me already!\n", __func__); return; } __irlmp_close_lsap(self); } EXPORT_SYMBOL(irlmp_close_lsap); /* * Function irlmp_register_irlap (saddr, notify) * * Register IrLAP layer with IrLMP. 
 * It is possible to have multiple
 * instances of the IrLAP layer, each connected to different IrDA ports
 *
 */
void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr,
			 notify_t *notify)
{
	struct lap_cb *lap;

	IRDA_ASSERT(irlmp != NULL, return;);
	IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return;);
	IRDA_ASSERT(notify != NULL, return;);

	/*
	 * Allocate new instance of a LSAP connection
	 */
	lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL);
	if (lap == NULL) {
		IRDA_ERROR("%s: unable to kmalloc\n", __func__);
		return;
	}

	lap->irlap = irlap;
	lap->magic = LMP_LAP_MAGIC;
	lap->saddr = saddr;
	/* No peer yet; irlmp_connect_request() fills in daddr later. */
	lap->daddr = DEV_ADDR_ANY;
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
	lap->cache.valid = FALSE;
#endif
	lap->lsaps = hashbin_new(HB_LOCK);
	if (lap->lsaps == NULL) {
		IRDA_WARNING("%s(), unable to kmalloc lsaps\n", __func__);
		kfree(lap);
		return;
	}

	lap->lap_state = LAP_STANDBY;

	init_timer(&lap->idle_timer);

	/*
	 * Insert into queue of LMP links
	 */
	hashbin_insert(irlmp->links, (irda_queue_t *) lap, lap->saddr, NULL);

	/*
	 * We set only this variable so IrLAP can tell us on which link the
	 * different events happened on
	 */
	irda_notify_init(notify);
	notify->instance = lap;
}

/*
 * Function irlmp_unregister_irlap (saddr)
 *
 *    IrLAP layer has been removed!
 *
 */
void irlmp_unregister_link(__u32 saddr)
{
	struct lap_cb *link;

	IRDA_DEBUG(4, "%s()\n", __func__);

	/* We must remove ourselves from the hashbin *first*. This ensures
	 * that no more LSAPs will be open on this link and no discovery
	 * will be triggered anymore. Jean II */
	link = hashbin_remove(irlmp->links, saddr, NULL);
	if (link) {
		IRDA_ASSERT(link->magic == LMP_LAP_MAGIC, return;);

		/* Kill all the LSAPs on this link. Jean II */
		link->reason = LAP_DISC_INDICATION;
		link->daddr = DEV_ADDR_ANY;
		irlmp_do_lap_event(link, LM_LAP_DISCONNECT_INDICATION, NULL);

		/* Remove all discoveries discovered at this link */
		irlmp_expire_discoveries(irlmp->cachelog, link->saddr, TRUE);

		/* Final cleanup */
		del_timer(&link->idle_timer);
		link->magic = 0;
		hashbin_delete(link->lsaps, (FREE_FUNC) __irlmp_close_lsap);
		kfree(link);
	}
}

/*
 * Function irlmp_connect_request (handle, dlsap, userdata)
 *
 *    Connect with a peer LSAP
 *
 * Note on skb ownership: on both the success and the error path the
 * passed userdata skb (or the locally allocated one) is consumed via
 * dev_kfree_skb() after the state machine has taken its own reference.
 */
int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
			  __u32 saddr, __u32 daddr,
			  struct qos_info *qos, struct sk_buff *userdata)
{
	struct sk_buff *tx_skb = userdata;
	struct lap_cb *lap;
	struct lsap_cb *lsap;
	int ret;

	IRDA_ASSERT(self != NULL, return -EBADR;);
	IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -EBADR;);

	IRDA_DEBUG(2,
	      "%s(), slsap_sel=%02x, dlsap_sel=%02x, saddr=%08x, daddr=%08x\n",
	      __func__, self->slsap_sel, dlsap_sel, saddr, daddr);

	if (test_bit(0, &self->connected)) {
		ret = -EISCONN;
		goto err;
	}

	/* Client must supply destination device address */
	if (!daddr) {
		ret = -EINVAL;
		goto err;
	}

	/* Any userdata? */
	if (tx_skb == NULL) {
		tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		skb_reserve(tx_skb, LMP_MAX_HEADER);
	}

	/* Make room for MUX control header (3 bytes) */
	IRDA_ASSERT(skb_headroom(tx_skb) >= LMP_CONTROL_HEADER, return -1;);
	skb_push(tx_skb, LMP_CONTROL_HEADER);

	self->dlsap_sel = dlsap_sel;

	/*
	 * Find the link to where we should try to connect since there may
	 * be more than one IrDA port on this machine. If the client has
	 * passed us the saddr (and already knows which link to use), then
	 * we use that to find the link, if not then we have to look in the
	 * discovery log and check if any of the links has discovered a
	 * device with the given daddr
	 */
	if ((!saddr) || (saddr == DEV_ADDR_ANY)) {
		discovery_t *discovery;
		unsigned long flags;

		spin_lock_irqsave(&irlmp->cachelog->hb_spinlock, flags);
		if (daddr != DEV_ADDR_ANY)
			discovery = hashbin_find(irlmp->cachelog, daddr, NULL);
		else {
			IRDA_DEBUG(2, "%s(), no daddr\n", __func__);
			discovery = (discovery_t *)
				hashbin_get_first(irlmp->cachelog);
		}

		if (discovery) {
			saddr = discovery->data.saddr;
			daddr = discovery->data.daddr;
		}
		spin_unlock_irqrestore(&irlmp->cachelog->hb_spinlock, flags);
	}
	lap = hashbin_lock_find(irlmp->links, saddr, NULL);
	if (lap == NULL) {
		IRDA_DEBUG(1, "%s(), Unable to find a usable link!\n",
			   __func__);
		ret = -EHOSTUNREACH;
		goto err;
	}

	/* Check if LAP is disconnected or already connected */
	if (lap->daddr == DEV_ADDR_ANY)
		lap->daddr = daddr;
	else if (lap->daddr != daddr) {
		/* Check if some LSAPs are active on this LAP */
		if (HASHBIN_GET_SIZE(lap->lsaps) == 0) {
			/* No active connection, but LAP hasn't been
			 * disconnected yet (waiting for timeout in LAP).
			 * Maybe we could give LAP a bit of help in this case.
			 */
			IRDA_DEBUG(0, "%s(), sorry, but I'm waiting for LAP to timeout!\n", __func__);
			ret = -EAGAIN;
			goto err;
		}

		/* LAP is already connected to a different node, and LAP
		 * can only talk to one node at a time */
		IRDA_DEBUG(0, "%s(), sorry, but link is busy!\n", __func__);
		ret = -EBUSY;
		goto err;
	}

	self->lap = lap;

	/*
	 * Remove LSAP from list of unconnected LSAPs and insert it into the
	 * list of connected LSAPs for the particular link
	 */
	lsap = hashbin_remove(irlmp->unconnected_lsaps, (long) self, NULL);

	IRDA_ASSERT(lsap != NULL, return -1;);
	IRDA_ASSERT(lsap->magic == LMP_LSAP_MAGIC, return -1;);
	IRDA_ASSERT(lsap->lap != NULL, return -1;);
	IRDA_ASSERT(lsap->lap->magic == LMP_LAP_MAGIC, return -1;);

	hashbin_insert(self->lap->lsaps, (irda_queue_t *) self, (long) self,
		       NULL);

	set_bit(0, &self->connected);	/* TRUE */

	/*
	 * User supplied qos specifications?
	 */
	if (qos)
		self->qos = *qos;

	irlmp_do_lsap_event(self, LM_CONNECT_REQUEST, tx_skb);

	/* Drop reference count - see irlap_data_request(). */
	dev_kfree_skb(tx_skb);

	return 0;

err:
	/* Cleanup */
	if(tx_skb)
		dev_kfree_skb(tx_skb);
	return ret;
}
EXPORT_SYMBOL(irlmp_connect_request);

/*
 * Function irlmp_connect_indication (self)
 *
 *    Incoming connection
 *
 */
void irlmp_connect_indication(struct lsap_cb *self, struct sk_buff *skb)
{
	int max_seg_size;
	int lap_header_size;
	int max_header_size;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);
	IRDA_ASSERT(self->lap != NULL, return;);

	IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
		   __func__, self->slsap_sel, self->dlsap_sel);

	/* Note : self->lap is set in irlmp_link_data_indication(),
	 * (case CONNECT_CMD:) because we have no way to set it here.
	 * Similarly, self->dlsap_sel is usually set in irlmp_find_lsap().
	 * Jean II */

	self->qos = *self->lap->qos;

	/* Tell the upper layer how much payload/headroom it may use. */
	max_seg_size = self->lap->qos->data_size.value-LMP_HEADER;
	lap_header_size = IRLAP_GET_HEADER_SIZE(self->lap->irlap);
	max_header_size = LMP_HEADER + lap_header_size;

	/* Hide LMP_CONTROL_HEADER header from layer above */
	skb_pull(skb, LMP_CONTROL_HEADER);

	if (self->notify.connect_indication) {
		/* Don't forget to refcount it - see irlap_driver_rcv(). */
		skb_get(skb);
		self->notify.connect_indication(self->notify.instance, self,
						&self->qos, max_seg_size,
						max_header_size, skb);
	}
}

/*
 * Function irlmp_connect_response (handle, userdata)
 *
 *    Service user is accepting connection
 *
 */
int irlmp_connect_response(struct lsap_cb *self, struct sk_buff *userdata)
{
	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
	IRDA_ASSERT(userdata != NULL, return -1;);

	/* We set the connected bit and move the lsap to the connected list
	 * in the state machine itself. Jean II */

	IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
		   __func__, self->slsap_sel, self->dlsap_sel);

	/* Make room for MUX control header (3 bytes) */
	IRDA_ASSERT(skb_headroom(userdata) >= LMP_CONTROL_HEADER, return -1;);
	skb_push(userdata, LMP_CONTROL_HEADER);

	irlmp_do_lsap_event(self, LM_CONNECT_RESPONSE, userdata);

	/* Drop reference count - see irlap_data_request(). */
	dev_kfree_skb(userdata);

	return 0;
}
EXPORT_SYMBOL(irlmp_connect_response);

/*
 * Function irlmp_connect_confirm (handle, skb)
 *
 *    LSAP connection confirmed peer device!
 */
void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb)
{
	int max_header_size;
	int lap_header_size;
	int max_seg_size;

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(skb != NULL, return;);
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
	IRDA_ASSERT(self->lap != NULL, return;);

	self->qos = *self->lap->qos;

	/* Derive segment/header limits from the negotiated link QoS. */
	max_seg_size = self->lap->qos->data_size.value-LMP_HEADER;
	lap_header_size = IRLAP_GET_HEADER_SIZE(self->lap->irlap);
	max_header_size = LMP_HEADER + lap_header_size;

	IRDA_DEBUG(2, "%s(), max_header_size=%d\n",
		   __func__, max_header_size);

	/* Hide LMP_CONTROL_HEADER header from layer above */
	skb_pull(skb, LMP_CONTROL_HEADER);

	if (self->notify.connect_confirm) {
		/* Don't forget to refcount it - see irlap_driver_rcv() */
		skb_get(skb);
		self->notify.connect_confirm(self->notify.instance, self,
					     &self->qos, max_seg_size,
					     max_header_size, skb);
	}
}

/*
 * Function irlmp_dup (orig, instance)
 *
 *    Duplicate LSAP, can be used by servers to confirm a connection on a
 *    new LSAP so it can keep listening on the old one.
 *
 */
struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance)
{
	struct lsap_cb *new;
	unsigned long flags;

	IRDA_DEBUG(1, "%s()\n", __func__);

	spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags);

	/* Only allowed to duplicate unconnected LSAP's, and only LSAPs
	 * that have received a connect indication. Jean II */
	if ((!hashbin_find(irlmp->unconnected_lsaps, (long) orig, NULL)) ||
	    (orig->lap == NULL)) {
		IRDA_DEBUG(0, "%s(), invalid LSAP (wrong state)\n",
			   __func__);
		spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock,
				       flags);
		return NULL;
	}

	/* Allocate a new instance */
	new = kmemdup(orig, sizeof(*new), GFP_ATOMIC);
	if (!new)  {
		IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
		spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock,
				       flags);
		return NULL;
	}
	/* new->lap = orig->lap; => done in the memcpy() */
	/* new->slsap_sel = orig->slsap_sel; => done in the memcpy() */
	new->conn_skb = NULL;

	spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags);

	/* Not everything is the same */
	new->notify.instance = instance;

	/* kmemdup() copied the timer struct raw; reinitialise it. */
	init_timer(&new->watchdog_timer);

	hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) new,
		       (long) new, NULL);

#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
	/* Make sure that we invalidate the LSAP cache */
	new->lap->cache.valid = FALSE;
#endif /* CONFIG_IRDA_CACHE_LAST_LSAP */

	return new;
}

/*
 * Function irlmp_disconnect_request (handle, userdata)
 *
 *    The service user is requesting disconnection, this will not remove the
 *    LSAP, but only mark it as disconnected
 */
int irlmp_disconnect_request(struct lsap_cb *self, struct sk_buff *userdata)
{
	struct lsap_cb *lsap;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;);
	IRDA_ASSERT(userdata != NULL, return -1;);

	/* Already disconnected ?
	 * There is a race condition between irlmp_disconnect_indication()
	 * and us that might mess up the hashbins below. This fixes it.
	 * Jean II */
	if (! test_and_clear_bit(0, &self->connected)) {
		IRDA_DEBUG(0, "%s(), already disconnected!\n", __func__);
		dev_kfree_skb(userdata);
		return -1;
	}

	skb_push(userdata, LMP_CONTROL_HEADER);

	/*
	 * Do the event before the other stuff since we must know
	 * which lap layer that the frame should be transmitted on
	 */
	irlmp_do_lsap_event(self, LM_DISCONNECT_REQUEST, userdata);

	/* Drop reference count - see irlap_data_request(). */
	dev_kfree_skb(userdata);

	/*
	 * Remove LSAP from list of connected LSAPs for the particular link
	 * and insert it into the list of unconnected LSAPs
	 */
	IRDA_ASSERT(self->lap != NULL, return -1;);
	IRDA_ASSERT(self->lap->magic == LMP_LAP_MAGIC, return -1;);
	IRDA_ASSERT(self->lap->lsaps != NULL, return -1;);

	lsap = hashbin_remove(self->lap->lsaps, (long) self, NULL);
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
	self->lap->cache.valid = FALSE;
#endif

	IRDA_ASSERT(lsap != NULL, return -1;);
	IRDA_ASSERT(lsap->magic == LMP_LSAP_MAGIC, return -1;);
	IRDA_ASSERT(lsap == self, return -1;);

	hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) self,
		       (long) self, NULL);

	/* Reset some values */
	self->dlsap_sel = LSAP_ANY;
	self->lap = NULL;

	return 0;
}
EXPORT_SYMBOL(irlmp_disconnect_request);

/*
 * Function irlmp_disconnect_indication (reason, userdata)
 *
 *    LSAP is being closed!
 */
void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
				 struct sk_buff *skb)
{
	struct lsap_cb *lsap;

	IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]);
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);

	IRDA_DEBUG(3, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n",
		   __func__, self->slsap_sel, self->dlsap_sel);

	/* Already disconnected ?
	 * There is a race condition between irlmp_disconnect_request()
	 * and us that might mess up the hashbins below. This fixes it.
	 * Jean II */
	if (! test_and_clear_bit(0, &self->connected)) {
		IRDA_DEBUG(0, "%s(), already disconnected!\n", __func__);
		return;
	}

	/*
	 * Remove association between this LSAP and the link it used
	 */
	IRDA_ASSERT(self->lap != NULL, return;);
	IRDA_ASSERT(self->lap->lsaps != NULL, return;);

	lsap = hashbin_remove(self->lap->lsaps, (long) self, NULL);
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
	self->lap->cache.valid = FALSE;
#endif

	IRDA_ASSERT(lsap != NULL, return;);
	IRDA_ASSERT(lsap == self, return;);
	hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) lsap,
		       (long) lsap, NULL);

	self->dlsap_sel = LSAP_ANY;
	self->lap = NULL;

	/*
	 * Inform service user
	 */
	if (self->notify.disconnect_indication) {
		/* Don't forget to refcount it - see irlap_driver_rcv(). */
		if(skb)
			skb_get(skb);
		self->notify.disconnect_indication(self->notify.instance,
						   self, reason, skb);
	} else {
		IRDA_DEBUG(0, "%s(), no handler\n", __func__);
	}
}

/*
 * Function irlmp_do_expiry (void)
 *
 *    Do a cleanup of the discovery log (remove old entries)
 *
 * Note : separate from irlmp_do_discovery() so that we can handle
 * passive discovery properly.
 */
void irlmp_do_expiry(void)
{
	struct lap_cb *lap;

	/*
	 * Expire discovery on all links which are *not* connected.
	 * On links which are connected, we can't do discovery
	 * anymore and can't refresh the log, so we freeze the
	 * discovery log to keep info about the device we are
	 * connected to.
	 * This info is mandatory if we want irlmp_connect_request()
	 * to work properly. - Jean II
	 */
	lap = (struct lap_cb *) hashbin_get_first(irlmp->links);
	while (lap != NULL) {
		IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;);

		if (lap->lap_state == LAP_STANDBY) {
			/* Expire discoveries discovered on this link */
			irlmp_expire_discoveries(irlmp->cachelog, lap->saddr,
						 FALSE);
		}
		lap = (struct lap_cb *) hashbin_get_next(irlmp->links);
	}
}

/*
 * Function irlmp_do_discovery (nslots)
 *
 *    Do some discovery on all links
 *
 * Note : log expiry is done above.
 */
void irlmp_do_discovery(int nslots)
{
	struct lap_cb *lap;
	__u16 *data_hintsp;

	/* Make sure the value is sane */
	if ((nslots != 1) && (nslots != 6) && (nslots != 8) && (nslots != 16)){
		IRDA_WARNING("%s: invalid value for number of slots!\n",
			     __func__);
		nslots = sysctl_discovery_slots = 8;
	}

	/* Construct new discovery info to be used by IrLAP, */
	data_hintsp = (__u16 *) irlmp->discovery_cmd.data.hints;
	put_unaligned(irlmp->hints.word, data_hintsp);

	/*
	 * Set character set for device name (we use ASCII), and
	 * copy device name. Remember to make room for a \0 at the
	 * end
	 */
	irlmp->discovery_cmd.data.charset = CS_ASCII;
	/* NOTE(review): strncpy() does not NUL-terminate when sysctl_devname
	 * is >= NICKNAME_MAX_LEN chars; presumably data.info has one spare
	 * byte for the terminator — verify against the struct definition. */
	strncpy(irlmp->discovery_cmd.data.info, sysctl_devname,
		NICKNAME_MAX_LEN);
	irlmp->discovery_cmd.name_len = strlen(irlmp->discovery_cmd.data.info);
	irlmp->discovery_cmd.nslots = nslots;

	/*
	 * Try to send discovery packets on all links
	 */
	lap = (struct lap_cb *) hashbin_get_first(irlmp->links);
	while (lap != NULL) {
		IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;);

		if (lap->lap_state == LAP_STANDBY) {
			/* Try to discover */
			irlmp_do_lap_event(lap, LM_LAP_DISCOVERY_REQUEST,
					   NULL);
		}
		lap = (struct lap_cb *) hashbin_get_next(irlmp->links);
	}
}

/*
 * Function irlmp_discovery_request (nslots)
 *
 *    Do a discovery of devices in front of the computer
 *
 * If the caller has registered a client discovery callback, this
 * allow him to receive the full content of the discovery log through
 * this callback (as normally he will receive only new discoveries).
 */
void irlmp_discovery_request(int nslots)
{
	/* Return current cached discovery log (in full) */
	irlmp_discovery_confirm(irlmp->cachelog, DISCOVERY_LOG);

	/*
	 * Start a single discovery operation if discovery is not already
	 * running
	 */
	if (!sysctl_discovery) {
		/* Check if user wants to override the default */
		if (nslots == DISCOVERY_DEFAULT_SLOTS)
			nslots = sysctl_discovery_slots;

		irlmp_do_discovery(nslots);
		/* Note : we never do expiry here. Expiry will run on the
		 * discovery timer regardless of the state of sysctl_discovery
		 * Jean II */
	}
}
EXPORT_SYMBOL(irlmp_discovery_request);

/*
 * Function irlmp_get_discoveries (pn, mask, slots)
 *
 *    Return the current discovery log
 *
 * If discovery is not enabled, you should call this function again
 * after 1 or 2 seconds (i.e. after discovery has been done).
 */
struct irda_device_info *irlmp_get_discoveries(int *pn, __u16 mask,
					       int nslots)
{
	/* If discovery is not enabled, it's likely that the discovery log
	 * will be empty. So, we trigger a single discovery, so that next
	 * time the user call us there might be some results in the log.
	 * Jean II */
	if (!sysctl_discovery) {
		/* Check if user wants to override the default */
		if (nslots == DISCOVERY_DEFAULT_SLOTS)
			nslots = sysctl_discovery_slots;

		/* Start discovery - will complete sometime later */
		irlmp_do_discovery(nslots);
		/* Note : we never do expiry here. Expiry will run on the
		 * discovery timer regardless of the state of sysctl_discovery
		 * Jean II */
	}

	/* Return current cached discovery log */
	return irlmp_copy_discoveries(irlmp->cachelog, pn, mask, TRUE);
}
EXPORT_SYMBOL(irlmp_get_discoveries);

/*
 * Function irlmp_notify_client (log)
 *
 *    Notify all about discovered devices
 *
 * Clients registered with IrLMP are :
 *	o IrComm
 *	o IrLAN
 *	o Any socket (in any state - ouch, that may be a lot !)
 * The client may have defined a callback to be notified in case of
 * partial/selective discovery based on the hints that it passed to IrLMP.
 */
static inline void
irlmp_notify_client(irlmp_client_t *client,
		    hashbin_t *log, DISCOVERY_MODE mode)
{
	discinfo_t *discoveries;	/* Copy of the discovery log */
	int	number;			/* Number of nodes in the log */
	int	i;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Check if client wants or not partial/selective log (optimisation) */
	if (!client->disco_callback)
		return;

	/*
	 * Locking notes :
	 * the old code was manipulating the log directly, which was
	 * very racy. Now, we use copy_discoveries, that protects
	 * itself while dumping the log for us.
	 * The overhead of the copy is compensated by the fact that
	 * we only pass new discoveries in normal mode and don't
	 * pass the same old entry every 3s to the caller as we used
	 * to do (virtual function calling is expensive).
	 * Jean II
	 */

	/*
	 * Now, check all discovered devices (if any), and notify client
	 * only about the services that the client is interested in
	 * We also notify only about the new devices unless the caller
	 * explicitly request a dump of the log. Jean II
	 */
	discoveries = irlmp_copy_discoveries(log, &number,
					     client->hint_mask.word,
					     (mode == DISCOVERY_LOG));
	/* Check if the we got some results */
	if (discoveries == NULL)
		return;	/* No nodes discovered */

	/* Pass all entries to the listener */
	for(i = 0; i < number; i++)
		client->disco_callback(&(discoveries[i]), mode, client->priv);

	/* Free up our buffer */
	kfree(discoveries);
}

/*
 * Function irlmp_discovery_confirm ( self, log)
 *
 *    Some device(s) answered to our discovery request! Check to see which
 *    device it is, and give indication to the client(s)
 *
 */
void irlmp_discovery_confirm(hashbin_t *log, DISCOVERY_MODE mode)
{
	irlmp_client_t *client;
	irlmp_client_t *client_next;

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(log != NULL, return;);

	if (!(HASHBIN_GET_SIZE(log)))
		return;

	/* For each client - notify callback may touch client list */
	client = (irlmp_client_t *) hashbin_get_first(irlmp->clients);
	while (NULL != hashbin_find_next(irlmp->clients, (long) client, NULL,
					 (void *) &client_next) ) {
		/* Check if we should notify client */
		irlmp_notify_client(client, log, mode);

		client = client_next;
	}
}

/*
 * Function irlmp_discovery_expiry (expiry)
 *
 *	This device is no longer been discovered, and therefore it is being
 *	purged from the discovery log. Inform all clients who have
 *	registered for this event...
 *
 *	Note : called exclusively from discovery.c
 *	Note : this is no longer called under discovery spinlock, so the
 *		client can do whatever he wants in the callback.
 */
void irlmp_discovery_expiry(discinfo_t *expiries, int number)
{
	irlmp_client_t *client;
	irlmp_client_t *client_next;
	int		i;

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(expiries != NULL, return;);

	/* For each client - notify callback may touch client list */
	client = (irlmp_client_t *) hashbin_get_first(irlmp->clients);
	while (NULL != hashbin_find_next(irlmp->clients, (long) client, NULL,
					 (void *) &client_next) ) {
		/* Pass all entries to the listener */
		for(i = 0; i < number; i++) {
			/* Check if we should notify client: only clients
			 * that registered an expiry callback AND whose hint
			 * mask overlaps this device's hints get called. */
			if ((client->expir_callback) &&
			    (client->hint_mask.word &
			     get_unaligned((__u16 *)expiries[i].hints)
			     & 0x7f7f) )
				client->expir_callback(&(expiries[i]),
						       EXPIRY_TIMEOUT,
						       client->priv);
		}

		/* Next client */
		client = client_next;
	}
}

/*
 * Function irlmp_get_discovery_response ()
 *
 *    Used by IrLAP to get the discovery info it needs when answering
 *    discovery requests by other devices.
 */
discovery_t *irlmp_get_discovery_response(void)
{
	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(irlmp != NULL, return NULL;);

	put_unaligned(irlmp->hints.word,
		      (__u16 *)irlmp->discovery_rsp.data.hints);

	/*
	 * Set character set for device name (we use ASCII), and
	 * copy device name. Remember to make room for a \0 at the
	 * end
	 */
	irlmp->discovery_rsp.data.charset = CS_ASCII;

	strncpy(irlmp->discovery_rsp.data.info, sysctl_devname,
		NICKNAME_MAX_LEN);
	irlmp->discovery_rsp.name_len = strlen(irlmp->discovery_rsp.data.info);

	return &irlmp->discovery_rsp;
}

/*
 * Function irlmp_data_request (self, skb)
 *
 *    Send some data to peer device
 *
 * Note on skb management :
 * After calling the lower layers of the IrDA stack, we always
 * kfree() the skb, which drop the reference count (and potentially
 * destroy it).
 * IrLMP and IrLAP may queue the packet, and in those cases will need
 * to use skb_get() to keep it around.
* Jean II */ int irlmp_data_request(struct lsap_cb *self, struct sk_buff *userdata) { int ret; IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); /* Make room for MUX header */ IRDA_ASSERT(skb_headroom(userdata) >= LMP_HEADER, return -1;); skb_push(userdata, LMP_HEADER); ret = irlmp_do_lsap_event(self, LM_DATA_REQUEST, userdata); /* Drop reference count - see irlap_data_request(). */ dev_kfree_skb(userdata); return ret; } EXPORT_SYMBOL(irlmp_data_request); /* * Function irlmp_data_indication (handle, skb) * * Got data from LAP layer so pass it up to upper layer * */ void irlmp_data_indication(struct lsap_cb *self, struct sk_buff *skb) { /* Hide LMP header from layer above */ skb_pull(skb, LMP_HEADER); if (self->notify.data_indication) { /* Don't forget to refcount it - see irlap_driver_rcv(). */ skb_get(skb); self->notify.data_indication(self->notify.instance, self, skb); } } /* * Function irlmp_udata_request (self, skb) */ int irlmp_udata_request(struct lsap_cb *self, struct sk_buff *userdata) { int ret; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(userdata != NULL, return -1;); /* Make room for MUX header */ IRDA_ASSERT(skb_headroom(userdata) >= LMP_HEADER, return -1;); skb_push(userdata, LMP_HEADER); ret = irlmp_do_lsap_event(self, LM_UDATA_REQUEST, userdata); /* Drop reference count - see irlap_data_request(). */ dev_kfree_skb(userdata); return ret; } /* * Function irlmp_udata_indication (self, skb) * * Send unreliable data (but still within the connection) * */ void irlmp_udata_indication(struct lsap_cb *self, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); /* Hide LMP header from layer above */ skb_pull(skb, LMP_HEADER); if (self->notify.udata_indication) { /* Don't forget to refcount it - see irlap_driver_rcv(). 
*/ skb_get(skb); self->notify.udata_indication(self->notify.instance, self, skb); } } /* * Function irlmp_connless_data_request (self, skb) */ #ifdef CONFIG_IRDA_ULTRA int irlmp_connless_data_request(struct lsap_cb *self, struct sk_buff *userdata, __u8 pid) { struct sk_buff *clone_skb; struct lap_cb *lap; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(userdata != NULL, return -1;); /* Make room for MUX and PID header */ IRDA_ASSERT(skb_headroom(userdata) >= LMP_HEADER+LMP_PID_HEADER, return -1;); /* Insert protocol identifier */ skb_push(userdata, LMP_PID_HEADER); if(self != NULL) userdata->data[0] = self->pid; else userdata->data[0] = pid; /* Connectionless sockets must use 0x70 */ skb_push(userdata, LMP_HEADER); userdata->data[0] = userdata->data[1] = LSAP_CONNLESS; /* Try to send Connectionless packets out on all links */ lap = (struct lap_cb *) hashbin_get_first(irlmp->links); while (lap != NULL) { IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return -1;); clone_skb = skb_clone(userdata, GFP_ATOMIC); if (!clone_skb) { dev_kfree_skb(userdata); return -ENOMEM; } irlap_unitdata_request(lap->irlap, clone_skb); /* irlap_unitdata_request() don't increase refcount, * so no dev_kfree_skb() - Jean II */ lap = (struct lap_cb *) hashbin_get_next(irlmp->links); } dev_kfree_skb(userdata); return 0; } #endif /* CONFIG_IRDA_ULTRA */ /* * Function irlmp_connless_data_indication (self, skb) * * Receive unreliable data outside any connection. Mostly used by Ultra * */ #ifdef CONFIG_IRDA_ULTRA void irlmp_connless_data_indication(struct lsap_cb *self, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); /* Hide LMP and PID header from layer above */ skb_pull(skb, LMP_HEADER+LMP_PID_HEADER); if (self->notify.udata_indication) { /* Don't forget to refcount it - see irlap_driver_rcv(). 
*/ skb_get(skb); self->notify.udata_indication(self->notify.instance, self, skb); } } #endif /* CONFIG_IRDA_ULTRA */ /* * Propagate status indication from LAP to LSAPs (via LMP) * This don't trigger any change of state in lap_cb, lmp_cb or lsap_cb, * and the event is stateless, therefore we can bypass both state machines * and send the event direct to the LSAP user. * Jean II */ void irlmp_status_indication(struct lap_cb *self, LINK_STATUS link, LOCK_STATUS lock) { struct lsap_cb *next; struct lsap_cb *curr; /* Send status_indication to all LSAPs using this link */ curr = (struct lsap_cb *) hashbin_get_first( self->lsaps); while (NULL != hashbin_find_next(self->lsaps, (long) curr, NULL, (void *) &next) ) { IRDA_ASSERT(curr->magic == LMP_LSAP_MAGIC, return;); /* * Inform service user if he has requested it */ if (curr->notify.status_indication != NULL) curr->notify.status_indication(curr->notify.instance, link, lock); else IRDA_DEBUG(2, "%s(), no handler\n", __func__); curr = next; } } /* * Receive flow control indication from LAP. * LAP want us to send it one more frame. We implement a simple round * robin scheduler between the active sockets so that we get a bit of * fairness. Note that the round robin is far from perfect, but it's * better than nothing. * We then poll the selected socket so that we can do synchronous * refilling of IrLAP (which allow to minimise the number of buffers). * Jean II */ void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow) { struct lsap_cb *next; struct lsap_cb *curr; int lsap_todo; IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); IRDA_ASSERT(flow == FLOW_START, return;); /* Get the number of lsap. That's the only safe way to know * that we have looped around... - Jean II */ lsap_todo = HASHBIN_GET_SIZE(self->lsaps); IRDA_DEBUG(4, "%s() : %d lsaps to scan\n", __func__, lsap_todo); /* Poll lsap in order until the queue is full or until we * tried them all. 
* Most often, the current LSAP will have something to send, * so we will go through this loop only once. - Jean II */ while((lsap_todo--) && (IRLAP_GET_TX_QUEUE_LEN(self->irlap) < LAP_HIGH_THRESHOLD)) { /* Try to find the next lsap we should poll. */ next = self->flow_next; /* If we have no lsap, restart from first one */ if(next == NULL) next = (struct lsap_cb *) hashbin_get_first(self->lsaps); /* Verify current one and find the next one */ curr = hashbin_find_next(self->lsaps, (long) next, NULL, (void *) &self->flow_next); /* Uh-oh... Paranoia */ if(curr == NULL) break; IRDA_DEBUG(4, "%s() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n", __func__, curr, next, self->flow_next, lsap_todo, IRLAP_GET_TX_QUEUE_LEN(self->irlap)); /* Inform lsap user that it can send one more packet. */ if (curr->notify.flow_indication != NULL) curr->notify.flow_indication(curr->notify.instance, curr, flow); else IRDA_DEBUG(1, "%s(), no handler\n", __func__); } } #if 0 /* * Function irlmp_hint_to_service (hint) * * Returns a list of all servics contained in the given hint bits. This * function assumes that the hint bits have the size of two bytes only */ __u8 *irlmp_hint_to_service(__u8 *hint) { __u8 *service; int i = 0; /* * Allocate array to store services in. 
16 entries should be safe * since we currently only support 2 hint bytes */ service = kmalloc(16, GFP_ATOMIC); if (!service) { IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __func__); return NULL; } if (!hint[0]) { IRDA_DEBUG(1, "<None>\n"); kfree(service); return NULL; } if (hint[0] & HINT_PNP) IRDA_DEBUG(1, "PnP Compatible "); if (hint[0] & HINT_PDA) IRDA_DEBUG(1, "PDA/Palmtop "); if (hint[0] & HINT_COMPUTER) IRDA_DEBUG(1, "Computer "); if (hint[0] & HINT_PRINTER) { IRDA_DEBUG(1, "Printer "); service[i++] = S_PRINTER; } if (hint[0] & HINT_MODEM) IRDA_DEBUG(1, "Modem "); if (hint[0] & HINT_FAX) IRDA_DEBUG(1, "Fax "); if (hint[0] & HINT_LAN) { IRDA_DEBUG(1, "LAN Access "); service[i++] = S_LAN; } /* * Test if extension byte exists. This byte will usually be * there, but this is not really required by the standard. * (IrLMP p. 29) */ if (hint[0] & HINT_EXTENSION) { if (hint[1] & HINT_TELEPHONY) { IRDA_DEBUG(1, "Telephony "); service[i++] = S_TELEPHONY; } if (hint[1] & HINT_FILE_SERVER) IRDA_DEBUG(1, "File Server "); if (hint[1] & HINT_COMM) { IRDA_DEBUG(1, "IrCOMM "); service[i++] = S_COMM; } if (hint[1] & HINT_OBEX) { IRDA_DEBUG(1, "IrOBEX "); service[i++] = S_OBEX; } } IRDA_DEBUG(1, "\n"); /* So that client can be notified about any discovery */ service[i++] = S_ANY; service[i] = S_END; return service; } #endif static const __u16 service_hint_mapping[S_END][2] = { { HINT_PNP, 0 }, /* S_PNP */ { HINT_PDA, 0 }, /* S_PDA */ { HINT_COMPUTER, 0 }, /* S_COMPUTER */ { HINT_PRINTER, 0 }, /* S_PRINTER */ { HINT_MODEM, 0 }, /* S_MODEM */ { HINT_FAX, 0 }, /* S_FAX */ { HINT_LAN, 0 }, /* S_LAN */ { HINT_EXTENSION, HINT_TELEPHONY }, /* S_TELEPHONY */ { HINT_EXTENSION, HINT_COMM }, /* S_COMM */ { HINT_EXTENSION, HINT_OBEX }, /* S_OBEX */ { 0xFF, 0xFF }, /* S_ANY */ }; /* * Function irlmp_service_to_hint (service) * * Converts a service type, to a hint bit * * Returns: a 16 bit hint value, with the service bit set */ __u16 irlmp_service_to_hint(int service) { __u16_host_order 
hint; hint.byte[0] = service_hint_mapping[service][0]; hint.byte[1] = service_hint_mapping[service][1]; return hint.word; } EXPORT_SYMBOL(irlmp_service_to_hint); /* * Function irlmp_register_service (service) * * Register local service with IrLMP * */ void *irlmp_register_service(__u16 hints) { irlmp_service_t *service; IRDA_DEBUG(4, "%s(), hints = %04x\n", __func__, hints); /* Make a new registration */ service = kmalloc(sizeof(irlmp_service_t), GFP_ATOMIC); if (!service) { IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __func__); return NULL; } service->hints.word = hints; hashbin_insert(irlmp->services, (irda_queue_t *) service, (long) service, NULL); irlmp->hints.word |= hints; return (void *)service; } EXPORT_SYMBOL(irlmp_register_service); /* * Function irlmp_unregister_service (handle) * * Unregister service with IrLMP. * * Returns: 0 on success, -1 on error */ int irlmp_unregister_service(void *handle) { irlmp_service_t *service; unsigned long flags; IRDA_DEBUG(4, "%s()\n", __func__); if (!handle) return -1; /* Caller may call with invalid handle (it's legal) - Jean II */ service = hashbin_lock_find(irlmp->services, (long) handle, NULL); if (!service) { IRDA_DEBUG(1, "%s(), Unknown service!\n", __func__); return -1; } hashbin_remove_this(irlmp->services, (irda_queue_t *) service); kfree(service); /* Remove old hint bits */ irlmp->hints.word = 0; /* Refresh current hint bits */ spin_lock_irqsave(&irlmp->services->hb_spinlock, flags); service = (irlmp_service_t *) hashbin_get_first(irlmp->services); while (service) { irlmp->hints.word |= service->hints.word; service = (irlmp_service_t *)hashbin_get_next(irlmp->services); } spin_unlock_irqrestore(&irlmp->services->hb_spinlock, flags); return 0; } EXPORT_SYMBOL(irlmp_unregister_service); /* * Function irlmp_register_client (hint_mask, callback1, callback2) * * Register a local client with IrLMP * First callback is selective discovery (based on hints) * Second callback is for selective discovery expiries * * 
 *    Returns: handle > 0 on success, 0 on error
 */
void *irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb,
			    DISCOVERY_CALLBACK2 expir_clb, void *priv)
{
	irlmp_client_t *client;

	IRDA_DEBUG(1, "%s()\n", __func__);
	IRDA_ASSERT(irlmp != NULL, return NULL;);

	/* Make a new registration */
	client = kmalloc(sizeof(irlmp_client_t), GFP_ATOMIC);
	if (!client) {
		IRDA_DEBUG( 1, "%s(), Unable to kmalloc!\n", __func__);
		return NULL;
	}

	/* Register the details */
	client->hint_mask.word = hint_mask;
	client->disco_callback = disco_clb;
	client->expir_callback = expir_clb;
	client->priv = priv;

	hashbin_insert(irlmp->clients, (irda_queue_t *) client,
		       (long) client, NULL);

	return (void *) client;
}
EXPORT_SYMBOL(irlmp_register_client);

/*
 * Function irlmp_update_client (handle, hint_mask, callback1, callback2)
 *
 *    Updates specified client (handle) with possibly new hint_mask and
 *    callback
 *
 *    Returns: 0 on success, -1 on error
 */
int irlmp_update_client(void *handle, __u16 hint_mask,
			DISCOVERY_CALLBACK1 disco_clb,
			DISCOVERY_CALLBACK2 expir_clb, void *priv)
{
	irlmp_client_t *client;

	if (!handle)
		return -1;

	client = hashbin_lock_find(irlmp->clients, (long) handle, NULL);
	if (!client) {
		IRDA_DEBUG(1, "%s(), Unknown client!\n", __func__);
		return -1;
	}

	client->hint_mask.word = hint_mask;
	client->disco_callback = disco_clb;
	client->expir_callback = expir_clb;
	client->priv = priv;

	return 0;
}
EXPORT_SYMBOL(irlmp_update_client);

/*
 * Function irlmp_unregister_client (handle)
 *
 *    Returns: 0 on success, -1 on error
 *
 */
int irlmp_unregister_client(void *handle)
{
	struct irlmp_client *client;

	IRDA_DEBUG(4, "%s()\n", __func__);

	if (!handle)
		return -1;

	/* Caller may call with invalid handle (it's legal) - Jean II */
	client = hashbin_lock_find(irlmp->clients, (long) handle, NULL);
	if (!client) {
		IRDA_DEBUG(1, "%s(), Unknown client!\n", __func__);
		return -1;
	}

	IRDA_DEBUG(4, "%s(), removing client!\n", __func__);
	hashbin_remove_this(irlmp->clients, (irda_queue_t *) client);
	kfree(client);

	return 0;
}
EXPORT_SYMBOL(irlmp_unregister_client);

/*
 * Function irlmp_slsap_inuse (slsap)
 *
 *    Check if the given source LSAP selector is in use
 *
 * This function is clearly not very efficient. On the mitigating side, the
 * stack make sure that in 99% of the cases, we are called only once
 * for each socket allocation. We could probably keep a bitmap
 * of the allocated LSAP, but I'm not sure the complexity is worth it.
 * Jean II
 */
static int irlmp_slsap_inuse(__u8 slsap_sel)
{
	struct lsap_cb *self;
	struct lap_cb *lap;
	unsigned long flags;

	IRDA_ASSERT(irlmp != NULL, return TRUE;);
	IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return TRUE;);
	IRDA_ASSERT(slsap_sel != LSAP_ANY, return TRUE;);

	IRDA_DEBUG(4, "%s()\n", __func__);

#ifdef CONFIG_IRDA_ULTRA
	/* Accept all bindings to the connectionless LSAP */
	if (slsap_sel == LSAP_CONNLESS)
		return FALSE;
#endif /* CONFIG_IRDA_ULTRA */

	/* Valid values are between 0x00 and LSAP_MAX (0x6F) */
	if (slsap_sel > LSAP_MAX)
		return TRUE;

	/*
	 * Check if slsap is already in use. To do this we have to loop over
	 * every IrLAP connection and check every LSAP associated with each
	 * the connection.
	 */
	spin_lock_irqsave_nested(&irlmp->links->hb_spinlock, flags,
			SINGLE_DEPTH_NESTING);
	lap = (struct lap_cb *) hashbin_get_first(irlmp->links);
	while (lap != NULL) {
		IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, goto errlap;);

		/* Careful for priority inversions here !
		 * irlmp->links is never taken while another IrDA
		 * spinlock is held, so we are safe. Jean II */
		spin_lock(&lap->lsaps->hb_spinlock);

		/* For this IrLAP, check all the LSAPs */
		self = (struct lsap_cb *) hashbin_get_first(lap->lsaps);
		while (self != NULL) {
			IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC,
				    goto errlsap;);

			if ((self->slsap_sel == slsap_sel)) {
				IRDA_DEBUG(4, "Source LSAP selector=%02x in use\n",
					   self->slsap_sel);
				goto errlsap;
			}
			self = (struct lsap_cb*) hashbin_get_next(lap->lsaps);
		}
		spin_unlock(&lap->lsaps->hb_spinlock);

		/* Next LAP */
		lap = (struct lap_cb *) hashbin_get_next(irlmp->links);
	}
	spin_unlock_irqrestore(&irlmp->links->hb_spinlock, flags);

	/*
	 * Server sockets are typically waiting for connections and
	 * therefore reside in the unconnected list. We don't want
	 * to give out their LSAPs for obvious reasons...
	 * Jean II */
	spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags);

	self = (struct lsap_cb *) hashbin_get_first(irlmp->unconnected_lsaps);
	while (self != NULL) {
		IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, goto erruncon;);
		if ((self->slsap_sel == slsap_sel)) {
			IRDA_DEBUG(4, "Source LSAP selector=%02x in use (unconnected)\n",
				   self->slsap_sel);
			goto erruncon;
		}
		self = (struct lsap_cb*) hashbin_get_next(irlmp->unconnected_lsaps);
	}
	spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags);

	return FALSE;

	/* Error exit from within one of the two nested loops.
	 * Make sure we release the right spinlock in the right order.
	 * Jean II */
errlsap:
	spin_unlock(&lap->lsaps->hb_spinlock);
IRDA_ASSERT_LABEL(errlap:)
	spin_unlock_irqrestore(&irlmp->links->hb_spinlock, flags);
	return TRUE;

	/* Error exit from within the unconnected loop.
	 * Just one spinlock to release... Jean II */
erruncon:
	spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags);
	return TRUE;
}

/*
 * Function irlmp_find_free_slsap ()
 *
 *    Find a free source LSAP to use.
This function is called if the service * user has requested a source LSAP equal to LM_ANY */ static __u8 irlmp_find_free_slsap(void) { __u8 lsap_sel; int wrapped = 0; IRDA_ASSERT(irlmp != NULL, return -1;); IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return -1;); /* Most users don't really care which LSAPs they are given, * and therefore we automatically give them a free LSAP. * This function try to find a suitable LSAP, i.e. which is * not in use and is within the acceptable range. Jean II */ do { /* Always increment to LSAP number before using it. * In theory, we could reuse the last LSAP number, as long * as it is no longer in use. Some IrDA stack do that. * However, the previous socket may be half closed, i.e. * we closed it, we think it's no longer in use, but the * other side did not receive our close and think it's * active and still send data on it. * This is similar to what is done with PIDs and TCP ports. * Also, this reduce the number of calls to irlmp_slsap_inuse() * which is an expensive function to call. * Jean II */ irlmp->last_lsap_sel++; /* Check if we need to wraparound (0x70-0x7f are reserved) */ if (irlmp->last_lsap_sel > LSAP_MAX) { /* 0x00-0x10 are also reserved for well know ports */ irlmp->last_lsap_sel = 0x10; /* Make sure we terminate the loop */ if (wrapped++) { IRDA_ERROR("%s: no more free LSAPs !\n", __func__); return 0; } } /* If the LSAP is in use, try the next one. * Despite the autoincrement, we need to check if the lsap * is really in use or not, first because LSAP may be * directly allocated in irlmp_open_lsap(), and also because * we may wraparound on old sockets. Jean II */ } while (irlmp_slsap_inuse(irlmp->last_lsap_sel)); /* Got it ! 
*/ lsap_sel = irlmp->last_lsap_sel; IRDA_DEBUG(4, "%s(), found free lsap_sel=%02x\n", __func__, lsap_sel); return lsap_sel; } /* * Function irlmp_convert_lap_reason (lap_reason) * * Converts IrLAP disconnect reason codes to IrLMP disconnect reason * codes * */ LM_REASON irlmp_convert_lap_reason( LAP_REASON lap_reason) { int reason = LM_LAP_DISCONNECT; switch (lap_reason) { case LAP_DISC_INDICATION: /* Received a disconnect request from peer */ IRDA_DEBUG( 1, "%s(), LAP_DISC_INDICATION\n", __func__); reason = LM_USER_REQUEST; break; case LAP_NO_RESPONSE: /* To many retransmits without response */ IRDA_DEBUG( 1, "%s(), LAP_NO_RESPONSE\n", __func__); reason = LM_LAP_DISCONNECT; break; case LAP_RESET_INDICATION: IRDA_DEBUG( 1, "%s(), LAP_RESET_INDICATION\n", __func__); reason = LM_LAP_RESET; break; case LAP_FOUND_NONE: case LAP_MEDIA_BUSY: case LAP_PRIMARY_CONFLICT: IRDA_DEBUG(1, "%s(), LAP_FOUND_NONE, LAP_MEDIA_BUSY or LAP_PRIMARY_CONFLICT\n", __func__); reason = LM_CONNECT_FAILURE; break; default: IRDA_DEBUG(1, "%s(), Unknown IrLAP disconnect reason %d!\n", __func__, lap_reason); reason = LM_LAP_DISCONNECT; break; } return reason; } #ifdef CONFIG_PROC_FS struct irlmp_iter_state { hashbin_t *hashbin; }; #define LSAP_START_TOKEN ((void *)1) #define LINK_START_TOKEN ((void *)2) static void *irlmp_seq_hb_idx(struct irlmp_iter_state *iter, loff_t *off) { void *element; spin_lock_irq(&iter->hashbin->hb_spinlock); for (element = hashbin_get_first(iter->hashbin); element != NULL; element = hashbin_get_next(iter->hashbin)) { if (!off || *off-- == 0) { /* NB: hashbin left locked */ return element; } } spin_unlock_irq(&iter->hashbin->hb_spinlock); iter->hashbin = NULL; return NULL; } static void *irlmp_seq_start(struct seq_file *seq, loff_t *pos) { struct irlmp_iter_state *iter = seq->private; void *v; loff_t off = *pos; iter->hashbin = NULL; if (off-- == 0) return LSAP_START_TOKEN; iter->hashbin = irlmp->unconnected_lsaps; v = irlmp_seq_hb_idx(iter, &off); if (v) return v; if 
(off-- == 0) return LINK_START_TOKEN; iter->hashbin = irlmp->links; return irlmp_seq_hb_idx(iter, &off); } static void *irlmp_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct irlmp_iter_state *iter = seq->private; ++*pos; if (v == LSAP_START_TOKEN) { /* start of list of lsaps */ iter->hashbin = irlmp->unconnected_lsaps; v = irlmp_seq_hb_idx(iter, NULL); return v ? v : LINK_START_TOKEN; } if (v == LINK_START_TOKEN) { /* start of list of links */ iter->hashbin = irlmp->links; return irlmp_seq_hb_idx(iter, NULL); } v = hashbin_get_next(iter->hashbin); if (v == NULL) { /* no more in this hash bin */ spin_unlock_irq(&iter->hashbin->hb_spinlock); if (iter->hashbin == irlmp->unconnected_lsaps) v = LINK_START_TOKEN; iter->hashbin = NULL; } return v; } static void irlmp_seq_stop(struct seq_file *seq, void *v) { struct irlmp_iter_state *iter = seq->private; if (iter->hashbin) spin_unlock_irq(&iter->hashbin->hb_spinlock); } static int irlmp_seq_show(struct seq_file *seq, void *v) { const struct irlmp_iter_state *iter = seq->private; struct lsap_cb *self = v; if (v == LSAP_START_TOKEN) seq_puts(seq, "Unconnected LSAPs:\n"); else if (v == LINK_START_TOKEN) seq_puts(seq, "\nRegistered Link Layers:\n"); else if (iter->hashbin == irlmp->unconnected_lsaps) { self = v; IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -EINVAL; ); seq_printf(seq, "lsap state: %s, ", irlsap_state[ self->lsap_state]); seq_printf(seq, "slsap_sel: %#02x, dlsap_sel: %#02x, ", self->slsap_sel, self->dlsap_sel); seq_printf(seq, "(%s)", self->notify.name); seq_printf(seq, "\n"); } else if (iter->hashbin == irlmp->links) { struct lap_cb *lap = v; seq_printf(seq, "lap state: %s, ", irlmp_state[lap->lap_state]); seq_printf(seq, "saddr: %#08x, daddr: %#08x, ", lap->saddr, lap->daddr); seq_printf(seq, "num lsaps: %d", HASHBIN_GET_SIZE(lap->lsaps)); seq_printf(seq, "\n"); /* Careful for priority inversions here ! 
* All other uses of attrib spinlock are independent of * the object spinlock, so we are safe. Jean II */ spin_lock(&lap->lsaps->hb_spinlock); seq_printf(seq, "\n Connected LSAPs:\n"); for (self = (struct lsap_cb *) hashbin_get_first(lap->lsaps); self != NULL; self = (struct lsap_cb *)hashbin_get_next(lap->lsaps)) { IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, goto outloop;); seq_printf(seq, " lsap state: %s, ", irlsap_state[ self->lsap_state]); seq_printf(seq, "slsap_sel: %#02x, dlsap_sel: %#02x, ", self->slsap_sel, self->dlsap_sel); seq_printf(seq, "(%s)", self->notify.name); seq_putc(seq, '\n'); } IRDA_ASSERT_LABEL(outloop:) spin_unlock(&lap->lsaps->hb_spinlock); seq_putc(seq, '\n'); } else return -EINVAL; return 0; } static const struct seq_operations irlmp_seq_ops = { .start = irlmp_seq_start, .next = irlmp_seq_next, .stop = irlmp_seq_stop, .show = irlmp_seq_show, }; static int irlmp_seq_open(struct inode *inode, struct file *file) { IRDA_ASSERT(irlmp != NULL, return -EINVAL;); return seq_open_private(file, &irlmp_seq_ops, sizeof(struct irlmp_iter_state)); } const struct file_operations irlmp_seq_fops = { .owner = THIS_MODULE, .open = irlmp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; #endif /* PROC_FS */
gpl-2.0
XMoon/XKernel
drivers/media/video/hp3a/hp3a_queue.c
38
5837
/*
 * drivers/media/video/hp3a/hp3a_queue.c
 *
 * HP Imaging/3A Driver : hp3a queue functionality implementation.
 *
 * Copyright (C) 2008-2009 Hewlett-Packard Co.
 *
 * Contributors:
 *	Tanvir Islam <tanvir.islam@hp.com>
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include "hp3a_common.h"
#include "hp3a_queue.h"

/**
 * __hp3a_enqueue_locked - Copy one element into the ring buffer.
 * @queue: Queue to append to; caller must hold queue->queue_lock.
 * @element: Source buffer of queue->element_size bytes.
 *
 * Shared by hp3a_enqueue() and hp3a_enqueue_irqsave() so the two entry
 * points differ only in how they take the lock.
 *
 * Return 0 on success, -1 if the queue is full.
 **/
static int __hp3a_enqueue_locked(struct hp3a_queue *queue, void *element)
{
	u32 offset;
	void *item;

	if (queue->queue_count >= queue->queue_size)
		return -1;

	offset = queue->write_index * queue->element_size;
	item = (queue->data + offset);

	queue->queue_count += 1;
	/* Advance the write cursor with wraparound. */
	queue->write_index = (queue->write_index + 1) % queue->queue_size;

	/* copy data from element to queue. */
	memcpy(item, element, queue->element_size);
	return 0;
}

/**
 * __hp3a_dequeue_locked - Copy the oldest element out of the ring buffer.
 * @queue: Queue to remove from; caller must hold queue->queue_lock.
 * @element: Destination buffer of at least queue->element_size bytes.
 *
 * Return 0 on success, -1 if the queue is empty.
 **/
static int __hp3a_dequeue_locked(struct hp3a_queue *queue, void *element)
{
	u32 offset;
	void *item;

	if (queue->queue_count <= 0)
		return -1;

	offset = queue->read_index * queue->element_size;
	item = (queue->data + offset);

	queue->queue_count -= 1;
	/* Advance the read cursor with wraparound. */
	queue->read_index = (queue->read_index + 1) % queue->queue_size;

	/* copy data from queue to element. */
	memcpy(element, item, queue->element_size);
	return 0;
}

/**
 * __hp3a_flush_locked - Reset the queue to empty.
 * @queue: Queue to reset; caller must hold queue->queue_lock.
 *
 * Element storage is retained; only the cursors and count are cleared.
 **/
static void __hp3a_flush_locked(struct hp3a_queue *queue)
{
	if (queue->queue_count > 0) {
		queue->write_index = 0;
		queue->read_index = 0;
		queue->queue_count = 0;
	}
}

/**
 * hp3a_initialize_queue - Initializes the quere for use.
 * @queue: Pointer to queue structure to be initialized.
 * @queue_size: size of queue (number of elements).
 * @element_size: size of queue elements in bytes.
 *
 * Allocates queue_size * element_size bytes of backing storage and
 * initializes the cursors and lock.
 *
 * Return 0 on success, -1 on invalid sizes or allocation failure.
 **/
int hp3a_initialize_queue(struct hp3a_queue *queue, int queue_size,
			unsigned int element_size)
{
	/* Reject non-positive sizes and guard the multiplication against
	 * unsigned overflow before handing the product to kmalloc(). */
	if (queue_size > 0 && element_size > 0 &&
		(unsigned int)queue_size <= (~0U / element_size)) {
		queue->data = kmalloc(queue_size * element_size, GFP_KERNEL);
		if (queue->data != NULL) {
			queue->write_index = 0;
			queue->read_index = 0;
			queue->queue_count = 0;
			queue->queue_size = queue_size;
			queue->element_size = element_size;
			spin_lock_init(&queue->queue_lock);
			return 0;
		} else {
			printk(KERN_ERR "hp3a: Error allocating memory!\n");
		}
	}

	return -1;
}

/**
 * hp3a_deinitialize_queue - Deinitializes the quere.
 * @queue: Pointer to queue structure to be deinitialized.
 *
 * Frees the backing storage and zeroes all bookkeeping.
 *
 * No return value.
 **/
void hp3a_deinitialize_queue(struct hp3a_queue *queue)
{
	spin_lock(&queue->queue_lock);
	if (queue->queue_size > 0) {
		queue->write_index = 0;
		queue->read_index = 0;
		queue->queue_count = 0;
		queue->queue_size = 0;
		kfree(queue->data);
		queue->data = NULL;
	}
	spin_unlock(&queue->queue_lock);
}

/**
 * hp3a_enqueue - Queues one element into the queue.
 * @queue: Pointer to queue structure to equeued to.
 * @element: Pointer to a element to be queued.
 *
 * Process-context variant; use hp3a_enqueue_irqsave() when the queue is
 * also touched from interrupt context.
 *
 * Return 0 on success, -1 otherwise.
 **/
int hp3a_enqueue(struct hp3a_queue *queue, void *element)
{
	int ret;

	spin_lock(&queue->queue_lock);
	ret = __hp3a_enqueue_locked(queue, element);
	spin_unlock(&queue->queue_lock);

	return ret;
}

/**
 * hp3a_enqueue_irqsave - Queues one element into the queue.
 * @queue: Pointer to queue structure to equeued to.
 * @element: Pointer to a element to be queued.
 *
 * Interrupt-safe variant: disables local interrupts while holding the lock.
 *
 * Return 0 on success, -1 otherwise.
 **/
int hp3a_enqueue_irqsave(struct hp3a_queue *queue, void *element)
{
	unsigned long irqflags = 0;
	int ret;

	spin_lock_irqsave(&queue->queue_lock, irqflags);
	ret = __hp3a_enqueue_locked(queue, element);
	spin_unlock_irqrestore(&queue->queue_lock, irqflags);

	return ret;
}

/**
 * hp3a_dequeue - Dequeues one element from the queue.
 * @queue: Pointer to queue structure to dequeued from.
 * @element: Pointer to a pointer of type element to copy
 *           dequeued element.
 *
 * Return 0 on success, -1 otherwise.
 **/
int hp3a_dequeue(struct hp3a_queue *queue, void *element)
{
	int ret;

	spin_lock(&queue->queue_lock);
	ret = __hp3a_dequeue_locked(queue, element);
	spin_unlock(&queue->queue_lock);

	return ret;
}

/**
 * hp3a_dequeue_irqsave - Dequeues one element from the queue.
 * @queue: Pointer to queue structure to dequeued from.
 * @element: Pointer to a pointer of type element to copy
 *           dequeued element.
 *
 * Interrupt-safe variant: disables local interrupts while holding the lock.
 *
 * Return 0 on success, -1 otherwise.
 **/
int hp3a_dequeue_irqsave(struct hp3a_queue *queue, void *element)
{
	unsigned long irqflags = 0;
	int ret;

	spin_lock_irqsave(&queue->queue_lock, irqflags);
	ret = __hp3a_dequeue_locked(queue, element);
	spin_unlock_irqrestore(&queue->queue_lock, irqflags);

	return ret;
}

/**
 * hp3a_flush_queue - Flushes all elements from queue.
 * @queue: Pointer to queue structure to be flushed.
 *
 * No return value.
 **/
void hp3a_flush_queue(struct hp3a_queue *queue)
{
	spin_lock(&queue->queue_lock);
	__hp3a_flush_locked(queue);
	spin_unlock(&queue->queue_lock);
}

/**
 * hp3a_flush_queue_irqsave - Flushes all elements from queue.
 * @queue: Pointer to queue structure to be flushed.
 *
 * Interrupt-safe variant: disables local interrupts while holding the lock.
 *
 * No return value.
 **/
void hp3a_flush_queue_irqsave(struct hp3a_queue *queue)
{
	unsigned long irqflags = 0;

	spin_lock_irqsave(&queue->queue_lock, irqflags);
	__hp3a_flush_locked(queue);
	spin_unlock_irqrestore(&queue->queue_lock, irqflags);
}
gpl-2.0
antonblanchard/linux
arch/sh/lib/delay.c
38
1127
// SPDX-License-Identifier: GPL-2.0 /* * Precise Delay Loops for SuperH * * Copyright (C) 1999 Niibe Yutaka & Kaz Kojima */ #include <linux/sched.h> #include <linux/delay.h> void __delay(unsigned long loops) { __asm__ __volatile__( /* * ST40-300 appears to have an issue with this code, * normally taking two cycles each loop, as with all * other SH variants. If however the branch and the * delay slot straddle an 8 byte boundary, this increases * to 3 cycles. * This align directive ensures this doesn't occur. */ ".balign 8\n\t" "tst %0, %0\n\t" "1:\t" "bf/s 1b\n\t" " dt %0" : "=r" (loops) : "0" (loops) : "t"); } EXPORT_SYMBOL(__delay); inline void __const_udelay(unsigned long xloops) { xloops *= 4; __asm__("dmulu.l %0, %2\n\t" "sts mach, %0" : "=r" (xloops) : "0" (xloops), "r" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)) : "macl", "mach"); __delay(++xloops); } void __udelay(unsigned long usecs) { __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */ } void __ndelay(unsigned long nsecs) { __const_udelay(nsecs * 0x00000005); }
gpl-2.0
ownhere/samsung-kernel-sgs3-ownhere
drivers/gpu/drm/i915/intel_sdvo.c
38
79734
/* * Copyright 2006 Dave Airlie <airlied@linux.ie> * Copyright © 2006-2007 Intel Corporation * Jesse Barnes <jesse.barnes@intel.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Eric Anholt <eric@anholt.net> */ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/delay.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" #include "drm_edid.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" #include "intel_sdvo_regs.h" #define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) #define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) #define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1) #define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0) #define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ SDVO_TV_MASK) #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) #define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) #define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK)) static const char *tv_format_names[] = { "NTSC_M" , "NTSC_J" , "NTSC_443", "PAL_B" , "PAL_D" , "PAL_G" , "PAL_H" , "PAL_I" , "PAL_M" , "PAL_N" , "PAL_NC" , "PAL_60" , "SECAM_B" , "SECAM_D" , "SECAM_G" , "SECAM_K" , "SECAM_K1", "SECAM_L" , "SECAM_60" }; #define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) struct intel_sdvo { struct intel_encoder base; struct i2c_adapter *i2c; u8 slave_addr; struct i2c_adapter ddc; /* Register for the SDVO device: SDVOB or SDVOC */ int sdvo_reg; /* Active outputs controlled by this SDVO output */ uint16_t controlled_output; /* * Capabilities of the SDVO device returned by * i830_sdvo_get_capabilities() */ struct intel_sdvo_caps caps; /* Pixel clock limitations reported by the SDVO device, in kHz */ int pixel_clock_min, pixel_clock_max; /* * For multiple function SDVO device, * this is for current attached outputs. */ uint16_t attached_output; /* * Hotplug activation bits for this device */ uint8_t hotplug_active[2]; /** * This is used to select the color range of RBG outputs in HDMI mode. 
* It is only valid when using TMDS encoding and 8 bit per color mode. */ uint32_t color_range; /** * This is set if we're going to treat the device as TV-out. * * While we have these nice friendly flags for output types that ought * to decide this for us, the S-Video output on our HDMI+S-Video card * shows up as RGB1 (VGA). */ bool is_tv; /* This is for current tv format name */ int tv_format_index; /** * This is set if we treat the device as HDMI, instead of DVI. */ bool is_hdmi; bool has_hdmi_monitor; bool has_hdmi_audio; /** * This is set if we detect output of sdvo device as LVDS and * have a valid fixed mode to use with the panel. */ bool is_lvds; /** * This is sdvo fixed pannel mode pointer */ struct drm_display_mode *sdvo_lvds_fixed_mode; /* DDC bus used by this SDVO encoder */ uint8_t ddc_bus; /* Input timings for adjusted_mode */ struct intel_sdvo_dtd input_dtd; }; struct intel_sdvo_connector { struct intel_connector base; /* Mark the type of connector */ uint16_t output_flag; enum hdmi_force_audio force_audio; /* This contains all current supported TV format */ u8 tv_format_supported[TV_FORMAT_NUM]; int format_supported_num; struct drm_property *tv_format; /* add the property for the SDVO-TV */ struct drm_property *left; struct drm_property *right; struct drm_property *top; struct drm_property *bottom; struct drm_property *hpos; struct drm_property *vpos; struct drm_property *contrast; struct drm_property *saturation; struct drm_property *hue; struct drm_property *sharpness; struct drm_property *flicker_filter; struct drm_property *flicker_filter_adaptive; struct drm_property *flicker_filter_2d; struct drm_property *tv_chroma_filter; struct drm_property *tv_luma_filter; struct drm_property *dot_crawl; /* add the property for the SDVO-TV/LVDS */ struct drm_property *brightness; /* Add variable to record current setting for the above property */ u32 left_margin, right_margin, top_margin, bottom_margin; /* this is to get the range of margin.*/ u32 max_hscan, 
max_vscan; u32 max_hpos, cur_hpos; u32 max_vpos, cur_vpos; u32 cur_brightness, max_brightness; u32 cur_contrast, max_contrast; u32 cur_saturation, max_saturation; u32 cur_hue, max_hue; u32 cur_sharpness, max_sharpness; u32 cur_flicker_filter, max_flicker_filter; u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive; u32 cur_flicker_filter_2d, max_flicker_filter_2d; u32 cur_tv_chroma_filter, max_tv_chroma_filter; u32 cur_tv_luma_filter, max_tv_luma_filter; u32 cur_dot_crawl, max_dot_crawl; }; static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder) { return container_of(encoder, struct intel_sdvo, base.base); } static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) { return container_of(intel_attached_encoder(connector), struct intel_sdvo, base); } static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) { return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base); } static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags); static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, int type); static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector); /** * Writes the SDVOB or SDVOC with the given value, but always writes both * SDVOB and SDVOC to work around apparent hardware issues (according to * comments in the BIOS). */ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) { struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 bval = val, cval = val; int i; if (intel_sdvo->sdvo_reg == PCH_SDVOB) { I915_WRITE(intel_sdvo->sdvo_reg, val); I915_READ(intel_sdvo->sdvo_reg); return; } if (intel_sdvo->sdvo_reg == SDVOB) { cval = I915_READ(SDVOC); } else { bval = I915_READ(SDVOB); } /* * Write the registers twice for luck. 
Sometimes, * writing them only once doesn't appear to 'stick'. * The BIOS does this too. Yay, magic */ for (i = 0; i < 2; i++) { I915_WRITE(SDVOB, bval); I915_READ(SDVOB); I915_WRITE(SDVOC, cval); I915_READ(SDVOC); } } static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) { struct i2c_msg msgs[] = { { .addr = intel_sdvo->slave_addr, .flags = 0, .len = 1, .buf = &addr, }, { .addr = intel_sdvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = ch, } }; int ret; if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2) return true; DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); return false; } #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} /** Mapping of command numbers to names, for debug output */ static const struct _sdvo_cmd_name { u8 cmd; const char *name; } sdvo_cmd_names[] = { SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), /* Add the op code for SDVO enhancements */ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS), 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER), /* HDMI op code */ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO), 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), }; #define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) #define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC") static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) { int i; DRM_DEBUG_KMS("%s: W: %02X ", SDVO_NAME(intel_sdvo), cmd); for (i = 0; i < args_len; i++) DRM_LOG_KMS("%02X ", ((u8 *)args)[i]); for (; i < 8; i++) DRM_LOG_KMS(" "); for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) { if (cmd == sdvo_cmd_names[i].cmd) { DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name); break; } } if (i == ARRAY_SIZE(sdvo_cmd_names)) DRM_LOG_KMS("(%02X)", cmd); DRM_LOG_KMS("\n"); } static const char *cmd_status_names[] = { "Power on", "Success", "Not supported", "Invalid arg", "Pending", "Target not specified", "Scaling not supported" }; static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) { u8 buf[args_len*2 + 2], status; struct i2c_msg msgs[args_len + 3]; int i, ret; intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); for (i = 0; i < args_len; i++) { msgs[i].addr = intel_sdvo->slave_addr; msgs[i].flags = 0; msgs[i].len = 2; msgs[i].buf = buf + 2 *i; buf[2*i + 0] = SDVO_I2C_ARG_0 - i; buf[2*i + 1] = ((u8*)args)[i]; } msgs[i].addr = intel_sdvo->slave_addr; msgs[i].flags = 0; msgs[i].len = 2; msgs[i].buf = buf + 2*i; buf[2*i + 0] = SDVO_I2C_OPCODE; buf[2*i + 1] = cmd; /* the following two are to read the response */ status = SDVO_I2C_CMD_STATUS; msgs[i+1].addr = intel_sdvo->slave_addr; msgs[i+1].flags = 0; msgs[i+1].len = 1; msgs[i+1].buf = &status; msgs[i+2].addr = intel_sdvo->slave_addr; msgs[i+2].flags = I2C_M_RD; msgs[i+2].len = 1; msgs[i+2].buf = &status; ret = 
i2c_transfer(intel_sdvo->i2c, msgs, i+3); if (ret < 0) { DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); return false; } if (ret != i+3) { /* failure in I2C transfer */ DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3); return false; } return true; } static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response, int response_len) { u8 retry = 5; u8 status; int i; DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); /* * The documentation states that all commands will be * processed within 15µs, and that we need only poll * the status byte a maximum of 3 times in order for the * command to be complete. * * Check 5 times in case the hardware failed to read the docs. */ if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, &status)) goto log_fail; while (status == SDVO_CMD_STATUS_PENDING && retry--) { udelay(15); if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, &status)) goto log_fail; } if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) DRM_LOG_KMS("(%s)", cmd_status_names[status]); else DRM_LOG_KMS("(??? %d)", status); if (status != SDVO_CMD_STATUS_SUCCESS) goto log_fail; /* Read the command response */ for (i = 0; i < response_len; i++) { if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_RETURN_0 + i, &((u8 *)response)[i])) goto log_fail; DRM_LOG_KMS(" %02X", ((u8 *)response)[i]); } DRM_LOG_KMS("\n"); return true; log_fail: DRM_LOG_KMS("... 
failed\n");
	return false;
}

/* Map a dotclock (kHz) to the SDVO pixel multiplier needed to keep the
 * SDVO bus clock within range: 1x at/above 100 MHz, 2x at/above 50 MHz,
 * 4x below that.
 */
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
{
	if (mode->clock >= 100000)
		return 1;
	else if (mode->clock >= 50000)
		return 2;
	else
		return 4;
}

/* Route the encoder's DDC pass-through to the bus selected by @ddc_bus. */
static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
					      u8 ddc_bus)
{
	/* This must be the immediately preceding write before the i2c xfer */
	return intel_sdvo_write_cmd(intel_sdvo,
				    SDVO_CMD_SET_CONTROL_BUS_SWITCH,
				    &ddc_bus, 1);
}

/* Issue a "set" command: write @cmd plus @len argument bytes, then read
 * back only the status byte (no response payload is expected).
 */
static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo,
				 u8 cmd, const void *data, int len)
{
	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
		return false;

	return intel_sdvo_read_response(intel_sdvo, NULL, 0);
}

/* Issue a "get" command: write @cmd with no arguments and read a
 * @len-byte response payload into @value.
 */
static bool intel_sdvo_get_value(struct intel_sdvo *intel_sdvo,
				 u8 cmd, void *value, int len)
{
	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
		return false;

	return intel_sdvo_read_response(intel_sdvo, value, len);
}

/* Target input 0 for subsequent input-related commands; the zeroed args
 * struct selects the first input.
 */
static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
{
	struct intel_sdvo_set_target_input_args targets = {0};
	return intel_sdvo_set_value(intel_sdvo,
				    SDVO_CMD_SET_TARGET_INPUT,
				    &targets, sizeof(targets));
}

/**
 * Return whether each input is trained.
 *
 * This function is making an assumption about the layout of the response,
 * which should be checked against the docs.
 */
static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo,
					  bool *input_1, bool *input_2)
{
	struct intel_sdvo_get_trained_inputs_response response;

	/* Response is assumed to be a single byte of packed trained flags. */
	BUILD_BUG_ON(sizeof(response) != 1);
	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
				  &response, sizeof(response)))
		return false;

	*input_1 = response.input0_trained;
	*input_2 = response.input1_trained;
	return true;
}

/* Activate exactly the outputs in the @outputs bitmask. */
static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
					  u16 outputs)
{
	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ACTIVE_OUTPUTS,
				    &outputs, sizeof(outputs));
}

/* Translate a DRM DPMS mode into the SDVO encoder power-state command.
 * Unknown modes fall through with the initial ON state.
 */
static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
					       int mode)
{
	u8 state = SDVO_ENCODER_STATE_ON;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		state = SDVO_ENCODER_STATE_ON;
		break;
	case DRM_MODE_DPMS_STANDBY:
		state = SDVO_ENCODER_STATE_STANDBY;
		break;
	case DRM_MODE_DPMS_SUSPEND:
		state = SDVO_ENCODER_STATE_SUSPEND;
		break;
	case DRM_MODE_DPMS_OFF:
		state = SDVO_ENCODER_STATE_OFF;
		break;
	}

	return intel_sdvo_set_value(intel_sdvo,
				    SDVO_CMD_SET_ENCODER_POWER_STATE,
				    &state, sizeof(state));
}

/* Query the supported input pixel clock range; results are returned
 * in kHz (the device reports units of 10 kHz).
 */
static bool
intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
				       int *clock_min,
				       int *clock_max)
{
	struct intel_sdvo_pixel_clock_range clocks;

	BUILD_BUG_ON(sizeof(clocks) != 4);
	if (!intel_sdvo_get_value(intel_sdvo,
				  SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
				  &clocks, sizeof(clocks)))
		return false;

	/* Convert the values from units of 10 kHz to kHz.
	 */
	*clock_min = clocks.min * 10;
	*clock_max = clocks.max * 10;
	return true;
}

/* Target the outputs in @outputs for subsequent output-related commands. */
static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo,
					 u16 outputs)
{
	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_TARGET_OUTPUT,
				    &outputs, sizeof(outputs));
}

/* Send a two-part DTD; @cmd is the PART1 opcode and @cmd + 1 is assumed
 * to be the matching PART2 opcode.
 */
static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
				  struct intel_sdvo_dtd *dtd)
{
	return intel_sdvo_set_value(intel_sdvo, cmd,
				    &dtd->part1, sizeof(dtd->part1)) &&
		intel_sdvo_set_value(intel_sdvo, cmd + 1,
				     &dtd->part2, sizeof(dtd->part2));
}

/* Program the input timing DTD. */
static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
					struct intel_sdvo_dtd *dtd)
{
	return intel_sdvo_set_timing(intel_sdvo,
				     SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
}

/* Program the output timing DTD. */
static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
					 struct intel_sdvo_dtd *dtd)
{
	return intel_sdvo_set_timing(intel_sdvo,
				     SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}

/* Ask the encoder to compute its preferred input timing for the given
 * clock and size; scaling is requested when an LVDS panel's fixed mode
 * differs from the requested size.
 */
static bool
intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
					 uint16_t clock,
					 uint16_t width,
					 uint16_t height)
{
	struct intel_sdvo_preferred_input_timing_args args;

	memset(&args, 0, sizeof(args));
	args.clock = clock;
	args.width = width;
	args.height = height;
	args.interlace = 0;

	if (intel_sdvo->is_lvds &&
	   (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
	    intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
		args.scaled = 1;

	return intel_sdvo_set_value(intel_sdvo,
				    SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
				    &args, sizeof(args));
}

/* Read back the preferred input timing computed by the command above,
 * as a two-part (8 + 8 byte) DTD.
 */
static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
						  struct intel_sdvo_dtd *dtd)
{
	BUILD_BUG_ON(sizeof(dtd->part1) != 8);
	BUILD_BUG_ON(sizeof(dtd->part2) != 8);
	return intel_sdvo_get_value(intel_sdvo,
				    SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
				    &dtd->part1, sizeof(dtd->part1)) &&
		intel_sdvo_get_value(intel_sdvo,
				     SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
				     &dtd->part2, sizeof(dtd->part2));
}

/* Program the pixel clock rate multiplier (1x/2x/4x) on the encoder. */
static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
{
	return intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); } static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, const struct drm_display_mode *mode) { uint16_t width, height; uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; uint16_t h_sync_offset, v_sync_offset; width = mode->crtc_hdisplay; height = mode->crtc_vdisplay; /* do some mode translations */ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start; h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start; v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; dtd->part1.clock = mode->clock / 10; dtd->part1.h_active = width & 0xff; dtd->part1.h_blank = h_blank_len & 0xff; dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | ((h_blank_len >> 8) & 0xf); dtd->part1.v_active = height & 0xff; dtd->part1.v_blank = v_blank_len & 0xff; dtd->part1.v_high = (((height >> 8) & 0xf) << 4) | ((v_blank_len >> 8) & 0xf); dtd->part2.h_sync_off = h_sync_offset & 0xff; dtd->part2.h_sync_width = h_sync_len & 0xff; dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | (v_sync_len & 0xf); dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4); dtd->part2.dtd_flags = 0x18; if (mode->flags & DRM_MODE_FLAG_PHSYNC) dtd->part2.dtd_flags |= 0x2; if (mode->flags & DRM_MODE_FLAG_PVSYNC) dtd->part2.dtd_flags |= 0x4; dtd->part2.sdvo_flags = 0; dtd->part2.v_sync_off_high = v_sync_offset & 0xc0; dtd->part2.reserved = 0; } static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, const struct intel_sdvo_dtd *dtd) { mode->hdisplay = dtd->part1.h_active; mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off; mode->hsync_start += 
(dtd->part2.sync_off_width_high & 0xc0) << 2; mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width; mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4; mode->htotal = mode->hdisplay + dtd->part1.h_blank; mode->htotal += (dtd->part1.h_high & 0xf) << 8; mode->vdisplay = dtd->part1.v_active; mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8; mode->vsync_start = mode->vdisplay; mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf; mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2; mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0; mode->vsync_end = mode->vsync_start + (dtd->part2.v_sync_off_width & 0xf); mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4; mode->vtotal = mode->vdisplay + dtd->part1.v_blank; mode->vtotal += (dtd->part1.v_high & 0xf) << 8; mode->clock = dtd->part1.clock * 10; mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); if (dtd->part2.dtd_flags & 0x2) mode->flags |= DRM_MODE_FLAG_PHSYNC; if (dtd->part2.dtd_flags & 0x4) mode->flags |= DRM_MODE_FLAG_PVSYNC; } static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) { struct intel_sdvo_encode encode; BUILD_BUG_ON(sizeof(encode) != 2); return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_SUPP_ENCODE, &encode, sizeof(encode)); } static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, uint8_t mode) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1); } static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, uint8_t mode) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); } #if 0 static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) { int i, j; uint8_t set_buf_index[2]; uint8_t av_split; uint8_t buf_size; uint8_t buf[48]; uint8_t *pos; intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1); for (i = 0; i <= av_split; i++) { set_buf_index[0] = i; set_buf_index[1] = 0; intel_sdvo_write_cmd(encoder, 
SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0); intel_sdvo_read_response(encoder, &buf_size, 1); pos = buf; for (j = 0; j <= buf_size; j += 8) { intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA, NULL, 0); intel_sdvo_read_response(encoder, pos, 8); pos += 8; } } } #endif static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) { struct dip_infoframe avi_if = { .type = DIP_TYPE_AVI, .ver = DIP_VERSION_AVI, .len = DIP_LEN_AVI, }; uint8_t tx_rate = SDVO_HBUF_TX_VSYNC; uint8_t set_buf_index[2] = { 1, 0 }; uint64_t *data = (uint64_t *)&avi_if; unsigned i; intel_dip_infoframe_csum(&avi_if); if (!intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2)) return false; for (i = 0; i < sizeof(avi_if); i += 8) { if (!intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8)) return false; data++; } return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); } static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) { struct intel_sdvo_tv_format format; uint32_t format_map; format_map = 1 << intel_sdvo->tv_format_index; memset(&format, 0, sizeof(format)); memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map))); BUILD_BUG_ON(sizeof(format) != 6); return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_TV_FORMAT, &format, sizeof(format)); } static bool intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo, struct drm_display_mode *mode) { struct intel_sdvo_dtd output_dtd; if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) return false; intel_sdvo_get_dtd_from_mode(&output_dtd, mode); if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) return false; return true; } static bool intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { /* Reset the input timing to the screen. Assume always input 0. 
*/ if (!intel_sdvo_set_target_input(intel_sdvo)) return false; if (!intel_sdvo_create_preferred_input_timing(intel_sdvo, mode->clock / 10, mode->hdisplay, mode->vdisplay)) return false; if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, &intel_sdvo->input_dtd)) return false; intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd); return true; } static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); int multiplier; /* We need to construct preferred input timings based on our * output timings. To do that, we have to set the output * timings, even though this isn't really the right place in * the sequence to do it. Oh well. */ if (intel_sdvo->is_tv) { if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) return false; (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode); } else if (intel_sdvo->is_lvds) { if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, intel_sdvo->sdvo_lvds_fixed_mode)) return false; (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode); } /* Make the CRTC code factor in the SDVO pixel multiplier. The * SDVO device will factor out the multiplier during mode_set. 
*/ multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); intel_mode_set_pixel_multiplier(adjusted_mode, multiplier); return true; } static void intel_sdvo_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); u32 sdvox; struct intel_sdvo_in_out_map in_out; struct intel_sdvo_dtd input_dtd; int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); int rate; if (!mode) return; /* First, set the input mapping for the first input to our controlled * output. This is only correct if we're a single-input device, in * which case the first input is the output from the appropriate SDVO * channel on the motherboard. In a two-input device, the first input * will be SDVOB and the second SDVOC. */ in_out.in0 = intel_sdvo->attached_output; in_out.in1 = 0; intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_IN_OUT_MAP, &in_out, sizeof(in_out)); /* Set the output timings to the screen */ if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) return; /* We have tried to get input timing in mode_fixup, and filled into * adjusted_mode. */ if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { input_dtd = intel_sdvo->input_dtd; } else { /* Set the output timing to the screen */ if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) return; intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd); } /* Set the input timing to the screen. Assume always input 0. 
*/ if (!intel_sdvo_set_target_input(intel_sdvo)) return; if (intel_sdvo->has_hdmi_monitor) { intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); intel_sdvo_set_colorimetry(intel_sdvo, SDVO_COLORIMETRY_RGB256); intel_sdvo_set_avi_infoframe(intel_sdvo); } else intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); if (intel_sdvo->is_tv && !intel_sdvo_set_tv_format(intel_sdvo)) return; (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); switch (pixel_multiplier) { default: case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; } if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate)) return; /* Set the SDVO control regs. */ if (INTEL_INFO(dev)->gen >= 4) { /* The real mode polarity is set by the SDVO commands, using * struct intel_sdvo_dtd. */ sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; if (intel_sdvo->is_hdmi) sdvox |= intel_sdvo->color_range; if (INTEL_INFO(dev)->gen < 5) sdvox |= SDVO_BORDER_ENABLE; } else { sdvox = I915_READ(intel_sdvo->sdvo_reg); switch (intel_sdvo->sdvo_reg) { case SDVOB: sdvox &= SDVOB_PRESERVE_MASK; break; case SDVOC: sdvox &= SDVOC_PRESERVE_MASK; break; } sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; } if (INTEL_PCH_TYPE(dev) >= PCH_CPT) sdvox |= TRANSCODER_CPT(intel_crtc->pipe); else sdvox |= TRANSCODER(intel_crtc->pipe); if (intel_sdvo->has_hdmi_audio) sdvox |= SDVO_AUDIO_ENABLE; if (INTEL_INFO(dev)->gen >= 4) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { /* done in crtc_mode_set as it lives inside the dpll register */ } else { sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; } if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL && INTEL_INFO(dev)->gen < 5) sdvox |= SDVO_STALL_SELECT; intel_sdvo_write_sdvox(intel_sdvo, sdvox); } static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct 
drm_i915_private *dev_priv = dev->dev_private; struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); u32 temp; if (mode != DRM_MODE_DPMS_ON) { intel_sdvo_set_active_outputs(intel_sdvo, 0); if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, mode); if (mode == DRM_MODE_DPMS_OFF) { temp = I915_READ(intel_sdvo->sdvo_reg); if ((temp & SDVO_ENABLE) != 0) { intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); } } } else { bool input1, input2; int i; u8 status; temp = I915_READ(intel_sdvo->sdvo_reg); if ((temp & SDVO_ENABLE) == 0) intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); for (i = 0; i < 2; i++) intel_wait_for_vblank(dev, intel_crtc->pipe); status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); /* Warn if the device reported failure to sync. * A lot of SDVO devices fail to notify of sync, but it's * a given it the status is a success, we succeeded. */ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { DRM_DEBUG_KMS("First %s output reported failure to " "sync\n", SDVO_NAME(intel_sdvo)); } if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, mode); intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); } return; } static int intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; if (intel_sdvo->pixel_clock_min > mode->clock) return MODE_CLOCK_LOW; if (intel_sdvo->pixel_clock_max < mode->clock) return MODE_CLOCK_HIGH; if (intel_sdvo->is_lvds) { if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay) return MODE_PANEL; if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay) return MODE_PANEL; } return MODE_OK; } static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) { BUILD_BUG_ON(sizeof(*caps) != 8); if 
(!intel_sdvo_get_value(intel_sdvo,
				  SDVO_CMD_GET_DEVICE_CAPS,
				  caps, sizeof(*caps)))
		return false;

	/* Dump the capability block for debugging. */
	DRM_DEBUG_KMS("SDVO capabilities:\n"
		      " vendor_id: %d\n"
		      " device_id: %d\n"
		      " device_rev_id: %d\n"
		      " sdvo_version_major: %d\n"
		      " sdvo_version_minor: %d\n"
		      " sdvo_inputs_mask: %d\n"
		      " smooth_scaling: %d\n"
		      " sharp_scaling: %d\n"
		      " up_scaling: %d\n"
		      " down_scaling: %d\n"
		      " stall_support: %d\n"
		      " output_flags: %d\n",
		      caps->vendor_id,
		      caps->device_id,
		      caps->device_rev_id,
		      caps->sdvo_version_major,
		      caps->sdvo_version_minor,
		      caps->sdvo_inputs_mask,
		      caps->smooth_scaling,
		      caps->sharp_scaling,
		      caps->up_scaling,
		      caps->down_scaling,
		      caps->stall_support,
		      caps->output_flags);

	return true;
}

/* Returns non-zero when the encoder reports hot-plug support. */
static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
{
	u8 response[2];

	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
				    &response, 2) && response[0];
}

/* Enable hot-plug notification using the previously saved active mask. */
static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);

	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
			     &intel_sdvo->hotplug_active, 2);
}

static bool intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
{
	/* Is there more than one type of output?
*/ return hweight16(intel_sdvo->caps.output_flags) > 1; } static struct edid * intel_sdvo_get_edid(struct drm_connector *connector) { struct intel_sdvo *sdvo = intel_attached_sdvo(connector); return drm_get_edid(connector, &sdvo->ddc); } /* Mac mini hack -- use the same DDC as the analog connector */ static struct edid * intel_sdvo_get_analog_edid(struct drm_connector *connector) { struct drm_i915_private *dev_priv = connector->dev->dev_private; return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); } enum drm_connector_status intel_sdvo_tmds_sink_detect(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); enum drm_connector_status status; struct edid *edid; edid = intel_sdvo_get_edid(connector); if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) { u8 ddc, saved_ddc = intel_sdvo->ddc_bus; /* * Don't use the 1 as the argument of DDC bus switch to get * the EDID. It is used for SDVO SPD ROM. */ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) { intel_sdvo->ddc_bus = ddc; edid = intel_sdvo_get_edid(connector); if (edid) break; } /* * If we found the EDID on the other bus, * assume that is the correct DDC bus. */ if (edid == NULL) intel_sdvo->ddc_bus = saved_ddc; } /* * When there is no edid and no monitor is connected with VGA * port, try to use the CRT ddc to read the EDID for DVI-connector. 
*/ if (edid == NULL) edid = intel_sdvo_get_analog_edid(connector); status = connector_status_unknown; if (edid != NULL) { /* DDC bus is shared, match EDID to connector type */ if (edid->input & DRM_EDID_INPUT_DIGITAL) { status = connector_status_connected; if (intel_sdvo->is_hdmi) { intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); } } else status = connector_status_disconnected; connector->display_info.raw_edid = NULL; kfree(edid); } if (status == connector_status_connected) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO) intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON); } return status; } static bool intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo, struct edid *edid) { bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); bool connector_is_digital = !!IS_DIGITAL(sdvo); DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? 
%d\n", connector_is_digital, monitor_is_digital); return connector_is_digital == monitor_is_digital; } static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector, bool force) { uint16_t response; struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) return connector_status_unknown; /* add 30ms delay when the output type might be TV */ if (intel_sdvo->caps.output_flags & (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0)) mdelay(30); if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) return connector_status_unknown; DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", response & 0xff, response >> 8, intel_sdvo_connector->output_flag); if (response == 0) return connector_status_disconnected; intel_sdvo->attached_output = response; intel_sdvo->has_hdmi_monitor = false; intel_sdvo->has_hdmi_audio = false; if ((intel_sdvo_connector->output_flag & response) == 0) ret = connector_status_disconnected; else if (IS_TMDS(intel_sdvo_connector)) ret = intel_sdvo_tmds_sink_detect(connector); else { struct edid *edid; /* if we have an edid check it matches the connection */ edid = intel_sdvo_get_edid(connector); if (edid == NULL) edid = intel_sdvo_get_analog_edid(connector); if (edid != NULL) { if (intel_sdvo_connector_matches_edid(intel_sdvo_connector, edid)) ret = connector_status_connected; else ret = connector_status_disconnected; connector->display_info.raw_edid = NULL; kfree(edid); } else ret = connector_status_connected; } /* May update encoder flag for like clock for SDVO TV, etc.*/ if (ret == connector_status_connected) { intel_sdvo->is_tv = false; intel_sdvo->is_lvds = false; intel_sdvo->base.needs_tv_clock = false; if (response & SDVO_TV_MASK) { intel_sdvo->is_tv = true; intel_sdvo->base.needs_tv_clock = true; } if (response & SDVO_LVDS_MASK) 
intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL; } return ret; } static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) { struct edid *edid; /* set the bus switch and get the modes */ edid = intel_sdvo_get_edid(connector); /* * Mac mini hack. On this device, the DVI-I connector shares one DDC * link between analog and digital outputs. So, if the regular SDVO * DDC fails, check to see if the analog output is disconnected, in * which case we'll look there for the digital DDC data. */ if (edid == NULL) edid = intel_sdvo_get_analog_edid(connector); if (edid != NULL) { if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector), edid)) { drm_mode_connector_update_edid_property(connector, edid); drm_add_edid_modes(connector, edid); } connector->display_info.raw_edid = NULL; kfree(edid); } } /* * Set of SDVO TV modes. * Note! This is in reply order (see loop in get_tv_modes). * XXX: all 60Hz refresh? */ static const struct drm_display_mode sdvo_tv_modes[] = { { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384, 416, 0, 200, 201, 232, 233, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384, 416, 0, 240, 241, 272, 273, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464, 496, 0, 300, 301, 332, 333, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704, 736, 0, 350, 351, 382, 383, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704, 736, 0, 400, 401, 432, 433, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704, 736, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768, 800, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | 
DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768, 800, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784, 816, 0, 350, 351, 382, 383, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784, 816, 0, 400, 401, 432, 433, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784, 816, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784, 816, 0, 540, 541, 572, 573, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784, 816, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832, 864, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864, 896, 0, 600, 601, 632, 633, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896, 928, 0, 624, 625, 656, 657, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984, 1016, 0, 766, 767, 798, 799, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088, 1120, 0, 768, 769, 800, 801, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344, 1376, 0, 1024, 1025, 1056, 1057, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, }; static void intel_sdvo_get_tv_modes(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_sdtv_resolution_request tv_res; uint32_t reply = 0, format_map 
= 0; int i; /* Read the list of supported input resolutions for the selected TV * format. */ format_map = 1 << intel_sdvo->tv_format_index; memcpy(&tv_res, &format_map, min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request))); if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) return; BUILD_BUG_ON(sizeof(tv_res) != 3); if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, &tv_res, sizeof(tv_res))) return; if (!intel_sdvo_read_response(intel_sdvo, &reply, 3)) return; for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) if (reply & (1 << i)) { struct drm_display_mode *nmode; nmode = drm_mode_duplicate(connector->dev, &sdvo_tv_modes[i]); if (nmode) drm_mode_probed_add(connector, nmode); } } static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct drm_i915_private *dev_priv = connector->dev->dev_private; struct drm_display_mode *newmode; /* * Attempt to get the mode list from DDC. * Assume that the preferred modes are * arranged in priority order. 
*/ intel_ddc_get_modes(connector, intel_sdvo->i2c); if (list_empty(&connector->probed_modes) == false) goto end; /* Fetch modes from VBT */ if (dev_priv->sdvo_lvds_vbt_mode != NULL) { newmode = drm_mode_duplicate(connector->dev, dev_priv->sdvo_lvds_vbt_mode); if (newmode != NULL) { /* Guarantee the mode is preferred */ newmode->type = (DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER); drm_mode_probed_add(connector, newmode); } } end: list_for_each_entry(newmode, &connector->probed_modes, head) { if (newmode->type & DRM_MODE_TYPE_PREFERRED) { intel_sdvo->sdvo_lvds_fixed_mode = drm_mode_duplicate(connector->dev, newmode); drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0); intel_sdvo->is_lvds = true; break; } } } static int intel_sdvo_get_modes(struct drm_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); if (IS_TV(intel_sdvo_connector)) intel_sdvo_get_tv_modes(connector); else if (IS_LVDS(intel_sdvo_connector)) intel_sdvo_get_lvds_modes(connector); else intel_sdvo_get_ddc_modes(connector); return !list_empty(&connector->probed_modes); } static void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); struct drm_device *dev = connector->dev; if (intel_sdvo_connector->left) drm_property_destroy(dev, intel_sdvo_connector->left); if (intel_sdvo_connector->right) drm_property_destroy(dev, intel_sdvo_connector->right); if (intel_sdvo_connector->top) drm_property_destroy(dev, intel_sdvo_connector->top); if (intel_sdvo_connector->bottom) drm_property_destroy(dev, intel_sdvo_connector->bottom); if (intel_sdvo_connector->hpos) drm_property_destroy(dev, intel_sdvo_connector->hpos); if (intel_sdvo_connector->vpos) drm_property_destroy(dev, intel_sdvo_connector->vpos); if (intel_sdvo_connector->saturation) drm_property_destroy(dev, intel_sdvo_connector->saturation); if (intel_sdvo_connector->contrast) 
drm_property_destroy(dev, intel_sdvo_connector->contrast); if (intel_sdvo_connector->hue) drm_property_destroy(dev, intel_sdvo_connector->hue); if (intel_sdvo_connector->sharpness) drm_property_destroy(dev, intel_sdvo_connector->sharpness); if (intel_sdvo_connector->flicker_filter) drm_property_destroy(dev, intel_sdvo_connector->flicker_filter); if (intel_sdvo_connector->flicker_filter_2d) drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d); if (intel_sdvo_connector->flicker_filter_adaptive) drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive); if (intel_sdvo_connector->tv_luma_filter) drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter); if (intel_sdvo_connector->tv_chroma_filter) drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter); if (intel_sdvo_connector->dot_crawl) drm_property_destroy(dev, intel_sdvo_connector->dot_crawl); if (intel_sdvo_connector->brightness) drm_property_destroy(dev, intel_sdvo_connector->brightness); } static void intel_sdvo_destroy(struct drm_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); if (intel_sdvo_connector->tv_format) drm_property_destroy(connector->dev, intel_sdvo_connector->tv_format); intel_sdvo_destroy_enhance_property(connector); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); } static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct edid *edid; bool has_audio = false; if (!intel_sdvo->is_hdmi) return false; edid = intel_sdvo_get_edid(connector); if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL) has_audio = drm_detect_monitor_audio(edid); return has_audio; } static int intel_sdvo_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector 
*intel_sdvo_connector = to_intel_sdvo_connector(connector); struct drm_i915_private *dev_priv = connector->dev->dev_private; uint16_t temp_value; uint8_t cmd; int ret; ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; if (property == dev_priv->force_audio_property) { int i = val; bool has_audio; if (i == intel_sdvo_connector->force_audio) return 0; intel_sdvo_connector->force_audio = i; if (i == HDMI_AUDIO_AUTO) has_audio = intel_sdvo_detect_hdmi_audio(connector); else has_audio = (i == HDMI_AUDIO_ON); if (has_audio == intel_sdvo->has_hdmi_audio) return 0; intel_sdvo->has_hdmi_audio = has_audio; goto done; } if (property == dev_priv->broadcast_rgb_property) { if (val == !!intel_sdvo->color_range) return 0; intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; goto done; } #define CHECK_PROPERTY(name, NAME) \ if (intel_sdvo_connector->name == property) { \ if (intel_sdvo_connector->cur_##name == temp_value) return 0; \ if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \ cmd = SDVO_CMD_SET_##NAME; \ intel_sdvo_connector->cur_##name = temp_value; \ goto set_value; \ } if (property == intel_sdvo_connector->tv_format) { if (val >= TV_FORMAT_NUM) return -EINVAL; if (intel_sdvo->tv_format_index == intel_sdvo_connector->tv_format_supported[val]) return 0; intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val]; goto done; } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { temp_value = val; if (intel_sdvo_connector->left == property) { drm_connector_property_set_value(connector, intel_sdvo_connector->right, val); if (intel_sdvo_connector->left_margin == temp_value) return 0; intel_sdvo_connector->left_margin = temp_value; intel_sdvo_connector->right_margin = temp_value; temp_value = intel_sdvo_connector->max_hscan - intel_sdvo_connector->left_margin; cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (intel_sdvo_connector->right == property) { drm_connector_property_set_value(connector, 
intel_sdvo_connector->left, val); if (intel_sdvo_connector->right_margin == temp_value) return 0; intel_sdvo_connector->left_margin = temp_value; intel_sdvo_connector->right_margin = temp_value; temp_value = intel_sdvo_connector->max_hscan - intel_sdvo_connector->left_margin; cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (intel_sdvo_connector->top == property) { drm_connector_property_set_value(connector, intel_sdvo_connector->bottom, val); if (intel_sdvo_connector->top_margin == temp_value) return 0; intel_sdvo_connector->top_margin = temp_value; intel_sdvo_connector->bottom_margin = temp_value; temp_value = intel_sdvo_connector->max_vscan - intel_sdvo_connector->top_margin; cmd = SDVO_CMD_SET_OVERSCAN_V; goto set_value; } else if (intel_sdvo_connector->bottom == property) { drm_connector_property_set_value(connector, intel_sdvo_connector->top, val); if (intel_sdvo_connector->bottom_margin == temp_value) return 0; intel_sdvo_connector->top_margin = temp_value; intel_sdvo_connector->bottom_margin = temp_value; temp_value = intel_sdvo_connector->max_vscan - intel_sdvo_connector->top_margin; cmd = SDVO_CMD_SET_OVERSCAN_V; goto set_value; } CHECK_PROPERTY(hpos, HPOS) CHECK_PROPERTY(vpos, VPOS) CHECK_PROPERTY(saturation, SATURATION) CHECK_PROPERTY(contrast, CONTRAST) CHECK_PROPERTY(hue, HUE) CHECK_PROPERTY(brightness, BRIGHTNESS) CHECK_PROPERTY(sharpness, SHARPNESS) CHECK_PROPERTY(flicker_filter, FLICKER_FILTER) CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D) CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE) CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER) CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER) CHECK_PROPERTY(dot_crawl, DOT_CRAWL) } return -EINVAL; /* unknown property */ set_value: if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2)) return -EIO; done: if (intel_sdvo->base.base.crtc) { struct drm_crtc *crtc = intel_sdvo->base.base.crtc; drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } return 0; 
#undef CHECK_PROPERTY } static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { .dpms = intel_sdvo_dpms, .mode_fixup = intel_sdvo_mode_fixup, .prepare = intel_encoder_prepare, .mode_set = intel_sdvo_mode_set, .commit = intel_encoder_commit, }; static const struct drm_connector_funcs intel_sdvo_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = intel_sdvo_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_sdvo_set_property, .destroy = intel_sdvo_destroy, }; static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { .get_modes = intel_sdvo_get_modes, .mode_valid = intel_sdvo_mode_valid, .best_encoder = intel_best_encoder, }; static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) { struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); if (intel_sdvo->sdvo_lvds_fixed_mode != NULL) drm_mode_destroy(encoder->dev, intel_sdvo->sdvo_lvds_fixed_mode); i2c_del_adapter(&intel_sdvo->ddc); intel_encoder_destroy(encoder); } static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { .destroy = intel_sdvo_enc_destroy, }; static void intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo) { uint16_t mask = 0; unsigned int num_bits; /* Make a mask of outputs less than or equal to our own priority in the * list. */ switch (sdvo->controlled_output) { case SDVO_OUTPUT_LVDS1: mask |= SDVO_OUTPUT_LVDS1; case SDVO_OUTPUT_LVDS0: mask |= SDVO_OUTPUT_LVDS0; case SDVO_OUTPUT_TMDS1: mask |= SDVO_OUTPUT_TMDS1; case SDVO_OUTPUT_TMDS0: mask |= SDVO_OUTPUT_TMDS0; case SDVO_OUTPUT_RGB1: mask |= SDVO_OUTPUT_RGB1; case SDVO_OUTPUT_RGB0: mask |= SDVO_OUTPUT_RGB0; break; } /* Count bits to find what number we are in the priority list. */ mask &= sdvo->caps.output_flags; num_bits = hweight16(mask); /* If more than 3 outputs, default to DDC bus 3 for now. 
*/ if (num_bits > 3) num_bits = 3; /* Corresponds to SDVO_CONTROL_BUS_DDCx */ sdvo->ddc_bus = 1 << num_bits; } /** * Choose the appropriate DDC bus for control bus switch command for this * SDVO output based on the controlled output. * * DDC bus number assignment is in a priority order of RGB outputs, then TMDS * outputs, then LVDS outputs. */ static void intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, struct intel_sdvo *sdvo, u32 reg) { struct sdvo_device_mapping *mapping; if (IS_SDVOB(reg)) mapping = &(dev_priv->sdvo_mappings[0]); else mapping = &(dev_priv->sdvo_mappings[1]); if (mapping->initialized) sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); else intel_sdvo_guess_ddc_bus(sdvo); } static void intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, struct intel_sdvo *sdvo, u32 reg) { struct sdvo_device_mapping *mapping; u8 pin; if (IS_SDVOB(reg)) mapping = &dev_priv->sdvo_mappings[0]; else mapping = &dev_priv->sdvo_mappings[1]; pin = GMBUS_PORT_DPB; if (mapping->initialized) pin = mapping->i2c_pin; if (pin < GMBUS_NUM_PORTS) { sdvo->i2c = &dev_priv->gmbus[pin].adapter; intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ); intel_gmbus_force_bit(sdvo->i2c, true); } else { sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter; } } static bool intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) { return intel_sdvo_check_supp_encode(intel_sdvo); } static u8 intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct sdvo_device_mapping *my_mapping, *other_mapping; if (IS_SDVOB(sdvo_reg)) { my_mapping = &dev_priv->sdvo_mappings[0]; other_mapping = &dev_priv->sdvo_mappings[1]; } else { my_mapping = &dev_priv->sdvo_mappings[1]; other_mapping = &dev_priv->sdvo_mappings[0]; } /* If the BIOS described our SDVO device, take advantage of it. 
*/ if (my_mapping->slave_addr) return my_mapping->slave_addr; /* If the BIOS only described a different SDVO device, use the * address that it isn't using. */ if (other_mapping->slave_addr) { if (other_mapping->slave_addr == 0x70) return 0x72; else return 0x70; } /* No SDVO device info is found for another DVO port, * so use mapping assumption we had before BIOS parsing. */ if (IS_SDVOB(sdvo_reg)) return 0x70; else return 0x72; } static void intel_sdvo_connector_init(struct intel_sdvo_connector *connector, struct intel_sdvo *encoder) { drm_connector_init(encoder->base.base.dev, &connector->base.base, &intel_sdvo_connector_funcs, connector->base.base.connector_type); drm_connector_helper_add(&connector->base.base, &intel_sdvo_connector_helper_funcs); connector->base.base.interlace_allowed = 1; connector->base.base.doublescan_allowed = 0; connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; intel_connector_attach_encoder(&connector->base, &encoder->base); drm_sysfs_connector_add(&connector->base.base); } static void intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector) { struct drm_device *dev = connector->base.base.dev; intel_attach_force_audio_property(&connector->base.base); if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) intel_attach_broadcast_rgb_property(&connector->base.base); } static bool intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) { struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_encoder *intel_encoder = to_intel_encoder(encoder); struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); if (!intel_sdvo_connector) return false; if (device == 0) { intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0; intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; } else if (device == 1) { intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1; 
intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; } intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { connector->polled = DRM_CONNECTOR_POLL_HPD; intel_sdvo->hotplug_active[0] |= 1 << device; /* Some SDVO devices have one-shot hotplug interrupts. * Ensure that they get re-enabled when an interrupt happens. */ intel_encoder->hot_plug = intel_sdvo_enable_hotplug; intel_sdvo_enable_hotplug(intel_encoder); } else connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; intel_sdvo->is_hdmi = true; } intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT)); intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (intel_sdvo->is_hdmi) intel_sdvo_add_hdmi_properties(intel_sdvo_connector); return true; } static bool intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) { struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); if (!intel_sdvo_connector) return false; intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; intel_sdvo->controlled_output |= type; intel_sdvo_connector->output_flag = type; intel_sdvo->is_tv = true; intel_sdvo->base.needs_tv_clock = true; intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) goto 
err; if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; return true; err: intel_sdvo_destroy(connector); return false; } static bool intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) { struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); if (!intel_sdvo_connector) return false; intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; connector->polled = DRM_CONNECTOR_POLL_CONNECT; encoder->encoder_type = DRM_MODE_ENCODER_DAC; connector->connector_type = DRM_MODE_CONNECTOR_VGA; if (device == 0) { intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; } else if (device == 1) { intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; } intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT)); intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); return true; } static bool intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) { struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); if (!intel_sdvo_connector) return false; intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; encoder->encoder_type = DRM_MODE_ENCODER_LVDS; connector->connector_type = DRM_MODE_CONNECTOR_LVDS; if (device == 0) { intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; } else if (device == 1) { intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; intel_sdvo_connector->output_flag = 
SDVO_OUTPUT_LVDS1; } intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | (1 << INTEL_SDVO_LVDS_CLONE_BIT)); intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; return true; err: intel_sdvo_destroy(connector); return false; } static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) { intel_sdvo->is_tv = false; intel_sdvo->base.needs_tv_clock = false; intel_sdvo->is_lvds = false; /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ if (flags & SDVO_OUTPUT_TMDS0) if (!intel_sdvo_dvi_init(intel_sdvo, 0)) return false; if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) if (!intel_sdvo_dvi_init(intel_sdvo, 1)) return false; /* TV has no XXX1 function block */ if (flags & SDVO_OUTPUT_SVID0) if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0)) return false; if (flags & SDVO_OUTPUT_CVBS0) if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0)) return false; if (flags & SDVO_OUTPUT_RGB0) if (!intel_sdvo_analog_init(intel_sdvo, 0)) return false; if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) if (!intel_sdvo_analog_init(intel_sdvo, 1)) return false; if (flags & SDVO_OUTPUT_LVDS0) if (!intel_sdvo_lvds_init(intel_sdvo, 0)) return false; if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) if (!intel_sdvo_lvds_init(intel_sdvo, 1)) return false; if ((flags & SDVO_OUTPUT_MASK) == 0) { unsigned char bytes[2]; intel_sdvo->controlled_output = 0; memcpy(bytes, &intel_sdvo->caps.output_flags, 2); DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", SDVO_NAME(intel_sdvo), bytes[0], bytes[1]); return false; } intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); return true; } static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, int type) { struct drm_device *dev = intel_sdvo->base.base.dev; struct intel_sdvo_tv_format format; uint32_t format_map, i; if 
(!intel_sdvo_set_target_output(intel_sdvo, type)) return false; BUILD_BUG_ON(sizeof(format) != 6); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_SUPPORTED_TV_FORMATS, &format, sizeof(format))) return false; memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format))); if (format_map == 0) return false; intel_sdvo_connector->format_supported_num = 0; for (i = 0 ; i < TV_FORMAT_NUM; i++) if (format_map & (1 << i)) intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i; intel_sdvo_connector->tv_format = drm_property_create(dev, DRM_MODE_PROP_ENUM, "mode", intel_sdvo_connector->format_supported_num); if (!intel_sdvo_connector->tv_format) return false; for (i = 0; i < intel_sdvo_connector->format_supported_num; i++) drm_property_add_enum( intel_sdvo_connector->tv_format, i, i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0]; drm_connector_attach_property(&intel_sdvo_connector->base.base, intel_sdvo_connector->tv_format, 0); return true; } #define ENHANCEMENT(name, NAME) do { \ if (enhancements.name) { \ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \ !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \ return false; \ intel_sdvo_connector->max_##name = data_value[0]; \ intel_sdvo_connector->cur_##name = response; \ intel_sdvo_connector->name = \ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ if (!intel_sdvo_connector->name) return false; \ drm_connector_attach_property(connector, \ intel_sdvo_connector->name, \ intel_sdvo_connector->cur_##name); \ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ data_value[0], data_value[1], response); \ } \ } while (0) static bool intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, struct intel_sdvo_enhancements_reply enhancements) { struct 
drm_device *dev = intel_sdvo->base.base.dev; struct drm_connector *connector = &intel_sdvo_connector->base.base; uint16_t response, data_value[2]; /* when horizontal overscan is supported, Add the left/right property */ if (enhancements.overscan_h) { if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_OVERSCAN_H, &data_value, 4)) return false; if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_OVERSCAN_H, &response, 2)) return false; intel_sdvo_connector->max_hscan = data_value[0]; intel_sdvo_connector->left_margin = data_value[0] - response; intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin; intel_sdvo_connector->left = drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]); if (!intel_sdvo_connector->left) return false; drm_connector_attach_property(connector, intel_sdvo_connector->left, intel_sdvo_connector->left_margin); intel_sdvo_connector->right = drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]); if (!intel_sdvo_connector->right) return false; drm_connector_attach_property(connector, intel_sdvo_connector->right, intel_sdvo_connector->right_margin); DRM_DEBUG_KMS("h_overscan: max %d, " "default %d, current %d\n", data_value[0], data_value[1], response); } if (enhancements.overscan_v) { if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_OVERSCAN_V, &data_value, 4)) return false; if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_OVERSCAN_V, &response, 2)) return false; intel_sdvo_connector->max_vscan = data_value[0]; intel_sdvo_connector->top_margin = data_value[0] - response; intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin; intel_sdvo_connector->top = drm_property_create_range(dev, 0, "top_margin", 0, data_value[0]); if (!intel_sdvo_connector->top) return false; drm_connector_attach_property(connector, intel_sdvo_connector->top, intel_sdvo_connector->top_margin); intel_sdvo_connector->bottom = drm_property_create_range(dev, 0, "bottom_margin", 0, data_value[0]); if 
(!intel_sdvo_connector->bottom) return false; drm_connector_attach_property(connector, intel_sdvo_connector->bottom, intel_sdvo_connector->bottom_margin); DRM_DEBUG_KMS("v_overscan: max %d, " "default %d, current %d\n", data_value[0], data_value[1], response); } ENHANCEMENT(hpos, HPOS); ENHANCEMENT(vpos, VPOS); ENHANCEMENT(saturation, SATURATION); ENHANCEMENT(contrast, CONTRAST); ENHANCEMENT(hue, HUE); ENHANCEMENT(sharpness, SHARPNESS); ENHANCEMENT(brightness, BRIGHTNESS); ENHANCEMENT(flicker_filter, FLICKER_FILTER); ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE); ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D); ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER); ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER); if (enhancements.dot_crawl) { if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2)) return false; intel_sdvo_connector->max_dot_crawl = 1; intel_sdvo_connector->cur_dot_crawl = response & 0x1; intel_sdvo_connector->dot_crawl = drm_property_create_range(dev, 0, "dot_crawl", 0, 1); if (!intel_sdvo_connector->dot_crawl) return false; drm_connector_attach_property(connector, intel_sdvo_connector->dot_crawl, intel_sdvo_connector->cur_dot_crawl); DRM_DEBUG_KMS("dot crawl: current %d\n", response); } return true; } static bool intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, struct intel_sdvo_enhancements_reply enhancements) { struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_connector *connector = &intel_sdvo_connector->base.base; uint16_t response, data_value[2]; ENHANCEMENT(brightness, BRIGHTNESS); return true; } #undef ENHANCEMENT static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector) { union { struct intel_sdvo_enhancements_reply reply; uint16_t response; } enhancements; BUILD_BUG_ON(sizeof(enhancements) != 2); enhancements.response = 0; intel_sdvo_get_value(intel_sdvo, 
SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, &enhancements, sizeof(enhancements)); if (enhancements.response == 0) { DRM_DEBUG_KMS("No enhancement is supported\n"); return true; } if (IS_TV(intel_sdvo_connector)) return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply); else if (IS_LVDS(intel_sdvo_connector)) return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply); else return true; } static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct intel_sdvo *sdvo = adapter->algo_data; if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus)) return -EIO; return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num); } static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter) { struct intel_sdvo *sdvo = adapter->algo_data; return sdvo->i2c->algo->functionality(sdvo->i2c); } static const struct i2c_algorithm intel_sdvo_ddc_proxy = { .master_xfer = intel_sdvo_ddc_proxy_xfer, .functionality = intel_sdvo_ddc_proxy_func }; static bool intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, struct drm_device *dev) { sdvo->ddc.owner = THIS_MODULE; sdvo->ddc.class = I2C_CLASS_DDC; snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy"); sdvo->ddc.dev.parent = &dev->pdev->dev; sdvo->ddc.algo_data = sdvo; sdvo->ddc.algo = &intel_sdvo_ddc_proxy; return i2c_add_adapter(&sdvo->ddc) == 0; } bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *intel_encoder; struct intel_sdvo *intel_sdvo; int i; intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); if (!intel_sdvo) return false; intel_sdvo->sdvo_reg = sdvo_reg; intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1; intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { kfree(intel_sdvo); return false; } /* encoder type will be decided later */ 
intel_encoder = &intel_sdvo->base; intel_encoder->type = INTEL_OUTPUT_SDVO; drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { u8 byte; if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) { DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", IS_SDVOB(sdvo_reg) ? 'B' : 'C'); goto err; } } if (IS_SDVOB(sdvo_reg)) dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; else dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); /* In default case sdvo lvds is false */ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) goto err; /* Set up hotplug command - note paranoia about contents of reply. * We assume that the hardware is in a sane state, and only touch * the bits we think we understand. */ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); intel_sdvo->hotplug_active[0] &= ~0x3; if (intel_sdvo_output_setup(intel_sdvo, intel_sdvo->caps.output_flags) != true) { DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", IS_SDVOB(sdvo_reg) ? 'B' : 'C'); goto err; } intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) goto err; if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, &intel_sdvo->pixel_clock_min, &intel_sdvo->pixel_clock_max)) goto err; DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " "clock range %dMHz - %dMHz, " "input 1: %c, input 2: %c, " "output 1: %c, output 2: %c\n", SDVO_NAME(intel_sdvo), intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id, intel_sdvo->caps.device_rev_id, intel_sdvo->pixel_clock_min / 1000, intel_sdvo->pixel_clock_max / 1000, (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 
'Y' : 'N', /* check currently supported outputs */ intel_sdvo->caps.output_flags & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', intel_sdvo->caps.output_flags & (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); return true; err: drm_encoder_cleanup(&intel_encoder->base); i2c_del_adapter(&intel_sdvo->ddc); kfree(intel_sdvo); return false; }
gpl-2.0
gromikakao/e980-zeKrnl
drivers/broadcast/oneseg/tcc3530/Tcc353xDriver/sample/interruptProcess/tcc353x_Interrupt_Process.c
38
8531
/*
 * tcc353x_Interrupt_Process.c -- ISR-side TS stream buffering for the
 * TCC353x ISDB-T demodulator.
 *
 * Two buffering back-ends are selectable at compile time:
 *  - USE_LGE_RING_BUFFER (default): the platform's mbt_dataring_* ring.
 *  - otherwise: a simple per-module circular buffer protected by the
 *    external semaphore Tcc353xStreamSema.
 */
#include "tcc353x_common.h"
#include "tcc353x_api.h"
#include "tcpal_os.h"
#include "tcc353x_user_defines.h"

#define USE_LGE_RING_BUFFER

#ifdef USE_LGE_RING_BUFFER
/* LGE ring-buffer primitives, implemented elsewhere in the platform code. */
extern void mbt_dataring_create(unsigned int *buffer_id, int len);
extern void mbt_dataring_destroy(unsigned int *buffer_id);
extern int mbt_dataring_empty(unsigned int buffer_id);
extern int mbt_dataring_free(unsigned int buffer_id);
extern int mbt_dataring_avail(unsigned int buffer_id);
extern void mbt_dataring_flush(unsigned int buffer_id);
extern int mbt_dataring_read(unsigned int buffer_id, char *buf, int len);
extern int mbt_dataring_write(unsigned int buffer_id, const char *buf,
			      int len);
#endif

/* Stream area; the extra 188 spare bytes hold one additional TS packet. */
I08U Tcc353xStreamData[TCC353X_STREAM_BUFFER_SIZE + 188];
#ifndef USE_LGE_RING_BUFFER
I32U Tcc353xStreamRp[4] = {0, 0, 0, 0};		/* per-module read ptr */
I32U Tcc353xStreamWp[4] = {0, 0, 0, 0};		/* per-module write ptr */
I32U Tcc353xStreamBufferedSize[4] = {0, 0, 0, 0};	/* bytes buffered */
I32U Tcc353xStreamFlushFlag[4] = {0, 0, 0, 0};	/* flush raced a reader */
#else
unsigned int RingBufId = 0;
#endif

extern TcpalSemaphore_t Tcc353xStreamSema;

/* for overflow test */
//#define _DBG_CHK_OVERFLOW_CNT_
I32U gOverflowcnt = 0;
I32U gDbgIsrCnt = 0;

#ifndef USE_LGE_RING_BUFFER
/*
 * Clear one module's buffer book-keeping under the stream lock.
 * _flushFlag is the value left in Tcc353xStreamFlushFlag so a concurrent
 * reader can detect that a flush happened while it was copying.
 */
static void Tcc353xStreamStateClear(I32S _moduleIndex, I32U _flushFlag)
{
	TcpalSemaphoreLock(&Tcc353xStreamSema);
	Tcc353xStreamRp[_moduleIndex] = 0;
	Tcc353xStreamWp[_moduleIndex] = 0;
	Tcc353xStreamBufferedSize[_moduleIndex] = 0;
	Tcc353xStreamFlushFlag[_moduleIndex] = _flushFlag;
	TcpalSemaphoreUnLock(&Tcc353xStreamSema);
}

/* Initialize the stream buffer state for one module. */
void Tcc353xStreamBufferInit(I32S _moduleIndex)
{
	Tcc353xStreamStateClear(_moduleIndex, 0);
}

/* Tear down the stream buffer state for one module. */
void Tcc353xStreamBufferClose(I32S _moduleIndex)
{
	Tcc353xStreamStateClear(_moduleIndex, 0);
}

/* Discard all buffered data for one module. */
void Tcc353xStreamBufferReset(I32S _moduleIndex)
{
	Tcc353xStreamStateClear(_moduleIndex, 0);
}

/* Discard all buffered data and flag the flush for in-flight readers. */
void Tcc353xStreamBufferFlush(I32S _moduleIndex)
{
	TcpalPrintStatus((I08S *) "[TCC353X] StreamBufferFlushing\n");
	Tcc353xStreamStateClear(_moduleIndex, 1);
}
#else
/* Ring-buffer back-end: create the shared ring. */
void Tcc353xStreamBufferInit(I32S _moduleIndex)
{
	mbt_dataring_create(&RingBufId, TCC353X_STREAM_BUFFER_SIZE);
}

/* Ring-buffer back-end: destroy the shared ring. */
void Tcc353xStreamBufferClose(I32S _moduleIndex)
{
	mbt_dataring_destroy(&RingBufId);
}

/* Ring-buffer back-end: nothing to reset (ring keeps its own state). */
void Tcc353xStreamBufferReset(I32S _moduleIndex)
{
}

/* Ring-buffer back-end: drop all pending data. */
void Tcc353xStreamBufferFlush(I32S _moduleIndex)
{
	mbt_dataring_flush(RingBufId);
}
#endif

#ifndef USE_LGE_RING_BUFFER
/*
 * Copy up to _size bytes of whole 188-byte TS packets out of the module's
 * circular buffer into _buff.  Returns the number of bytes copied, or 0 if
 * fewer than one packet is available or a flush raced with the copy.
 */
I32U Tcc353xGetStreamBuffer(I32S _moduleIndex, I08U *_buff, I32U _size)
{
	I32U tsSize;
	I32U totalSize;
	I32U rp;
	I32U wp;
	I32U nextRp;

	TcpalSemaphoreLock(&Tcc353xStreamSema);
	rp = Tcc353xStreamRp[_moduleIndex];
	wp = Tcc353xStreamWp[_moduleIndex];
	Tcc353xStreamFlushFlag[_moduleIndex] = 0;
	TcpalSemaphoreUnLock(&Tcc353xStreamSema);

	/* bytes currently buffered between rp and wp (circular) */
	if (rp > wp)
		totalSize = wp + (TCC353X_STREAM_BUFFER_SIZE - rp);
	else if (rp < wp)
		totalSize = wp - rp;
	else
		totalSize = 0;

	tsSize = (_size > totalSize) ? totalSize : _size;
	tsSize -= (tsSize % 188);	/* whole TS packets only */
	if (tsSize < 188)
		return 0;

	nextRp = (rp + tsSize) % TCC353X_STREAM_BUFFER_SIZE;
	if (rp + tsSize > TCC353X_STREAM_BUFFER_SIZE) {
		/* region wraps: copy the tail, then the head */
		I32U first = TCC353X_STREAM_BUFFER_SIZE - rp;
		I32U remain = nextRp;

		TcpalMemcpy(&_buff[0], &Tcc353xStreamData[rp], first);
		TcpalMemcpy(&_buff[first], &Tcc353xStreamData[0], remain);
	} else {
		TcpalMemcpy(&_buff[0], &Tcc353xStreamData[rp], tsSize);
	}

	TcpalSemaphoreLock(&Tcc353xStreamSema);
	if (Tcc353xStreamFlushFlag[_moduleIndex] != 0) {
		/* a flush ran while we copied: drop the data, keep pointers */
		Tcc353xStreamFlushFlag[_moduleIndex] = 0;
		tsSize = 0;
	} else {
		Tcc353xStreamRp[_moduleIndex] = nextRp;
		/* bugfix: was hard-coded index 0; use the caller's module */
		if (Tcc353xStreamBufferedSize[_moduleIndex] >= tsSize)
			Tcc353xStreamBufferedSize[_moduleIndex] -= tsSize;
	}
	TcpalSemaphoreUnLock(&Tcc353xStreamSema);

	return tsSize;
}
#else
/*
 * Read up to _size bytes from the ring buffer into _buff.  Returns the
 * number of bytes read, or 0 when less than one TS packet is available
 * or the ring read fails.
 * NOTE(review): unlike the circular-buffer variant this does not round
 * the request down to a multiple of 188 — presumably the ring only ever
 * holds whole packets; confirm against mbt_dataring_write callers.
 */
I32U Tcc353xGetStreamBuffer(I32S _moduleIndex, I08U *_buff, I32U _size)
{
	I32U totalSize;
	I32U reqSize;
	I32S readSize;

	totalSize = (I32U) mbt_dataring_avail(RingBufId);
	if (totalSize < 188)
		return 0;

	reqSize = (_size > totalSize) ? totalSize : _size;

	readSize = mbt_dataring_read(RingBufId, (char *) _buff, reqSize);
	if (readSize < 0)	/* bugfix: don't return an error as a huge size */
		return 0;
	return (I32U) readSize;
}
#endif

/*
 * Top half of the interrupt handler.  Reads and clears the device IRQ
 * status; on FIFO overflow (bit 0) the device buffer is flushed and 0 is
 * returned, otherwise the number of bytes waiting in the device FIFO.
 */
I32U Tcc353xInterruptProcess(void)
{
	I32U ret = 0;
	I08U irqStatus = 0;
	I32S moduleIndex = 0;
	I32U totalSize = 0;

	/* Read BB Interrupt Status */
	Tcc353xApiGetIrqStatus(moduleIndex, &irqStatus);

	if (irqStatus & 0x01) {
		/* stream FIFO overflowed: clear IRQ and drop device buffer */
		TcpalPrintErr((I08S *)
			      "[TCC353X] FIFO overflow[0x%02X] flush!!!\n",
			      irqStatus);
		Tcc353xApiIrqClear(moduleIndex, irqStatus);
		Tcc353xApiInterruptBuffClr(moduleIndex);
		gOverflowcnt++;
		ret = 0;
	} else {
		Tcc353xApiIrqClear(moduleIndex, irqStatus);
		Tcc353xApiGetFifoStatus(moduleIndex, &totalSize);
		ret = totalSize;
	}

	/* periodic debug heartbeat, every ~41 interrupts */
	gDbgIsrCnt++;
	if (gDbgIsrCnt > 40) {
		gDbgIsrCnt = 0;
#ifdef _DBG_CHK_OVERFLOW_CNT_
		TcpalPrintStatus((I08S *) "[TCC353X] CurrOverflow Cnt %d\n",
				 gOverflowcnt);
#endif
	}
	return ret;
}

#ifndef USE_LGE_RING_BUFFER
/*
 * Bottom half: pull _fifoSize bytes from the device into the circular
 * buffer.  The size is rounded down to a multiple of 4 TS packets for
 * DMA-only mode, and capped; if the buffer would overflow it is flushed
 * first.  A wrong sync byte (!= 0x47) triggers a device buffer clear.
 */
void Tcc353xInterruptGetStream(I32U _fifoSize)
{
	I32S moduleIndex = 0;
	I32U totalSize;
	I32U wp;

	totalSize = _fifoSize - (_fifoSize % 188);
	//[Fix Start]align TS size to use DMA only mode - 20121228 hyewon.eum@lge.com
	totalSize = (totalSize / 188 / 4) * 188 * 4;
	/* NOTE(review): threshold is 87 packets but the cap is 84 — kept
	 * from vendor code; confirm the asymmetry is intentional. */
	if (totalSize > 188 * 87)
		totalSize = 188 * 84;
	//[Fix End]align TS size to use DMA only mode - 20121228 hyewon.eum@lge.com

	if (totalSize >= 188) {
		I32U nextwp;

		if (Tcc353xStreamBufferedSize[moduleIndex] + totalSize >
		    TCC353X_STREAM_BUFFER_SIZE)
			Tcc353xStreamBufferFlush(moduleIndex);

		TcpalSemaphoreLock(&Tcc353xStreamSema);
		wp = Tcc353xStreamWp[moduleIndex];
		TcpalSemaphoreUnLock(&Tcc353xStreamSema);

		nextwp = (wp + totalSize) % TCC353X_STREAM_BUFFER_SIZE;
		if (wp + totalSize > TCC353X_STREAM_BUFFER_SIZE) {
			/* region wraps: read the tail, then the head */
			I32U first = TCC353X_STREAM_BUFFER_SIZE - wp;

			Tcc353xApiStreamRead(moduleIndex,
					     &Tcc353xStreamData[wp], first);
			Tcc353xApiStreamRead(moduleIndex,
					     &Tcc353xStreamData[0], nextwp);
		} else {
			Tcc353xApiStreamRead(moduleIndex,
					     &Tcc353xStreamData[wp],
					     totalSize);
		}

		TcpalSemaphoreLock(&Tcc353xStreamSema);
		Tcc353xStreamWp[moduleIndex] = nextwp;
		Tcc353xStreamBufferedSize[moduleIndex] += totalSize;
		TcpalSemaphoreUnLock(&Tcc353xStreamSema);

		if (Tcc353xStreamData[wp] != 0x47) {
			TcpalPrintErr((I08S *)
				      "[TCC353X] SyncByte Error! [0x%02x]\n",
				      Tcc353xStreamData[wp]);
			TcpalPrintErr((I08S *)
				      "[TCC353X] Buff Flush for SyncByte matching\n");
			Tcc353xApiInterruptBuffClr(moduleIndex);
		}
	}
}
#else
/*
 * Bottom half (ring back-end): pull _fifoSize bytes from the device into
 * the scratch area, validate the TS sync byte, then push into the ring if
 * there is room.  Sizing/cap logic mirrors the circular-buffer variant.
 */
void Tcc353xInterruptGetStream(I32U _fifoSize)
{
	I32U totalSize;
	I32U freeSize;
	/* bugfix: was I32U, so the "< 0" error check below could never fire */
	I32S writeSize;

	totalSize = _fifoSize - (_fifoSize % 188);
	//[Fix Start]align TS size to use DMA only mode - 20121228 hyewon.eum@lge.com
	totalSize = (totalSize / 188 / 4) * 188 * 4;
	if (totalSize > 188 * 87)
		totalSize = 188 * 84;
	//[Fix End]align TS size to use DMA only mode - 20121228 hyewon.eum@lge.com

	if (totalSize < 188)
		return;

	Tcc353xApiStreamRead(0, &Tcc353xStreamData[0], totalSize);

	if (Tcc353xStreamData[0] != 0x47) {
		TcpalPrintErr((I08S *) "[TCC353X] SyncByte Error! [0x%02x]\n",
			      Tcc353xStreamData[0]);
		TcpalPrintErr((I08S *)
			      "[TCC353X] Buff Flush for SyncByte matching\n");
		return;
	}

	freeSize = (I32U) mbt_dataring_free(RingBufId);
	if (freeSize >= totalSize) {
		writeSize = mbt_dataring_write(RingBufId,
					       (const char *)
					       &Tcc353xStreamData[0],
					       totalSize);
		if (writeSize < 0)
			TcpalPrintErr((I08S *)
				      "[TCC353X] Ring Buffer Error!\n");
	}
}
#endif
gpl-2.0
matianfu/kunlun-u-boot
board/g2000/g2000.c
38
8288
/*
 * (C) Copyright 2004
 * Stefan Roese, esd gmbh germany, stefan.roese@esd-electronics.com
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <asm/processor.h>
#include <command.h>

/* Static PPC405GP SDRAM controller settings for the G2000 rev B board
 * (no SPD EEPROM is fitted, so no auto-detection is possible). */
#define MEM_MCOPT1_INIT_VAL	0x00800000
#define MEM_RTR_INIT_VAL	0x04070000
#define MEM_PMIT_INIT_VAL	0x07c00000
#define MEM_MB0CF_INIT_VAL	0x00082001
#define MEM_MB1CF_INIT_VAL	0x04082000
#define MEM_SDTR1_INIT_VAL	0x00854005
#define SDRAM0_CFG_ENABLE	0x80000000

#define CFG_SDRAM_SIZE		0x04000000	/* 64 MB */

/*
 * Early board init: program the universal interrupt controller (UIC)
 * polarities/trigger levels and the EBC ready timeout.  Runs before
 * relocation, so only minimal hardware is touched.
 */
int board_early_init_f (void)
{
#if 0 /* test-only */
	mtdcr (uicsr, 0xFFFFFFFF);	/* clear all ints */
	mtdcr (uicer, 0x00000000);	/* disable all ints */
	mtdcr (uiccr, 0x00000010);
	mtdcr (uicpr, 0xFFFF7FF0);	/* set int polarities */
	mtdcr (uictr, 0x00000010);	/* set int trigger levels */
	mtdcr (uicsr, 0xFFFFFFFF);	/* clear all ints */
#else
	mtdcr(uicsr, 0xFFFFFFFF);	/* clear all ints */
	mtdcr(uicer, 0x00000000);	/* disable all ints */
	mtdcr(uiccr, 0x00000000);	/* set all to be non-critical*/
	mtdcr(uicpr, 0xFFFFFFF0);	/* set int polarities */
	mtdcr(uictr, 0x10000000);	/* set int trigger levels */
	mtdcr(uicvcr, 0x00000001);	/* set vect base=0,INT0 highest priority*/
	mtdcr(uicsr, 0xFFFFFFFF);	/* clear all ints */
#endif

#if 1 /* test-only */
	/*
	 * EBC Configuration Register: set ready timeout to 512 ebc-clks -> ca. 15 us
	 */
	mtebc (epcr, 0xa8400000); /* ebc always driven */
#endif

	return 0;
}

/* Pre-relocation misc hook: nothing to do on this board. */
int misc_init_f (void)
{
	return 0;	/* dummy implementation */
}

/* Post-relocation misc hook: put the NAND control GPIOs in a safe state. */
int misc_init_r (void)
{
#if (CONFIG_COMMANDS & CFG_CMD_NAND)
	/*
	 * Set NAND-FLASH GPIO signals to default
	 */
	out32(GPIO0_OR, in32(GPIO0_OR) & ~(CFG_NAND_CLE | CFG_NAND_ALE));
	out32(GPIO0_OR, in32(GPIO0_OR) | CFG_NAND_CE);
#endif
	return (0);
}

/*
 * Check Board Identity: print the board name and, when available,
 * the "serial#" environment variable.
 */
int checkboard (void)
{
	char str[64];
	int i = getenv_r ("serial#", str, sizeof(str));

	puts ("Board: ");

	if (i == -1) {
		puts ("### No HW ID - assuming G2000");
	} else {
		puts(str);
	}

	putc ('\n');

	return 0;
}

/* -------------------------------------------------------------------------
  G2000 rev B is an embeded design. we don't read for spd of this version.
  Doing static SDRAM controller configuration in the following section.
 ------------------------------------------------------------------------- */
long int init_sdram_static_settings(void)
{
	/* Write one SDRAM controller register via the indirect
	 * memcfga/memcfgd DCR pair.  Wrapped in do/while(0) so the macro
	 * behaves as a single statement under unbraced if/else
	 * (bugfix: the old form expanded to two statements). */
#define mtsdram0(reg, data)	do { mtdcr(memcfga, (reg)); mtdcr(memcfgd, (data)); } while (0)

	/* disable memcontroller so updates work */
	mtsdram0( mem_mcopt1, MEM_MCOPT1_INIT_VAL );
	mtsdram0( mem_rtr   , MEM_RTR_INIT_VAL );
	mtsdram0( mem_pmit  , MEM_PMIT_INIT_VAL );
	mtsdram0( mem_mb0cf , MEM_MB0CF_INIT_VAL );
	mtsdram0( mem_mb1cf , MEM_MB1CF_INIT_VAL );
	mtsdram0( mem_sdtr1 , MEM_SDTR1_INIT_VAL );

	/* SDRAM have a power on delay, 500 micro should do */
	udelay(500);
	mtsdram0( mem_mcopt1, MEM_MCOPT1_INIT_VAL|SDRAM0_CFG_ENABLE );

	return (CFG_SDRAM_SIZE); /* CFG_SDRAM_SIZE is in G2000.h */
}

/* U-Boot DRAM init hook: returns the usable SDRAM size in bytes. */
long int initdram (int board_type)
{
	long int ret;

	/* flzt, we can still turn this on in the future */
	/* #ifdef CONFIG_SPD_EEPROM
	   ret = spd_sdram ();
	   #else
	   ret = init_sdram_static_settings();
	   #endif
	 */

	ret = init_sdram_static_settings();
	return ret;
}

#if 1 /* test-only */
void sdram_init(void)
{
	init_sdram_static_settings();
}
#endif

#if 0 /* test-only */
long int initdram (int board_type)
{
	unsigned long val;

	mtdcr(memcfga, mem_mb0cf);
	val = mfdcr(memcfgd);

#if 0
	printf("\nmb0cf=%x\n", val); /* test-only */
	printf("strap=%x\n", mfdcr(strap)); /* test-only */
#endif

	return (4*1024*1024 << ((val & 0x000e0000) >> 17));
}
#endif

/* Memory test stub.
 * NOTE(review): message says 16 MB although CFG_SDRAM_SIZE is 64 MB;
 * real test still TODO. */
int testdram (void)
{
	/* TODO: XXX XXX XXX */
	printf ("test: 16 MB - ok\n");

	return (0);
}

#if (CONFIG_COMMANDS & CFG_CMD_NAND)
#include <linux/mtd/nand_legacy.h>
extern struct nand_chip nand_dev_desc[CFG_MAX_NAND_DEVICE];

/* Probe the legacy NAND device and report its size. */
void nand_init(void)
{
	nand_probe(CFG_NAND_BASE);
	if (nand_dev_desc[0].ChipID != NAND_ChipID_UNKNOWN) {
		print_size(nand_dev_desc[0].totlen, "\n");
	}
}
#endif

#if 0 /* test-only !!! */
/* Debug command: dump all EBC peripheral-bank registers. */
int do_dumpebc(cmd_tbl_t *cmdtp, int flag, int argc, char *argv[])
{
	ulong ap, cr;

	printf("\nEBC registers for PPC405GP:\n");
	mfebc(pb0ap, ap); mfebc(pb0cr, cr);
	printf("0: AP=%08lx CP=%08lx\n", ap, cr);
	mfebc(pb1ap, ap); mfebc(pb1cr, cr);
	printf("1: AP=%08lx CP=%08lx\n", ap, cr);
	mfebc(pb2ap, ap); mfebc(pb2cr, cr);
	printf("2: AP=%08lx CP=%08lx\n", ap, cr);
	mfebc(pb3ap, ap); mfebc(pb3cr, cr);
	printf("3: AP=%08lx CP=%08lx\n", ap, cr);
	mfebc(pb4ap, ap); mfebc(pb4cr, cr);
	printf("4: AP=%08lx CP=%08lx\n", ap, cr);
	printf("\n");

	return 0;
}
U_BOOT_CMD(
	dumpebc,	1,	1,	do_dumpebc,
	"dumpebc - Dump all EBC registers\n",
	NULL
);

/* Debug command: dump all device configuration registers (DCRs). */
int do_dumpdcr(cmd_tbl_t *cmdtp, int flag, int argc, char *argv[])
{
	int i;

	printf("\nDevice Configuration Registers (DCR's) for PPC405GP:");
	for (i=0; i<=0x1e0; i++) {
		if (!(i % 0x8)) {
			printf("\n%04x ", i);
		}
		printf("%08lx ", get_dcr(i));
	}
	printf("\n");

	return 0;
}
U_BOOT_CMD(
	dumpdcr,	1,	1,	do_dumpdcr,
	"dumpdcr - Dump all DCR registers\n",
	NULL
);

/* Debug command: dump selected special purpose registers (SPRs).
 * mfspr needs a compile-time constant SPR number, hence the literal list. */
int do_dumpspr(cmd_tbl_t *cmdtp, int flag, int argc, char *argv[])
{
	printf("\nSpecial Purpose Registers (SPR's) for PPC405GP:");
	printf("\n%04x %08x ", 947, mfspr(947));
	printf("\n%04x %08x ", 9, mfspr(9));
	printf("\n%04x %08x ", 1014, mfspr(1014));
	printf("\n%04x %08x ", 1015, mfspr(1015));
	printf("\n%04x %08x ", 1010, mfspr(1010));
	printf("\n%04x %08x ", 957, mfspr(957));
	printf("\n%04x %08x ", 1008, mfspr(1008));
	printf("\n%04x %08x ", 1018, mfspr(1018));
	printf("\n%04x %08x ", 954, mfspr(954));
	printf("\n%04x %08x ", 950, mfspr(950));
	printf("\n%04x %08x ", 951, mfspr(951));
	printf("\n%04x %08x ", 981, mfspr(981));
	printf("\n%04x %08x ", 980, mfspr(980));
	printf("\n%04x %08x ", 982, mfspr(982));
	printf("\n%04x %08x ", 1012, mfspr(1012));
	printf("\n%04x %08x ", 1013, mfspr(1013));
	printf("\n%04x %08x ", 948, mfspr(948));
	printf("\n%04x %08x ", 949, mfspr(949));
	printf("\n%04x %08x ", 1019, mfspr(1019));
	printf("\n%04x %08x ", 979, mfspr(979));
	printf("\n%04x %08x ", 8, mfspr(8));
	printf("\n%04x %08x ", 945, mfspr(945));
	printf("\n%04x %08x ", 987, mfspr(987));
	printf("\n%04x %08x ", 287, mfspr(287));
	printf("\n%04x %08x ", 953, mfspr(953));
	printf("\n%04x %08x ", 955, mfspr(955));
	printf("\n%04x %08x ", 272, mfspr(272));
	printf("\n%04x %08x ", 273, mfspr(273));
	printf("\n%04x %08x ", 274, mfspr(274));
	printf("\n%04x %08x ", 275, mfspr(275));
	printf("\n%04x %08x ", 260, mfspr(260));
	printf("\n%04x %08x ", 276, mfspr(276));
	printf("\n%04x %08x ", 261, mfspr(261));
	printf("\n%04x %08x ", 277, mfspr(277));
	printf("\n%04x %08x ", 262, mfspr(262));
	printf("\n%04x %08x ", 278, mfspr(278));
	printf("\n%04x %08x ", 263, mfspr(263));
	printf("\n%04x %08x ", 279, mfspr(279));
	printf("\n%04x %08x ", 26, mfspr(26));
	printf("\n%04x %08x ", 27, mfspr(27));
	printf("\n%04x %08x ", 990, mfspr(990));
	printf("\n%04x %08x ", 991, mfspr(991));
	printf("\n%04x %08x ", 956, mfspr(956));
	printf("\n%04x %08x ", 284, mfspr(284));
	printf("\n%04x %08x ", 285, mfspr(285));
	printf("\n%04x %08x ", 986, mfspr(986));
	printf("\n%04x %08x ", 984, mfspr(984));
	printf("\n%04x %08x ", 256, mfspr(256));
	printf("\n%04x %08x ", 1, mfspr(1));
	printf("\n%04x %08x ", 944, mfspr(944));
	printf("\n");

	return 0;
}
U_BOOT_CMD(
	dumpspr,	1,	1,	do_dumpspr,
	"dumpspr - Dump all SPR registers\n",
	NULL
);
#endif
gpl-2.0
nobodyAtall/nAa-kernel
drivers/usb/gadget/f_rmnet.c
38
28906
/* * f_rmnet.c -- RmNet function driver * * Copyright (C) 2003-2005,2008 David Brownell * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) * Copyright (C) 2008 Nokia Corporation * Copyright (c) 2010, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/device.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/bitops.h> #include <mach/msm_smd.h> #include <linux/usb/cdc.h> #include <linux/usb/composite.h> #include <linux/usb/ch9.h> #include "gadget_chips.h" #ifdef CONFIG_USB_ANDROID_RMNET static char *rmnet_ctl_ch = CONFIG_RMNET_SMD_CTL_CHANNEL; module_param(rmnet_ctl_ch, charp, S_IRUGO); MODULE_PARM_DESC(rmnet_ctl_ch, "RmNet control SMD channel"); static char *rmnet_data_ch = CONFIG_RMNET_SMD_DATA_CHANNEL; module_param(rmnet_data_ch, charp, S_IRUGO); MODULE_PARM_DESC(rmnet_data_ch, "RmNet data SMD channel"); #endif #define RMNET_NOTIFY_INTERVAL 5 #define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification) #define QMI_REQ_MAX 4 #define QMI_REQ_SIZE 2048 #define QMI_RESP_MAX 8 #define QMI_RESP_SIZE 2048 #define RX_REQ_MAX 8 
#define RX_REQ_SIZE 2048 #define TX_REQ_MAX 8 #define TX_REQ_SIZE 2048 #define TXN_MAX 2048 /* QMI requests & responses buffer*/ struct qmi_buf { void *buf; int len; struct list_head list; }; /* Control & data SMD channel private data */ struct rmnet_smd_info { struct smd_channel *ch; struct tasklet_struct tx_tlet; struct tasklet_struct rx_tlet; #define CH_OPENED 0 unsigned long flags; /* pending rx packet length */ atomic_t rx_pkt; /* wait for smd open event*/ wait_queue_head_t wait; }; struct rmnet_dev { struct usb_function function; struct usb_composite_dev *cdev; struct usb_ep *epout; struct usb_ep *epin; struct usb_ep *epnotify; struct usb_request *notify_req; u8 ifc_id; /* QMI lists */ struct list_head qmi_req_pool; struct list_head qmi_resp_pool; struct list_head qmi_req_q; struct list_head qmi_resp_q; /* Tx/Rx lists */ struct list_head tx_idle; struct list_head rx_idle; struct list_head rx_queue; spinlock_t lock; atomic_t online; atomic_t notify_count; struct rmnet_smd_info smd_ctl; struct rmnet_smd_info smd_data; struct workqueue_struct *wq; struct work_struct connect_work; struct work_struct disconnect_work; }; static struct usb_interface_descriptor rmnet_interface_desc = { .bLength = USB_DT_INTERFACE_SIZE, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bNumEndpoints = 3, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC, .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC, /* .iInterface = DYNAMIC */ }; /* Full speed support */ static struct usb_endpoint_descriptor rmnet_fs_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE), .bInterval = 1 << RMNET_NOTIFY_INTERVAL, }; static struct usb_endpoint_descriptor rmnet_fs_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, 
.bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(64), }; static struct usb_endpoint_descriptor rmnet_fs_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(64), }; static struct usb_descriptor_header *rmnet_fs_function[] = { (struct usb_descriptor_header *) &rmnet_interface_desc, (struct usb_descriptor_header *) &rmnet_fs_notify_desc, (struct usb_descriptor_header *) &rmnet_fs_in_desc, (struct usb_descriptor_header *) &rmnet_fs_out_desc, NULL, }; /* High speed support */ static struct usb_endpoint_descriptor rmnet_hs_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE), .bInterval = RMNET_NOTIFY_INTERVAL + 4, }; static struct usb_endpoint_descriptor rmnet_hs_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(512), }; static struct usb_endpoint_descriptor rmnet_hs_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(512), }; static struct usb_descriptor_header *rmnet_hs_function[] = { (struct usb_descriptor_header *) &rmnet_interface_desc, (struct usb_descriptor_header *) &rmnet_hs_notify_desc, (struct usb_descriptor_header *) &rmnet_hs_in_desc, (struct usb_descriptor_header *) &rmnet_hs_out_desc, NULL, }; /* String descriptors */ static struct usb_string rmnet_string_defs[] = { [0].s = "QMI RmNet", { } /* end of list */ }; static struct usb_gadget_strings rmnet_string_table = { .language = 0x0409, /* en-us */ .strings = rmnet_string_defs, }; static 
struct usb_gadget_strings *rmnet_strings[] = { &rmnet_string_table, NULL, }; static struct qmi_buf * rmnet_alloc_qmi(unsigned len, gfp_t kmalloc_flags) { struct qmi_buf *qmi; qmi = kmalloc(sizeof(struct qmi_buf), kmalloc_flags); if (qmi != NULL) { qmi->buf = kmalloc(len, kmalloc_flags); if (qmi->buf == NULL) { kfree(qmi); qmi = NULL; } } return qmi ? qmi : ERR_PTR(-ENOMEM); } static void rmnet_free_qmi(struct qmi_buf *qmi) { kfree(qmi->buf); kfree(qmi); } /* * Allocate a usb_request and its buffer. Returns a pointer to the * usb_request or NULL if there is an error. */ static struct usb_request * rmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) { struct usb_request *req; req = usb_ep_alloc_request(ep, kmalloc_flags); if (req != NULL) { req->length = len; req->buf = kmalloc(len, kmalloc_flags); if (req->buf == NULL) { usb_ep_free_request(ep, req); req = NULL; } } return req ? req : ERR_PTR(-ENOMEM); } /* * Free a usb_request and its buffer. */ static void rmnet_free_req(struct usb_ep *ep, struct usb_request *req) { kfree(req->buf); usb_ep_free_request(ep, req); } static void rmnet_notify_complete(struct usb_ep *ep, struct usb_request *req) { struct rmnet_dev *dev = req->context; struct usb_composite_dev *cdev = dev->cdev; int status = req->status; switch (status) { case -ECONNRESET: case -ESHUTDOWN: /* connection gone */ atomic_set(&dev->notify_count, 0); break; default: ERROR(cdev, "rmnet notify ep error %d\n", status); /* FALLTHROUGH */ case 0: if (ep != dev->epnotify) break; /* handle multiple pending QMI_RESPONSE_AVAILABLE * notifications by resending until we're done */ if (atomic_dec_and_test(&dev->notify_count)) break; status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC); if (status) { atomic_dec(&dev->notify_count); ERROR(cdev, "rmnet notify ep enqueue error %d\n", status); } break; } } static void qmi_response_available(struct rmnet_dev *dev) { struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req = dev->notify_req; 
struct usb_cdc_notification *event = req->buf; int status; /* Response will be sent later */ if (atomic_inc_return(&dev->notify_count) != 1) return; event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE; event->wValue = cpu_to_le16(0); event->wIndex = cpu_to_le16(dev->ifc_id); event->wLength = cpu_to_le16(0); status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC); if (status < 0) { atomic_dec(&dev->notify_count); ERROR(cdev, "rmnet notify ep enqueue error %d\n", status); } } /* TODO * handle modem restart events */ static void rmnet_smd_notify(void *priv, unsigned event) { struct rmnet_smd_info *smd_info = priv; int len = atomic_read(&smd_info->rx_pkt); switch (event) { case SMD_EVENT_DATA: { if (len && (smd_write_avail(smd_info->ch) >= len)) tasklet_schedule(&smd_info->rx_tlet); if (smd_read_avail(smd_info->ch)) tasklet_schedule(&smd_info->tx_tlet); break; } case SMD_EVENT_OPEN: /* usb endpoints are not enabled untill smd channels * are opened. wake up worker thread to continue * connection processing */ set_bit(CH_OPENED, &smd_info->flags); wake_up(&smd_info->wait); break; case SMD_EVENT_CLOSE: /* We will never come here. 
* reset flags after closing smd channel * */ clear_bit(CH_OPENED, &smd_info->flags); break; } } static void rmnet_control_tx_tlet(unsigned long arg) { struct rmnet_dev *dev = (struct rmnet_dev *) arg; struct usb_composite_dev *cdev = dev->cdev; struct qmi_buf *qmi_resp; int sz; unsigned long flags; while (1) { sz = smd_cur_packet_size(dev->smd_ctl.ch); if (sz == 0) break; if (smd_read_avail(dev->smd_ctl.ch) < sz) break; spin_lock_irqsave(&dev->lock, flags); if (list_empty(&dev->qmi_resp_pool)) { ERROR(cdev, "rmnet QMI Tx buffers full\n"); spin_unlock_irqrestore(&dev->lock, flags); break; } qmi_resp = list_first_entry(&dev->qmi_resp_pool, struct qmi_buf, list); list_del(&qmi_resp->list); spin_unlock_irqrestore(&dev->lock, flags); qmi_resp->len = smd_read(dev->smd_ctl.ch, qmi_resp->buf, sz); spin_lock_irqsave(&dev->lock, flags); list_add_tail(&qmi_resp->list, &dev->qmi_resp_q); spin_unlock_irqrestore(&dev->lock, flags); qmi_response_available(dev); } } static void rmnet_control_rx_tlet(unsigned long arg) { struct rmnet_dev *dev = (struct rmnet_dev *) arg; struct usb_composite_dev *cdev = dev->cdev; struct qmi_buf *qmi_req; int ret; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); while (1) { if (list_empty(&dev->qmi_req_q)) { atomic_set(&dev->smd_ctl.rx_pkt, 0); break; } qmi_req = list_first_entry(&dev->qmi_req_q, struct qmi_buf, list); if (smd_write_avail(dev->smd_ctl.ch) < qmi_req->len) { atomic_set(&dev->smd_ctl.rx_pkt, qmi_req->len); DBG(cdev, "rmnet control smd channel full\n"); break; } list_del(&qmi_req->list); spin_unlock_irqrestore(&dev->lock, flags); ret = smd_write(dev->smd_ctl.ch, qmi_req->buf, qmi_req->len); spin_lock_irqsave(&dev->lock, flags); if (ret != qmi_req->len) { ERROR(cdev, "rmnet control smd write failed\n"); break; } list_add_tail(&qmi_req->list, &dev->qmi_req_pool); } spin_unlock_irqrestore(&dev->lock, flags); } static void rmnet_command_complete(struct usb_ep *ep, struct usb_request *req) { struct rmnet_dev *dev = req->context; 
struct usb_composite_dev *cdev = dev->cdev; struct qmi_buf *qmi_req; int ret; if (req->status < 0) { ERROR(cdev, "rmnet command error %d\n", req->status); return; } spin_lock(&dev->lock); /* no pending control rx packet */ if (!atomic_read(&dev->smd_ctl.rx_pkt)) { if (smd_write_avail(dev->smd_ctl.ch) < req->actual) { atomic_set(&dev->smd_ctl.rx_pkt, req->actual); goto queue_req; } spin_unlock(&dev->lock); ret = smd_write(dev->smd_ctl.ch, req->buf, req->actual); /* This should never happen */ if (ret != req->actual) ERROR(cdev, "rmnet control smd write failed\n"); return; } queue_req: if (list_empty(&dev->qmi_req_pool)) { spin_unlock(&dev->lock); ERROR(cdev, "rmnet QMI pool is empty\n"); return; } qmi_req = list_first_entry(&dev->qmi_req_pool, struct qmi_buf, list); list_del(&qmi_req->list); spin_unlock(&dev->lock); memcpy(qmi_req->buf, req->buf, req->actual); qmi_req->len = req->actual; spin_lock(&dev->lock); list_add_tail(&qmi_req->list, &dev->qmi_req_q); spin_unlock(&dev->lock); } static int rmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function); struct usb_composite_dev *cdev = f->config->cdev; struct usb_request *req = cdev->req; int ret = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); struct qmi_buf *resp; int schedule = 0; if (!atomic_read(&dev->online)) return -ENOTCONN; switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_SEND_ENCAPSULATED_COMMAND: if (w_length > req->length || w_value || w_index != dev->ifc_id) goto invalid; ret = w_length; req->complete = rmnet_command_complete; req->context = dev; break; case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_GET_ENCAPSULATED_RESPONSE: if (w_value || w_index != dev->ifc_id) goto invalid; else { spin_lock(&dev->lock); resp = 
list_first_entry(&dev->qmi_resp_q, struct qmi_buf, list); list_del(&resp->list); spin_unlock(&dev->lock); memcpy(req->buf, resp->buf, resp->len); ret = resp->len; spin_lock(&dev->lock); if (list_empty(&dev->qmi_resp_pool)) schedule = 1; list_add_tail(&resp->list, &dev->qmi_resp_pool); if (schedule) tasklet_schedule(&dev->smd_ctl.tx_tlet); spin_unlock(&dev->lock); } break; default: invalid: DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); } /* respond with data transfer or status phase? */ if (ret >= 0) { VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); req->zero = 0; req->length = ret; ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); if (ret < 0) ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret); } return ret; } static void rmnet_start_rx(struct rmnet_dev *dev) { struct usb_composite_dev *cdev = dev->cdev; int status; struct usb_request *req; struct list_head *act, *tmp; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); list_for_each_safe(act, tmp, &dev->rx_idle) { req = list_entry(act, struct usb_request, list); list_del(&req->list); spin_unlock_irqrestore(&dev->lock, flags); status = usb_ep_queue(dev->epout, req, GFP_ATOMIC); spin_lock_irqsave(&dev->lock, flags); if (status) { ERROR(cdev, "rmnet data rx enqueue err %d\n", status); list_add_tail(&req->list, &dev->rx_idle); break; } } spin_unlock_irqrestore(&dev->lock, flags); } static void rmnet_data_tx_tlet(unsigned long arg) { struct rmnet_dev *dev = (struct rmnet_dev *) arg; struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req; int status; int sz; unsigned long flags; while (1) { sz = smd_cur_packet_size(dev->smd_data.ch); if (sz == 0) break; if (smd_read_avail(dev->smd_data.ch) < sz) break; spin_lock_irqsave(&dev->lock, flags); if (list_empty(&dev->tx_idle)) { spin_unlock_irqrestore(&dev->lock, flags); DBG(cdev, "rmnet data Tx buffers 
full\n");
			break;
		}
		req = list_first_entry(&dev->tx_idle,
				struct usb_request, list);
		list_del(&req->list);
		spin_unlock_irqrestore(&dev->lock, flags);

		/* read the SMD packet straight into the usb_request buffer */
		req->length = smd_read(dev->smd_data.ch, req->buf, sz);
		status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
		if (status) {
			ERROR(cdev, "rmnet tx data enqueue err %d\n", status);
			spin_lock_irqsave(&dev->lock, flags);
			list_add_tail(&req->list, &dev->tx_idle);
			spin_unlock_irqrestore(&dev->lock, flags);
			break;
		}
	}
}

/*
 * Data-channel "rx" tasklet: push host packets queued on rx_queue into
 * the data SMD channel; smd_data.rx_pkt remembers the blocked length so
 * rmnet_smd_notify() can reschedule when the channel drains.
 */
static void rmnet_data_rx_tlet(unsigned long arg)
{
	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	while (1) {
		if (list_empty(&dev->rx_queue)) {
			/* nothing blocked any more */
			atomic_set(&dev->smd_data.rx_pkt, 0);
			break;
		}
		req = list_first_entry(&dev->rx_queue,
			struct usb_request, list);
		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
			atomic_set(&dev->smd_data.rx_pkt, req->actual);
			DBG(cdev, "rmnet SMD data channel full\n");
			break;
		}
		list_del(&req->list);
		spin_unlock_irqrestore(&dev->lock, flags);
		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
		spin_lock_irqsave(&dev->lock, flags);
		if (ret != req->actual) {
			ERROR(cdev, "rmnet SMD data write failed\n");
			break;
		}
		list_add_tail(&req->list, &dev->rx_idle);
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	/* We have free rx data requests. */
	rmnet_start_rx(dev);
}

/* If SMD has enough room to accommodate a data rx packet,
 * write into SMD directly. Otherwise enqueue to rx_queue.
 * We will not write into SMD directly until rx_queue is
 * empty to strictly follow the ordering requests.
 */
static void rmnet_complete_epout(struct usb_ep *ep, struct usb_request *req)
{
	struct rmnet_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	int status = req->status;
	int ret;

	switch (status) {
	case 0:
		/* normal completion */
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone: just recycle the request */
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		return;
	default:
		/* unexpected failure */
		ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
			ep->name, status, req->actual, req->length);
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		return;
	}

	spin_lock(&dev->lock);
	/* fast path only while nothing is queued (preserves ordering) */
	if (!atomic_read(&dev->smd_data.rx_pkt)) {
		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
			atomic_set(&dev->smd_data.rx_pkt, req->actual);
			goto queue_req;
		}
		spin_unlock(&dev->lock);
		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
		/* This should never happen */
		if (ret != req->actual)
			ERROR(cdev, "rmnet data smd write failed\n");
		/* Restart Rx */
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		rmnet_start_rx(dev);
		return;
	}
queue_req:
	list_add_tail(&req->list, &dev->rx_queue);
	spin_unlock(&dev->lock);
}

/*
 * IN-endpoint completion: recycle the request onto tx_idle and, if the
 * pool was empty (the tx tasklet may have stalled for lack of buffers),
 * kick the data tx tasklet.
 */
static void rmnet_complete_epin(struct usb_ep *ep, struct usb_request *req)
{
	struct rmnet_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	int status = req->status;
	int schedule = 0;

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->tx_idle);
		spin_unlock(&dev->lock);
		break;
	default:
		ERROR(cdev, "rmnet data tx ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		spin_lock(&dev->lock);
		if (list_empty(&dev->tx_idle))
			schedule = 1;
		list_add_tail(&req->list, &dev->tx_idle);
		if (schedule)
			tasklet_schedule(&dev->smd_data.tx_tlet);
		spin_unlock(&dev->lock);
		break;
	}
}

/*
 * Deferred disconnect: stop the tasklets, return every in-flight buffer
 * to its pool and close both SMD channels (smd_close may sleep).
 */
static void rmnet_disconnect_work(struct work_struct *w)
{
	struct qmi_buf *qmi;
	struct usb_request
*req; struct list_head *act, *tmp; struct rmnet_dev *dev = container_of(w, struct rmnet_dev, disconnect_work); atomic_set(&dev->notify_count, 0); tasklet_kill(&dev->smd_ctl.rx_tlet); tasklet_kill(&dev->smd_ctl.tx_tlet); tasklet_kill(&dev->smd_data.rx_tlet); tasklet_kill(&dev->smd_data.rx_tlet); list_for_each_safe(act, tmp, &dev->rx_queue) { req = list_entry(act, struct usb_request, list); list_del(&req->list); list_add_tail(&req->list, &dev->rx_idle); } list_for_each_safe(act, tmp, &dev->qmi_req_q) { qmi = list_entry(act, struct qmi_buf, list); list_del(&qmi->list); list_add_tail(&qmi->list, &dev->qmi_req_pool); } list_for_each_safe(act, tmp, &dev->qmi_resp_q) { qmi = list_entry(act, struct qmi_buf, list); list_del(&qmi->list); list_add_tail(&qmi->list, &dev->qmi_resp_pool); } smd_close(dev->smd_ctl.ch); dev->smd_ctl.flags = 0; smd_close(dev->smd_data.ch); dev->smd_data.flags = 0; } /* SMD close may sleep * schedule a work to close smd channels */ static void rmnet_disable(struct usb_function *f) { struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function); if (!atomic_read(&dev->online)) return; atomic_set(&dev->online, 0); usb_ep_fifo_flush(dev->epnotify); usb_ep_disable(dev->epnotify); usb_ep_fifo_flush(dev->epout); usb_ep_disable(dev->epout); usb_ep_fifo_flush(dev->epin); usb_ep_disable(dev->epin); /* cleanup work */ queue_work(dev->wq, &dev->disconnect_work); } static void rmnet_connect_work(struct work_struct *w) { struct rmnet_dev *dev = container_of(w, struct rmnet_dev, connect_work); struct usb_composite_dev *cdev = dev->cdev; int ret; /* Control channel for QMI messages */ ret = smd_open(rmnet_ctl_ch, &dev->smd_ctl.ch, &dev->smd_ctl, rmnet_smd_notify); if (ret) { ERROR(cdev, "Unable to open control smd channel\n"); return; } wait_event(dev->smd_ctl.wait, test_bit(CH_OPENED, &dev->smd_ctl.flags)); /* Data channel for network packets */ ret = smd_open(rmnet_data_ch, &dev->smd_data.ch, &dev->smd_data, rmnet_smd_notify); if (ret) { ERROR(cdev, 
"Unable to open data smd channel\n"); smd_close(dev->smd_ctl.ch); } wait_event(dev->smd_data.wait, test_bit(CH_OPENED, &dev->smd_data.flags)); usb_ep_enable(dev->epin, ep_choose(cdev->gadget, &rmnet_hs_in_desc, &rmnet_fs_in_desc)); usb_ep_enable(dev->epout, ep_choose(cdev->gadget, &rmnet_hs_out_desc, &rmnet_fs_out_desc)); usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget, &rmnet_hs_notify_desc, &rmnet_fs_notify_desc)); atomic_set(&dev->online, 1); /* Queue Rx data requests */ rmnet_start_rx(dev); } /* SMD open may sleep. * Schedule a work to open smd channels and enable * endpoints if smd channels are opened successfully. */ static int rmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function); queue_work(dev->wq, &dev->connect_work); return 0; } static void rmnet_free_buf(struct rmnet_dev *dev) { struct qmi_buf *qmi; struct usb_request *req; struct list_head *act, *tmp; /* free all usb requests in tx pool */ list_for_each_safe(act, tmp, &dev->tx_idle) { req = list_entry(act, struct usb_request, list); list_del(&req->list); rmnet_free_req(dev->epout, req); } /* free all usb requests in rx pool */ list_for_each_safe(act, tmp, &dev->rx_idle) { req = list_entry(act, struct usb_request, list); list_del(&req->list); rmnet_free_req(dev->epin, req); } /* free all buffers in qmi request pool */ list_for_each_safe(act, tmp, &dev->qmi_req_pool) { qmi = list_entry(act, struct qmi_buf, list); list_del(&qmi->list); rmnet_free_qmi(qmi); } /* free all buffers in qmi request pool */ list_for_each_safe(act, tmp, &dev->qmi_resp_pool) { qmi = list_entry(act, struct qmi_buf, list); list_del(&qmi->list); rmnet_free_qmi(qmi); } rmnet_free_req(dev->epnotify, dev->notify_req); } static int rmnet_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function); int i, id, ret; struct qmi_buf *qmi; struct 
usb_request *req;
	struct usb_ep *ep;

	dev->cdev = cdev;

	/* allocate interface ID */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	dev->ifc_id = id;
	rmnet_interface_desc.bInterfaceNumber = id;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epin = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epout = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epnotify = ep;

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		rmnet_hs_in_desc.bEndpointAddress =
			rmnet_fs_in_desc.bEndpointAddress;
		rmnet_hs_out_desc.bEndpointAddress =
			rmnet_fs_out_desc.bEndpointAddress;
		rmnet_hs_notify_desc.bEndpointAddress =
			rmnet_fs_notify_desc.bEndpointAddress;
	}

	/* allocate notification request (RESPONSE_AVAILABLE events) */
	dev->notify_req = rmnet_alloc_req(dev->epnotify,
			RMNET_MAX_NOTIFY_SIZE, GFP_KERNEL);
	if (IS_ERR(dev->notify_req))
		return PTR_ERR(dev->notify_req);
	dev->notify_req->complete = rmnet_notify_complete;
	dev->notify_req->context = dev;
	dev->notify_req->length = RMNET_MAX_NOTIFY_SIZE;

	/* Allocate the qmi request and response buffers */
	for (i = 0; i < QMI_REQ_MAX; i++) {
		qmi = rmnet_alloc_qmi(QMI_REQ_SIZE, GFP_KERNEL);
		if (IS_ERR(qmi)) {
			ret = PTR_ERR(qmi);
			goto free_buf;
		}
		list_add_tail(&qmi->list, &dev->qmi_req_pool);
	}

	for (i = 0; i < QMI_RESP_MAX; i++) {
		qmi = rmnet_alloc_qmi(QMI_RESP_SIZE, GFP_KERNEL);
		if (IS_ERR(qmi)) {
			ret = PTR_ERR(qmi);
			goto free_buf;
		}
		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
	}

	/* Allocate bulk in/out requests for data transfer */
	for (i = 0; i < RX_REQ_MAX; i++) {
		req = rmnet_alloc_req(dev->epout, RX_REQ_SIZE, GFP_KERNEL);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			goto free_buf;
		}
req->length = TXN_MAX; req->context = dev; req->complete = rmnet_complete_epout; list_add_tail(&req->list, &dev->rx_idle); } for (i = 0; i < TX_REQ_MAX; i++) { req = rmnet_alloc_req(dev->epout, TX_REQ_SIZE, GFP_KERNEL); if (IS_ERR(req)) { ret = PTR_ERR(req); goto free_buf; } req->context = dev; req->complete = rmnet_complete_epin; list_add_tail(&req->list, &dev->tx_idle); } return 0; free_buf: rmnet_free_buf(dev); dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */ return ret; } static void rmnet_unbind(struct usb_configuration *c, struct usb_function *f) { struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function); tasklet_kill(&dev->smd_ctl.rx_tlet); tasklet_kill(&dev->smd_ctl.tx_tlet); tasklet_kill(&dev->smd_data.rx_tlet); tasklet_kill(&dev->smd_data.rx_tlet); flush_workqueue(dev->wq); rmnet_free_buf(dev); dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */ destroy_workqueue(dev->wq); kfree(dev); } int rmnet_function_add(struct usb_configuration *c) { struct rmnet_dev *dev; int ret; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->wq = create_singlethread_workqueue("k_rmnet_work"); if (!dev->wq) { ret = -ENOMEM; goto free_dev; } spin_lock_init(&dev->lock); atomic_set(&dev->notify_count, 0); atomic_set(&dev->online, 0); atomic_set(&dev->smd_ctl.rx_pkt, 0); atomic_set(&dev->smd_data.rx_pkt, 0); INIT_WORK(&dev->connect_work, rmnet_connect_work); INIT_WORK(&dev->disconnect_work, rmnet_disconnect_work); tasklet_init(&dev->smd_ctl.rx_tlet, rmnet_control_rx_tlet, (unsigned long) dev); tasklet_init(&dev->smd_ctl.tx_tlet, rmnet_control_tx_tlet, (unsigned long) dev); tasklet_init(&dev->smd_data.rx_tlet, rmnet_data_rx_tlet, (unsigned long) dev); tasklet_init(&dev->smd_data.tx_tlet, rmnet_data_tx_tlet, (unsigned long) dev); init_waitqueue_head(&dev->smd_ctl.wait); init_waitqueue_head(&dev->smd_data.wait); INIT_LIST_HEAD(&dev->qmi_req_pool); INIT_LIST_HEAD(&dev->qmi_req_q); 
INIT_LIST_HEAD(&dev->qmi_resp_pool);
	INIT_LIST_HEAD(&dev->qmi_resp_q);
	INIT_LIST_HEAD(&dev->rx_idle);
	INIT_LIST_HEAD(&dev->rx_queue);
	INIT_LIST_HEAD(&dev->tx_idle);

	/* hook this function into the composite framework */
	dev->function.name = "rmnet";
	dev->function.strings = rmnet_strings;
	dev->function.descriptors = rmnet_fs_function;
	dev->function.hs_descriptors = rmnet_hs_function;
	dev->function.bind = rmnet_bind;
	dev->function.unbind = rmnet_unbind;
	dev->function.setup = rmnet_setup;
	dev->function.set_alt = rmnet_set_alt;
	dev->function.disable = rmnet_disable;

	ret = usb_add_function(c, &dev->function);
	if (ret)
		goto free_wq;

	return 0;

	/* unwind in reverse order of allocation */
free_wq:
	destroy_workqueue(dev->wq);
free_dev:
	kfree(dev);

	return ret;
}
gpl-2.0
OpenELEC/linux
drivers/md/dm-table.c
38
40508
/* * Copyright (C) 2001 Sistina Software (UK) Limited. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ #include "dm.h" #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/blkdev.h> #include <linux/namei.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/atomic.h> #include <linux/blk-mq.h> #include <linux/mount.h> #define DM_MSG_PREFIX "table" #define MAX_DEPTH 16 #define NODE_SIZE L1_CACHE_BYTES #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t)) #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) struct dm_table { struct mapped_device *md; unsigned type; /* btree table */ unsigned int depth; unsigned int counts[MAX_DEPTH]; /* in nodes */ sector_t *index[MAX_DEPTH]; unsigned int num_targets; unsigned int num_allocated; sector_t *highs; struct dm_target *targets; struct target_type *immutable_target_type; unsigned integrity_supported:1; unsigned singleton:1; /* * Indicates the rw permissions for the new logical * device. This should be a combination of FMODE_READ * and FMODE_WRITE. */ fmode_t mode; /* a list of devices used by this table */ struct list_head devices; /* events get handed up using this callback */ void (*event_fn)(void *); void *event_context; struct dm_md_mempools *mempools; struct list_head target_callbacks; }; /* * Similar to ceiling(log_size(n)) */ static unsigned int int_log(unsigned int n, unsigned int base) { int result = 0; while (n > 1) { n = dm_div_up(n, base); result++; } return result; } /* * Calculate the index of the child node of the n'th node k'th key. */ static inline unsigned int get_child(unsigned int n, unsigned int k) { return (n * CHILDREN_PER_NODE) + k; } /* * Return the n'th node of level l from table t. 
*/ static inline sector_t *get_node(struct dm_table *t, unsigned int l, unsigned int n) { return t->index[l] + (n * KEYS_PER_NODE); } /* * Return the highest key that you could lookup from the n'th * node on level l of the btree. */ static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) { for (; l < t->depth - 1; l++) n = get_child(n, CHILDREN_PER_NODE - 1); if (n >= t->counts[l]) return (sector_t) - 1; return get_node(t, l, n)[KEYS_PER_NODE - 1]; } /* * Fills in a level of the btree based on the highs of the level * below it. */ static int setup_btree_index(unsigned int l, struct dm_table *t) { unsigned int n, k; sector_t *node; for (n = 0U; n < t->counts[l]; n++) { node = get_node(t, l, n); for (k = 0U; k < KEYS_PER_NODE; k++) node[k] = high(t, l + 1, get_child(n, k)); } return 0; } void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size) { unsigned long size; void *addr; /* * Check that we're not going to overflow. */ if (nmemb > (ULONG_MAX / elem_size)) return NULL; size = nmemb * elem_size; addr = vzalloc(size); return addr; } EXPORT_SYMBOL(dm_vcalloc); /* * highs, and targets are managed as dynamic arrays during a * table load. */ static int alloc_targets(struct dm_table *t, unsigned int num) { sector_t *n_highs; struct dm_target *n_targets; /* * Allocate both the target array and offset array at once. * Append an empty entry to catch sectors beyond the end of * the device. 
*/ n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) + sizeof(sector_t)); if (!n_highs) return -ENOMEM; n_targets = (struct dm_target *) (n_highs + num); memset(n_highs, -1, sizeof(*n_highs) * num); vfree(t->highs); t->num_allocated = num; t->highs = n_highs; t->targets = n_targets; return 0; } int dm_table_create(struct dm_table **result, fmode_t mode, unsigned num_targets, struct mapped_device *md) { struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); if (!t) return -ENOMEM; INIT_LIST_HEAD(&t->devices); INIT_LIST_HEAD(&t->target_callbacks); if (!num_targets) num_targets = KEYS_PER_NODE; num_targets = dm_round_up(num_targets, KEYS_PER_NODE); if (!num_targets) { kfree(t); return -ENOMEM; } if (alloc_targets(t, num_targets)) { kfree(t); return -ENOMEM; } t->mode = mode; t->md = md; *result = t; return 0; } static void free_devices(struct list_head *devices, struct mapped_device *md) { struct list_head *tmp, *next; list_for_each_safe(tmp, next, devices) { struct dm_dev_internal *dd = list_entry(tmp, struct dm_dev_internal, list); DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s", dm_device_name(md), dd->dm_dev->name); dm_put_table_device(md, dd->dm_dev); kfree(dd); } } void dm_table_destroy(struct dm_table *t) { unsigned int i; if (!t) return; /* free the indexes */ if (t->depth >= 2) vfree(t->index[t->depth - 2]); /* free the targets */ for (i = 0; i < t->num_targets; i++) { struct dm_target *tgt = t->targets + i; if (tgt->type->dtr) tgt->type->dtr(tgt); dm_put_target_type(tgt->type); } vfree(t->highs); /* free the device list */ free_devices(&t->devices, t->md); dm_free_md_mempools(t->mempools); kfree(t); } /* * See if we've already got a device in the list. 
*/ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev) { struct dm_dev_internal *dd; list_for_each_entry (dd, l, list) if (dd->dm_dev->bdev->bd_dev == dev) return dd; return NULL; } /* * If possible, this checks an area of a destination device is invalid. */ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct request_queue *q; struct queue_limits *limits = data; struct block_device *bdev = dev->bdev; sector_t dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; unsigned short logical_block_size_sectors = limits->logical_block_size >> SECTOR_SHIFT; char b[BDEVNAME_SIZE]; /* * Some devices exist without request functions, * such as loop devices not yet bound to backing files. * Forbid the use of such devices. */ q = bdev_get_queue(bdev); if (!q || !q->make_request_fn) { DMWARN("%s: %s is not yet initialised: " "start=%llu, len=%llu, dev_size=%llu", dm_device_name(ti->table->md), bdevname(bdev, b), (unsigned long long)start, (unsigned long long)len, (unsigned long long)dev_size); return 1; } if (!dev_size) return 0; if ((start >= dev_size) || (start + len > dev_size)) { DMWARN("%s: %s too small for target: " "start=%llu, len=%llu, dev_size=%llu", dm_device_name(ti->table->md), bdevname(bdev, b), (unsigned long long)start, (unsigned long long)len, (unsigned long long)dev_size); return 1; } if (logical_block_size_sectors <= 1) return 0; if (start & (logical_block_size_sectors - 1)) { DMWARN("%s: start=%llu not aligned to h/w " "logical block size %u of %s", dm_device_name(ti->table->md), (unsigned long long)start, limits->logical_block_size, bdevname(bdev, b)); return 1; } if (len & (logical_block_size_sectors - 1)) { DMWARN("%s: len=%llu not aligned to h/w " "logical block size %u of %s", dm_device_name(ti->table->md), (unsigned long long)len, limits->logical_block_size, bdevname(bdev, b)); return 1; } return 0; } /* * This upgrades the mode on an already open dm_dev, 
being * careful to leave things as they were if we fail to reopen the * device and not to touch the existing bdev field in case * it is accessed concurrently inside dm_table_any_congested(). */ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, struct mapped_device *md) { int r; struct dm_dev *old_dev, *new_dev; old_dev = dd->dm_dev; r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev, dd->dm_dev->mode | new_mode, &new_dev); if (r) return r; dd->dm_dev = new_dev; dm_put_table_device(md, old_dev); return 0; } /* * Convert the path to a device */ dev_t dm_get_dev_t(const char *path) { dev_t uninitialized_var(dev); struct block_device *bdev; bdev = lookup_bdev(path); if (IS_ERR(bdev)) dev = name_to_dev_t(path); else { dev = bdev->bd_dev; bdput(bdev); } return dev; } EXPORT_SYMBOL_GPL(dm_get_dev_t); /* * Add a device to the list, or just increment the usage count if * it's already present. */ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, struct dm_dev **result) { int r; dev_t dev; struct dm_dev_internal *dd; struct dm_table *t = ti->table; BUG_ON(!t); dev = dm_get_dev_t(path); if (!dev) return -ENODEV; dd = find_device(&t->devices, dev); if (!dd) { dd = kmalloc(sizeof(*dd), GFP_KERNEL); if (!dd) return -ENOMEM; if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) { kfree(dd); return r; } atomic_set(&dd->count, 0); list_add(&dd->list, &t->devices); } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) { r = upgrade_mode(dd, mode, t->md); if (r) return r; } atomic_inc(&dd->count); *result = dd->dm_dev; return 0; } EXPORT_SYMBOL(dm_get_device); static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct queue_limits *limits = data; struct block_device *bdev = dev->bdev; struct request_queue *q = bdev_get_queue(bdev); char b[BDEVNAME_SIZE]; if (unlikely(!q)) { DMWARN("%s: Cannot set limits for nonexistent device %s", dm_device_name(ti->table->md), 
bdevname(bdev, b)); return 0; } if (bdev_stack_limits(limits, bdev, start) < 0) DMWARN("%s: adding target device %s caused an alignment inconsistency: " "physical_block_size=%u, logical_block_size=%u, " "alignment_offset=%u, start=%llu", dm_device_name(ti->table->md), bdevname(bdev, b), q->limits.physical_block_size, q->limits.logical_block_size, q->limits.alignment_offset, (unsigned long long) start << SECTOR_SHIFT); return 0; } /* * Decrement a device's use count and remove it if necessary. */ void dm_put_device(struct dm_target *ti, struct dm_dev *d) { int found = 0; struct list_head *devices = &ti->table->devices; struct dm_dev_internal *dd; list_for_each_entry(dd, devices, list) { if (dd->dm_dev == d) { found = 1; break; } } if (!found) { DMWARN("%s: device %s not in table devices list", dm_device_name(ti->table->md), d->name); return; } if (atomic_dec_and_test(&dd->count)) { dm_put_table_device(ti->table->md, d); list_del(&dd->list); kfree(dd); } } EXPORT_SYMBOL(dm_put_device); /* * Checks to see if the target joins onto the end of the table. */ static int adjoin(struct dm_table *table, struct dm_target *ti) { struct dm_target *prev; if (!table->num_targets) return !ti->begin; prev = &table->targets[table->num_targets - 1]; return (ti->begin == (prev->begin + prev->len)); } /* * Used to dynamically allocate the arg array. * * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must * process messages even if some device is suspended. These messages have a * small fixed number of arguments. * * On the other hand, dm-switch needs to process bulk data using messages and * excessive use of GFP_NOIO could cause trouble. 
*/
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;
	gfp_t gfp;

	if (*array_size) {
		/* grow geometrically once the initial array exists */
		new_size = *array_size * 2;
		gfp = GFP_KERNEL;
	} else {
		/* first allocation: small, and GFP_NOIO per the
		 * rationale in the comment above */
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc(new_size * sizeof(*argv), gfp);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	/* old array is always released; on failure NULL is returned
	 * and the caller must not reuse old_argv */
	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}

/*
 * Impose necessary and sufficient conditions on a devices's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
						 struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
*/ unsigned short device_logical_block_size_sects = limits->logical_block_size >> SECTOR_SHIFT; /* * Offset of the start of the next table entry, mod logical_block_size. */ unsigned short next_target_start = 0; /* * Given an aligned bio that extends beyond the end of a * target, how many sectors must the next target handle? */ unsigned short remaining = 0; struct dm_target *uninitialized_var(ti); struct queue_limits ti_limits; unsigned i = 0; /* * Check each entry in the table in turn. */ while (i < dm_table_get_num_targets(table)) { ti = dm_table_get_target(table, i++); blk_set_stacking_limits(&ti_limits); /* combine all target devices' limits */ if (ti->type->iterate_devices) ti->type->iterate_devices(ti, dm_set_device_limits, &ti_limits); /* * If the remaining sectors fall entirely within this * table entry are they compatible with its logical_block_size? */ if (remaining < ti->len && remaining & ((ti_limits.logical_block_size >> SECTOR_SHIFT) - 1)) break; /* Error */ next_target_start = (unsigned short) ((next_target_start + ti->len) & (device_logical_block_size_sects - 1)); remaining = next_target_start ? 
device_logical_block_size_sects - next_target_start : 0; } if (remaining) { DMWARN("%s: table line %u (start sect %llu len %llu) " "not aligned to h/w logical block size %u", dm_device_name(table->md), i, (unsigned long long) ti->begin, (unsigned long long) ti->len, limits->logical_block_size); return -EINVAL; } return 0; } int dm_table_add_target(struct dm_table *t, const char *type, sector_t start, sector_t len, char *params) { int r = -EINVAL, argc; char **argv; struct dm_target *tgt; if (t->singleton) { DMERR("%s: target type %s must appear alone in table", dm_device_name(t->md), t->targets->type->name); return -EINVAL; } BUG_ON(t->num_targets >= t->num_allocated); tgt = t->targets + t->num_targets; memset(tgt, 0, sizeof(*tgt)); if (!len) { DMERR("%s: zero-length target", dm_device_name(t->md)); return -EINVAL; } tgt->type = dm_get_target_type(type); if (!tgt->type) { DMERR("%s: %s: unknown target type", dm_device_name(t->md), type); return -EINVAL; } if (dm_target_needs_singleton(tgt->type)) { if (t->num_targets) { DMERR("%s: target type %s must appear alone in table", dm_device_name(t->md), type); return -EINVAL; } t->singleton = 1; } if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) { DMERR("%s: target type %s may not be included in read-only tables", dm_device_name(t->md), type); return -EINVAL; } if (t->immutable_target_type) { if (t->immutable_target_type != tgt->type) { DMERR("%s: immutable target type %s cannot be mixed with other target types", dm_device_name(t->md), t->immutable_target_type->name); return -EINVAL; } } else if (dm_target_is_immutable(tgt->type)) { if (t->num_targets) { DMERR("%s: immutable target type %s cannot be mixed with other target types", dm_device_name(t->md), tgt->type->name); return -EINVAL; } t->immutable_target_type = tgt->type; } tgt->table = t; tgt->begin = start; tgt->len = len; tgt->error = "Unknown error"; /* * Does this target adjoin the previous one ? 
*/ if (!adjoin(t, tgt)) { tgt->error = "Gap in table"; r = -EINVAL; goto bad; } r = dm_split_args(&argc, &argv, params); if (r) { tgt->error = "couldn't split parameters (insufficient memory)"; goto bad; } r = tgt->type->ctr(tgt, argc, argv); kfree(argv); if (r) goto bad; t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; if (!tgt->num_discard_bios && tgt->discards_supported) DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.", dm_device_name(t->md), type); return 0; bad: DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error); dm_put_target_type(tgt->type); return r; } /* * Target argument parsing helpers. */ static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, unsigned *value, char **error, unsigned grouped) { const char *arg_str = dm_shift_arg(arg_set); char dummy; if (!arg_str || (sscanf(arg_str, "%u%c", value, &dummy) != 1) || (*value < arg->min) || (*value > arg->max) || (grouped && arg_set->argc < *value)) { *error = arg->error; return -EINVAL; } return 0; } int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, unsigned *value, char **error) { return validate_next_arg(arg, arg_set, value, error, 0); } EXPORT_SYMBOL(dm_read_arg); int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set, unsigned *value, char **error) { return validate_next_arg(arg, arg_set, value, error, 1); } EXPORT_SYMBOL(dm_read_arg_group); const char *dm_shift_arg(struct dm_arg_set *as) { char *r; if (as->argc) { as->argc--; r = *as->argv; as->argv++; return r; } return NULL; } EXPORT_SYMBOL(dm_shift_arg); void dm_consume_args(struct dm_arg_set *as, unsigned num_args) { BUG_ON(as->argc < num_args); as->argc -= num_args; as->argv += num_args; } EXPORT_SYMBOL(dm_consume_args); static bool __table_type_request_based(unsigned table_type) { return (table_type == DM_TYPE_REQUEST_BASED || table_type == DM_TYPE_MQ_REQUEST_BASED); } static int dm_table_set_type(struct dm_table *t) { unsigned i; unsigned bio_based = 
0, request_based = 0, hybrid = 0; bool use_blk_mq = false; struct dm_target *tgt; struct dm_dev_internal *dd; struct list_head *devices; unsigned live_md_type = dm_get_md_type(t->md); for (i = 0; i < t->num_targets; i++) { tgt = t->targets + i; if (dm_target_hybrid(tgt)) hybrid = 1; else if (dm_target_request_based(tgt)) request_based = 1; else bio_based = 1; if (bio_based && request_based) { DMWARN("Inconsistent table: different target types" " can't be mixed up"); return -EINVAL; } } if (hybrid && !bio_based && !request_based) { /* * The targets can work either way. * Determine the type from the live device. * Default to bio-based if device is new. */ if (__table_type_request_based(live_md_type)) request_based = 1; else bio_based = 1; } if (bio_based) { /* We must use this table as bio-based */ t->type = DM_TYPE_BIO_BASED; return 0; } BUG_ON(!request_based); /* No targets in this table */ /* * Request-based dm supports only tables that have a single target now. * To support multiple targets, request splitting support is needed, * and that needs lots of changes in the block-layer. * (e.g. request completion process for partial completion.) 
*/ if (t->num_targets > 1) { DMWARN("Request-based dm doesn't support multiple targets yet"); return -EINVAL; } /* Non-request-stackable devices can't be used for request-based dm */ devices = dm_table_get_devices(t); list_for_each_entry(dd, devices, list) { struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); if (!blk_queue_stackable(q)) { DMERR("table load rejected: including" " non-request-stackable devices"); return -EINVAL; } if (q->mq_ops) use_blk_mq = true; } if (use_blk_mq) { /* verify _all_ devices in the table are blk-mq devices */ list_for_each_entry(dd, devices, list) if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) { DMERR("table load rejected: not all devices" " are blk-mq request-stackable"); return -EINVAL; } t->type = DM_TYPE_MQ_REQUEST_BASED; } else if (list_empty(devices) && __table_type_request_based(live_md_type)) { /* inherit live MD type */ t->type = live_md_type; } else t->type = DM_TYPE_REQUEST_BASED; return 0; } unsigned dm_table_get_type(struct dm_table *t) { return t->type; } struct target_type *dm_table_get_immutable_target_type(struct dm_table *t) { return t->immutable_target_type; } struct dm_target *dm_table_get_immutable_target(struct dm_table *t) { /* Immutable target is implicitly a singleton */ if (t->num_targets > 1 || !dm_target_is_immutable(t->targets[0].type)) return NULL; return t->targets; } struct dm_target *dm_table_get_wildcard_target(struct dm_table *t) { struct dm_target *uninitialized_var(ti); unsigned i = 0; while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); if (dm_target_is_wildcard(ti->type)) return ti; } return NULL; } bool dm_table_request_based(struct dm_table *t) { return __table_type_request_based(dm_table_get_type(t)); } bool dm_table_mq_request_based(struct dm_table *t) { return dm_table_get_type(t) == DM_TYPE_MQ_REQUEST_BASED; } static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) { unsigned type = dm_table_get_type(t); unsigned per_io_data_size = 
0; struct dm_target *tgt; unsigned i; if (unlikely(type == DM_TYPE_NONE)) { DMWARN("no table type is set, can't allocate mempools"); return -EINVAL; } if (type == DM_TYPE_BIO_BASED) for (i = 0; i < t->num_targets; i++) { tgt = t->targets + i; per_io_data_size = max(per_io_data_size, tgt->per_io_data_size); } t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size); if (!t->mempools) return -ENOMEM; return 0; } void dm_table_free_md_mempools(struct dm_table *t) { dm_free_md_mempools(t->mempools); t->mempools = NULL; } struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t) { return t->mempools; } static int setup_indexes(struct dm_table *t) { int i; unsigned int total = 0; sector_t *indexes; /* allocate the space for *all* the indexes */ for (i = t->depth - 2; i >= 0; i--) { t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE); total += t->counts[i]; } indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE); if (!indexes) return -ENOMEM; /* set up internal nodes, bottom-up */ for (i = t->depth - 2; i >= 0; i--) { t->index[i] = indexes; indexes += (KEYS_PER_NODE * t->counts[i]); setup_btree_index(i, t); } return 0; } /* * Builds the btree to index the map. */ static int dm_table_build_index(struct dm_table *t) { int r = 0; unsigned int leaf_nodes; /* how many indexes will the btree have ? */ leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); /* leaf layer has already been set up */ t->counts[t->depth - 1] = leaf_nodes; t->index[t->depth - 1] = t->highs; if (t->depth >= 2) r = setup_indexes(t); return r; } static bool integrity_profile_exists(struct gendisk *disk) { return !!blk_get_integrity(disk); } /* * Get a disk whose integrity profile reflects the table's profile. * Returns NULL if integrity support was inconsistent or unavailable. 
*/ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t) { struct list_head *devices = dm_table_get_devices(t); struct dm_dev_internal *dd = NULL; struct gendisk *prev_disk = NULL, *template_disk = NULL; list_for_each_entry(dd, devices, list) { template_disk = dd->dm_dev->bdev->bd_disk; if (!integrity_profile_exists(template_disk)) goto no_integrity; else if (prev_disk && blk_integrity_compare(prev_disk, template_disk) < 0) goto no_integrity; prev_disk = template_disk; } return template_disk; no_integrity: if (prev_disk) DMWARN("%s: integrity not set: %s and %s profile mismatch", dm_device_name(t->md), prev_disk->disk_name, template_disk->disk_name); return NULL; } /* * Register the mapped device for blk_integrity support if the * underlying devices have an integrity profile. But all devices may * not have matching profiles (checking all devices isn't reliable * during table load because this table may use other DM device(s) which * must be resumed before they will have an initialized integity * profile). Consequently, stacked DM devices force a 2 stage integrity * profile validation: First pass during table load, final pass during * resume. */ static int dm_table_register_integrity(struct dm_table *t) { struct mapped_device *md = t->md; struct gendisk *template_disk = NULL; template_disk = dm_table_get_integrity_disk(t); if (!template_disk) return 0; if (!integrity_profile_exists(dm_disk(md))) { t->integrity_supported = 1; /* * Register integrity profile during table load; we can do * this because the final profile must match during resume. */ blk_integrity_register(dm_disk(md), blk_get_integrity(template_disk)); return 0; } /* * If DM device already has an initialized integrity * profile the new profile should not conflict. 
*/ if (blk_integrity_compare(dm_disk(md), template_disk) < 0) { DMWARN("%s: conflict with existing integrity profile: " "%s profile mismatch", dm_device_name(t->md), template_disk->disk_name); return 1; } /* Preserve existing integrity profile */ t->integrity_supported = 1; return 0; } /* * Prepares the table for use by building the indices, * setting the type, and allocating mempools. */ int dm_table_complete(struct dm_table *t) { int r; r = dm_table_set_type(t); if (r) { DMERR("unable to set table type"); return r; } r = dm_table_build_index(t); if (r) { DMERR("unable to build btrees"); return r; } r = dm_table_register_integrity(t); if (r) { DMERR("could not register integrity profile."); return r; } r = dm_table_alloc_md_mempools(t, t->md); if (r) DMERR("unable to allocate mempools"); return r; } static DEFINE_MUTEX(_event_lock); void dm_table_event_callback(struct dm_table *t, void (*fn)(void *), void *context) { mutex_lock(&_event_lock); t->event_fn = fn; t->event_context = context; mutex_unlock(&_event_lock); } void dm_table_event(struct dm_table *t) { /* * You can no longer call dm_table_event() from interrupt * context, use a bottom half instead. */ BUG_ON(in_interrupt()); mutex_lock(&_event_lock); if (t->event_fn) t->event_fn(t->event_context); mutex_unlock(&_event_lock); } EXPORT_SYMBOL(dm_table_event); sector_t dm_table_get_size(struct dm_table *t) { return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; } EXPORT_SYMBOL(dm_table_get_size); struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) { if (index >= t->num_targets) return NULL; return t->targets + index; } /* * Search the btree for the correct target. * * Caller should check returned pointer with dm_target_is_valid() * to trap I/O beyond end of device. 
*/ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) { unsigned int l, n = 0, k = 0; sector_t *node; for (l = 0; l < t->depth; l++) { n = get_child(n, k); node = get_node(t, l, n); for (k = 0; k < KEYS_PER_NODE; k++) if (node[k] >= sector) break; } return &t->targets[(KEYS_PER_NODE * n) + k]; } static int count_device(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { unsigned *num_devices = data; (*num_devices)++; return 0; } /* * Check whether a table has no data devices attached using each * target's iterate_devices method. * Returns false if the result is unknown because a target doesn't * support iterate_devices. */ bool dm_table_has_no_data_devices(struct dm_table *table) { struct dm_target *uninitialized_var(ti); unsigned i = 0, num_devices = 0; while (i < dm_table_get_num_targets(table)) { ti = dm_table_get_target(table, i++); if (!ti->type->iterate_devices) return false; ti->type->iterate_devices(ti, count_device, &num_devices); if (num_devices) return false; } return true; } /* * Establish the new table's queue_limits and validate them. */ int dm_calculate_queue_limits(struct dm_table *table, struct queue_limits *limits) { struct dm_target *uninitialized_var(ti); struct queue_limits ti_limits; unsigned i = 0; blk_set_stacking_limits(limits); while (i < dm_table_get_num_targets(table)) { blk_set_stacking_limits(&ti_limits); ti = dm_table_get_target(table, i++); if (!ti->type->iterate_devices) goto combine_limits; /* * Combine queue limits of all the devices this target uses. */ ti->type->iterate_devices(ti, dm_set_device_limits, &ti_limits); /* Set I/O hints portion of queue limits */ if (ti->type->io_hints) ti->type->io_hints(ti, &ti_limits); /* * Check each device area is consistent with the target's * overall queue limits. 
*/ if (ti->type->iterate_devices(ti, device_area_is_invalid, &ti_limits)) return -EINVAL; combine_limits: /* * Merge this target's queue limits into the overall limits * for the table. */ if (blk_stack_limits(limits, &ti_limits, 0) < 0) DMWARN("%s: adding target device " "(start sect %llu len %llu) " "caused an alignment inconsistency", dm_device_name(table->md), (unsigned long long) ti->begin, (unsigned long long) ti->len); } return validate_hardware_logical_block_alignment(table, limits); } /* * Verify that all devices have an integrity profile that matches the * DM device's registered integrity profile. If the profiles don't * match then unregister the DM device's integrity profile. */ static void dm_table_verify_integrity(struct dm_table *t) { struct gendisk *template_disk = NULL; if (t->integrity_supported) { /* * Verify that the original integrity profile * matches all the devices in this table. */ template_disk = dm_table_get_integrity_disk(t); if (template_disk && blk_integrity_compare(dm_disk(t->md), template_disk) >= 0) return; } if (integrity_profile_exists(dm_disk(t->md))) { DMWARN("%s: unable to establish an integrity profile", dm_device_name(t->md)); blk_integrity_unregister(dm_disk(t->md)); } } static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { unsigned flush = (*(unsigned *)data); struct request_queue *q = bdev_get_queue(dev->bdev); return q && (q->flush_flags & flush); } static bool dm_table_supports_flush(struct dm_table *t, unsigned flush) { struct dm_target *ti; unsigned i = 0; /* * Require at least one underlying device to support flushes. * t->devices includes internal dm devices such as mirror logs * so we need to use iterate_devices here, which targets * supporting flushes must provide. 
*/ while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); if (!ti->num_flush_bios) continue; if (ti->flush_supported) return true; if (ti->type->iterate_devices && ti->type->iterate_devices(ti, device_flush_capable, &flush)) return true; } return false; } static bool dm_table_discard_zeroes_data(struct dm_table *t) { struct dm_target *ti; unsigned i = 0; /* Ensure that all targets supports discard_zeroes_data. */ while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); if (ti->discard_zeroes_data_unsupported) return false; } return true; } static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); return q && blk_queue_nonrot(q); } static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); return q && !blk_queue_add_random(q); } static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); } static bool dm_table_all_devices_attribute(struct dm_table *t, iterate_devices_callout_fn func) { struct dm_target *ti; unsigned i = 0; while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); if (!ti->type->iterate_devices || !ti->type->iterate_devices(ti, func, NULL)) return false; } return true; } static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); return q && !q->limits.max_write_same_sectors; } static bool dm_table_supports_write_same(struct dm_table *t) { struct dm_target *ti; unsigned i = 0; while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); if (!ti->num_write_same_bios) 
return false; if (!ti->type->iterate_devices || ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) return false; } return true; } static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); return q && blk_queue_discard(q); } static bool dm_table_supports_discards(struct dm_table *t) { struct dm_target *ti; unsigned i = 0; /* * Unless any target used by the table set discards_supported, * require at least one underlying device to support discards. * t->devices includes internal dm devices such as mirror logs * so we need to use iterate_devices here, which targets * supporting discard selectively must provide. */ while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); if (!ti->num_discard_bios) continue; if (ti->discards_supported) return true; if (ti->type->iterate_devices && ti->type->iterate_devices(ti, device_discard_capable, NULL)) return true; } return false; } void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { unsigned flush = 0; /* * Copy table's limits to the DM device's request_queue */ q->limits = *limits; if (!dm_table_supports_discards(t)) queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); else queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); if (dm_table_supports_flush(t, REQ_FLUSH)) { flush |= REQ_FLUSH; if (dm_table_supports_flush(t, REQ_FUA)) flush |= REQ_FUA; } blk_queue_flush(q, flush); if (!dm_table_discard_zeroes_data(t)) q->limits.discard_zeroes_data = 0; /* Ensure that all underlying devices are non-rotational. 
*/ if (dm_table_all_devices_attribute(t, device_is_nonrot)) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); else queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); if (!dm_table_supports_write_same(t)) q->limits.max_write_same_sectors = 0; if (dm_table_all_devices_attribute(t, queue_supports_sg_merge)) queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); else queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); dm_table_verify_integrity(t); /* * Determine whether or not this queue's I/O timings contribute * to the entropy pool, Only request-based targets use this. * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not * have it set. */ if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); /* * QUEUE_FLAG_STACKABLE must be set after all queue settings are * visible to other CPUs because, once the flag is set, incoming bios * are processed by request-based dm, which refers to the queue * settings. * Until the flag set, bios are passed to bio-based dm and queued to * md->deferred where queue settings are not needed yet. * Those bios are passed to request-based dm at the resume time. 
*/ smp_mb(); if (dm_table_request_based(t)) queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q); } unsigned int dm_table_get_num_targets(struct dm_table *t) { return t->num_targets; } struct list_head *dm_table_get_devices(struct dm_table *t) { return &t->devices; } fmode_t dm_table_get_mode(struct dm_table *t) { return t->mode; } EXPORT_SYMBOL(dm_table_get_mode); enum suspend_mode { PRESUSPEND, PRESUSPEND_UNDO, POSTSUSPEND, }; static void suspend_targets(struct dm_table *t, enum suspend_mode mode) { int i = t->num_targets; struct dm_target *ti = t->targets; while (i--) { switch (mode) { case PRESUSPEND: if (ti->type->presuspend) ti->type->presuspend(ti); break; case PRESUSPEND_UNDO: if (ti->type->presuspend_undo) ti->type->presuspend_undo(ti); break; case POSTSUSPEND: if (ti->type->postsuspend) ti->type->postsuspend(ti); break; } ti++; } } void dm_table_presuspend_targets(struct dm_table *t) { if (!t) return; suspend_targets(t, PRESUSPEND); } void dm_table_presuspend_undo_targets(struct dm_table *t) { if (!t) return; suspend_targets(t, PRESUSPEND_UNDO); } void dm_table_postsuspend_targets(struct dm_table *t) { if (!t) return; suspend_targets(t, POSTSUSPEND); } int dm_table_resume_targets(struct dm_table *t) { int i, r = 0; for (i = 0; i < t->num_targets; i++) { struct dm_target *ti = t->targets + i; if (!ti->type->preresume) continue; r = ti->type->preresume(ti); if (r) { DMERR("%s: %s: preresume failed, error = %d", dm_device_name(t->md), ti->type->name, r); return r; } } for (i = 0; i < t->num_targets; i++) { struct dm_target *ti = t->targets + i; if (ti->type->resume) ti->type->resume(ti); } return 0; } void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb) { list_add(&cb->list, &t->target_callbacks); } EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks); int dm_table_any_congested(struct dm_table *t, int bdi_bits) { struct dm_dev_internal *dd; struct list_head *devices = dm_table_get_devices(t); struct dm_target_callbacks *cb; int r = 
0; list_for_each_entry(dd, devices, list) { struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); char b[BDEVNAME_SIZE]; if (likely(q)) r |= bdi_congested(&q->backing_dev_info, bdi_bits); else DMWARN_LIMIT("%s: any_congested: nonexistent device %s", dm_device_name(t->md), bdevname(dd->dm_dev->bdev, b)); } list_for_each_entry(cb, &t->target_callbacks, list) if (cb->congested_fn) r |= cb->congested_fn(cb, bdi_bits); return r; } struct mapped_device *dm_table_get_md(struct dm_table *t) { return t->md; } EXPORT_SYMBOL(dm_table_get_md); void dm_table_run_md_queue_async(struct dm_table *t) { struct mapped_device *md; struct request_queue *queue; unsigned long flags; if (!dm_table_request_based(t)) return; md = dm_table_get_md(t); queue = dm_get_md_queue(md); if (queue) { if (queue->mq_ops) blk_mq_run_hw_queues(queue, true); else { spin_lock_irqsave(queue->queue_lock, flags); blk_run_queue_async(queue); spin_unlock_irqrestore(queue->queue_lock, flags); } } } EXPORT_SYMBOL(dm_table_run_md_queue_async);
gpl-2.0
ninjablocks/kernel-VAR-SOM-AMxx
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
294
37382
/***************************************************************************** * * * File: cxgb2.c * * $Revision: 1.25 $ * * $Date: 2005/06/22 00:43:25 $ * * Description: * * Chelsio 10Gb Ethernet Driver. * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License, version 2, as * * published by the Free Software Foundation. * * * * You should have received a copy of the GNU General Public License along * * with this program; if not, see <http://www.gnu.org/licenses/>. * * * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * * * http://www.chelsio.com * * * * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * * All rights reserved. * * * * Maintainers: maintainers@chelsio.com * * * * Authors: Dimitrios Michailidis <dm@chelsio.com> * * Tina Yang <tainay@chelsio.com> * * Felix Marti <felix@chelsio.com> * * Scott Bardone <sbardone@chelsio.com> * * Kurt Ottaway <kottaway@chelsio.com> * * Frank DiMambro <frank@chelsio.com> * * * * History: * * * ****************************************************************************/ #include "common.h" #include <linux/module.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/mii.h> #include <linux/sockios.h> #include <linux/dma-mapping.h> #include <asm/uaccess.h> #include "cpl5_cmd.h" #include "regs.h" #include "gmac.h" #include "cphy.h" #include "sge.h" #include "tp.h" #include "espi.h" #include "elmer0.h" #include <linux/workqueue.h> static inline void schedule_mac_stats_update(struct adapter *ap, int secs) { schedule_delayed_work(&ap->stats_update_task, secs * HZ); } static inline void cancel_mac_stats_update(struct adapter *ap) { cancel_delayed_work(&ap->stats_update_task); } #define MAX_CMDQ_ENTRIES 16384 #define 
MAX_CMDQ1_ENTRIES 1024 #define MAX_RX_BUFFERS 16384 #define MAX_RX_JUMBO_BUFFERS 16384 #define MAX_TX_BUFFERS_HIGH 16384U #define MAX_TX_BUFFERS_LOW 1536U #define MAX_TX_BUFFERS 1460U #define MIN_FL_ENTRIES 32 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) /* * The EEPROM is actually bigger but only the first few bytes are used so we * only report those. */ #define EEPROM_SIZE 32 MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR("Chelsio Communications"); MODULE_LICENSE("GPL"); static int dflt_msg_enable = DFLT_MSG_ENABLE; module_param(dflt_msg_enable, int, 0); MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap"); #define HCLOCK 0x0 #define LCLOCK 0x1 /* T1 cards powersave mode */ static int t1_clock(struct adapter *adapter, int mode); static int t1powersave = 1; /* HW default is powersave mode. */ module_param(t1powersave, int, 0); MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode"); static int disable_msi = 0; module_param(disable_msi, int, 0); MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); static const char pci_speed[][4] = { "33", "66", "100", "133" }; /* * Setup MAC to receive the types of packets we want. */ static void t1_set_rxmode(struct net_device *dev) { struct adapter *adapter = dev->ml_priv; struct cmac *mac = adapter->port[dev->if_port].mac; struct t1_rx_mode rm; rm.dev = dev; mac->ops->set_rx_mode(mac, &rm); } static void link_report(struct port_info *p) { if (!netif_carrier_ok(p->dev)) netdev_info(p->dev, "link down\n"); else { const char *s = "10Mbps"; switch (p->link_config.speed) { case SPEED_10000: s = "10Gbps"; break; case SPEED_1000: s = "1000Mbps"; break; case SPEED_100: s = "100Mbps"; break; } netdev_info(p->dev, "link up, %s, %s-duplex\n", s, p->link_config.duplex == DUPLEX_FULL ? 
"full" : "half"); } } void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat, int speed, int duplex, int pause) { struct port_info *p = &adapter->port[port_id]; if (link_stat != netif_carrier_ok(p->dev)) { if (link_stat) netif_carrier_on(p->dev); else netif_carrier_off(p->dev); link_report(p); /* multi-ports: inform toe */ if ((speed > 0) && (adapter->params.nports > 1)) { unsigned int sched_speed = 10; switch (speed) { case SPEED_1000: sched_speed = 1000; break; case SPEED_100: sched_speed = 100; break; case SPEED_10: sched_speed = 10; break; } t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed); } } } static void link_start(struct port_info *p) { struct cmac *mac = p->mac; mac->ops->reset(mac); if (mac->ops->macaddress_set) mac->ops->macaddress_set(mac, p->dev->dev_addr); t1_set_rxmode(p->dev); t1_link_start(p->phy, mac, &p->link_config); mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); } static void enable_hw_csum(struct adapter *adapter) { if (adapter->port[0].dev->hw_features & NETIF_F_TSO) t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */ t1_tp_set_tcp_checksum_offload(adapter->tp, 1); } /* * Things to do upon first use of a card. * This must run with the rtnl lock held. */ static int cxgb_up(struct adapter *adapter) { int err = 0; if (!(adapter->flags & FULL_INIT_DONE)) { err = t1_init_hw_modules(adapter); if (err) goto out_err; enable_hw_csum(adapter); adapter->flags |= FULL_INIT_DONE; } t1_interrupts_clear(adapter); adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev); err = request_irq(adapter->pdev->irq, t1_interrupt, adapter->params.has_msi ? 0 : IRQF_SHARED, adapter->name, adapter); if (err) { if (adapter->params.has_msi) pci_disable_msi(adapter->pdev); goto out_err; } t1_sge_start(adapter->sge); t1_interrupts_enable(adapter); out_err: return err; } /* * Release resources when all the ports have been stopped. 
*/ static void cxgb_down(struct adapter *adapter) { t1_sge_stop(adapter->sge); t1_interrupts_disable(adapter); free_irq(adapter->pdev->irq, adapter); if (adapter->params.has_msi) pci_disable_msi(adapter->pdev); } static int cxgb_open(struct net_device *dev) { int err; struct adapter *adapter = dev->ml_priv; int other_ports = adapter->open_device_map & PORT_MASK; napi_enable(&adapter->napi); if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) { napi_disable(&adapter->napi); return err; } __set_bit(dev->if_port, &adapter->open_device_map); link_start(&adapter->port[dev->if_port]); netif_start_queue(dev); if (!other_ports && adapter->params.stats_update_period) schedule_mac_stats_update(adapter, adapter->params.stats_update_period); t1_vlan_mode(adapter, dev->features); return 0; } static int cxgb_close(struct net_device *dev) { struct adapter *adapter = dev->ml_priv; struct port_info *p = &adapter->port[dev->if_port]; struct cmac *mac = p->mac; netif_stop_queue(dev); napi_disable(&adapter->napi); mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); netif_carrier_off(dev); clear_bit(dev->if_port, &adapter->open_device_map); if (adapter->params.stats_update_period && !(adapter->open_device_map & PORT_MASK)) { /* Stop statistics accumulation. 
*/ smp_mb__after_clear_bit(); spin_lock(&adapter->work_lock); /* sync with update task */ spin_unlock(&adapter->work_lock); cancel_mac_stats_update(adapter); } if (!adapter->open_device_map) cxgb_down(adapter); return 0; } static struct net_device_stats *t1_get_stats(struct net_device *dev) { struct adapter *adapter = dev->ml_priv; struct port_info *p = &adapter->port[dev->if_port]; struct net_device_stats *ns = &p->netstats; const struct cmac_statistics *pstats; /* Do a full update of the MAC stats */ pstats = p->mac->ops->statistics_update(p->mac, MAC_STATS_UPDATE_FULL); ns->tx_packets = pstats->TxUnicastFramesOK + pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK; ns->rx_packets = pstats->RxUnicastFramesOK + pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK; ns->tx_bytes = pstats->TxOctetsOK; ns->rx_bytes = pstats->RxOctetsOK; ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors + pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions; ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors + pstats->RxFCSErrors + pstats->RxAlignErrors + pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors + pstats->RxSymbolErrors + pstats->RxRuntErrors; ns->multicast = pstats->RxMulticastFramesOK; ns->collisions = pstats->TxTotalCollisions; /* detailed rx_errors */ ns->rx_length_errors = pstats->RxFrameTooLongErrors + pstats->RxJabberErrors; ns->rx_over_errors = 0; ns->rx_crc_errors = pstats->RxFCSErrors; ns->rx_frame_errors = pstats->RxAlignErrors; ns->rx_fifo_errors = 0; ns->rx_missed_errors = 0; /* detailed tx_errors */ ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions; ns->tx_carrier_errors = 0; ns->tx_fifo_errors = pstats->TxUnderrun; ns->tx_heartbeat_errors = 0; ns->tx_window_errors = pstats->TxLateCollisions; return ns; } static u32 get_msglevel(struct net_device *dev) { struct adapter *adapter = dev->ml_priv; return adapter->msg_enable; } static void set_msglevel(struct net_device *dev, u32 val) { struct 
adapter *adapter = dev->ml_priv; adapter->msg_enable = val; } static char stats_strings[][ETH_GSTRING_LEN] = { "TxOctetsOK", "TxOctetsBad", "TxUnicastFramesOK", "TxMulticastFramesOK", "TxBroadcastFramesOK", "TxPauseFrames", "TxFramesWithDeferredXmissions", "TxLateCollisions", "TxTotalCollisions", "TxFramesAbortedDueToXSCollisions", "TxUnderrun", "TxLengthErrors", "TxInternalMACXmitError", "TxFramesWithExcessiveDeferral", "TxFCSErrors", "TxJumboFramesOk", "TxJumboOctetsOk", "RxOctetsOK", "RxOctetsBad", "RxUnicastFramesOK", "RxMulticastFramesOK", "RxBroadcastFramesOK", "RxPauseFrames", "RxFCSErrors", "RxAlignErrors", "RxSymbolErrors", "RxDataErrors", "RxSequenceErrors", "RxRuntErrors", "RxJabberErrors", "RxInternalMACRcvError", "RxInRangeLengthErrors", "RxOutOfRangeLengthField", "RxFrameTooLongErrors", "RxJumboFramesOk", "RxJumboOctetsOk", /* Port stats */ "RxCsumGood", "TxCsumOffload", "TxTso", "RxVlan", "TxVlan", "TxNeedHeadroom", /* Interrupt stats */ "rx drops", "pure_rsps", "unhandled irqs", "respQ_empty", "respQ_overflow", "freelistQ_empty", "pkt_too_big", "pkt_mismatch", "cmdQ_full0", "cmdQ_full1", "espi_DIP2ParityErr", "espi_DIP4Err", "espi_RxDrops", "espi_TxDrops", "espi_RxOvfl", "espi_ParityErr" }; #define T2_REGMAP_SIZE (3 * 1024) static int get_regs_len(struct net_device *dev) { return T2_REGMAP_SIZE; } static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct adapter *adapter = dev->ml_priv; strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); strlcpy(info->bus_info, pci_name(adapter->pdev), sizeof(info->bus_info)); } static int get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(stats_strings); default: return -EOPNOTSUPP; } } static void get_strings(struct net_device *dev, u32 stringset, u8 *data) { if (stringset == ETH_SS_STATS) memcpy(data, stats_strings, sizeof(stats_strings)); } static void get_stats(struct 
net_device *dev, struct ethtool_stats *stats, u64 *data) { struct adapter *adapter = dev->ml_priv; struct cmac *mac = adapter->port[dev->if_port].mac; const struct cmac_statistics *s; const struct sge_intr_counts *t; struct sge_port_stats ss; s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); t = t1_sge_get_intr_counts(adapter->sge); t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss); *data++ = s->TxOctetsOK; *data++ = s->TxOctetsBad; *data++ = s->TxUnicastFramesOK; *data++ = s->TxMulticastFramesOK; *data++ = s->TxBroadcastFramesOK; *data++ = s->TxPauseFrames; *data++ = s->TxFramesWithDeferredXmissions; *data++ = s->TxLateCollisions; *data++ = s->TxTotalCollisions; *data++ = s->TxFramesAbortedDueToXSCollisions; *data++ = s->TxUnderrun; *data++ = s->TxLengthErrors; *data++ = s->TxInternalMACXmitError; *data++ = s->TxFramesWithExcessiveDeferral; *data++ = s->TxFCSErrors; *data++ = s->TxJumboFramesOK; *data++ = s->TxJumboOctetsOK; *data++ = s->RxOctetsOK; *data++ = s->RxOctetsBad; *data++ = s->RxUnicastFramesOK; *data++ = s->RxMulticastFramesOK; *data++ = s->RxBroadcastFramesOK; *data++ = s->RxPauseFrames; *data++ = s->RxFCSErrors; *data++ = s->RxAlignErrors; *data++ = s->RxSymbolErrors; *data++ = s->RxDataErrors; *data++ = s->RxSequenceErrors; *data++ = s->RxRuntErrors; *data++ = s->RxJabberErrors; *data++ = s->RxInternalMACRcvError; *data++ = s->RxInRangeLengthErrors; *data++ = s->RxOutOfRangeLengthField; *data++ = s->RxFrameTooLongErrors; *data++ = s->RxJumboFramesOK; *data++ = s->RxJumboOctetsOK; *data++ = ss.rx_cso_good; *data++ = ss.tx_cso; *data++ = ss.tx_tso; *data++ = ss.vlan_xtract; *data++ = ss.vlan_insert; *data++ = ss.tx_need_hdrroom; *data++ = t->rx_drops; *data++ = t->pure_rsps; *data++ = t->unhandled_irqs; *data++ = t->respQ_empty; *data++ = t->respQ_overflow; *data++ = t->freelistQ_empty; *data++ = t->pkt_too_big; *data++ = t->pkt_mismatch; *data++ = t->cmdQ_full[0]; *data++ = t->cmdQ_full[1]; if (adapter->espi) { const struct 
espi_intr_counts *e; e = t1_espi_get_intr_counts(adapter->espi); *data++ = e->DIP2_parity_err; *data++ = e->DIP4_err; *data++ = e->rx_drops; *data++ = e->tx_drops; *data++ = e->rx_ovflw; *data++ = e->parity_err; } } static inline void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, unsigned int end) { u32 *p = buf + start; for ( ; start <= end; start += sizeof(u32)) *p++ = readl(ap->regs + start); } static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) { struct adapter *ap = dev->ml_priv; /* * Version scheme: bits 0..9: chip version, bits 10..15: chip revision */ regs->version = 2; memset(buf, 0, T2_REGMAP_SIZE); reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER); reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE); reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR); reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT); reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE); reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE); reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT); reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL); reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE); reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD); } static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct adapter *adapter = dev->ml_priv; struct port_info *p = &adapter->port[dev->if_port]; cmd->supported = p->link_config.supported; cmd->advertising = p->link_config.advertising; if (netif_carrier_ok(dev)) { ethtool_cmd_speed_set(cmd, p->link_config.speed); cmd->duplex = p->link_config.duplex; } else { ethtool_cmd_speed_set(cmd, -1); cmd->duplex = -1; } cmd->port = (cmd->supported & SUPPORTED_TP) ? 
PORT_TP : PORT_FIBRE; cmd->phy_address = p->phy->mdio.prtad; cmd->transceiver = XCVR_EXTERNAL; cmd->autoneg = p->link_config.autoneg; cmd->maxtxpkt = 0; cmd->maxrxpkt = 0; return 0; } static int speed_duplex_to_caps(int speed, int duplex) { int cap = 0; switch (speed) { case SPEED_10: if (duplex == DUPLEX_FULL) cap = SUPPORTED_10baseT_Full; else cap = SUPPORTED_10baseT_Half; break; case SPEED_100: if (duplex == DUPLEX_FULL) cap = SUPPORTED_100baseT_Full; else cap = SUPPORTED_100baseT_Half; break; case SPEED_1000: if (duplex == DUPLEX_FULL) cap = SUPPORTED_1000baseT_Full; else cap = SUPPORTED_1000baseT_Half; break; case SPEED_10000: if (duplex == DUPLEX_FULL) cap = SUPPORTED_10000baseT_Full; } return cap; } #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \ ADVERTISED_10000baseT_Full) static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct adapter *adapter = dev->ml_priv; struct port_info *p = &adapter->port[dev->if_port]; struct link_config *lc = &p->link_config; if (!(lc->supported & SUPPORTED_Autoneg)) return -EOPNOTSUPP; /* can't change speed/duplex */ if (cmd->autoneg == AUTONEG_DISABLE) { u32 speed = ethtool_cmd_speed(cmd); int cap = speed_duplex_to_caps(speed, cmd->duplex); if (!(lc->supported & cap) || (speed == SPEED_1000)) return -EINVAL; lc->requested_speed = speed; lc->requested_duplex = cmd->duplex; lc->advertising = 0; } else { cmd->advertising &= ADVERTISED_MASK; if (cmd->advertising & (cmd->advertising - 1)) cmd->advertising = lc->supported; cmd->advertising &= lc->supported; if (!cmd->advertising) return -EINVAL; lc->requested_speed = SPEED_INVALID; lc->requested_duplex = DUPLEX_INVALID; lc->advertising = cmd->advertising | ADVERTISED_Autoneg; } lc->autoneg = cmd->autoneg; if (netif_running(dev)) t1_link_start(p->phy, p->mac, lc); return 0; } static void get_pauseparam(struct 
net_device *dev, struct ethtool_pauseparam *epause) { struct adapter *adapter = dev->ml_priv; struct port_info *p = &adapter->port[dev->if_port]; epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0; epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0; epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0; } static int set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { struct adapter *adapter = dev->ml_priv; struct port_info *p = &adapter->port[dev->if_port]; struct link_config *lc = &p->link_config; if (epause->autoneg == AUTONEG_DISABLE) lc->requested_fc = 0; else if (lc->supported & SUPPORTED_Autoneg) lc->requested_fc = PAUSE_AUTONEG; else return -EINVAL; if (epause->rx_pause) lc->requested_fc |= PAUSE_RX; if (epause->tx_pause) lc->requested_fc |= PAUSE_TX; if (lc->autoneg == AUTONEG_ENABLE) { if (netif_running(dev)) t1_link_start(p->phy, p->mac, lc); } else { lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); if (netif_running(dev)) p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1, lc->fc); } return 0; } static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) { struct adapter *adapter = dev->ml_priv; int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; e->rx_max_pending = MAX_RX_BUFFERS; e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS; e->tx_max_pending = MAX_CMDQ_ENTRIES; e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl]; e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl]; e->tx_pending = adapter->params.sge.cmdQ_size[0]; } static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) { struct adapter *adapter = dev->ml_priv; int jumbo_fl = t1_is_T1B(adapter) ? 
1 : 0; if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending || e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS || e->tx_pending > MAX_CMDQ_ENTRIES || e->rx_pending < MIN_FL_ENTRIES || e->rx_jumbo_pending < MIN_FL_ENTRIES || e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1)) return -EINVAL; if (adapter->flags & FULL_INIT_DONE) return -EBUSY; adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending; adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending; adapter->params.sge.cmdQ_size[0] = e->tx_pending; adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ? MAX_CMDQ1_ENTRIES : e->tx_pending; return 0; } static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) { struct adapter *adapter = dev->ml_priv; adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs; adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce; adapter->params.sge.sample_interval_usecs = c->rate_sample_interval; t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge); return 0; } static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) { struct adapter *adapter = dev->ml_priv; c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs; c->rate_sample_interval = adapter->params.sge.sample_interval_usecs; c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable; return 0; } static int get_eeprom_len(struct net_device *dev) { struct adapter *adapter = dev->ml_priv; return t1_is_asic(adapter) ? 
EEPROM_SIZE : 0; } #define EEPROM_MAGIC(ap) \ (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16)) static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, u8 *data) { int i; u8 buf[EEPROM_SIZE] __attribute__((aligned(4))); struct adapter *adapter = dev->ml_priv; e->magic = EEPROM_MAGIC(adapter); for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32)) t1_seeprom_read(adapter, i, (__le32 *)&buf[i]); memcpy(data, buf + e->offset, e->len); return 0; } static const struct ethtool_ops t1_ethtool_ops = { .get_settings = get_settings, .set_settings = set_settings, .get_drvinfo = get_drvinfo, .get_msglevel = get_msglevel, .set_msglevel = set_msglevel, .get_ringparam = get_sge_param, .set_ringparam = set_sge_param, .get_coalesce = get_coalesce, .set_coalesce = set_coalesce, .get_eeprom_len = get_eeprom_len, .get_eeprom = get_eeprom, .get_pauseparam = get_pauseparam, .set_pauseparam = set_pauseparam, .get_link = ethtool_op_get_link, .get_strings = get_strings, .get_sset_count = get_sset_count, .get_ethtool_stats = get_stats, .get_regs_len = get_regs_len, .get_regs = get_regs, }; static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { struct adapter *adapter = dev->ml_priv; struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio; return mdio_mii_ioctl(mdio, if_mii(req), cmd); } static int t1_change_mtu(struct net_device *dev, int new_mtu) { int ret; struct adapter *adapter = dev->ml_priv; struct cmac *mac = adapter->port[dev->if_port].mac; if (!mac->ops->set_mtu) return -EOPNOTSUPP; if (new_mtu < 68) return -EINVAL; if ((ret = mac->ops->set_mtu(mac, new_mtu))) return ret; dev->mtu = new_mtu; return 0; } static int t1_set_mac_addr(struct net_device *dev, void *p) { struct adapter *adapter = dev->ml_priv; struct cmac *mac = adapter->port[dev->if_port].mac; struct sockaddr *addr = p; if (!mac->ops->macaddress_set) return -EOPNOTSUPP; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); mac->ops->macaddress_set(mac, 
dev->dev_addr); return 0; } static netdev_features_t t1_fix_features(struct net_device *dev, netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. */ if (features & NETIF_F_HW_VLAN_CTAG_RX) features |= NETIF_F_HW_VLAN_CTAG_TX; else features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } static int t1_set_features(struct net_device *dev, netdev_features_t features) { netdev_features_t changed = dev->features ^ features; struct adapter *adapter = dev->ml_priv; if (changed & NETIF_F_HW_VLAN_CTAG_RX) t1_vlan_mode(adapter, features); return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void t1_netpoll(struct net_device *dev) { unsigned long flags; struct adapter *adapter = dev->ml_priv; local_irq_save(flags); t1_interrupt(adapter->pdev->irq, adapter); local_irq_restore(flags); } #endif /* * Periodic accumulation of MAC statistics. This is used only if the MAC * does not have any other way to prevent stats counter overflow. */ static void mac_stats_task(struct work_struct *work) { int i; struct adapter *adapter = container_of(work, struct adapter, stats_update_task.work); for_each_port(adapter, i) { struct port_info *p = &adapter->port[i]; if (netif_running(p->dev)) p->mac->ops->statistics_update(p->mac, MAC_STATS_UPDATE_FAST); } /* Schedule the next statistics update if any port is active. */ spin_lock(&adapter->work_lock); if (adapter->open_device_map & PORT_MASK) schedule_mac_stats_update(adapter, adapter->params.stats_update_period); spin_unlock(&adapter->work_lock); } /* * Processes elmer0 external interrupts in process context. 
*/ static void ext_intr_task(struct work_struct *work) { struct adapter *adapter = container_of(work, struct adapter, ext_intr_handler_task); t1_elmer0_ext_intr_handler(adapter); /* Now reenable external interrupts */ spin_lock_irq(&adapter->async_lock); adapter->slow_intr_mask |= F_PL_INTR_EXT; writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE); writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, adapter->regs + A_PL_ENABLE); spin_unlock_irq(&adapter->async_lock); } /* * Interrupt-context handler for elmer0 external interrupts. */ void t1_elmer0_ext_intr(struct adapter *adapter) { /* * Schedule a task to handle external interrupts as we require * a process context. We disable EXT interrupts in the interim * and let the task reenable them when it's done. */ adapter->slow_intr_mask &= ~F_PL_INTR_EXT; writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, adapter->regs + A_PL_ENABLE); schedule_work(&adapter->ext_intr_handler_task); } void t1_fatal_err(struct adapter *adapter) { if (adapter->flags & FULL_INIT_DONE) { t1_sge_stop(adapter->sge); t1_interrupts_disable(adapter); } pr_alert("%s: encountered fatal error, operation suspended\n", adapter->name); } static const struct net_device_ops cxgb_netdev_ops = { .ndo_open = cxgb_open, .ndo_stop = cxgb_close, .ndo_start_xmit = t1_start_xmit, .ndo_get_stats = t1_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = t1_set_rxmode, .ndo_do_ioctl = t1_ioctl, .ndo_change_mtu = t1_change_mtu, .ndo_set_mac_address = t1_set_mac_addr, .ndo_fix_features = t1_fix_features, .ndo_set_features = t1_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = t1_netpoll, #endif }; static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, err, pci_using_dac = 0; unsigned long mmio_start, mmio_len; const struct board_info *bi; struct adapter *adapter = NULL; struct port_info *pi; pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION); err = pci_enable_device(pdev); if (err) 
return err; if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { pr_err("%s: cannot find PCI device memory base address\n", pci_name(pdev)); err = -ENODEV; goto out_disable_pdev; } if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { pci_using_dac = 1; if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { pr_err("%s: unable to obtain 64-bit DMA for " "consistent allocations\n", pci_name(pdev)); err = -ENODEV; goto out_disable_pdev; } } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { pr_err("%s: no usable DMA configuration\n", pci_name(pdev)); goto out_disable_pdev; } err = pci_request_regions(pdev, DRV_NAME); if (err) { pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev)); goto out_disable_pdev; } pci_set_master(pdev); mmio_start = pci_resource_start(pdev, 0); mmio_len = pci_resource_len(pdev, 0); bi = t1_get_board_info(ent->driver_data); for (i = 0; i < bi->port_number; ++i) { struct net_device *netdev; netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter)); if (!netdev) { err = -ENOMEM; goto out_free_dev; } SET_NETDEV_DEV(netdev, &pdev->dev); if (!adapter) { adapter = netdev_priv(netdev); adapter->pdev = pdev; adapter->port[0].dev = netdev; /* so we don't leak it */ adapter->regs = ioremap(mmio_start, mmio_len); if (!adapter->regs) { pr_err("%s: cannot map device registers\n", pci_name(pdev)); err = -ENOMEM; goto out_free_dev; } if (t1_get_board_rev(adapter, bi, &adapter->params)) { err = -ENODEV; /* Can't handle this chip rev */ goto out_free_dev; } adapter->name = pci_name(pdev); adapter->msg_enable = dflt_msg_enable; adapter->mmio_len = mmio_len; spin_lock_init(&adapter->tpi_lock); spin_lock_init(&adapter->work_lock); spin_lock_init(&adapter->async_lock); spin_lock_init(&adapter->mac_lock); INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task); INIT_DELAYED_WORK(&adapter->stats_update_task, mac_stats_task); pci_set_drvdata(pdev, netdev); } pi = &adapter->port[i]; pi->dev = netdev; netif_carrier_off(netdev); netdev->irq = 
pdev->irq; netdev->if_port = i; netdev->mem_start = mmio_start; netdev->mem_end = mmio_start + mmio_len - 1; netdev->ml_priv = adapter; netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_LLTX; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; if (vlan_tso_capable(adapter)) { netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; /* T204: disable TSO */ if (!(is_T2(adapter)) || bi->port_number != 4) { netdev->hw_features |= NETIF_F_TSO; netdev->features |= NETIF_F_TSO; } } netdev->netdev_ops = &cxgb_netdev_ops; netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ? sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt); netif_napi_add(netdev, &adapter->napi, t1_poll, 64); SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); } if (t1_init_sw_modules(adapter, bi) < 0) { err = -ENODEV; goto out_free_dev; } /* * The card is now ready to go. If any errors occur during device * registration we do not fail the whole card but rather proceed only * with the ports we manage to register successfully. However we must * register at least one net device. */ for (i = 0; i < bi->port_number; ++i) { err = register_netdev(adapter->port[i].dev); if (err) pr_warn("%s: cannot register net device %s, skipping\n", pci_name(pdev), adapter->port[i].dev->name); else { /* * Change the name we use for messages to the name of * the first successfully registered interface. */ if (!adapter->registered_device_map) adapter->name = adapter->port[i].dev->name; __set_bit(i, &adapter->registered_device_map); } } if (!adapter->registered_device_map) { pr_err("%s: could not register any net devices\n", pci_name(pdev)); goto out_release_adapter_res; } pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name, bi->desc, adapter->params.chip_revision, adapter->params.pci.is_pcix ? 
"PCIX" : "PCI", adapter->params.pci.speed, adapter->params.pci.width); /* * Set the T1B ASIC and memory clocks. */ if (t1powersave) adapter->t1powersave = LCLOCK; /* HW default is powersave mode. */ else adapter->t1powersave = HCLOCK; if (t1_is_T1B(adapter)) t1_clock(adapter, t1powersave); return 0; out_release_adapter_res: t1_free_sw_modules(adapter); out_free_dev: if (adapter) { if (adapter->regs) iounmap(adapter->regs); for (i = bi->port_number - 1; i >= 0; --i) if (adapter->port[i].dev) free_netdev(adapter->port[i].dev); } pci_release_regions(pdev); out_disable_pdev: pci_disable_device(pdev); return err; } static void bit_bang(struct adapter *adapter, int bitdata, int nbits) { int data; int i; u32 val; enum { S_CLOCK = 1 << 3, S_DATA = 1 << 4 }; for (i = (nbits - 1); i > -1; i--) { udelay(50); data = ((bitdata >> i) & 0x1); __t1_tpi_read(adapter, A_ELMER0_GPO, &val); if (data) val |= S_DATA; else val &= ~S_DATA; udelay(50); /* Set SCLOCK low */ val &= ~S_CLOCK; __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); /* Write SCLOCK high */ val |= S_CLOCK; __t1_tpi_write(adapter, A_ELMER0_GPO, val); } } static int t1_clock(struct adapter *adapter, int mode) { u32 val; int M_CORE_VAL; int M_MEM_VAL; enum { M_CORE_BITS = 9, T_CORE_VAL = 0, T_CORE_BITS = 2, N_CORE_VAL = 0, N_CORE_BITS = 2, M_MEM_BITS = 9, T_MEM_VAL = 0, T_MEM_BITS = 2, N_MEM_VAL = 0, N_MEM_BITS = 2, NP_LOAD = 1 << 17, S_LOAD_MEM = 1 << 5, S_LOAD_CORE = 1 << 6, S_CLOCK = 1 << 3 }; if (!t1_is_T1B(adapter)) return -ENODEV; /* Can't re-clock this chip. */ if (mode & 2) return 0; /* show current mode. */ if ((adapter->t1powersave & 1) == (mode & 1)) return -EALREADY; /* ASIC already running in mode. */ if ((mode & 1) == HCLOCK) { M_CORE_VAL = 0x14; M_MEM_VAL = 0x18; adapter->t1powersave = HCLOCK; /* overclock */ } else { M_CORE_VAL = 0xe; M_MEM_VAL = 0x10; adapter->t1powersave = LCLOCK; /* underclock */ } /* Don't interrupt this serial stream! 
*/ spin_lock(&adapter->tpi_lock); /* Initialize for ASIC core */ __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val |= NP_LOAD; udelay(50); __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val &= ~S_LOAD_CORE; val &= ~S_CLOCK; __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); /* Serial program the ASIC clock synthesizer */ bit_bang(adapter, T_CORE_VAL, T_CORE_BITS); bit_bang(adapter, N_CORE_VAL, N_CORE_BITS); bit_bang(adapter, M_CORE_VAL, M_CORE_BITS); udelay(50); /* Finish ASIC core */ __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val |= S_LOAD_CORE; udelay(50); __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val &= ~S_LOAD_CORE; udelay(50); __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); /* Initialize for memory */ __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val |= NP_LOAD; udelay(50); __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val &= ~S_LOAD_MEM; val &= ~S_CLOCK; udelay(50); __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); /* Serial program the memory clock synthesizer */ bit_bang(adapter, T_MEM_VAL, T_MEM_BITS); bit_bang(adapter, N_MEM_VAL, N_MEM_BITS); bit_bang(adapter, M_MEM_VAL, M_MEM_BITS); udelay(50); /* Finish memory */ __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val |= S_LOAD_MEM; udelay(50); __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val &= ~S_LOAD_MEM; udelay(50); __t1_tpi_write(adapter, A_ELMER0_GPO, val); spin_unlock(&adapter->tpi_lock); return 0; } static inline void t1_sw_reset(struct pci_dev *pdev) { pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3); pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0); } static void remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct adapter *adapter = dev->ml_priv; int i; for_each_port(adapter, i) { if (test_bit(i, &adapter->registered_device_map)) 
unregister_netdev(adapter->port[i].dev); } t1_free_sw_modules(adapter); iounmap(adapter->regs); while (--i >= 0) { if (adapter->port[i].dev) free_netdev(adapter->port[i].dev); } pci_release_regions(pdev); pci_disable_device(pdev); t1_sw_reset(pdev); } static struct pci_driver cxgb_pci_driver = { .name = DRV_NAME, .id_table = t1_pci_tbl, .probe = init_one, .remove = remove_one, }; module_pci_driver(cxgb_pci_driver);
gpl-2.0
myjang0507/Polaris
drivers/net/wireless/ipsecdrvtl/ap.c
294
46017
/* 'sha.c' Obfuscated by COBF (Version 1.06 2006-01-07 by BB) at Mon Dec 22 18:00:49 2014 */ #include"cobf.h" #ifdef _WIN32 #if defined( UNDER_CE) && defined( bb334) || ! defined( bb341) #define bb357 1 #define bb356 1 #else #define bb335 bb350 #define bb358 1 #define bb348 1 #endif #define bb360 1 #include"uncobf.h" #include<ndis.h> #include"cobf.h" #ifdef UNDER_CE #include"uncobf.h" #include<ndiswan.h> #include"cobf.h" #endif #include"uncobf.h" #include<stdio.h> #include<basetsd.h> #include"cobf.h" bba bbt bbl bbf, *bb3;bba bbt bbe bbo, *bb80;bba bb137 bb125, *bb351; bba bbt bbl bb41, *bb73;bba bbt bb137 bbk, *bb59;bba bbe bbu, *bb134; bba bbh bbf*bb79; #ifdef bb307 bba bbd bb61, *bb124; #endif #else #include"uncobf.h" #include<linux/module.h> #include<linux/ctype.h> #include<linux/time.h> #include<linux/slab.h> #include"cobf.h" #ifndef bb118 #define bb118 #ifdef _WIN32 #include"uncobf.h" #include<wtypes.h> #include"cobf.h" #else #ifdef bb121 #include"uncobf.h" #include<linux/types.h> #include"cobf.h" #else #include"uncobf.h" #include<stddef.h> #include<sys/types.h> #include"cobf.h" #endif #endif #ifdef _WIN32 #ifdef _MSC_VER bba bb117 bb224; #endif #else bba bbe bbu, *bb134, *bb216; #define bb200 1 #define bb202 0 bba bb261 bb249, *bb205, *bb252;bba bbe bb278, *bb255, *bb227;bba bbt bbo, *bb80, *bb215;bba bb8 bb266, *bb221;bba bbt bb8 bb226, *bb230; bba bb8 bb119, *bb212;bba bbt bb8 bb63, *bb237;bba bb63 bb228, *bb251 ;bba bb63 bb259, *bb220;bba bb119 bb117, *bb217;bba bb244 bb289;bba bb210 bb125;bba bb262 bb85;bba bb112 bb116;bba bb112 bb235; #ifdef bb234 bba bb233 bb41, *bb73;bba bb287 bbk, *bb59;bba bb209 bbd, *bb31;bba bb222 bb57, *bb120; #else bba bb231 bb41, *bb73;bba bb253 bbk, *bb59;bba bb245 bbd, *bb31;bba bb229 bb57, *bb120; #endif bba bb41 bbf, *bb3, *bb263;bba bbk bb206, *bb225, *bb286;bba bbk bb282 , *bb246, *bb284;bba bbd bb61, *bb124, *bb269;bba bb85 bb38, *bb241, * bb223;bba bbd bb239, *bb265, *bb243;bba bb116 bb272, *bb213, *bb281; bba bb57 
bb270, *bb240, *bb208; #define bb143 bbb bba bbb*bb247, *bb81;bba bbh bbb*bb271;bba bbl bb218;bba bbl*bb207; bba bbh bbl*bb62; #if defined( bb121) bba bbe bb115; #endif bba bb115 bb19;bba bb19*bb273;bba bbh bb19*bb186; #if defined( bb268) || defined( bb248) bba bb19 bb37;bba bb19 bb111; #else bba bbl bb37;bba bbt bbl bb111; #endif bba bbh bb37*bb279;bba bb37*bb277;bba bb61 bb211, *bb219;bba bbb* bb107;bba bb107*bb257; #define bb250( bb36) bbj bb36##__ { bbe bb267; }; bba bbj bb36##__ * \ bb36 bba bbj{bb38 bb190,bb260,bb214,bb285;}bb232, *bb238, *bb283;bba bbj{ bb38 bb10,bb177;}bb254, *bb280, *bb242;bba bbj{bb38 bb264,bb275;} bb274, *bb288, *bb276; #endif bba bbh bbf*bb79; #endif bba bbf bb103; #define IN #define OUT #ifdef _DEBUG #define bb147( bbc) bb27( bbc) #else #define bb147( bbc) ( bbb)( bbc) #endif bba bbe bb160, *bb172; #define bb293 0 #define bb313 1 #define bb297 2 #define bb325 3 #define bb354 4 bba bbe bb361;bba bbb*bb123; #endif #ifdef _WIN32 #ifndef UNDER_CE #define bb32 bb346 #define bb43 bb347 bba bbt bb8 bb32;bba bb8 bb43; #endif #else #endif #ifdef _WIN32 bbb*bb127(bb32 bb48);bbb bb108(bbb* );bbb*bb138(bb32 bb158,bb32 bb48); #else #define bb127( bbc) bb146(1, bbc, bb141) #define bb108( bbc) bb340( bbc) #define bb138( bbc, bbp) bb146( bbc, bbp, bb141) #endif #ifdef _WIN32 #define bb27( bbc) bb339( bbc) #else #ifdef _DEBUG bbe bb145(bbh bbl*bb98,bbh bbl*bb26,bbt bb258); #define bb27( bbc) ( bbb)(( bbc) || ( bb145(# bbc, __FILE__, __LINE__ \ ))) #else #define bb27( bbc) (( bbb)0) #endif #endif bb43 bb301(bb43*bb319); #ifndef _WIN32 bbe bb331(bbh bbl*bbg);bbe bb320(bbh bbl*bb20,...); #endif #ifdef _WIN32 bba bb355 bb95; #define bb142( bbc) bb353( bbc) #define bb144( bbc) bb336( bbc) #define bb135( bbc) bb359( bbc) #define bb133( bbc) bb342( bbc) #else bba bb343 bb95; #define bb142( bbc) ( bbb)( * bbc = bb337( bbc)) #define bb144( bbc) (( bbb)0) #define bb135( bbc) bb352( bbc) #define bb133( bbc) bb344( bbc) #endif #ifdef __cplusplus bbr"\x43"{ #endif 
bba bbj{bbd bb5;bbd bb23[5 ];bbf bb105[64 ];}bb459;bbb bb1898(bb459*bbi );bbb bb1331(bb459*bbi,bbh bbb*bbx,bbo bb5);bbb bb1844(bb459*bbi,bbb* bb1);bba bbj{bbd bb5;bbd bb23[8 ];bbf bb105[64 ];}bb412;bbb bb1865( bb412*bbi);bbb bb1277(bb412*bbi,bbh bbb*bbx,bbo bb5);bbb bb1860(bb412 *bbi,bbb*bb1);bba bb412 bb965;bbb bb1962(bb965*bbi);bbb bb1913(bb965 *bbi,bbb*bb1);bba bbj{bbd bb5;bb57 bb23[8 ];bbf bb105[128 ];}bb315;bbb bb1851(bb315*bbi);bbb bb1065(bb315*bbi,bbh bbb*bbx,bbo bb5);bbb bb1885 (bb315*bbi,bbb*bb1);bba bb315 bb626;bbb bb1837(bb626*bbi);bbb bb1855( bb626*bbi,bbb*bb1);bba bb315 bb978;bbb bb1838(bb978*bbi);bbb bb1897( bb978*bbi,bbb*bb1);bba bb315 bb961;bbb bb1854(bb961*bbi);bbb bb1845( bb961*bbi,bbb*bb1);bbb bb1955(bbb*bb1,bbh bbb*bbx,bbo bb5);bbb bb1916 (bbb*bb1,bbh bbb*bbx,bbo bb5);bbb bb2020(bbb*bb1,bbh bbb*bbx,bbo bb5); bbb bb1983(bbb*bb1,bbh bbb*bbx,bbo bb5);bbb bb1967(bbb*bb1,bbh bbb* bbx,bbo bb5);bbb bb1987(bbb*bb1,bbh bbb*bbx,bbo bb5);bbb bb2080(bbb* bb1,bb62 bbx);bbb bb2022(bbb*bb1,bb62 bbx);bbb bb2091(bbb*bb1,bb62 bbx );bbb bb2082(bbb*bb1,bb62 bbx);bbb bb2057(bbb*bb1,bb62 bbx);bbb bb2087 (bbb*bb1,bb62 bbx); #ifdef __cplusplus } #endif bb40 bbb bb2416(bbd bb23[5 ],bbh bbf bb98[64 ]){bb27(bb12(bbe)>=4 );{bbd bbc=bb23[0 ],bbp=bb23[1 ],bbn=bb23[2 ],bbs=bb23[3 ],bbv=bb23[4 ];bbd bbz; bbo bbw[80 ];bb90(bbz=0 ;bbz<16 ;bbz++,bb98+=4 )bbw[bbz]=(bb98[3 ]|bb98[2 ] <<8 |bb98[1 ]<<16 |bb98[0 ]<<24 );bb90(bbz=16 ;bbz<80 ;bbz++){bbd bb10=bbw[ bbz-3 ]^bbw[bbz-8 ]^bbw[bbz-14 ]^bbw[bbz-16 ];bbw[bbz]=((bb10)<<(1 )|(bb10 )>>(32 -1 ));}bbv+=((bbc)<<(5 )|(bbc)>>(32 -5 ))+(bbp&bbn|~bbp&bbs)+ 0x5a827999 +bbw[0 ];bbp=((bbp)<<(30 )|(bbp)>>(32 -30 ));bbs+=((bbv)<<(5 )|( bbv)>>(32 -5 ))+(bbc&bbp|~bbc&bbn)+0x5a827999 +bbw[0 +1 ];bbc=((bbc)<<(30 )| (bbc)>>(32 -30 ));bbn+=((bbs)<<(5 )|(bbs)>>(32 -5 ))+(bbv&bbc|~bbv&bbp)+ 0x5a827999 +bbw[0 +2 ];bbv=((bbv)<<(30 )|(bbv)>>(32 -30 ));bbp+=((bbn)<<(5 )| (bbn)>>(32 -5 ))+(bbs&bbv|~bbs&bbc)+0x5a827999 +bbw[0 +3 ];bbs=((bbs)<<(30 
)|(bbs)>>(32 -30 ));bbc+=((bbp)<<(5 )|(bbp)>>(32 -5 ))+(bbn&bbs|~bbn&bbv)+ 0x5a827999 +bbw[0 +4 ];bbn=((bbn)<<(30 )|(bbn)>>(32 -30 ));bbv+=((bbc)<<(5 )| (bbc)>>(32 -5 ))+(bbp&bbn|~bbp&bbs)+0x5a827999 +bbw[0 +5 ];bbp=((bbp)<<(30 )|(bbp)>>(32 -30 ));bbs+=((bbv)<<(5 )|(bbv)>>(32 -5 ))+(bbc&bbp|~bbc&bbn)+ 0x5a827999 +bbw[0 +5 +1 ];bbc=((bbc)<<(30 )|(bbc)>>(32 -30 ));bbn+=((bbs)<<( 5 )|(bbs)>>(32 -5 ))+(bbv&bbc|~bbv&bbp)+0x5a827999 +bbw[0 +5 +2 ];bbv=((bbv)<< (30 )|(bbv)>>(32 -30 ));bbp+=((bbn)<<(5 )|(bbn)>>(32 -5 ))+(bbs&bbv|~bbs& bbc)+0x5a827999 +bbw[0 +5 +3 ];bbs=((bbs)<<(30 )|(bbs)>>(32 -30 ));bbc+=(( bbp)<<(5 )|(bbp)>>(32 -5 ))+(bbn&bbs|~bbn&bbv)+0x5a827999 +bbw[0 +5 +4 ];bbn =((bbn)<<(30 )|(bbn)>>(32 -30 ));bbv+=((bbc)<<(5 )|(bbc)>>(32 -5 ))+(bbp& bbn|~bbp&bbs)+0x5a827999 +bbw[0 +10 ];bbp=((bbp)<<(30 )|(bbp)>>(32 -30 )); bbs+=((bbv)<<(5 )|(bbv)>>(32 -5 ))+(bbc&bbp|~bbc&bbn)+0x5a827999 +bbw[0 + 10 +1 ];bbc=((bbc)<<(30 )|(bbc)>>(32 -30 ));bbn+=((bbs)<<(5 )|(bbs)>>(32 -5 ))+ (bbv&bbc|~bbv&bbp)+0x5a827999 +bbw[0 +10 +2 ];bbv=((bbv)<<(30 )|(bbv)>>(32 -30 ));bbp+=((bbn)<<(5 )|(bbn)>>(32 -5 ))+(bbs&bbv|~bbs&bbc)+0x5a827999 + bbw[0 +10 +3 ];bbs=((bbs)<<(30 )|(bbs)>>(32 -30 ));bbc+=((bbp)<<(5 )|(bbp)>> (32 -5 ))+(bbn&bbs|~bbn&bbv)+0x5a827999 +bbw[0 +10 +4 ];bbn=((bbn)<<(30 )|( bbn)>>(32 -30 ));bbv+=((bbc)<<(5 )|(bbc)>>(32 -5 ))+(bbp&bbn|~bbp&bbs)+ 0x5a827999 +bbw[0 +15 ];bbp=((bbp)<<(30 )|(bbp)>>(32 -30 ));bbs+=((bbv)<<(5 )|(bbv)>>(32 -5 ))+(bbc&bbp|~bbc&bbn)+0x5a827999 +bbw[0 +15 +1 ];bbc=((bbc)<< (30 )|(bbc)>>(32 -30 ));bbn+=((bbs)<<(5 )|(bbs)>>(32 -5 ))+(bbv&bbc|~bbv& bbp)+0x5a827999 +bbw[0 +15 +2 ];bbv=((bbv)<<(30 )|(bbv)>>(32 -30 ));bbp+=(( bbn)<<(5 )|(bbn)>>(32 -5 ))+(bbs&bbv|~bbs&bbc)+0x5a827999 +bbw[0 +15 +3 ]; bbs=((bbs)<<(30 )|(bbs)>>(32 -30 ));bbc+=((bbp)<<(5 )|(bbp)>>(32 -5 ))+(bbn &bbs|~bbn&bbv)+0x5a827999 +bbw[0 +15 +4 ];bbn=((bbn)<<(30 )|(bbn)>>(32 -30 )); bbv+=((bbc)<<(5 )|(bbc)>>(32 -5 ))+(bbp^bbn^bbs)+0x6ed9eba1 +bbw[20 ];bbp= ((bbp)<<(30 )|(bbp)>>(32 -30 
));bbs+=((bbv)<<(5 )|(bbv)>>(32 -5 ))+(bbc^bbp ^bbn)+0x6ed9eba1 +bbw[20 +1 ];bbc=((bbc)<<(30 )|(bbc)>>(32 -30 ));bbn+=(( bbs)<<(5 )|(bbs)>>(32 -5 ))+(bbv^bbc^bbp)+0x6ed9eba1 +bbw[20 +2 ];bbv=((bbv )<<(30 )|(bbv)>>(32 -30 ));bbp+=((bbn)<<(5 )|(bbn)>>(32 -5 ))+(bbs^bbv^bbc)+ 0x6ed9eba1 +bbw[20 +3 ];bbs=((bbs)<<(30 )|(bbs)>>(32 -30 ));bbc+=((bbp)<<(5 )|(bbp)>>(32 -5 ))+(bbn^bbs^bbv)+0x6ed9eba1 +bbw[20 +4 ];bbn=((bbn)<<(30 )| (bbn)>>(32 -30 ));bbv+=((bbc)<<(5 )|(bbc)>>(32 -5 ))+(bbp^bbn^bbs)+ 0x6ed9eba1 +bbw[20 +5 ];bbp=((bbp)<<(30 )|(bbp)>>(32 -30 ));bbs+=((bbv)<<(5 )|(bbv)>>(32 -5 ))+(bbc^bbp^bbn)+0x6ed9eba1 +bbw[20 +5 +1 ];bbc=((bbc)<<(30 )|(bbc)>>(32 -30 ));bbn+=((bbs)<<(5 )|(bbs)>>(32 -5 ))+(bbv^bbc^bbp)+ 0x6ed9eba1 +bbw[20 +5 +2 ];bbv=((bbv)<<(30 )|(bbv)>>(32 -30 ));bbp+=((bbn)<< (5 )|(bbn)>>(32 -5 ))+(bbs^bbv^bbc)+0x6ed9eba1 +bbw[20 +5 +3 ];bbs=((bbs)<<( 30 )|(bbs)>>(32 -30 ));bbc+=((bbp)<<(5 )|(bbp)>>(32 -5 ))+(bbn^bbs^bbv)+ 0x6ed9eba1 +bbw[20 +5 +4 ];bbn=((bbn)<<(30 )|(bbn)>>(32 -30 ));bbv+=((bbc)<< (5 )|(bbc)>>(32 -5 ))+(bbp^bbn^bbs)+0x6ed9eba1 +bbw[20 +10 ];bbp=((bbp)<<( 30 )|(bbp)>>(32 -30 ));bbs+=((bbv)<<(5 )|(bbv)>>(32 -5 ))+(bbc^bbp^bbn)+ 0x6ed9eba1 +bbw[20 +10 +1 ];bbc=((bbc)<<(30 )|(bbc)>>(32 -30 ));bbn+=((bbs)<< (5 )|(bbs)>>(32 -5 ))+(bbv^bbc^bbp)+0x6ed9eba1 +bbw[20 +10 +2 ];bbv=((bbv)<< (30 )|(bbv)>>(32 -30 ));bbp+=((bbn)<<(5 )|(bbn)>>(32 -5 ))+(bbs^bbv^bbc)+ 0x6ed9eba1 +bbw[20 +10 +3 ];bbs=((bbs)<<(30 )|(bbs)>>(32 -30 ));bbc+=((bbp)<< (5 )|(bbp)>>(32 -5 ))+(bbn^bbs^bbv)+0x6ed9eba1 +bbw[20 +10 +4 ];bbn=((bbn)<< (30 )|(bbn)>>(32 -30 ));bbv+=((bbc)<<(5 )|(bbc)>>(32 -5 ))+(bbp^bbn^bbs)+ 0x6ed9eba1 +bbw[20 +15 ];bbp=((bbp)<<(30 )|(bbp)>>(32 -30 ));bbs+=((bbv)<<( 5 )|(bbv)>>(32 -5 ))+(bbc^bbp^bbn)+0x6ed9eba1 +bbw[20 +15 +1 ];bbc=((bbc)<<( 30 )|(bbc)>>(32 -30 ));bbn+=((bbs)<<(5 )|(bbs)>>(32 -5 ))+(bbv^bbc^bbp)+ 0x6ed9eba1 +bbw[20 +15 +2 ];bbv=((bbv)<<(30 )|(bbv)>>(32 -30 ));bbp+=((bbn)<< (5 )|(bbn)>>(32 -5 ))+(bbs^bbv^bbc)+0x6ed9eba1 +bbw[20 +15 +3 
];bbs=((bbs)<< (30 )|(bbs)>>(32 -30 ));bbc+=((bbp)<<(5 )|(bbp)>>(32 -5 ))+(bbn^bbs^bbv)+ 0x6ed9eba1 +bbw[20 +15 +4 ];bbn=((bbn)<<(30 )|(bbn)>>(32 -30 ));bbv+=((bbc)<< (5 )|(bbc)>>(32 -5 ))+(bbp&bbn|bbp&bbs|bbn&bbs)+0x8f1bbcdc +bbw[40 ];bbp=( (bbp)<<(30 )|(bbp)>>(32 -30 ));bbs+=((bbv)<<(5 )|(bbv)>>(32 -5 ))+(bbc&bbp| bbc&bbn|bbp&bbn)+0x8f1bbcdc +bbw[40 +1 ];bbc=((bbc)<<(30 )|(bbc)>>(32 -30 )); bbn+=((bbs)<<(5 )|(bbs)>>(32 -5 ))+(bbv&bbc|bbv&bbp|bbc&bbp)+0x8f1bbcdc + bbw[40 +2 ];bbv=((bbv)<<(30 )|(bbv)>>(32 -30 ));bbp+=((bbn)<<(5 )|(bbn)>>( 32 -5 ))+(bbs&bbv|bbs&bbc|bbv&bbc)+0x8f1bbcdc +bbw[40 +3 ];bbs=((bbs)<<(30 )|(bbs)>>(32 -30 ));bbc+=((bbp)<<(5 )|(bbp)>>(32 -5 ))+(bbn&bbs|bbn&bbv| bbs&bbv)+0x8f1bbcdc +bbw[40 +4 ];bbn=((bbn)<<(30 )|(bbn)>>(32 -30 ));bbv+=( (bbc)<<(5 )|(bbc)>>(32 -5 ))+(bbp&bbn|bbp&bbs|bbn&bbs)+0x8f1bbcdc +bbw[40 +5 ];bbp=((bbp)<<(30 )|(bbp)>>(32 -30 ));bbs+=((bbv)<<(5 )|(bbv)>>(32 -5 ))+ (bbc&bbp|bbc&bbn|bbp&bbn)+0x8f1bbcdc +bbw[40 +5 +1 ];bbc=((bbc)<<(30 )|( bbc)>>(32 -30 ));bbn+=((bbs)<<(5 )|(bbs)>>(32 -5 ))+(bbv&bbc|bbv&bbp|bbc& bbp)+0x8f1bbcdc +bbw[40 +5 +2 ];bbv=((bbv)<<(30 )|(bbv)>>(32 -30 ));bbp+=(( bbn)<<(5 )|(bbn)>>(32 -5 ))+(bbs&bbv|bbs&bbc|bbv&bbc)+0x8f1bbcdc +bbw[40 + 5 +3 ];bbs=((bbs)<<(30 )|(bbs)>>(32 -30 ));bbc+=((bbp)<<(5 )|(bbp)>>(32 -5 ))+ (bbn&bbs|bbn&bbv|bbs&bbv)+0x8f1bbcdc +bbw[40 +5 +4 ];bbn=((bbn)<<(30 )|( bbn)>>(32 -30 ));bbv+=((bbc)<<(5 )|(bbc)>>(32 -5 ))+(bbp&bbn|bbp&bbs|bbn& bbs)+0x8f1bbcdc +bbw[40 +10 ];bbp=((bbp)<<(30 )|(bbp)>>(32 -30 ));bbs+=(( bbv)<<(5 )|(bbv)>>(32 -5 ))+(bbc&bbp|bbc&bbn|bbp&bbn)+0x8f1bbcdc +bbw[40 + 10 +1 ];bbc=((bbc)<<(30 )|(bbc)>>(32 -30 ));bbn+=((bbs)<<(5 )|(bbs)>>(32 -5 ))+ (bbv&bbc|bbv&bbp|bbc&bbp)+0x8f1bbcdc +bbw[40 +10 +2 ];bbv=((bbv)<<(30 )|( bbv)>>(32 -30 ));bbp+=((bbn)<<(5 )|(bbn)>>(32 -5 ))+(bbs&bbv|bbs&bbc|bbv& bbc)+0x8f1bbcdc +bbw[40 +10 +3 ];bbs=((bbs)<<(30 )|(bbs)>>(32 -30 ));bbc+=(( bbp)<<(5 )|(bbp)>>(32 -5 ))+(bbn&bbs|bbn&bbv|bbs&bbv)+0x8f1bbcdc +bbw[40 + 10 +4 ];bbn=((bbn)<<(30 
)|(bbn)>>(32 -30 ));bbv+=((bbc)<<(5 )|(bbc)>>(32 -5 ))+ (bbp&bbn|bbp&bbs|bbn&bbs)+0x8f1bbcdc +bbw[40 +15 ];bbp=((bbp)<<(30 )|(bbp )>>(32 -30 ));bbs+=((bbv)<<(5 )|(bbv)>>(32 -5 ))+(bbc&bbp|bbc&bbn|bbp&bbn)+ 0x8f1bbcdc +bbw[40 +15 +1 ];bbc=((bbc)<<(30 )|(bbc)>>(32 -30 ));bbn+=((bbs)<< (5 )|(bbs)>>(32 -5 ))+(bbv&bbc|bbv&bbp|bbc&bbp)+0x8f1bbcdc +bbw[40 +15 +2 ]; bbv=((bbv)<<(30 )|(bbv)>>(32 -30 ));bbp+=((bbn)<<(5 )|(bbn)>>(32 -5 ))+(bbs &bbv|bbs&bbc|bbv&bbc)+0x8f1bbcdc +bbw[40 +15 +3 ];bbs=((bbs)<<(30 )|(bbs)>> (32 -30 ));bbc+=((bbp)<<(5 )|(bbp)>>(32 -5 ))+(bbn&bbs|bbn&bbv|bbs&bbv)+ 0x8f1bbcdc +bbw[40 +15 +4 ];bbn=((bbn)<<(30 )|(bbn)>>(32 -30 ));bbv+=((bbc)<< (5 )|(bbc)>>(32 -5 ))+(bbp^bbn^bbs)+0xca62c1d6 +bbw[60 ];bbp=((bbp)<<(30 )| (bbp)>>(32 -30 ));bbs+=((bbv)<<(5 )|(bbv)>>(32 -5 ))+(bbc^bbp^bbn)+ 0xca62c1d6 +bbw[60 +1 ];bbc=((bbc)<<(30 )|(bbc)>>(32 -30 ));bbn+=((bbs)<<(5 )|(bbs)>>(32 -5 ))+(bbv^bbc^bbp)+0xca62c1d6 +bbw[60 +2 ];bbv=((bbv)<<(30 )| (bbv)>>(32 -30 ));bbp+=((bbn)<<(5 )|(bbn)>>(32 -5 ))+(bbs^bbv^bbc)+ 0xca62c1d6 +bbw[60 +3 ];bbs=((bbs)<<(30 )|(bbs)>>(32 -30 ));bbc+=((bbp)<<(5 )|(bbp)>>(32 -5 ))+(bbn^bbs^bbv)+0xca62c1d6 +bbw[60 +4 ];bbn=((bbn)<<(30 )| (bbn)>>(32 -30 ));bbv+=((bbc)<<(5 )|(bbc)>>(32 -5 ))+(bbp^bbn^bbs)+ 0xca62c1d6 +bbw[60 +5 ];bbp=((bbp)<<(30 )|(bbp)>>(32 -30 ));bbs+=((bbv)<<(5 )|(bbv)>>(32 -5 ))+(bbc^bbp^bbn)+0xca62c1d6 +bbw[60 +5 +1 ];bbc=((bbc)<<(30 )|(bbc)>>(32 -30 ));bbn+=((bbs)<<(5 )|(bbs)>>(32 -5 ))+(bbv^bbc^bbp)+ 0xca62c1d6 +bbw[60 +5 +2 ];bbv=((bbv)<<(30 )|(bbv)>>(32 -30 ));bbp+=((bbn)<< (5 )|(bbn)>>(32 -5 ))+(bbs^bbv^bbc)+0xca62c1d6 +bbw[60 +5 +3 ];bbs=((bbs)<<( 30 )|(bbs)>>(32 -30 ));bbc+=((bbp)<<(5 )|(bbp)>>(32 -5 ))+(bbn^bbs^bbv)+ 0xca62c1d6 +bbw[60 +5 +4 ];bbn=((bbn)<<(30 )|(bbn)>>(32 -30 ));bbv+=((bbc)<< (5 )|(bbc)>>(32 -5 ))+(bbp^bbn^bbs)+0xca62c1d6 +bbw[60 +10 ];bbp=((bbp)<<( 30 )|(bbp)>>(32 -30 ));bbs+=((bbv)<<(5 )|(bbv)>>(32 -5 ))+(bbc^bbp^bbn)+ 0xca62c1d6 +bbw[60 +10 +1 ];bbc=((bbc)<<(30 )|(bbc)>>(32 -30 ));bbn+=((bbs)<< 
(5 )|(bbs)>>(32 -5 ))+(bbv^bbc^bbp)+0xca62c1d6 +bbw[60 +10 +2 ];bbv=((bbv)<< (30 )|(bbv)>>(32 -30 ));bbp+=((bbn)<<(5 )|(bbn)>>(32 -5 ))+(bbs^bbv^bbc)+ 0xca62c1d6 +bbw[60 +10 +3 ];bbs=((bbs)<<(30 )|(bbs)>>(32 -30 ));bbc+=((bbp)<< (5 )|(bbp)>>(32 -5 ))+(bbn^bbs^bbv)+0xca62c1d6 +bbw[60 +10 +4 ];bbn=((bbn)<< (30 )|(bbn)>>(32 -30 ));bbv+=((bbc)<<(5 )|(bbc)>>(32 -5 ))+(bbp^bbn^bbs)+ 0xca62c1d6 +bbw[60 +15 ];bbp=((bbp)<<(30 )|(bbp)>>(32 -30 ));bbs+=((bbv)<<( 5 )|(bbv)>>(32 -5 ))+(bbc^bbp^bbn)+0xca62c1d6 +bbw[60 +15 +1 ];bbc=((bbc)<<( 30 )|(bbc)>>(32 -30 ));bbn+=((bbs)<<(5 )|(bbs)>>(32 -5 ))+(bbv^bbc^bbp)+ 0xca62c1d6 +bbw[60 +15 +2 ];bbv=((bbv)<<(30 )|(bbv)>>(32 -30 ));bbp+=((bbn)<< (5 )|(bbn)>>(32 -5 ))+(bbs^bbv^bbc)+0xca62c1d6 +bbw[60 +15 +3 ];bbs=((bbs)<< (30 )|(bbs)>>(32 -30 ));bbc+=((bbp)<<(5 )|(bbp)>>(32 -5 ))+(bbn^bbs^bbv)+ 0xca62c1d6 +bbw[60 +15 +4 ];bbn=((bbn)<<(30 )|(bbn)>>(32 -30 ));bb23[0 ]+=bbc ;bb23[1 ]+=bbp;bb23[2 ]+=bbn;bb23[3 ]+=bbs;bb23[4 ]+=bbv;}}bbb bb1898( bb459*bbi){bb40 bbd bb23[5 ]={0x67452301 ,0xefcdab89 ,0x98badcfe , 0x10325476 ,0xc3d2e1f0 };bbi->bb5=0 ;bb74(bbi->bb23,bb23,bb12(bb23));} bbb bb1331(bb459*bbi,bbh bbb*bb516,bbo bb5){bbh bbf*bbx=(bbh bbf* )bb516 ;bbo bb398=bbi->bb5%bb12(bbi->bb105);bbi->bb5+=bb5;bbm(bb398){bbo bb11 =bb12(bbi->bb105)-bb398;bb74(bbi->bb105+bb398,bbx,((bb5)<(bb11)?(bb5): (bb11)));bbm(bb5<bb11)bb4;bbx+=bb11;bb5-=bb11;bb2416(bbi->bb23,bbi-> bb105);}bb90(;bb5>=bb12(bbi->bb105);bb5-=bb12(bbi->bb105),bbx+=bb12( bbi->bb105))bb2416(bbi->bb23,bbx);bb74(bbi->bb105,bbx,bb5);}bbb bb1844 (bb459*bbi,bbb*bb1){bbd bb1037[2 ]={(bbd)(bbi->bb5>>29 ),(bbd)(bbi->bb5 <<3 )};bbf bb437[bb12(bb1037)];bbo bbz;bb90(bbz=0 ;bbz<bb12(bb437);bbz ++)bb437[bbz]=bb1037[bbz/4 ]>>((3 -bbz%4 ) *8 )&0xff ;{bbf bb1352[]={0x80 } ,bb1353[bb12(bbi->bb105)]={0 };bbo bb398=bbi->bb5%bb12(bbi->bb105); bb1331(bbi,bb1352,1 );bb1331(bbi,bb1353,(bb12(bbi->bb105) *2 -1 -bb12( bb437)-bb398)%bb12(bbi->bb105));}bb1331(bbi,bb437,bb12(bb437));bb90( bbz=0 
;bbz<bb12(bbi->bb23);bbz++)((bbf* )bb1)[bbz]=bbi->bb23[bbz/4 ]>>( (3 -bbz%4 ) *8 )&0xff ;}bb40 bbb bb2470(bbd bb23[8 ],bbh bbf bb98[64 ]){ bb40 bbd bb6[64 ]={0x428a2f98 ,0x71374491 ,0xb5c0fbcf ,0xe9b5dba5 , 0x3956c25b ,0x59f111f1 ,0x923f82a4 ,0xab1c5ed5 ,0xd807aa98 ,0x12835b01 , 0x243185be ,0x550c7dc3 ,0x72be5d74 ,0x80deb1fe ,0x9bdc06a7 ,0xc19bf174 , 0xe49b69c1 ,0xefbe4786 ,0x0fc19dc6 ,0x240ca1cc ,0x2de92c6f ,0x4a7484aa , 0x5cb0a9dc ,0x76f988da ,0x983e5152 ,0xa831c66d ,0xb00327c8 ,0xbf597fc7 , 0xc6e00bf3 ,0xd5a79147 ,0x06ca6351 ,0x14292967 ,0x27b70a85 ,0x2e1b2138 , 0x4d2c6dfc ,0x53380d13 ,0x650a7354 ,0x766a0abb ,0x81c2c92e ,0x92722c85 , 0xa2bfe8a1 ,0xa81a664b ,0xc24b8b70 ,0xc76c51a3 ,0xd192e819 ,0xd6990624 , 0xf40e3585 ,0x106aa070 ,0x19a4c116 ,0x1e376c08 ,0x2748774c ,0x34b0bcb5 , 0x391c0cb3 ,0x4ed8aa4a ,0x5b9cca4f ,0x682e6ff3 ,0x748f82ee ,0x78a5636f , 0x84c87814 ,0x8cc70208 ,0x90befffa ,0xa4506ceb ,0xbef9a3f7 ,0xc67178f2 ,}; bb27(bb12(bbe)>=4 );{bbd bbc=bb23[0 ],bbp=bb23[1 ],bbn=bb23[2 ],bbs=bb23[ 3 ],bbv=bb23[4 ],bb20=bb23[5 ],bb55=bb23[6 ],bb44=bb23[7 ];bbd bbz,bb54, bb89;bbd bbw[64 ];bb90(bbz=0 ;bbz<16 ;bbz++,bb98+=4 )bbw[bbz]=(bb98[3 ]| bb98[2 ]<<8 |bb98[1 ]<<16 |bb98[0 ]<<24 );bb90(bbz=16 ;bbz<64 ;bbz++){bb54=(( bbw[bbz-15 ])>>(7 )|(bbw[bbz-15 ])<<(32 -7 ))^((bbw[bbz-15 ])>>(18 )|(bbw[ bbz-15 ])<<(32 -18 ))^(bbw[bbz-15 ]>>3 );bb89=((bbw[bbz-2 ])>>(17 )|(bbw[bbz -2 ])<<(32 -17 ))^((bbw[bbz-2 ])>>(19 )|(bbw[bbz-2 ])<<(32 -19 ))^(bbw[bbz-2 ] >>10 );bbw[bbz]=bbw[bbz-16 ]+bb54+bbw[bbz-7 ]+bb89;}bb89=(((bbc)>>(2 )|( bbc)<<(32 -2 ))^((bbc)>>(13 )|(bbc)<<(32 -13 ))^((bbc)>>(22 )|(bbc)<<(32 -22 )))+((bbc&bbp)^(bbc&bbn)^(bbp&bbn));bb54=bb44+(((bbv)>>(6 )|(bbv)<<(32 -6 ))^((bbv)>>(11 )|(bbv)<<(32 -11 ))^((bbv)>>(25 )|(bbv)<<(32 -25 )))+((bbv &bb20)^(~bbv&bb55))+bb6[0 ]+bbw[0 ];bbs+=bb54;bb44=bb54+bb89;bb89=((( bb44)>>(2 )|(bb44)<<(32 -2 ))^((bb44)>>(13 )|(bb44)<<(32 -13 ))^((bb44)>>( 22 )|(bb44)<<(32 -22 )))+((bb44&bbc)^(bb44&bbp)^(bbc&bbp));bb54=bb55+((( bbs)>>(6 
)|(bbs)<<(32 -6 ))^((bbs)>>(11 )|(bbs)<<(32 -11 ))^((bbs)>>(25 )|( bbs)<<(32 -25 )))+((bbs&bbv)^(~bbs&bb20))+bb6[0 +1 ]+bbw[0 +1 ];bbn+=bb54; bb55=bb54+bb89;bb89=(((bb55)>>(2 )|(bb55)<<(32 -2 ))^((bb55)>>(13 )|(bb55 )<<(32 -13 ))^((bb55)>>(22 )|(bb55)<<(32 -22 )))+((bb55&bb44)^(bb55&bbc)^( bb44&bbc));bb54=bb20+(((bbn)>>(6 )|(bbn)<<(32 -6 ))^((bbn)>>(11 )|(bbn)<< (32 -11 ))^((bbn)>>(25 )|(bbn)<<(32 -25 )))+((bbn&bbs)^(~bbn&bbv))+bb6[0 +2 ]+bbw[0 +2 ];bbp+=bb54;bb20=bb54+bb89;bb89=(((bb20)>>(2 )|(bb20)<<(32 -2 ))^ ((bb20)>>(13 )|(bb20)<<(32 -13 ))^((bb20)>>(22 )|(bb20)<<(32 -22 )))+((bb20 &bb55)^(bb20&bb44)^(bb55&bb44));bb54=bbv+(((bbp)>>(6 )|(bbp)<<(32 -6 ))^ ((bbp)>>(11 )|(bbp)<<(32 -11 ))^((bbp)>>(25 )|(bbp)<<(32 -25 )))+((bbp&bbn)^ (~bbp&bbs))+bb6[0 +3 ]+bbw[0 +3 ];bbc+=bb54;bbv=bb54+bb89;bb89=(((bbv)>>( 2 )|(bbv)<<(32 -2 ))^((bbv)>>(13 )|(bbv)<<(32 -13 ))^((bbv)>>(22 )|(bbv)<<( 32 -22 )))+((bbv&bb20)^(bbv&bb55)^(bb20&bb55));bb54=bbs+(((bbc)>>(6 )|( bbc)<<(32 -6 ))^((bbc)>>(11 )|(bbc)<<(32 -11 ))^((bbc)>>(25 )|(bbc)<<(32 -25 )))+((bbc&bbp)^(~bbc&bbn))+bb6[0 +4 ]+bbw[0 +4 ];bb44+=bb54;bbs=bb54+bb89 ;bb89=(((bbs)>>(2 )|(bbs)<<(32 -2 ))^((bbs)>>(13 )|(bbs)<<(32 -13 ))^((bbs)>> (22 )|(bbs)<<(32 -22 )))+((bbs&bbv)^(bbs&bb20)^(bbv&bb20));bb54=bbn+((( bb44)>>(6 )|(bb44)<<(32 -6 ))^((bb44)>>(11 )|(bb44)<<(32 -11 ))^((bb44)>>( 25 )|(bb44)<<(32 -25 )))+((bb44&bbc)^(~bb44&bbp))+bb6[0 +5 ]+bbw[0 +5 ];bb55 +=bb54;bbn=bb54+bb89;bb89=(((bbn)>>(2 )|(bbn)<<(32 -2 ))^((bbn)>>(13 )|( bbn)<<(32 -13 ))^((bbn)>>(22 )|(bbn)<<(32 -22 )))+((bbn&bbs)^(bbn&bbv)^( bbs&bbv));bb54=bbp+(((bb55)>>(6 )|(bb55)<<(32 -6 ))^((bb55)>>(11 )|(bb55)<< (32 -11 ))^((bb55)>>(25 )|(bb55)<<(32 -25 )))+((bb55&bb44)^(~bb55&bbc))+ bb6[0 +6 ]+bbw[0 +6 ];bb20+=bb54;bbp=bb54+bb89;bb89=(((bbp)>>(2 )|(bbp)<<( 32 -2 ))^((bbp)>>(13 )|(bbp)<<(32 -13 ))^((bbp)>>(22 )|(bbp)<<(32 -22 )))+(( bbp&bbn)^(bbp&bbs)^(bbn&bbs));bb54=bbc+(((bb20)>>(6 )|(bb20)<<(32 -6 ))^ ((bb20)>>(11 )|(bb20)<<(32 -11 ))^((bb20)>>(25 )|(bb20)<<(32 
-25 )))+((bb20 &bb55)^(~bb20&bb44))+bb6[0 +7 ]+bbw[0 +7 ];bbv+=bb54;bbc=bb54+bb89;bb89=( ((bbc)>>(2 )|(bbc)<<(32 -2 ))^((bbc)>>(13 )|(bbc)<<(32 -13 ))^((bbc)>>(22 )| (bbc)<<(32 -22 )))+((bbc&bbp)^(bbc&bbn)^(bbp&bbn));bb54=bb44+(((bbv)>>( 6 )|(bbv)<<(32 -6 ))^((bbv)>>(11 )|(bbv)<<(32 -11 ))^((bbv)>>(25 )|(bbv)<<( 32 -25 )))+((bbv&bb20)^(~bbv&bb55))+bb6[8 ]+bbw[8 ];bbs+=bb54;bb44=bb54+ bb89;bb89=(((bb44)>>(2 )|(bb44)<<(32 -2 ))^((bb44)>>(13 )|(bb44)<<(32 -13 ))^ ((bb44)>>(22 )|(bb44)<<(32 -22 )))+((bb44&bbc)^(bb44&bbp)^(bbc&bbp)); bb54=bb55+(((bbs)>>(6 )|(bbs)<<(32 -6 ))^((bbs)>>(11 )|(bbs)<<(32 -11 ))^(( bbs)>>(25 )|(bbs)<<(32 -25 )))+((bbs&bbv)^(~bbs&bb20))+bb6[8 +1 ]+bbw[8 +1 ] ;bbn+=bb54;bb55=bb54+bb89;bb89=(((bb55)>>(2 )|(bb55)<<(32 -2 ))^((bb55)>> (13 )|(bb55)<<(32 -13 ))^((bb55)>>(22 )|(bb55)<<(32 -22 )))+((bb55&bb44)^( bb55&bbc)^(bb44&bbc));bb54=bb20+(((bbn)>>(6 )|(bbn)<<(32 -6 ))^((bbn)>>( 11 )|(bbn)<<(32 -11 ))^((bbn)>>(25 )|(bbn)<<(32 -25 )))+((bbn&bbs)^(~bbn& bbv))+bb6[8 +2 ]+bbw[8 +2 ];bbp+=bb54;bb20=bb54+bb89;bb89=(((bb20)>>(2 )|( bb20)<<(32 -2 ))^((bb20)>>(13 )|(bb20)<<(32 -13 ))^((bb20)>>(22 )|(bb20)<<( 32 -22 )))+((bb20&bb55)^(bb20&bb44)^(bb55&bb44));bb54=bbv+(((bbp)>>(6 )| (bbp)<<(32 -6 ))^((bbp)>>(11 )|(bbp)<<(32 -11 ))^((bbp)>>(25 )|(bbp)<<(32 - 25 )))+((bbp&bbn)^(~bbp&bbs))+bb6[8 +3 ]+bbw[8 +3 ];bbc+=bb54;bbv=bb54+ bb89;bb89=(((bbv)>>(2 )|(bbv)<<(32 -2 ))^((bbv)>>(13 )|(bbv)<<(32 -13 ))^(( bbv)>>(22 )|(bbv)<<(32 -22 )))+((bbv&bb20)^(bbv&bb55)^(bb20&bb55));bb54= bbs+(((bbc)>>(6 )|(bbc)<<(32 -6 ))^((bbc)>>(11 )|(bbc)<<(32 -11 ))^((bbc)>> (25 )|(bbc)<<(32 -25 )))+((bbc&bbp)^(~bbc&bbn))+bb6[8 +4 ]+bbw[8 +4 ];bb44+= bb54;bbs=bb54+bb89;bb89=(((bbs)>>(2 )|(bbs)<<(32 -2 ))^((bbs)>>(13 )|(bbs )<<(32 -13 ))^((bbs)>>(22 )|(bbs)<<(32 -22 )))+((bbs&bbv)^(bbs&bb20)^(bbv& bb20));bb54=bbn+(((bb44)>>(6 )|(bb44)<<(32 -6 ))^((bb44)>>(11 )|(bb44)<<( 32 -11 ))^((bb44)>>(25 )|(bb44)<<(32 -25 )))+((bb44&bbc)^(~bb44&bbp))+bb6[ 8 +5 ]+bbw[8 +5 
];bb55+=bb54;bbn=bb54+bb89;bb89=(((bbn)>>(2 )|(bbn)<<(32 -2 ))^((bbn)>>(13 )|(bbn)<<(32 -13 ))^((bbn)>>(22 )|(bbn)<<(32 -22 )))+((bbn& bbs)^(bbn&bbv)^(bbs&bbv));bb54=bbp+(((bb55)>>(6 )|(bb55)<<(32 -6 ))^(( bb55)>>(11 )|(bb55)<<(32 -11 ))^((bb55)>>(25 )|(bb55)<<(32 -25 )))+((bb55& bb44)^(~bb55&bbc))+bb6[8 +6 ]+bbw[8 +6 ];bb20+=bb54;bbp=bb54+bb89;bb89=(( (bbp)>>(2 )|(bbp)<<(32 -2 ))^((bbp)>>(13 )|(bbp)<<(32 -13 ))^((bbp)>>(22 )|( bbp)<<(32 -22 )))+((bbp&bbn)^(bbp&bbs)^(bbn&bbs));bb54=bbc+(((bb20)>>(6 )|(bb20)<<(32 -6 ))^((bb20)>>(11 )|(bb20)<<(32 -11 ))^((bb20)>>(25 )|(bb20)<< (32 -25 )))+((bb20&bb55)^(~bb20&bb44))+bb6[8 +7 ]+bbw[8 +7 ];bbv+=bb54;bbc= bb54+bb89;bb89=(((bbc)>>(2 )|(bbc)<<(32 -2 ))^((bbc)>>(13 )|(bbc)<<(32 -13 ))^((bbc)>>(22 )|(bbc)<<(32 -22 )))+((bbc&bbp)^(bbc&bbn)^(bbp&bbn));bb54 =bb44+(((bbv)>>(6 )|(bbv)<<(32 -6 ))^((bbv)>>(11 )|(bbv)<<(32 -11 ))^((bbv)>> (25 )|(bbv)<<(32 -25 )))+((bbv&bb20)^(~bbv&bb55))+bb6[16 ]+bbw[16 ];bbs+= bb54;bb44=bb54+bb89;bb89=(((bb44)>>(2 )|(bb44)<<(32 -2 ))^((bb44)>>(13 )| (bb44)<<(32 -13 ))^((bb44)>>(22 )|(bb44)<<(32 -22 )))+((bb44&bbc)^(bb44& bbp)^(bbc&bbp));bb54=bb55+(((bbs)>>(6 )|(bbs)<<(32 -6 ))^((bbs)>>(11 )|( bbs)<<(32 -11 ))^((bbs)>>(25 )|(bbs)<<(32 -25 )))+((bbs&bbv)^(~bbs&bb20))+ bb6[16 +1 ]+bbw[16 +1 ];bbn+=bb54;bb55=bb54+bb89;bb89=(((bb55)>>(2 )|(bb55 )<<(32 -2 ))^((bb55)>>(13 )|(bb55)<<(32 -13 ))^((bb55)>>(22 )|(bb55)<<(32 - 22 )))+((bb55&bb44)^(bb55&bbc)^(bb44&bbc));bb54=bb20+(((bbn)>>(6 )|(bbn )<<(32 -6 ))^((bbn)>>(11 )|(bbn)<<(32 -11 ))^((bbn)>>(25 )|(bbn)<<(32 -25 )))+ ((bbn&bbs)^(~bbn&bbv))+bb6[16 +2 ]+bbw[16 +2 ];bbp+=bb54;bb20=bb54+bb89; bb89=(((bb20)>>(2 )|(bb20)<<(32 -2 ))^((bb20)>>(13 )|(bb20)<<(32 -13 ))^(( bb20)>>(22 )|(bb20)<<(32 -22 )))+((bb20&bb55)^(bb20&bb44)^(bb55&bb44)); bb54=bbv+(((bbp)>>(6 )|(bbp)<<(32 -6 ))^((bbp)>>(11 )|(bbp)<<(32 -11 ))^(( bbp)>>(25 )|(bbp)<<(32 -25 )))+((bbp&bbn)^(~bbp&bbs))+bb6[16 +3 ]+bbw[16 +3 ];bbc+=bb54;bbv=bb54+bb89;bb89=(((bbv)>>(2 )|(bbv)<<(32 -2 ))^((bbv)>>( 
13 )|(bbv)<<(32 -13 ))^((bbv)>>(22 )|(bbv)<<(32 -22 )))+((bbv&bb20)^(bbv& bb55)^(bb20&bb55));bb54=bbs+(((bbc)>>(6 )|(bbc)<<(32 -6 ))^((bbc)>>(11 )| (bbc)<<(32 -11 ))^((bbc)>>(25 )|(bbc)<<(32 -25 )))+((bbc&bbp)^(~bbc&bbn))+ bb6[16 +4 ]+bbw[16 +4 ];bb44+=bb54;bbs=bb54+bb89;bb89=(((bbs)>>(2 )|(bbs)<< (32 -2 ))^((bbs)>>(13 )|(bbs)<<(32 -13 ))^((bbs)>>(22 )|(bbs)<<(32 -22 )))+(( bbs&bbv)^(bbs&bb20)^(bbv&bb20));bb54=bbn+(((bb44)>>(6 )|(bb44)<<(32 -6 ))^ ((bb44)>>(11 )|(bb44)<<(32 -11 ))^((bb44)>>(25 )|(bb44)<<(32 -25 )))+((bb44 &bbc)^(~bb44&bbp))+bb6[16 +5 ]+bbw[16 +5 ];bb55+=bb54;bbn=bb54+bb89;bb89= (((bbn)>>(2 )|(bbn)<<(32 -2 ))^((bbn)>>(13 )|(bbn)<<(32 -13 ))^((bbn)>>(22 )| (bbn)<<(32 -22 )))+((bbn&bbs)^(bbn&bbv)^(bbs&bbv));bb54=bbp+(((bb55)>>( 6 )|(bb55)<<(32 -6 ))^((bb55)>>(11 )|(bb55)<<(32 -11 ))^((bb55)>>(25 )|(bb55 )<<(32 -25 )))+((bb55&bb44)^(~bb55&bbc))+bb6[16 +6 ]+bbw[16 +6 ];bb20+=bb54 ;bbp=bb54+bb89;bb89=(((bbp)>>(2 )|(bbp)<<(32 -2 ))^((bbp)>>(13 )|(bbp)<<( 32 -13 ))^((bbp)>>(22 )|(bbp)<<(32 -22 )))+((bbp&bbn)^(bbp&bbs)^(bbn&bbs)); bb54=bbc+(((bb20)>>(6 )|(bb20)<<(32 -6 ))^((bb20)>>(11 )|(bb20)<<(32 -11 ))^ ((bb20)>>(25 )|(bb20)<<(32 -25 )))+((bb20&bb55)^(~bb20&bb44))+bb6[16 +7 ]+ bbw[16 +7 ];bbv+=bb54;bbc=bb54+bb89;bb89=(((bbc)>>(2 )|(bbc)<<(32 -2 ))^(( bbc)>>(13 )|(bbc)<<(32 -13 ))^((bbc)>>(22 )|(bbc)<<(32 -22 )))+((bbc&bbp)^( bbc&bbn)^(bbp&bbn));bb54=bb44+(((bbv)>>(6 )|(bbv)<<(32 -6 ))^((bbv)>>(11 )|(bbv)<<(32 -11 ))^((bbv)>>(25 )|(bbv)<<(32 -25 )))+((bbv&bb20)^(~bbv& bb55))+bb6[24 ]+bbw[24 ];bbs+=bb54;bb44=bb54+bb89;bb89=(((bb44)>>(2 )|( bb44)<<(32 -2 ))^((bb44)>>(13 )|(bb44)<<(32 -13 ))^((bb44)>>(22 )|(bb44)<<( 32 -22 )))+((bb44&bbc)^(bb44&bbp)^(bbc&bbp));bb54=bb55+(((bbs)>>(6 )|( bbs)<<(32 -6 ))^((bbs)>>(11 )|(bbs)<<(32 -11 ))^((bbs)>>(25 )|(bbs)<<(32 -25 )))+((bbs&bbv)^(~bbs&bb20))+bb6[24 +1 ]+bbw[24 +1 ];bbn+=bb54;bb55=bb54+ bb89;bb89=(((bb55)>>(2 )|(bb55)<<(32 -2 ))^((bb55)>>(13 )|(bb55)<<(32 -13 ))^ ((bb55)>>(22 )|(bb55)<<(32 -22 
)))+((bb55&bb44)^(bb55&bbc)^(bb44&bbc)); bb54=bb20+(((bbn)>>(6 )|(bbn)<<(32 -6 ))^((bbn)>>(11 )|(bbn)<<(32 -11 ))^(( bbn)>>(25 )|(bbn)<<(32 -25 )))+((bbn&bbs)^(~bbn&bbv))+bb6[24 +2 ]+bbw[24 +2 ];bbp+=bb54;bb20=bb54+bb89;bb89=(((bb20)>>(2 )|(bb20)<<(32 -2 ))^((bb20)>> (13 )|(bb20)<<(32 -13 ))^((bb20)>>(22 )|(bb20)<<(32 -22 )))+((bb20&bb55)^( bb20&bb44)^(bb55&bb44));bb54=bbv+(((bbp)>>(6 )|(bbp)<<(32 -6 ))^((bbp)>> (11 )|(bbp)<<(32 -11 ))^((bbp)>>(25 )|(bbp)<<(32 -25 )))+((bbp&bbn)^(~bbp& bbs))+bb6[24 +3 ]+bbw[24 +3 ];bbc+=bb54;bbv=bb54+bb89;bb89=(((bbv)>>(2 )|( bbv)<<(32 -2 ))^((bbv)>>(13 )|(bbv)<<(32 -13 ))^((bbv)>>(22 )|(bbv)<<(32 -22 )))+((bbv&bb20)^(bbv&bb55)^(bb20&bb55));bb54=bbs+(((bbc)>>(6 )|(bbc)<< (32 -6 ))^((bbc)>>(11 )|(bbc)<<(32 -11 ))^((bbc)>>(25 )|(bbc)<<(32 -25 )))+(( bbc&bbp)^(~bbc&bbn))+bb6[24 +4 ]+bbw[24 +4 ];bb44+=bb54;bbs=bb54+bb89; bb89=(((bbs)>>(2 )|(bbs)<<(32 -2 ))^((bbs)>>(13 )|(bbs)<<(32 -13 ))^((bbs)>> (22 )|(bbs)<<(32 -22 )))+((bbs&bbv)^(bbs&bb20)^(bbv&bb20));bb54=bbn+((( bb44)>>(6 )|(bb44)<<(32 -6 ))^((bb44)>>(11 )|(bb44)<<(32 -11 ))^((bb44)>>( 25 )|(bb44)<<(32 -25 )))+((bb44&bbc)^(~bb44&bbp))+bb6[24 +5 ]+bbw[24 +5 ]; bb55+=bb54;bbn=bb54+bb89;bb89=(((bbn)>>(2 )|(bbn)<<(32 -2 ))^((bbn)>>(13 )|(bbn)<<(32 -13 ))^((bbn)>>(22 )|(bbn)<<(32 -22 )))+((bbn&bbs)^(bbn&bbv)^ (bbs&bbv));bb54=bbp+(((bb55)>>(6 )|(bb55)<<(32 -6 ))^((bb55)>>(11 )|(bb55 )<<(32 -11 ))^((bb55)>>(25 )|(bb55)<<(32 -25 )))+((bb55&bb44)^(~bb55&bbc))+ bb6[24 +6 ]+bbw[24 +6 ];bb20+=bb54;bbp=bb54+bb89;bb89=(((bbp)>>(2 )|(bbp)<< (32 -2 ))^((bbp)>>(13 )|(bbp)<<(32 -13 ))^((bbp)>>(22 )|(bbp)<<(32 -22 )))+(( bbp&bbn)^(bbp&bbs)^(bbn&bbs));bb54=bbc+(((bb20)>>(6 )|(bb20)<<(32 -6 ))^ ((bb20)>>(11 )|(bb20)<<(32 -11 ))^((bb20)>>(25 )|(bb20)<<(32 -25 )))+((bb20 &bb55)^(~bb20&bb44))+bb6[24 +7 ]+bbw[24 +7 ];bbv+=bb54;bbc=bb54+bb89;bb89 =(((bbc)>>(2 )|(bbc)<<(32 -2 ))^((bbc)>>(13 )|(bbc)<<(32 -13 ))^((bbc)>>(22 )|(bbc)<<(32 -22 )))+((bbc&bbp)^(bbc&bbn)^(bbp&bbn));bb54=bb44+(((bbv)>> (6 
)|(bbv)<<(32 -6 ))^((bbv)>>(11 )|(bbv)<<(32 -11 ))^((bbv)>>(25 )|(bbv)<<( 32 -25 )))+((bbv&bb20)^(~bbv&bb55))+bb6[32 ]+bbw[32 ];bbs+=bb54;bb44=bb54 +bb89;bb89=(((bb44)>>(2 )|(bb44)<<(32 -2 ))^((bb44)>>(13 )|(bb44)<<(32 -13 ))^((bb44)>>(22 )|(bb44)<<(32 -22 )))+((bb44&bbc)^(bb44&bbp)^(bbc&bbp)); bb54=bb55+(((bbs)>>(6 )|(bbs)<<(32 -6 ))^((bbs)>>(11 )|(bbs)<<(32 -11 ))^(( bbs)>>(25 )|(bbs)<<(32 -25 )))+((bbs&bbv)^(~bbs&bb20))+bb6[32 +1 ]+bbw[32 + 1 ];bbn+=bb54;bb55=bb54+bb89;bb89=(((bb55)>>(2 )|(bb55)<<(32 -2 ))^((bb55 )>>(13 )|(bb55)<<(32 -13 ))^((bb55)>>(22 )|(bb55)<<(32 -22 )))+((bb55&bb44)^ (bb55&bbc)^(bb44&bbc));bb54=bb20+(((bbn)>>(6 )|(bbn)<<(32 -6 ))^((bbn)>> (11 )|(bbn)<<(32 -11 ))^((bbn)>>(25 )|(bbn)<<(32 -25 )))+((bbn&bbs)^(~bbn& bbv))+bb6[32 +2 ]+bbw[32 +2 ];bbp+=bb54;bb20=bb54+bb89;bb89=(((bb20)>>(2 )| (bb20)<<(32 -2 ))^((bb20)>>(13 )|(bb20)<<(32 -13 ))^((bb20)>>(22 )|(bb20)<< (32 -22 )))+((bb20&bb55)^(bb20&bb44)^(bb55&bb44));bb54=bbv+(((bbp)>>(6 )| (bbp)<<(32 -6 ))^((bbp)>>(11 )|(bbp)<<(32 -11 ))^((bbp)>>(25 )|(bbp)<<(32 - 25 )))+((bbp&bbn)^(~bbp&bbs))+bb6[32 +3 ]+bbw[32 +3 ];bbc+=bb54;bbv=bb54+ bb89;bb89=(((bbv)>>(2 )|(bbv)<<(32 -2 ))^((bbv)>>(13 )|(bbv)<<(32 -13 ))^(( bbv)>>(22 )|(bbv)<<(32 -22 )))+((bbv&bb20)^(bbv&bb55)^(bb20&bb55));bb54= bbs+(((bbc)>>(6 )|(bbc)<<(32 -6 ))^((bbc)>>(11 )|(bbc)<<(32 -11 ))^((bbc)>> (25 )|(bbc)<<(32 -25 )))+((bbc&bbp)^(~bbc&bbn))+bb6[32 +4 ]+bbw[32 +4 ];bb44 +=bb54;bbs=bb54+bb89;bb89=(((bbs)>>(2 )|(bbs)<<(32 -2 ))^((bbs)>>(13 )|( bbs)<<(32 -13 ))^((bbs)>>(22 )|(bbs)<<(32 -22 )))+((bbs&bbv)^(bbs&bb20)^( bbv&bb20));bb54=bbn+(((bb44)>>(6 )|(bb44)<<(32 -6 ))^((bb44)>>(11 )|(bb44 )<<(32 -11 ))^((bb44)>>(25 )|(bb44)<<(32 -25 )))+((bb44&bbc)^(~bb44&bbp))+ bb6[32 +5 ]+bbw[32 +5 ];bb55+=bb54;bbn=bb54+bb89;bb89=(((bbn)>>(2 )|(bbn)<< (32 -2 ))^((bbn)>>(13 )|(bbn)<<(32 -13 ))^((bbn)>>(22 )|(bbn)<<(32 -22 )))+(( bbn&bbs)^(bbn&bbv)^(bbs&bbv));bb54=bbp+(((bb55)>>(6 )|(bb55)<<(32 -6 ))^ ((bb55)>>(11 )|(bb55)<<(32 -11 ))^((bb55)>>(25 
)|(bb55)<<(32 -25 )))+((bb55 &bb44)^(~bb55&bbc))+bb6[32 +6 ]+bbw[32 +6 ];bb20+=bb54;bbp=bb54+bb89;bb89 =(((bbp)>>(2 )|(bbp)<<(32 -2 ))^((bbp)>>(13 )|(bbp)<<(32 -13 ))^((bbp)>>(22 )|(bbp)<<(32 -22 )))+((bbp&bbn)^(bbp&bbs)^(bbn&bbs));bb54=bbc+(((bb20)>> (6 )|(bb20)<<(32 -6 ))^((bb20)>>(11 )|(bb20)<<(32 -11 ))^((bb20)>>(25 )|( bb20)<<(32 -25 )))+((bb20&bb55)^(~bb20&bb44))+bb6[32 +7 ]+bbw[32 +7 ];bbv+= bb54;bbc=bb54+bb89;bb89=(((bbc)>>(2 )|(bbc)<<(32 -2 ))^((bbc)>>(13 )|(bbc )<<(32 -13 ))^((bbc)>>(22 )|(bbc)<<(32 -22 )))+((bbc&bbp)^(bbc&bbn)^(bbp& bbn));bb54=bb44+(((bbv)>>(6 )|(bbv)<<(32 -6 ))^((bbv)>>(11 )|(bbv)<<(32 - 11 ))^((bbv)>>(25 )|(bbv)<<(32 -25 )))+((bbv&bb20)^(~bbv&bb55))+bb6[40 ]+ bbw[40 ];bbs+=bb54;bb44=bb54+bb89;bb89=(((bb44)>>(2 )|(bb44)<<(32 -2 ))^( (bb44)>>(13 )|(bb44)<<(32 -13 ))^((bb44)>>(22 )|(bb44)<<(32 -22 )))+((bb44& bbc)^(bb44&bbp)^(bbc&bbp));bb54=bb55+(((bbs)>>(6 )|(bbs)<<(32 -6 ))^(( bbs)>>(11 )|(bbs)<<(32 -11 ))^((bbs)>>(25 )|(bbs)<<(32 -25 )))+((bbs&bbv)^( ~bbs&bb20))+bb6[40 +1 ]+bbw[40 +1 ];bbn+=bb54;bb55=bb54+bb89;bb89=(((bb55 )>>(2 )|(bb55)<<(32 -2 ))^((bb55)>>(13 )|(bb55)<<(32 -13 ))^((bb55)>>(22 )|( bb55)<<(32 -22 )))+((bb55&bb44)^(bb55&bbc)^(bb44&bbc));bb54=bb20+(((bbn )>>(6 )|(bbn)<<(32 -6 ))^((bbn)>>(11 )|(bbn)<<(32 -11 ))^((bbn)>>(25 )|(bbn)<< (32 -25 )))+((bbn&bbs)^(~bbn&bbv))+bb6[40 +2 ]+bbw[40 +2 ];bbp+=bb54;bb20= bb54+bb89;bb89=(((bb20)>>(2 )|(bb20)<<(32 -2 ))^((bb20)>>(13 )|(bb20)<<( 32 -13 ))^((bb20)>>(22 )|(bb20)<<(32 -22 )))+((bb20&bb55)^(bb20&bb44)^( bb55&bb44));bb54=bbv+(((bbp)>>(6 )|(bbp)<<(32 -6 ))^((bbp)>>(11 )|(bbp)<< (32 -11 ))^((bbp)>>(25 )|(bbp)<<(32 -25 )))+((bbp&bbn)^(~bbp&bbs))+bb6[40 + 3 ]+bbw[40 +3 ];bbc+=bb54;bbv=bb54+bb89;bb89=(((bbv)>>(2 )|(bbv)<<(32 -2 ))^ ((bbv)>>(13 )|(bbv)<<(32 -13 ))^((bbv)>>(22 )|(bbv)<<(32 -22 )))+((bbv&bb20 )^(bbv&bb55)^(bb20&bb55));bb54=bbs+(((bbc)>>(6 )|(bbc)<<(32 -6 ))^((bbc)>> (11 )|(bbc)<<(32 -11 ))^((bbc)>>(25 )|(bbc)<<(32 -25 )))+((bbc&bbp)^(~bbc& bbn))+bb6[40 +4 ]+bbw[40 +4 
];bb44+=bb54;bbs=bb54+bb89;bb89=(((bbs)>>(2 )| (bbs)<<(32 -2 ))^((bbs)>>(13 )|(bbs)<<(32 -13 ))^((bbs)>>(22 )|(bbs)<<(32 - 22 )))+((bbs&bbv)^(bbs&bb20)^(bbv&bb20));bb54=bbn+(((bb44)>>(6 )|(bb44)<< (32 -6 ))^((bb44)>>(11 )|(bb44)<<(32 -11 ))^((bb44)>>(25 )|(bb44)<<(32 -25 )))+ ((bb44&bbc)^(~bb44&bbp))+bb6[40 +5 ]+bbw[40 +5 ];bb55+=bb54;bbn=bb54+bb89 ;bb89=(((bbn)>>(2 )|(bbn)<<(32 -2 ))^((bbn)>>(13 )|(bbn)<<(32 -13 ))^((bbn)>> (22 )|(bbn)<<(32 -22 )))+((bbn&bbs)^(bbn&bbv)^(bbs&bbv));bb54=bbp+((( bb55)>>(6 )|(bb55)<<(32 -6 ))^((bb55)>>(11 )|(bb55)<<(32 -11 ))^((bb55)>>( 25 )|(bb55)<<(32 -25 )))+((bb55&bb44)^(~bb55&bbc))+bb6[40 +6 ]+bbw[40 +6 ]; bb20+=bb54;bbp=bb54+bb89;bb89=(((bbp)>>(2 )|(bbp)<<(32 -2 ))^((bbp)>>(13 )|(bbp)<<(32 -13 ))^((bbp)>>(22 )|(bbp)<<(32 -22 )))+((bbp&bbn)^(bbp&bbs)^ (bbn&bbs));bb54=bbc+(((bb20)>>(6 )|(bb20)<<(32 -6 ))^((bb20)>>(11 )|(bb20 )<<(32 -11 ))^((bb20)>>(25 )|(bb20)<<(32 -25 )))+((bb20&bb55)^(~bb20&bb44))+ bb6[40 +7 ]+bbw[40 +7 ];bbv+=bb54;bbc=bb54+bb89;bb89=(((bbc)>>(2 )|(bbc)<< (32 -2 ))^((bbc)>>(13 )|(bbc)<<(32 -13 ))^((bbc)>>(22 )|(bbc)<<(32 -22 )))+(( bbc&bbp)^(bbc&bbn)^(bbp&bbn));bb54=bb44+(((bbv)>>(6 )|(bbv)<<(32 -6 ))^( (bbv)>>(11 )|(bbv)<<(32 -11 ))^((bbv)>>(25 )|(bbv)<<(32 -25 )))+((bbv&bb20)^ (~bbv&bb55))+bb6[48 ]+bbw[48 ];bbs+=bb54;bb44=bb54+bb89;bb89=(((bb44)>> (2 )|(bb44)<<(32 -2 ))^((bb44)>>(13 )|(bb44)<<(32 -13 ))^((bb44)>>(22 )|( bb44)<<(32 -22 )))+((bb44&bbc)^(bb44&bbp)^(bbc&bbp));bb54=bb55+(((bbs)>> (6 )|(bbs)<<(32 -6 ))^((bbs)>>(11 )|(bbs)<<(32 -11 ))^((bbs)>>(25 )|(bbs)<<( 32 -25 )))+((bbs&bbv)^(~bbs&bb20))+bb6[48 +1 ]+bbw[48 +1 ];bbn+=bb54;bb55= bb54+bb89;bb89=(((bb55)>>(2 )|(bb55)<<(32 -2 ))^((bb55)>>(13 )|(bb55)<<( 32 -13 ))^((bb55)>>(22 )|(bb55)<<(32 -22 )))+((bb55&bb44)^(bb55&bbc)^(bb44 &bbc));bb54=bb20+(((bbn)>>(6 )|(bbn)<<(32 -6 ))^((bbn)>>(11 )|(bbn)<<(32 - 11 ))^((bbn)>>(25 )|(bbn)<<(32 -25 )))+((bbn&bbs)^(~bbn&bbv))+bb6[48 +2 ]+ bbw[48 +2 ];bbp+=bb54;bb20=bb54+bb89;bb89=(((bb20)>>(2 )|(bb20)<<(32 -2 ))^ 
((bb20)>>(13 )|(bb20)<<(32 -13 ))^((bb20)>>(22 )|(bb20)<<(32 -22 )))+((bb20 &bb55)^(bb20&bb44)^(bb55&bb44));bb54=bbv+(((bbp)>>(6 )|(bbp)<<(32 -6 ))^ ((bbp)>>(11 )|(bbp)<<(32 -11 ))^((bbp)>>(25 )|(bbp)<<(32 -25 )))+((bbp&bbn)^ (~bbp&bbs))+bb6[48 +3 ]+bbw[48 +3 ];bbc+=bb54;bbv=bb54+bb89;bb89=(((bbv)>> (2 )|(bbv)<<(32 -2 ))^((bbv)>>(13 )|(bbv)<<(32 -13 ))^((bbv)>>(22 )|(bbv)<<( 32 -22 )))+((bbv&bb20)^(bbv&bb55)^(bb20&bb55));bb54=bbs+(((bbc)>>(6 )|( bbc)<<(32 -6 ))^((bbc)>>(11 )|(bbc)<<(32 -11 ))^((bbc)>>(25 )|(bbc)<<(32 -25 )))+((bbc&bbp)^(~bbc&bbn))+bb6[48 +4 ]+bbw[48 +4 ];bb44+=bb54;bbs=bb54+ bb89;bb89=(((bbs)>>(2 )|(bbs)<<(32 -2 ))^((bbs)>>(13 )|(bbs)<<(32 -13 ))^(( bbs)>>(22 )|(bbs)<<(32 -22 )))+((bbs&bbv)^(bbs&bb20)^(bbv&bb20));bb54= bbn+(((bb44)>>(6 )|(bb44)<<(32 -6 ))^((bb44)>>(11 )|(bb44)<<(32 -11 ))^(( bb44)>>(25 )|(bb44)<<(32 -25 )))+((bb44&bbc)^(~bb44&bbp))+bb6[48 +5 ]+bbw[ 48 +5 ];bb55+=bb54;bbn=bb54+bb89;bb89=(((bbn)>>(2 )|(bbn)<<(32 -2 ))^((bbn )>>(13 )|(bbn)<<(32 -13 ))^((bbn)>>(22 )|(bbn)<<(32 -22 )))+((bbn&bbs)^(bbn &bbv)^(bbs&bbv));bb54=bbp+(((bb55)>>(6 )|(bb55)<<(32 -6 ))^((bb55)>>(11 )| (bb55)<<(32 -11 ))^((bb55)>>(25 )|(bb55)<<(32 -25 )))+((bb55&bb44)^(~bb55& bbc))+bb6[48 +6 ]+bbw[48 +6 ];bb20+=bb54;bbp=bb54+bb89;bb89=(((bbp)>>(2 )| (bbp)<<(32 -2 ))^((bbp)>>(13 )|(bbp)<<(32 -13 ))^((bbp)>>(22 )|(bbp)<<(32 - 22 )))+((bbp&bbn)^(bbp&bbs)^(bbn&bbs));bb54=bbc+(((bb20)>>(6 )|(bb20)<< (32 -6 ))^((bb20)>>(11 )|(bb20)<<(32 -11 ))^((bb20)>>(25 )|(bb20)<<(32 -25 )))+ ((bb20&bb55)^(~bb20&bb44))+bb6[48 +7 ]+bbw[48 +7 ];bbv+=bb54;bbc=bb54+ bb89;bb89=(((bbc)>>(2 )|(bbc)<<(32 -2 ))^((bbc)>>(13 )|(bbc)<<(32 -13 ))^(( bbc)>>(22 )|(bbc)<<(32 -22 )))+((bbc&bbp)^(bbc&bbn)^(bbp&bbn));bb54=bb44 +(((bbv)>>(6 )|(bbv)<<(32 -6 ))^((bbv)>>(11 )|(bbv)<<(32 -11 ))^((bbv)>>(25 )|(bbv)<<(32 -25 )))+((bbv&bb20)^(~bbv&bb55))+bb6[56 ]+bbw[56 ];bbs+=bb54 ;bb44=bb54+bb89;bb89=(((bb44)>>(2 )|(bb44)<<(32 -2 ))^((bb44)>>(13 )|( bb44)<<(32 -13 ))^((bb44)>>(22 )|(bb44)<<(32 -22 
)))+((bb44&bbc)^(bb44&bbp )^(bbc&bbp));bb54=bb55+(((bbs)>>(6 )|(bbs)<<(32 -6 ))^((bbs)>>(11 )|(bbs)<< (32 -11 ))^((bbs)>>(25 )|(bbs)<<(32 -25 )))+((bbs&bbv)^(~bbs&bb20))+bb6[56 +1 ]+bbw[56 +1 ];bbn+=bb54;bb55=bb54+bb89;bb89=(((bb55)>>(2 )|(bb55)<<(32 -2 ))^((bb55)>>(13 )|(bb55)<<(32 -13 ))^((bb55)>>(22 )|(bb55)<<(32 -22 )))+( (bb55&bb44)^(bb55&bbc)^(bb44&bbc));bb54=bb20+(((bbn)>>(6 )|(bbn)<<(32 - 6 ))^((bbn)>>(11 )|(bbn)<<(32 -11 ))^((bbn)>>(25 )|(bbn)<<(32 -25 )))+((bbn& bbs)^(~bbn&bbv))+bb6[56 +2 ]+bbw[56 +2 ];bbp+=bb54;bb20=bb54+bb89;bb89=(( (bb20)>>(2 )|(bb20)<<(32 -2 ))^((bb20)>>(13 )|(bb20)<<(32 -13 ))^((bb20)>>( 22 )|(bb20)<<(32 -22 )))+((bb20&bb55)^(bb20&bb44)^(bb55&bb44));bb54=bbv+ (((bbp)>>(6 )|(bbp)<<(32 -6 ))^((bbp)>>(11 )|(bbp)<<(32 -11 ))^((bbp)>>(25 )| (bbp)<<(32 -25 )))+((bbp&bbn)^(~bbp&bbs))+bb6[56 +3 ]+bbw[56 +3 ];bbc+=bb54 ;bbv=bb54+bb89;bb89=(((bbv)>>(2 )|(bbv)<<(32 -2 ))^((bbv)>>(13 )|(bbv)<<( 32 -13 ))^((bbv)>>(22 )|(bbv)<<(32 -22 )))+((bbv&bb20)^(bbv&bb55)^(bb20& bb55));bb54=bbs+(((bbc)>>(6 )|(bbc)<<(32 -6 ))^((bbc)>>(11 )|(bbc)<<(32 - 11 ))^((bbc)>>(25 )|(bbc)<<(32 -25 )))+((bbc&bbp)^(~bbc&bbn))+bb6[56 +4 ]+ bbw[56 +4 ];bb44+=bb54;bbs=bb54+bb89;bb89=(((bbs)>>(2 )|(bbs)<<(32 -2 ))^( (bbs)>>(13 )|(bbs)<<(32 -13 ))^((bbs)>>(22 )|(bbs)<<(32 -22 )))+((bbs&bbv)^ (bbs&bb20)^(bbv&bb20));bb54=bbn+(((bb44)>>(6 )|(bb44)<<(32 -6 ))^((bb44)>> (11 )|(bb44)<<(32 -11 ))^((bb44)>>(25 )|(bb44)<<(32 -25 )))+((bb44&bbc)^(~ bb44&bbp))+bb6[56 +5 ]+bbw[56 +5 ];bb55+=bb54;bbn=bb54+bb89;bb89=(((bbn)>> (2 )|(bbn)<<(32 -2 ))^((bbn)>>(13 )|(bbn)<<(32 -13 ))^((bbn)>>(22 )|(bbn)<<( 32 -22 )))+((bbn&bbs)^(bbn&bbv)^(bbs&bbv));bb54=bbp+(((bb55)>>(6 )|(bb55 )<<(32 -6 ))^((bb55)>>(11 )|(bb55)<<(32 -11 ))^((bb55)>>(25 )|(bb55)<<(32 - 25 )))+((bb55&bb44)^(~bb55&bbc))+bb6[56 +6 ]+bbw[56 +6 ];bb20+=bb54;bbp= bb54+bb89;bb89=(((bbp)>>(2 )|(bbp)<<(32 -2 ))^((bbp)>>(13 )|(bbp)<<(32 -13 ))^((bbp)>>(22 )|(bbp)<<(32 -22 )))+((bbp&bbn)^(bbp&bbs)^(bbn&bbs));bb54 =bbc+(((bb20)>>(6 
)|(bb20)<<(32 -6 ))^((bb20)>>(11 )|(bb20)<<(32 -11 ))^(( bb20)>>(25 )|(bb20)<<(32 -25 )))+((bb20&bb55)^(~bb20&bb44))+bb6[56 +7 ]+ bbw[56 +7 ];bbv+=bb54;bbc=bb54+bb89;bb23[0 ]+=bbc;bb23[1 ]+=bbp;bb23[2 ]+= bbn;bb23[3 ]+=bbs;bb23[4 ]+=bbv;bb23[5 ]+=bb20;bb23[6 ]+=bb55;bb23[7 ]+= bb44;}}bbb bb1865(bb412*bbi){bb40 bbd bb23[8 ]={0x6a09e667 ,0xbb67ae85 , 0x3c6ef372 ,0xa54ff53a ,0x510e527f ,0x9b05688c ,0x1f83d9ab ,0x5be0cd19 }; bbi->bb5=0 ;bb74(bbi->bb23,bb23,bb12(bb23));}bbb bb1962(bb965*bbi){ bb40 bbd bb23[8 ]={0xc1059ed8 ,0x367cd507 ,0x3070dd17 ,0xf70e5939 , 0xffc00b31 ,0x68581511 ,0x64f98fa7 ,0xbefa4fa4 };bbi->bb5=0 ;bb74(bbi-> bb23,bb23,bb12(bb23));}bbb bb1277(bb412*bbi,bbh bbb*bb516,bbo bb5){ bbh bbf*bbx=(bbh bbf* )bb516;bbo bb398=bbi->bb5%bb12(bbi->bb105);bbi ->bb5+=bb5;bbm(bb398){bbo bb11=bb12(bbi->bb105)-bb398;bb74(bbi->bb105 +bb398,bbx,((bb5)<(bb11)?(bb5):(bb11)));bbm(bb5<bb11)bb4;bbx+=bb11; bb5-=bb11;bb2470(bbi->bb23,bbi->bb105);}bb90(;bb5>=bb12(bbi->bb105); bb5-=bb12(bbi->bb105),bbx+=bb12(bbi->bb105))bb2470(bbi->bb23,bbx); bb74(bbi->bb105,bbx,bb5);}bb40 bbb bb2395(bb412*bbi,bbb*bb1,bbo bb366 ){bbd bb1037[2 ]={(bbd)(bbi->bb5>>29 ),(bbd)(bbi->bb5<<3 )};bbf bb437[ bb12(bb1037)];bbo bbz;bb90(bbz=0 ;bbz<bb12(bb437);bbz++)bb437[bbz]= bb1037[bbz/4 ]>>((3 -bbz%4 ) *8 )&0xff ;{bbf bb1352[]={0x80 },bb1353[bb12( bbi->bb105)]={0 };bbo bb398=bbi->bb5%bb12(bbi->bb105);bb1277(bbi, bb1352,1 );bb1277(bbi,bb1353,(bb12(bbi->bb105) *2 -1 -bb12(bb437)-bb398)% bb12(bbi->bb105));}bb1277(bbi,bb437,bb12(bb437));bb90(bbz=0 ;bbz<bb366 ;bbz++)((bbf* )bb1)[bbz]=bbi->bb23[bbz/4 ]>>((3 -bbz%4 ) *8 )&0xff ;}bbb bb1860(bb412*bbi,bbb*bb1){bb2395(bbi,bb1,32 );}bbb bb1913(bb965*bbi, bbb*bb1){bb2395(bbi,bb1,28 );} #ifdef _MSC_VER #define bb2134( bbc) bbc##i64 #else #define bb2134( bbc) bbc##ll #endif #define bb543( bbc, bbp, bbn, bbs) bb2134( bbc), bb2134( bbp), bb2134 \ ( bbn), bb2134( bbs), bb40 bbb bb2405(bb57 bb23[8 ],bbh bbf bb98[128 ]){bb40 bb57 bb6[80 ]={ bb543(0x428a2f98d728ae22 
,0x7137449123ef65cd ,0xb5c0fbcfec4d3b2f , 0xe9b5dba58189dbbc )bb543(0x3956c25bf348b538 ,0x59f111f1b605d019 , 0x923f82a4af194f9b ,0xab1c5ed5da6d8118 )bb543(0xd807aa98a3030242 , 0x12835b0145706fbe ,0x243185be4ee4b28c ,0x550c7dc3d5ffb4e2 )bb543( 0x72be5d74f27b896f ,0x80deb1fe3b1696b1 ,0x9bdc06a725c71235 , 0xc19bf174cf692694 )bb543(0xe49b69c19ef14ad2 ,0xefbe4786384f25e3 , 0x0fc19dc68b8cd5b5 ,0x240ca1cc77ac9c65 )bb543(0x2de92c6f592b0275 , 0x4a7484aa6ea6e483 ,0x5cb0a9dcbd41fbd4 ,0x76f988da831153b5 )bb543( 0x983e5152ee66dfab ,0xa831c66d2db43210 ,0xb00327c898fb213f , 0xbf597fc7beef0ee4 )bb543(0xc6e00bf33da88fc2 ,0xd5a79147930aa725 , 0x06ca6351e003826f ,0x142929670a0e6e70 )bb543(0x27b70a8546d22ffc , 0x2e1b21385c26c926 ,0x4d2c6dfc5ac42aed ,0x53380d139d95b3df )bb543( 0x650a73548baf63de ,0x766a0abb3c77b2a8 ,0x81c2c92e47edaee6 , 0x92722c851482353b )bb543(0xa2bfe8a14cf10364 ,0xa81a664bbc423001 , 0xc24b8b70d0f89791 ,0xc76c51a30654be30 )bb543(0xd192e819d6ef5218 , 0xd69906245565a910 ,0xf40e35855771202a ,0x106aa07032bbd1b8 )bb543( 0x19a4c116b8d2d0c8 ,0x1e376c085141ab53 ,0x2748774cdf8eeb99 , 0x34b0bcb5e19b48a8 )bb543(0x391c0cb3c5c95a63 ,0x4ed8aa4ae3418acb , 0x5b9cca4f7763e373 ,0x682e6ff3d6b2b8a3 )bb543(0x748f82ee5defb2fc , 0x78a5636f43172f60 ,0x84c87814a1f0ab72 ,0x8cc702081a6439ec )bb543( 0x90befffa23631e28 ,0xa4506cebde82bde9 ,0xbef9a3f7b2c67915 , 0xc67178f2e372532b )bb543(0xca273eceea26619c ,0xd186b8c721c0c207 , 0xeada7dd6cde0eb1e ,0xf57d4f7fee6ed178 )bb543(0x06f067aa72176fba , 0x0a637dc5a2c898a6 ,0x113f9804bef90dae ,0x1b710b35131c471b )bb543( 0x28db77f523047d84 ,0x32caab7b40c72493 ,0x3c9ebe0a15c9bebc , 0x431d67c49c100d4c )bb543(0x4cc5d4becb3e42b6 ,0x597f299cfc657e2a , 0x5fcb6fab3ad6faec ,0x6c44198c4a475817 )};bb27(bb12(bbe)>=4 );{bb57 bbc= bb23[0 ],bbp=bb23[1 ],bbn=bb23[2 ],bbs=bb23[3 ],bbv=bb23[4 ],bb20=bb23[5 ], bb55=bb23[6 ],bb44=bb23[7 ];bb57 bb110[16 ],bb54,bb89;bbe bbz;bb90(bbz=0 ;bbz<16 ;bbz++,bb98+=8 )bb110[bbz]=((bb57)(bbd)(bb98[7 ]|bb98[6 ]<<8 |bb98 [5 ]<<16 |bb98[4 
]<<24 )|(bb57)(bbd)(bb98[3 ]|bb98[2 ]<<8 |bb98[1 ]<<16 |bb98[ 0 ]<<24 )<<32 );bb90(bbz=0 ;bbz<80 ;bbz++){bb57 bbw;bbm(bbz<16 )bbw=bb110[ bbz];bb50{bbw=bb110[(bbz+16 -15 )&15 ];bb54=((bbw)>>(1 )|(bbw)<<(64 -1 ))^( (bbw)>>(8 )|(bbw)<<(64 -8 ))^(bbw>>7 );bbw=bb110[(bbz+16 -2 )&15 ];bb89=(( bbw)>>(19 )|(bbw)<<(64 -19 ))^((bbw)>>(61 )|(bbw)<<(64 -61 ))^(bbw>>6 );bbw= (bb110[bbz&15 ]+=bb54+bb110[(bbz+16 -7 )&15 ]+bb89);}bb89=(((bbc)>>(28 )|( bbc)<<(64 -28 ))^((bbc)>>(34 )|(bbc)<<(64 -34 ))^((bbc)>>(39 )|(bbc)<<(64 - 39 )))+((bbc&bbp)^(bbc&bbn)^(bbp&bbn));bb54=bb44+(((bbv)>>(14 )|(bbv)<< (64 -14 ))^((bbv)>>(18 )|(bbv)<<(64 -18 ))^((bbv)>>(41 )|(bbv)<<(64 -41 )))+( (bbv&bb20)^(~bbv&bb55))+bb6[bbz]+bbw;bbs+=bb54;bb44=bb55;bb55=bb20; bb20=bbv;bbv=bbs;bbs=bbn;bbn=bbp;bbp=bbc;bbc=bb54+bb89;}bb23[0 ]+=bbc; bb23[1 ]+=bbp;bb23[2 ]+=bbn;bb23[3 ]+=bbs;bb23[4 ]+=bbv;bb23[5 ]+=bb20; bb23[6 ]+=bb55;bb23[7 ]+=bb44;}}bbb bb1851(bb315*bbi){bb40 bb57 bb23[8 ] ={bb543(0x6a09e667f3bcc908 ,0xbb67ae8584caa73b ,0x3c6ef372fe94f82b , 0xa54ff53a5f1d36f1 )bb543(0x510e527fade682d1 ,0x9b05688c2b3e6c1f , 0x1f83d9abfb41bd6b ,0x5be0cd19137e2179 )};bbi->bb5=0 ;bb74(bbi->bb23, bb23,bb12(bb23));}bbb bb1837(bb626*bbi){bb40 bb57 bb23[8 ]={bb543( 0xcbbb9d5dc1059ed8 ,0x629a292a367cd507 ,0x9159015a3070dd17 , 0x152fecd8f70e5939 )bb543(0x67332667ffc00b31 ,0x8eb44a8768581511 , 0xdb0c2e0d64f98fa7 ,0x47b5481dbefa4fa4 )};bbi->bb5=0 ;bb74(bbi->bb23, bb23,bb12(bb23));}bbb bb1838(bb978*bbi){bb40 bb57 bb23[8 ]={bb543( 0x8C3D37C819544DA2 ,0x73E1996689DCD4D6 ,0x1DFAB7AE32FF9C82 , 0x679DD514582F9FCF )bb543(0x0F6D2B697BD44DA8 ,0x77E36F7304C48942 , 0x3F9D85A86A1D36C8 ,0x1112E6AD91D692A1 )};bbi->bb5=0 ;bb74(bbi->bb23, bb23,bb12(bb23));}bbb bb1854(bb961*bbi){bb40 bb57 bb23[8 ]={bb543( 0x22312194FC2BF72C ,0x9F555FA3C84C64C2 ,0x2393B86B6F53B151 , 0x963877195940EABD )bb543(0x96283EE2A88EFFE3 ,0xBE5E1E2553863992 , 0x2B0199FC2C85B8AA ,0x0EB72DDC81C52CA2 )};bbi->bb5=0 ;bb74(bbi->bb23, bb23,bb12(bb23));}bbb bb1065(bb315*bbi,bbh 
bbb*bb516,bbo bb5){bbh bbf *bbx=(bbh bbf* )bb516;bbo bb398=bbi->bb5%bb12(bbi->bb105);bbi->bb5+= bb5;bbm(bb398){bbo bb11=bb12(bbi->bb105)-bb398;bb74(bbi->bb105+bb398, bbx,((bb5)<(bb11)?(bb5):(bb11)));bbm(bb5<bb11)bb4;bbx+=bb11;bb5-=bb11 ;bb2405(bbi->bb23,bbi->bb105);}bb90(;bb5>=bb12(bbi->bb105);bb5-=bb12( bbi->bb105),bbx+=bb12(bbi->bb105))bb2405(bbi->bb23,bbx);bb74(bbi-> bb105,bbx,bb5);}bb40 bbb bb2270(bb315*bbi,bbb*bb1,bbo bb366){bbd bb1037[4 ]={0 ,0 ,(bbd)(bbi->bb5>>29 ),(bbd)(bbi->bb5<<3 )};bbf bb437[bb12 (bb1037)];bbo bbz;bb90(bbz=0 ;bbz<bb12(bb437);bbz++)bb437[bbz]=bb1037[ bbz/4 ]>>((3 -bbz%4 ) *8 )&0xff ;{bbf bb1352[]={0x80 },bb1353[bb12(bbi-> bb105)]={0 };bbo bb398=bbi->bb5%bb12(bbi->bb105);bb1065(bbi,bb1352,1 ); bb1065(bbi,bb1353,(bb12(bbi->bb105) *2 -1 -bb12(bb437)-bb398)%bb12(bbi ->bb105));}bb1065(bbi,bb437,bb12(bb437));{bb90(bbz=0 ;bbz<bb366;bbz++)( (bbf* )bb1)[bbz]=(bbf)(bbi->bb23[bbz/8 ]>>((7 -bbz%8 ) *8 )&0xff );}}bbb bb1885(bb315*bbi,bbb*bb1){bb2270(bbi,bb1,64 );}bbb bb1855(bb626*bbi, bbb*bb1){bb2270(bbi,bb1,48 );}bbb bb1897(bb626*bbi,bbb*bb1){bb2270(bbi ,bb1,28 );}bbb bb1845(bb626*bbi,bbb*bb1){bb2270(bbi,bb1,32 );}bbb bb1955 (bbb*bb1,bbh bbb*bbx,bbo bb5){bb459 bb82;bb1898(&bb82);bb1331(&bb82, bbx,bb5);bb1844(&bb82,bb1);}bbb bb1916(bbb*bb1,bbh bbb*bbx,bbo bb5){ bb412 bb82;bb1865(&bb82);bb1277(&bb82,bbx,bb5);bb1860(&bb82,bb1);}bbb bb2020(bbb*bb1,bbh bbb*bbx,bbo bb5){bb626 bb82;bb1837(&bb82);bb1065(& bb82,bbx,bb5);bb1855(&bb82,bb1);}bbb bb1983(bbb*bb1,bbh bbb*bbx,bbo bb5){bb315 bb82;bb1851(&bb82);bb1065(&bb82,bbx,bb5);bb1885(&bb82,bb1); }bbb bb1967(bbb*bb1,bbh bbb*bbx,bbo bb5){bb978 bb82;bb1838(&bb82); bb1065(&bb82,bbx,bb5);bb1897(&bb82,bb1);}bbb bb1987(bbb*bb1,bbh bbb* bbx,bbo bb5){bb961 bb82;bb1854(&bb82);bb1065(&bb82,bbx,bb5);bb1845(& bb82,bb1);}bbb bb2080(bbb*bb1,bb62 bbx){bb1955(bb1,bbx,(bbo)bb1133( bbx));}bbb bb2022(bbb*bb1,bb62 bbx){bb1916(bb1,bbx,(bbo)bb1133(bbx)); }bbb bb2091(bbb*bb1,bb62 bbx){bb2020(bb1,bbx,(bbo)bb1133(bbx));}bbb 
bb2082(bbb*bb1,bb62 bbx){bb1983(bb1,bbx,(bbo)bb1133(bbx));}bbb bb2057 (bbb*bb1,bb62 bbx){bb1967(bb1,bbx,(bbo)bb1133(bbx));}bbb bb2087(bbb* bb1,bb62 bbx){bb1987(bb1,bbx,(bbo)bb1133(bbx));}
gpl-2.0
jthornber/linux-2.6
drivers/infiniband/core/umem.c
806
9151
/* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/sched.h> #include <linux/export.h> #include <linux/hugetlb.h> #include <linux/dma-attrs.h> #include <linux/slab.h> #include <rdma/ib_umem_odp.h> #include "uverbs.h" static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) { struct scatterlist *sg; struct page *page; int i; if (umem->nmap > 0) ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->nmap, DMA_BIDIRECTIONAL); for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { page = sg_page(sg); if (umem->writable && dirty) set_page_dirty_lock(page); put_page(page); } sg_free_table(&umem->sg_head); return; } /** * ib_umem_get - Pin and DMA map userspace memory. * * If access flags indicate ODP memory, avoid pinning. Instead, stores * the mm for future page fault handling in conjunction with MMU notifiers. * * @context: userspace context to pin memory for * @addr: userspace virtual address to start at * @size: length of region to pin * @access: IB_ACCESS_xxx flags for memory being pinned * @dmasync: flush in-flight DMA when the memory region is written */ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, size_t size, int access, int dmasync) { struct ib_umem *umem; struct page **page_list; struct vm_area_struct **vma_list; unsigned long locked; unsigned long lock_limit; unsigned long cur_base; unsigned long npages; int ret; int i; DEFINE_DMA_ATTRS(attrs); struct scatterlist *sg, *sg_list_start; int need_release = 0; if (dmasync) dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); if (!size) return ERR_PTR(-EINVAL); /* * If the combination of the addr and size requested for this memory * region causes an integer overflow, return error. 
*/ if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size)) return ERR_PTR(-EINVAL); if (!can_do_mlock()) return ERR_PTR(-EPERM); umem = kzalloc(sizeof *umem, GFP_KERNEL); if (!umem) return ERR_PTR(-ENOMEM); umem->context = context; umem->length = size; umem->address = addr; umem->page_size = PAGE_SIZE; umem->pid = get_task_pid(current, PIDTYPE_PID); /* * We ask for writable memory if any of the following * access flags are set. "Local write" and "remote write" * obviously require write access. "Remote atomic" can do * things like fetch and add, which will modify memory, and * "MW bind" can change permissions by binding a window. */ umem->writable = !!(access & (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); if (access & IB_ACCESS_ON_DEMAND) { ret = ib_umem_odp_get(context, umem); if (ret) { kfree(umem); return ERR_PTR(ret); } return umem; } umem->odp_data = NULL; /* We assume the memory is from hugetlb until proved otherwise */ umem->hugetlb = 1; page_list = (struct page **) __get_free_page(GFP_KERNEL); if (!page_list) { kfree(umem); return ERR_PTR(-ENOMEM); } /* * if we can't alloc the vma_list, it's not so bad; * just assume the memory is not hugetlb memory */ vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL); if (!vma_list) umem->hugetlb = 0; npages = ib_umem_num_pages(umem); down_write(&current->mm->mmap_sem); locked = npages + current->mm->pinned_vm; lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { ret = -ENOMEM; goto out; } cur_base = addr & PAGE_MASK; if (npages == 0) { ret = -EINVAL; goto out; } ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL); if (ret) goto out; need_release = 1; sg_list_start = umem->sg_head.sgl; while (npages) { ret = get_user_pages(current, current->mm, cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof (struct page *)), 1, !umem->writable, page_list, vma_list); if (ret < 0) goto out; 
umem->npages += ret; cur_base += ret * PAGE_SIZE; npages -= ret; for_each_sg(sg_list_start, sg, ret, i) { if (vma_list && !is_vm_hugetlb_page(vma_list[i])) umem->hugetlb = 0; sg_set_page(sg, page_list[i], PAGE_SIZE, 0); } /* preparing for next loop */ sg_list_start = sg; } umem->nmap = ib_dma_map_sg_attrs(context->device, umem->sg_head.sgl, umem->npages, DMA_BIDIRECTIONAL, &attrs); if (umem->nmap <= 0) { ret = -ENOMEM; goto out; } ret = 0; out: if (ret < 0) { if (need_release) __ib_umem_release(context->device, umem, 0); put_pid(umem->pid); kfree(umem); } else current->mm->pinned_vm = locked; up_write(&current->mm->mmap_sem); if (vma_list) free_page((unsigned long) vma_list); free_page((unsigned long) page_list); return ret < 0 ? ERR_PTR(ret) : umem; } EXPORT_SYMBOL(ib_umem_get); static void ib_umem_account(struct work_struct *work) { struct ib_umem *umem = container_of(work, struct ib_umem, work); down_write(&umem->mm->mmap_sem); umem->mm->pinned_vm -= umem->diff; up_write(&umem->mm->mmap_sem); mmput(umem->mm); kfree(umem); } /** * ib_umem_release - release memory pinned with ib_umem_get * @umem: umem struct to release */ void ib_umem_release(struct ib_umem *umem) { struct ib_ucontext *context = umem->context; struct mm_struct *mm; struct task_struct *task; unsigned long diff; if (umem->odp_data) { ib_umem_odp_release(umem); return; } __ib_umem_release(umem->context->device, umem, 1); task = get_pid_task(umem->pid, PIDTYPE_PID); put_pid(umem->pid); if (!task) goto out; mm = get_task_mm(task); put_task_struct(task); if (!mm) goto out; diff = ib_umem_num_pages(umem); /* * We may be called with the mm's mmap_sem already held. This * can happen when a userspace munmap() is the call that drops * the last reference to our file and calls our release * method. If there are memory regions to destroy, we'll end * up here and not be able to take the mmap_sem. In that case * we defer the vm_locked accounting to the system workqueue. 
*/ if (context->closing) { if (!down_write_trylock(&mm->mmap_sem)) { INIT_WORK(&umem->work, ib_umem_account); umem->mm = mm; umem->diff = diff; queue_work(ib_wq, &umem->work); return; } } else down_write(&mm->mmap_sem); mm->pinned_vm -= diff; up_write(&mm->mmap_sem); mmput(mm); out: kfree(umem); } EXPORT_SYMBOL(ib_umem_release); int ib_umem_page_count(struct ib_umem *umem) { int shift; int i; int n; struct scatterlist *sg; if (umem->odp_data) return ib_umem_num_pages(umem); shift = ilog2(umem->page_size); n = 0; for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) n += sg_dma_len(sg) >> shift; return n; } EXPORT_SYMBOL(ib_umem_page_count); /* * Copy from the given ib_umem's pages to the given buffer. * * umem - the umem to copy from * offset - offset to start copying from * dst - destination buffer * length - buffer length * * Returns 0 on success, or an error code. */ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, size_t length) { size_t end = offset + length; int ret; if (offset > umem->length || length > umem->length - offset) { pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n", offset, umem->length, end); return -EINVAL; } ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length, offset + ib_umem_offset(umem)); if (ret < 0) return ret; else if (ret != length) return -EINVAL; else return 0; } EXPORT_SYMBOL(ib_umem_copy_from);
gpl-2.0
rjwysocki/linux-pm
fs/nilfs2/page.c
806
14936
/* * page.c - buffer/page management specific to NILFS * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Written by Ryusuke Konishi <ryusuke@osrg.net>, * Seiji Kihara <kihara@osrg.net>. */ #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/swap.h> #include <linux/bitops.h> #include <linux/page-flags.h> #include <linux/list.h> #include <linux/highmem.h> #include <linux/pagevec.h> #include <linux/gfp.h> #include "nilfs.h" #include "page.h" #include "mdt.h" #define NILFS_BUFFER_INHERENT_BITS \ ((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \ (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Checked)) static struct buffer_head * __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index, int blkbits, unsigned long b_state) { unsigned long first_block; struct buffer_head *bh; if (!page_has_buffers(page)) create_empty_buffers(page, 1 << blkbits, b_state); first_block = (unsigned long)index << (PAGE_CACHE_SHIFT - blkbits); bh = nilfs_page_get_nth_block(page, block - first_block); touch_buffer(bh); wait_on_buffer(bh); return bh; } struct buffer_head *nilfs_grab_buffer(struct inode *inode, struct address_space *mapping, unsigned long blkoff, unsigned long b_state) { int blkbits = inode->i_blkbits; pgoff_t 
index = blkoff >> (PAGE_CACHE_SHIFT - blkbits); struct page *page; struct buffer_head *bh; page = grab_cache_page(mapping, index); if (unlikely(!page)) return NULL; bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state); if (unlikely(!bh)) { unlock_page(page); page_cache_release(page); return NULL; } return bh; } /** * nilfs_forget_buffer - discard dirty state * @inode: owner inode of the buffer * @bh: buffer head of the buffer to be discarded */ void nilfs_forget_buffer(struct buffer_head *bh) { struct page *page = bh->b_page; const unsigned long clear_bits = (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped | 1 << BH_Async_Write | 1 << BH_NILFS_Volatile | 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected); lock_buffer(bh); set_mask_bits(&bh->b_state, clear_bits, 0); if (nilfs_page_buffers_clean(page)) __nilfs_clear_page_dirty(page); bh->b_blocknr = -1; ClearPageUptodate(page); ClearPageMappedToDisk(page); unlock_buffer(bh); brelse(bh); } /** * nilfs_copy_buffer -- copy buffer data and flags * @dbh: destination buffer * @sbh: source buffer */ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh) { void *kaddr0, *kaddr1; unsigned long bits; struct page *spage = sbh->b_page, *dpage = dbh->b_page; struct buffer_head *bh; kaddr0 = kmap_atomic(spage); kaddr1 = kmap_atomic(dpage); memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size); kunmap_atomic(kaddr1); kunmap_atomic(kaddr0); dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS; dbh->b_blocknr = sbh->b_blocknr; dbh->b_bdev = sbh->b_bdev; bh = dbh; bits = sbh->b_state & ((1UL << BH_Uptodate) | (1UL << BH_Mapped)); while ((bh = bh->b_this_page) != dbh) { lock_buffer(bh); bits &= bh->b_state; unlock_buffer(bh); } if (bits & (1UL << BH_Uptodate)) SetPageUptodate(dpage); else ClearPageUptodate(dpage); if (bits & (1UL << BH_Mapped)) SetPageMappedToDisk(dpage); else ClearPageMappedToDisk(dpage); } /** * nilfs_page_buffers_clean - check if a page has dirty buffers or not. 
* @page: page to be checked * * nilfs_page_buffers_clean() returns zero if the page has dirty buffers. * Otherwise, it returns non-zero value. */ int nilfs_page_buffers_clean(struct page *page) { struct buffer_head *bh, *head; bh = head = page_buffers(page); do { if (buffer_dirty(bh)) return 0; bh = bh->b_this_page; } while (bh != head); return 1; } void nilfs_page_bug(struct page *page) { struct address_space *m; unsigned long ino; if (unlikely(!page)) { printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n"); return; } m = page->mapping; ino = m ? m->host->i_ino : 0; printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx " "mapping=%p ino=%lu\n", page, atomic_read(&page->_count), (unsigned long long)page->index, page->flags, m, ino); if (page_has_buffers(page)) { struct buffer_head *bh, *head; int i = 0; bh = head = page_buffers(page); do { printk(KERN_CRIT " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n", i++, bh, atomic_read(&bh->b_count), (unsigned long long)bh->b_blocknr, bh->b_state); bh = bh->b_this_page; } while (bh != head); } } /** * nilfs_copy_page -- copy the page with buffers * @dst: destination page * @src: source page * @copy_dirty: flag whether to copy dirty states on the page's buffer heads. * * This function is for both data pages and btnode pages. The dirty flag * should be treated by caller. The page must not be under i/o. 
* Both src and dst page must be locked */ static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty) { struct buffer_head *dbh, *dbufs, *sbh, *sbufs; unsigned long mask = NILFS_BUFFER_INHERENT_BITS; BUG_ON(PageWriteback(dst)); sbh = sbufs = page_buffers(src); if (!page_has_buffers(dst)) create_empty_buffers(dst, sbh->b_size, 0); if (copy_dirty) mask |= (1UL << BH_Dirty); dbh = dbufs = page_buffers(dst); do { lock_buffer(sbh); lock_buffer(dbh); dbh->b_state = sbh->b_state & mask; dbh->b_blocknr = sbh->b_blocknr; dbh->b_bdev = sbh->b_bdev; sbh = sbh->b_this_page; dbh = dbh->b_this_page; } while (dbh != dbufs); copy_highpage(dst, src); if (PageUptodate(src) && !PageUptodate(dst)) SetPageUptodate(dst); else if (!PageUptodate(src) && PageUptodate(dst)) ClearPageUptodate(dst); if (PageMappedToDisk(src) && !PageMappedToDisk(dst)) SetPageMappedToDisk(dst); else if (!PageMappedToDisk(src) && PageMappedToDisk(dst)) ClearPageMappedToDisk(dst); do { unlock_buffer(sbh); unlock_buffer(dbh); sbh = sbh->b_this_page; dbh = dbh->b_this_page; } while (dbh != dbufs); } int nilfs_copy_dirty_pages(struct address_space *dmap, struct address_space *smap) { struct pagevec pvec; unsigned int i; pgoff_t index = 0; int err = 0; pagevec_init(&pvec, 0); repeat: if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) return 0; for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i], *dpage; lock_page(page); if (unlikely(!PageDirty(page))) NILFS_PAGE_BUG(page, "inconsistent dirty state"); dpage = grab_cache_page(dmap, page->index); if (unlikely(!dpage)) { /* No empty page is added to the page cache */ err = -ENOMEM; unlock_page(page); break; } if (unlikely(!page_has_buffers(page))) NILFS_PAGE_BUG(page, "found empty page in dat page cache"); nilfs_copy_page(dpage, page, 1); __set_page_dirty_nobuffers(dpage); unlock_page(dpage); page_cache_release(dpage); unlock_page(page); } pagevec_release(&pvec); cond_resched(); if 
(likely(!err)) goto repeat; return err; } /** * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache * @dmap: destination page cache * @smap: source page cache * * No pages must no be added to the cache during this process. * This must be ensured by the caller. */ void nilfs_copy_back_pages(struct address_space *dmap, struct address_space *smap) { struct pagevec pvec; unsigned int i, n; pgoff_t index = 0; int err; pagevec_init(&pvec, 0); repeat: n = pagevec_lookup(&pvec, smap, index, PAGEVEC_SIZE); if (!n) return; index = pvec.pages[n - 1]->index + 1; for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i], *dpage; pgoff_t offset = page->index; lock_page(page); dpage = find_lock_page(dmap, offset); if (dpage) { /* override existing page on the destination cache */ WARN_ON(PageDirty(dpage)); nilfs_copy_page(dpage, page, 0); unlock_page(dpage); page_cache_release(dpage); } else { struct page *page2; /* move the page to the destination cache */ spin_lock_irq(&smap->tree_lock); page2 = radix_tree_delete(&smap->page_tree, offset); WARN_ON(page2 != page); smap->nrpages--; spin_unlock_irq(&smap->tree_lock); spin_lock_irq(&dmap->tree_lock); err = radix_tree_insert(&dmap->page_tree, offset, page); if (unlikely(err < 0)) { WARN_ON(err == -EEXIST); page->mapping = NULL; page_cache_release(page); /* for cache */ } else { page->mapping = dmap; dmap->nrpages++; if (PageDirty(page)) radix_tree_tag_set(&dmap->page_tree, offset, PAGECACHE_TAG_DIRTY); } spin_unlock_irq(&dmap->tree_lock); } unlock_page(page); } pagevec_release(&pvec); cond_resched(); goto repeat; } /** * nilfs_clear_dirty_pages - discard dirty pages in address space * @mapping: address space with dirty pages for discarding * @silent: suppress [true] or print [false] warning messages */ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent) { struct pagevec pvec; unsigned int i; pgoff_t index = 0; pagevec_init(&pvec, 0); while (pagevec_lookup_tag(&pvec, 
mapping, &index, PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) { for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; lock_page(page); nilfs_clear_dirty_page(page, silent); unlock_page(page); } pagevec_release(&pvec); cond_resched(); } } /** * nilfs_clear_dirty_page - discard dirty page * @page: dirty page that will be discarded * @silent: suppress [true] or print [false] warning messages */ void nilfs_clear_dirty_page(struct page *page, bool silent) { struct inode *inode = page->mapping->host; struct super_block *sb = inode->i_sb; BUG_ON(!PageLocked(page)); if (!silent) { nilfs_warning(sb, __func__, "discard page: offset %lld, ino %lu", page_offset(page), inode->i_ino); } ClearPageUptodate(page); ClearPageMappedToDisk(page); if (page_has_buffers(page)) { struct buffer_head *bh, *head; const unsigned long clear_bits = (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped | 1 << BH_Async_Write | 1 << BH_NILFS_Volatile | 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected); bh = head = page_buffers(page); do { lock_buffer(bh); if (!silent) { nilfs_warning(sb, __func__, "discard block %llu, size %zu", (u64)bh->b_blocknr, bh->b_size); } set_mask_bits(&bh->b_state, clear_bits, 0); unlock_buffer(bh); } while (bh = bh->b_this_page, bh != head); } __nilfs_clear_page_dirty(page); } unsigned nilfs_page_count_clean_buffers(struct page *page, unsigned from, unsigned to) { unsigned block_start, block_end; struct buffer_head *bh, *head; unsigned nc = 0; for (bh = head = page_buffers(page), block_start = 0; bh != head || !block_start; block_start = block_end, bh = bh->b_this_page) { block_end = block_start + bh->b_size; if (block_end > from && block_start < to && !buffer_dirty(bh)) nc++; } return nc; } void nilfs_mapping_init(struct address_space *mapping, struct inode *inode) { mapping->host = inode; mapping->flags = 0; mapping_set_gfp_mask(mapping, GFP_NOFS); mapping->private_data = NULL; mapping->a_ops = &empty_aops; } /* * NILFS2 needs clear_page_dirty() in the 
following two cases: * * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears * page dirty flags when it copies back pages from the shadow cache * (gcdat->{i_mapping,i_btnode_cache}) to its original cache * (dat->{i_mapping,i_btnode_cache}). * * 2) Some B-tree operations like insertion or deletion may dispose buffers * in dirty state, and this needs to cancel the dirty state of their pages. */ int __nilfs_clear_page_dirty(struct page *page) { struct address_space *mapping = page->mapping; if (mapping) { spin_lock_irq(&mapping->tree_lock); if (test_bit(PG_dirty, &page->flags)) { radix_tree_tag_clear(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); spin_unlock_irq(&mapping->tree_lock); return clear_page_dirty_for_io(page); } spin_unlock_irq(&mapping->tree_lock); return 0; } return TestClearPageDirty(page); } /** * nilfs_find_uncommitted_extent - find extent of uncommitted data * @inode: inode * @start_blk: start block offset (in) * @blkoff: start offset of the found extent (out) * * This function searches an extent of buffers marked "delayed" which * starts from a block offset equal to or larger than @start_blk. If * such an extent was found, this will store the start offset in * @blkoff and return its length in blocks. Otherwise, zero is * returned. 
*/ unsigned long nilfs_find_uncommitted_extent(struct inode *inode, sector_t start_blk, sector_t *blkoff) { unsigned int i; pgoff_t index; unsigned int nblocks_in_page; unsigned long length = 0; sector_t b; struct pagevec pvec; struct page *page; if (inode->i_mapping->nrpages == 0) return 0; index = start_blk >> (PAGE_CACHE_SHIFT - inode->i_blkbits); nblocks_in_page = 1U << (PAGE_CACHE_SHIFT - inode->i_blkbits); pagevec_init(&pvec, 0); repeat: pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE, pvec.pages); if (pvec.nr == 0) return length; if (length > 0 && pvec.pages[0]->index > index) goto out; b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); i = 0; do { page = pvec.pages[i]; lock_page(page); if (page_has_buffers(page)) { struct buffer_head *bh, *head; bh = head = page_buffers(page); do { if (b < start_blk) continue; if (buffer_delay(bh)) { if (length == 0) *blkoff = b; length++; } else if (length > 0) { goto out_locked; } } while (++b, bh = bh->b_this_page, bh != head); } else { if (length > 0) goto out_locked; b += nblocks_in_page; } unlock_page(page); } while (++i < pagevec_count(&pvec)); index = page->index + 1; pagevec_release(&pvec); cond_resched(); goto repeat; out_locked: unlock_page(page); out: pagevec_release(&pvec); return length; }
gpl-2.0
stedman420/android_kernel_lge_lgl55c
net/netfilter/ipvs/ip_vs_app.c
806
13502
/* * ip_vs_app.c: Application module support for IPVS * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference * is that ip_vs_app module handles the reverse direction (incoming requests * and outgoing responses). * * IP_MASQ_APP application masquerading module * * Author: Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar> * */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/netfilter.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/protocol.h> #include <net/tcp.h> #include <asm/system.h> #include <linux/stat.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mutex.h> #include <net/ip_vs.h> EXPORT_SYMBOL(register_ip_vs_app); EXPORT_SYMBOL(unregister_ip_vs_app); EXPORT_SYMBOL(register_ip_vs_app_inc); /* ipvs application list head */ static LIST_HEAD(ip_vs_app_list); static DEFINE_MUTEX(__ip_vs_app_mutex); /* * Get an ip_vs_app object */ static inline int ip_vs_app_get(struct ip_vs_app *app) { return try_module_get(app->module); } static inline void ip_vs_app_put(struct ip_vs_app *app) { module_put(app->module); } /* * Allocate/initialize app incarnation and register it in proto apps. 
*/ static int ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port) { struct ip_vs_protocol *pp; struct ip_vs_app *inc; int ret; if (!(pp = ip_vs_proto_get(proto))) return -EPROTONOSUPPORT; if (!pp->unregister_app) return -EOPNOTSUPP; inc = kmemdup(app, sizeof(*inc), GFP_KERNEL); if (!inc) return -ENOMEM; INIT_LIST_HEAD(&inc->p_list); INIT_LIST_HEAD(&inc->incs_list); inc->app = app; inc->port = htons(port); atomic_set(&inc->usecnt, 0); if (app->timeouts) { inc->timeout_table = ip_vs_create_timeout_table(app->timeouts, app->timeouts_size); if (!inc->timeout_table) { ret = -ENOMEM; goto out; } } ret = pp->register_app(inc); if (ret) goto out; list_add(&inc->a_list, &app->incs_list); IP_VS_DBG(9, "%s application %s:%u registered\n", pp->name, inc->name, inc->port); return 0; out: kfree(inc->timeout_table); kfree(inc); return ret; } /* * Release app incarnation */ static void ip_vs_app_inc_release(struct ip_vs_app *inc) { struct ip_vs_protocol *pp; if (!(pp = ip_vs_proto_get(inc->protocol))) return; if (pp->unregister_app) pp->unregister_app(inc); IP_VS_DBG(9, "%s App %s:%u unregistered\n", pp->name, inc->name, inc->port); list_del(&inc->a_list); kfree(inc->timeout_table); kfree(inc); } /* * Get reference to app inc (only called from softirq) * */ int ip_vs_app_inc_get(struct ip_vs_app *inc) { int result; atomic_inc(&inc->usecnt); if (unlikely((result = ip_vs_app_get(inc->app)) != 1)) atomic_dec(&inc->usecnt); return result; } /* * Put the app inc (only called from timer or net softirq) */ void ip_vs_app_inc_put(struct ip_vs_app *inc) { ip_vs_app_put(inc->app); atomic_dec(&inc->usecnt); } /* * Register an application incarnation in protocol applications */ int register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port) { int result; mutex_lock(&__ip_vs_app_mutex); result = ip_vs_app_inc_new(app, proto, port); mutex_unlock(&__ip_vs_app_mutex); return result; } /* * ip_vs_app registration routine */ int register_ip_vs_app(struct ip_vs_app *app) { 
/* increase the module use count */ ip_vs_use_count_inc(); mutex_lock(&__ip_vs_app_mutex); list_add(&app->a_list, &ip_vs_app_list); mutex_unlock(&__ip_vs_app_mutex); return 0; } /* * ip_vs_app unregistration routine * We are sure there are no app incarnations attached to services */ void unregister_ip_vs_app(struct ip_vs_app *app) { struct ip_vs_app *inc, *nxt; mutex_lock(&__ip_vs_app_mutex); list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) { ip_vs_app_inc_release(inc); } list_del(&app->a_list); mutex_unlock(&__ip_vs_app_mutex); /* decrease the module use count */ ip_vs_use_count_dec(); } /* * Bind ip_vs_conn to its ip_vs_app (called by cp constructor) */ int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp) { return pp->app_conn_bind(cp); } /* * Unbind cp from application incarnation (called by cp destructor) */ void ip_vs_unbind_app(struct ip_vs_conn *cp) { struct ip_vs_app *inc = cp->app; if (!inc) return; if (inc->unbind_conn) inc->unbind_conn(inc, cp); if (inc->done_conn) inc->done_conn(inc, cp); ip_vs_app_inc_put(inc); cp->app = NULL; } /* * Fixes th->seq based on ip_vs_seq info. */ static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th) { __u32 seq = ntohl(th->seq); /* * Adjust seq with delta-offset for all packets after * the most recent resized pkt seq and with previous_delta offset * for all packets before most recent resized pkt seq. */ if (vseq->delta || vseq->previous_delta) { if(after(seq, vseq->init_seq)) { th->seq = htonl(seq + vseq->delta); IP_VS_DBG(9, "%s(): added delta (%d) to seq\n", __func__, vseq->delta); } else { th->seq = htonl(seq + vseq->previous_delta); IP_VS_DBG(9, "%s(): added previous_delta (%d) to seq\n", __func__, vseq->previous_delta); } } } /* * Fixes th->ack_seq based on ip_vs_seq info. 
*/ static inline void vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th) { __u32 ack_seq = ntohl(th->ack_seq); /* * Adjust ack_seq with delta-offset for * the packets AFTER most recent resized pkt has caused a shift * for packets before most recent resized pkt, use previous_delta */ if (vseq->delta || vseq->previous_delta) { /* since ack_seq is the number of octet that is expected to receive next, so compare it with init_seq+delta */ if(after(ack_seq, vseq->init_seq+vseq->delta)) { th->ack_seq = htonl(ack_seq - vseq->delta); IP_VS_DBG(9, "%s(): subtracted delta " "(%d) from ack_seq\n", __func__, vseq->delta); } else { th->ack_seq = htonl(ack_seq - vseq->previous_delta); IP_VS_DBG(9, "%s(): subtracted " "previous_delta (%d) from ack_seq\n", __func__, vseq->previous_delta); } } } /* * Updates ip_vs_seq if pkt has been resized * Assumes already checked proto==IPPROTO_TCP and diff!=0. */ static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq, unsigned flag, __u32 seq, int diff) { /* spinlock is to keep updating cp->flags atomic */ spin_lock(&cp->lock); if (!(cp->flags & flag) || after(seq, vseq->init_seq)) { vseq->previous_delta = vseq->delta; vseq->delta += diff; vseq->init_seq = seq; cp->flags |= flag; } spin_unlock(&cp->lock); } static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb, struct ip_vs_app *app) { int diff; const unsigned int tcp_offset = ip_hdrlen(skb); struct tcphdr *th; __u32 seq; if (!skb_make_writable(skb, tcp_offset + sizeof(*th))) return 0; th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset); /* * Remember seq number in case this pkt gets resized */ seq = ntohl(th->seq); /* * Fix seq stuff if flagged as so. 
*/ if (cp->flags & IP_VS_CONN_F_OUT_SEQ) vs_fix_seq(&cp->out_seq, th); if (cp->flags & IP_VS_CONN_F_IN_SEQ) vs_fix_ack_seq(&cp->in_seq, th); /* * Call private output hook function */ if (app->pkt_out == NULL) return 1; if (!app->pkt_out(app, cp, skb, &diff)) return 0; /* * Update ip_vs seq stuff if len has changed. */ if (diff != 0) vs_seq_update(cp, &cp->out_seq, IP_VS_CONN_F_OUT_SEQ, seq, diff); return 1; } /* * Output pkt hook. Will call bound ip_vs_app specific function * called by ipvs packet handler, assumes previously checked cp!=NULL * returns false if it can't handle packet (oom) */ int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb) { struct ip_vs_app *app; /* * check if application module is bound to * this ip_vs_conn. */ if ((app = cp->app) == NULL) return 1; /* TCP is complicated */ if (cp->protocol == IPPROTO_TCP) return app_tcp_pkt_out(cp, skb, app); /* * Call private output hook function */ if (app->pkt_out == NULL) return 1; return app->pkt_out(app, cp, skb, NULL); } static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb, struct ip_vs_app *app) { int diff; const unsigned int tcp_offset = ip_hdrlen(skb); struct tcphdr *th; __u32 seq; if (!skb_make_writable(skb, tcp_offset + sizeof(*th))) return 0; th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset); /* * Remember seq number in case this pkt gets resized */ seq = ntohl(th->seq); /* * Fix seq stuff if flagged as so. */ if (cp->flags & IP_VS_CONN_F_IN_SEQ) vs_fix_seq(&cp->in_seq, th); if (cp->flags & IP_VS_CONN_F_OUT_SEQ) vs_fix_ack_seq(&cp->out_seq, th); /* * Call private input hook function */ if (app->pkt_in == NULL) return 1; if (!app->pkt_in(app, cp, skb, &diff)) return 0; /* * Update ip_vs seq stuff if len has changed. */ if (diff != 0) vs_seq_update(cp, &cp->in_seq, IP_VS_CONN_F_IN_SEQ, seq, diff); return 1; } /* * Input pkt hook. Will call bound ip_vs_app specific function * called by ipvs packet handler, assumes previously checked cp!=NULL. 
* returns false if can't handle packet (oom). */ int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb) { struct ip_vs_app *app; /* * check if application module is bound to * this ip_vs_conn. */ if ((app = cp->app) == NULL) return 1; /* TCP is complicated */ if (cp->protocol == IPPROTO_TCP) return app_tcp_pkt_in(cp, skb, app); /* * Call private input hook function */ if (app->pkt_in == NULL) return 1; return app->pkt_in(app, cp, skb, NULL); } #ifdef CONFIG_PROC_FS /* * /proc/net/ip_vs_app entry function */ static struct ip_vs_app *ip_vs_app_idx(loff_t pos) { struct ip_vs_app *app, *inc; list_for_each_entry(app, &ip_vs_app_list, a_list) { list_for_each_entry(inc, &app->incs_list, a_list) { if (pos-- == 0) return inc; } } return NULL; } static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos) { mutex_lock(&__ip_vs_app_mutex); return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN; } static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ip_vs_app *inc, *app; struct list_head *e; ++*pos; if (v == SEQ_START_TOKEN) return ip_vs_app_idx(0); inc = v; app = inc->app; if ((e = inc->a_list.next) != &app->incs_list) return list_entry(e, struct ip_vs_app, a_list); /* go on to next application */ for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) { app = list_entry(e, struct ip_vs_app, a_list); list_for_each_entry(inc, &app->incs_list, a_list) { return inc; } } return NULL; } static void ip_vs_app_seq_stop(struct seq_file *seq, void *v) { mutex_unlock(&__ip_vs_app_mutex); } static int ip_vs_app_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "prot port usecnt name\n"); else { const struct ip_vs_app *inc = v; seq_printf(seq, "%-3s %-7u %-6d %-17s\n", ip_vs_proto_name(inc->protocol), ntohs(inc->port), atomic_read(&inc->usecnt), inc->name); } return 0; } static const struct seq_operations ip_vs_app_seq_ops = { .start = ip_vs_app_seq_start, .next = ip_vs_app_seq_next, .stop = 
ip_vs_app_seq_stop, .show = ip_vs_app_seq_show, }; static int ip_vs_app_open(struct inode *inode, struct file *file) { return seq_open(file, &ip_vs_app_seq_ops); } static const struct file_operations ip_vs_app_fops = { .owner = THIS_MODULE, .open = ip_vs_app_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* * Replace a segment of data with a new segment */ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, char *o_buf, int o_len, char *n_buf, int n_len) { int diff; int o_offset; int o_left; EnterFunction(9); diff = n_len - o_len; o_offset = o_buf - (char *)skb->data; /* The length of left data after o_buf+o_len in the skb data */ o_left = skb->len - (o_offset + o_len); if (diff <= 0) { memmove(o_buf + n_len, o_buf + o_len, o_left); memcpy(o_buf, n_buf, n_len); skb_trim(skb, skb->len + diff); } else if (diff <= skb_tailroom(skb)) { skb_put(skb, diff); memmove(o_buf + n_len, o_buf + o_len, o_left); memcpy(o_buf, n_buf, n_len); } else { if (pskb_expand_head(skb, skb_headroom(skb), diff, pri)) return -ENOMEM; skb_put(skb, diff); memmove(skb->data + o_offset + n_len, skb->data + o_offset + o_len, o_left); skb_copy_to_linear_data_offset(skb, o_offset, n_buf, n_len); } /* must update the iph total length here */ ip_hdr(skb)->tot_len = htons(skb->len); LeaveFunction(9); return 0; } int __init ip_vs_app_init(void) { /* we will replace it with proc_net_ipvs_create() soon */ proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops); return 0; } void ip_vs_app_cleanup(void) { proc_net_remove(&init_net, "ip_vs_app"); }
gpl-2.0
smksyj/linux_modified_mlock
fs/cifs/smb2inode.c
1318
7728
/*
 * fs/cifs/smb2inode.c
 *
 * Copyright (C) International Business Machines Corp., 2002, 2011
 * Etersoft, 2012
 * Author(s): Pavel Shilovsky (pshilovsky@samba.org),
 *            Steve French (sfrench@us.ibm.com)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "cifs_unicode.h"
#include "fscache.h"
#include "smb2glob.h"
#include "smb2pdu.h"
#include "smb2proto.h"

/*
 * Common worker for path-based operations: open @full_path with the given
 * access/disposition/options, perform the single SMB2_OP_* operation named
 * by @command on the handle (with @data as its command-specific argument),
 * then close the handle.  Returns 0 or a negative error; an error from the
 * operation itself takes precedence over the close status.
 */
static int
smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifs_sb_info *cifs_sb, const char *full_path,
		   __u32 desired_access, __u32 create_disposition,
		   __u32 create_options, void *data, int command)
{
	int rc, tmprc = 0;
	__le16 *utf16_path;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;

	/* SMB2 carries paths as UTF-16 on the wire */
	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	oparms.tcon = tcon;
	oparms.desired_access = desired_access;
	oparms.disposition = create_disposition;
	oparms.create_options = create_options;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
	if (rc) {
		kfree(utf16_path);
		return rc;
	}

	switch (command) {
	case SMB2_OP_DELETE:
		/* delete is effected by the open's create_options
		   (CREATE_DELETE_ON_CLOSE) — nothing more to do here */
		break;
	case SMB2_OP_QUERY_INFO:
		tmprc = SMB2_query_info(xid, tcon, fid.persistent_fid,
					fid.volatile_fid,
					(struct smb2_file_all_info *)data);
		break;
	case SMB2_OP_MKDIR:
		/*
		 * Directories are created through parameters in the
		 * SMB2_open() call.
		 */
		break;
	case SMB2_OP_RENAME:
		/* @data is the UTF-16 target name */
		tmprc = SMB2_rename(xid, tcon, fid.persistent_fid,
				    fid.volatile_fid, (__le16 *)data);
		break;
	case SMB2_OP_HARDLINK:
		/* @data is the UTF-16 link target name */
		tmprc = SMB2_set_hardlink(xid, tcon, fid.persistent_fid,
					  fid.volatile_fid, (__le16 *)data);
		break;
	case SMB2_OP_SET_EOF:
		/* @data is the little-endian new end-of-file offset */
		tmprc = SMB2_set_eof(xid, tcon, fid.persistent_fid,
				     fid.volatile_fid, current->tgid,
				     (__le64 *)data, false);
		break;
	case SMB2_OP_SET_INFO:
		tmprc = SMB2_set_info(xid, tcon, fid.persistent_fid,
				      fid.volatile_fid,
				      (FILE_BASIC_INFO *)data);
		break;
	default:
		cifs_dbg(VFS, "Invalid command\n");
		break;
	}

	rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
	if (tmprc)
		rc = tmprc;
	kfree(utf16_path);
	return rc;
}

/*
 * Translate an SMB2 all-info response into the cifs FILE_ALL_INFO layout.
 * The leading fields are copied in one block (the two structures share a
 * common prefix up to CurrentByteOffset); the remainder is copied field by
 * field because the layouts diverge there.
 */
void
move_smb2_info_to_cifs(FILE_ALL_INFO *dst, struct smb2_file_all_info *src)
{
	memcpy(dst, src, (size_t)(&src->CurrentByteOffset) - (size_t)src);
	dst->CurrentByteOffset = src->CurrentByteOffset;
	dst->Mode = src->Mode;
	dst->AlignmentRequirement = src->AlignmentRequirement;
	dst->IndexNumber1 = 0; /* we don't use it */
}

/*
 * Query attributes for @full_path into @data.  On -EOPNOTSUPP the path is
 * presumed to be a symlink and the query is retried as a reparse-point
 * open, with *symlink set for the caller.  *adjust_tz is always false for
 * SMB2 (timestamps need no server-TZ correction here).
 */
int
smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
		     struct cifs_sb_info *cifs_sb, const char *full_path,
		     FILE_ALL_INFO *data, bool *adjust_tz, bool *symlink)
{
	int rc;
	struct smb2_file_all_info *smb2_data;

	*adjust_tz = false;
	*symlink = false;

	/* extra PATH_MAX*2 leaves room for the variable-length name tail */
	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
			    GFP_KERNEL);
	if (smb2_data == NULL)
		return -ENOMEM;

	rc = smb2_open_op_close(xid, tcon, cifs_sb, full_path,
				FILE_READ_ATTRIBUTES, FILE_OPEN, 0,
				smb2_data, SMB2_OP_QUERY_INFO);
	if (rc == -EOPNOTSUPP) {
		*symlink = true;
		/* Failed on a symbolic link - query a reparse point info */
		rc = smb2_open_op_close(xid, tcon, cifs_sb, full_path,
					FILE_READ_ATTRIBUTES, FILE_OPEN,
					OPEN_REPARSE_POINT, smb2_data,
					SMB2_OP_QUERY_INFO);
	}
	if (rc)
		goto out;

	move_smb2_info_to_cifs(data, smb2_data);
out:
	kfree(smb2_data);
	return rc;
}

/* Create the directory @name (the open itself performs the mkdir). */
int
smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
	   struct cifs_sb_info *cifs_sb)
{
	return smb2_open_op_close(xid, tcon, cifs_sb, name,
				  FILE_WRITE_ATTRIBUTES, FILE_CREATE,
				  CREATE_NOT_FILE, NULL, SMB2_OP_MKDIR);
}

/*
 * After a mkdir, push DOS attributes (read-only added to the cached set)
 * to the server; the cached cifsAttrs is updated only when that succeeds.
 * Best-effort: failures are silently ignored.
 */
void
smb2_mkdir_setinfo(struct inode *inode, const char *name,
		   struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon,
		   const unsigned int xid)
{
	FILE_BASIC_INFO data;
	struct cifsInodeInfo *cifs_i;
	u32 dosattrs;
	int tmprc;

	memset(&data, 0, sizeof(data));
	cifs_i = CIFS_I(inode);
	dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
	data.Attributes = cpu_to_le32(dosattrs);
	tmprc = smb2_open_op_close(xid, tcon, cifs_sb, name,
				   FILE_WRITE_ATTRIBUTES, FILE_CREATE,
				   CREATE_NOT_FILE, &data, SMB2_OP_SET_INFO);
	if (tmprc == 0)
		cifs_i->cifsAttrs = dosattrs;
}

/* Remove the directory @name via a delete-on-close open. */
int
smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
	   struct cifs_sb_info *cifs_sb)
{
	return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
				  CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE,
				  NULL, SMB2_OP_DELETE);
}

/* Unlink @name; OPEN_REPARSE_POINT lets this also remove symlinks. */
int
smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
	    struct cifs_sb_info *cifs_sb)
{
	return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
				  CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
				  NULL, SMB2_OP_DELETE);
}

/*
 * Shared helper for rename and hardlink: open @from_name with @access and
 * run @command with the UTF-16 form of @to_name as its payload.
 */
static int
smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
		   const char *from_name, const char *to_name,
		   struct cifs_sb_info *cifs_sb, __u32 access, int command)
{
	__le16 *smb2_to_name = NULL;
	int rc;

	smb2_to_name = cifs_convert_path_to_utf16(to_name, cifs_sb);
	if (smb2_to_name == NULL) {
		rc = -ENOMEM;
		goto smb2_rename_path;
	}

	rc = smb2_open_op_close(xid, tcon, cifs_sb, from_name, access,
				FILE_OPEN, 0, smb2_to_name, command);
smb2_rename_path:
	kfree(smb2_to_name);
	return rc;
}

/* Rename @from_name to @to_name (requires DELETE on the source). */
int
smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
		 const char *from_name, const char *to_name,
		 struct cifs_sb_info *cifs_sb)
{
	return smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
				  DELETE, SMB2_OP_RENAME);
}

/* Create hard link @to_name pointing at @from_name. */
int
smb2_create_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
		     const char *from_name, const char *to_name,
		     struct cifs_sb_info *cifs_sb)
{
	return smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
				  FILE_READ_ATTRIBUTES, SMB2_OP_HARDLINK);
}

/*
 * Truncate/extend @full_path to @size by setting end-of-file.
 * NOTE(review): @set_alloc is accepted for interface parity but unused —
 * only EOF (not allocation size) is set here.
 */
int
smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
		   const char *full_path, __u64 size,
		   struct cifs_sb_info *cifs_sb, bool set_alloc)
{
	__le64 eof = cpu_to_le64(size);

	return smb2_open_op_close(xid, tcon, cifs_sb, full_path,
				  FILE_WRITE_DATA, FILE_OPEN, 0, &eof,
				  SMB2_OP_SET_EOF);
}

/*
 * Set basic file info (timestamps/attributes) on @full_path, obtaining a
 * tcon link from the superblock and releasing it when done.
 */
int
smb2_set_file_info(struct inode *inode, const char *full_path,
		   FILE_BASIC_INFO *buf, const unsigned int xid)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink;
	int rc;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	rc = smb2_open_op_close(xid, tlink_tcon(tlink), cifs_sb, full_path,
				FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
				SMB2_OP_SET_INFO);
	cifs_put_tlink(tlink);
	return rc;
}
gpl-2.0
Serranove/android_kernel_samsung_serranovelte
drivers/media/i2c/soc_camera/rj54n1cb0c.c
2086
36197
/* * Driver for RJ54N1CB0C CMOS Image Sensor from Sharp * * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/v4l2-mediabus.h> #include <linux/videodev2.h> #include <linux/module.h> #include <media/rj54n1cb0c.h> #include <media/soc_camera.h> #include <media/v4l2-subdev.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-ctrls.h> #define RJ54N1_DEV_CODE 0x0400 #define RJ54N1_DEV_CODE2 0x0401 #define RJ54N1_OUT_SEL 0x0403 #define RJ54N1_XY_OUTPUT_SIZE_S_H 0x0404 #define RJ54N1_X_OUTPUT_SIZE_S_L 0x0405 #define RJ54N1_Y_OUTPUT_SIZE_S_L 0x0406 #define RJ54N1_XY_OUTPUT_SIZE_P_H 0x0407 #define RJ54N1_X_OUTPUT_SIZE_P_L 0x0408 #define RJ54N1_Y_OUTPUT_SIZE_P_L 0x0409 #define RJ54N1_LINE_LENGTH_PCK_S_H 0x040a #define RJ54N1_LINE_LENGTH_PCK_S_L 0x040b #define RJ54N1_LINE_LENGTH_PCK_P_H 0x040c #define RJ54N1_LINE_LENGTH_PCK_P_L 0x040d #define RJ54N1_RESIZE_N 0x040e #define RJ54N1_RESIZE_N_STEP 0x040f #define RJ54N1_RESIZE_STEP 0x0410 #define RJ54N1_RESIZE_HOLD_H 0x0411 #define RJ54N1_RESIZE_HOLD_L 0x0412 #define RJ54N1_H_OBEN_OFS 0x0413 #define RJ54N1_V_OBEN_OFS 0x0414 #define RJ54N1_RESIZE_CONTROL 0x0415 #define RJ54N1_STILL_CONTROL 0x0417 #define RJ54N1_INC_USE_SEL_H 0x0425 #define RJ54N1_INC_USE_SEL_L 0x0426 #define RJ54N1_MIRROR_STILL_MODE 0x0427 #define RJ54N1_INIT_START 0x0428 #define RJ54N1_SCALE_1_2_LEV 0x0429 #define RJ54N1_SCALE_4_LEV 0x042a #define RJ54N1_Y_GAIN 0x04d8 #define RJ54N1_APT_GAIN_UP 0x04fa #define RJ54N1_RA_SEL_UL 0x0530 #define RJ54N1_BYTE_SWAP 0x0531 #define RJ54N1_OUT_SIGPO 0x053b #define RJ54N1_WB_SEL_WEIGHT_I 0x054e #define RJ54N1_BIT8_WB 0x0569 #define RJ54N1_HCAPS_WB 0x056a #define RJ54N1_VCAPS_WB 0x056b #define RJ54N1_HCAPE_WB 0x056c #define 
RJ54N1_VCAPE_WB 0x056d #define RJ54N1_EXPOSURE_CONTROL 0x058c #define RJ54N1_FRAME_LENGTH_S_H 0x0595 #define RJ54N1_FRAME_LENGTH_S_L 0x0596 #define RJ54N1_FRAME_LENGTH_P_H 0x0597 #define RJ54N1_FRAME_LENGTH_P_L 0x0598 #define RJ54N1_PEAK_H 0x05b7 #define RJ54N1_PEAK_50 0x05b8 #define RJ54N1_PEAK_60 0x05b9 #define RJ54N1_PEAK_DIFF 0x05ba #define RJ54N1_IOC 0x05ef #define RJ54N1_TG_BYPASS 0x0700 #define RJ54N1_PLL_L 0x0701 #define RJ54N1_PLL_N 0x0702 #define RJ54N1_PLL_EN 0x0704 #define RJ54N1_RATIO_TG 0x0706 #define RJ54N1_RATIO_T 0x0707 #define RJ54N1_RATIO_R 0x0708 #define RJ54N1_RAMP_TGCLK_EN 0x0709 #define RJ54N1_OCLK_DSP 0x0710 #define RJ54N1_RATIO_OP 0x0711 #define RJ54N1_RATIO_O 0x0712 #define RJ54N1_OCLK_SEL_EN 0x0713 #define RJ54N1_CLK_RST 0x0717 #define RJ54N1_RESET_STANDBY 0x0718 #define RJ54N1_FWFLG 0x07fe #define E_EXCLK (1 << 7) #define SOFT_STDBY (1 << 4) #define SEN_RSTX (1 << 2) #define TG_RSTX (1 << 1) #define DSP_RSTX (1 << 0) #define RESIZE_HOLD_SEL (1 << 2) #define RESIZE_GO (1 << 1) /* * When cropping, the camera automatically centers the cropped region, there * doesn't seem to be a way to specify an explicit location of the rectangle. 
*/ #define RJ54N1_COLUMN_SKIP 0 #define RJ54N1_ROW_SKIP 0 #define RJ54N1_MAX_WIDTH 1600 #define RJ54N1_MAX_HEIGHT 1200 #define PLL_L 2 #define PLL_N 0x31 /* I2C addresses: 0x50, 0x51, 0x60, 0x61 */ /* RJ54N1CB0C has only one fixed colorspace per pixelcode */ struct rj54n1_datafmt { enum v4l2_mbus_pixelcode code; enum v4l2_colorspace colorspace; }; /* Find a data format by a pixel code in an array */ static const struct rj54n1_datafmt *rj54n1_find_datafmt( enum v4l2_mbus_pixelcode code, const struct rj54n1_datafmt *fmt, int n) { int i; for (i = 0; i < n; i++) if (fmt[i].code == code) return fmt + i; return NULL; } static const struct rj54n1_datafmt rj54n1_colour_fmts[] = { {V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG}, {V4L2_MBUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG}, {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB}, {V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB}, {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB}, {V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_COLORSPACE_SRGB}, {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB}, {V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_COLORSPACE_SRGB}, {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB}, }; struct rj54n1_clock_div { u8 ratio_tg; /* can be 0 or an odd number */ u8 ratio_t; u8 ratio_r; u8 ratio_op; u8 ratio_o; }; struct rj54n1 { struct v4l2_subdev subdev; struct v4l2_ctrl_handler hdl; struct rj54n1_clock_div clk_div; const struct rj54n1_datafmt *fmt; struct v4l2_rect rect; /* Sensor window */ unsigned int tgclk_mhz; bool auto_wb; unsigned short width; /* Output window */ unsigned short height; unsigned short resize; /* Sensor * 1024 / resize = Output */ unsigned short scale; u8 bank; }; struct rj54n1_reg_val { u16 reg; u8 val; }; static const struct rj54n1_reg_val bank_4[] = { {0x417, 0}, {0x42c, 0}, {0x42d, 0xf0}, {0x42e, 0}, {0x42f, 0x50}, {0x430, 0xf5}, {0x431, 0x16}, {0x432, 0x20}, {0x433, 0}, {0x434, 0xc8}, {0x43c, 8}, {0x43e, 0x90}, {0x445, 0x83}, {0x4ba, 0x58}, {0x4bb, 4}, {0x4bc, 0x20}, 
{0x4db, 4}, {0x4fe, 2}, }; static const struct rj54n1_reg_val bank_5[] = { {0x514, 0}, {0x516, 0}, {0x518, 0}, {0x51a, 0}, {0x51d, 0xff}, {0x56f, 0x28}, {0x575, 0x40}, {0x5bc, 0x48}, {0x5c1, 6}, {0x5e5, 0x11}, {0x5e6, 0x43}, {0x5e7, 0x33}, {0x5e8, 0x21}, {0x5e9, 0x30}, {0x5ea, 0x0}, {0x5eb, 0xa5}, {0x5ec, 0xff}, {0x5fe, 2}, }; static const struct rj54n1_reg_val bank_7[] = { {0x70a, 0}, {0x714, 0xff}, {0x715, 0xff}, {0x716, 0x1f}, {0x7FE, 2}, }; static const struct rj54n1_reg_val bank_8[] = { {0x800, 0x00}, {0x801, 0x01}, {0x802, 0x61}, {0x805, 0x00}, {0x806, 0x00}, {0x807, 0x00}, {0x808, 0x00}, {0x809, 0x01}, {0x80A, 0x61}, {0x80B, 0x00}, {0x80C, 0x01}, {0x80D, 0x00}, {0x80E, 0x00}, {0x80F, 0x00}, {0x810, 0x00}, {0x811, 0x01}, {0x812, 0x61}, {0x813, 0x00}, {0x814, 0x11}, {0x815, 0x00}, {0x816, 0x41}, {0x817, 0x00}, {0x818, 0x51}, {0x819, 0x01}, {0x81A, 0x1F}, {0x81B, 0x00}, {0x81C, 0x01}, {0x81D, 0x00}, {0x81E, 0x11}, {0x81F, 0x00}, {0x820, 0x41}, {0x821, 0x00}, {0x822, 0x51}, {0x823, 0x00}, {0x824, 0x00}, {0x825, 0x00}, {0x826, 0x47}, {0x827, 0x01}, {0x828, 0x4F}, {0x829, 0x00}, {0x82A, 0x00}, {0x82B, 0x00}, {0x82C, 0x30}, {0x82D, 0x00}, {0x82E, 0x40}, {0x82F, 0x00}, {0x830, 0xB3}, {0x831, 0x00}, {0x832, 0xE3}, {0x833, 0x00}, {0x834, 0x00}, {0x835, 0x00}, {0x836, 0x00}, {0x837, 0x00}, {0x838, 0x00}, {0x839, 0x01}, {0x83A, 0x61}, {0x83B, 0x00}, {0x83C, 0x01}, {0x83D, 0x00}, {0x83E, 0x00}, {0x83F, 0x00}, {0x840, 0x00}, {0x841, 0x01}, {0x842, 0x61}, {0x843, 0x00}, {0x844, 0x1D}, {0x845, 0x00}, {0x846, 0x00}, {0x847, 0x00}, {0x848, 0x00}, {0x849, 0x01}, {0x84A, 0x1F}, {0x84B, 0x00}, {0x84C, 0x05}, {0x84D, 0x00}, {0x84E, 0x19}, {0x84F, 0x01}, {0x850, 0x21}, {0x851, 0x01}, {0x852, 0x5D}, {0x853, 0x00}, {0x854, 0x00}, {0x855, 0x00}, {0x856, 0x19}, {0x857, 0x01}, {0x858, 0x21}, {0x859, 0x00}, {0x85A, 0x00}, {0x85B, 0x00}, {0x85C, 0x00}, {0x85D, 0x00}, {0x85E, 0x00}, {0x85F, 0x00}, {0x860, 0xB3}, {0x861, 0x00}, {0x862, 0xE3}, {0x863, 0x00}, {0x864, 0x00}, {0x865, 0x00}, 
{0x866, 0x00}, {0x867, 0x00}, {0x868, 0x00}, {0x869, 0xE2}, {0x86A, 0x00}, {0x86B, 0x01}, {0x86C, 0x06}, {0x86D, 0x00}, {0x86E, 0x00}, {0x86F, 0x00}, {0x870, 0x60}, {0x871, 0x8C}, {0x872, 0x10}, {0x873, 0x00}, {0x874, 0xE0}, {0x875, 0x00}, {0x876, 0x27}, {0x877, 0x01}, {0x878, 0x00}, {0x879, 0x00}, {0x87A, 0x00}, {0x87B, 0x03}, {0x87C, 0x00}, {0x87D, 0x00}, {0x87E, 0x00}, {0x87F, 0x00}, {0x880, 0x00}, {0x881, 0x00}, {0x882, 0x00}, {0x883, 0x00}, {0x884, 0x00}, {0x885, 0x00}, {0x886, 0xF8}, {0x887, 0x00}, {0x888, 0x03}, {0x889, 0x00}, {0x88A, 0x64}, {0x88B, 0x00}, {0x88C, 0x03}, {0x88D, 0x00}, {0x88E, 0xB1}, {0x88F, 0x00}, {0x890, 0x03}, {0x891, 0x01}, {0x892, 0x1D}, {0x893, 0x00}, {0x894, 0x03}, {0x895, 0x01}, {0x896, 0x4B}, {0x897, 0x00}, {0x898, 0xE5}, {0x899, 0x00}, {0x89A, 0x01}, {0x89B, 0x00}, {0x89C, 0x01}, {0x89D, 0x04}, {0x89E, 0xC8}, {0x89F, 0x00}, {0x8A0, 0x01}, {0x8A1, 0x01}, {0x8A2, 0x61}, {0x8A3, 0x00}, {0x8A4, 0x01}, {0x8A5, 0x00}, {0x8A6, 0x00}, {0x8A7, 0x00}, {0x8A8, 0x00}, {0x8A9, 0x00}, {0x8AA, 0x7F}, {0x8AB, 0x03}, {0x8AC, 0x00}, {0x8AD, 0x00}, {0x8AE, 0x00}, {0x8AF, 0x00}, {0x8B0, 0x00}, {0x8B1, 0x00}, {0x8B6, 0x00}, {0x8B7, 0x01}, {0x8B8, 0x00}, {0x8B9, 0x00}, {0x8BA, 0x02}, {0x8BB, 0x00}, {0x8BC, 0xFF}, {0x8BD, 0x00}, {0x8FE, 2}, }; static const struct rj54n1_reg_val bank_10[] = { {0x10bf, 0x69} }; /* Clock dividers - these are default register values, divider = register + 1 */ static const struct rj54n1_clock_div clk_div = { .ratio_tg = 3 /* default: 5 */, .ratio_t = 4 /* default: 1 */, .ratio_r = 4 /* default: 0 */, .ratio_op = 1 /* default: 5 */, .ratio_o = 9 /* default: 0 */, }; static struct rj54n1 *to_rj54n1(const struct i2c_client *client) { return container_of(i2c_get_clientdata(client), struct rj54n1, subdev); } static int reg_read(struct i2c_client *client, const u16 reg) { struct rj54n1 *rj54n1 = to_rj54n1(client); int ret; /* set bank */ if (rj54n1->bank != reg >> 8) { dev_dbg(&client->dev, "[0x%x] = 0x%x\n", 0xff, reg >> 8); ret = 
i2c_smbus_write_byte_data(client, 0xff, reg >> 8); if (ret < 0) return ret; rj54n1->bank = reg >> 8; } return i2c_smbus_read_byte_data(client, reg & 0xff); } static int reg_write(struct i2c_client *client, const u16 reg, const u8 data) { struct rj54n1 *rj54n1 = to_rj54n1(client); int ret; /* set bank */ if (rj54n1->bank != reg >> 8) { dev_dbg(&client->dev, "[0x%x] = 0x%x\n", 0xff, reg >> 8); ret = i2c_smbus_write_byte_data(client, 0xff, reg >> 8); if (ret < 0) return ret; rj54n1->bank = reg >> 8; } dev_dbg(&client->dev, "[0x%x] = 0x%x\n", reg & 0xff, data); return i2c_smbus_write_byte_data(client, reg & 0xff, data); } static int reg_set(struct i2c_client *client, const u16 reg, const u8 data, const u8 mask) { int ret; ret = reg_read(client, reg); if (ret < 0) return ret; return reg_write(client, reg, (ret & ~mask) | (data & mask)); } static int reg_write_multiple(struct i2c_client *client, const struct rj54n1_reg_val *rv, const int n) { int i, ret; for (i = 0; i < n; i++) { ret = reg_write(client, rv->reg, rv->val); if (ret < 0) return ret; rv++; } return 0; } static int rj54n1_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { if (index >= ARRAY_SIZE(rj54n1_colour_fmts)) return -EINVAL; *code = rj54n1_colour_fmts[index].code; return 0; } static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable) { struct i2c_client *client = v4l2_get_subdevdata(sd); /* Switch between preview and still shot modes */ return reg_set(client, RJ54N1_STILL_CONTROL, (!enable) << 7, 0x80); } static int rj54n1_set_rect(struct i2c_client *client, u16 reg_x, u16 reg_y, u16 reg_xy, u32 width, u32 height) { int ret; ret = reg_write(client, reg_xy, ((width >> 4) & 0x70) | ((height >> 8) & 7)); if (!ret) ret = reg_write(client, reg_x, width & 0xff); if (!ret) ret = reg_write(client, reg_y, height & 0xff); return ret; } /* * Some commands, specifically certain initialisation sequences, require * a commit operation. 
*/ static int rj54n1_commit(struct i2c_client *client) { int ret = reg_write(client, RJ54N1_INIT_START, 1); msleep(10); if (!ret) ret = reg_write(client, RJ54N1_INIT_START, 0); return ret; } static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h, s32 *out_w, s32 *out_h); static int rj54n1_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct rj54n1 *rj54n1 = to_rj54n1(client); const struct v4l2_rect *rect = &a->c; int dummy = 0, output_w, output_h, input_w = rect->width, input_h = rect->height; int ret; /* arbitrary minimum width and height, edges unimportant */ soc_camera_limit_side(&dummy, &input_w, RJ54N1_COLUMN_SKIP, 8, RJ54N1_MAX_WIDTH); soc_camera_limit_side(&dummy, &input_h, RJ54N1_ROW_SKIP, 8, RJ54N1_MAX_HEIGHT); output_w = (input_w * 1024 + rj54n1->resize / 2) / rj54n1->resize; output_h = (input_h * 1024 + rj54n1->resize / 2) / rj54n1->resize; dev_dbg(&client->dev, "Scaling for %dx%d : %u = %dx%d\n", input_w, input_h, rj54n1->resize, output_w, output_h); ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h); if (ret < 0) return ret; rj54n1->width = output_w; rj54n1->height = output_h; rj54n1->resize = ret; rj54n1->rect.width = input_w; rj54n1->rect.height = input_h; return 0; } static int rj54n1_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct rj54n1 *rj54n1 = to_rj54n1(client); a->c = rj54n1->rect; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; return 0; } static int rj54n1_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) { a->bounds.left = RJ54N1_COLUMN_SKIP; a->bounds.top = RJ54N1_ROW_SKIP; a->bounds.width = RJ54N1_MAX_WIDTH; a->bounds.height = RJ54N1_MAX_HEIGHT; a->defrect = a->bounds; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; a->pixelaspect.numerator = 1; a->pixelaspect.denominator = 1; return 0; } static int rj54n1_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct 
i2c_client *client = v4l2_get_subdevdata(sd); struct rj54n1 *rj54n1 = to_rj54n1(client); mf->code = rj54n1->fmt->code; mf->colorspace = rj54n1->fmt->colorspace; mf->field = V4L2_FIELD_NONE; mf->width = rj54n1->width; mf->height = rj54n1->height; return 0; } /* * The actual geometry configuration routine. It scales the input window into * the output one, updates the window sizes and returns an error or the resize * coefficient on success. Note: we only use the "Fixed Scaling" on this camera. */ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h, s32 *out_w, s32 *out_h) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct rj54n1 *rj54n1 = to_rj54n1(client); unsigned int skip, resize, input_w = *in_w, input_h = *in_h, output_w = *out_w, output_h = *out_h; u16 inc_sel, wb_bit8, wb_left, wb_right, wb_top, wb_bottom; unsigned int peak, peak_50, peak_60; int ret; /* * We have a problem with crops, where the window is larger than 512x384 * and output window is larger than a half of the input one. In this * case we have to either reduce the input window to equal or below * 512x384 or the output window to equal or below 1/2 of the input. 
*/ if (output_w > max(512U, input_w / 2)) { if (2 * output_w > RJ54N1_MAX_WIDTH) { input_w = RJ54N1_MAX_WIDTH; output_w = RJ54N1_MAX_WIDTH / 2; } else { input_w = output_w * 2; } dev_dbg(&client->dev, "Adjusted output width: in %u, out %u\n", input_w, output_w); } if (output_h > max(384U, input_h / 2)) { if (2 * output_h > RJ54N1_MAX_HEIGHT) { input_h = RJ54N1_MAX_HEIGHT; output_h = RJ54N1_MAX_HEIGHT / 2; } else { input_h = output_h * 2; } dev_dbg(&client->dev, "Adjusted output height: in %u, out %u\n", input_h, output_h); } /* Idea: use the read mode for snapshots, handle separate geometries */ ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_S_L, RJ54N1_Y_OUTPUT_SIZE_S_L, RJ54N1_XY_OUTPUT_SIZE_S_H, output_w, output_h); if (!ret) ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_P_L, RJ54N1_Y_OUTPUT_SIZE_P_L, RJ54N1_XY_OUTPUT_SIZE_P_H, output_w, output_h); if (ret < 0) return ret; if (output_w > input_w && output_h > input_h) { input_w = output_w; input_h = output_h; resize = 1024; } else { unsigned int resize_x, resize_y; resize_x = (input_w * 1024 + output_w / 2) / output_w; resize_y = (input_h * 1024 + output_h / 2) / output_h; /* We want max(resize_x, resize_y), check if it still fits */ if (resize_x > resize_y && (output_h * resize_x + 512) / 1024 > RJ54N1_MAX_HEIGHT) resize = (RJ54N1_MAX_HEIGHT * 1024 + output_h / 2) / output_h; else if (resize_y > resize_x && (output_w * resize_y + 512) / 1024 > RJ54N1_MAX_WIDTH) resize = (RJ54N1_MAX_WIDTH * 1024 + output_w / 2) / output_w; else resize = max(resize_x, resize_y); /* Prohibited value ranges */ switch (resize) { case 2040 ... 2047: resize = 2039; break; case 4080 ... 4095: resize = 4079; break; case 8160 ... 8191: resize = 8159; break; case 16320 ... 16384: resize = 16319; } } /* Set scaling */ ret = reg_write(client, RJ54N1_RESIZE_HOLD_L, resize & 0xff); if (!ret) ret = reg_write(client, RJ54N1_RESIZE_HOLD_H, resize >> 8); if (ret < 0) return ret; /* * Configure a skipping bitmask. 
The sensor will select a skipping value * among set bits automatically. This is very unclear in the datasheet * too. I was told, in this register one enables all skipping values, * that are required for a specific resize, and the camera selects * automatically, which ones to use. But it is unclear how to identify, * which cropping values are needed. Secondly, why don't we just set all * bits and let the camera choose? Would it increase processing time and * reduce the framerate? Using 0xfffc for INC_USE_SEL doesn't seem to * improve the image quality or stability for larger frames (see comment * above), but I didn't check the framerate. */ skip = min(resize / 1024, 15U); inc_sel = 1 << skip; if (inc_sel <= 2) inc_sel = 0xc; else if (resize & 1023 && skip < 15) inc_sel |= 1 << (skip + 1); ret = reg_write(client, RJ54N1_INC_USE_SEL_L, inc_sel & 0xfc); if (!ret) ret = reg_write(client, RJ54N1_INC_USE_SEL_H, inc_sel >> 8); if (!rj54n1->auto_wb) { /* Auto white balance window */ wb_left = output_w / 16; wb_right = (3 * output_w / 4 - 3) / 4; wb_top = output_h / 16; wb_bottom = (3 * output_h / 4 - 3) / 4; wb_bit8 = ((wb_left >> 2) & 0x40) | ((wb_top >> 4) & 0x10) | ((wb_right >> 6) & 4) | ((wb_bottom >> 8) & 1); if (!ret) ret = reg_write(client, RJ54N1_BIT8_WB, wb_bit8); if (!ret) ret = reg_write(client, RJ54N1_HCAPS_WB, wb_left); if (!ret) ret = reg_write(client, RJ54N1_VCAPS_WB, wb_top); if (!ret) ret = reg_write(client, RJ54N1_HCAPE_WB, wb_right); if (!ret) ret = reg_write(client, RJ54N1_VCAPE_WB, wb_bottom); } /* Antiflicker */ peak = 12 * RJ54N1_MAX_WIDTH * (1 << 14) * resize / rj54n1->tgclk_mhz / 10000; peak_50 = peak / 6; peak_60 = peak / 5; if (!ret) ret = reg_write(client, RJ54N1_PEAK_H, ((peak_50 >> 4) & 0xf0) | (peak_60 >> 8)); if (!ret) ret = reg_write(client, RJ54N1_PEAK_50, peak_50); if (!ret) ret = reg_write(client, RJ54N1_PEAK_60, peak_60); if (!ret) ret = reg_write(client, RJ54N1_PEAK_DIFF, peak / 150); /* Start resizing */ if (!ret) ret = 
reg_write(client, RJ54N1_RESIZE_CONTROL, RESIZE_HOLD_SEL | RESIZE_GO | 1); if (ret < 0) return ret; /* Constant taken from manufacturer's example */ msleep(230); ret = reg_write(client, RJ54N1_RESIZE_CONTROL, RESIZE_HOLD_SEL | 1); if (ret < 0) return ret; *in_w = (output_w * resize + 512) / 1024; *in_h = (output_h * resize + 512) / 1024; *out_w = output_w; *out_h = output_h; dev_dbg(&client->dev, "Scaled for %dx%d : %u = %ux%u, skip %u\n", *in_w, *in_h, resize, output_w, output_h, skip); return resize; } static int rj54n1_set_clock(struct i2c_client *client) { struct rj54n1 *rj54n1 = to_rj54n1(client); int ret; /* Enable external clock */ ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK | SOFT_STDBY); /* Leave stand-by. Note: use this when implementing suspend / resume */ if (!ret) ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK); if (!ret) ret = reg_write(client, RJ54N1_PLL_L, PLL_L); if (!ret) ret = reg_write(client, RJ54N1_PLL_N, PLL_N); /* TGCLK dividers */ if (!ret) ret = reg_write(client, RJ54N1_RATIO_TG, rj54n1->clk_div.ratio_tg); if (!ret) ret = reg_write(client, RJ54N1_RATIO_T, rj54n1->clk_div.ratio_t); if (!ret) ret = reg_write(client, RJ54N1_RATIO_R, rj54n1->clk_div.ratio_r); /* Enable TGCLK & RAMP */ if (!ret) ret = reg_write(client, RJ54N1_RAMP_TGCLK_EN, 3); /* Disable clock output */ if (!ret) ret = reg_write(client, RJ54N1_OCLK_DSP, 0); /* Set divisors */ if (!ret) ret = reg_write(client, RJ54N1_RATIO_OP, rj54n1->clk_div.ratio_op); if (!ret) ret = reg_write(client, RJ54N1_RATIO_O, rj54n1->clk_div.ratio_o); /* Enable OCLK */ if (!ret) ret = reg_write(client, RJ54N1_OCLK_SEL_EN, 1); /* Use PLL for Timing Generator, write 2 to reserved bits */ if (!ret) ret = reg_write(client, RJ54N1_TG_BYPASS, 2); /* Take sensor out of reset */ if (!ret) ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK | SEN_RSTX); /* Enable PLL */ if (!ret) ret = reg_write(client, RJ54N1_PLL_EN, 1); /* Wait for PLL to stabilise */ msleep(10); /* Enable clock to 
frequency divider */ if (!ret) ret = reg_write(client, RJ54N1_CLK_RST, 1); if (!ret) ret = reg_read(client, RJ54N1_CLK_RST); if (ret != 1) { dev_err(&client->dev, "Resetting RJ54N1CB0C clock failed: %d!\n", ret); return -EIO; } /* Start the PLL */ ret = reg_set(client, RJ54N1_OCLK_DSP, 1, 1); /* Enable OCLK */ if (!ret) ret = reg_write(client, RJ54N1_OCLK_SEL_EN, 1); return ret; } static int rj54n1_reg_init(struct i2c_client *client) { struct rj54n1 *rj54n1 = to_rj54n1(client); int ret = rj54n1_set_clock(client); if (!ret) ret = reg_write_multiple(client, bank_7, ARRAY_SIZE(bank_7)); if (!ret) ret = reg_write_multiple(client, bank_10, ARRAY_SIZE(bank_10)); /* Set binning divisors */ if (!ret) ret = reg_write(client, RJ54N1_SCALE_1_2_LEV, 3 | (7 << 4)); if (!ret) ret = reg_write(client, RJ54N1_SCALE_4_LEV, 0xf); /* Switch to fixed resize mode */ if (!ret) ret = reg_write(client, RJ54N1_RESIZE_CONTROL, RESIZE_HOLD_SEL | 1); /* Set gain */ if (!ret) ret = reg_write(client, RJ54N1_Y_GAIN, 0x84); /* * Mirror the image back: default is upside down and left-to-right... * Set manual preview / still shot switching */ if (!ret) ret = reg_write(client, RJ54N1_MIRROR_STILL_MODE, 0x27); if (!ret) ret = reg_write_multiple(client, bank_4, ARRAY_SIZE(bank_4)); /* Auto exposure area */ if (!ret) ret = reg_write(client, RJ54N1_EXPOSURE_CONTROL, 0x80); /* Check current auto WB config */ if (!ret) ret = reg_read(client, RJ54N1_WB_SEL_WEIGHT_I); if (ret >= 0) { rj54n1->auto_wb = ret & 0x80; ret = reg_write_multiple(client, bank_5, ARRAY_SIZE(bank_5)); } if (!ret) ret = reg_write_multiple(client, bank_8, ARRAY_SIZE(bank_8)); if (!ret) ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK | DSP_RSTX | SEN_RSTX); /* Commit init */ if (!ret) ret = rj54n1_commit(client); /* Take DSP, TG, sensor out of reset */ if (!ret) ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK | DSP_RSTX | TG_RSTX | SEN_RSTX); /* Start register update? 
Same register as 0x?FE in many bank_* sets */ if (!ret) ret = reg_write(client, RJ54N1_FWFLG, 2); /* Constant taken from manufacturer's example */ msleep(700); return ret; } static int rj54n1_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct rj54n1 *rj54n1 = to_rj54n1(client); const struct rj54n1_datafmt *fmt; int align = mf->code == V4L2_MBUS_FMT_SBGGR10_1X10 || mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE || mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE || mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE || mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE; dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n", __func__, mf->code, mf->width, mf->height); fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts, ARRAY_SIZE(rj54n1_colour_fmts)); if (!fmt) { fmt = rj54n1->fmt; mf->code = fmt->code; } mf->field = V4L2_FIELD_NONE; mf->colorspace = fmt->colorspace; v4l_bound_align_image(&mf->width, 112, RJ54N1_MAX_WIDTH, align, &mf->height, 84, RJ54N1_MAX_HEIGHT, align, 0); return 0; } static int rj54n1_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct rj54n1 *rj54n1 = to_rj54n1(client); const struct rj54n1_datafmt *fmt; int output_w, output_h, max_w, max_h, input_w = rj54n1->rect.width, input_h = rj54n1->rect.height; int ret; /* * The host driver can call us without .try_fmt(), so, we have to take * care ourseleves */ rj54n1_try_fmt(sd, mf); /* * Verify if the sensor has just been powered on. TODO: replace this * with proper PM, when a suitable API is available. */ ret = reg_read(client, RJ54N1_RESET_STANDBY); if (ret < 0) return ret; if (!(ret & E_EXCLK)) { ret = rj54n1_reg_init(client); if (ret < 0) return ret; } dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n", __func__, mf->code, mf->width, mf->height); /* RA_SEL_UL is only relevant for raw modes, ignored otherwise. 
*/ switch (mf->code) { case V4L2_MBUS_FMT_YUYV8_2X8: ret = reg_write(client, RJ54N1_OUT_SEL, 0); if (!ret) ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); break; case V4L2_MBUS_FMT_YVYU8_2X8: ret = reg_write(client, RJ54N1_OUT_SEL, 0); if (!ret) ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); break; case V4L2_MBUS_FMT_RGB565_2X8_LE: ret = reg_write(client, RJ54N1_OUT_SEL, 0x11); if (!ret) ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); break; case V4L2_MBUS_FMT_RGB565_2X8_BE: ret = reg_write(client, RJ54N1_OUT_SEL, 0x11); if (!ret) ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); break; case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE: ret = reg_write(client, RJ54N1_OUT_SEL, 4); if (!ret) ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); if (!ret) ret = reg_write(client, RJ54N1_RA_SEL_UL, 0); break; case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE: ret = reg_write(client, RJ54N1_OUT_SEL, 4); if (!ret) ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); if (!ret) ret = reg_write(client, RJ54N1_RA_SEL_UL, 8); break; case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE: ret = reg_write(client, RJ54N1_OUT_SEL, 4); if (!ret) ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); if (!ret) ret = reg_write(client, RJ54N1_RA_SEL_UL, 0); break; case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE: ret = reg_write(client, RJ54N1_OUT_SEL, 4); if (!ret) ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); if (!ret) ret = reg_write(client, RJ54N1_RA_SEL_UL, 8); break; case V4L2_MBUS_FMT_SBGGR10_1X10: ret = reg_write(client, RJ54N1_OUT_SEL, 5); break; default: ret = -EINVAL; } /* Special case: a raw mode with 10 bits of data per clock tick */ if (!ret) ret = reg_set(client, RJ54N1_OCLK_SEL_EN, (mf->code == V4L2_MBUS_FMT_SBGGR10_1X10) << 1, 2); if (ret < 0) return ret; /* Supported scales 1:1 >= scale > 1:16 */ max_w = mf->width * (16 * 1024 - 1) / 1024; if (input_w > max_w) input_w = max_w; max_h = mf->height * (16 * 1024 - 1) / 1024; if (input_h > max_h) input_h = max_h; output_w = mf->width; output_h = mf->height; ret = rj54n1_sensor_scale(sd, 
&input_w, &input_h, &output_w, &output_h); if (ret < 0) return ret; fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts, ARRAY_SIZE(rj54n1_colour_fmts)); rj54n1->fmt = fmt; rj54n1->resize = ret; rj54n1->rect.width = input_w; rj54n1->rect.height = input_h; rj54n1->width = output_w; rj54n1->height = output_h; mf->width = output_w; mf->height = output_h; mf->field = V4L2_FIELD_NONE; mf->colorspace = fmt->colorspace; return 0; } static int rj54n1_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *id) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; if (id->match.addr != client->addr) return -ENODEV; id->ident = V4L2_IDENT_RJ54N1CB0C; id->revision = 0; return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int rj54n1_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg < 0x400 || reg->reg > 0x1fff) /* Registers > 0x0800 are only available from Sharp support */ return -EINVAL; if (reg->match.addr != client->addr) return -ENODEV; reg->size = 1; reg->val = reg_read(client, reg->reg); if (reg->val > 0xff) return -EIO; return 0; } static int rj54n1_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg < 0x400 || reg->reg > 0x1fff) /* Registers >= 0x0800 are only available from Sharp support */ return -EINVAL; if (reg->match.addr != client->addr) return -ENODEV; if (reg_write(client, reg->reg, reg->val) < 0) return -EIO; return 0; } #endif static int rj54n1_s_power(struct v4l2_subdev *sd, int on) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); return soc_camera_set_power(&client->dev, ssdd, on); } static int rj54n1_s_ctrl(struct v4l2_ctrl *ctrl) { struct rj54n1 
*rj54n1 = container_of(ctrl->handler, struct rj54n1, hdl); struct v4l2_subdev *sd = &rj54n1->subdev; struct i2c_client *client = v4l2_get_subdevdata(sd); int data; switch (ctrl->id) { case V4L2_CID_VFLIP: if (ctrl->val) data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 1); else data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 1, 1); if (data < 0) return -EIO; return 0; case V4L2_CID_HFLIP: if (ctrl->val) data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 2); else data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 2, 2); if (data < 0) return -EIO; return 0; case V4L2_CID_GAIN: if (reg_write(client, RJ54N1_Y_GAIN, ctrl->val * 2) < 0) return -EIO; return 0; case V4L2_CID_AUTO_WHITE_BALANCE: /* Auto WB area - whole image */ if (reg_set(client, RJ54N1_WB_SEL_WEIGHT_I, ctrl->val << 7, 0x80) < 0) return -EIO; rj54n1->auto_wb = ctrl->val; return 0; } return -EINVAL; } static const struct v4l2_ctrl_ops rj54n1_ctrl_ops = { .s_ctrl = rj54n1_s_ctrl, }; static struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = { .g_chip_ident = rj54n1_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = rj54n1_g_register, .s_register = rj54n1_s_register, #endif .s_power = rj54n1_s_power, }; static int rj54n1_g_mbus_config(struct v4l2_subdev *sd, struct v4l2_mbus_config *cfg) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING | V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH; cfg->type = V4L2_MBUS_PARALLEL; cfg->flags = soc_camera_apply_board_flags(ssdd, cfg); return 0; } static int rj54n1_s_mbus_config(struct v4l2_subdev *sd, const struct v4l2_mbus_config *cfg) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); /* Figures 2.5-1 to 2.5-3 - default falling pixclk edge */ if (soc_camera_apply_board_flags(ssdd, cfg) 
& V4L2_MBUS_PCLK_SAMPLE_RISING) return reg_write(client, RJ54N1_OUT_SIGPO, 1 << 4); else return reg_write(client, RJ54N1_OUT_SIGPO, 0); } static struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = { .s_stream = rj54n1_s_stream, .s_mbus_fmt = rj54n1_s_fmt, .g_mbus_fmt = rj54n1_g_fmt, .try_mbus_fmt = rj54n1_try_fmt, .enum_mbus_fmt = rj54n1_enum_fmt, .g_crop = rj54n1_g_crop, .s_crop = rj54n1_s_crop, .cropcap = rj54n1_cropcap, .g_mbus_config = rj54n1_g_mbus_config, .s_mbus_config = rj54n1_s_mbus_config, }; static struct v4l2_subdev_ops rj54n1_subdev_ops = { .core = &rj54n1_subdev_core_ops, .video = &rj54n1_subdev_video_ops, }; /* * Interface active, can use i2c. If it fails, it can indeed mean, that * this wasn't our capture interface, so, we wait for the right one */ static int rj54n1_video_probe(struct i2c_client *client, struct rj54n1_pdata *priv) { struct rj54n1 *rj54n1 = to_rj54n1(client); int data1, data2; int ret; ret = rj54n1_s_power(&rj54n1->subdev, 1); if (ret < 0) return ret; /* Read out the chip version register */ data1 = reg_read(client, RJ54N1_DEV_CODE); data2 = reg_read(client, RJ54N1_DEV_CODE2); if (data1 != 0x51 || data2 != 0x10) { ret = -ENODEV; dev_info(&client->dev, "No RJ54N1CB0C found, read 0x%x:0x%x\n", data1, data2); goto done; } /* Configure IOCTL polarity from the platform data: 0 or 1 << 7. 
*/ ret = reg_write(client, RJ54N1_IOC, priv->ioctl_high << 7); if (ret < 0) goto done; dev_info(&client->dev, "Detected a RJ54N1CB0C chip ID 0x%x:0x%x\n", data1, data2); ret = v4l2_ctrl_handler_setup(&rj54n1->hdl); done: rj54n1_s_power(&rj54n1->subdev, 0); return ret; } static int rj54n1_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct rj54n1 *rj54n1; struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct rj54n1_pdata *rj54n1_priv; int ret; if (!ssdd || !ssdd->drv_priv) { dev_err(&client->dev, "RJ54N1CB0C: missing platform data!\n"); return -EINVAL; } rj54n1_priv = ssdd->drv_priv; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_warn(&adapter->dev, "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE\n"); return -EIO; } rj54n1 = devm_kzalloc(&client->dev, sizeof(struct rj54n1), GFP_KERNEL); if (!rj54n1) return -ENOMEM; v4l2_i2c_subdev_init(&rj54n1->subdev, client, &rj54n1_subdev_ops); v4l2_ctrl_handler_init(&rj54n1->hdl, 4); v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops, V4L2_CID_GAIN, 0, 127, 1, 66); v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1); rj54n1->subdev.ctrl_handler = &rj54n1->hdl; if (rj54n1->hdl.error) return rj54n1->hdl.error; rj54n1->clk_div = clk_div; rj54n1->rect.left = RJ54N1_COLUMN_SKIP; rj54n1->rect.top = RJ54N1_ROW_SKIP; rj54n1->rect.width = RJ54N1_MAX_WIDTH; rj54n1->rect.height = RJ54N1_MAX_HEIGHT; rj54n1->width = RJ54N1_MAX_WIDTH; rj54n1->height = RJ54N1_MAX_HEIGHT; rj54n1->fmt = &rj54n1_colour_fmts[0]; rj54n1->resize = 1024; rj54n1->tgclk_mhz = (rj54n1_priv->mclk_freq / PLL_L * PLL_N) / (clk_div.ratio_tg + 1) / (clk_div.ratio_t + 1); ret = rj54n1_video_probe(client, rj54n1_priv); if (ret < 0) 
v4l2_ctrl_handler_free(&rj54n1->hdl); return ret; } static int rj54n1_remove(struct i2c_client *client) { struct rj54n1 *rj54n1 = to_rj54n1(client); struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); v4l2_device_unregister_subdev(&rj54n1->subdev); if (ssdd->free_bus) ssdd->free_bus(ssdd); v4l2_ctrl_handler_free(&rj54n1->hdl); return 0; } static const struct i2c_device_id rj54n1_id[] = { { "rj54n1cb0c", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, rj54n1_id); static struct i2c_driver rj54n1_i2c_driver = { .driver = { .name = "rj54n1cb0c", }, .probe = rj54n1_probe, .remove = rj54n1_remove, .id_table = rj54n1_id, }; module_i2c_driver(rj54n1_i2c_driver); MODULE_DESCRIPTION("Sharp RJ54N1CB0C Camera driver"); MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); MODULE_LICENSE("GPL v2");
gpl-2.0
veo-labs/linux-veobox
drivers/net/hamradio/baycom_ser_hdx.c
2086
21003
/*****************************************************************************/ /* * baycom_ser_hdx.c -- baycom ser12 halfduplex radio modem driver. * * Copyright (C) 1996-2000 Thomas Sailer (sailer@ife.ee.ethz.ch) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Please note that the GPL allows you to use the driver, NOT the radio. * In order to use the radio, you need a license from the communications * authority of your country. * * * Supported modems * * ser12: This is a very simple 1200 baud AFSK modem. The modem consists only * of a modulator/demodulator chip, usually a TI TCM3105. The computer * is responsible for regenerating the receiver bit clock, as well as * for handling the HDLC protocol. The modem connects to a serial port, * hence the name. Since the serial port is not used as an async serial * port, the kernel driver for serial ports cannot be used, and this * driver only supports standard serial hardware (8250, 16450, 16550A) * * * Command line options (insmod command line) * * mode ser12 hardware DCD * ser12* software DCD * ser12@ hardware/software DCD, i.e. 
no explicit DCD signal but hardware * mutes audio input to the modem * ser12+ hardware DCD, inverted signal at DCD pin * iobase base address of the port; common values are 0x3f8, 0x2f8, 0x3e8, 0x2e8 * irq interrupt line of the port; common values are 4,3 * * * History: * 0.1 26.06.1996 Adapted from baycom.c and made network driver interface * 18.10.1996 Changed to new user space access routines (copy_{to,from}_user) * 0.3 26.04.1997 init code/data tagged * 0.4 08.07.1997 alternative ser12 decoding algorithm (uses delta CTS ints) * 0.5 11.11.1997 ser12/par96 split into separate files * 0.6 14.04.1998 cleanups * 0.7 03.08.1999 adapt to Linus' new __setup/__initcall * 0.8 10.08.1999 use module_init/module_exit * 0.9 12.02.2000 adapted to softnet driver interface * 0.10 03.07.2000 fix interface name handling */ /*****************************************************************************/ #include <linux/capability.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/string.h> #include <linux/init.h> #include <linux/interrupt.h> #include <asm/uaccess.h> #include <asm/io.h> #include <linux/hdlcdrv.h> #include <linux/baycom.h> #include <linux/jiffies.h> /* --------------------------------------------------------------------- */ #define BAYCOM_DEBUG /* --------------------------------------------------------------------- */ static const char bc_drvname[] = "baycom_ser_hdx"; static const char bc_drvinfo[] = KERN_INFO "baycom_ser_hdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" "baycom_ser_hdx: version 0.10\n"; /* --------------------------------------------------------------------- */ #define NR_PORTS 4 static struct net_device *baycom_device[NR_PORTS]; /* --------------------------------------------------------------------- */ #define RBR(iobase) (iobase+0) #define THR(iobase) (iobase+0) #define IER(iobase) (iobase+1) #define IIR(iobase) (iobase+2) #define FCR(iobase) (iobase+2) #define LCR(iobase) (iobase+3) #define MCR(iobase) (iobase+4) #define 
LSR(iobase) (iobase+5) #define MSR(iobase) (iobase+6) #define SCR(iobase) (iobase+7) #define DLL(iobase) (iobase+0) #define DLM(iobase) (iobase+1) #define SER12_EXTENT 8 /* ---------------------------------------------------------------------- */ /* * Information that need to be kept for each board. */ struct baycom_state { struct hdlcdrv_state hdrv; int opt_dcd; struct modem_state { short arb_divider; unsigned char flags; unsigned int shreg; struct modem_state_ser12 { unsigned char tx_bit; int dcd_sum0, dcd_sum1, dcd_sum2; unsigned char last_sample; unsigned char last_rxbit; unsigned int dcd_shreg; unsigned int dcd_time; unsigned int bit_pll; unsigned char interm_sample; } ser12; } modem; #ifdef BAYCOM_DEBUG struct debug_vals { unsigned long last_jiffies; unsigned cur_intcnt; unsigned last_intcnt; int cur_pllcorr; int last_pllcorr; } debug_vals; #endif /* BAYCOM_DEBUG */ }; /* --------------------------------------------------------------------- */ static inline void baycom_int_freq(struct baycom_state *bc) { #ifdef BAYCOM_DEBUG unsigned long cur_jiffies = jiffies; /* * measure the interrupt frequency */ bc->debug_vals.cur_intcnt++; if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) { bc->debug_vals.last_jiffies = cur_jiffies; bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt; bc->debug_vals.cur_intcnt = 0; bc->debug_vals.last_pllcorr = bc->debug_vals.cur_pllcorr; bc->debug_vals.cur_pllcorr = 0; } #endif /* BAYCOM_DEBUG */ } /* --------------------------------------------------------------------- */ /* * ===================== SER12 specific routines ========================= */ static inline void ser12_set_divisor(struct net_device *dev, unsigned char divisor) { outb(0x81, LCR(dev->base_addr)); /* DLAB = 1 */ outb(divisor, DLL(dev->base_addr)); outb(0, DLM(dev->base_addr)); outb(0x01, LCR(dev->base_addr)); /* word length = 6 */ /* * make sure the next interrupt is generated; * 0 must be used to power the modem; the modem draws its * power from 
the TxD line */ outb(0x00, THR(dev->base_addr)); /* * it is important not to set the divider while transmitting; * this reportedly makes some UARTs generating interrupts * in the hundredthousands per second region * Reported by: Ignacio.Arenaza@studi.epfl.ch (Ignacio Arenaza Nuno) */ } /* --------------------------------------------------------------------- */ /* * must call the TX arbitrator every 10ms */ #define SER12_ARB_DIVIDER(bc) (bc->opt_dcd ? 24 : 36) #define SER12_DCD_INTERVAL(bc) (bc->opt_dcd ? 12 : 240) static inline void ser12_tx(struct net_device *dev, struct baycom_state *bc) { /* one interrupt per channel bit */ ser12_set_divisor(dev, 12); /* * first output the last bit (!) then call HDLC transmitter, * since this may take quite long */ outb(0x0e | (!!bc->modem.ser12.tx_bit), MCR(dev->base_addr)); if (bc->modem.shreg <= 1) bc->modem.shreg = 0x10000 | hdlcdrv_getbits(&bc->hdrv); bc->modem.ser12.tx_bit = !(bc->modem.ser12.tx_bit ^ (bc->modem.shreg & 1)); bc->modem.shreg >>= 1; } /* --------------------------------------------------------------------- */ static inline void ser12_rx(struct net_device *dev, struct baycom_state *bc) { unsigned char cur_s; /* * do demodulator */ cur_s = inb(MSR(dev->base_addr)) & 0x10; /* the CTS line */ hdlcdrv_channelbit(&bc->hdrv, cur_s); bc->modem.ser12.dcd_shreg = (bc->modem.ser12.dcd_shreg << 1) | (cur_s != bc->modem.ser12.last_sample); bc->modem.ser12.last_sample = cur_s; if(bc->modem.ser12.dcd_shreg & 1) { if (!bc->opt_dcd) { unsigned int dcdspos, dcdsneg; dcdspos = dcdsneg = 0; dcdspos += ((bc->modem.ser12.dcd_shreg >> 1) & 1); if (!(bc->modem.ser12.dcd_shreg & 0x7ffffffe)) dcdspos += 2; dcdsneg += ((bc->modem.ser12.dcd_shreg >> 2) & 1); dcdsneg += ((bc->modem.ser12.dcd_shreg >> 3) & 1); dcdsneg += ((bc->modem.ser12.dcd_shreg >> 4) & 1); bc->modem.ser12.dcd_sum0 += 16*dcdspos - dcdsneg; } else bc->modem.ser12.dcd_sum0--; } if(!bc->modem.ser12.dcd_time) { hdlcdrv_setdcd(&bc->hdrv, (bc->modem.ser12.dcd_sum0 + 
bc->modem.ser12.dcd_sum1 + bc->modem.ser12.dcd_sum2) < 0); bc->modem.ser12.dcd_sum2 = bc->modem.ser12.dcd_sum1; bc->modem.ser12.dcd_sum1 = bc->modem.ser12.dcd_sum0; /* offset to ensure DCD off on silent input */ bc->modem.ser12.dcd_sum0 = 2; bc->modem.ser12.dcd_time = SER12_DCD_INTERVAL(bc); } bc->modem.ser12.dcd_time--; if (!bc->opt_dcd) { /* * PLL code for the improved software DCD algorithm */ if (bc->modem.ser12.interm_sample) { /* * intermediate sample; set timing correction to normal */ ser12_set_divisor(dev, 4); } else { /* * do PLL correction and call HDLC receiver */ switch (bc->modem.ser12.dcd_shreg & 7) { case 1: /* transition too late */ ser12_set_divisor(dev, 5); #ifdef BAYCOM_DEBUG bc->debug_vals.cur_pllcorr++; #endif /* BAYCOM_DEBUG */ break; case 4: /* transition too early */ ser12_set_divisor(dev, 3); #ifdef BAYCOM_DEBUG bc->debug_vals.cur_pllcorr--; #endif /* BAYCOM_DEBUG */ break; default: ser12_set_divisor(dev, 4); break; } bc->modem.shreg >>= 1; if (bc->modem.ser12.last_sample == bc->modem.ser12.last_rxbit) bc->modem.shreg |= 0x10000; bc->modem.ser12.last_rxbit = bc->modem.ser12.last_sample; } if (++bc->modem.ser12.interm_sample >= 3) bc->modem.ser12.interm_sample = 0; /* * DCD stuff */ if (bc->modem.ser12.dcd_shreg & 1) { unsigned int dcdspos, dcdsneg; dcdspos = dcdsneg = 0; dcdspos += ((bc->modem.ser12.dcd_shreg >> 1) & 1); dcdspos += (!(bc->modem.ser12.dcd_shreg & 0x7ffffffe)) << 1; dcdsneg += ((bc->modem.ser12.dcd_shreg >> 2) & 1); dcdsneg += ((bc->modem.ser12.dcd_shreg >> 3) & 1); dcdsneg += ((bc->modem.ser12.dcd_shreg >> 4) & 1); bc->modem.ser12.dcd_sum0 += 16*dcdspos - dcdsneg; } } else { /* * PLL algorithm for the hardware squelch DCD algorithm */ if (bc->modem.ser12.interm_sample) { /* * intermediate sample; set timing correction to normal */ ser12_set_divisor(dev, 6); } else { /* * do PLL correction and call HDLC receiver */ switch (bc->modem.ser12.dcd_shreg & 3) { case 1: /* transition too late */ ser12_set_divisor(dev, 7); #ifdef 
BAYCOM_DEBUG bc->debug_vals.cur_pllcorr++; #endif /* BAYCOM_DEBUG */ break; case 2: /* transition too early */ ser12_set_divisor(dev, 5); #ifdef BAYCOM_DEBUG bc->debug_vals.cur_pllcorr--; #endif /* BAYCOM_DEBUG */ break; default: ser12_set_divisor(dev, 6); break; } bc->modem.shreg >>= 1; if (bc->modem.ser12.last_sample == bc->modem.ser12.last_rxbit) bc->modem.shreg |= 0x10000; bc->modem.ser12.last_rxbit = bc->modem.ser12.last_sample; } bc->modem.ser12.interm_sample = !bc->modem.ser12.interm_sample; /* * DCD stuff */ bc->modem.ser12.dcd_sum0 -= (bc->modem.ser12.dcd_shreg & 1); } outb(0x0d, MCR(dev->base_addr)); /* transmitter off */ if (bc->modem.shreg & 1) { hdlcdrv_putbits(&bc->hdrv, bc->modem.shreg >> 1); bc->modem.shreg = 0x10000; } if(!bc->modem.ser12.dcd_time) { if (bc->opt_dcd & 1) hdlcdrv_setdcd(&bc->hdrv, !((inb(MSR(dev->base_addr)) ^ bc->opt_dcd) & 0x80)); else hdlcdrv_setdcd(&bc->hdrv, (bc->modem.ser12.dcd_sum0 + bc->modem.ser12.dcd_sum1 + bc->modem.ser12.dcd_sum2) < 0); bc->modem.ser12.dcd_sum2 = bc->modem.ser12.dcd_sum1; bc->modem.ser12.dcd_sum1 = bc->modem.ser12.dcd_sum0; /* offset to ensure DCD off on silent input */ bc->modem.ser12.dcd_sum0 = 2; bc->modem.ser12.dcd_time = SER12_DCD_INTERVAL(bc); } bc->modem.ser12.dcd_time--; } /* --------------------------------------------------------------------- */ static irqreturn_t ser12_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct baycom_state *bc = netdev_priv(dev); unsigned char iir; if (!dev || !bc || bc->hdrv.magic != HDLCDRV_MAGIC) return IRQ_NONE; /* fast way out */ if ((iir = inb(IIR(dev->base_addr))) & 1) return IRQ_NONE; baycom_int_freq(bc); do { switch (iir & 6) { case 6: inb(LSR(dev->base_addr)); break; case 4: inb(RBR(dev->base_addr)); break; case 2: /* * check if transmitter active */ if (hdlcdrv_ptt(&bc->hdrv)) ser12_tx(dev, bc); else { ser12_rx(dev, bc); bc->modem.arb_divider--; } outb(0x00, THR(dev->base_addr)); break; default: 
inb(MSR(dev->base_addr)); break; } iir = inb(IIR(dev->base_addr)); } while (!(iir & 1)); if (bc->modem.arb_divider <= 0) { bc->modem.arb_divider = SER12_ARB_DIVIDER(bc); local_irq_enable(); hdlcdrv_arbitrate(dev, &bc->hdrv); } local_irq_enable(); hdlcdrv_transmitter(dev, &bc->hdrv); hdlcdrv_receiver(dev, &bc->hdrv); local_irq_disable(); return IRQ_HANDLED; } /* --------------------------------------------------------------------- */ enum uart { c_uart_unknown, c_uart_8250, c_uart_16450, c_uart_16550, c_uart_16550A}; static const char *uart_str[] = { "unknown", "8250", "16450", "16550", "16550A" }; static enum uart ser12_check_uart(unsigned int iobase) { unsigned char b1,b2,b3; enum uart u; enum uart uart_tab[] = { c_uart_16450, c_uart_unknown, c_uart_16550, c_uart_16550A }; b1 = inb(MCR(iobase)); outb(b1 | 0x10, MCR(iobase)); /* loopback mode */ b2 = inb(MSR(iobase)); outb(0x1a, MCR(iobase)); b3 = inb(MSR(iobase)) & 0xf0; outb(b1, MCR(iobase)); /* restore old values */ outb(b2, MSR(iobase)); if (b3 != 0x90) return c_uart_unknown; inb(RBR(iobase)); inb(RBR(iobase)); outb(0x01, FCR(iobase)); /* enable FIFOs */ u = uart_tab[(inb(IIR(iobase)) >> 6) & 3]; if (u == c_uart_16450) { outb(0x5a, SCR(iobase)); b1 = inb(SCR(iobase)); outb(0xa5, SCR(iobase)); b2 = inb(SCR(iobase)); if ((b1 != 0x5a) || (b2 != 0xa5)) u = c_uart_8250; } return u; } /* --------------------------------------------------------------------- */ static int ser12_open(struct net_device *dev) { struct baycom_state *bc = netdev_priv(dev); enum uart u; if (!dev || !bc) return -ENXIO; if (!dev->base_addr || dev->base_addr > 0x1000-SER12_EXTENT || dev->irq < 2 || dev->irq > 15) return -ENXIO; if (!request_region(dev->base_addr, SER12_EXTENT, "baycom_ser12")) return -EACCES; memset(&bc->modem, 0, sizeof(bc->modem)); bc->hdrv.par.bitrate = 1200; if ((u = ser12_check_uart(dev->base_addr)) == c_uart_unknown) { release_region(dev->base_addr, SER12_EXTENT); return -EIO; } outb(0, FCR(dev->base_addr)); /* disable 
FIFOs */ outb(0x0d, MCR(dev->base_addr)); outb(0, IER(dev->base_addr)); if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED, "baycom_ser12", dev)) { release_region(dev->base_addr, SER12_EXTENT); return -EBUSY; } /* * enable transmitter empty interrupt */ outb(2, IER(dev->base_addr)); /* * set the SIO to 6 Bits/character and 19200 or 28800 baud, so that * we get exactly (hopefully) 2 or 3 interrupts per radio symbol, * depending on the usage of the software DCD routine */ ser12_set_divisor(dev, bc->opt_dcd ? 6 : 4); printk(KERN_INFO "%s: ser12 at iobase 0x%lx irq %u uart %s\n", bc_drvname, dev->base_addr, dev->irq, uart_str[u]); return 0; } /* --------------------------------------------------------------------- */ static int ser12_close(struct net_device *dev) { struct baycom_state *bc = netdev_priv(dev); if (!dev || !bc) return -EINVAL; /* * disable interrupts */ outb(0, IER(dev->base_addr)); outb(1, MCR(dev->base_addr)); free_irq(dev->irq, dev); release_region(dev->base_addr, SER12_EXTENT); printk(KERN_INFO "%s: close ser12 at iobase 0x%lx irq %u\n", bc_drvname, dev->base_addr, dev->irq); return 0; } /* --------------------------------------------------------------------- */ /* * ===================== hdlcdrv driver interface ========================= */ /* --------------------------------------------------------------------- */ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, struct hdlcdrv_ioctl *hi, int cmd); /* --------------------------------------------------------------------- */ static struct hdlcdrv_ops ser12_ops = { .drvname = bc_drvname, .drvinfo = bc_drvinfo, .open = ser12_open, .close = ser12_close, .ioctl = baycom_ioctl, }; /* --------------------------------------------------------------------- */ static int baycom_setmode(struct baycom_state *bc, const char *modestr) { if (strchr(modestr, '*')) bc->opt_dcd = 0; else if (strchr(modestr, '+')) bc->opt_dcd = -1; else if (strchr(modestr, '@')) bc->opt_dcd = -2; else bc->opt_dcd = 
1; return 0; } /* --------------------------------------------------------------------- */ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, struct hdlcdrv_ioctl *hi, int cmd) { struct baycom_state *bc; struct baycom_ioctl bi; if (!dev) return -EINVAL; bc = netdev_priv(dev); BUG_ON(bc->hdrv.magic != HDLCDRV_MAGIC); if (cmd != SIOCDEVPRIVATE) return -ENOIOCTLCMD; switch (hi->cmd) { default: break; case HDLCDRVCTL_GETMODE: strcpy(hi->data.modename, "ser12"); if (bc->opt_dcd <= 0) strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : (bc->opt_dcd == -2) ? "@" : "+"); if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl))) return -EFAULT; return 0; case HDLCDRVCTL_SETMODE: if (netif_running(dev) || !capable(CAP_NET_ADMIN)) return -EACCES; hi->data.modename[sizeof(hi->data.modename)-1] = '\0'; return baycom_setmode(bc, hi->data.modename); case HDLCDRVCTL_MODELIST: strcpy(hi->data.modename, "ser12"); if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl))) return -EFAULT; return 0; case HDLCDRVCTL_MODEMPARMASK: return HDLCDRV_PARMASK_IOBASE | HDLCDRV_PARMASK_IRQ; } if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi))) return -EFAULT; switch (bi.cmd) { default: return -ENOIOCTLCMD; #ifdef BAYCOM_DEBUG case BAYCOMCTL_GETDEBUG: bi.data.dbg.debug1 = bc->hdrv.ptt_keyed; bi.data.dbg.debug2 = bc->debug_vals.last_intcnt; bi.data.dbg.debug3 = bc->debug_vals.last_pllcorr; break; #endif /* BAYCOM_DEBUG */ } if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi))) return -EFAULT; return 0; } /* --------------------------------------------------------------------- */ /* * command line settable parameters */ static char *mode[NR_PORTS] = { "ser12*", }; static int iobase[NR_PORTS] = { 0x3f8, }; static int irq[NR_PORTS] = { 4, }; module_param_array(mode, charp, NULL, 0); MODULE_PARM_DESC(mode, "baycom operating mode; * for software DCD"); module_param_array(iobase, int, NULL, 0); MODULE_PARM_DESC(iobase, "baycom io base address"); module_param_array(irq, int, 
NULL, 0); MODULE_PARM_DESC(irq, "baycom irq number"); MODULE_AUTHOR("Thomas M. Sailer, sailer@ife.ee.ethz.ch, hb9jnx@hb9w.che.eu"); MODULE_DESCRIPTION("Baycom ser12 half duplex amateur radio modem driver"); MODULE_LICENSE("GPL"); /* --------------------------------------------------------------------- */ static int __init init_baycomserhdx(void) { int i, found = 0; char set_hw = 1; printk(bc_drvinfo); /* * register net devices */ for (i = 0; i < NR_PORTS; i++) { struct net_device *dev; struct baycom_state *bc; char ifname[IFNAMSIZ]; sprintf(ifname, "bcsh%d", i); if (!mode[i]) set_hw = 0; if (!set_hw) iobase[i] = irq[i] = 0; dev = hdlcdrv_register(&ser12_ops, sizeof(struct baycom_state), ifname, iobase[i], irq[i], 0); if (IS_ERR(dev)) break; bc = netdev_priv(dev); if (set_hw && baycom_setmode(bc, mode[i])) set_hw = 0; found++; baycom_device[i] = dev; } if (!found) return -ENXIO; return 0; } static void __exit cleanup_baycomserhdx(void) { int i; for(i = 0; i < NR_PORTS; i++) { struct net_device *dev = baycom_device[i]; if (dev) hdlcdrv_unregister(dev); } } module_init(init_baycomserhdx); module_exit(cleanup_baycomserhdx); /* --------------------------------------------------------------------- */ #ifndef MODULE /* * format: baycom_ser_hdx=io,irq,mode * mode: ser12 hardware DCD * ser12* software DCD * ser12@ hardware/software DCD, i.e. no explicit DCD signal but hardware * mutes audio input to the modem * ser12+ hardware DCD, inverted signal at DCD pin */ static int __init baycom_ser_hdx_setup(char *str) { static unsigned nr_dev; int ints[3]; if (nr_dev >= NR_PORTS) return 0; str = get_options(str, 3, ints); if (ints[0] < 2) return 0; mode[nr_dev] = str; iobase[nr_dev] = ints[1]; irq[nr_dev] = ints[2]; nr_dev++; return 1; } __setup("baycom_ser_hdx=", baycom_ser_hdx_setup); #endif /* MODULE */ /* --------------------------------------------------------------------- */
gpl-2.0
gdachs/linux
drivers/regulator/max8973-regulator.c
2086
14180
/* * max8973-regulator.c -- Maxim max8973 * * Regulator driver for MAXIM 8973 DC-DC step-down switching regulator. * * Copyright (c) 2012, NVIDIA Corporation. * * Author: Laxman Dewangan <ldewangan@nvidia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind, * whether express or implied; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307, USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/regulator/max8973-regulator.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/regmap.h> /* Register definitions */ #define MAX8973_VOUT 0x0 #define MAX8973_VOUT_DVS 0x1 #define MAX8973_CONTROL1 0x2 #define MAX8973_CONTROL2 0x3 #define MAX8973_CHIPID1 0x4 #define MAX8973_CHIPID2 0x5 #define MAX8973_MAX_VOUT_REG 2 /* MAX8973_VOUT */ #define MAX8973_VOUT_ENABLE BIT(7) #define MAX8973_VOUT_MASK 0x7F /* MAX8973_VOUT_DVS */ #define MAX8973_DVS_VOUT_MASK 0x7F /* MAX8973_CONTROL1 */ #define MAX8973_SNS_ENABLE BIT(7) #define MAX8973_FPWM_EN_M BIT(6) #define MAX8973_NFSR_ENABLE BIT(5) #define MAX8973_AD_ENABLE BIT(4) #define MAX8973_BIAS_ENABLE BIT(3) #define MAX8973_FREQSHIFT_9PER BIT(2) #define MAX8973_RAMP_12mV_PER_US 0x0 #define MAX8973_RAMP_25mV_PER_US 0x1 #define MAX8973_RAMP_50mV_PER_US 0x2 #define MAX8973_RAMP_200mV_PER_US 0x3 /* MAX8973_CONTROL2 */ #define 
MAX8973_WDTMR_ENABLE BIT(6) #define MAX8973_DISCH_ENBABLE BIT(5) #define MAX8973_FT_ENABLE BIT(4) #define MAX8973_CKKADV_TRIP_DISABLE 0xC #define MAX8973_CKKADV_TRIP_75mV_PER_US 0x0 #define MAX8973_CKKADV_TRIP_150mV_PER_US 0x4 #define MAX8973_CKKADV_TRIP_75mV_PER_US_HIST_DIS 0x8 #define MAX8973_CONTROL_CLKADV_TRIP_MASK 0x00030000 #define MAX8973_INDUCTOR_MIN_30_PER 0x0 #define MAX8973_INDUCTOR_NOMINAL 0x1 #define MAX8973_INDUCTOR_PLUS_30_PER 0x2 #define MAX8973_INDUCTOR_PLUS_60_PER 0x3 #define MAX8973_CONTROL_INDUCTOR_VALUE_MASK 0x00300000 #define MAX8973_MIN_VOLATGE 606250 #define MAX8973_MAX_VOLATGE 1400000 #define MAX8973_VOLATGE_STEP 6250 #define MAX8973_BUCK_N_VOLTAGE 0x80 /* Maxim 8973 chip information */ struct max8973_chip { struct device *dev; struct regulator_desc desc; struct regulator_dev *rdev; struct regmap *regmap; bool enable_external_control; int dvs_gpio; int lru_index[MAX8973_MAX_VOUT_REG]; int curr_vout_val[MAX8973_MAX_VOUT_REG]; int curr_vout_reg; int curr_gpio_val; bool valid_dvs_gpio; }; /* * find_voltage_set_register: Find new voltage configuration register (VOUT). * The finding of the new VOUT register will be based on the LRU mechanism. * Each VOUT register will have different voltage configured . This * Function will look if any of the VOUT register have requested voltage set * or not. * - If it is already there then it will make that register as most * recently used and return as found so that caller need not to set * the VOUT register but need to set the proper gpios to select this * VOUT register. * - If requested voltage is not found then it will use the least * recently mechanism to get new VOUT register for new configuration * and will return not_found so that caller need to set new VOUT * register and then gpios (both). 
*/ static bool find_voltage_set_register(struct max8973_chip *tps, int req_vsel, int *vout_reg, int *gpio_val) { int i; bool found = false; int new_vout_reg = tps->lru_index[MAX8973_MAX_VOUT_REG - 1]; int found_index = MAX8973_MAX_VOUT_REG - 1; for (i = 0; i < MAX8973_MAX_VOUT_REG; ++i) { if (tps->curr_vout_val[tps->lru_index[i]] == req_vsel) { new_vout_reg = tps->lru_index[i]; found_index = i; found = true; goto update_lru_index; } } update_lru_index: for (i = found_index; i > 0; i--) tps->lru_index[i] = tps->lru_index[i - 1]; tps->lru_index[0] = new_vout_reg; *gpio_val = new_vout_reg; *vout_reg = MAX8973_VOUT + new_vout_reg; return found; } static int max8973_dcdc_get_voltage_sel(struct regulator_dev *rdev) { struct max8973_chip *max = rdev_get_drvdata(rdev); unsigned int data; int ret; ret = regmap_read(max->regmap, max->curr_vout_reg, &data); if (ret < 0) { dev_err(max->dev, "register %d read failed, err = %d\n", max->curr_vout_reg, ret); return ret; } return data & MAX8973_VOUT_MASK; } static int max8973_dcdc_set_voltage_sel(struct regulator_dev *rdev, unsigned vsel) { struct max8973_chip *max = rdev_get_drvdata(rdev); int ret; bool found = false; int vout_reg = max->curr_vout_reg; int gpio_val = max->curr_gpio_val; /* * If gpios are available to select the VOUT register then least * recently used register for new configuration. 
*/ if (max->valid_dvs_gpio) found = find_voltage_set_register(max, vsel, &vout_reg, &gpio_val); if (!found) { ret = regmap_update_bits(max->regmap, vout_reg, MAX8973_VOUT_MASK, vsel); if (ret < 0) { dev_err(max->dev, "register %d update failed, err %d\n", vout_reg, ret); return ret; } max->curr_vout_reg = vout_reg; max->curr_vout_val[gpio_val] = vsel; } /* Select proper VOUT register vio gpios */ if (max->valid_dvs_gpio) { gpio_set_value_cansleep(max->dvs_gpio, gpio_val & 0x1); max->curr_gpio_val = gpio_val; } return 0; } static int max8973_dcdc_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct max8973_chip *max = rdev_get_drvdata(rdev); int ret; int pwm; /* Enable force PWM mode in FAST mode only. */ switch (mode) { case REGULATOR_MODE_FAST: pwm = MAX8973_FPWM_EN_M; break; case REGULATOR_MODE_NORMAL: pwm = 0; break; default: return -EINVAL; } ret = regmap_update_bits(max->regmap, MAX8973_CONTROL1, MAX8973_FPWM_EN_M, pwm); if (ret < 0) dev_err(max->dev, "register %d update failed, err %d\n", MAX8973_CONTROL1, ret); return ret; } static unsigned int max8973_dcdc_get_mode(struct regulator_dev *rdev) { struct max8973_chip *max = rdev_get_drvdata(rdev); unsigned int data; int ret; ret = regmap_read(max->regmap, MAX8973_CONTROL1, &data); if (ret < 0) { dev_err(max->dev, "register %d read failed, err %d\n", MAX8973_CONTROL1, ret); return ret; } return (data & MAX8973_FPWM_EN_M) ? 
REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL; } static struct regulator_ops max8973_dcdc_ops = { .get_voltage_sel = max8973_dcdc_get_voltage_sel, .set_voltage_sel = max8973_dcdc_set_voltage_sel, .list_voltage = regulator_list_voltage_linear, .set_mode = max8973_dcdc_set_mode, .get_mode = max8973_dcdc_get_mode, }; static int max8973_init_dcdc(struct max8973_chip *max, struct max8973_regulator_platform_data *pdata) { int ret; uint8_t control1 = 0; uint8_t control2 = 0; if (pdata->control_flags & MAX8973_CONTROL_REMOTE_SENSE_ENABLE) control1 |= MAX8973_SNS_ENABLE; if (!(pdata->control_flags & MAX8973_CONTROL_FALLING_SLEW_RATE_ENABLE)) control1 |= MAX8973_NFSR_ENABLE; if (pdata->control_flags & MAX8973_CONTROL_OUTPUT_ACTIVE_DISCH_ENABLE) control1 |= MAX8973_AD_ENABLE; if (pdata->control_flags & MAX8973_CONTROL_BIAS_ENABLE) control1 |= MAX8973_BIAS_ENABLE; if (pdata->control_flags & MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE) control1 |= MAX8973_FREQSHIFT_9PER; /* Set ramp delay */ if (pdata->reg_init_data && pdata->reg_init_data->constraints.ramp_delay) { if (pdata->reg_init_data->constraints.ramp_delay < 25000) control1 |= MAX8973_RAMP_12mV_PER_US; else if (pdata->reg_init_data->constraints.ramp_delay < 50000) control1 |= MAX8973_RAMP_25mV_PER_US; else if (pdata->reg_init_data->constraints.ramp_delay < 200000) control1 |= MAX8973_RAMP_50mV_PER_US; else control1 |= MAX8973_RAMP_200mV_PER_US; } else { control1 |= MAX8973_RAMP_12mV_PER_US; max->desc.ramp_delay = 12500; } if (!(pdata->control_flags & MAX8973_CONTROL_PULL_DOWN_ENABLE)) control2 |= MAX8973_DISCH_ENBABLE; /* Clock advance trip configuration */ switch (pdata->control_flags & MAX8973_CONTROL_CLKADV_TRIP_MASK) { case MAX8973_CONTROL_CLKADV_TRIP_DISABLED: control2 |= MAX8973_CKKADV_TRIP_DISABLE; break; case MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US: control2 |= MAX8973_CKKADV_TRIP_75mV_PER_US; break; case MAX8973_CONTROL_CLKADV_TRIP_150mV_PER_US: control2 |= MAX8973_CKKADV_TRIP_150mV_PER_US; break; case 
MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US_HIST_DIS: control2 |= MAX8973_CKKADV_TRIP_75mV_PER_US_HIST_DIS; break; } /* Configure inductor value */ switch (pdata->control_flags & MAX8973_CONTROL_INDUCTOR_VALUE_MASK) { case MAX8973_CONTROL_INDUCTOR_VALUE_NOMINAL: control2 |= MAX8973_INDUCTOR_NOMINAL; break; case MAX8973_CONTROL_INDUCTOR_VALUE_MINUS_30_PER: control2 |= MAX8973_INDUCTOR_MIN_30_PER; break; case MAX8973_CONTROL_INDUCTOR_VALUE_PLUS_30_PER: control2 |= MAX8973_INDUCTOR_PLUS_30_PER; break; case MAX8973_CONTROL_INDUCTOR_VALUE_PLUS_60_PER: control2 |= MAX8973_INDUCTOR_PLUS_60_PER; break; } ret = regmap_write(max->regmap, MAX8973_CONTROL1, control1); if (ret < 0) { dev_err(max->dev, "register %d write failed, err = %d", MAX8973_CONTROL1, ret); return ret; } ret = regmap_write(max->regmap, MAX8973_CONTROL2, control2); if (ret < 0) { dev_err(max->dev, "register %d write failed, err = %d", MAX8973_CONTROL2, ret); return ret; } /* If external control is enabled then disable EN bit */ if (max->enable_external_control) { ret = regmap_update_bits(max->regmap, MAX8973_VOUT, MAX8973_VOUT_ENABLE, 0); if (ret < 0) dev_err(max->dev, "register %d update failed, err = %d", MAX8973_VOUT, ret); } return ret; } static const struct regmap_config max8973_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = MAX8973_CHIPID2, .cache_type = REGCACHE_RBTREE, }; static int max8973_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct max8973_regulator_platform_data *pdata; struct regulator_config config = { }; struct regulator_dev *rdev; struct max8973_chip *max; int ret; pdata = client->dev.platform_data; if (!pdata) { dev_err(&client->dev, "No Platform data"); return -EIO; } max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL); if (!max) { dev_err(&client->dev, "Memory allocation for max failed\n"); return -ENOMEM; } max->regmap = devm_regmap_init_i2c(client, &max8973_regmap_config); if (IS_ERR(max->regmap)) { ret = PTR_ERR(max->regmap); 
dev_err(&client->dev, "regmap init failed, err %d\n", ret); return ret; } i2c_set_clientdata(client, max); max->dev = &client->dev; max->desc.name = id->name; max->desc.id = 0; max->desc.ops = &max8973_dcdc_ops; max->desc.type = REGULATOR_VOLTAGE; max->desc.owner = THIS_MODULE; max->desc.min_uV = MAX8973_MIN_VOLATGE; max->desc.uV_step = MAX8973_VOLATGE_STEP; max->desc.n_voltages = MAX8973_BUCK_N_VOLTAGE; if (!pdata->enable_ext_control) { max->desc.enable_reg = MAX8973_VOUT; max->desc.enable_mask = MAX8973_VOUT_ENABLE; max8973_dcdc_ops.enable = regulator_enable_regmap; max8973_dcdc_ops.disable = regulator_disable_regmap; max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap; } max->enable_external_control = pdata->enable_ext_control; max->dvs_gpio = pdata->dvs_gpio; max->curr_gpio_val = pdata->dvs_def_state; max->curr_vout_reg = MAX8973_VOUT + pdata->dvs_def_state; max->lru_index[0] = max->curr_vout_reg; max->valid_dvs_gpio = false; if (gpio_is_valid(max->dvs_gpio)) { int gpio_flags; int i; gpio_flags = (pdata->dvs_def_state) ? 
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW; ret = devm_gpio_request_one(&client->dev, max->dvs_gpio, gpio_flags, "max8973-dvs"); if (ret) { dev_err(&client->dev, "gpio_request for gpio %d failed, err = %d\n", max->dvs_gpio, ret); return ret; } max->valid_dvs_gpio = true; /* * Initialize the lru index with vout_reg id * The index 0 will be most recently used and * set with the max->curr_vout_reg */ for (i = 0; i < MAX8973_MAX_VOUT_REG; ++i) max->lru_index[i] = i; max->lru_index[0] = max->curr_vout_reg; max->lru_index[max->curr_vout_reg] = 0; } ret = max8973_init_dcdc(max, pdata); if (ret < 0) { dev_err(max->dev, "Max8973 Init failed, err = %d\n", ret); return ret; } config.dev = &client->dev; config.init_data = pdata->reg_init_data; config.driver_data = max; config.of_node = client->dev.of_node; config.regmap = max->regmap; /* Register the regulators */ rdev = regulator_register(&max->desc, &config); if (IS_ERR(rdev)) { ret = PTR_ERR(rdev); dev_err(max->dev, "regulator register failed, err %d\n", ret); return ret; } max->rdev = rdev; return 0; } static int max8973_remove(struct i2c_client *client) { struct max8973_chip *max = i2c_get_clientdata(client); regulator_unregister(max->rdev); return 0; } static const struct i2c_device_id max8973_id[] = { {.name = "max8973",}, {}, }; MODULE_DEVICE_TABLE(i2c, max8973_id); static struct i2c_driver max8973_i2c_driver = { .driver = { .name = "max8973", .owner = THIS_MODULE, }, .probe = max8973_probe, .remove = max8973_remove, .id_table = max8973_id, }; static int __init max8973_init(void) { return i2c_add_driver(&max8973_i2c_driver); } subsys_initcall(max8973_init); static void __exit max8973_cleanup(void) { i2c_del_driver(&max8973_i2c_driver); } module_exit(max8973_cleanup); MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); MODULE_DESCRIPTION("MAX8973 voltage regulator driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
djmatt604/kernel_samsung_msm8660-common
arch/x86/pci/bus_numa.c
4134
1985
#include <linux/init.h> #include <linux/pci.h> #include <linux/range.h> #include "bus_numa.h" int pci_root_num; struct pci_root_info pci_root_info[PCI_ROOT_NR]; void x86_pci_root_bus_res_quirks(struct pci_bus *b) { int i; int j; struct pci_root_info *info; /* don't go for it if _CRS is used already */ if (b->resource[0] != &ioport_resource || b->resource[1] != &iomem_resource) return; if (!pci_root_num) return; for (i = 0; i < pci_root_num; i++) { if (pci_root_info[i].bus_min == b->number) break; } if (i == pci_root_num) return; printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n", b->number); pci_bus_remove_resources(b); info = &pci_root_info[i]; for (j = 0; j < info->res_num; j++) { struct resource *res; struct resource *root; res = &info->res[j]; pci_bus_add_resource(b, res, 0); if (res->flags & IORESOURCE_IO) root = &ioport_resource; else root = &iomem_resource; insert_resource(root, res); } } void __devinit update_res(struct pci_root_info *info, resource_size_t start, resource_size_t end, unsigned long flags, int merge) { int i; struct resource *res; if (start > end) return; if (start == MAX_RESOURCE) return; if (!merge) goto addit; /* try to merge it with old one */ for (i = 0; i < info->res_num; i++) { resource_size_t final_start, final_end; resource_size_t common_start, common_end; res = &info->res[i]; if (res->flags != flags) continue; common_start = max(res->start, start); common_end = min(res->end, end); if (common_start > common_end + 1) continue; final_start = min(res->start, start); final_end = max(res->end, end); res->start = final_start; res->end = final_end; return; } addit: /* need to add that */ if (info->res_num >= RES_NUM) return; res = &info->res[info->res_num]; res->name = info->name; res->flags = flags; res->start = start; res->end = end; res->child = NULL; info->res_num++; }
gpl-2.0
xerpi/linux
fs/ecryptfs/miscdev.c
4390
15086
/** * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 2008 International Business Machines Corp. * Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <linux/fs.h> #include <linux/hash.h> #include <linux/random.h> #include <linux/miscdevice.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/wait.h> #include <linux/module.h> #include "ecryptfs_kernel.h" static atomic_t ecryptfs_num_miscdev_opens; /** * ecryptfs_miscdev_poll * @file: dev file * @pt: dev poll table (ignored) * * Returns the poll mask */ static unsigned int ecryptfs_miscdev_poll(struct file *file, poll_table *pt) { struct ecryptfs_daemon *daemon = file->private_data; unsigned int mask = 0; mutex_lock(&daemon->mux); if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) { printk(KERN_WARNING "%s: Attempt to poll on zombified " "daemon\n", __func__); goto out_unlock_daemon; } if (daemon->flags & ECRYPTFS_DAEMON_IN_READ) goto out_unlock_daemon; if (daemon->flags & ECRYPTFS_DAEMON_IN_POLL) goto out_unlock_daemon; daemon->flags |= ECRYPTFS_DAEMON_IN_POLL; mutex_unlock(&daemon->mux); poll_wait(file, &daemon->wait, pt); mutex_lock(&daemon->mux); if (!list_empty(&daemon->msg_ctx_out_queue)) mask |= POLLIN | POLLRDNORM; out_unlock_daemon: daemon->flags &= ~ECRYPTFS_DAEMON_IN_POLL; mutex_unlock(&daemon->mux); return mask; } /** * ecryptfs_miscdev_open * 
@inode: inode of miscdev handle (ignored) * @file: file for miscdev handle * * Returns zero on success; non-zero otherwise */ static int ecryptfs_miscdev_open(struct inode *inode, struct file *file) { struct ecryptfs_daemon *daemon = NULL; int rc; mutex_lock(&ecryptfs_daemon_hash_mux); rc = ecryptfs_find_daemon_by_euid(&daemon); if (!rc) { rc = -EINVAL; goto out_unlock_daemon_list; } rc = ecryptfs_spawn_daemon(&daemon, file); if (rc) { printk(KERN_ERR "%s: Error attempting to spawn daemon; " "rc = [%d]\n", __func__, rc); goto out_unlock_daemon_list; } mutex_lock(&daemon->mux); if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) { rc = -EBUSY; goto out_unlock_daemon; } daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN; file->private_data = daemon; atomic_inc(&ecryptfs_num_miscdev_opens); out_unlock_daemon: mutex_unlock(&daemon->mux); out_unlock_daemon_list: mutex_unlock(&ecryptfs_daemon_hash_mux); return rc; } /** * ecryptfs_miscdev_release * @inode: inode of fs/ecryptfs/euid handle (ignored) * @file: file for fs/ecryptfs/euid handle * * This keeps the daemon registered until the daemon sends another * ioctl to fs/ecryptfs/ctl or until the kernel module unregisters. * * Returns zero on success; non-zero otherwise */ static int ecryptfs_miscdev_release(struct inode *inode, struct file *file) { struct ecryptfs_daemon *daemon = file->private_data; int rc; mutex_lock(&daemon->mux); BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN)); daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN; atomic_dec(&ecryptfs_num_miscdev_opens); mutex_unlock(&daemon->mux); mutex_lock(&ecryptfs_daemon_hash_mux); rc = ecryptfs_exorcise_daemon(daemon); mutex_unlock(&ecryptfs_daemon_hash_mux); if (rc) { printk(KERN_CRIT "%s: Fatal error whilst attempting to " "shut down daemon; rc = [%d]. 
Please report this " "bug.\n", __func__, rc); BUG(); } return rc; } /** * ecryptfs_send_miscdev * @data: Data to send to daemon; may be NULL * @data_size: Amount of data to send to daemon * @msg_ctx: Message context, which is used to handle the reply. If * this is NULL, then we do not expect a reply. * @msg_type: Type of message * @msg_flags: Flags for message * @daemon: eCryptfs daemon object * * Add msg_ctx to queue and then, if it exists, notify the blocked * miscdevess about the data being available. Must be called with * ecryptfs_daemon_hash_mux held. * * Returns zero on success; non-zero otherwise */ int ecryptfs_send_miscdev(char *data, size_t data_size, struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type, u16 msg_flags, struct ecryptfs_daemon *daemon) { struct ecryptfs_message *msg; msg = kmalloc((sizeof(*msg) + data_size), GFP_KERNEL); if (!msg) { printk(KERN_ERR "%s: Out of memory whilst attempting " "to kmalloc(%zd, GFP_KERNEL)\n", __func__, (sizeof(*msg) + data_size)); return -ENOMEM; } mutex_lock(&msg_ctx->mux); msg_ctx->msg = msg; msg_ctx->msg->index = msg_ctx->index; msg_ctx->msg->data_len = data_size; msg_ctx->type = msg_type; memcpy(msg_ctx->msg->data, data, data_size); msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size); list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue); mutex_unlock(&msg_ctx->mux); mutex_lock(&daemon->mux); daemon->num_queued_msg_ctx++; wake_up_interruptible(&daemon->wait); mutex_unlock(&daemon->mux); return 0; } /* * miscdevfs packet format: * Octet 0: Type * Octets 1-4: network byte order msg_ctx->counter * Octets 5-N0: Size of struct ecryptfs_message to follow * Octets N0-N1: struct ecryptfs_message (including data) * * Octets 5-N1 not written if the packet type does not include a message */ #define PKT_TYPE_SIZE 1 #define PKT_CTR_SIZE 4 #define MIN_NON_MSG_PKT_SIZE (PKT_TYPE_SIZE + PKT_CTR_SIZE) #define MIN_MSG_PKT_SIZE (PKT_TYPE_SIZE + PKT_CTR_SIZE \ + ECRYPTFS_MIN_PKT_LEN_SIZE) /* 4 + 
ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES comes from tag 65 packet format */ #define MAX_MSG_PKT_SIZE (PKT_TYPE_SIZE + PKT_CTR_SIZE \ + ECRYPTFS_MAX_PKT_LEN_SIZE \ + sizeof(struct ecryptfs_message) \ + 4 + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) #define PKT_TYPE_OFFSET 0 #define PKT_CTR_OFFSET PKT_TYPE_SIZE #define PKT_LEN_OFFSET (PKT_TYPE_SIZE + PKT_CTR_SIZE) /** * ecryptfs_miscdev_read - format and send message from queue * @file: miscdevfs handle * @buf: User buffer into which to copy the next message on the daemon queue * @count: Amount of space available in @buf * @ppos: Offset in file (ignored) * * Pulls the most recent message from the daemon queue, formats it for * being sent via a miscdevfs handle, and copies it into @buf * * Returns the number of bytes copied into the user buffer */ static ssize_t ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct ecryptfs_daemon *daemon = file->private_data; struct ecryptfs_msg_ctx *msg_ctx; size_t packet_length_size; char packet_length[ECRYPTFS_MAX_PKT_LEN_SIZE]; size_t i; size_t total_length; int rc; mutex_lock(&daemon->mux); if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) { rc = 0; printk(KERN_WARNING "%s: Attempt to read from zombified " "daemon\n", __func__); goto out_unlock_daemon; } if (daemon->flags & ECRYPTFS_DAEMON_IN_READ) { rc = 0; goto out_unlock_daemon; } /* This daemon will not go away so long as this flag is set */ daemon->flags |= ECRYPTFS_DAEMON_IN_READ; check_list: if (list_empty(&daemon->msg_ctx_out_queue)) { mutex_unlock(&daemon->mux); rc = wait_event_interruptible( daemon->wait, !list_empty(&daemon->msg_ctx_out_queue)); mutex_lock(&daemon->mux); if (rc < 0) { rc = 0; goto out_unlock_daemon; } } if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) { rc = 0; goto out_unlock_daemon; } if (list_empty(&daemon->msg_ctx_out_queue)) { /* Something else jumped in since the * wait_event_interruptable() and removed the * message from the queue; try again */ goto check_list; } msg_ctx = 
list_first_entry(&daemon->msg_ctx_out_queue, struct ecryptfs_msg_ctx, daemon_out_list); BUG_ON(!msg_ctx); mutex_lock(&msg_ctx->mux); if (msg_ctx->msg) { rc = ecryptfs_write_packet_length(packet_length, msg_ctx->msg_size, &packet_length_size); if (rc) { rc = 0; printk(KERN_WARNING "%s: Error writing packet length; " "rc = [%d]\n", __func__, rc); goto out_unlock_msg_ctx; } } else { packet_length_size = 0; msg_ctx->msg_size = 0; } total_length = (PKT_TYPE_SIZE + PKT_CTR_SIZE + packet_length_size + msg_ctx->msg_size); if (count < total_length) { rc = 0; printk(KERN_WARNING "%s: Only given user buffer of " "size [%zd], but we need [%zd] to read the " "pending message\n", __func__, count, total_length); goto out_unlock_msg_ctx; } rc = -EFAULT; if (put_user(msg_ctx->type, buf)) goto out_unlock_msg_ctx; if (put_user(cpu_to_be32(msg_ctx->counter), (__be32 __user *)(&buf[PKT_CTR_OFFSET]))) goto out_unlock_msg_ctx; i = PKT_TYPE_SIZE + PKT_CTR_SIZE; if (msg_ctx->msg) { if (copy_to_user(&buf[i], packet_length, packet_length_size)) goto out_unlock_msg_ctx; i += packet_length_size; if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size)) goto out_unlock_msg_ctx; i += msg_ctx->msg_size; } rc = i; list_del(&msg_ctx->daemon_out_list); kfree(msg_ctx->msg); msg_ctx->msg = NULL; /* We do not expect a reply from the userspace daemon for any * message type other than ECRYPTFS_MSG_REQUEST */ if (msg_ctx->type != ECRYPTFS_MSG_REQUEST) ecryptfs_msg_ctx_alloc_to_free(msg_ctx); out_unlock_msg_ctx: mutex_unlock(&msg_ctx->mux); out_unlock_daemon: daemon->flags &= ~ECRYPTFS_DAEMON_IN_READ; mutex_unlock(&daemon->mux); return rc; } /** * ecryptfs_miscdev_response - miscdevess response to message previously sent to daemon * @data: Bytes comprising struct ecryptfs_message * @data_size: sizeof(struct ecryptfs_message) + data len * @seq: Sequence number for miscdev response packet * * Returns zero on success; non-zero otherwise */ static int ecryptfs_miscdev_response(struct ecryptfs_daemon *daemon, 
char *data, size_t data_size, u32 seq) { struct ecryptfs_message *msg = (struct ecryptfs_message *)data; int rc; if ((sizeof(*msg) + msg->data_len) != data_size) { printk(KERN_WARNING "%s: (sizeof(*msg) + msg->data_len) = " "[%zd]; data_size = [%zd]. Invalid packet.\n", __func__, (sizeof(*msg) + msg->data_len), data_size); rc = -EINVAL; goto out; } rc = ecryptfs_process_response(daemon, msg, seq); if (rc) printk(KERN_ERR "Error processing response message; rc = [%d]\n", rc); out: return rc; } /** * ecryptfs_miscdev_write - handle write to daemon miscdev handle * @file: File for misc dev handle * @buf: Buffer containing user data * @count: Amount of data in @buf * @ppos: Pointer to offset in file (ignored) * * Returns the number of bytes read from @buf */ static ssize_t ecryptfs_miscdev_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { __be32 counter_nbo; u32 seq; size_t packet_size, packet_size_length; char *data; unsigned char packet_size_peek[ECRYPTFS_MAX_PKT_LEN_SIZE]; ssize_t rc; if (count == 0) { return 0; } else if (count == MIN_NON_MSG_PKT_SIZE) { /* Likely a harmless MSG_HELO or MSG_QUIT - no packet length */ goto memdup; } else if (count < MIN_MSG_PKT_SIZE || count > MAX_MSG_PKT_SIZE) { printk(KERN_WARNING "%s: Acceptable packet size range is " "[%d-%zu], but amount of data written is [%zu].", __func__, MIN_MSG_PKT_SIZE, MAX_MSG_PKT_SIZE, count); return -EINVAL; } if (copy_from_user(packet_size_peek, &buf[PKT_LEN_OFFSET], sizeof(packet_size_peek))) { printk(KERN_WARNING "%s: Error while inspecting packet size\n", __func__); return -EFAULT; } rc = ecryptfs_parse_packet_length(packet_size_peek, &packet_size, &packet_size_length); if (rc) { printk(KERN_WARNING "%s: Error parsing packet length; " "rc = [%zd]\n", __func__, rc); return rc; } if ((PKT_TYPE_SIZE + PKT_CTR_SIZE + packet_size_length + packet_size) != count) { printk(KERN_WARNING "%s: Invalid packet size [%zu]\n", __func__, packet_size); return -EINVAL; } memdup: data = 
memdup_user(buf, count); if (IS_ERR(data)) { printk(KERN_ERR "%s: memdup_user returned error [%ld]\n", __func__, PTR_ERR(data)); return PTR_ERR(data); } switch (data[PKT_TYPE_OFFSET]) { case ECRYPTFS_MSG_RESPONSE: if (count < (MIN_MSG_PKT_SIZE + sizeof(struct ecryptfs_message))) { printk(KERN_WARNING "%s: Minimum acceptable packet " "size is [%zd], but amount of data written is " "only [%zd]. Discarding response packet.\n", __func__, (MIN_MSG_PKT_SIZE + sizeof(struct ecryptfs_message)), count); rc = -EINVAL; goto out_free; } memcpy(&counter_nbo, &data[PKT_CTR_OFFSET], PKT_CTR_SIZE); seq = be32_to_cpu(counter_nbo); rc = ecryptfs_miscdev_response(file->private_data, &data[PKT_LEN_OFFSET + packet_size_length], packet_size, seq); if (rc) { printk(KERN_WARNING "%s: Failed to deliver miscdev " "response to requesting operation; rc = [%zd]\n", __func__, rc); goto out_free; } break; case ECRYPTFS_MSG_HELO: case ECRYPTFS_MSG_QUIT: break; default: ecryptfs_printk(KERN_WARNING, "Dropping miscdev " "message of unrecognized type [%d]\n", data[0]); rc = -EINVAL; goto out_free; } rc = count; out_free: kfree(data); return rc; } static const struct file_operations ecryptfs_miscdev_fops = { .owner = THIS_MODULE, .open = ecryptfs_miscdev_open, .poll = ecryptfs_miscdev_poll, .read = ecryptfs_miscdev_read, .write = ecryptfs_miscdev_write, .release = ecryptfs_miscdev_release, .llseek = noop_llseek, }; static struct miscdevice ecryptfs_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "ecryptfs", .fops = &ecryptfs_miscdev_fops }; /** * ecryptfs_init_ecryptfs_miscdev * * Messages sent to the userspace daemon from the kernel are placed on * a queue associated with the daemon. The next read against the * miscdev handle by that daemon will return the oldest message placed * on the message queue for the daemon. 
* * Returns zero on success; non-zero otherwise */ int __init ecryptfs_init_ecryptfs_miscdev(void) { int rc; atomic_set(&ecryptfs_num_miscdev_opens, 0); rc = misc_register(&ecryptfs_miscdev); if (rc) printk(KERN_ERR "%s: Failed to register miscellaneous device " "for communications with userspace daemons; rc = [%d]\n", __func__, rc); return rc; } /** * ecryptfs_destroy_ecryptfs_miscdev * * All of the daemons must be exorcised prior to calling this * function. */ void ecryptfs_destroy_ecryptfs_miscdev(void) { BUG_ON(atomic_read(&ecryptfs_num_miscdev_opens) != 0); misc_deregister(&ecryptfs_miscdev); }
gpl-2.0
n8ohu/android_kernel_pantech_lgvr
sound/isa/msnd/msnd_pinnacle_mixer.c
4646
10237
/***************************************************************************
                         msnd_pinnacle_mixer.c  -  description
                            -------------------
   begin                : Fre Jun 7 2002
   copyright            : (C) 2002 by karsten wiese
   email                : annabellesgarden@yahoo.de
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/

#include <linux/io.h>

#include <sound/core.h>
#include <sound/control.h>
#include "msnd.h"
#include "msnd_pinnacle.h"

/*
 * OSS-style mixer device numbers; they index the per-device
 * left_levels[]/right_levels[] tables in struct snd_msnd.
 */
#define MSND_MIXER_VOLUME	0
#define MSND_MIXER_PCM		1
#define MSND_MIXER_AUX		2	/* Input source 1  (aux1) */
#define MSND_MIXER_IMIX		3	/* Recording monitor  */
#define MSND_MIXER_SYNTH	4
#define MSND_MIXER_SPEAKER	5
#define MSND_MIXER_LINE		6
#define MSND_MIXER_MIC		7
#define MSND_MIXER_RECLEV	11	/* Recording level */
#define MSND_MIXER_IGAIN	12	/* Input gain */
#define MSND_MIXER_OGAIN	13	/* Output gain */
#define MSND_MIXER_DIGITAL	17	/* Digital (input) 1 */

/* Device mask bits */
#define MSND_MASK_VOLUME	(1 << MSND_MIXER_VOLUME)
#define MSND_MASK_SYNTH		(1 << MSND_MIXER_SYNTH)
#define MSND_MASK_PCM		(1 << MSND_MIXER_PCM)
#define MSND_MASK_SPEAKER	(1 << MSND_MIXER_SPEAKER)
#define MSND_MASK_LINE		(1 << MSND_MIXER_LINE)
#define MSND_MASK_MIC		(1 << MSND_MIXER_MIC)
#define MSND_MASK_IMIX		(1 << MSND_MIXER_IMIX)
#define MSND_MASK_RECLEV	(1 << MSND_MIXER_RECLEV)
#define MSND_MASK_IGAIN		(1 << MSND_MIXER_IGAIN)
#define MSND_MASK_OGAIN		(1 << MSND_MIXER_OGAIN)
#define MSND_MASK_AUX		(1 << MSND_MIXER_AUX)
#define MSND_MASK_DIGITAL	(1 << MSND_MIXER_DIGITAL)

/*
 * Describe the "Capture Source" enum control: always "Analog" and "MASS";
 * "SPDIF" is offered only when the board flags a digital input
 * (F_HAVEDIGITAL).
 */
static int snd_msndmix_info_mux(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_info *uinfo)
{
	static char *texts[3] = {
		"Analog", "MASS", "SPDIF",
	};
	struct snd_msnd *chip = snd_kcontrol_chip(kcontrol);
	unsigned items = test_bit(F_HAVEDIGITAL, &chip->flags) ? 3 : 2;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = items;
	if (uinfo->value.enumerated.item >= items)
		uinfo->value.enumerated.item = items - 1;
	strcpy(uinfo->value.enumerated.name,
	       texts[uinfo->value.enumerated.item]);
	return 0;
}

/*
 * Report the currently selected capture source derived from chip->recsrc:
 * 0 = analog (IMIX default), 1 = synth, 2 = digital (only if present).
 */
static int snd_msndmix_get_mux(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_msnd *chip = snd_kcontrol_chip(kcontrol);

	/* MSND_MASK_IMIX is the default */
	ucontrol->value.enumerated.item[0] = 0;
	if (chip->recsrc & MSND_MASK_SYNTH) {
		ucontrol->value.enumerated.item[0] = 1;
	} else if ((chip->recsrc & MSND_MASK_DIGITAL) &&
		   test_bit(F_HAVEDIGITAL, &chip->flags)) {
		ucontrol->value.enumerated.item[0] = 2;
	}
	return 0;
}

/*
 * Program the capture source on the DSP.
 *
 * Returns 1 if the source actually changed, 0 if it was already selected
 * (or if the DSP refused the command), -EINVAL for an unknown @val.
 * chip->recsrc is only updated after both DSP transactions succeeded.
 */
static int snd_msndmix_set_mux(struct snd_msnd *chip, int val)
{
	unsigned newrecsrc;
	int change;
	unsigned char msndbyte;

	switch (val) {
	case 0:
		newrecsrc = MSND_MASK_IMIX;
		msndbyte = HDEXAR_SET_ANA_IN;
		break;
	case 1:
		newrecsrc = MSND_MASK_SYNTH;
		msndbyte = HDEXAR_SET_SYNTH_IN;
		break;
	case 2:
		newrecsrc = MSND_MASK_DIGITAL;
		msndbyte = HDEXAR_SET_DAT_IN;
		break;
	default:
		return -EINVAL;
	}
	change = newrecsrc != chip->recsrc;
	if (change) {
		change = 0;
		if (!snd_msnd_send_word(chip, 0, 0, msndbyte))
			if (!snd_msnd_send_dsp_cmd(chip, HDEX_AUX_REQ)) {
				chip->recsrc = newrecsrc;
				change = 1;
			}
	}
	return change;
}

/* "Capture Source" put callback: forward the chosen item to the DSP. */
static int snd_msndmix_put_mux(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_msnd *msnd = snd_kcontrol_chip(kcontrol);
	return snd_msndmix_set_mux(msnd, ucontrol->value.enumerated.item[0]);
}

/* All volume controls are stereo, 0..100 (percent). */
static int snd_msndmix_volume_info(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 100;
	return 0;
}

/*
 * Read back a stereo level pair, converting the stored 16-bit level
 * (0..0xffff) to a 0..100 percentage under the mixer lock.
 */
static int snd_msndmix_volume_get(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_msnd *msnd = snd_kcontrol_chip(kcontrol);
	int addr = kcontrol->private_value;
	unsigned long flags;

	spin_lock_irqsave(&msnd->mixer_lock, flags);
	ucontrol->value.integer.value[0] = msnd->left_levels[addr] * 100;
	ucontrol->value.integer.value[0] /= 0xFFFF;
	ucontrol->value.integer.value[1] = msnd->right_levels[addr] * 100;
	ucontrol->value.integer.value[1] /= 0xFFFF;
	spin_unlock_irqrestore(&msnd->mixer_lock, flags);
	return 0;
}

/*
 * The macros below push level values into the DSP's shared memory area
 * (SMA).  They expect a local "struct snd_msnd *dev" in scope.
 * Note: the original definitions ended in "} while (0);" -- the stray
 * semicolon defeats the do/while(0) idiom; it is dropped here so the
 * macros behave as single statements.
 */

/* 16-bit volume register pair, scaled by the current master volume. */
#define update_volm(a, b)						\
	do {								\
		writew((dev->left_levels[a] >> 1) *			\
		       readw(dev->SMA + SMA_wCurrMastVolLeft) / 0xffff,	\
		       dev->SMA + SMA_##b##Left);			\
		writew((dev->right_levels[a] >> 1) *			\
		       readw(dev->SMA + SMA_wCurrMastVolRight) / 0xffff,\
		       dev->SMA + SMA_##b##Right);			\
	} while (0)

/* 8-bit pot pair scaled by master volume, then latched via AUX request. */
#define update_potm(d, s, ar)						\
	do {								\
		writeb((dev->left_levels[d] >> 8) *			\
		       readw(dev->SMA + SMA_wCurrMastVolLeft) / 0xffff,	\
		       dev->SMA + SMA_##s##Left);			\
		writeb((dev->right_levels[d] >> 8) *			\
		       readw(dev->SMA + SMA_wCurrMastVolRight) / 0xffff,\
		       dev->SMA + SMA_##s##Right);			\
		if (snd_msnd_send_word(dev, 0, 0, ar) == 0)		\
			snd_msnd_send_dsp_cmd(dev, HDEX_AUX_REQ);	\
	} while (0)

/* 8-bit pot pair, unscaled, latched via AUX request. */
#define update_pot(d, s, ar)						\
	do {								\
		writeb(dev->left_levels[d] >> 8,			\
		       dev->SMA + SMA_##s##Left);			\
		writeb(dev->right_levels[d] >> 8,			\
		       dev->SMA + SMA_##s##Right);			\
		if (snd_msnd_send_word(dev, 0, 0, ar) == 0)		\
			snd_msnd_send_dsp_cmd(dev, HDEX_AUX_REQ);	\
	} while (0)

/*
 * Apply a stereo level (@left/@right in percent) to mixer device @d.
 * Stores the 16-bit scaled levels, programs the matching SMA registers,
 * and re-scales every master-volume-dependent control when needed.
 * Returns 0 or -EINVAL for an unsupported device.
 */
static int snd_msndmix_set(struct snd_msnd *dev, int d, int left, int right)
{
	int bLeft, bRight;
	int wLeft, wRight;
	int updatemaster = 0;

	if (d >= LEVEL_ENTRIES)
		return -EINVAL;

	bLeft = left * 0xff / 100;
	wLeft = left * 0xffff / 100;

	bRight = right * 0xff / 100;
	wRight = right * 0xffff / 100;

	dev->left_levels[d] = wLeft;
	dev->right_levels[d] = wRight;

	switch (d) {
		/* master volume unscaled controls */
	case MSND_MIXER_LINE:			/* line pot control */
		/* scaled by IMIX in digital mix */
		writeb(bLeft, dev->SMA + SMA_bInPotPosLeft);
		writeb(bRight, dev->SMA + SMA_bInPotPosRight);
		if (snd_msnd_send_word(dev, 0, 0, HDEXAR_IN_SET_POTS) == 0)
			snd_msnd_send_dsp_cmd(dev, HDEX_AUX_REQ);
		break;
	case MSND_MIXER_MIC:			/* mic pot control */
		if (dev->type == msndClassic)
			return -EINVAL;
		/* scaled by IMIX in digital mix */
		writeb(bLeft, dev->SMA + SMA_bMicPotPosLeft);
		writeb(bRight, dev->SMA + SMA_bMicPotPosRight);
		if (snd_msnd_send_word(dev, 0, 0, HDEXAR_MIC_SET_POTS) == 0)
			snd_msnd_send_dsp_cmd(dev, HDEX_AUX_REQ);
		break;
	case MSND_MIXER_VOLUME:		/* master volume */
		writew(wLeft, dev->SMA + SMA_wCurrMastVolLeft);
		writew(wRight, dev->SMA + SMA_wCurrMastVolRight);
		/* fall through */

	case MSND_MIXER_AUX:			/* aux pot control */
		/* scaled by master volume */
		/* fall through */

		/* digital controls */
	case MSND_MIXER_SYNTH:			/* synth vol (dsp mix) */
	case MSND_MIXER_PCM:			/* pcm vol (dsp mix) */
	case MSND_MIXER_IMIX:			/* input monitor (dsp mix) */
		/* scaled by master volume */
		updatemaster = 1;
		break;

	default:
		return -EINVAL;
	}

	if (updatemaster) {
		/* update master volume scaled controls */
		update_volm(MSND_MIXER_PCM, wCurrPlayVol);
		update_volm(MSND_MIXER_IMIX, wCurrInVol);
		if (dev->type == msndPinnacle)
			update_volm(MSND_MIXER_SYNTH, wCurrMHdrVol);
		update_potm(MSND_MIXER_AUX, bAuxPotPos, HDEXAR_AUX_SET_POTS);
	}

	return 0;
}

/*
 * Volume put callback.  Values are reduced modulo 101 into the 0..100
 * range before being applied.
 * NOTE(review): "change" compares the stored 16-bit levels against the
 * new 0..100 percentages, so it reports a change for almost any nonzero
 * level -- kept as-is to preserve the original behavior; verify against
 * upstream before altering.
 */
static int snd_msndmix_volume_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_msnd *msnd = snd_kcontrol_chip(kcontrol);
	int change, addr = kcontrol->private_value;
	int left, right;
	unsigned long flags;

	left = ucontrol->value.integer.value[0] % 101;
	right = ucontrol->value.integer.value[1] % 101;
	spin_lock_irqsave(&msnd->mixer_lock, flags);
	change = msnd->left_levels[addr] != left
		|| msnd->right_levels[addr] != right;
	snd_msndmix_set(msnd, addr, left, right);
	spin_unlock_irqrestore(&msnd->mixer_lock, flags);
	return change;
}

/* Build one stereo volume control bound to mixer device @addr. */
#define DUMMY_VOLUME(xname, xindex, addr) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_msndmix_volume_info, \
  .get = snd_msndmix_volume_get, .put = snd_msndmix_volume_put, \
  .private_value = addr }

static struct snd_kcontrol_new snd_msnd_controls[] = {
DUMMY_VOLUME("Master Volume", 0, MSND_MIXER_VOLUME),
DUMMY_VOLUME("PCM Volume", 0, MSND_MIXER_PCM),
DUMMY_VOLUME("Aux Volume", 0, MSND_MIXER_AUX),
DUMMY_VOLUME("Line Volume", 0, MSND_MIXER_LINE),
DUMMY_VOLUME("Mic Volume", 0, MSND_MIXER_MIC),
DUMMY_VOLUME("Monitor",	0, MSND_MIXER_IMIX),
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Capture Source",
	.info = snd_msndmix_info_mux,
	.get = snd_msndmix_get_mux,
	.put = snd_msndmix_put_mux,
}
};

/*
 * Register all mixer controls on @card.
 *
 * FIX: the error check was previously outside the loop, so a failing
 * snd_ctl_add() for any control but the last one was silently ignored
 * (err was overwritten by later iterations).  Check each result.
 */
int __devinit snd_msndmix_new(struct snd_card *card)
{
	struct snd_msnd *chip = card->private_data;
	unsigned int idx;
	int err;

	if (snd_BUG_ON(!chip))
		return -EINVAL;
	spin_lock_init(&chip->mixer_lock);
	strcpy(card->mixername, "MSND Pinnacle Mixer");

	for (idx = 0; idx < ARRAY_SIZE(snd_msnd_controls); idx++) {
		err = snd_ctl_add(card,
				  snd_ctl_new1(snd_msnd_controls + idx, chip));
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL(snd_msndmix_new);

/*
 * Push the current level tables into the DSP after (re)initialization.
 * Mic and synth volumes exist on Pinnacle-class hardware only.
 */
void snd_msndmix_setup(struct snd_msnd *dev)
{
	update_pot(MSND_MIXER_LINE, bInPotPos, HDEXAR_IN_SET_POTS);
	update_potm(MSND_MIXER_AUX, bAuxPotPos, HDEXAR_AUX_SET_POTS);
	update_volm(MSND_MIXER_PCM, wCurrPlayVol);
	update_volm(MSND_MIXER_IMIX, wCurrInVol);
	if (dev->type == msndPinnacle) {
		update_pot(MSND_MIXER_MIC, bMicPotPos, HDEXAR_MIC_SET_POTS);
		update_volm(MSND_MIXER_SYNTH, wCurrMHdrVol);
	}
}
EXPORT_SYMBOL(snd_msndmix_setup);

/*
 * Force the capture source to @recsrc; invalidating dev->recsrc first
 * guarantees snd_msndmix_set_mux() reprograms the DSP.
 */
int snd_msndmix_force_recsrc(struct snd_msnd *dev, int recsrc)
{
	dev->recsrc = -1;
	return snd_msndmix_set_mux(dev, recsrc);
}
EXPORT_SYMBOL(snd_msndmix_force_recsrc);
gpl-2.0
AOKP/kernel_oppo_n1
drivers/media/video/tm6000/tm6000-stds.c
4902
23631
/*
 * tm6000-stds.c: per-TV-standard (PAL-M/PAL-Nc/PAL/SECAM/NTSC) decoder
 * register tables for composite and S-Video inputs, plus audio-standard
 * and video-standard programming for TM5600/TM6000/TM6010 bridges.
 * Code below is byte-identical to the original (each region was already
 * collapsed onto one long line); only review comments were added.
 * Next line: file header, includes, the tm6010_a_mode module parameter,
 * the reg/std settings structs, and the start of the PAL-M composite table.
 */
/* * tm6000-stds.c - driver for TM5600/TM6000/TM6010 USB video capture devices * * Copyright (C) 2007 Mauro Carvalho Chehab <mchehab@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include "tm6000.h" #include "tm6000-regs.h" static unsigned int tm6010_a_mode; module_param(tm6010_a_mode, int, 0644); MODULE_PARM_DESC(tm6010_a_mode, "set tm6010 sif audio mode"); struct tm6000_reg_settings { unsigned char req; unsigned char reg; unsigned char value; }; struct tm6000_std_settings { v4l2_std_id id; struct tm6000_reg_settings *common; }; static struct tm6000_reg_settings composite_pal_m[] = { { TM6010_REQ07_R3F_RESET, 0x01 }, { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x04 }, { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e }, { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f }, { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00 }, { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 }, { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e }, { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x83 }, { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x0a }, { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe0 }, { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c }, { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc }, { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc }, { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd }, { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 }, { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x20 }, {
/* End of composite PAL-M table; full PAL-Nc composite table; start of the PAL composite table. Each table is terminated by a { 0, 0, 0 } sentinel. */
TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61 }, { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c }, { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c }, { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52 }, { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f }, { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc }, { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 }, { TM6010_REQ07_R3F_RESET, 0x00 }, { 0, 0, 0 } }; static struct tm6000_reg_settings composite_pal_nc[] = { { TM6010_REQ07_R3F_RESET, 0x01 }, { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x36 }, { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e }, { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f }, { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02 }, { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 }, { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e }, { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x91 }, { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x1f }, { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x0c }, { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c }, { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc }, { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc }, { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd }, { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c }, { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c }, { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1 }, { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c }, { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c }, { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52 }, { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f }, { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc }, { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 }, { TM6010_REQ07_R3F_RESET, 0x00 }, { 0, 0, 0 } }; static struct tm6000_reg_settings composite_pal[] = { { TM6010_REQ07_R3F_RESET, 0x01 }, { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x32 }, { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e }, { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f }, { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02 }, { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 }, { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x25 }, { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0xd5 }, { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x63 }, {
/* End of composite PAL table; full SECAM composite table; start of the NTSC composite table. */
TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x50 }, { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c }, { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc }, { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc }, { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd }, { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c }, { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c }, { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1 }, { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c }, { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c }, { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52 }, { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f }, { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc }, { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 }, { TM6010_REQ07_R3F_RESET, 0x00 }, { 0, 0, 0 } }; static struct tm6000_reg_settings composite_secam[] = { { TM6010_REQ07_R3F_RESET, 0x01 }, { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x38 }, { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e }, { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f }, { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02 }, { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 }, { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24 }, { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92 }, { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8 }, { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed }, { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c }, { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc }, { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc }, { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd }, { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c }, { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c }, { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1 }, { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c }, { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18 }, { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42 }, { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xff }, { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 }, { TM6010_REQ07_R3F_RESET, 0x00 }, { 0, 0, 0 } }; static struct tm6000_reg_settings composite_ntsc[] = { { TM6010_REQ07_R3F_RESET, 0x01 }, { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x00 }, { TM6010_REQ07_R01_VIDEO_CONTROL1,
/* End of composite NTSC table; composite_stds[] mapping V4L2 std IDs to the composite tables; start of the S-Video PAL-M table. */
0x0f }, { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f }, { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00 }, { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 }, { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e }, { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b }, { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2 }, { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9 }, { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c }, { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc }, { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc }, { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd }, { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 }, { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22 }, { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61 }, { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c }, { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c }, { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42 }, { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f }, { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdd }, { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 }, { TM6010_REQ07_R3F_RESET, 0x00 }, { 0, 0, 0 } }; static struct tm6000_std_settings composite_stds[] = { { .id = V4L2_STD_PAL_M, .common = composite_pal_m, }, { .id = V4L2_STD_PAL_Nc, .common = composite_pal_nc, }, { .id = V4L2_STD_PAL, .common = composite_pal, }, { .id = V4L2_STD_SECAM, .common = composite_secam, }, { .id = V4L2_STD_NTSC, .common = composite_ntsc, }, }; static struct tm6000_reg_settings svideo_pal_m[] = { { TM6010_REQ07_R3F_RESET, 0x01 }, { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x05 }, { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e }, { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f }, { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04 }, { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 }, { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e }, { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x83 }, { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x0a }, { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe0 }, { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c }, { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc }, { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc }, {
/* End of S-Video PAL-M table; full S-Video PAL-Nc table; start of the S-Video PAL table. */
TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd }, { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 }, { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22 }, { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61 }, { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c }, { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c }, { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52 }, { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f }, { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc }, { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 }, { TM6010_REQ07_R3F_RESET, 0x00 }, { 0, 0, 0 } }; static struct tm6000_reg_settings svideo_pal_nc[] = { { TM6010_REQ07_R3F_RESET, 0x01 }, { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x37 }, { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e }, { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f }, { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04 }, { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 }, { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e }, { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x91 }, { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x1f }, { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x0c }, { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c }, { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc }, { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc }, { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd }, { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 }, { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22 }, { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1 }, { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c }, { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c }, { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52 }, { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f }, { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc }, { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 }, { TM6010_REQ07_R3F_RESET, 0x00 }, { 0, 0, 0 } }; static struct tm6000_reg_settings svideo_pal[] = { { TM6010_REQ07_R3F_RESET, 0x01 }, { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x33 }, { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e }, { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f }, { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04 }, { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x30 }, {
/* End of S-Video PAL table; full S-Video SECAM table; "static struct" opener of the S-Video NTSC table continues on the next line. */
TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x25 }, { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0xd5 }, { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x63 }, { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x50 }, { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c }, { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc }, { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc }, { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd }, { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c }, { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2a }, { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1 }, { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c }, { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c }, { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52 }, { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f }, { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc }, { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 }, { TM6010_REQ07_R3F_RESET, 0x00 }, { 0, 0, 0 } }; static struct tm6000_reg_settings svideo_secam[] = { { TM6010_REQ07_R3F_RESET, 0x01 }, { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x39 }, { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e }, { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f }, { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x03 }, { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x31 }, { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24 }, { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92 }, { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8 }, { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed }, { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c }, { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc }, { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc }, { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd }, { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c }, { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2a }, { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1 }, { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c }, { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18 }, { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42 }, { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xff }, { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 }, { TM6010_REQ07_R3F_RESET, 0x00 }, { 0, 0, 0 } }; static struct
/* S-Video NTSC table; svideo_stds[] std-ID map; start of tm6000_set_audio_std(), which picks SIF audio registers (areg_02/05/06) from dev->norm and the tm6010_a_mode parameter; the dev->radio branch programs FM radio audio and returns early. */
tm6000_reg_settings svideo_ntsc[] = { { TM6010_REQ07_R3F_RESET, 0x01 }, { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x01 }, { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0f }, { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f }, { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x03 }, { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x30 }, { TM6010_REQ07_R17_HLOOP_MAXSTATE, 0x8b }, { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e }, { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b }, { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2 }, { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9 }, { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c }, { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc }, { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc }, { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd }, { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 }, { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22 }, { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61 }, { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c }, { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c }, { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42 }, { TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6f }, { TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdd }, { TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07 }, { TM6010_REQ07_R3F_RESET, 0x00 }, { 0, 0, 0 } }; static struct tm6000_std_settings svideo_stds[] = { { .id = V4L2_STD_PAL_M, .common = svideo_pal_m, }, { .id = V4L2_STD_PAL_Nc, .common = svideo_pal_nc, }, { .id = V4L2_STD_PAL, .common = svideo_pal, }, { .id = V4L2_STD_SECAM, .common = svideo_secam, }, { .id = V4L2_STD_NTSC, .common = svideo_ntsc, }, }; static int tm6000_set_audio_std(struct tm6000_core *dev) { uint8_t areg_02 = 0x04; /* GC1 Fixed gain 0dB */ uint8_t areg_05 = 0x01; /* Auto 4.5 = M Japan, Auto 6.5 = DK */ uint8_t areg_06 = 0x02; /* Auto de-emphasis, mannual channel mode */ uint8_t nicam_flag = 0; /* No NICAM */ if (dev->radio) { tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00); tm6000_set_reg(dev, TM6010_REQ08_R02_A_FIX_GAIN_CTRL, 0x04); tm6000_set_reg(dev, TM6010_REQ08_R03_A_AUTO_GAIN_CTRL, 0x00); tm6000_set_reg(dev,
/* Radio-mode register writes continue, then the non-radio standard/mode selection: per-norm checks (NTSC M-KR/M-JP, STD/MN) and the tm6010_a_mode switch (0=auto, 1=A2, 2=NICAM, 3=other). */
TM6010_REQ08_R04_A_SIF_AMP_CTRL, 0x80); tm6000_set_reg(dev, TM6010_REQ08_R05_A_STANDARD_MOD, 0x0c); /* set mono or stereo */ if (dev->amode == V4L2_TUNER_MODE_MONO) tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, 0x00); else if (dev->amode == V4L2_TUNER_MODE_STEREO) tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, 0x02); tm6000_set_reg(dev, TM6010_REQ08_R09_A_MAIN_VOL, 0x18); tm6000_set_reg(dev, TM6010_REQ08_R0C_A_ASD_THRES2, 0x0a); tm6000_set_reg(dev, TM6010_REQ08_R0D_A_AMD_THRES, 0x40); tm6000_set_reg(dev, TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe); tm6000_set_reg(dev, TM6010_REQ08_R1E_A_GAIN_DEEMPH_OUT, 0x13); tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x80); tm6000_set_reg(dev, TM6010_REQ07_RFE_POWER_DOWN, 0xff); return 0; } /* * STD/MN shouldn't be affected by tm6010_a_mode, as there's just one * audio standard for each V4L2_STD type. */ if ((dev->norm & V4L2_STD_NTSC) == V4L2_STD_NTSC_M_KR) { areg_05 |= 0x04; } else if ((dev->norm & V4L2_STD_NTSC) == V4L2_STD_NTSC_M_JP) { areg_05 |= 0x43; } else if (dev->norm & V4L2_STD_MN) { areg_05 |= 0x22; } else switch (tm6010_a_mode) { /* auto */ case 0: if ((dev->norm & V4L2_STD_SECAM) == V4L2_STD_SECAM_L) areg_05 |= 0x00; else /* Other PAL/SECAM standards */ areg_05 |= 0x10; break; /* A2 */ case 1: if (dev->norm & V4L2_STD_DK) areg_05 = 0x09; else areg_05 = 0x05; break; /* NICAM */ case 2: if (dev->norm & V4L2_STD_DK) { areg_05 = 0x06; } else if (dev->norm & V4L2_STD_PAL_I) { areg_05 = 0x08; } else if (dev->norm & V4L2_STD_SECAM_L) { areg_05 = 0x0a; areg_02 = 0x02; } else { areg_05 = 0x07; } nicam_flag = 1; break; /* other */ case 3: if (dev->norm & V4L2_STD_DK) { areg_05 = 0x0b; } else { areg_05 = 0x02; } break; } tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00); tm6000_set_reg(dev, TM6010_REQ08_R02_A_FIX_GAIN_CTRL, areg_02); tm6000_set_reg(dev, TM6010_REQ08_R03_A_AUTO_GAIN_CTRL, 0x00); tm6000_set_reg(dev, TM6010_REQ08_R04_A_SIF_AMP_CTRL, 0xa0); tm6000_set_reg(dev, TM6010_REQ08_R05_A_STANDARD_MOD, areg_05);
/* Remaining audio-block register writes (volumes, thresholds, AGC, NICAM limits), end of tm6000_set_audio_std(); tm6000_get_std_res() (480 lines for 525/60 norms, else 576, width fixed at 720); start of tm6000_load_std(). */
tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, areg_06); tm6000_set_reg(dev, TM6010_REQ08_R07_A_LEFT_VOL, 0x00); tm6000_set_reg(dev, TM6010_REQ08_R08_A_RIGHT_VOL, 0x00); tm6000_set_reg(dev, TM6010_REQ08_R09_A_MAIN_VOL, 0x08); tm6000_set_reg(dev, TM6010_REQ08_R0A_A_I2S_MOD, 0x91); tm6000_set_reg(dev, TM6010_REQ08_R0B_A_ASD_THRES1, 0x20); tm6000_set_reg(dev, TM6010_REQ08_R0C_A_ASD_THRES2, 0x12); tm6000_set_reg(dev, TM6010_REQ08_R0D_A_AMD_THRES, 0x20); tm6000_set_reg(dev, TM6010_REQ08_R0E_A_MONO_THRES1, 0xf0); tm6000_set_reg(dev, TM6010_REQ08_R0F_A_MONO_THRES2, 0x80); tm6000_set_reg(dev, TM6010_REQ08_R10_A_MUTE_THRES1, 0xc0); tm6000_set_reg(dev, TM6010_REQ08_R11_A_MUTE_THRES2, 0x80); tm6000_set_reg(dev, TM6010_REQ08_R12_A_AGC_U, 0x12); tm6000_set_reg(dev, TM6010_REQ08_R13_A_AGC_ERR_T, 0xfe); tm6000_set_reg(dev, TM6010_REQ08_R14_A_AGC_GAIN_INIT, 0x20); tm6000_set_reg(dev, TM6010_REQ08_R15_A_AGC_STEP_THR, 0x14); tm6000_set_reg(dev, TM6010_REQ08_R16_A_AGC_GAIN_MAX, 0xfe); tm6000_set_reg(dev, TM6010_REQ08_R17_A_AGC_GAIN_MIN, 0x01); tm6000_set_reg(dev, TM6010_REQ08_R18_A_TR_CTRL, 0xa0); tm6000_set_reg(dev, TM6010_REQ08_R19_A_FH_2FH_GAIN, 0x32); tm6000_set_reg(dev, TM6010_REQ08_R1A_A_NICAM_SER_MAX, 0x64); tm6000_set_reg(dev, TM6010_REQ08_R1B_A_NICAM_SER_MIN, 0x20); tm6000_set_reg(dev, REQ_08_SET_GET_AVREG_BIT, 0x1c, 0x00); tm6000_set_reg(dev, REQ_08_SET_GET_AVREG_BIT, 0x1d, 0x00); tm6000_set_reg(dev, TM6010_REQ08_R1E_A_GAIN_DEEMPH_OUT, 0x13); tm6000_set_reg(dev, TM6010_REQ08_R1F_A_TEST_INTF_SEL, 0x00); tm6000_set_reg(dev, TM6010_REQ08_R20_A_TEST_PIN_SEL, 0x00); tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x80); return 0; } void tm6000_get_std_res(struct tm6000_core *dev) { /* Currently, those are the only supported resoltions */ if (dev->norm & V4L2_STD_525_60) dev->height = 480; else dev->height = 576; dev->width = 720; } static int tm6000_load_std(struct tm6000_core *dev, struct tm6000_reg_settings *set) { int i, rc; /* Load board's initialization table */ for (i = 0;
/* End of tm6000_load_std() (walks a table until the req==0 sentinel, logging and aborting on a failed write); start of tm6000_set_standard(): resolution setup, input selection, and the TM6010 video-mux register programming. */
set[i].req; i++) { rc = tm6000_set_reg(dev, set[i].req, set[i].reg, set[i].value); if (rc < 0) { printk(KERN_ERR "Error %i while setting " "req %d, reg %d to value %d\n", rc, set[i].req, set[i].reg, set[i].value); return rc; } } return 0; } int tm6000_set_standard(struct tm6000_core *dev) { struct tm6000_input *input; int i, rc = 0; u8 reg_07_fe = 0x8a; u8 reg_08_f1 = 0xfc; u8 reg_08_e2 = 0xf0; u8 reg_08_e6 = 0x0f; tm6000_get_std_res(dev); if (!dev->radio) input = &dev->vinput[dev->input]; else input = &dev->rinput; if (dev->dev_type == TM6010) { switch (input->vmux) { case TM6000_VMUX_VIDEO_A: tm6000_set_reg(dev, TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4); tm6000_set_reg(dev, TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1); tm6000_set_reg(dev, TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0); tm6000_set_reg(dev, TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2); tm6000_set_reg(dev, TM6010_REQ08_RED_GAIN_SEL, 0xe8); reg_07_fe |= 0x01; break; case TM6000_VMUX_VIDEO_B: tm6000_set_reg(dev, TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8); tm6000_set_reg(dev, TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1); tm6000_set_reg(dev, TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0); tm6000_set_reg(dev, TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2); tm6000_set_reg(dev, TM6010_REQ08_RED_GAIN_SEL, 0xe8); reg_07_fe |= 0x01; break; case TM6000_VMUX_VIDEO_AB: tm6000_set_reg(dev, TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc); tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8); reg_08_e6 = 0x00; tm6000_set_reg(dev, TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2); tm6000_set_reg(dev, TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0); tm6000_set_reg(dev, TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2); tm6000_set_reg(dev, TM6010_REQ08_RED_GAIN_SEL, 0xe0); break; default: break; } switch (input->amux) { case TM6000_AMUX_ADC1: tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x00, 0x0f); /* Mux overflow workaround */ tm6000_set_reg_mask(dev, TM6010_REQ07_R07_OUTPUT_CONTROL, 0x10, 0xf0); break; case TM6000_AMUX_ADC2: tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x08,
/* TM6010 audio-mux cases (ADC2/SIF1/SIF2) and power-control register writes; the else branch handles TM5600/TM6000 video-mux programming via REQ07/REQ_03 registers. */
0x0f); /* Mux overflow workaround */ tm6000_set_reg_mask(dev, TM6010_REQ07_R07_OUTPUT_CONTROL, 0x10, 0xf0); break; case TM6000_AMUX_SIF1: reg_08_e2 |= 0x02; reg_08_e6 = 0x08; reg_07_fe |= 0x40; reg_08_f1 |= 0x02; tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3); tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x02, 0x0f); /* Mux overflow workaround */ tm6000_set_reg_mask(dev, TM6010_REQ07_R07_OUTPUT_CONTROL, 0x30, 0xf0); break; case TM6000_AMUX_SIF2: reg_08_e2 |= 0x02; reg_08_e6 = 0x08; reg_07_fe |= 0x40; reg_08_f1 |= 0x02; tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf7); tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x02, 0x0f); /* Mux overflow workaround */ tm6000_set_reg_mask(dev, TM6010_REQ07_R07_OUTPUT_CONTROL, 0x30, 0xf0); break; default: break; } tm6000_set_reg(dev, TM6010_REQ08_RE2_POWER_DOWN_CTRL1, reg_08_e2); tm6000_set_reg(dev, TM6010_REQ08_RE6_POWER_DOWN_CTRL2, reg_08_e6); tm6000_set_reg(dev, TM6010_REQ08_RF1_AADC_POWER_DOWN, reg_08_f1); tm6000_set_reg(dev, TM6010_REQ07_RFE_POWER_DOWN, reg_07_fe); } else { switch (input->vmux) { case TM6000_VMUX_VIDEO_A: tm6000_set_reg(dev, TM6000_REQ07_RE3_VADC_INP_LPF_SEL1, 0x10); tm6000_set_reg(dev, TM6000_REQ07_RE5_VADC_INP_LPF_SEL2, 0x00); tm6000_set_reg(dev, TM6000_REQ07_RE8_VADC_PWDOWN_CTL, 0x0f); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, input->v_gpio, 0); break; case TM6000_VMUX_VIDEO_B: tm6000_set_reg(dev, TM6000_REQ07_RE3_VADC_INP_LPF_SEL1, 0x00); tm6000_set_reg(dev, TM6000_REQ07_RE5_VADC_INP_LPF_SEL2, 0x00); tm6000_set_reg(dev, TM6000_REQ07_RE8_VADC_PWDOWN_CTL, 0x0f); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, input->v_gpio, 0); break; case TM6000_VMUX_VIDEO_AB: tm6000_set_reg(dev, TM6000_REQ07_RE3_VADC_INP_LPF_SEL1, 0x10); tm6000_set_reg(dev, TM6000_REQ07_RE5_VADC_INP_LPF_SEL2, 0x10); tm6000_set_reg(dev, TM6000_REQ07_RE8_VADC_PWDOWN_CTL, 0x00); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, input->v_gpio, 1); break; default: break; } switch (input->amux) { case
/* Legacy audio-mux cases; then the per-input standard-table lookup (svideo_stds[] or composite_stds[] matched against dev->norm, -EINVAL if no match), SIF audio setup, and a 40 ms settle delay. */
TM6000_AMUX_ADC1: tm6000_set_reg_mask(dev, TM6000_REQ07_REB_VADC_AADC_MODE, 0x00, 0x0f); break; case TM6000_AMUX_ADC2: tm6000_set_reg_mask(dev, TM6000_REQ07_REB_VADC_AADC_MODE, 0x04, 0x0f); break; default: break; } } if (input->type == TM6000_INPUT_SVIDEO) { for (i = 0; i < ARRAY_SIZE(svideo_stds); i++) { if (dev->norm & svideo_stds[i].id) { rc = tm6000_load_std(dev, svideo_stds[i].common); goto ret; } } return -EINVAL; } else { for (i = 0; i < ARRAY_SIZE(composite_stds); i++) { if (dev->norm & composite_stds[i].id) { rc = tm6000_load_std(dev, composite_stds[i].common); goto ret; } } return -EINVAL; } ret: if (rc < 0) return rc; if ((dev->dev_type == TM6010) && ((input->amux == TM6000_AMUX_SIF1) || (input->amux == TM6000_AMUX_SIF2))) tm6000_set_audio_std(dev); msleep(40); return 0; }
gpl-2.0
tank0412/linux-3.4-sunxi
drivers/media/video/cx25821/cx25821-audio-upstream.c
4902
20308
/*
 * cx25821-audio-upstream.c (partial): SRAM channel setup and RISC DMA
 * program generation for audio upstream on the CX25821 PCIe bridge.
 * NOTE(review): this chunk is truncated mid-way through
 * cx25821_free_memory_audio(); code is kept byte-identical, comments only.
 * Next line: file header, includes, module metadata, the interrupt mask,
 * and cx25821_sram_channel_setup_upstream_audio() up to the CDT write loop.
 */
/* * Driver for the Conexant CX25821 PCIe bridge * * Copyright (C) 2009 Conexant Systems Inc. * Authors <hiep.huynh@conexant.com>, <shu.lin@conexant.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "cx25821-video.h" #include "cx25821-audio-upstream.h" #include <linux/fs.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/syscalls.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/uaccess.h> MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards"); MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>"); MODULE_LICENSE("GPL"); static int _intr_msk = FLD_AUD_SRC_RISCI1 | FLD_AUD_SRC_OF | FLD_AUD_SRC_SYNC | FLD_AUD_SRC_OPC_ERR; int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev, struct sram_channel *ch, unsigned int bpl, u32 risc) { unsigned int i, lines; u32 cdt; if (ch->cmds_start == 0) { cx_write(ch->ptr1_reg, 0); cx_write(ch->ptr2_reg, 0); cx_write(ch->cnt2_reg, 0); cx_write(ch->cnt1_reg, 0); return 0; } bpl = (bpl + 7) & ~7; /* alignment */ cdt = ch->cdt; lines = ch->fifo_size / bpl; if (lines > 3) lines = 3; BUG_ON(lines < 2); /* write CDT */ for (i = 0; i < lines; i++) { cx_write(cdt + 16 * i, ch->fifo_start + bpl *
/* Rest of the SRAM setup (CMDS block and pointer/count registers); then cx25821_risc_field_upstream_audio(), which emits one RISC read per audio line and optionally a FIFO-enable write after line 2. */
i); cx_write(cdt + 16 * i + 4, 0); cx_write(cdt + 16 * i + 8, 0); cx_write(cdt + 16 * i + 12, 0); } /* write CMDS */ cx_write(ch->cmds_start + 0, risc); cx_write(ch->cmds_start + 4, 0); cx_write(ch->cmds_start + 8, cdt); cx_write(ch->cmds_start + 12, AUDIO_CDT_SIZE_QW); cx_write(ch->cmds_start + 16, ch->ctrl_start); /* IQ size */ cx_write(ch->cmds_start + 20, AUDIO_IQ_SIZE_DW); for (i = 24; i < 80; i += 4) cx_write(ch->cmds_start + i, 0); /* fill registers */ cx_write(ch->ptr1_reg, ch->fifo_start); cx_write(ch->ptr2_reg, cdt); cx_write(ch->cnt2_reg, AUDIO_CDT_SIZE_QW); cx_write(ch->cnt1_reg, AUDIO_CLUSTER_SIZE_QW - 1); return 0; } static __le32 *cx25821_risc_field_upstream_audio(struct cx25821_dev *dev, __le32 *rp, dma_addr_t databuf_phys_addr, unsigned int bpl, int fifo_enable) { unsigned int line; struct sram_channel *sram_ch = dev->channels[dev->_audio_upstream_channel].sram_channels; int offset = 0; /* scan lines */ for (line = 0; line < LINES_PER_AUDIO_BUFFER; line++) { *(rp++) = cpu_to_le32(RISC_READ | RISC_SOL | RISC_EOL | bpl); *(rp++) = cpu_to_le32(databuf_phys_addr + offset); *(rp++) = cpu_to_le32(0); /* bits 63-32 */ /* Check if we need to enable the FIFO * after the first 3 lines.
/* End of the per-field emitter; cx25821_risc_buffer_upstream_audio() builds the full looping RISC program (one segment per audio frame, jump + IRQ at each end). cx25821_free_memory_audio() begins but is cut off at the end of this chunk. */
 * For the upstream audio channel, * the risc engine will enable the FIFO */ if (fifo_enable && line == 2) { *(rp++) = RISC_WRITECR; *(rp++) = sram_ch->dma_ctl; *(rp++) = sram_ch->fld_aud_fifo_en; *(rp++) = 0x00000020; } offset += AUDIO_LINE_SIZE; } return rp; } int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev, struct pci_dev *pci, unsigned int bpl, unsigned int lines) { __le32 *rp; int fifo_enable = 0; int frame = 0, i = 0; int frame_size = AUDIO_DATA_BUF_SZ; int databuf_offset = 0; int risc_flag = RISC_CNT_INC; dma_addr_t risc_phys_jump_addr; /* Virtual address of Risc buffer program */ rp = dev->_risc_virt_addr; /* sync instruction */ *(rp++) = cpu_to_le32(RISC_RESYNC | AUDIO_SYNC_LINE); for (frame = 0; frame < NUM_AUDIO_FRAMES; frame++) { databuf_offset = frame_size * frame; if (frame == 0) { fifo_enable = 1; risc_flag = RISC_CNT_RESET; } else { fifo_enable = 0; risc_flag = RISC_CNT_INC; } /* Calculate physical jump address */ if ((frame + 1) == NUM_AUDIO_FRAMES) { risc_phys_jump_addr = dev->_risc_phys_start_addr + RISC_SYNC_INSTRUCTION_SIZE; } else { risc_phys_jump_addr = dev->_risc_phys_start_addr + RISC_SYNC_INSTRUCTION_SIZE + AUDIO_RISC_DMA_BUF_SIZE * (frame + 1); } rp = cx25821_risc_field_upstream_audio(dev, rp, dev->_audiodata_buf_phys_addr + databuf_offset, bpl, fifo_enable); if (USE_RISC_NOOP_AUDIO) { for (i = 0; i < NUM_NO_OPS; i++) *(rp++) = cpu_to_le32(RISC_NOOP); } /* Loop to (Nth)FrameRISC or to Start of Risc program & * generate IRQ */ *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | risc_flag); *(rp++) = cpu_to_le32(risc_phys_jump_addr); *(rp++) = cpu_to_le32(0); /* Recalculate virtual address based on frame index */ rp = dev->_risc_virt_addr + RISC_SYNC_INSTRUCTION_SIZE / 4 + (AUDIO_RISC_DMA_BUF_SIZE * (frame + 1) / 4); } return 0; } void cx25821_free_memory_audio(struct cx25821_dev *dev) { if (dev->_risc_virt_addr) { pci_free_consistent(dev->pci, dev->_audiorisc_size, dev->_risc_virt_addr, dev->_risc_phys_addr); dev->_risc_virt_addr = 
NULL; } if (dev->_audiodata_buf_virt_addr) { pci_free_consistent(dev->pci, dev->_audiodata_buf_size, dev->_audiodata_buf_virt_addr, dev->_audiodata_buf_phys_addr); dev->_audiodata_buf_virt_addr = NULL; } } void cx25821_stop_upstream_audio(struct cx25821_dev *dev) { struct sram_channel *sram_ch = dev->channels[AUDIO_UPSTREAM_SRAM_CHANNEL_B].sram_channels; u32 tmp = 0; if (!dev->_audio_is_running) { printk(KERN_DEBUG pr_fmt("No audio file is currently running so return!\n")); return; } /* Disable RISC interrupts */ cx_write(sram_ch->int_msk, 0); /* Turn OFF risc and fifo enable in AUD_DMA_CNTRL */ tmp = cx_read(sram_ch->dma_ctl); cx_write(sram_ch->dma_ctl, tmp & ~(sram_ch->fld_aud_fifo_en | sram_ch->fld_aud_risc_en)); /* Clear data buffer memory */ if (dev->_audiodata_buf_virt_addr) memset(dev->_audiodata_buf_virt_addr, 0, dev->_audiodata_buf_size); dev->_audio_is_running = 0; dev->_is_first_audio_frame = 0; dev->_audioframe_count = 0; dev->_audiofile_status = END_OF_FILE; kfree(dev->_irq_audio_queues); dev->_irq_audio_queues = NULL; kfree(dev->_audiofilename); } void cx25821_free_mem_upstream_audio(struct cx25821_dev *dev) { if (dev->_audio_is_running) cx25821_stop_upstream_audio(dev); cx25821_free_memory_audio(dev); } int cx25821_get_audio_data(struct cx25821_dev *dev, struct sram_channel *sram_ch) { struct file *myfile; int frame_index_temp = dev->_audioframe_index; int i = 0; int line_size = AUDIO_LINE_SIZE; int frame_size = AUDIO_DATA_BUF_SZ; int frame_offset = frame_size * frame_index_temp; ssize_t vfs_read_retval = 0; char mybuf[line_size]; loff_t file_offset = dev->_audioframe_count * frame_size; loff_t pos; mm_segment_t old_fs; if (dev->_audiofile_status == END_OF_FILE) return 0; myfile = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(myfile)) { const int open_errno = -PTR_ERR(myfile); pr_err("%s(): ERROR opening file(%s) with errno = %d!\n", __func__, dev->_audiofilename, open_errno); return PTR_ERR(myfile); } else { if 
(!(myfile->f_op)) { pr_err("%s(): File has no file operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } if (!myfile->f_op->read) { pr_err("%s(): File has no READ operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } pos = myfile->f_pos; old_fs = get_fs(); set_fs(KERNEL_DS); for (i = 0; i < dev->_audio_lines_count; i++) { pos = file_offset; vfs_read_retval = vfs_read(myfile, mybuf, line_size, &pos); if (vfs_read_retval > 0 && vfs_read_retval == line_size && dev->_audiodata_buf_virt_addr != NULL) { memcpy((void *)(dev->_audiodata_buf_virt_addr + frame_offset / 4), mybuf, vfs_read_retval); } file_offset += vfs_read_retval; frame_offset += vfs_read_retval; if (vfs_read_retval < line_size) { pr_info("Done: exit %s() since no more bytes to read from Audio file\n", __func__); break; } } if (i > 0) dev->_audioframe_count++; dev->_audiofile_status = (vfs_read_retval == line_size) ? IN_PROGRESS : END_OF_FILE; set_fs(old_fs); filp_close(myfile, NULL); } return 0; } static void cx25821_audioups_handler(struct work_struct *work) { struct cx25821_dev *dev = container_of(work, struct cx25821_dev, _audio_work_entry); if (!dev) { pr_err("ERROR %s(): since container_of(work_struct) FAILED!\n", __func__); return; } cx25821_get_audio_data(dev, dev->channels[dev->_audio_upstream_channel]. 
sram_channels); } int cx25821_openfile_audio(struct cx25821_dev *dev, struct sram_channel *sram_ch) { struct file *myfile; int i = 0, j = 0; int line_size = AUDIO_LINE_SIZE; ssize_t vfs_read_retval = 0; char mybuf[line_size]; loff_t pos; loff_t offset = (unsigned long)0; mm_segment_t old_fs; myfile = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(myfile)) { const int open_errno = -PTR_ERR(myfile); pr_err("%s(): ERROR opening file(%s) with errno = %d!\n", __func__, dev->_audiofilename, open_errno); return PTR_ERR(myfile); } else { if (!(myfile->f_op)) { pr_err("%s(): File has no file operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } if (!myfile->f_op->read) { pr_err("%s(): File has no READ operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } pos = myfile->f_pos; old_fs = get_fs(); set_fs(KERNEL_DS); for (j = 0; j < NUM_AUDIO_FRAMES; j++) { for (i = 0; i < dev->_audio_lines_count; i++) { pos = offset; vfs_read_retval = vfs_read(myfile, mybuf, line_size, &pos); if (vfs_read_retval > 0 && vfs_read_retval == line_size && dev->_audiodata_buf_virt_addr != NULL) { memcpy((void *)(dev-> _audiodata_buf_virt_addr + offset / 4), mybuf, vfs_read_retval); } offset += vfs_read_retval; if (vfs_read_retval < line_size) { pr_info("Done: exit %s() since no more bytes to read from Audio file\n", __func__); break; } } if (i > 0) dev->_audioframe_count++; if (vfs_read_retval < line_size) break; } dev->_audiofile_status = (vfs_read_retval == line_size) ? 
IN_PROGRESS : END_OF_FILE; set_fs(old_fs); myfile->f_pos = 0; filp_close(myfile, NULL); } return 0; } static int cx25821_audio_upstream_buffer_prepare(struct cx25821_dev *dev, struct sram_channel *sram_ch, int bpl) { int ret = 0; dma_addr_t dma_addr; dma_addr_t data_dma_addr; cx25821_free_memory_audio(dev); dev->_risc_virt_addr = pci_alloc_consistent(dev->pci, dev->audio_upstream_riscbuf_size, &dma_addr); dev->_risc_virt_start_addr = dev->_risc_virt_addr; dev->_risc_phys_start_addr = dma_addr; dev->_risc_phys_addr = dma_addr; dev->_audiorisc_size = dev->audio_upstream_riscbuf_size; if (!dev->_risc_virt_addr) { printk(KERN_DEBUG pr_fmt("ERROR: pci_alloc_consistent() FAILED to allocate memory for RISC program! Returning\n")); return -ENOMEM; } /* Clear out memory at address */ memset(dev->_risc_virt_addr, 0, dev->_audiorisc_size); /* For Audio Data buffer allocation */ dev->_audiodata_buf_virt_addr = pci_alloc_consistent(dev->pci, dev->audio_upstream_databuf_size, &data_dma_addr); dev->_audiodata_buf_phys_addr = data_dma_addr; dev->_audiodata_buf_size = dev->audio_upstream_databuf_size; if (!dev->_audiodata_buf_virt_addr) { printk(KERN_DEBUG pr_fmt("ERROR: pci_alloc_consistent() FAILED to allocate memory for data buffer! 
Returning\n")); return -ENOMEM; } /* Clear out memory at address */ memset(dev->_audiodata_buf_virt_addr, 0, dev->_audiodata_buf_size); ret = cx25821_openfile_audio(dev, sram_ch); if (ret < 0) return ret; /* Creating RISC programs */ ret = cx25821_risc_buffer_upstream_audio(dev, dev->pci, bpl, dev->_audio_lines_count); if (ret < 0) { printk(KERN_DEBUG pr_fmt("ERROR creating audio upstream RISC programs!\n")); goto error; } return 0; error: return ret; } int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num, u32 status) { int i = 0; u32 int_msk_tmp; struct sram_channel *channel = dev->channels[chan_num].sram_channels; dma_addr_t risc_phys_jump_addr; __le32 *rp; if (status & FLD_AUD_SRC_RISCI1) { /* Get interrupt_index of the program that interrupted */ u32 prog_cnt = cx_read(channel->gpcnt); /* Since we've identified our IRQ, clear our bits from the * interrupt mask and interrupt status registers */ cx_write(channel->int_msk, 0); cx_write(channel->int_stat, cx_read(channel->int_stat)); spin_lock(&dev->slock); while (prog_cnt != dev->_last_index_irq) { /* Update _last_index_irq */ if (dev->_last_index_irq < (NUMBER_OF_PROGRAMS - 1)) dev->_last_index_irq++; else dev->_last_index_irq = 0; dev->_audioframe_index = dev->_last_index_irq; queue_work(dev->_irq_audio_queues, &dev->_audio_work_entry); } if (dev->_is_first_audio_frame) { dev->_is_first_audio_frame = 0; if (dev->_risc_virt_start_addr != NULL) { risc_phys_jump_addr = dev->_risc_phys_start_addr + RISC_SYNC_INSTRUCTION_SIZE + AUDIO_RISC_DMA_BUF_SIZE; rp = cx25821_risc_field_upstream_audio(dev, dev->_risc_virt_start_addr + 1, dev->_audiodata_buf_phys_addr, AUDIO_LINE_SIZE, FIFO_DISABLE); if (USE_RISC_NOOP_AUDIO) { for (i = 0; i < NUM_NO_OPS; i++) { *(rp++) = cpu_to_le32(RISC_NOOP); } } /* Jump to 2nd Audio Frame */ *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_RESET); *(rp++) = cpu_to_le32(risc_phys_jump_addr); *(rp++) = cpu_to_le32(0); } } spin_unlock(&dev->slock); } else { if (status & 
FLD_AUD_SRC_OF) pr_warn("%s(): Audio Received Overflow Error Interrupt!\n", __func__); if (status & FLD_AUD_SRC_SYNC) pr_warn("%s(): Audio Received Sync Error Interrupt!\n", __func__); if (status & FLD_AUD_SRC_OPC_ERR) pr_warn("%s(): Audio Received OpCode Error Interrupt!\n", __func__); /* Read and write back the interrupt status register to clear * our bits */ cx_write(channel->int_stat, cx_read(channel->int_stat)); } if (dev->_audiofile_status == END_OF_FILE) { pr_warn("EOF Channel Audio Framecount = %d\n", dev->_audioframe_count); return -1; } /* ElSE, set the interrupt mask register, re-enable irq. */ int_msk_tmp = cx_read(channel->int_msk); cx_write(channel->int_msk, int_msk_tmp |= _intr_msk); return 0; } static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id) { struct cx25821_dev *dev = dev_id; u32 msk_stat, audio_status; int handled = 0; struct sram_channel *sram_ch; if (!dev) return -1; sram_ch = dev->channels[dev->_audio_upstream_channel].sram_channels; msk_stat = cx_read(sram_ch->int_mstat); audio_status = cx_read(sram_ch->int_stat); /* Only deal with our interrupt */ if (audio_status) { handled = cx25821_audio_upstream_irq(dev, dev->_audio_upstream_channel, audio_status); } if (handled < 0) cx25821_stop_upstream_audio(dev); else handled += handled; return IRQ_RETVAL(handled); } static void cx25821_wait_fifo_enable(struct cx25821_dev *dev, struct sram_channel *sram_ch) { int count = 0; u32 tmp; do { /* Wait 10 microsecond before checking to see if the FIFO is * turned ON. */ udelay(10); tmp = cx_read(sram_ch->dma_ctl); /* 10 millisecond timeout */ if (count++ > 1000) { pr_err("ERROR: %s() fifo is NOT turned on. Timeout!\n", __func__); return; } } while (!(tmp & sram_ch->fld_aud_fifo_en)); } int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev, struct sram_channel *sram_ch) { u32 tmp = 0; int err = 0; /* Set the physical start address of the RISC program in the initial * program counter(IPC) member of the CMDS. 
*/ cx_write(sram_ch->cmds_start + 0, dev->_risc_phys_addr); /* Risc IPC High 64 bits 63-32 */ cx_write(sram_ch->cmds_start + 4, 0); /* reset counter */ cx_write(sram_ch->gpcnt_ctl, 3); /* Set the line length (It looks like we do not need to set the * line length) */ cx_write(sram_ch->aud_length, AUDIO_LINE_SIZE & FLD_AUD_DST_LN_LNGTH); /* Set the input mode to 16-bit */ tmp = cx_read(sram_ch->aud_cfg); tmp |= FLD_AUD_SRC_ENABLE | FLD_AUD_DST_PK_MODE | FLD_AUD_CLK_ENABLE | FLD_AUD_MASTER_MODE | FLD_AUD_CLK_SELECT_PLL_D | FLD_AUD_SONY_MODE; cx_write(sram_ch->aud_cfg, tmp); /* Read and write back the interrupt status register to clear it */ tmp = cx_read(sram_ch->int_stat); cx_write(sram_ch->int_stat, tmp); /* Clear our bits from the interrupt status register. */ cx_write(sram_ch->int_stat, _intr_msk); /* Set the interrupt mask register, enable irq. */ cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << sram_ch->irq_bit)); tmp = cx_read(sram_ch->int_msk); cx_write(sram_ch->int_msk, tmp |= _intr_msk); err = request_irq(dev->pci->irq, cx25821_upstream_irq_audio, IRQF_SHARED, dev->name, dev); if (err < 0) { pr_err("%s: can't get upstream IRQ %d\n", dev->name, dev->pci->irq); goto fail_irq; } /* Start the DMA engine */ tmp = cx_read(sram_ch->dma_ctl); cx_set(sram_ch->dma_ctl, tmp | sram_ch->fld_aud_risc_en); dev->_audio_is_running = 1; dev->_is_first_audio_frame = 1; /* The fifo_en bit turns on by the first Risc program */ cx25821_wait_fifo_enable(dev, sram_ch); return 0; fail_irq: cx25821_dev_unregister(dev); return err; } int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select) { struct sram_channel *sram_ch; int retval = 0; int err = 0; int str_length = 0; if (dev->_audio_is_running) { pr_warn("Audio Channel is still running so return!\n"); return 0; } dev->_audio_upstream_channel = channel_select; sram_ch = dev->channels[channel_select].sram_channels; /* Work queue */ INIT_WORK(&dev->_audio_work_entry, cx25821_audioups_handler); dev->_irq_audio_queues 
= create_singlethread_workqueue("cx25821_audioworkqueue"); if (!dev->_irq_audio_queues) { printk(KERN_DEBUG pr_fmt("ERROR: create_singlethread_workqueue() for Audio FAILED!\n")); return -ENOMEM; } dev->_last_index_irq = 0; dev->_audio_is_running = 0; dev->_audioframe_count = 0; dev->_audiofile_status = RESET_STATUS; dev->_audio_lines_count = LINES_PER_AUDIO_BUFFER; _line_size = AUDIO_LINE_SIZE; if (dev->input_audiofilename) { str_length = strlen(dev->input_audiofilename); dev->_audiofilename = kmemdup(dev->input_audiofilename, str_length + 1, GFP_KERNEL); if (!dev->_audiofilename) goto error; /* Default if filename is empty string */ if (strcmp(dev->input_audiofilename, "") == 0) dev->_audiofilename = "/root/audioGOOD.wav"; } else { str_length = strlen(_defaultAudioName); dev->_audiofilename = kmemdup(_defaultAudioName, str_length + 1, GFP_KERNEL); if (!dev->_audiofilename) goto error; } retval = cx25821_sram_channel_setup_upstream_audio(dev, sram_ch, _line_size, 0); dev->audio_upstream_riscbuf_size = AUDIO_RISC_DMA_BUF_SIZE * NUM_AUDIO_PROGS + RISC_SYNC_INSTRUCTION_SIZE; dev->audio_upstream_databuf_size = AUDIO_DATA_BUF_SZ * NUM_AUDIO_PROGS; /* Allocating buffers and prepare RISC program */ retval = cx25821_audio_upstream_buffer_prepare(dev, sram_ch, _line_size); if (retval < 0) { pr_err("%s: Failed to set up Audio upstream buffers!\n", dev->name); goto error; } /* Start RISC engine */ cx25821_start_audio_dma_upstream(dev, sram_ch); return 0; error: cx25821_dev_unregister(dev); return err; }
gpl-2.0
dorimanx/Dorimanx-LG-G2-D802-Kernel
drivers/media/video/cx25821/cx25821-audio-upstream.c
4902
20308
/* * Driver for the Conexant CX25821 PCIe bridge * * Copyright (C) 2009 Conexant Systems Inc. * Authors <hiep.huynh@conexant.com>, <shu.lin@conexant.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "cx25821-video.h" #include "cx25821-audio-upstream.h" #include <linux/fs.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/syscalls.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/uaccess.h> MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards"); MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>"); MODULE_LICENSE("GPL"); static int _intr_msk = FLD_AUD_SRC_RISCI1 | FLD_AUD_SRC_OF | FLD_AUD_SRC_SYNC | FLD_AUD_SRC_OPC_ERR; int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev, struct sram_channel *ch, unsigned int bpl, u32 risc) { unsigned int i, lines; u32 cdt; if (ch->cmds_start == 0) { cx_write(ch->ptr1_reg, 0); cx_write(ch->ptr2_reg, 0); cx_write(ch->cnt2_reg, 0); cx_write(ch->cnt1_reg, 0); return 0; } bpl = (bpl + 7) & ~7; /* alignment */ cdt = ch->cdt; lines = ch->fifo_size / bpl; if (lines > 3) lines = 3; BUG_ON(lines < 2); /* write CDT */ for (i = 0; i < lines; i++) { cx_write(cdt + 16 * i, ch->fifo_start + bpl * 
i); cx_write(cdt + 16 * i + 4, 0); cx_write(cdt + 16 * i + 8, 0); cx_write(cdt + 16 * i + 12, 0); } /* write CMDS */ cx_write(ch->cmds_start + 0, risc); cx_write(ch->cmds_start + 4, 0); cx_write(ch->cmds_start + 8, cdt); cx_write(ch->cmds_start + 12, AUDIO_CDT_SIZE_QW); cx_write(ch->cmds_start + 16, ch->ctrl_start); /* IQ size */ cx_write(ch->cmds_start + 20, AUDIO_IQ_SIZE_DW); for (i = 24; i < 80; i += 4) cx_write(ch->cmds_start + i, 0); /* fill registers */ cx_write(ch->ptr1_reg, ch->fifo_start); cx_write(ch->ptr2_reg, cdt); cx_write(ch->cnt2_reg, AUDIO_CDT_SIZE_QW); cx_write(ch->cnt1_reg, AUDIO_CLUSTER_SIZE_QW - 1); return 0; } static __le32 *cx25821_risc_field_upstream_audio(struct cx25821_dev *dev, __le32 *rp, dma_addr_t databuf_phys_addr, unsigned int bpl, int fifo_enable) { unsigned int line; struct sram_channel *sram_ch = dev->channels[dev->_audio_upstream_channel].sram_channels; int offset = 0; /* scan lines */ for (line = 0; line < LINES_PER_AUDIO_BUFFER; line++) { *(rp++) = cpu_to_le32(RISC_READ | RISC_SOL | RISC_EOL | bpl); *(rp++) = cpu_to_le32(databuf_phys_addr + offset); *(rp++) = cpu_to_le32(0); /* bits 63-32 */ /* Check if we need to enable the FIFO * after the first 3 lines. 
* For the upstream audio channel, * the risc engine will enable the FIFO */ if (fifo_enable && line == 2) { *(rp++) = RISC_WRITECR; *(rp++) = sram_ch->dma_ctl; *(rp++) = sram_ch->fld_aud_fifo_en; *(rp++) = 0x00000020; } offset += AUDIO_LINE_SIZE; } return rp; } int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev, struct pci_dev *pci, unsigned int bpl, unsigned int lines) { __le32 *rp; int fifo_enable = 0; int frame = 0, i = 0; int frame_size = AUDIO_DATA_BUF_SZ; int databuf_offset = 0; int risc_flag = RISC_CNT_INC; dma_addr_t risc_phys_jump_addr; /* Virtual address of Risc buffer program */ rp = dev->_risc_virt_addr; /* sync instruction */ *(rp++) = cpu_to_le32(RISC_RESYNC | AUDIO_SYNC_LINE); for (frame = 0; frame < NUM_AUDIO_FRAMES; frame++) { databuf_offset = frame_size * frame; if (frame == 0) { fifo_enable = 1; risc_flag = RISC_CNT_RESET; } else { fifo_enable = 0; risc_flag = RISC_CNT_INC; } /* Calculate physical jump address */ if ((frame + 1) == NUM_AUDIO_FRAMES) { risc_phys_jump_addr = dev->_risc_phys_start_addr + RISC_SYNC_INSTRUCTION_SIZE; } else { risc_phys_jump_addr = dev->_risc_phys_start_addr + RISC_SYNC_INSTRUCTION_SIZE + AUDIO_RISC_DMA_BUF_SIZE * (frame + 1); } rp = cx25821_risc_field_upstream_audio(dev, rp, dev->_audiodata_buf_phys_addr + databuf_offset, bpl, fifo_enable); if (USE_RISC_NOOP_AUDIO) { for (i = 0; i < NUM_NO_OPS; i++) *(rp++) = cpu_to_le32(RISC_NOOP); } /* Loop to (Nth)FrameRISC or to Start of Risc program & * generate IRQ */ *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | risc_flag); *(rp++) = cpu_to_le32(risc_phys_jump_addr); *(rp++) = cpu_to_le32(0); /* Recalculate virtual address based on frame index */ rp = dev->_risc_virt_addr + RISC_SYNC_INSTRUCTION_SIZE / 4 + (AUDIO_RISC_DMA_BUF_SIZE * (frame + 1) / 4); } return 0; } void cx25821_free_memory_audio(struct cx25821_dev *dev) { if (dev->_risc_virt_addr) { pci_free_consistent(dev->pci, dev->_audiorisc_size, dev->_risc_virt_addr, dev->_risc_phys_addr); dev->_risc_virt_addr = 
NULL; } if (dev->_audiodata_buf_virt_addr) { pci_free_consistent(dev->pci, dev->_audiodata_buf_size, dev->_audiodata_buf_virt_addr, dev->_audiodata_buf_phys_addr); dev->_audiodata_buf_virt_addr = NULL; } } void cx25821_stop_upstream_audio(struct cx25821_dev *dev) { struct sram_channel *sram_ch = dev->channels[AUDIO_UPSTREAM_SRAM_CHANNEL_B].sram_channels; u32 tmp = 0; if (!dev->_audio_is_running) { printk(KERN_DEBUG pr_fmt("No audio file is currently running so return!\n")); return; } /* Disable RISC interrupts */ cx_write(sram_ch->int_msk, 0); /* Turn OFF risc and fifo enable in AUD_DMA_CNTRL */ tmp = cx_read(sram_ch->dma_ctl); cx_write(sram_ch->dma_ctl, tmp & ~(sram_ch->fld_aud_fifo_en | sram_ch->fld_aud_risc_en)); /* Clear data buffer memory */ if (dev->_audiodata_buf_virt_addr) memset(dev->_audiodata_buf_virt_addr, 0, dev->_audiodata_buf_size); dev->_audio_is_running = 0; dev->_is_first_audio_frame = 0; dev->_audioframe_count = 0; dev->_audiofile_status = END_OF_FILE; kfree(dev->_irq_audio_queues); dev->_irq_audio_queues = NULL; kfree(dev->_audiofilename); } void cx25821_free_mem_upstream_audio(struct cx25821_dev *dev) { if (dev->_audio_is_running) cx25821_stop_upstream_audio(dev); cx25821_free_memory_audio(dev); } int cx25821_get_audio_data(struct cx25821_dev *dev, struct sram_channel *sram_ch) { struct file *myfile; int frame_index_temp = dev->_audioframe_index; int i = 0; int line_size = AUDIO_LINE_SIZE; int frame_size = AUDIO_DATA_BUF_SZ; int frame_offset = frame_size * frame_index_temp; ssize_t vfs_read_retval = 0; char mybuf[line_size]; loff_t file_offset = dev->_audioframe_count * frame_size; loff_t pos; mm_segment_t old_fs; if (dev->_audiofile_status == END_OF_FILE) return 0; myfile = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(myfile)) { const int open_errno = -PTR_ERR(myfile); pr_err("%s(): ERROR opening file(%s) with errno = %d!\n", __func__, dev->_audiofilename, open_errno); return PTR_ERR(myfile); } else { if 
(!(myfile->f_op)) { pr_err("%s(): File has no file operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } if (!myfile->f_op->read) { pr_err("%s(): File has no READ operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } pos = myfile->f_pos; old_fs = get_fs(); set_fs(KERNEL_DS); for (i = 0; i < dev->_audio_lines_count; i++) { pos = file_offset; vfs_read_retval = vfs_read(myfile, mybuf, line_size, &pos); if (vfs_read_retval > 0 && vfs_read_retval == line_size && dev->_audiodata_buf_virt_addr != NULL) { memcpy((void *)(dev->_audiodata_buf_virt_addr + frame_offset / 4), mybuf, vfs_read_retval); } file_offset += vfs_read_retval; frame_offset += vfs_read_retval; if (vfs_read_retval < line_size) { pr_info("Done: exit %s() since no more bytes to read from Audio file\n", __func__); break; } } if (i > 0) dev->_audioframe_count++; dev->_audiofile_status = (vfs_read_retval == line_size) ? IN_PROGRESS : END_OF_FILE; set_fs(old_fs); filp_close(myfile, NULL); } return 0; } static void cx25821_audioups_handler(struct work_struct *work) { struct cx25821_dev *dev = container_of(work, struct cx25821_dev, _audio_work_entry); if (!dev) { pr_err("ERROR %s(): since container_of(work_struct) FAILED!\n", __func__); return; } cx25821_get_audio_data(dev, dev->channels[dev->_audio_upstream_channel]. 
sram_channels); } int cx25821_openfile_audio(struct cx25821_dev *dev, struct sram_channel *sram_ch) { struct file *myfile; int i = 0, j = 0; int line_size = AUDIO_LINE_SIZE; ssize_t vfs_read_retval = 0; char mybuf[line_size]; loff_t pos; loff_t offset = (unsigned long)0; mm_segment_t old_fs; myfile = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(myfile)) { const int open_errno = -PTR_ERR(myfile); pr_err("%s(): ERROR opening file(%s) with errno = %d!\n", __func__, dev->_audiofilename, open_errno); return PTR_ERR(myfile); } else { if (!(myfile->f_op)) { pr_err("%s(): File has no file operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } if (!myfile->f_op->read) { pr_err("%s(): File has no READ operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } pos = myfile->f_pos; old_fs = get_fs(); set_fs(KERNEL_DS); for (j = 0; j < NUM_AUDIO_FRAMES; j++) { for (i = 0; i < dev->_audio_lines_count; i++) { pos = offset; vfs_read_retval = vfs_read(myfile, mybuf, line_size, &pos); if (vfs_read_retval > 0 && vfs_read_retval == line_size && dev->_audiodata_buf_virt_addr != NULL) { memcpy((void *)(dev-> _audiodata_buf_virt_addr + offset / 4), mybuf, vfs_read_retval); } offset += vfs_read_retval; if (vfs_read_retval < line_size) { pr_info("Done: exit %s() since no more bytes to read from Audio file\n", __func__); break; } } if (i > 0) dev->_audioframe_count++; if (vfs_read_retval < line_size) break; } dev->_audiofile_status = (vfs_read_retval == line_size) ? 
IN_PROGRESS : END_OF_FILE; set_fs(old_fs); myfile->f_pos = 0; filp_close(myfile, NULL); } return 0; } static int cx25821_audio_upstream_buffer_prepare(struct cx25821_dev *dev, struct sram_channel *sram_ch, int bpl) { int ret = 0; dma_addr_t dma_addr; dma_addr_t data_dma_addr; cx25821_free_memory_audio(dev); dev->_risc_virt_addr = pci_alloc_consistent(dev->pci, dev->audio_upstream_riscbuf_size, &dma_addr); dev->_risc_virt_start_addr = dev->_risc_virt_addr; dev->_risc_phys_start_addr = dma_addr; dev->_risc_phys_addr = dma_addr; dev->_audiorisc_size = dev->audio_upstream_riscbuf_size; if (!dev->_risc_virt_addr) { printk(KERN_DEBUG pr_fmt("ERROR: pci_alloc_consistent() FAILED to allocate memory for RISC program! Returning\n")); return -ENOMEM; } /* Clear out memory at address */ memset(dev->_risc_virt_addr, 0, dev->_audiorisc_size); /* For Audio Data buffer allocation */ dev->_audiodata_buf_virt_addr = pci_alloc_consistent(dev->pci, dev->audio_upstream_databuf_size, &data_dma_addr); dev->_audiodata_buf_phys_addr = data_dma_addr; dev->_audiodata_buf_size = dev->audio_upstream_databuf_size; if (!dev->_audiodata_buf_virt_addr) { printk(KERN_DEBUG pr_fmt("ERROR: pci_alloc_consistent() FAILED to allocate memory for data buffer! 
Returning\n")); return -ENOMEM; } /* Clear out memory at address */ memset(dev->_audiodata_buf_virt_addr, 0, dev->_audiodata_buf_size); ret = cx25821_openfile_audio(dev, sram_ch); if (ret < 0) return ret; /* Creating RISC programs */ ret = cx25821_risc_buffer_upstream_audio(dev, dev->pci, bpl, dev->_audio_lines_count); if (ret < 0) { printk(KERN_DEBUG pr_fmt("ERROR creating audio upstream RISC programs!\n")); goto error; } return 0; error: return ret; } int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num, u32 status) { int i = 0; u32 int_msk_tmp; struct sram_channel *channel = dev->channels[chan_num].sram_channels; dma_addr_t risc_phys_jump_addr; __le32 *rp; if (status & FLD_AUD_SRC_RISCI1) { /* Get interrupt_index of the program that interrupted */ u32 prog_cnt = cx_read(channel->gpcnt); /* Since we've identified our IRQ, clear our bits from the * interrupt mask and interrupt status registers */ cx_write(channel->int_msk, 0); cx_write(channel->int_stat, cx_read(channel->int_stat)); spin_lock(&dev->slock); while (prog_cnt != dev->_last_index_irq) { /* Update _last_index_irq */ if (dev->_last_index_irq < (NUMBER_OF_PROGRAMS - 1)) dev->_last_index_irq++; else dev->_last_index_irq = 0; dev->_audioframe_index = dev->_last_index_irq; queue_work(dev->_irq_audio_queues, &dev->_audio_work_entry); } if (dev->_is_first_audio_frame) { dev->_is_first_audio_frame = 0; if (dev->_risc_virt_start_addr != NULL) { risc_phys_jump_addr = dev->_risc_phys_start_addr + RISC_SYNC_INSTRUCTION_SIZE + AUDIO_RISC_DMA_BUF_SIZE; rp = cx25821_risc_field_upstream_audio(dev, dev->_risc_virt_start_addr + 1, dev->_audiodata_buf_phys_addr, AUDIO_LINE_SIZE, FIFO_DISABLE); if (USE_RISC_NOOP_AUDIO) { for (i = 0; i < NUM_NO_OPS; i++) { *(rp++) = cpu_to_le32(RISC_NOOP); } } /* Jump to 2nd Audio Frame */ *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_RESET); *(rp++) = cpu_to_le32(risc_phys_jump_addr); *(rp++) = cpu_to_le32(0); } } spin_unlock(&dev->slock); } else { if (status & 
FLD_AUD_SRC_OF) pr_warn("%s(): Audio Received Overflow Error Interrupt!\n", __func__); if (status & FLD_AUD_SRC_SYNC) pr_warn("%s(): Audio Received Sync Error Interrupt!\n", __func__); if (status & FLD_AUD_SRC_OPC_ERR) pr_warn("%s(): Audio Received OpCode Error Interrupt!\n", __func__); /* Read and write back the interrupt status register to clear * our bits */ cx_write(channel->int_stat, cx_read(channel->int_stat)); } if (dev->_audiofile_status == END_OF_FILE) { pr_warn("EOF Channel Audio Framecount = %d\n", dev->_audioframe_count); return -1; } /* ElSE, set the interrupt mask register, re-enable irq. */ int_msk_tmp = cx_read(channel->int_msk); cx_write(channel->int_msk, int_msk_tmp |= _intr_msk); return 0; } static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id) { struct cx25821_dev *dev = dev_id; u32 msk_stat, audio_status; int handled = 0; struct sram_channel *sram_ch; if (!dev) return -1; sram_ch = dev->channels[dev->_audio_upstream_channel].sram_channels; msk_stat = cx_read(sram_ch->int_mstat); audio_status = cx_read(sram_ch->int_stat); /* Only deal with our interrupt */ if (audio_status) { handled = cx25821_audio_upstream_irq(dev, dev->_audio_upstream_channel, audio_status); } if (handled < 0) cx25821_stop_upstream_audio(dev); else handled += handled; return IRQ_RETVAL(handled); } static void cx25821_wait_fifo_enable(struct cx25821_dev *dev, struct sram_channel *sram_ch) { int count = 0; u32 tmp; do { /* Wait 10 microsecond before checking to see if the FIFO is * turned ON. */ udelay(10); tmp = cx_read(sram_ch->dma_ctl); /* 10 millisecond timeout */ if (count++ > 1000) { pr_err("ERROR: %s() fifo is NOT turned on. Timeout!\n", __func__); return; } } while (!(tmp & sram_ch->fld_aud_fifo_en)); } int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev, struct sram_channel *sram_ch) { u32 tmp = 0; int err = 0; /* Set the physical start address of the RISC program in the initial * program counter(IPC) member of the CMDS. 
*/ cx_write(sram_ch->cmds_start + 0, dev->_risc_phys_addr); /* Risc IPC High 64 bits 63-32 */ cx_write(sram_ch->cmds_start + 4, 0); /* reset counter */ cx_write(sram_ch->gpcnt_ctl, 3); /* Set the line length (It looks like we do not need to set the * line length) */ cx_write(sram_ch->aud_length, AUDIO_LINE_SIZE & FLD_AUD_DST_LN_LNGTH); /* Set the input mode to 16-bit */ tmp = cx_read(sram_ch->aud_cfg); tmp |= FLD_AUD_SRC_ENABLE | FLD_AUD_DST_PK_MODE | FLD_AUD_CLK_ENABLE | FLD_AUD_MASTER_MODE | FLD_AUD_CLK_SELECT_PLL_D | FLD_AUD_SONY_MODE; cx_write(sram_ch->aud_cfg, tmp); /* Read and write back the interrupt status register to clear it */ tmp = cx_read(sram_ch->int_stat); cx_write(sram_ch->int_stat, tmp); /* Clear our bits from the interrupt status register. */ cx_write(sram_ch->int_stat, _intr_msk); /* Set the interrupt mask register, enable irq. */ cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << sram_ch->irq_bit)); tmp = cx_read(sram_ch->int_msk); cx_write(sram_ch->int_msk, tmp |= _intr_msk); err = request_irq(dev->pci->irq, cx25821_upstream_irq_audio, IRQF_SHARED, dev->name, dev); if (err < 0) { pr_err("%s: can't get upstream IRQ %d\n", dev->name, dev->pci->irq); goto fail_irq; } /* Start the DMA engine */ tmp = cx_read(sram_ch->dma_ctl); cx_set(sram_ch->dma_ctl, tmp | sram_ch->fld_aud_risc_en); dev->_audio_is_running = 1; dev->_is_first_audio_frame = 1; /* The fifo_en bit turns on by the first Risc program */ cx25821_wait_fifo_enable(dev, sram_ch); return 0; fail_irq: cx25821_dev_unregister(dev); return err; } int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select) { struct sram_channel *sram_ch; int retval = 0; int err = 0; int str_length = 0; if (dev->_audio_is_running) { pr_warn("Audio Channel is still running so return!\n"); return 0; } dev->_audio_upstream_channel = channel_select; sram_ch = dev->channels[channel_select].sram_channels; /* Work queue */ INIT_WORK(&dev->_audio_work_entry, cx25821_audioups_handler); dev->_irq_audio_queues 
= create_singlethread_workqueue("cx25821_audioworkqueue"); if (!dev->_irq_audio_queues) { printk(KERN_DEBUG pr_fmt("ERROR: create_singlethread_workqueue() for Audio FAILED!\n")); return -ENOMEM; } dev->_last_index_irq = 0; dev->_audio_is_running = 0; dev->_audioframe_count = 0; dev->_audiofile_status = RESET_STATUS; dev->_audio_lines_count = LINES_PER_AUDIO_BUFFER; _line_size = AUDIO_LINE_SIZE; if (dev->input_audiofilename) { str_length = strlen(dev->input_audiofilename); dev->_audiofilename = kmemdup(dev->input_audiofilename, str_length + 1, GFP_KERNEL); if (!dev->_audiofilename) goto error; /* Default if filename is empty string */ if (strcmp(dev->input_audiofilename, "") == 0) dev->_audiofilename = "/root/audioGOOD.wav"; } else { str_length = strlen(_defaultAudioName); dev->_audiofilename = kmemdup(_defaultAudioName, str_length + 1, GFP_KERNEL); if (!dev->_audiofilename) goto error; } retval = cx25821_sram_channel_setup_upstream_audio(dev, sram_ch, _line_size, 0); dev->audio_upstream_riscbuf_size = AUDIO_RISC_DMA_BUF_SIZE * NUM_AUDIO_PROGS + RISC_SYNC_INSTRUCTION_SIZE; dev->audio_upstream_databuf_size = AUDIO_DATA_BUF_SZ * NUM_AUDIO_PROGS; /* Allocating buffers and prepare RISC program */ retval = cx25821_audio_upstream_buffer_prepare(dev, sram_ch, _line_size); if (retval < 0) { pr_err("%s: Failed to set up Audio upstream buffers!\n", dev->name); goto error; } /* Start RISC engine */ cx25821_start_audio_dma_upstream(dev, sram_ch); return 0; error: cx25821_dev_unregister(dev); return err; }
gpl-2.0
Vajnar/linux-stable-hx4700
drivers/media/rc/keymaps/rc-ati-x10.c
7718
3132
/*
 * ATI X10 RF remote keytable
 *
 * Copyright (C) 2011 Anssi Hannula <anssi.hannula@iki.fi>
 *
 * This file is based on the static generic keytable previously found in
 * ati_remote.c, which is
 * Copyright (c) 2004 Torrey Hoffman <thoffman@arnor.net>
 * Copyright (c) 2002 Vladimir Dergachev
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/module.h>
#include <media/rc-map.h>

/* Scancode -> keycode table for the ATI X10 RF remote. */
static struct rc_map_table ati_x10[] = {
    { 0x0d, KEY_1 },
    { 0x0e, KEY_2 },
    { 0x0f, KEY_3 },
    { 0x10, KEY_4 },
    { 0x11, KEY_5 },
    { 0x12, KEY_6 },
    { 0x13, KEY_7 },
    { 0x14, KEY_8 },
    { 0x15, KEY_9 },
    { 0x17, KEY_0 },
    { 0x00, KEY_A },
    { 0x01, KEY_B },
    { 0x19, KEY_C },
    { 0x1b, KEY_D },
    { 0x21, KEY_E },
    { 0x23, KEY_F },

    { 0x18, KEY_KPENTER },    /* "check" */
    { 0x16, KEY_MENU },       /* "menu" */
    { 0x02, KEY_POWER },      /* Power */
    { 0x03, KEY_TV },         /* TV */
    { 0x04, KEY_DVD },        /* DVD */
    { 0x05, KEY_WWW },        /* WEB */
    { 0x06, KEY_BOOKMARKS },  /* "book" */
    { 0x07, KEY_EDIT },       /* "hand" */
    { 0x1c, KEY_COFFEE },     /* "timer" */
    { 0x20, KEY_FRONT },      /* "max" */
    { 0x1d, KEY_LEFT },       /* left */
    { 0x1f, KEY_RIGHT },      /* right */
    { 0x22, KEY_DOWN },       /* down */
    { 0x1a, KEY_UP },         /* up */
    { 0x1e, KEY_OK },         /* "OK" */
    { 0x09, KEY_VOLUMEDOWN }, /* VOL - */
    { 0x08, KEY_VOLUMEUP },   /* VOL + */
    { 0x0a, KEY_MUTE },       /* MUTE */
    { 0x0b, KEY_CHANNELUP },  /* CH + */
    { 0x0c, KEY_CHANNELDOWN },/* CH - */
    { 0x27, KEY_RECORD },     /* ( o) red */
    { 0x25, KEY_PLAY },       /* ( >) */
    { 0x24, KEY_REWIND },     /* (<<) */
    { 0x26, KEY_FORWARD },    /* (>>) */
    { 0x28, KEY_STOP },       /* ([]) */
    { 0x29, KEY_PAUSE },      /* ('') */
    { 0x2b, KEY_PREVIOUS },   /* (<-) */
    { 0x2a, KEY_NEXT },       /* (>+) */
    { 0x2d, KEY_INFO },       /* PLAYING */
    { 0x2e, KEY_HOME },       /* TOP */
    { 0x2f, KEY_END },        /* END */
    { 0x30, KEY_SELECT },     /* SELECT */
};

/* rc-core registration wrapper around the table above. */
static struct rc_map_list ati_x10_map = {
    .map = {
        .scan    = ati_x10,
        .size    = ARRAY_SIZE(ati_x10),
        .rc_type = RC_TYPE_OTHER,
        .name    = RC_MAP_ATI_X10,
    }
};

static int __init init_rc_map_ati_x10(void)
{
    return rc_map_register(&ati_x10_map);
}

static void __exit exit_rc_map_ati_x10(void)
{
    rc_map_unregister(&ati_x10_map);
}

module_init(init_rc_map_ati_x10)
module_exit(exit_rc_map_ati_x10)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
gpl-2.0
randomblame/android_kernel_hisense_3.4
drivers/media/video/ivtv/ivtv-mailbox.c
13094
13758
/* mailbox functions
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <stdarg.h>

#include "ivtv-driver.h"
#include "ivtv-mailbox.h"

/* Firmware mailbox flags*/
#define IVTV_MBOX_FIRMWARE_DONE 0x00000004
#define IVTV_MBOX_DRIVER_DONE   0x00000002
#define IVTV_MBOX_DRIVER_BUSY   0x00000001
#define IVTV_MBOX_FREE          0x00000000

/* Firmware mailbox standard timeout */
#define IVTV_API_STD_TIMEOUT    0x02000000

/* Per-command flags for api_info[] below. */
#define API_CACHE       (1 << 0)  /* Allow the command to be stored in the cache */
#define API_RESULT      (1 << 1)  /* Allow 1 second for this cmd to end */
#define API_FAST_RESULT (3 << 1)  /* Allow 0.1 second for this cmd to end */
#define API_DMA         (1 << 3)  /* DMA mailbox, has special handling */
#define API_HIGH_VOL    (1 << 5)  /* High volume command (i.e. called during
                                     encoding or decoding) */
#define API_NO_WAIT_MB  (1 << 4)  /* Command may not wait for a free mailbox */
#define API_NO_WAIT_RES (1 << 5)  /* Command may not wait for the result */
                                  /* NOTE(review): API_HIGH_VOL and
                                   * API_NO_WAIT_RES share bit 5, so any
                                   * HIGH_VOL command also gets NO_WAIT_RES
                                   * semantics (mdelay instead of msleep in
                                   * ivtv_api_call).  Confirm intentional
                                   * before changing. */
#define API_NO_POLL     (1 << 6)  /* Avoid pointless polling */

/* Flags + printable name for each known firmware command. */
struct ivtv_api_info {
    int flags;        /* Flags, see above */
    const char *name; /* The name of the command */
};

#define API_ENTRY(x, f) [x] = { (f), #x }

/* Table indexed by firmware command number (0-255). */
static const struct ivtv_api_info api_info[256] = {
    /* MPEG encoder API */
    API_ENTRY(CX2341X_ENC_PING_FW, API_FAST_RESULT),
    API_ENTRY(CX2341X_ENC_START_CAPTURE, API_RESULT | API_NO_POLL),
    API_ENTRY(CX2341X_ENC_STOP_CAPTURE, API_RESULT),
    API_ENTRY(CX2341X_ENC_SET_AUDIO_ID, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_VIDEO_ID, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_PCR_ID, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_FRAME_RATE, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_FRAME_SIZE, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_BIT_RATE, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_GOP_PROPERTIES, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_ASPECT_RATIO, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_DNR_FILTER_MODE, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_DNR_FILTER_PROPS, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_CORING_LEVELS, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_SPATIAL_FILTER_TYPE, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_VBI_LINE, API_RESULT),
    API_ENTRY(CX2341X_ENC_SET_STREAM_TYPE, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_OUTPUT_PORT, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_AUDIO_PROPERTIES, API_CACHE),
    API_ENTRY(CX2341X_ENC_HALT_FW, API_FAST_RESULT),
    API_ENTRY(CX2341X_ENC_GET_VERSION, API_FAST_RESULT),
    API_ENTRY(CX2341X_ENC_SET_GOP_CLOSURE, API_CACHE),
    API_ENTRY(CX2341X_ENC_GET_SEQ_END, API_RESULT),
    API_ENTRY(CX2341X_ENC_SET_PGM_INDEX_INFO, API_FAST_RESULT),
    API_ENTRY(CX2341X_ENC_SET_VBI_CONFIG, API_RESULT),
    API_ENTRY(CX2341X_ENC_SET_DMA_BLOCK_SIZE, API_CACHE),
    API_ENTRY(CX2341X_ENC_GET_PREV_DMA_INFO_MB_10, API_FAST_RESULT),
    API_ENTRY(CX2341X_ENC_GET_PREV_DMA_INFO_MB_9, API_FAST_RESULT),
    API_ENTRY(CX2341X_ENC_SCHED_DMA_TO_HOST, API_DMA | API_HIGH_VOL),
    API_ENTRY(CX2341X_ENC_INITIALIZE_INPUT, API_RESULT),
    API_ENTRY(CX2341X_ENC_SET_FRAME_DROP_RATE, API_CACHE),
    API_ENTRY(CX2341X_ENC_PAUSE_ENCODER, API_RESULT),
    API_ENTRY(CX2341X_ENC_REFRESH_INPUT, API_NO_WAIT_MB | API_HIGH_VOL),
    API_ENTRY(CX2341X_ENC_SET_COPYRIGHT, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_EVENT_NOTIFICATION, API_RESULT),
    API_ENTRY(CX2341X_ENC_SET_NUM_VSYNC_LINES, API_CACHE),
    API_ENTRY(CX2341X_ENC_SET_PLACEHOLDER, API_CACHE),
    API_ENTRY(CX2341X_ENC_MUTE_VIDEO, API_RESULT),
    API_ENTRY(CX2341X_ENC_MUTE_AUDIO, API_RESULT),
    API_ENTRY(CX2341X_ENC_SET_VERT_CROP_LINE, API_FAST_RESULT),
    API_ENTRY(CX2341X_ENC_MISC, API_FAST_RESULT),
    /* Obsolete PULLDOWN API command */
    API_ENTRY(0xb1, API_CACHE),

    /* MPEG decoder API */
    API_ENTRY(CX2341X_DEC_PING_FW, API_FAST_RESULT),
    API_ENTRY(CX2341X_DEC_START_PLAYBACK, API_RESULT | API_NO_POLL),
    API_ENTRY(CX2341X_DEC_STOP_PLAYBACK, API_RESULT),
    API_ENTRY(CX2341X_DEC_SET_PLAYBACK_SPEED, API_RESULT),
    API_ENTRY(CX2341X_DEC_STEP_VIDEO, API_RESULT),
    API_ENTRY(CX2341X_DEC_SET_DMA_BLOCK_SIZE, API_CACHE),
    API_ENTRY(CX2341X_DEC_GET_XFER_INFO, API_FAST_RESULT),
    API_ENTRY(CX2341X_DEC_GET_DMA_STATUS, API_FAST_RESULT),
    API_ENTRY(CX2341X_DEC_SCHED_DMA_FROM_HOST, API_DMA | API_HIGH_VOL),
    API_ENTRY(CX2341X_DEC_PAUSE_PLAYBACK, API_RESULT),
    API_ENTRY(CX2341X_DEC_HALT_FW, API_FAST_RESULT),
    API_ENTRY(CX2341X_DEC_SET_STANDARD, API_CACHE),
    API_ENTRY(CX2341X_DEC_GET_VERSION, API_FAST_RESULT),
    API_ENTRY(CX2341X_DEC_SET_STREAM_INPUT, API_CACHE),
    API_ENTRY(CX2341X_DEC_GET_TIMING_INFO, API_RESULT /*| API_NO_WAIT_RES*/),
    API_ENTRY(CX2341X_DEC_SET_AUDIO_MODE, API_CACHE),
    API_ENTRY(CX2341X_DEC_SET_EVENT_NOTIFICATION, API_RESULT),
    API_ENTRY(CX2341X_DEC_SET_DISPLAY_BUFFERS, API_CACHE),
    API_ENTRY(CX2341X_DEC_EXTRACT_VBI, API_RESULT),
    API_ENTRY(CX2341X_DEC_SET_DECODER_SOURCE, API_FAST_RESULT),
    API_ENTRY(CX2341X_DEC_SET_PREBUFFERING, API_CACHE),

    /* OSD API */
    API_ENTRY(CX2341X_OSD_GET_FRAMEBUFFER, API_FAST_RESULT),
    API_ENTRY(CX2341X_OSD_GET_PIXEL_FORMAT, API_FAST_RESULT),
    API_ENTRY(CX2341X_OSD_SET_PIXEL_FORMAT, API_CACHE),
    API_ENTRY(CX2341X_OSD_GET_STATE, API_FAST_RESULT),
    API_ENTRY(CX2341X_OSD_SET_STATE, API_CACHE),
    API_ENTRY(CX2341X_OSD_GET_OSD_COORDS, API_FAST_RESULT),
    API_ENTRY(CX2341X_OSD_SET_OSD_COORDS, API_CACHE),
    API_ENTRY(CX2341X_OSD_GET_SCREEN_COORDS, API_FAST_RESULT),
    API_ENTRY(CX2341X_OSD_SET_SCREEN_COORDS, API_CACHE),
    API_ENTRY(CX2341X_OSD_GET_GLOBAL_ALPHA, API_FAST_RESULT),
    API_ENTRY(CX2341X_OSD_SET_GLOBAL_ALPHA, API_CACHE),
    API_ENTRY(CX2341X_OSD_SET_BLEND_COORDS, API_CACHE),
    API_ENTRY(CX2341X_OSD_GET_FLICKER_STATE, API_FAST_RESULT),
    API_ENTRY(CX2341X_OSD_SET_FLICKER_STATE, API_CACHE),
    API_ENTRY(CX2341X_OSD_BLT_COPY, API_RESULT),
    API_ENTRY(CX2341X_OSD_BLT_FILL, API_RESULT),
    API_ENTRY(CX2341X_OSD_BLT_TEXT, API_RESULT),
    API_ENTRY(CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, API_CACHE),
    API_ENTRY(CX2341X_OSD_SET_CHROMA_KEY, API_CACHE),
    API_ENTRY(CX2341X_OSD_GET_ALPHA_CONTENT_INDEX, API_FAST_RESULT),
    API_ENTRY(CX2341X_OSD_SET_ALPHA_CONTENT_INDEX, API_CACHE)
};

/* Try to claim mailbox @mb: a mailbox is claimable when its flags word is
 * FREE or the firmware has marked its previous command DONE.  Claiming is
 * serialized via the mbdata->busy bitmask (test_and_set_bit). */
static int try_mailbox(struct ivtv *itv, struct ivtv_mailbox_data *mbdata, int mb)
{
    u32 flags = readl(&mbdata->mbox[mb].flags);
    int is_free = flags == IVTV_MBOX_FREE || (flags & IVTV_MBOX_FIRMWARE_DONE);

    /* if the mailbox is free, then try to claim it */
    if (is_free && !test_and_set_bit(mb, &mbdata->busy)) {
        write_sync(IVTV_MBOX_DRIVER_BUSY, &mbdata->mbox[mb].flags);
        return 1;
    }
    return 0;
}

/* Try to find a free mailbox. Note mailbox 0 is reserved for DMA and so
   is not attempted here.
   Returns the claimed mailbox number, or -ENODEV after 100 tries
   (sleeping 10 ms between rounds unless API_NO_WAIT_MB is set). */
static int get_mailbox(struct ivtv *itv, struct ivtv_mailbox_data *mbdata, int flags)
{
    unsigned long then = jiffies;
    int i, mb;
    int max_mbox = mbdata->max_mbox;
    int retries = 100;

    /* All slow commands use the same mailbox, serializing them and also
       leaving the other mailbox free for simple fast commands. */
    if ((flags & API_FAST_RESULT) == API_RESULT)
        max_mbox = 1;

    /* find free non-DMA mailbox */
    for (i = 0; i < retries; i++) {
        for (mb = 1; mb <= max_mbox; mb++)
            if (try_mailbox(itv, mbdata, mb))
                return mb;

        /* Sleep before a retry, if not atomic */
        if (!(flags & API_NO_WAIT_MB)) {
            if (time_after(jiffies,
                       then + msecs_to_jiffies(10*retries)))
                break;
            ivtv_msleep_timeout(10, 0);
        }
    }
    return -ENODEV;
}

/* Fill a claimed mailbox with cmd + args and hand it to the firmware by
 * setting DRIVER_DONE (keeping DRIVER_BUSY set). */
static void write_mailbox(volatile struct ivtv_mailbox __iomem *mbox, int cmd, int args, u32 data[])
{
    int i;

    write_sync(cmd, &mbox->cmd);
    write_sync(IVTV_API_STD_TIMEOUT, &mbox->timeout);

    for (i = 0; i < CX2341X_MBOX_MAX_DATA; i++)
        write_sync(data[i], &mbox->data[i]);

    write_sync(IVTV_MBOX_DRIVER_DONE | IVTV_MBOX_DRIVER_BUSY, &mbox->flags);
}

/* Forcibly release every mailbox (error recovery after a stuck firmware). */
static void clear_all_mailboxes(struct ivtv *itv, struct ivtv_mailbox_data *mbdata)
{
    int i;

    for (i = 0; i <= mbdata->max_mbox; i++) {
        IVTV_DEBUG_WARN("Clearing mailbox %d: cmd 0x%08x flags 0x%08x\n",
                i, readl(&mbdata->mbox[i].cmd),
                readl(&mbdata->mbox[i].flags));
        write_sync(0, &mbdata->mbox[i].flags);
        clear_bit(i, &mbdata->busy);
    }
}

/* Issue one firmware command: pick the encoder or decoder mailbox set
 * (cmd >= 128 -> encoder), validate, consult the 30-minute command cache,
 * claim a mailbox, write the command, and - for API_RESULT commands -
 * poll/sleep until FIRMWARE_DONE or the per-command timeout.
 * Returns 0, -EINVAL, -EBUSY or -EIO. */
static int ivtv_api_call(struct ivtv *itv, int cmd, int args, u32 data[])
{
    struct ivtv_mailbox_data *mbdata = (cmd >= 128) ? &itv->enc_mbox : &itv->dec_mbox;
    volatile struct ivtv_mailbox __iomem *mbox;
    int api_timeout = msecs_to_jiffies(1000);
    int flags, mb, i;
    unsigned long then;

    /* sanity checks */
    if (NULL == mbdata) {
        IVTV_ERR("No mailbox allocated\n");
        return -ENODEV;
    }
    if (args < 0 || args > CX2341X_MBOX_MAX_DATA ||
        cmd < 0 || cmd > 255 || api_info[cmd].name == NULL) {
        IVTV_ERR("Invalid MB call: cmd = 0x%02x, args = %d\n", cmd, args);
        return -EINVAL;
    }

    if (api_info[cmd].flags & API_HIGH_VOL) {
        IVTV_DEBUG_HI_MB("MB Call: %s\n", api_info[cmd].name);
    }
    else {
        IVTV_DEBUG_MB("MB Call: %s\n", api_info[cmd].name);
    }

    /* clear possibly uninitialized part of data array */
    for (i = args; i < CX2341X_MBOX_MAX_DATA; i++)
        data[i] = 0;

    /* If this command was issued within the last 30 minutes and with identical
       data, then just return 0 as there is no need to issue this command again.
       Just an optimization to prevent unnecessary use of mailboxes. */
    if (itv->api_cache[cmd].last_jiffies &&
        time_before(jiffies,
            itv->api_cache[cmd].last_jiffies +
            msecs_to_jiffies(1800000)) &&
        !memcmp(data, itv->api_cache[cmd].data,
            sizeof(itv->api_cache[cmd].data))) {
        itv->api_cache[cmd].last_jiffies = jiffies;
        return 0;
    }

    flags = api_info[cmd].flags;

    if (flags & API_DMA) {
        /* DMA commands may use any mailbox (including 0) and never
         * wait for a result. */
        for (i = 0; i < 100; i++) {
            mb = i % (mbdata->max_mbox + 1);
            if (try_mailbox(itv, mbdata, mb)) {
                write_mailbox(&mbdata->mbox[mb], cmd, args, data);
                clear_bit(mb, &mbdata->busy);
                return 0;
            }
            IVTV_DEBUG_WARN("%s: mailbox %d not free %08x\n",
                    api_info[cmd].name, mb,
                    readl(&mbdata->mbox[mb].flags));
        }
        IVTV_WARN("Could not find free DMA mailbox for %s\n",
              api_info[cmd].name);
        clear_all_mailboxes(itv, mbdata);
        return -EBUSY;
    }

    if ((flags & API_FAST_RESULT) == API_FAST_RESULT)
        api_timeout = msecs_to_jiffies(100);

    mb = get_mailbox(itv, mbdata, flags);
    if (mb < 0) {
        IVTV_DEBUG_WARN("No free mailbox found (%s)\n", api_info[cmd].name);
        clear_all_mailboxes(itv, mbdata);
        return -EBUSY;
    }
    mbox = &mbdata->mbox[mb];
    write_mailbox(mbox, cmd, args, data);
    if (flags & API_CACHE) {
        memcpy(itv->api_cache[cmd].data, data,
               sizeof(itv->api_cache[cmd].data));
        itv->api_cache[cmd].last_jiffies = jiffies;
    }
    if ((flags & API_RESULT) == 0) {
        clear_bit(mb, &mbdata->busy);
        return 0;
    }

    /* Get results */
    then = jiffies;

    if (!(flags & API_NO_POLL)) {
        /* First try to poll, then switch to delays */
        for (i = 0; i < 100; i++) {
            if (readl(&mbox->flags) & IVTV_MBOX_FIRMWARE_DONE)
                break;
        }
    }
    while (!(readl(&mbox->flags) & IVTV_MBOX_FIRMWARE_DONE)) {
        if (time_after(jiffies, then + api_timeout)) {
            IVTV_DEBUG_WARN("Could not get result (%s)\n",
                    api_info[cmd].name);
            /* reset the mailbox, but it is likely too late already */
            write_sync(0, &mbox->flags);
            clear_bit(mb, &mbdata->busy);
            return -EIO;
        }
        if (flags & API_NO_WAIT_RES)
            mdelay(1);
        else
            ivtv_msleep_timeout(1, 0);
    }
    if (time_after(jiffies, then + msecs_to_jiffies(100)))
        IVTV_DEBUG_WARN("%s took %u jiffies\n",
                api_info[cmd].name,
                jiffies_to_msecs(jiffies - then));

    for (i = 0; i < CX2341X_MBOX_MAX_DATA; i++)
        data[i] = readl(&mbox->data[i]);
    write_sync(0, &mbox->flags);
    clear_bit(mb, &mbdata->busy);
    return 0;
}

/* Public entry point: issue the command, retrying once on -EBUSY. */
int ivtv_api(struct ivtv *itv, int cmd, int args, u32 data[])
{
    int res = ivtv_api_call(itv, cmd, args, data);

    /* Allow a single retry, probably already too late though.
       If there is no free mailbox then that is usually an indication
       of a more serious problem. */
    return (res == -EBUSY) ? ivtv_api_call(itv, cmd, args, data) : res;
}

/* cx2341x callback adapter; @out is unused because results come back in
 * the same data[] array. */
int ivtv_api_func(void *priv, u32 cmd, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA])
{
    return ivtv_api(priv, cmd, in, data);
}

/* Varargs convenience wrapper that also returns the result data[]. */
int ivtv_vapi_result(struct ivtv *itv, u32 data[CX2341X_MBOX_MAX_DATA], int cmd, int args, ...)
{
    va_list ap;
    int i;

    va_start(ap, args);
    for (i = 0; i < args; i++) {
        data[i] = va_arg(ap, u32);
    }
    va_end(ap);
    return ivtv_api(itv, cmd, args, data);
}

/* Varargs convenience wrapper when the caller does not need the result. */
int ivtv_vapi(struct ivtv *itv, int cmd, int args, ...)
{
    u32 data[CX2341X_MBOX_MAX_DATA];
    va_list ap;
    int i;

    va_start(ap, args);
    for (i = 0; i < args; i++) {
        data[i] = va_arg(ap, u32);
    }
    va_end(ap);
    return ivtv_api(itv, cmd, args, data);
}

/* This one is for stuff that can't sleep.. irq handlers, etc.. */
void ivtv_api_get_data(struct ivtv_mailbox_data *mbdata, int mb, int argc,
               u32 data[])
{
    volatile u32 __iomem *p = mbdata->mbox[mb].data;
    int i;

    for (i = 0; i < argc; i++, p++)
        data[i] = readl(p);
}

/* Wipe api cache */
void ivtv_mailbox_cache_invalidate(struct ivtv *itv)
{
    int i;

    for (i = 0; i < 256; i++)
        itv->api_cache[i].last_jiffies = 0;
}
gpl-2.0
TheWhisp/android_kernel_msm_caf
sound/core/seq/seq_compat.c
13094
4715
/*
 *   32bit -> 64bit ioctl wrapper for sequencer API
 *   Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

/* This file included from seq.c */

#include <linux/compat.h>
#include <linux/slab.h>

/* 32-bit userspace layout of snd_seq_port_info (the native struct stores
 * 'kernel' as a pointer, so sizes/offsets differ under compat). */
struct snd_seq_port_info32 {
    struct snd_seq_addr addr;   /* client/port numbers */
    char name[64];              /* port name */

    u32 capability;             /* port capability bits */
    u32 type;                   /* port type bits */
    s32 midi_channels;          /* channels per MIDI port */
    s32 midi_voices;            /* voices per MIDI port */
    s32 synth_voices;           /* voices per SYNTH port */

    s32 read_use;               /* R/O: subscribers for output (from this port) */
    s32 write_use;              /* R/O: subscribers for input (to this port) */

    u32 kernel;                 /* reserved for kernel use (must be NULL) */
    u32 flags;                  /* misc. conditioning */
    unsigned char time_queue;   /* queue # for timestamping */
    char reserved[59];          /* for future use */
};

/*
 * Convert and forward a compat port-info ioctl to the native handler.
 *
 * FIX: the previous version did memdup_user(data32, sizeof(*data32)) and
 * then used the buffer as a native struct snd_seq_port_info.  The native
 * struct is larger than the compat one, so writing data->flags,
 * data->time_queue and letting the native ioctl fill the full struct
 * overflowed the allocation (heap out-of-bounds).  Allocate the full
 * native size and copy only the compat-sized payload from userspace.
 */
static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned int cmd,
                    struct snd_seq_port_info32 __user *data32)
{
    int err = -EFAULT;
    struct snd_seq_port_info *data;
    mm_segment_t fs;

    data = kmalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    if (copy_from_user(data, data32, sizeof(*data32)) ||
        get_user(data->flags, &data32->flags) ||
        get_user(data->time_queue, &data32->time_queue))
        goto error;
    data->kernel = NULL;    /* must be NULL from userspace */

    fs = snd_enter_user();
    err = snd_seq_do_ioctl(client, cmd, data);
    snd_leave_user(fs);
    if (err < 0)
        goto error;

    /* Copy results back in compat layout; flags/time_queue sit at
     * different offsets in the two structs and need explicit fixup. */
    if (copy_to_user(data32, data, sizeof(*data32)) ||
        put_user(data->flags, &data32->flags) ||
        put_user(data->time_queue, &data32->time_queue))
        err = -EFAULT;

 error:
    kfree(data);
    return err;
}

/* Compat ioctl numbers that reference the 32-bit port-info layout. */
enum {
    SNDRV_SEQ_IOCTL_CREATE_PORT32 = _IOWR('S', 0x20, struct snd_seq_port_info32),
    SNDRV_SEQ_IOCTL_DELETE_PORT32 = _IOW ('S', 0x21, struct snd_seq_port_info32),
    SNDRV_SEQ_IOCTL_GET_PORT_INFO32 = _IOWR('S', 0x22, struct snd_seq_port_info32),
    SNDRV_SEQ_IOCTL_SET_PORT_INFO32 = _IOW ('S', 0x23, struct snd_seq_port_info32),
    SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT32 = _IOWR('S', 0x52, struct snd_seq_port_info32),
};

/*
 * Compat ioctl entry point: layout-compatible commands go straight to the
 * native handler, port-info commands go through the conversion wrapper.
 */
static long snd_seq_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
{
    struct snd_seq_client *client = file->private_data;
    void __user *argp = compat_ptr(arg);

    if (snd_BUG_ON(!client))
        return -ENXIO;

    switch (cmd) {
    case SNDRV_SEQ_IOCTL_PVERSION:
    case SNDRV_SEQ_IOCTL_CLIENT_ID:
    case SNDRV_SEQ_IOCTL_SYSTEM_INFO:
    case SNDRV_SEQ_IOCTL_GET_CLIENT_INFO:
    case SNDRV_SEQ_IOCTL_SET_CLIENT_INFO:
    case SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT:
    case SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT:
    case SNDRV_SEQ_IOCTL_CREATE_QUEUE:
    case SNDRV_SEQ_IOCTL_DELETE_QUEUE:
    case SNDRV_SEQ_IOCTL_GET_QUEUE_INFO:
    case SNDRV_SEQ_IOCTL_SET_QUEUE_INFO:
    case SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE:
    case SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS:
    case SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO:
    case SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO:
    case SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER:
    case SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER:
    case SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT:
    case SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT:
    case SNDRV_SEQ_IOCTL_GET_CLIENT_POOL:
    case SNDRV_SEQ_IOCTL_SET_CLIENT_POOL:
    case SNDRV_SEQ_IOCTL_REMOVE_EVENTS:
    case SNDRV_SEQ_IOCTL_QUERY_SUBS:
    case SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION:
    case SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT:
    case SNDRV_SEQ_IOCTL_RUNNING_MODE:
        return snd_seq_do_ioctl(client, cmd, argp);
    case SNDRV_SEQ_IOCTL_CREATE_PORT32:
        return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_CREATE_PORT, argp);
    case SNDRV_SEQ_IOCTL_DELETE_PORT32:
        return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_DELETE_PORT, argp);
    case SNDRV_SEQ_IOCTL_GET_PORT_INFO32:
        return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_GET_PORT_INFO, argp);
    case SNDRV_SEQ_IOCTL_SET_PORT_INFO32:
        return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_SET_PORT_INFO, argp);
    case SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT32:
        return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, argp);
    }
    return -ENOIOCTLCMD;
}
gpl-2.0
lyn1337/LinuxDSc2
linux-2.6.x/arch/m68knommu/platform/5249/config.c
39
2972
/***************************************************************************/

/*
 *	linux/arch/m68knommu/platform/5249/config.c
 *
 *	Board-support setup for the ColdFire MCF5249: DMA base table,
 *	interrupt autovectoring, timer ICR programming and BSP config.
 *
 *	Copyright (C) 2002, Greg Ungerer (gerg@snapgear.com)
 */

/***************************************************************************/

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/param.h>
#include <linux/init.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcftimer.h>
#include <asm/mcfsim.h>
#include <asm/mcfdma.h>

/***************************************************************************/

void coldfire_tick(void);
void coldfire_timer_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
unsigned long coldfire_timer_offset(void);
void coldfire_trap_init(void);
void coldfire_reset(void);

/***************************************************************************/

/*
 *	DMA channel base address table.
 */
unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS] = {
	MCF_MBAR + MCFDMA_BASE0,
	MCF_MBAR + MCFDMA_BASE1,
	MCF_MBAR + MCFDMA_BASE2,
	MCF_MBAR + MCFDMA_BASE3,
};

unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];

/***************************************************************************/

/*
 *	Enable autovectoring for interrupt vectors 25..31: set the matching
 *	bit in the SIM AVR register and unmask it in the interrupt mask.
 *	Vectors outside that range are silently ignored.
 */
void mcf_autovector(unsigned int vec)
{
	volatile unsigned char  *mbar;

	if ((vec >= 25) && (vec <= 31)) {
		mbar = (volatile unsigned char *) MCF_MBAR;
		vec = 0x1 << (vec - 24);
		*(mbar + MCFSIM_AVR) |= vec;
		mcf_setimr(mcf_getimr() & ~vec);
	}
}

/***************************************************************************/

/*
 *	Program the interrupt control register for hardware timer 1 or 2
 *	(autovectored, given priority level) and unmask its interrupt.
 *	Any timer number other than 2 selects timer 1.
 */
void mcf_settimericr(unsigned int timer, unsigned int level)
{
	volatile unsigned char *icrp;
	unsigned int icr, imr;

	if (timer <= 2) {
		switch (timer) {
		case 2:  icr = MCFSIM_TIMER2ICR; imr = MCFSIM_IMR_TIMER2; break;
		default: icr = MCFSIM_TIMER1ICR; imr = MCFSIM_IMR_TIMER1; break;
		}

		icrp = (volatile unsigned char *) (MCF_MBAR + icr);
		*icrp = MCFSIM_ICR_AUTOVEC | (level << 2) | MCFSIM_ICR_PRI3;
		mcf_setimr(mcf_getimr() & ~imr);
	}
}

/***************************************************************************/

/*
 *	Return non-zero if the given timer (1 or 2) has a pending interrupt
 *	in the interrupt-pending register; 0 for any other timer number.
 */
int mcf_timerirqpending(int timer)
{
	unsigned int imr = 0;

	switch (timer) {
	case 1:  imr = MCFSIM_IMR_TIMER1; break;
	case 2:  imr = MCFSIM_IMR_TIMER2; break;
	default: break;
	}
	return (mcf_getipr() & imr);
}

/***************************************************************************/

/*
 *	Board setup: mask all interrupts, fetch/clear the kernel command
 *	line and install the ColdFire machine vector callbacks.
 */
void config_BSP(char *commandp, int size)
{
	mcf_setimr(MCFSIM_IMR_MASKALL);

#if defined(CONFIG_BOOTPARAM)
	strncpy(commandp, CONFIG_BOOTPARAM_STRING, size);
	commandp[size-1] = 0;
#else
	memset(commandp, 0, size);
#endif

	mach_sched_init = coldfire_timer_init;
	mach_tick = coldfire_tick;
	mach_gettimeoffset = coldfire_timer_offset;
	mach_trap_init = coldfire_trap_init;
	mach_reset = coldfire_reset;
}

/***************************************************************************/
gpl-2.0
FEDEVEL/imx6tinyrex-linux-3.0.35
drivers/media/video/mxc/capture/adv7180.c
39
35973
/* * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved. */ /* * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ /*! * @file adv7180.c * * @brief Analog Device ADV7180 video decoder functions * * @ingroup Camera */ #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <linux/regulator/consumer.h> #include <linux/fsl_devices.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-int-device.h> #include "mxc_v4l2_capture.h" static struct regulator *dvddio_regulator; static struct regulator *dvdd_regulator; static struct regulator *avdd_regulator; static struct regulator *pvdd_regulator; static struct fsl_mxc_tvin_platform_data *tvin_plat; extern void gpio_sensor_active(void); extern void gpio_sensor_inactive(void); static int adv7180_probe(struct i2c_client *adapter, const struct i2c_device_id *id); static int adv7180_detach(struct i2c_client *client); static const struct i2c_device_id adv7180_id[] = { {"adv7180", 0}, {}, }; MODULE_DEVICE_TABLE(i2c, adv7180_id); static struct i2c_driver adv7180_i2c_driver = { .driver = { .owner = THIS_MODULE, .name = "adv7180", }, .probe = adv7180_probe, .remove = adv7180_detach, .id_table = adv7180_id, }; /*! * Maintains the information on the current state of the sensor. */ struct sensor { struct sensor_data sen; v4l2_std_id std_id; } adv7180_data; /*! List of input video formats supported. The video formats is corresponding * with v4l2 id in video_fmt_t */ typedef enum { ADV7180_NTSC = 0, /*!< Locked on (M) NTSC video signal. */ ADV7180_PAL, /*!< (B, G, H, I, N)PAL video signal. */ ADV7180_NOT_LOCKED, /*!< Not locked on a signal. 
 */
} video_fmt_idx;

/*! Number of video standards supported (including 'not locked' signal). */
#define ADV7180_STD_MAX		(ADV7180_PAL + 1)

/*! Video format structure. */
typedef struct {
	int v4l2_id;		/*!< Video for linux ID. */
	char name[16];		/*!< Name (e.g., "NTSC", "PAL", etc.) */
	u16 raw_width;		/*!< Raw width. */
	u16 raw_height;		/*!< Raw height. */
	u16 active_width;	/*!< Active width. */
	u16 active_height;	/*!< Active height. */
} video_fmt_t;

/*! Description of video formats supported.
 *
 *  PAL: raw=720x625, active=720x576.
 *  NTSC: raw=720x525, active=720x480.
 */
static video_fmt_t video_fmts[] = {
	{			/*! NTSC */
	 .v4l2_id = V4L2_STD_NTSC,
	 .name = "NTSC",
	 .raw_width = 720,	/* SENS_FRM_WIDTH */
	 .raw_height = 525,	/* SENS_FRM_HEIGHT */
	 .active_width = 720,	/* ACT_FRM_WIDTH plus 1 */
	 .active_height = 480,	/* ACT_FRM_WIDTH plus 1 */
	 },
	{			/*! (B, G, H, I, N) PAL */
	 .v4l2_id = V4L2_STD_PAL,
	 .name = "PAL",
	 .raw_width = 720,
	 .raw_height = 625,
	 .active_width = 720,
	 .active_height = 576,
	 },
	{			/*! Unlocked standard */
	 .v4l2_id = V4L2_STD_ALL,
	 .name = "Autodetect",
	 .raw_width = 720,
	 .raw_height = 625,
	 .active_width = 720,
	 .active_height = 576,
	 },
};

/*!* Standard index of ADV7180.  Indexes video_fmts[]; updated by
 * adv7180_get_std() when a new standard is detected. */
static video_fmt_idx video_idx = ADV7180_PAL;

/*! @brief This mutex is used to provide mutual exclusion.
 *
 *  Create a mutex that can be used to provide mutually exclusive
 *  read/write access to the globally accessible data structures
 *  and variables that were defined above.
 */
static DEFINE_MUTEX(mutex);

#define IF_NAME				"adv7180"
/* ADV7180 register map subset used by this driver. */
#define ADV7180_INPUT_CTL		0x00	/* Input Control */
#define ADV7180_STATUS_1		0x10	/* Status #1 */
#define ADV7180_BRIGHTNESS		0x0a	/* Brightness */
#define ADV7180_IDENT			0x11	/* IDENT */
#define ADV7180_VSYNC_FIELD_CTL_1	0x31	/* VSYNC Field Control #1 */
#define ADV7180_MANUAL_WIN_CTL		0x3d	/* Manual Window Control */
#define ADV7180_SD_SATURATION_CB	0xe3	/* SD Saturation Cb */
#define ADV7180_SD_SATURATION_CR	0xe4	/* SD Saturation Cr */
#define ADV7180_PWR_MNG			0x0f	/* Power Management */

/* supported controls */
/* This hasn't been fully implemented yet.
 * This is how it should work, though. */
static struct v4l2_queryctrl adv7180_qctrl[] = {
	{
	.id = V4L2_CID_BRIGHTNESS,
	.type = V4L2_CTRL_TYPE_INTEGER,
	.name = "Brightness",
	.minimum = 0,		/* check this value */
	.maximum = 255,		/* check this value */
	.step = 1,		/* check this value */
	.default_value = 127,	/* check this value */
	.flags = 0,
	}, {
	.id = V4L2_CID_SATURATION,
	.type = V4L2_CTRL_TYPE_INTEGER,
	.name = "Saturation",
	.minimum = 0,		/* check this value */
	.maximum = 255,		/* check this value */
	.step = 0x1,		/* check this value */
	.default_value = 127,	/* check this value */
	.flags = 0,
	}
};

/***********************************************************************
 * I2C transfer.
 ***********************************************************************/

/*! Read one register from a ADV7180 i2c slave device.
 *
 *  @param reg		register in the device we wish to access.
 *
 *  @return		the register value (0-255) on success, -1 on
 *			I2C error.  Callers that mask the result (e.g.
 *			"& 0x70") should note that -1 has all bits set,
 *			so a failed read falls into the "no match" path.
 */
static inline int adv7180_read(u8 reg)
{
	int val;

	val = i2c_smbus_read_byte_data(adv7180_data.sen.i2c_client, reg);
	if (val < 0) {
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			"%s:read reg error: reg=%2x\n", __func__, reg);
		return -1;
	}
	return val;
}

/*! Write one register of a ADV7180 i2c slave device.
 *
 *  @param reg		register in the device we wish to access.
 *  @param val		byte value to write.
 *
 *  @return		0 if success, -1 on I2C error.
 */
static int adv7180_write_reg(u8 reg, u8 val)
{
	s32 ret;

	ret = i2c_smbus_write_byte_data(adv7180_data.sen.i2c_client, reg, val);
	if (ret < 0) {
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			"%s:write reg error:reg=%2x,val=%2x\n", __func__,
			reg, val);
		return -1;
	}
	return 0;
}

/***********************************************************************
 * mxc_v4l2_capture interface.
 ***********************************************************************/

/*!
 * Return attributes of current video standard.
 * Since this device autodetects the current standard, this function also
 * sets the values that need to be changed if the standard changes.
 * There is no set std equivalent function.
 *
 *  @return		None.
 */
static void adv7180_get_std(v4l2_std_id *std)
{
	int tmp;
	int idx;

	dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180_get_std\n");

	/* Read the AD_RESULT to get the detect output video standard */
	tmp = adv7180_read(ADV7180_STATUS_1) & 0x70;

	/*
	 * NOTE(review): the mutex only covers the classification below;
	 * the update of video_idx/adv7180_data further down is done
	 * outside the lock -- confirm callers serialize this themselves.
	 */
	mutex_lock(&mutex);
	if (tmp == 0x40) {
		/* PAL */
		*std = V4L2_STD_PAL;
		idx = ADV7180_PAL;
	} else if (tmp == 0) {
		/*NTSC*/
		*std = V4L2_STD_NTSC;
		idx = ADV7180_NTSC;
	} else {
		/* Any other AD_RESULT (including a failed I2C read,
		 * which yields 0x70 after masking) is treated as
		 * "not locked". */
		*std = V4L2_STD_ALL;
		idx = ADV7180_NOT_LOCKED;
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			"Got invalid video standard!\n");
	}
	mutex_unlock(&mutex);

	/* This assumes autodetect which this device uses. */
	if (*std != adv7180_data.std_id) {
		video_idx = idx;
		adv7180_data.std_id = *std;
		adv7180_data.sen.pix.width = video_fmts[video_idx].raw_width;
		adv7180_data.sen.pix.height = video_fmts[video_idx].raw_height;
	}
}

/***********************************************************************
 * IOCTL Functions from v4l2_int_ioctl_desc.
 ***********************************************************************/

/*!
 * ioctl_g_ifparm - V4L2 sensor interface handler for vidioc_int_g_ifparm_num
 * s: pointer to standard V4L2 device structure
 * p: pointer to standard V4L2 vidioc_int_g_ifparm_num ioctl structure
 *
 * Gets slave interface parameters.
 * Calculates the required xclk value to support the requested
 * clock parameters in p.  This value is returned in the p
 * parameter.
 *
 * vidioc_int_g_ifparm returns platform-specific information about the
 * interface settings used by the sensor.
 *
 * Called on open.
 */
static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p)
{
	dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_g_ifparm\n");

	if (s == NULL) {
		pr_err(" ERROR!! no slave device set!\n");
		return -1;
	}

	/* Initialize structure to 0s then set any non-0 values. */
	memset(p, 0, sizeof(*p));
	p->if_type = V4L2_IF_TYPE_BT656; /* This is the only possibility. */
	p->u.bt656.mode = V4L2_IF_TYPE_BT656_MODE_NOBT_8BIT;
	p->u.bt656.nobt_hs_inv = 1;
	p->u.bt656.bt_sync_correct = 1;

	/* ADV7180 has a dedicated clock so no clock settings needed. */

	return 0;
}

/*!
 * Sets the camera power.
 *
 * s  pointer to the camera device
 * on if 1, power is to be turned on.  0 means power is to be turned off
 *
 * ioctl_s_power - V4L2 sensor interface handler for vidioc_int_s_power_num
 * @s: pointer to standard V4L2 device structure
 * @on: power state to which device is to be set
 *
 * Sets devices power state to requested state, if possible.
 * This is called on open, close, suspend and resume.
 */
static int ioctl_s_power(struct v4l2_int_device *s, int on)
{
	struct sensor *sensor = s->priv;

	dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_s_power\n");

	/* Only touch the hardware on an actual state transition;
	 * 0x04/0x24 are the power-up/power-down values for the
	 * ADV7180 power management register. */
	if (on && !sensor->sen.on) {
		gpio_sensor_active();

		if (adv7180_write_reg(ADV7180_PWR_MNG, 0x04) != 0)
			return -EIO;

		/*
		 * FIXME:Additional 400ms to wait the chip to be stable?
		 * This is a workaround for preview scrolling issue.
		 */
		msleep(400);
	} else if (!on && sensor->sen.on) {
		if (adv7180_write_reg(ADV7180_PWR_MNG, 0x24) != 0)
			return -EIO;
		gpio_sensor_inactive();
	}

	sensor->sen.on = on;

	return 0;
}

/*!
 * ioctl_g_parm - V4L2 sensor interface handler for VIDIOC_G_PARM ioctl
 * @s: pointer to standard V4L2 device structure
 * @a: pointer to standard V4L2 VIDIOC_G_PARM ioctl structure
 *
 * Returns the sensor's video CAPTURE parameters.
 */
static int ioctl_g_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
{
	struct sensor *sensor = s->priv;
	struct v4l2_captureparm *cparm = &a->parm.capture;

	dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_g_parm\n");

	switch (a->type) {
	/* These are all the possible cases. */
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		pr_debug(" type is V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
		/* Zero the whole streamparm, then re-set the type the
		 * caller asked for before filling in capture params. */
		memset(a, 0, sizeof(*a));
		a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		cparm->capability = sensor->sen.streamcap.capability;
		cparm->timeperframe = sensor->sen.streamcap.timeperframe;
		cparm->capturemode = sensor->sen.streamcap.capturemode;
		break;

	/* These are all the possible cases. */
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		break;

	default:
		pr_debug("ioctl_g_parm:type is unknown %d\n", a->type);
		break;
	}

	return 0;
}

/*!
 * ioctl_s_parm - V4L2 sensor interface handler for VIDIOC_S_PARM ioctl
 * @s: pointer to standard V4L2 device structure
 * @a: pointer to standard V4L2 VIDIOC_S_PARM ioctl structure
 *
 * Configures the sensor to use the input parameters, if possible.  If
 * not possible, reverts to the old parameters and returns the
 * appropriate error code.
 *
 * This driver cannot change these settings.
 */
static int ioctl_s_parm(struct v4l2_int_device *s, struct v4l2_streamparm *a)
{
	dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_s_parm\n");

	/* All requests are accepted but ignored -- the decoder's timing
	 * is fixed by the detected standard. */
	switch (a->type) {
	/* These are all the possible cases.
	 */
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		break;

	default:
		pr_debug(" type is unknown - %d\n", a->type);
		break;
	}

	return 0;
}

/*!
 * ioctl_g_fmt_cap - V4L2 sensor interface handler for ioctl_g_fmt_cap
 * @s: pointer to standard V4L2 device structure
 * @f: pointer to standard V4L2 v4l2_format structure
 *
 * Returns the sensor's current pixel format in the v4l2_format
 * parameter.
 */
static int ioctl_g_fmt_cap(struct v4l2_int_device *s, struct v4l2_format *f)
{
	struct sensor *sensor = s->priv;

	dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_g_fmt_cap\n");

	switch (f->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		pr_debug(" Returning size of %dx%d\n",
			 sensor->sen.pix.width, sensor->sen.pix.height);
		f->fmt.pix = sensor->sen.pix;
		break;

	case V4L2_BUF_TYPE_PRIVATE: {
		/* Private convention with the capture driver: return the
		 * detected standard smuggled through the pixelformat
		 * field.  NOTE(review): v4l2_std_id is 64-bit and is
		 * truncated to u32 here -- confirm the caller only tests
		 * the low bits. */
		v4l2_std_id std;
		adv7180_get_std(&std);
		f->fmt.pix.pixelformat = (u32)std;
	}
		break;

	default:
		f->fmt.pix = sensor->sen.pix;
		break;
	}

	return 0;
}

/*!
 * ioctl_queryctrl - V4L2 sensor interface handler for VIDIOC_QUERYCTRL ioctl
 * @s: pointer to standard V4L2 device structure
 * @qc: standard V4L2 VIDIOC_QUERYCTRL ioctl structure
 *
 * If the requested control is supported, returns the control information
 * from the video_control[] array.  Otherwise, returns -EINVAL if the
 * control is not supported.
 */
static int ioctl_queryctrl(struct v4l2_int_device *s,
			   struct v4l2_queryctrl *qc)
{
	int i;

	dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_queryctrl\n");

	for (i = 0; i < ARRAY_SIZE(adv7180_qctrl); i++)
		if (qc->id && qc->id == adv7180_qctrl[i].id) {
			memcpy(qc, &(adv7180_qctrl[i]), sizeof(*qc));
			return 0;
		}

	return -EINVAL;
}

/*!
 * ioctl_g_ctrl - V4L2 sensor interface handler for VIDIOC_G_CTRL ioctl
 * @s: pointer to standard V4L2 device structure
 * @vc: standard V4L2 VIDIOC_G_CTRL ioctl structure
 *
 * If the requested control is supported, returns the control's current
 * value from the video_control[] array.  Otherwise, returns -EPERM
 * (NOTE(review): -EINVAL would match V4L2 convention and
 * ioctl_queryctrl above) if the control is not supported.
 */
static int ioctl_g_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
{
	int ret = 0;
	int sat = 0;

	dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_g_ctrl\n");

	switch (vc->id) {
	case V4L2_CID_BRIGHTNESS:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_BRIGHTNESS\n");
		/* Refresh the cached value from the chip before returning. */
		adv7180_data.sen.brightness = adv7180_read(ADV7180_BRIGHTNESS);
		vc->value = adv7180_data.sen.brightness;
		break;
	case V4L2_CID_CONTRAST:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_CONTRAST\n");
		vc->value = adv7180_data.sen.contrast;
		break;
	case V4L2_CID_SATURATION:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_SATURATION\n");
		/* Cb and Cr saturation are written together in s_ctrl,
		 * so reading Cb alone is representative. */
		sat = adv7180_read(ADV7180_SD_SATURATION_CB);
		adv7180_data.sen.saturation = sat;
		vc->value = adv7180_data.sen.saturation;
		break;
	case V4L2_CID_HUE:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_HUE\n");
		vc->value = adv7180_data.sen.hue;
		break;
	case V4L2_CID_AUTO_WHITE_BALANCE:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_AUTO_WHITE_BALANCE\n");
		break;
	case V4L2_CID_DO_WHITE_BALANCE:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_DO_WHITE_BALANCE\n");
		break;
	case V4L2_CID_RED_BALANCE:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_RED_BALANCE\n");
		vc->value = adv7180_data.sen.red;
		break;
	case V4L2_CID_BLUE_BALANCE:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_BLUE_BALANCE\n");
		vc->value = adv7180_data.sen.blue;
		break;
	case V4L2_CID_GAMMA:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_GAMMA\n");
		break;
	case V4L2_CID_EXPOSURE:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_EXPOSURE\n");
		vc->value = adv7180_data.sen.ae_mode;
		break;
	case V4L2_CID_AUTOGAIN:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_AUTOGAIN\n");
		break;
	case V4L2_CID_GAIN:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_GAIN\n");
		break;
	case V4L2_CID_HFLIP:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_HFLIP\n");
		break;
	case V4L2_CID_VFLIP:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_VFLIP\n");
		break;
	default:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" Default case\n");
		vc->value = 0;
		ret = -EPERM;
		break;
	}

	return ret;
}

/*!
 * ioctl_s_ctrl - V4L2 sensor interface handler for VIDIOC_S_CTRL ioctl
 * @s: pointer to standard V4L2 device structure
 * @vc: standard V4L2 VIDIOC_S_CTRL ioctl structure
 *
 * If the requested control is supported, sets the control's current
 * value in HW (and updates the video_control[] array).  Otherwise,
 * returns -EPERM (see note on ioctl_g_ctrl) if the control is not
 * supported.  Only brightness and saturation actually reach the
 * hardware; the other recognized IDs are accepted and ignored.
 */
static int ioctl_s_ctrl(struct v4l2_int_device *s, struct v4l2_control *vc)
{
	int retval = 0;
	u8 tmp;

	dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_s_ctrl\n");

	switch (vc->id) {
	case V4L2_CID_BRIGHTNESS:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_BRIGHTNESS\n");
		tmp = vc->value;
		adv7180_write_reg(ADV7180_BRIGHTNESS, tmp);
		adv7180_data.sen.brightness = vc->value;
		break;
	case V4L2_CID_CONTRAST:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_CONTRAST\n");
		break;
	case V4L2_CID_SATURATION:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_SATURATION\n");
		tmp = vc->value;
		/* Keep Cb and Cr saturation in step. */
		adv7180_write_reg(ADV7180_SD_SATURATION_CB, tmp);
		adv7180_write_reg(ADV7180_SD_SATURATION_CR, tmp);
		adv7180_data.sen.saturation = vc->value;
		break;
	case V4L2_CID_HUE:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_HUE\n");
		break;
	case V4L2_CID_AUTO_WHITE_BALANCE:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_AUTO_WHITE_BALANCE\n");
		break;
	case V4L2_CID_DO_WHITE_BALANCE:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_DO_WHITE_BALANCE\n");
		break;
	case V4L2_CID_RED_BALANCE:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_RED_BALANCE\n");
		break;
	case V4L2_CID_BLUE_BALANCE:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_BLUE_BALANCE\n");
		break;
	case V4L2_CID_GAMMA:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_GAMMA\n");
		break;
	case V4L2_CID_EXPOSURE:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_EXPOSURE\n");
		break;
	case V4L2_CID_AUTOGAIN:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_AUTOGAIN\n");
		break;
	case V4L2_CID_GAIN:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_GAIN\n");
		break;
	case V4L2_CID_HFLIP:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_HFLIP\n");
		break;
	case V4L2_CID_VFLIP:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" V4L2_CID_VFLIP\n");
		break;
	default:
		dev_dbg(&adv7180_data.sen.i2c_client->dev,
			" Default case\n");
		retval = -EPERM;
		break;
	}

	return retval;
}

/*!
 * ioctl_enum_framesizes - V4L2 sensor interface handler for
 *			   VIDIOC_ENUM_FRAMESIZES ioctl
 * @s: pointer to standard V4L2 device structure
 * @fsize: standard V4L2 VIDIOC_ENUM_FRAMESIZES ioctl structure
 *
 * Return 0 if successful, otherwise -EINVAL.  Only a single size --
 * the active area of the currently detected standard -- is reported.
 */
static int ioctl_enum_framesizes(struct v4l2_int_device *s,
				 struct v4l2_frmsizeenum *fsize)
{
	if (fsize->index >= 1)
		return -EINVAL;

	fsize->discrete.width = video_fmts[video_idx].active_width;
	fsize->discrete.height = video_fmts[video_idx].active_height;

	return 0;
}

/*!
 * ioctl_g_chip_ident - V4L2 sensor interface handler for
 *			VIDIOC_DBG_G_CHIP_IDENT ioctl
 * @s: pointer to standard V4L2 device structure
 * @id: pointer to int
 *
 * Return 0.  The int pointer is the v4l2-int-device calling convention
 * for a struct v4l2_dbg_chip_ident; it is cast back here.
 */
static int ioctl_g_chip_ident(struct v4l2_int_device *s, int *id)
{
	((struct v4l2_dbg_chip_ident *)id)->match.type =
					V4L2_CHIP_MATCH_I2C_DRIVER;
	strcpy(((struct v4l2_dbg_chip_ident *)id)->match.name,
						"adv7180_decoder");
	((struct v4l2_dbg_chip_ident *)id)->ident = V4L2_IDENT_ADV7180;

	return 0;
}

/*!
 * ioctl_init - V4L2 sensor interface handler for VIDIOC_INT_INIT
 * @s: pointer to standard V4L2 device structure
 */
static int ioctl_init(struct v4l2_int_device *s)
{
	dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180:ioctl_init\n");
	return 0;
}

/*!
 * ioctl_dev_init - V4L2 sensor interface handler for vidioc_int_dev_init_num
 * @s: pointer to standard V4L2 device structure
 *
 * Initialise the device when slave attaches to the master.
 */
static int ioctl_dev_init(struct v4l2_int_device *s)
{
	dev_dbg(&adv7180_data.sen.i2c_client->dev, "adv7180:ioctl_dev_init\n");
	return 0;
}

/*!
 * This structure defines all the ioctls for this module.
 */
static struct v4l2_int_ioctl_desc adv7180_ioctl_desc[] = {

	{vidioc_int_dev_init_num, (v4l2_int_ioctl_func*)ioctl_dev_init},

	/*!
	 * Delinitialise the dev. at slave detach.
	 * The complement of ioctl_dev_init.
	 */
/*	{vidioc_int_dev_exit_num, (v4l2_int_ioctl_func *)ioctl_dev_exit}, */

	{vidioc_int_s_power_num, (v4l2_int_ioctl_func*)ioctl_s_power},
	{vidioc_int_g_ifparm_num, (v4l2_int_ioctl_func*)ioctl_g_ifparm},
/*	{vidioc_int_g_needs_reset_num,
				(v4l2_int_ioctl_func *)ioctl_g_needs_reset}, */
/*	{vidioc_int_reset_num, (v4l2_int_ioctl_func *)ioctl_reset}, */
	{vidioc_int_init_num, (v4l2_int_ioctl_func*)ioctl_init},

	/*!
	 * VIDIOC_ENUM_FMT ioctl for the CAPTURE buffer type.
	 */
/*	{vidioc_int_enum_fmt_cap_num,
				(v4l2_int_ioctl_func *)ioctl_enum_fmt_cap}, */

	/*!
	 * VIDIOC_TRY_FMT ioctl for the CAPTURE buffer type.
	 * This ioctl is used to negotiate the image capture size and
	 * pixel format without actually making it take effect.
	 */
/*	{vidioc_int_try_fmt_cap_num,
				(v4l2_int_ioctl_func *)ioctl_try_fmt_cap}, */

	{vidioc_int_g_fmt_cap_num, (v4l2_int_ioctl_func*)ioctl_g_fmt_cap},

	/*!
	 * If the requested format is supported, configures the HW to use that
	 * format, returns error code if format not supported or HW can't be
	 * correctly configured.
	 */
/*	{vidioc_int_s_fmt_cap_num, (v4l2_int_ioctl_func *)ioctl_s_fmt_cap}, */

	{vidioc_int_g_parm_num, (v4l2_int_ioctl_func*)ioctl_g_parm},
	{vidioc_int_s_parm_num, (v4l2_int_ioctl_func*)ioctl_s_parm},
	{vidioc_int_queryctrl_num, (v4l2_int_ioctl_func*)ioctl_queryctrl},
	{vidioc_int_g_ctrl_num, (v4l2_int_ioctl_func*)ioctl_g_ctrl},
	{vidioc_int_s_ctrl_num, (v4l2_int_ioctl_func*)ioctl_s_ctrl},
	{vidioc_int_enum_framesizes_num,
				(v4l2_int_ioctl_func *) ioctl_enum_framesizes},
	{vidioc_int_g_chip_ident_num,
				(v4l2_int_ioctl_func *)ioctl_g_chip_ident},
};

static struct v4l2_int_slave adv7180_slave = {
	.ioctls = adv7180_ioctl_desc,
	.num_ioctls = ARRAY_SIZE(adv7180_ioctl_desc),
};

static struct v4l2_int_device adv7180_int_device = {
	.module = THIS_MODULE,
	.name = "adv7180",
	.type = v4l2_int_type_slave,
	.u = {
		.slave = &adv7180_slave,
	},
};

/***********************************************************************
 * I2C client and driver.
 ***********************************************************************/

/*! ADV7180 Reset function.
 *
 *  Selects the input (CVBS or YPbPr) and then loads the full
 *  vendor-recommended register script.  The script values below are
 *  taken verbatim from the reference configuration; do not reorder.
 *
 *  @return		None.
 */
static void adv7180_hard_reset(bool cvbs)
{
	dev_dbg(&adv7180_data.sen.i2c_client->dev,
		"In adv7180:adv7180_hard_reset\n");

	if (cvbs) {
		/* Set CVBS input on AIN1 */
		adv7180_write_reg(ADV7180_INPUT_CTL, 0x00);
	} else {
		/*
		 * Set YPbPr input on AIN1,4,5 and normal
		 * operations(autodection of all stds).
		 */
		adv7180_write_reg(ADV7180_INPUT_CTL, 0x09);
	}

	/* Datasheet recommends */
	adv7180_write_reg(0x01, 0xc8);
	adv7180_write_reg(0x02, 0x04);
	adv7180_write_reg(0x03, 0x00);
	adv7180_write_reg(0x04, 0x45);
	adv7180_write_reg(0x05, 0x00);
	adv7180_write_reg(0x06, 0x02);
	adv7180_write_reg(0x07, 0x7F);
	adv7180_write_reg(0x08, 0x80);
	adv7180_write_reg(0x0A, 0x00);
	adv7180_write_reg(0x0B, 0x00);
	adv7180_write_reg(0x0C, 0x36);
	adv7180_write_reg(0x0D, 0x7C);
	adv7180_write_reg(0x0E, 0x00);
	adv7180_write_reg(0x0F, 0x00);
	adv7180_write_reg(0x13, 0x00);
	adv7180_write_reg(0x14, 0x12);
	adv7180_write_reg(0x15, 0x00);
	adv7180_write_reg(0x16, 0x00);
	adv7180_write_reg(0x17, 0x01);
	adv7180_write_reg(0x18, 0x93);
	/* NOTE(review): out of sequence between 0x18 and 0x1A -- possibly
	 * intended as reg 0x19; 0xF1 is also written again further down
	 * with 0x0C.  Verify against the datasheet script before changing. */
	adv7180_write_reg(0xF1, 0x19);
	adv7180_write_reg(0x1A, 0x00);
	adv7180_write_reg(0x1B, 0x00);
	adv7180_write_reg(0x1C, 0x00);
	adv7180_write_reg(0x1D, 0x40);
	adv7180_write_reg(0x1E, 0x00);
	adv7180_write_reg(0x1F, 0x00);
	adv7180_write_reg(0x20, 0x00);
	adv7180_write_reg(0x21, 0x00);
	adv7180_write_reg(0x22, 0x00);
	adv7180_write_reg(0x23, 0xC0);
	adv7180_write_reg(0x24, 0x00);
	adv7180_write_reg(0x25, 0x00);
	adv7180_write_reg(0x26, 0x00);
	adv7180_write_reg(0x27, 0x58);
	adv7180_write_reg(0x28, 0x00);
	adv7180_write_reg(0x29, 0x00);
	adv7180_write_reg(0x2A, 0x00);
	adv7180_write_reg(0x2B, 0xE1);
	adv7180_write_reg(0x2C, 0xAE);
	adv7180_write_reg(0x2D, 0xF4);
	adv7180_write_reg(0x2E, 0x00);
	adv7180_write_reg(0x2F, 0xF0);
	adv7180_write_reg(0x30, 0x00);
	adv7180_write_reg(0x31, 0x12);
	adv7180_write_reg(0x32, 0x41);
	adv7180_write_reg(0x33, 0x84);
	adv7180_write_reg(0x34, 0x00);
	adv7180_write_reg(0x35, 0x02);
	adv7180_write_reg(0x36, 0x00);
	adv7180_write_reg(0x37, 0x01);
	adv7180_write_reg(0x38, 0x80);
	adv7180_write_reg(0x39, 0xC0);
	adv7180_write_reg(0x3A, 0x10);
	adv7180_write_reg(0x3B, 0x05);
	adv7180_write_reg(0x3C, 0x58);
	adv7180_write_reg(0x3D, 0xB2);
	adv7180_write_reg(0x3E, 0x64);
	adv7180_write_reg(0x3F, 0xE4);
	adv7180_write_reg(0x40, 0x90);
	adv7180_write_reg(0x41, 0x01);
	adv7180_write_reg(0x42, 0x7E);
	adv7180_write_reg(0x43, 0xA4);
	adv7180_write_reg(0x44, 0xFF);
	adv7180_write_reg(0x45, 0xB6);
	adv7180_write_reg(0x46, 0x12);
	adv7180_write_reg(0x48, 0x00);
	adv7180_write_reg(0x49, 0x00);
	adv7180_write_reg(0x4A, 0x00);
	adv7180_write_reg(0x4B, 0x00);
	adv7180_write_reg(0x4C, 0x00);
	adv7180_write_reg(0x4D, 0xEF);
	adv7180_write_reg(0x4E, 0x08);
	adv7180_write_reg(0x4F, 0x08);
	adv7180_write_reg(0x50, 0x08);
	adv7180_write_reg(0x51, 0x24);
	adv7180_write_reg(0x52, 0x0B);
	adv7180_write_reg(0x53, 0x4E);
	adv7180_write_reg(0x54, 0x80);
	adv7180_write_reg(0x55, 0x00);
	adv7180_write_reg(0x56, 0x10);
	adv7180_write_reg(0x57, 0x00);
	adv7180_write_reg(0x58, 0x00);
	adv7180_write_reg(0x59, 0x00);
	adv7180_write_reg(0x5A, 0x00);
	adv7180_write_reg(0x5B, 0x00);
	adv7180_write_reg(0x5C, 0x00);
	adv7180_write_reg(0x5D, 0x00);
	adv7180_write_reg(0x5E, 0x00);
	adv7180_write_reg(0x5F, 0x00);
	adv7180_write_reg(0x60, 0x00);
	adv7180_write_reg(0x61, 0x00);
	adv7180_write_reg(0x62, 0x20);
	adv7180_write_reg(0x63, 0x00);
	adv7180_write_reg(0x64, 0x00);
	adv7180_write_reg(0x65, 0x00);
	adv7180_write_reg(0x66, 0x00);
	adv7180_write_reg(0x67, 0x03);
	adv7180_write_reg(0x68, 0x01);
	adv7180_write_reg(0x69, 0x00);
	adv7180_write_reg(0x6A, 0x00);
	adv7180_write_reg(0x6B, 0xC0);
	adv7180_write_reg(0x6C, 0x00);
	adv7180_write_reg(0x6D, 0x00);
	adv7180_write_reg(0x6E, 0x00);
	adv7180_write_reg(0x6F, 0x00);
	adv7180_write_reg(0x70, 0x00);
	adv7180_write_reg(0x71, 0x00);
	adv7180_write_reg(0x72, 0x00);
	adv7180_write_reg(0x73, 0x10);
	adv7180_write_reg(0x74, 0x04);
	adv7180_write_reg(0x75, 0x01);
	adv7180_write_reg(0x76, 0x00);
	adv7180_write_reg(0x77, 0x3F);
	adv7180_write_reg(0x78, 0xFF);
	adv7180_write_reg(0x79, 0xFF);
	adv7180_write_reg(0x7A, 0xFF);
	adv7180_write_reg(0x7B, 0x1E);
	adv7180_write_reg(0x7C, 0xC0);
	adv7180_write_reg(0x7D, 0x00);
	adv7180_write_reg(0x7E, 0x00);
	adv7180_write_reg(0x7F, 0x00);
	adv7180_write_reg(0x80, 0x00);
	adv7180_write_reg(0x81, 0xC0);
	adv7180_write_reg(0x82, 0x04);
	adv7180_write_reg(0x83, 0x00);
	adv7180_write_reg(0x84, 0x0C);
	adv7180_write_reg(0x85, 0x02);
	adv7180_write_reg(0x86, 0x03);
	adv7180_write_reg(0x87, 0x63);
	adv7180_write_reg(0x88, 0x5A);
	adv7180_write_reg(0x89, 0x08);
	adv7180_write_reg(0x8A, 0x10);
	adv7180_write_reg(0x8B, 0x00);
	adv7180_write_reg(0x8C, 0x40);
	adv7180_write_reg(0x8D, 0x00);
	adv7180_write_reg(0x8E, 0x40);
	adv7180_write_reg(0x8F, 0x00);
	adv7180_write_reg(0x90, 0x00);
	adv7180_write_reg(0x91, 0x50);
	adv7180_write_reg(0x92, 0x00);
	adv7180_write_reg(0x93, 0x00);
	adv7180_write_reg(0x94, 0x00);
	adv7180_write_reg(0x95, 0x00);
	adv7180_write_reg(0x96, 0x00);
	adv7180_write_reg(0x97, 0xF0);
	adv7180_write_reg(0x98, 0x00);
	adv7180_write_reg(0x99, 0x00);
	adv7180_write_reg(0x9A, 0x00);
	adv7180_write_reg(0x9B, 0x00);
	adv7180_write_reg(0x9C, 0x00);
	adv7180_write_reg(0x9D, 0x00);
	adv7180_write_reg(0x9E, 0x00);
	adv7180_write_reg(0x9F, 0x00);
	adv7180_write_reg(0xA0, 0x00);
	adv7180_write_reg(0xA1, 0x00);
	adv7180_write_reg(0xA2, 0x00);
	adv7180_write_reg(0xA3, 0x00);
	adv7180_write_reg(0xA4, 0x00);
	adv7180_write_reg(0xA5, 0x00);
	adv7180_write_reg(0xA6, 0x00);
	adv7180_write_reg(0xA7, 0x00);
	adv7180_write_reg(0xA8, 0x00);
	adv7180_write_reg(0xA9, 0x00);
	adv7180_write_reg(0xAA, 0x00);
	adv7180_write_reg(0xAB, 0x00);
	adv7180_write_reg(0xAC, 0x00);
	adv7180_write_reg(0xAD, 0x00);
	adv7180_write_reg(0xAE, 0x60);
	adv7180_write_reg(0xAF, 0x00);
	adv7180_write_reg(0xB0, 0x00);
	adv7180_write_reg(0xB1, 0x60);
	adv7180_write_reg(0xB2, 0x1C);
	adv7180_write_reg(0xB3, 0x54);
	adv7180_write_reg(0xB4, 0x00);
	adv7180_write_reg(0xB5, 0x00);
	adv7180_write_reg(0xB6, 0x00);
	adv7180_write_reg(0xB7, 0x13);
	adv7180_write_reg(0xB8, 0x03);
	adv7180_write_reg(0xB9, 0x33);
	adv7180_write_reg(0xBF, 0x02);
	adv7180_write_reg(0xC0, 0x00);
	adv7180_write_reg(0xC1, 0x00);
	adv7180_write_reg(0xC2, 0x00);
	adv7180_write_reg(0xC3, 0x00);
	adv7180_write_reg(0xC4, 0x00);
	adv7180_write_reg(0xC5, 0x81);
	adv7180_write_reg(0xC6, 0x00);
	adv7180_write_reg(0xC7, 0x00);
	adv7180_write_reg(0xC8, 0x00);
	adv7180_write_reg(0xC9, 0x04);
	adv7180_write_reg(0xCC, 0x69);
	adv7180_write_reg(0xCD, 0x00);
	adv7180_write_reg(0xCE, 0x01);
	adv7180_write_reg(0xCF, 0xB4);
	adv7180_write_reg(0xD0, 0x00);
	adv7180_write_reg(0xD1, 0x10);
	adv7180_write_reg(0xD2, 0xFF);
	adv7180_write_reg(0xD3, 0xFF);
	adv7180_write_reg(0xD4, 0x7F);
	adv7180_write_reg(0xD5, 0x7F);
	adv7180_write_reg(0xD6, 0x3E);
	adv7180_write_reg(0xD7, 0x08);
	adv7180_write_reg(0xD8, 0x3C);
	adv7180_write_reg(0xD9, 0x08);
	adv7180_write_reg(0xDA, 0x3C);
	adv7180_write_reg(0xDB, 0x9B);
	adv7180_write_reg(0xDC, 0xAC);
	adv7180_write_reg(0xDD, 0x4C);
	adv7180_write_reg(0xDE, 0x00);
	adv7180_write_reg(0xDF, 0x00);
	adv7180_write_reg(0xE0, 0x14);
	adv7180_write_reg(0xE1, 0x80);
	adv7180_write_reg(0xE2, 0x80);
	adv7180_write_reg(0xE3, 0x80);
	adv7180_write_reg(0xE4, 0x80);
	adv7180_write_reg(0xE5, 0x25);
	adv7180_write_reg(0xE6, 0x44);
	adv7180_write_reg(0xE7, 0x63);
	adv7180_write_reg(0xE8, 0x65);
	adv7180_write_reg(0xE9, 0x14);
	adv7180_write_reg(0xEA, 0x63);
	adv7180_write_reg(0xEB, 0x55);
	adv7180_write_reg(0xEC, 0x55);
	adv7180_write_reg(0xEE, 0x00);
	adv7180_write_reg(0xEF, 0x4A);
	adv7180_write_reg(0xF0, 0x44);
	adv7180_write_reg(0xF1, 0x0C);
	adv7180_write_reg(0xF2, 0x32);
	adv7180_write_reg(0xF3, 0x00);
	adv7180_write_reg(0xF4, 0x3F);
	adv7180_write_reg(0xF5, 0xE0);
	adv7180_write_reg(0xF6, 0x69);
	adv7180_write_reg(0xF7, 0x10);
	adv7180_write_reg(0xF8, 0x00);
	adv7180_write_reg(0xF9, 0x03);
	adv7180_write_reg(0xFA, 0xFA);
	adv7180_write_reg(0xFB, 0x40);
}

/*! ADV7180 I2C attach function.
 *
 *  @param *adapter	struct i2c_adapter *.
 *
 *  @return		Error code indicating success or failure.
 */

/*!
 * ADV7180 I2C probe function.
 * Function set in i2c_driver struct.
 * Called by insmod.
 *
 *  @param *adapter	I2C adapter descriptor.
 *
 *  @return		Error code indicating success or failure.
 */
static int adv7180_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	int rev_id;
	int ret = 0;
	tvin_plat = client->dev.platform_data;

	/* NOTE(review): KERN_ERR for a debug pointer dump -- looks like
	 * leftover development output; pr_debug would be appropriate. */
	printk(KERN_ERR"DBG sensor data is at %p\n", &adv7180_data);
	pr_debug("In adv7180_probe\n");

	/*
	 * Bring up each supply named by the platform data.
	 * NOTE(review): regulator_get() returns an ERR_PTR, so the
	 * IS_ERR_VALUE((unsigned long)...) test is an unusual spelling of
	 * IS_ERR().  Also, a failure in a later supply returns -ENODEV
	 * without disabling/putting the supplies already enabled -- the
	 * cleanup is deferred entirely to adv7180_detach().
	 */
	if (tvin_plat->dvddio_reg) {
		dvddio_regulator =
		    regulator_get(&client->dev, tvin_plat->dvddio_reg);
		if (!IS_ERR_VALUE((unsigned long)dvddio_regulator)) {
			regulator_set_voltage(dvddio_regulator,
					      3300000, 3300000);
			if (regulator_enable(dvddio_regulator) != 0)
				return -ENODEV;
		}
	}

	if (tvin_plat->dvdd_reg) {
		dvdd_regulator =
		    regulator_get(&client->dev, tvin_plat->dvdd_reg);
		if (!IS_ERR_VALUE((unsigned long)dvdd_regulator)) {
			regulator_set_voltage(dvdd_regulator,
					      1800000, 1800000);
			if (regulator_enable(dvdd_regulator) != 0)
				return -ENODEV;
		}
	}

	if (tvin_plat->avdd_reg) {
		avdd_regulator =
		    regulator_get(&client->dev, tvin_plat->avdd_reg);
		if (!IS_ERR_VALUE((unsigned long)avdd_regulator)) {
			regulator_set_voltage(avdd_regulator,
					      1800000, 1800000);
			if (regulator_enable(avdd_regulator) != 0)
				return -ENODEV;
		}
	}

	if (tvin_plat->pvdd_reg) {
		pvdd_regulator =
		    regulator_get(&client->dev, tvin_plat->pvdd_reg);
		if (!IS_ERR_VALUE((unsigned long)pvdd_regulator)) {
			regulator_set_voltage(pvdd_regulator,
					      1800000, 1800000);
			if (regulator_enable(pvdd_regulator) != 0)
				return -ENODEV;
		}
	}

	/* Optional board hooks: pad/IO setup, hardware reset line,
	 * and power-down GPIO (deasserted here). */
	if (tvin_plat->io_init)
		tvin_plat->io_init();

	if (tvin_plat->reset)
		tvin_plat->reset();

	if (tvin_plat->pwdn)
		tvin_plat->pwdn(0);

	msleep(1);

	/* Set initial values for the sensor struct.
	 */
	memset(&adv7180_data, 0, sizeof(adv7180_data));
	adv7180_data.sen.i2c_client = client;
	adv7180_data.sen.streamcap.timeperframe.denominator = 30;
	adv7180_data.sen.streamcap.timeperframe.numerator = 1;
	/* Start in the "not locked" state; adv7180_get_std() switches
	 * to NTSC/PAL once the chip reports a detected standard. */
	adv7180_data.std_id = V4L2_STD_ALL;
	video_idx = ADV7180_NOT_LOCKED;
	adv7180_data.sen.pix.width = video_fmts[video_idx].raw_width;
	adv7180_data.sen.pix.height = video_fmts[video_idx].raw_height;
	adv7180_data.sen.pix.pixelformat = V4L2_PIX_FMT_UYVY;  /* YUV422 */
	adv7180_data.sen.pix.priv = 1;  /* 1 is used to indicate TV in */
	adv7180_data.sen.on = true;

	gpio_sensor_active();

	dev_dbg(&adv7180_data.sen.i2c_client->dev,
		"%s:adv7180 probe i2c address is 0x%02X\n",
		__func__, adv7180_data.sen.i2c_client->addr);

	/*! Read the revision ID of the tvin chip */
	rev_id = adv7180_read(ADV7180_IDENT);
	dev_dbg(&adv7180_data.sen.i2c_client->dev,
		"%s:Analog Device adv7%2X0 detected!\n", __func__,
		rev_id);

	/*! ADV7180 initialization. */
	adv7180_hard_reset(tvin_plat->cvbs);

	pr_debug(" type is %d (expect %d)\n",
		 adv7180_int_device.type, v4l2_int_type_slave);
	pr_debug(" num ioctls is %d\n",
		 adv7180_int_device.u.slave->num_ioctls);

	/* This function attaches this structure to the /dev/video0 device.
	 * The pointer in priv points to the mt9v111_data structure here.*/
	adv7180_int_device.priv = &adv7180_data;
	ret = v4l2_int_device_register(&adv7180_int_device);

	return ret;
}

/*!
 * ADV7180 I2C detach function.
 * Called on rmmod.
 *
 *  @param *client	struct i2c_client*.
 *
 *  @return		Error code indicating success or failure.
*/ static int adv7180_detach(struct i2c_client *client) { dev_dbg(&adv7180_data.sen.i2c_client->dev, "%s:Removing %s video decoder @ 0x%02X from adapter %s\n", __func__, IF_NAME, client->addr << 1, client->adapter->name); /* Power down via i2c */ adv7180_write_reg(ADV7180_PWR_MNG, 0x24); if (dvddio_regulator) { regulator_disable(dvddio_regulator); regulator_put(dvddio_regulator); } if (dvdd_regulator) { regulator_disable(dvdd_regulator); regulator_put(dvdd_regulator); } if (avdd_regulator) { regulator_disable(avdd_regulator); regulator_put(avdd_regulator); } if (pvdd_regulator) { regulator_disable(pvdd_regulator); regulator_put(pvdd_regulator); } v4l2_int_device_unregister(&adv7180_int_device); return 0; } /*! * ADV7180 init function. * Called on insmod. * * @return Error code indicating success or failure. */ static __init int adv7180_init(void) { u8 err = 0; pr_debug("In adv7180_init\n"); /* Tells the i2c driver what functions to call for this driver. */ err = i2c_add_driver(&adv7180_i2c_driver); if (err != 0) pr_err("%s:driver registration failed, error=%d \n", __func__, err); return err; } /*! * ADV7180 cleanup function. * Called on rmmod. * * @return Error code indicating success or failure. */ static void __exit adv7180_clean(void) { dev_dbg(&adv7180_data.sen.i2c_client->dev, "In adv7180_clean\n"); i2c_del_driver(&adv7180_i2c_driver); gpio_sensor_inactive(); } module_init(adv7180_init); module_exit(adv7180_clean); MODULE_AUTHOR("Freescale Semiconductor"); MODULE_DESCRIPTION("Anolog Device ADV7180 video decoder driver"); MODULE_LICENSE("GPL");
gpl-2.0
JFCM121CAKE/android_kernel_samsung_jf
mm/mlock.c
295
16365
/* * linux/mm/mlock.c * * (C) Copyright 1995 Linus Torvalds * (C) Copyright 2002 Christoph Hellwig */ #include <linux/capability.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/pagemap.h> #include <linux/mempolicy.h> #include <linux/syscalls.h> #include <linux/sched.h> #include <linux/export.h> #include <linux/rmap.h> #include <linux/mmzone.h> #include <linux/hugetlb.h> #include "internal.h" int can_do_mlock(void) { if (capable(CAP_IPC_LOCK)) return 1; if (rlimit(RLIMIT_MEMLOCK) != 0) return 1; return 0; } EXPORT_SYMBOL(can_do_mlock); /* * Mlocked pages are marked with PageMlocked() flag for efficient testing * in vmscan and, possibly, the fault path; and to support semi-accurate * statistics. * * An mlocked page [PageMlocked(page)] is unevictable. As such, it will * be placed on the LRU "unevictable" list, rather than the [in]active lists. * The unevictable list is an LRU sibling list to the [in]active lists. * PageUnevictable is set to indicate the unevictable state. * * When lazy mlocking via vmscan, it is important to ensure that the * vma's VM_LOCKED status is not concurrently being modified, otherwise we * may have mlocked a page that is being munlocked. So lazy mlock must take * the mmap_sem for read, and verify that the vma really is locked * (see mm/rmap.c). */ /* * LRU accounting for clear_page_mlock() */ void __clear_page_mlock(struct page *page) { VM_BUG_ON(!PageLocked(page)); if (!page->mapping) { /* truncated ? */ return; } dec_zone_page_state(page, NR_MLOCK); count_vm_event(UNEVICTABLE_PGCLEARED); if (!isolate_lru_page(page)) { putback_lru_page(page); } else { /* * We lost the race. the page already moved to evictable list. */ if (PageUnevictable(page)) count_vm_event(UNEVICTABLE_PGSTRANDED); } } /* * Mark page as mlocked if not already. * If page on LRU, isolate and putback to move to unevictable list. 
*/ void mlock_vma_page(struct page *page) { /* Serialize with page migration */ BUG_ON(!PageLocked(page)); if (!TestSetPageMlocked(page)) { inc_zone_page_state(page, NR_MLOCK); count_vm_event(UNEVICTABLE_PGMLOCKED); if (!isolate_lru_page(page)) putback_lru_page(page); } } /** * munlock_vma_page - munlock a vma page * @page - page to be unlocked * * called from munlock()/munmap() path with page supposedly on the LRU. * When we munlock a page, because the vma where we found the page is being * munlock()ed or munmap()ed, we want to check whether other vmas hold the * page locked so that we can leave it on the unevictable lru list and not * bother vmscan with it. However, to walk the page's rmap list in * try_to_munlock() we must isolate the page from the LRU. If some other * task has removed the page from the LRU, we won't be able to do that. * So we clear the PageMlocked as we might not get another chance. If we * can't isolate the page, we leave it for putback_lru_page() and vmscan * [page_referenced()/try_to_unmap()] to deal with. */ void munlock_vma_page(struct page *page) { /* For try_to_munlock() and to serialize with page migration */ BUG_ON(!PageLocked(page)); if (TestClearPageMlocked(page)) { dec_zone_page_state(page, NR_MLOCK); if (!isolate_lru_page(page)) { int ret = SWAP_AGAIN; /* * Optimization: if the page was mapped just once, * that's our mapping and we don't need to check all the * other vmas. */ if (page_mapcount(page) > 1) ret = try_to_munlock(page); /* * did try_to_unlock() succeed or punt? */ if (ret != SWAP_MLOCK) count_vm_event(UNEVICTABLE_PGMUNLOCKED); putback_lru_page(page); } else { /* * Some other task has removed the page from the LRU. * putback_lru_page() will take care of removing the * page from the unevictable list, if necessary. * vmscan [page_referenced()] will move the page back * to the unevictable list if some other vma has it * mlocked. 
*/
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
			else
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
		}
	}
}

/**
 * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 *
 * This takes care of making the pages present too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end,
				    int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	int nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	/* range must be page-aligned and fully inside this vma */
	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

/**
 * mlock_vma_pages_range() - mlock pages in specified vma range.
 * @vma - the vma containing the specified address range
 * @start - starting address in @vma to mlock
 * @end - end address [+1] in @vma to mlock
 *
 * For mmap()/mremap()/expansion of mlocked vma.
 *
 * return 0 on success for "normal" vmas.
*
 * return number of pages [> 0] to be removed from locked_vm on success
 * of "special" vmas.
 */
long mlock_vma_pages_range(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end)
{
	int nr_pages = (end - start) / PAGE_SIZE;

	BUG_ON(!(vma->vm_flags & VM_LOCKED));

	/*
	 * filter unlockable vmas
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		goto no_mlock;

	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current->mm) ||
			((use_user_accessible_timers() &&
			(vma == get_user_timers_vma(current->mm)))))) {

		__mlock_vma_pages_range(vma, start, end, NULL);

		/* Hide errors from mmap() and other callers */
		return 0;
	}

	/*
	 * User mapped kernel pages or huge pages:
	 * make these pages present to populate the ptes, but
	 * fall thru' to reset VM_LOCKED--no need to unlock, and
	 * return nr_pages so these don't get counted against task's
	 * locked limit.  huge pages are already counted against
	 * locked vm limit.
	 */
	make_pages_present(start, end);

no_mlock:
	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
	return nr_pages;		/* error or pages NOT mlocked */
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
*/
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;

	/* drain per-cpu LRU pagevecs so pages can be isolated below */
	lru_add_drain();
	vma->vm_flags &= ~VM_LOCKED;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page;
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
		if (page && !IS_ERR(page)) {
			lock_page(page);
			/*
			 * Like in __mlock_vma_pages_range(),
			 * because we lock page here and migration is
			 * blocked by the elevated reference, we need
			 * only check for file-cache page truncation.
			 */
			if (page->mapping)
				munlock_vma_page(page);
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
}

/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
*/ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, vm_flags_t newflags) { struct mm_struct *mm = vma->vm_mm; pgoff_t pgoff; int nr_pages; int ret = 0; int lock = !!(newflags & VM_LOCKED); if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) || ((use_user_accessible_timers()) && (vma == get_user_timers_vma(current->mm)))) goto out; /* don't set VM_LOCKED, don't count */ pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), vma_get_anon_name(vma)); if (*prev) { vma = *prev; goto success; } if (start != vma->vm_start) { ret = split_vma(mm, vma, start, 1); if (ret) goto out; } if (end != vma->vm_end) { ret = split_vma(mm, vma, end, 0); if (ret) goto out; } success: /* * Keep track of amount of locked VM. */ nr_pages = (end - start) >> PAGE_SHIFT; if (!lock) nr_pages = -nr_pages; mm->locked_vm += nr_pages; /* * vm_flags is protected by the mmap_sem held in write mode. * It's okay if try_to_unmap_one unmaps a page just after we * set VM_LOCKED, __mlock_vma_pages_range will bring it back. */ if (lock) vma->vm_flags = newflags; else munlock_vma_pages_range(vma, start, end); out: *prev = vma; return ret; } static int do_mlock(unsigned long start, size_t len, int on) { unsigned long nstart, end, tmp; struct vm_area_struct * vma, * prev; int error; VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(len != PAGE_ALIGN(len)); end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; vma = find_vma(current->mm, start); if (!vma || vma->vm_start > start) return -ENOMEM; prev = vma->vm_prev; if (start > vma->vm_start) prev = vma; for (nstart = start ; ; ) { vm_flags_t newflags; /* Here we know that vma->vm_start <= nstart < vma->vm_end. 
*/ newflags = vma->vm_flags | VM_LOCKED; if (!on) newflags &= ~VM_LOCKED; tmp = vma->vm_end; if (tmp > end) tmp = end; error = mlock_fixup(vma, &prev, nstart, tmp, newflags); if (error) break; nstart = tmp; if (nstart < prev->vm_end) nstart = prev->vm_end; if (nstart >= end) break; vma = prev->vm_next; if (!vma || vma->vm_start != nstart) { error = -ENOMEM; break; } } return error; } static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors) { struct mm_struct *mm = current->mm; unsigned long end, nstart, nend; struct vm_area_struct *vma = NULL; int locked = 0; int ret = 0; VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(len != PAGE_ALIGN(len)); end = start + len; for (nstart = start; nstart < end; nstart = nend) { /* * We want to fault in pages for [nstart; end) address range. * Find first corresponding VMA. */ if (!locked) { locked = 1; down_read(&mm->mmap_sem); vma = find_vma(mm, nstart); } else if (nstart >= vma->vm_end) vma = vma->vm_next; if (!vma || vma->vm_start >= end) break; /* * Set [nstart; nend) to intersection of desired address * range with the first VMA. Also, skip undesirable VMA types. */ nend = min(end, vma->vm_end); if (vma->vm_flags & (VM_IO | VM_PFNMAP)) continue; if (nstart < vma->vm_start) nstart = vma->vm_start; /* * Now fault in a range of pages. __mlock_vma_pages_range() * double checks the vma flags, so that it won't mlock pages * if the vma was already munlocked. 
*/ ret = __mlock_vma_pages_range(vma, nstart, nend, &locked); if (ret < 0) { if (ignore_errors) { ret = 0; continue; /* continue at next VMA */ } ret = __mlock_posix_error_return(ret); break; } nend = nstart + ret * PAGE_SIZE; ret = 0; } if (locked) up_read(&mm->mmap_sem); return ret; /* 0 or negative error code */ } SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) { unsigned long locked; unsigned long lock_limit; int error = -ENOMEM; if (!can_do_mlock()) return -EPERM; lru_add_drain_all(); /* flush pagevec */ down_write(&current->mm->mmap_sem); len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; locked = len >> PAGE_SHIFT; locked += current->mm->locked_vm; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; /* check against resource limits */ if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) error = do_mlock(start, len, 1); up_write(&current->mm->mmap_sem); if (!error) error = do_mlock_pages(start, len, 0); return error; } SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) { int ret; down_write(&current->mm->mmap_sem); len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; ret = do_mlock(start, len, 0); up_write(&current->mm->mmap_sem); return ret; } static int do_mlockall(int flags) { struct vm_area_struct * vma, * prev = NULL; unsigned int def_flags = 0; if (flags & MCL_FUTURE) def_flags = VM_LOCKED; current->mm->def_flags = def_flags; if (flags == MCL_FUTURE) goto out; for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { vm_flags_t newflags; newflags = vma->vm_flags | VM_LOCKED; if (!(flags & MCL_CURRENT)) newflags &= ~VM_LOCKED; /* Ignore errors */ mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); } out: return 0; } SYSCALL_DEFINE1(mlockall, int, flags) { unsigned long lock_limit; int ret = -EINVAL; if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE))) goto out; ret = -EPERM; if (!can_do_mlock()) goto out; if (flags & MCL_CURRENT) lru_add_drain_all(); /* flush pagevec */ 
down_write(&current->mm->mmap_sem); lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; ret = -ENOMEM; if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || capable(CAP_IPC_LOCK)) ret = do_mlockall(flags); up_write(&current->mm->mmap_sem); if (!ret && (flags & MCL_CURRENT)) { /* Ignore errors */ do_mlock_pages(0, TASK_SIZE, 1); } out: return ret; } SYSCALL_DEFINE0(munlockall) { int ret; down_write(&current->mm->mmap_sem); ret = do_mlockall(0); up_write(&current->mm->mmap_sem); return ret; } /* * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB * shm segments) get accounted against the user_struct instead. */ static DEFINE_SPINLOCK(shmlock_user_lock); int user_shm_lock(size_t size, struct user_struct *user) { unsigned long lock_limit, locked; int allowed = 0; locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; lock_limit = rlimit(RLIMIT_MEMLOCK); if (lock_limit == RLIM_INFINITY) allowed = 1; lock_limit >>= PAGE_SHIFT; spin_lock(&shmlock_user_lock); if (!allowed && locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK)) goto out; get_uid(user); user->locked_shm += locked; allowed = 1; out: spin_unlock(&shmlock_user_lock); return allowed; } void user_shm_unlock(size_t size, struct user_struct *user) { spin_lock(&shmlock_user_lock); user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT; spin_unlock(&shmlock_user_lock); free_uid(user); }
gpl-2.0
kevin-a-naude/linux
drivers/media/pci/pt1/va1j5jf8007s.c
807
15554
/* * ISDB-S driver for VA1J5JF8007/VA1J5JF8011 * * Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info> * * based on pt1dvr - http://pt1dvr.sourceforge.jp/ * by Tomoaki Ishikawa <tomy@users.sourceforge.jp> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include "dvb_frontend.h" #include "va1j5jf8007s.h" enum va1j5jf8007s_tune_state { VA1J5JF8007S_IDLE, VA1J5JF8007S_SET_FREQUENCY_1, VA1J5JF8007S_SET_FREQUENCY_2, VA1J5JF8007S_SET_FREQUENCY_3, VA1J5JF8007S_CHECK_FREQUENCY, VA1J5JF8007S_SET_MODULATION, VA1J5JF8007S_CHECK_MODULATION, VA1J5JF8007S_SET_TS_ID, VA1J5JF8007S_CHECK_TS_ID, VA1J5JF8007S_TRACK, }; struct va1j5jf8007s_state { const struct va1j5jf8007s_config *config; struct i2c_adapter *adap; struct dvb_frontend fe; enum va1j5jf8007s_tune_state tune_state; }; static int va1j5jf8007s_read_snr(struct dvb_frontend *fe, u16 *snr) { struct va1j5jf8007s_state *state; u8 addr; int i; u8 write_buf[1], read_buf[1]; struct i2c_msg msgs[2]; s32 word, x1, x2, x3, x4, x5, y; state = fe->demodulator_priv; addr = state->config->demod_address; word = 0; for (i = 0; i < 2; i++) { write_buf[0] = 0xbc + i; msgs[0].addr = addr; msgs[0].flags = 0; msgs[0].len = sizeof(write_buf); msgs[0].buf = write_buf; msgs[1].addr = addr; msgs[1].flags = I2C_M_RD; 
msgs[1].len = sizeof(read_buf); msgs[1].buf = read_buf; if (i2c_transfer(state->adap, msgs, 2) != 2) return -EREMOTEIO; word <<= 8; word |= read_buf[0]; } word -= 3000; if (word < 0) word = 0; x1 = int_sqrt(word << 16) * ((15625ll << 21) / 1000000); x2 = (s64)x1 * x1 >> 31; x3 = (s64)x2 * x1 >> 31; x4 = (s64)x2 * x2 >> 31; x5 = (s64)x4 * x1 >> 31; y = (58857ll << 23) / 1000; y -= (s64)x1 * ((89565ll << 24) / 1000) >> 30; y += (s64)x2 * ((88977ll << 24) / 1000) >> 28; y -= (s64)x3 * ((50259ll << 25) / 1000) >> 27; y += (s64)x4 * ((14341ll << 27) / 1000) >> 27; y -= (s64)x5 * ((16346ll << 30) / 10000) >> 28; *snr = y < 0 ? 0 : y >> 15; return 0; } static int va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_HW; } static int va1j5jf8007s_read_status(struct dvb_frontend *fe, enum fe_status *status) { struct va1j5jf8007s_state *state; state = fe->demodulator_priv; switch (state->tune_state) { case VA1J5JF8007S_IDLE: case VA1J5JF8007S_SET_FREQUENCY_1: case VA1J5JF8007S_SET_FREQUENCY_2: case VA1J5JF8007S_SET_FREQUENCY_3: case VA1J5JF8007S_CHECK_FREQUENCY: *status = 0; return 0; case VA1J5JF8007S_SET_MODULATION: case VA1J5JF8007S_CHECK_MODULATION: *status |= FE_HAS_SIGNAL; return 0; case VA1J5JF8007S_SET_TS_ID: case VA1J5JF8007S_CHECK_TS_ID: *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER; return 0; case VA1J5JF8007S_TRACK: *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_LOCK; return 0; } BUG(); } struct va1j5jf8007s_cb_map { u32 frequency; u8 cb; }; static const struct va1j5jf8007s_cb_map va1j5jf8007s_cb_maps[] = { { 986000, 0xb2 }, { 1072000, 0xd2 }, { 1154000, 0xe2 }, { 1291000, 0x20 }, { 1447000, 0x40 }, { 1615000, 0x60 }, { 1791000, 0x80 }, { 1972000, 0xa0 }, }; static u8 va1j5jf8007s_lookup_cb(u32 frequency) { int i; const struct va1j5jf8007s_cb_map *map; for (i = 0; i < ARRAY_SIZE(va1j5jf8007s_cb_maps); i++) { map = &va1j5jf8007s_cb_maps[i]; if (frequency < map->frequency) return map->cb; } return 0xc0; } static int 
va1j5jf8007s_set_frequency_1(struct va1j5jf8007s_state *state) { u32 frequency; u16 word; u8 buf[6]; struct i2c_msg msg; frequency = state->fe.dtv_property_cache.frequency; word = (frequency + 500) / 1000; if (frequency < 1072000) word = (word << 1 & ~0x1f) | (word & 0x0f); buf[0] = 0xfe; buf[1] = 0xc0; buf[2] = 0x40 | word >> 8; buf[3] = word; buf[4] = 0xe0; buf[5] = va1j5jf8007s_lookup_cb(frequency); msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; return 0; } static int va1j5jf8007s_set_frequency_2(struct va1j5jf8007s_state *state) { u8 buf[3]; struct i2c_msg msg; buf[0] = 0xfe; buf[1] = 0xc0; buf[2] = 0xe4; msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; return 0; } static int va1j5jf8007s_set_frequency_3(struct va1j5jf8007s_state *state) { u32 frequency; u8 buf[4]; struct i2c_msg msg; frequency = state->fe.dtv_property_cache.frequency; buf[0] = 0xfe; buf[1] = 0xc0; buf[2] = 0xf4; buf[3] = va1j5jf8007s_lookup_cb(frequency) | 0x4; msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; return 0; } static int va1j5jf8007s_check_frequency(struct va1j5jf8007s_state *state, int *lock) { u8 addr; u8 write_buf[2], read_buf[1]; struct i2c_msg msgs[2]; addr = state->config->demod_address; write_buf[0] = 0xfe; write_buf[1] = 0xc1; msgs[0].addr = addr; msgs[0].flags = 0; msgs[0].len = sizeof(write_buf); msgs[0].buf = write_buf; msgs[1].addr = addr; msgs[1].flags = I2C_M_RD; msgs[1].len = sizeof(read_buf); msgs[1].buf = read_buf; if (i2c_transfer(state->adap, msgs, 2) != 2) return -EREMOTEIO; *lock = read_buf[0] & 0x40; return 0; } static int va1j5jf8007s_set_modulation(struct va1j5jf8007s_state *state) { u8 buf[2]; struct i2c_msg msg; buf[0] = 0x03; buf[1] 
= 0x01; msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; return 0; } static int va1j5jf8007s_check_modulation(struct va1j5jf8007s_state *state, int *lock) { u8 addr; u8 write_buf[1], read_buf[1]; struct i2c_msg msgs[2]; addr = state->config->demod_address; write_buf[0] = 0xc3; msgs[0].addr = addr; msgs[0].flags = 0; msgs[0].len = sizeof(write_buf); msgs[0].buf = write_buf; msgs[1].addr = addr; msgs[1].flags = I2C_M_RD; msgs[1].len = sizeof(read_buf); msgs[1].buf = read_buf; if (i2c_transfer(state->adap, msgs, 2) != 2) return -EREMOTEIO; *lock = !(read_buf[0] & 0x10); return 0; } static int va1j5jf8007s_set_ts_id(struct va1j5jf8007s_state *state) { u32 ts_id; u8 buf[3]; struct i2c_msg msg; ts_id = state->fe.dtv_property_cache.stream_id; if (!ts_id || ts_id == NO_STREAM_ID_FILTER) return 0; buf[0] = 0x8f; buf[1] = ts_id >> 8; buf[2] = ts_id; msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; return 0; } static int va1j5jf8007s_check_ts_id(struct va1j5jf8007s_state *state, int *lock) { u8 addr; u8 write_buf[1], read_buf[2]; struct i2c_msg msgs[2]; u32 ts_id; ts_id = state->fe.dtv_property_cache.stream_id; if (!ts_id || ts_id == NO_STREAM_ID_FILTER) { *lock = 1; return 0; } addr = state->config->demod_address; write_buf[0] = 0xe6; msgs[0].addr = addr; msgs[0].flags = 0; msgs[0].len = sizeof(write_buf); msgs[0].buf = write_buf; msgs[1].addr = addr; msgs[1].flags = I2C_M_RD; msgs[1].len = sizeof(read_buf); msgs[1].buf = read_buf; if (i2c_transfer(state->adap, msgs, 2) != 2) return -EREMOTEIO; *lock = (read_buf[0] << 8 | read_buf[1]) == ts_id; return 0; } static int va1j5jf8007s_tune(struct dvb_frontend *fe, bool re_tune, unsigned int mode_flags, unsigned int *delay, enum fe_status *status) { struct va1j5jf8007s_state *state; int ret; int lock = 0; state = 
fe->demodulator_priv; if (re_tune) state->tune_state = VA1J5JF8007S_SET_FREQUENCY_1; switch (state->tune_state) { case VA1J5JF8007S_IDLE: *delay = 3 * HZ; *status = 0; return 0; case VA1J5JF8007S_SET_FREQUENCY_1: ret = va1j5jf8007s_set_frequency_1(state); if (ret < 0) return ret; state->tune_state = VA1J5JF8007S_SET_FREQUENCY_2; *delay = 0; *status = 0; return 0; case VA1J5JF8007S_SET_FREQUENCY_2: ret = va1j5jf8007s_set_frequency_2(state); if (ret < 0) return ret; state->tune_state = VA1J5JF8007S_SET_FREQUENCY_3; *delay = (HZ + 99) / 100; *status = 0; return 0; case VA1J5JF8007S_SET_FREQUENCY_3: ret = va1j5jf8007s_set_frequency_3(state); if (ret < 0) return ret; state->tune_state = VA1J5JF8007S_CHECK_FREQUENCY; *delay = 0; *status = 0; return 0; case VA1J5JF8007S_CHECK_FREQUENCY: ret = va1j5jf8007s_check_frequency(state, &lock); if (ret < 0) return ret; if (!lock) { *delay = (HZ + 999) / 1000; *status = 0; return 0; } state->tune_state = VA1J5JF8007S_SET_MODULATION; *delay = 0; *status = FE_HAS_SIGNAL; return 0; case VA1J5JF8007S_SET_MODULATION: ret = va1j5jf8007s_set_modulation(state); if (ret < 0) return ret; state->tune_state = VA1J5JF8007S_CHECK_MODULATION; *delay = 0; *status = FE_HAS_SIGNAL; return 0; case VA1J5JF8007S_CHECK_MODULATION: ret = va1j5jf8007s_check_modulation(state, &lock); if (ret < 0) return ret; if (!lock) { *delay = (HZ + 49) / 50; *status = FE_HAS_SIGNAL; return 0; } state->tune_state = VA1J5JF8007S_SET_TS_ID; *delay = 0; *status = FE_HAS_SIGNAL | FE_HAS_CARRIER; return 0; case VA1J5JF8007S_SET_TS_ID: ret = va1j5jf8007s_set_ts_id(state); if (ret < 0) return ret; state->tune_state = VA1J5JF8007S_CHECK_TS_ID; return 0; case VA1J5JF8007S_CHECK_TS_ID: ret = va1j5jf8007s_check_ts_id(state, &lock); if (ret < 0) return ret; if (!lock) { *delay = (HZ + 99) / 100; *status = FE_HAS_SIGNAL | FE_HAS_CARRIER; return 0; } state->tune_state = VA1J5JF8007S_TRACK; /* fall through */ case VA1J5JF8007S_TRACK: *delay = 3 * HZ; *status = FE_HAS_SIGNAL | 
FE_HAS_CARRIER | FE_HAS_LOCK; return 0; } BUG(); } static int va1j5jf8007s_init_frequency(struct va1j5jf8007s_state *state) { u8 buf[4]; struct i2c_msg msg; buf[0] = 0xfe; buf[1] = 0xc0; buf[2] = 0xf0; buf[3] = 0x04; msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; return 0; } static int va1j5jf8007s_set_sleep(struct va1j5jf8007s_state *state, int sleep) { u8 buf[2]; struct i2c_msg msg; buf[0] = 0x17; buf[1] = sleep ? 0x01 : 0x00; msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; return 0; } static int va1j5jf8007s_sleep(struct dvb_frontend *fe) { struct va1j5jf8007s_state *state; int ret; state = fe->demodulator_priv; ret = va1j5jf8007s_init_frequency(state); if (ret < 0) return ret; return va1j5jf8007s_set_sleep(state, 1); } static int va1j5jf8007s_init(struct dvb_frontend *fe) { struct va1j5jf8007s_state *state; state = fe->demodulator_priv; state->tune_state = VA1J5JF8007S_IDLE; return va1j5jf8007s_set_sleep(state, 0); } static void va1j5jf8007s_release(struct dvb_frontend *fe) { struct va1j5jf8007s_state *state; state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops va1j5jf8007s_ops = { .delsys = { SYS_ISDBS }, .info = { .name = "VA1J5JF8007/VA1J5JF8011 ISDB-S", .frequency_min = 950000, .frequency_max = 2150000, .frequency_stepsize = 1000, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_AUTO | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO | FE_CAN_MULTISTREAM, }, .read_snr = va1j5jf8007s_read_snr, .get_frontend_algo = va1j5jf8007s_get_frontend_algo, .read_status = va1j5jf8007s_read_status, .tune = va1j5jf8007s_tune, .sleep = va1j5jf8007s_sleep, .init = va1j5jf8007s_init, .release = va1j5jf8007s_release, }; static int va1j5jf8007s_prepare_1(struct va1j5jf8007s_state *state) { 
u8 addr; u8 write_buf[1], read_buf[1]; struct i2c_msg msgs[2]; addr = state->config->demod_address; write_buf[0] = 0x07; msgs[0].addr = addr; msgs[0].flags = 0; msgs[0].len = sizeof(write_buf); msgs[0].buf = write_buf; msgs[1].addr = addr; msgs[1].flags = I2C_M_RD; msgs[1].len = sizeof(read_buf); msgs[1].buf = read_buf; if (i2c_transfer(state->adap, msgs, 2) != 2) return -EREMOTEIO; if (read_buf[0] != 0x41) return -EIO; return 0; } static const u8 va1j5jf8007s_20mhz_prepare_bufs[][2] = { {0x04, 0x02}, {0x0d, 0x55}, {0x11, 0x40}, {0x13, 0x80}, {0x17, 0x01}, {0x1c, 0x0a}, {0x1d, 0xaa}, {0x1e, 0x20}, {0x1f, 0x88}, {0x51, 0xb0}, {0x52, 0x89}, {0x53, 0xb3}, {0x5a, 0x2d}, {0x5b, 0xd3}, {0x85, 0x69}, {0x87, 0x04}, {0x8e, 0x02}, {0xa3, 0xf7}, {0xa5, 0xc0}, }; static const u8 va1j5jf8007s_25mhz_prepare_bufs[][2] = { {0x04, 0x02}, {0x11, 0x40}, {0x13, 0x80}, {0x17, 0x01}, {0x1c, 0x0a}, {0x1d, 0xaa}, {0x1e, 0x20}, {0x1f, 0x88}, {0x51, 0xb0}, {0x52, 0x89}, {0x53, 0xb3}, {0x5a, 0x2d}, {0x5b, 0xd3}, {0x85, 0x69}, {0x87, 0x04}, {0x8e, 0x26}, {0xa3, 0xf7}, {0xa5, 0xc0}, }; static int va1j5jf8007s_prepare_2(struct va1j5jf8007s_state *state) { const u8 (*bufs)[2]; int size; u8 addr; u8 buf[2]; struct i2c_msg msg; int i; switch (state->config->frequency) { case VA1J5JF8007S_20MHZ: bufs = va1j5jf8007s_20mhz_prepare_bufs; size = ARRAY_SIZE(va1j5jf8007s_20mhz_prepare_bufs); break; case VA1J5JF8007S_25MHZ: bufs = va1j5jf8007s_25mhz_prepare_bufs; size = ARRAY_SIZE(va1j5jf8007s_25mhz_prepare_bufs); break; default: return -EINVAL; } addr = state->config->demod_address; msg.addr = addr; msg.flags = 0; msg.len = 2; msg.buf = buf; for (i = 0; i < size; i++) { memcpy(buf, bufs[i], sizeof(buf)); if (i2c_transfer(state->adap, &msg, 1) != 1) return -EREMOTEIO; } return 0; } /* must be called after va1j5jf8007t_attach */ int va1j5jf8007s_prepare(struct dvb_frontend *fe) { struct va1j5jf8007s_state *state; int ret; state = fe->demodulator_priv; ret = va1j5jf8007s_prepare_1(state); if (ret < 0) 
return ret; ret = va1j5jf8007s_prepare_2(state); if (ret < 0) return ret; return va1j5jf8007s_init_frequency(state); } struct dvb_frontend * va1j5jf8007s_attach(const struct va1j5jf8007s_config *config, struct i2c_adapter *adap) { struct va1j5jf8007s_state *state; struct dvb_frontend *fe; u8 buf[2]; struct i2c_msg msg; state = kzalloc(sizeof(struct va1j5jf8007s_state), GFP_KERNEL); if (!state) return NULL; state->config = config; state->adap = adap; fe = &state->fe; memcpy(&fe->ops, &va1j5jf8007s_ops, sizeof(struct dvb_frontend_ops)); fe->demodulator_priv = state; buf[0] = 0x01; buf[1] = 0x80; msg.addr = state->config->demod_address; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; if (i2c_transfer(state->adap, &msg, 1) != 1) { kfree(state); return NULL; } return fe; }
gpl-2.0
idanfima/Jetstreamkernel
drivers/net/qlge/qlge_mpi.c
807
31096
#include "qlge.h"

/* Clear the MPI RISC pause state.
 * Returns -EIO if the RISC is not currently paused.
 */
int ql_unpause_mpi_risc(struct ql_adapter *qdev)
{
	u32 tmp;

	/* Un-pause the RISC */
	tmp = ql_read32(qdev, CSR);
	if (!(tmp & CSR_RP))
		return -EIO;

	ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
	return 0;
}

/* Pause the MPI RISC, polling until CSR reflects the paused state.
 * Returns -ETIMEDOUT if the pause bit never appears.
 */
int ql_pause_mpi_risc(struct ql_adapter *qdev)
{
	u32 tmp;
	int count = UDELAY_COUNT;

	/* Pause the RISC */
	ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
	do {
		tmp = ql_read32(qdev, CSR);
		if (tmp & CSR_RP)
			break;
		mdelay(UDELAY_DELAY);
		count--;
	} while (count);
	return (count == 0) ? -ETIMEDOUT : 0;
}

/* Assert the RISC reset bit, then clear it once the reset is observed.
 * Returns -ETIMEDOUT if the reset state never shows up in CSR.
 */
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
{
	u32 tmp;
	int count = UDELAY_COUNT;

	/* Reset the RISC */
	ql_write32(qdev, CSR, CSR_CMD_SET_RST);
	do {
		tmp = ql_read32(qdev, CSR);
		if (tmp & CSR_RR) {
			ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
			break;
		}
		mdelay(UDELAY_DELAY);
		count--;
	} while (count);
	return (count == 0) ? -ETIMEDOUT : 0;
}

/* Read one MPI processor register through the indirect
 * PROC_ADDR/PROC_DATA interface.  The value is returned via @data.
 */
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, PROC_DATA);
exit:
	return status;
}

/* Write one MPI processor register through the indirect
 * PROC_ADDR/PROC_DATA interface.
 */
int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
	if (status)
		goto exit;
	/* write the data to the data reg */
	ql_write32(qdev, PROC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, PROC_ADDR, reg);
	/* wait for the write to complete */
	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
exit:
	return status;
}

/* Request a soft reset of the MPI RISC by writing its reset register. */
int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
{
	return ql_write_mpi_reg(qdev, 0x00001010, 1);
}

/* Determine if we are in charge
of the firmware. If
*/ ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT); end: ql_sem_unlock(qdev, SEM_PROC_REG_MASK); return status; } /* We are being asked by firmware to accept * a change to the port. This is only * a change to max frame sizes (Tx/Rx), pause * parameters, or loopback mode. We wake up a worker * to handler processing this since a mailbox command * will need to be sent to ACK the request. */ static int ql_idc_req_aen(struct ql_adapter *qdev) { int status; struct mbox_params *mbcp = &qdev->idc_mbc; netif_err(qdev, drv, qdev->ndev, "Enter!\n"); /* Get the status data and start up a thread to * handle the request. */ mbcp = &qdev->idc_mbc; mbcp->out_count = 4; status = ql_get_mb_sts(qdev, mbcp); if (status) { netif_err(qdev, drv, qdev->ndev, "Could not read MPI, resetting ASIC!\n"); ql_queue_asic_error(qdev); } else { /* Begin polled mode early so * we don't get another interrupt * when we leave mpi_worker. */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0); } return status; } /* Process an inter-device event completion. * If good, signal the caller's completion. */ static int ql_idc_cmplt_aen(struct ql_adapter *qdev) { int status; struct mbox_params *mbcp = &qdev->idc_mbc; mbcp->out_count = 4; status = ql_get_mb_sts(qdev, mbcp); if (status) { netif_err(qdev, drv, qdev->ndev, "Could not read MPI, resetting RISC!\n"); ql_queue_fw_error(qdev); } else /* Wake up the sleeping mpi_idc_work thread that is * waiting for this event. */ complete(&qdev->ide_completion); return status; } static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp) { int status; mbcp->out_count = 2; status = ql_get_mb_sts(qdev, mbcp); if (status) { netif_err(qdev, drv, qdev->ndev, "%s: Could not get mailbox status.\n", __func__); return; } qdev->link_status = mbcp->mbox_out[1]; netif_err(qdev, drv, qdev->ndev, "Link Up.\n"); /* If we're coming back from an IDC event * then set up the CAM and frame routing. 
*/ if (test_bit(QL_CAM_RT_SET, &qdev->flags)) { status = ql_cam_route_initialize(qdev); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to init CAM/Routing tables.\n"); return; } else clear_bit(QL_CAM_RT_SET, &qdev->flags); } /* Queue up a worker to check the frame * size information, and fix it if it's not * to our liking. */ if (!test_bit(QL_PORT_CFG, &qdev->flags)) { netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n"); set_bit(QL_PORT_CFG, &qdev->flags); /* Begin polled mode early so * we don't get another interrupt * when we leave mpi_worker dpc. */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0); } ql_link_on(qdev); } static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp) { int status; mbcp->out_count = 3; status = ql_get_mb_sts(qdev, mbcp); if (status) netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n"); ql_link_off(qdev); } static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp) { int status; mbcp->out_count = 5; status = ql_get_mb_sts(qdev, mbcp); if (status) netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n"); else netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n"); return status; } static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp) { int status; mbcp->out_count = 1; status = ql_get_mb_sts(qdev, mbcp); if (status) netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n"); else netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n"); return status; } static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp) { int status; mbcp->out_count = 6; status = ql_get_mb_sts(qdev, mbcp); if (status) netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n"); else { int i; netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n"); for (i = 0; i < mbcp->out_count; i++) netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n", i, mbcp->mbox_out[i]); } return status; } static void 
ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp) { int status; mbcp->out_count = 2; status = ql_get_mb_sts(qdev, mbcp); if (status) { netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n"); } else { netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n", mbcp->mbox_out[1]); qdev->fw_rev_id = mbcp->mbox_out[1]; status = ql_cam_route_initialize(qdev); if (status) netif_err(qdev, ifup, qdev->ndev, "Failed to init CAM/Routing tables.\n"); } } /* Process an async event and clear it unless it's an * error condition. * This can get called iteratively from the mpi_work thread * when events arrive via an interrupt. * It also gets called when a mailbox command is polling for * it's completion. */ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) { int status; int orig_count = mbcp->out_count; /* Just get mailbox zero for now. */ mbcp->out_count = 1; status = ql_get_mb_sts(qdev, mbcp); if (status) { netif_err(qdev, drv, qdev->ndev, "Could not read MPI, resetting ASIC!\n"); ql_queue_asic_error(qdev); goto end; } switch (mbcp->mbox_out[0]) { /* This case is only active when we arrive here * as a result of issuing a mailbox command to * the firmware. */ case MB_CMD_STS_INTRMDT: case MB_CMD_STS_GOOD: case MB_CMD_STS_INVLD_CMD: case MB_CMD_STS_XFC_ERR: case MB_CMD_STS_CSUM_ERR: case MB_CMD_STS_ERR: case MB_CMD_STS_PARAM_ERR: /* We can only get mailbox status if we're polling from an * unfinished command. Get the rest of the status data and * return back to the caller. * We only end up here when we're polling for a mailbox * command completion. */ mbcp->out_count = orig_count; status = ql_get_mb_sts(qdev, mbcp); return status; /* We are being asked by firmware to accept * a change to the port. This is only * a change to max frame sizes (Tx/Rx), pause * parameters, or loopback mode. */ case AEN_IDC_REQ: status = ql_idc_req_aen(qdev); break; /* Process and inbound IDC event. 
* This will happen when we're trying to * change tx/rx max frame size, change pause * parameters or loopback mode. */ case AEN_IDC_CMPLT: case AEN_IDC_EXT: status = ql_idc_cmplt_aen(qdev); break; case AEN_LINK_UP: ql_link_up(qdev, mbcp); break; case AEN_LINK_DOWN: ql_link_down(qdev, mbcp); break; case AEN_FW_INIT_DONE: /* If we're in process on executing the firmware, * then convert the status to normal mailbox status. */ if (mbcp->mbox_in[0] == MB_CMD_EX_FW) { mbcp->out_count = orig_count; status = ql_get_mb_sts(qdev, mbcp); mbcp->mbox_out[0] = MB_CMD_STS_GOOD; return status; } ql_init_fw_done(qdev, mbcp); break; case AEN_AEN_SFP_IN: ql_sfp_in(qdev, mbcp); break; case AEN_AEN_SFP_OUT: ql_sfp_out(qdev, mbcp); break; /* This event can arrive at boot time or after an * MPI reset if the firmware failed to initialize. */ case AEN_FW_INIT_FAIL: /* If we're in process on executing the firmware, * then convert the status to normal mailbox status. */ if (mbcp->mbox_in[0] == MB_CMD_EX_FW) { mbcp->out_count = orig_count; status = ql_get_mb_sts(qdev, mbcp); mbcp->mbox_out[0] = MB_CMD_STS_ERR; return status; } netif_err(qdev, drv, qdev->ndev, "Firmware initialization failed.\n"); status = -EIO; ql_queue_fw_error(qdev); break; case AEN_SYS_ERR: netif_err(qdev, drv, qdev->ndev, "System Error.\n"); ql_queue_fw_error(qdev); status = -EIO; break; case AEN_AEN_LOST: ql_aen_lost(qdev, mbcp); break; case AEN_DCBX_CHG: /* Need to support AEN 8110 */ break; default: netif_err(qdev, drv, qdev->ndev, "Unsupported AE %.08x.\n", mbcp->mbox_out[0]); /* Clear the MPI firmware status. */ } end: ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); /* Restore the original mailbox count to * what the caller asked for. This can get * changed when a mailbox command is waiting * for a response and an AEN arrives and * is handled. * */ mbcp->out_count = orig_count; return status; } /* Execute a single mailbox command. * mbcp is a pointer to an array of u32. 
Each * element in the array contains the value for it's * respective mailbox register. */ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) { int status; unsigned long count; /* Begin polled mode for MPI */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); /* Load the mailbox registers and wake up MPI RISC. */ status = ql_exec_mb_cmd(qdev, mbcp); if (status) goto end; /* If we're generating a system error, then there's nothing * to wait for. */ if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR) goto end; /* Wait for the command to complete. We loop * here because some AEN might arrive while * we're waiting for the mailbox command to * complete. If more than 5 seconds expire we can * assume something is wrong. */ count = jiffies + HZ * MAILBOX_TIMEOUT; do { /* Wait for the interrupt to come in. */ status = ql_wait_mbx_cmd_cmplt(qdev); if (status) continue; /* Process the event. If it's an AEN, it * will be handled in-line or a worker * will be spawned. If it's our completion * we will catch it below. */ status = ql_mpi_handler(qdev, mbcp); if (status) goto end; /* It's either the completion for our mailbox * command complete or an AEN. If it's our * completion then get out. */ if (((mbcp->mbox_out[0] & 0x0000f000) == MB_CMD_STS_GOOD) || ((mbcp->mbox_out[0] & 0x0000f000) == MB_CMD_STS_INTRMDT)) goto done; } while (time_before(jiffies, count)); netif_err(qdev, drv, qdev->ndev, "Timed out waiting for mailbox complete.\n"); status = -ETIMEDOUT; goto end; done: /* Now we can clear the interrupt condition * and look at our status. 
*/ ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); if (((mbcp->mbox_out[0] & 0x0000f000) != MB_CMD_STS_GOOD) && ((mbcp->mbox_out[0] & 0x0000f000) != MB_CMD_STS_INTRMDT)) { status = -EIO; } end: /* End polled mode for MPI */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); return status; } int ql_mb_sys_err(struct ql_adapter *qdev) { struct mbox_params mbc; struct mbox_params *mbcp = &mbc; int status; memset(mbcp, 0, sizeof(struct mbox_params)); mbcp->in_count = 1; mbcp->out_count = 0; mbcp->mbox_in[0] = MB_CMD_MAKE_SYS_ERR; status = ql_mailbox_command(qdev, mbcp); return status; } /* Get MPI firmware version. This will be used for * driver banner and for ethtool info. * Returns zero on success. */ int ql_mb_about_fw(struct ql_adapter *qdev) { struct mbox_params mbc; struct mbox_params *mbcp = &mbc; int status = 0; memset(mbcp, 0, sizeof(struct mbox_params)); mbcp->in_count = 1; mbcp->out_count = 3; mbcp->mbox_in[0] = MB_CMD_ABOUT_FW; status = ql_mailbox_command(qdev, mbcp); if (status) return status; if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { netif_err(qdev, drv, qdev->ndev, "Failed about firmware command\n"); status = -EIO; } /* Store the firmware version */ qdev->fw_rev_id = mbcp->mbox_out[1]; return status; } /* Get functional state for MPI firmware. * Returns zero on success. */ int ql_mb_get_fw_state(struct ql_adapter *qdev) { struct mbox_params mbc; struct mbox_params *mbcp = &mbc; int status = 0; memset(mbcp, 0, sizeof(struct mbox_params)); mbcp->in_count = 1; mbcp->out_count = 2; mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE; status = ql_mailbox_command(qdev, mbcp); if (status) return status; if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { netif_err(qdev, drv, qdev->ndev, "Failed Get Firmware State.\n"); status = -EIO; } /* If bit zero is set in mbx 1 then the firmware is * running, but not initialized. This should never * happen. 
*/ if (mbcp->mbox_out[1] & 1) { netif_err(qdev, drv, qdev->ndev, "Firmware waiting for initialization.\n"); status = -EIO; } return status; } /* Send and ACK mailbox command to the firmware to * let it continue with the change. */ int ql_mb_idc_ack(struct ql_adapter *qdev) { struct mbox_params mbc; struct mbox_params *mbcp = &mbc; int status = 0; memset(mbcp, 0, sizeof(struct mbox_params)); mbcp->in_count = 5; mbcp->out_count = 1; mbcp->mbox_in[0] = MB_CMD_IDC_ACK; mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1]; mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2]; mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3]; mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4]; status = ql_mailbox_command(qdev, mbcp); if (status) return status; if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n"); status = -EIO; } return status; } /* Get link settings and maximum frame size settings * for the current port. * Most likely will block. */ int ql_mb_set_port_cfg(struct ql_adapter *qdev) { struct mbox_params mbc; struct mbox_params *mbcp = &mbc; int status = 0; memset(mbcp, 0, sizeof(struct mbox_params)); mbcp->in_count = 3; mbcp->out_count = 1; mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG; mbcp->mbox_in[1] = qdev->link_config; mbcp->mbox_in[2] = qdev->max_frame_size; status = ql_mailbox_command(qdev, mbcp); if (status) return status; if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) { netif_err(qdev, drv, qdev->ndev, "Port Config sent, wait for IDC.\n"); } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { netif_err(qdev, drv, qdev->ndev, "Failed Set Port Configuration.\n"); status = -EIO; } return status; } int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr, u32 size) { int status = 0; struct mbox_params mbc; struct mbox_params *mbcp = &mbc; memset(mbcp, 0, sizeof(struct mbox_params)); mbcp->in_count = 9; mbcp->out_count = 1; mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM; mbcp->mbox_in[1] = LSW(addr); mbcp->mbox_in[2] = MSW(req_dma); mbcp->mbox_in[3] = 
LSW(req_dma); mbcp->mbox_in[4] = MSW(size); mbcp->mbox_in[5] = LSW(size); mbcp->mbox_in[6] = MSW(MSD(req_dma)); mbcp->mbox_in[7] = LSW(MSD(req_dma)); mbcp->mbox_in[8] = MSW(addr); status = ql_mailbox_command(qdev, mbcp); if (status) return status; if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n"); status = -EIO; } return status; } /* Issue a mailbox command to dump RISC RAM. */ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr, int word_count) { int status; char *my_buf; dma_addr_t buf_dma; my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32), &buf_dma); if (!my_buf) return -EIO; status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count); if (!status) memcpy(buf, my_buf, word_count * sizeof(u32)); pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf, buf_dma); return status; } /* Get link settings and maximum frame size settings * for the current port. * Most likely will block. */ int ql_mb_get_port_cfg(struct ql_adapter *qdev) { struct mbox_params mbc; struct mbox_params *mbcp = &mbc; int status = 0; memset(mbcp, 0, sizeof(struct mbox_params)); mbcp->in_count = 1; mbcp->out_count = 3; mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG; status = ql_mailbox_command(qdev, mbcp); if (status) return status; if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { netif_err(qdev, drv, qdev->ndev, "Failed Get Port Configuration.\n"); status = -EIO; } else { netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, "Passed Get Port Configuration.\n"); qdev->link_config = mbcp->mbox_out[1]; qdev->max_frame_size = mbcp->mbox_out[2]; } return status; } int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol) { struct mbox_params mbc; struct mbox_params *mbcp = &mbc; int status; memset(mbcp, 0, sizeof(struct mbox_params)); mbcp->in_count = 2; mbcp->out_count = 1; mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE; mbcp->mbox_in[1] = wol; status = ql_mailbox_command(qdev, mbcp); if (status) return status; if 
(mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n"); status = -EIO; } return status; } int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol) { struct mbox_params mbc; struct mbox_params *mbcp = &mbc; int status; u8 *addr = qdev->ndev->dev_addr; memset(mbcp, 0, sizeof(struct mbox_params)); mbcp->in_count = 8; mbcp->out_count = 1; mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC; if (enable_wol) { mbcp->mbox_in[1] = (u32)addr[0]; mbcp->mbox_in[2] = (u32)addr[1]; mbcp->mbox_in[3] = (u32)addr[2]; mbcp->mbox_in[4] = (u32)addr[3]; mbcp->mbox_in[5] = (u32)addr[4]; mbcp->mbox_in[6] = (u32)addr[5]; mbcp->mbox_in[7] = 0; } else { mbcp->mbox_in[1] = 0; mbcp->mbox_in[2] = 1; mbcp->mbox_in[3] = 1; mbcp->mbox_in[4] = 1; mbcp->mbox_in[5] = 1; mbcp->mbox_in[6] = 1; mbcp->mbox_in[7] = 0; } status = ql_mailbox_command(qdev, mbcp); if (status) return status; if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n"); status = -EIO; } return status; } /* IDC - Inter Device Communication... * Some firmware commands require consent of adjacent FCOE * function. This function waits for the OK, or a * counter-request for a little more time.i * The firmware will complete the request if the other * function doesn't respond. */ static int ql_idc_wait(struct ql_adapter *qdev) { int status = -ETIMEDOUT; long wait_time = 1 * HZ; struct mbox_params *mbcp = &qdev->idc_mbc; do { /* Wait here for the command to complete * via the IDC process. */ wait_time = wait_for_completion_timeout(&qdev->ide_completion, wait_time); if (!wait_time) { netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n"); break; } /* Now examine the response from the IDC process. * We might have a good completion or a request for * more wait time. 
*/ if (mbcp->mbox_out[0] == AEN_IDC_EXT) { netif_err(qdev, drv, qdev->ndev, "IDC Time Extension from function.\n"); wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f; } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) { netif_err(qdev, drv, qdev->ndev, "IDC Success.\n"); status = 0; break; } else { netif_err(qdev, drv, qdev->ndev, "IDC: Invalid State 0x%.04x.\n", mbcp->mbox_out[0]); status = -EIO; break; } } while (wait_time); return status; } int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config) { struct mbox_params mbc; struct mbox_params *mbcp = &mbc; int status; memset(mbcp, 0, sizeof(struct mbox_params)); mbcp->in_count = 2; mbcp->out_count = 1; mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG; mbcp->mbox_in[1] = led_config; status = ql_mailbox_command(qdev, mbcp); if (status) return status; if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { netif_err(qdev, drv, qdev->ndev, "Failed to set LED Configuration.\n"); status = -EIO; } return status; } int ql_mb_get_led_cfg(struct ql_adapter *qdev) { struct mbox_params mbc; struct mbox_params *mbcp = &mbc; int status; memset(mbcp, 0, sizeof(struct mbox_params)); mbcp->in_count = 1; mbcp->out_count = 2; mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG; status = ql_mailbox_command(qdev, mbcp); if (status) return status; if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { netif_err(qdev, drv, qdev->ndev, "Failed to get LED Configuration.\n"); status = -EIO; } else qdev->led_config = mbcp->mbox_out[1]; return status; } int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) { struct mbox_params mbc; struct mbox_params *mbcp = &mbc; int status; memset(mbcp, 0, sizeof(struct mbox_params)); mbcp->in_count = 1; mbcp->out_count = 2; mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL; mbcp->mbox_in[1] = control; status = ql_mailbox_command(qdev, mbcp); if (status) return status; if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) return status; if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { netif_err(qdev, drv, qdev->ndev, "Command not supported by 
firmware.\n"); status = -EINVAL; } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { /* This indicates that the firmware is * already in the state we are trying to * change it to. */ netif_err(qdev, drv, qdev->ndev, "Command parameters make no change.\n"); } return status; } /* Returns a negative error code or the mailbox command status. */ static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control) { struct mbox_params mbc; struct mbox_params *mbcp = &mbc; int status; memset(mbcp, 0, sizeof(struct mbox_params)); *control = 0; mbcp->in_count = 1; mbcp->out_count = 1; mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL; status = ql_mailbox_command(qdev, mbcp); if (status) return status; if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) { *control = mbcp->mbox_in[1]; return status; } if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { netif_err(qdev, drv, qdev->ndev, "Command not supported by firmware.\n"); status = -EINVAL; } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { netif_err(qdev, drv, qdev->ndev, "Failed to get MPI traffic control.\n"); status = -EIO; } return status; } int ql_wait_fifo_empty(struct ql_adapter *qdev) { int count = 5; u32 mgmnt_fifo_empty; u32 nic_fifo_empty; do { nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE; ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty); mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY; if (nic_fifo_empty && mgmnt_fifo_empty) return 0; msleep(100); } while (count-- > 0); return -ETIMEDOUT; } /* API called in work thread context to set new TX/RX * maximum frame size values to match MTU. */ static int ql_set_port_cfg(struct ql_adapter *qdev) { int status; rtnl_lock(); status = ql_mb_set_port_cfg(qdev); rtnl_unlock(); if (status) return status; status = ql_idc_wait(qdev); return status; } /* The following routines are worker threads that process * events that may sleep waiting for completion. 
*/ /* This thread gets the maximum TX and RX frame size values * from the firmware and, if necessary, changes them to match * the MTU setting. */ void ql_mpi_port_cfg_work(struct work_struct *work) { struct ql_adapter *qdev = container_of(work, struct ql_adapter, mpi_port_cfg_work.work); int status; rtnl_lock(); status = ql_mb_get_port_cfg(qdev); rtnl_unlock(); if (status) { netif_err(qdev, drv, qdev->ndev, "Bug: Failed to get port config data.\n"); goto err; } if (qdev->link_config & CFG_JUMBO_FRAME_SIZE && qdev->max_frame_size == CFG_DEFAULT_MAX_FRAME_SIZE) goto end; qdev->link_config |= CFG_JUMBO_FRAME_SIZE; qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE; status = ql_set_port_cfg(qdev); if (status) { netif_err(qdev, drv, qdev->ndev, "Bug: Failed to set port config data.\n"); goto err; } end: clear_bit(QL_PORT_CFG, &qdev->flags); return; err: ql_queue_fw_error(qdev); goto end; } /* Process an inter-device request. This is issues by * the firmware in response to another function requesting * a change to the port. We set a flag to indicate a change * has been made and then send a mailbox command ACKing * the change request. */ void ql_mpi_idc_work(struct work_struct *work) { struct ql_adapter *qdev = container_of(work, struct ql_adapter, mpi_idc_work.work); int status; struct mbox_params *mbcp = &qdev->idc_mbc; u32 aen; int timeout; rtnl_lock(); aen = mbcp->mbox_out[1] >> 16; timeout = (mbcp->mbox_out[1] >> 8) & 0xf; switch (aen) { default: netif_err(qdev, drv, qdev->ndev, "Bug: Unhandled IDC action.\n"); break; case MB_CMD_PORT_RESET: case MB_CMD_STOP_FW: ql_link_off(qdev); case MB_CMD_SET_PORT_CFG: /* Signal the resulting link up AEN * that the frame routing and mac addr * needs to be set. 
* */ set_bit(QL_CAM_RT_SET, &qdev->flags); /* Do ACK if required */ if (timeout) { status = ql_mb_idc_ack(qdev); if (status) netif_err(qdev, drv, qdev->ndev, "Bug: No pending IDC!\n"); } else { netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, "IDC ACK not required\n"); status = 0; /* success */ } break; /* These sub-commands issued by another (FCoE) * function are requesting to do an operation * on the shared resource (MPI environment). * We currently don't issue these so we just * ACK the request. */ case MB_CMD_IOP_RESTART_MPI: case MB_CMD_IOP_PREP_LINK_DOWN: /* Drop the link, reload the routing * table when link comes up. */ ql_link_off(qdev); set_bit(QL_CAM_RT_SET, &qdev->flags); /* Fall through. */ case MB_CMD_IOP_DVR_START: case MB_CMD_IOP_FLASH_ACC: case MB_CMD_IOP_CORE_DUMP_MPI: case MB_CMD_IOP_PREP_UPDATE_MPI: case MB_CMD_IOP_COMP_UPDATE_MPI: case MB_CMD_IOP_NONE: /* an IDC without params */ /* Do ACK if required */ if (timeout) { status = ql_mb_idc_ack(qdev); if (status) netif_err(qdev, drv, qdev->ndev, "Bug: No pending IDC!\n"); } else { netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, "IDC ACK not required\n"); status = 0; /* success */ } break; } rtnl_unlock(); } void ql_mpi_work(struct work_struct *work) { struct ql_adapter *qdev = container_of(work, struct ql_adapter, mpi_work.work); struct mbox_params mbc; struct mbox_params *mbcp = &mbc; int err = 0; rtnl_lock(); /* Begin polled mode for MPI */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); while (ql_read32(qdev, STS) & STS_PI) { memset(mbcp, 0, sizeof(struct mbox_params)); mbcp->out_count = 1; /* Don't continue if an async event * did not complete properly. 
*/
		err = ql_mpi_handler(qdev, mbcp);
		if (err)
			break;
	}

	/* End polled mode for MPI */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
	rtnl_unlock();
	ql_enable_completion_interrupt(qdev, 0);
}

/* Worker that recovers the MPI RISC after a firmware error: cancels the
 * other MPI workers, takes a core dump if this function owns the
 * firmware, then soft-resets the RISC.
 */
void ql_mpi_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, mpi_reset_work.work);

	/* Stop any in-flight MPI work before touching the RISC. */
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);

	/* If we're not the dominant NIC function,
	 * then there is nothing to do.
	 */
	if (!ql_own_firmware(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
		return;
	}

	/* ql_core_dump() returning 0 means the dump succeeded; queue the
	 * worker that spools the dump to the log after a 5 second delay.
	 */
	if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
		netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
		qdev->core_is_dumped = 1;
		queue_delayed_work(qdev->workqueue,
				   &qdev->mpi_core_to_log, 5 * HZ);
	}

	ql_soft_reset_mpi_risc(qdev);
}
gpl-2.0
fedya/aircam-openwrt
build_dir/toolchain-arm_v5te_gcc-linaro_uClibc-0.9.32_eabi/linux-2.6.28.fa2/arch/um/os-Linux/file.c
1575
10259
/* * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <stdio.h> #include <unistd.h> #include <errno.h> #include <fcntl.h> #include <signal.h> #include <sys/ioctl.h> #include <sys/mount.h> #include <sys/socket.h> #include <sys/stat.h> #include <sys/un.h> #include "kern_constants.h" #include "os.h" #include "user.h" static void copy_stat(struct uml_stat *dst, const struct stat64 *src) { *dst = ((struct uml_stat) { .ust_dev = src->st_dev, /* device */ .ust_ino = src->st_ino, /* inode */ .ust_mode = src->st_mode, /* protection */ .ust_nlink = src->st_nlink, /* number of hard links */ .ust_uid = src->st_uid, /* user ID of owner */ .ust_gid = src->st_gid, /* group ID of owner */ .ust_size = src->st_size, /* total size, in bytes */ .ust_blksize = src->st_blksize, /* blocksize for filesys I/O */ .ust_blocks = src->st_blocks, /* number of blocks allocated */ .ust_atime = src->st_atime, /* time of last access */ .ust_mtime = src->st_mtime, /* time of last modification */ .ust_ctime = src->st_ctime, /* time of last change */ }); } int os_stat_fd(const int fd, struct uml_stat *ubuf) { struct stat64 sbuf; int err; CATCH_EINTR(err = fstat64(fd, &sbuf)); if (err < 0) return -errno; if (ubuf != NULL) copy_stat(ubuf, &sbuf); return err; } int os_stat_file(const char *file_name, struct uml_stat *ubuf) { struct stat64 sbuf; int err; CATCH_EINTR(err = stat64(file_name, &sbuf)); if (err < 0) return -errno; if (ubuf != NULL) copy_stat(ubuf, &sbuf); return err; } int os_access(const char *file, int mode) { int amode, err; amode = (mode & OS_ACC_R_OK ? R_OK : 0) | (mode & OS_ACC_W_OK ? W_OK : 0) | (mode & OS_ACC_X_OK ? X_OK : 0) | (mode & OS_ACC_F_OK ? F_OK : 0); err = access(file, amode); if (err < 0) return -errno; return 0; } /* FIXME? 
required only by hostaudio (because it passes ioctls verbatim) */ int os_ioctl_generic(int fd, unsigned int cmd, unsigned long arg) { int err; err = ioctl(fd, cmd, arg); if (err < 0) return -errno; return err; } /* FIXME: ensure namebuf in os_get_if_name is big enough */ int os_get_ifname(int fd, char* namebuf) { if (ioctl(fd, SIOCGIFNAME, namebuf) < 0) return -errno; return 0; } int os_set_slip(int fd) { int disc, sencap; disc = N_SLIP; if (ioctl(fd, TIOCSETD, &disc) < 0) return -errno; sencap = 0; if (ioctl(fd, SIOCSIFENCAP, &sencap) < 0) return -errno; return 0; } int os_mode_fd(int fd, int mode) { int err; CATCH_EINTR(err = fchmod(fd, mode)); if (err < 0) return -errno; return 0; } int os_file_type(char *file) { struct uml_stat buf; int err; err = os_stat_file(file, &buf); if (err < 0) return err; if (S_ISDIR(buf.ust_mode)) return OS_TYPE_DIR; else if (S_ISLNK(buf.ust_mode)) return OS_TYPE_SYMLINK; else if (S_ISCHR(buf.ust_mode)) return OS_TYPE_CHARDEV; else if (S_ISBLK(buf.ust_mode)) return OS_TYPE_BLOCKDEV; else if (S_ISFIFO(buf.ust_mode)) return OS_TYPE_FIFO; else if (S_ISSOCK(buf.ust_mode)) return OS_TYPE_SOCK; else return OS_TYPE_FILE; } int os_file_mode(const char *file, struct openflags *mode_out) { int err; *mode_out = OPENFLAGS(); err = access(file, W_OK); if (err && (errno != EACCES)) return -errno; else if (!err) *mode_out = of_write(*mode_out); err = access(file, R_OK); if (err && (errno != EACCES)) return -errno; else if (!err) *mode_out = of_read(*mode_out); return err; } int os_open_file(const char *file, struct openflags flags, int mode) { int fd, err, f = 0; if (flags.r && flags.w) f = O_RDWR; else if (flags.r) f = O_RDONLY; else if (flags.w) f = O_WRONLY; else f = 0; if (flags.s) f |= O_SYNC; if (flags.c) f |= O_CREAT; if (flags.t) f |= O_TRUNC; if (flags.e) f |= O_EXCL; if (flags.a) f |= O_APPEND; fd = open64(file, f, mode); if (fd < 0) return -errno; if (flags.cl && fcntl(fd, F_SETFD, 1)) { err = -errno; close(fd); return err; } return fd; } 
int os_connect_socket(const char *name) { struct sockaddr_un sock; int fd, err; sock.sun_family = AF_UNIX; snprintf(sock.sun_path, sizeof(sock.sun_path), "%s", name); fd = socket(AF_UNIX, SOCK_STREAM, 0); if (fd < 0) { err = -errno; goto out; } err = connect(fd, (struct sockaddr *) &sock, sizeof(sock)); if (err) { err = -errno; goto out_close; } return fd; out_close: close(fd); out: return err; } void os_close_file(int fd) { close(fd); } int os_seek_file(int fd, unsigned long long offset) { unsigned long long actual; actual = lseek64(fd, offset, SEEK_SET); if (actual != offset) return -errno; return 0; } int os_read_file(int fd, void *buf, int len) { int n = read(fd, buf, len); if (n < 0) return -errno; return n; } int os_write_file(int fd, const void *buf, int len) { int n = write(fd, (void *) buf, len); if (n < 0) return -errno; return n; } int os_file_size(const char *file, unsigned long long *size_out) { struct uml_stat buf; int err; err = os_stat_file(file, &buf); if (err < 0) { printk(UM_KERN_ERR "Couldn't stat \"%s\" : err = %d\n", file, -err); return err; } if (S_ISBLK(buf.ust_mode)) { int fd; long blocks; fd = open(file, O_RDONLY, 0); if (fd < 0) { err = -errno; printk(UM_KERN_ERR "Couldn't open \"%s\", " "errno = %d\n", file, errno); return err; } if (ioctl(fd, BLKGETSIZE, &blocks) < 0) { err = -errno; printk(UM_KERN_ERR "Couldn't get the block size of " "\"%s\", errno = %d\n", file, errno); close(fd); return err; } *size_out = ((long long) blocks) * 512; close(fd); } else *size_out = buf.ust_size; return 0; } int os_file_modtime(const char *file, unsigned long *modtime) { struct uml_stat buf; int err; err = os_stat_file(file, &buf); if (err < 0) { printk(UM_KERN_ERR "Couldn't stat \"%s\" : err = %d\n", file, -err); return err; } *modtime = buf.ust_mtime; return 0; } int os_set_exec_close(int fd) { int err; CATCH_EINTR(err = fcntl(fd, F_SETFD, FD_CLOEXEC)); if (err < 0) return -errno; return err; } int os_pipe(int *fds, int stream, int close_on_exec) { 
int err, type = stream ? SOCK_STREAM : SOCK_DGRAM; err = socketpair(AF_UNIX, type, 0, fds); if (err < 0) return -errno; if (!close_on_exec) return 0; err = os_set_exec_close(fds[0]); if (err < 0) goto error; err = os_set_exec_close(fds[1]); if (err < 0) goto error; return 0; error: printk(UM_KERN_ERR "os_pipe : Setting FD_CLOEXEC failed, err = %d\n", -err); close(fds[1]); close(fds[0]); return err; } int os_set_fd_async(int fd) { int err, flags; flags = fcntl(fd, F_GETFL); if (flags < 0) return -errno; flags |= O_ASYNC | O_NONBLOCK; if (fcntl(fd, F_SETFL, flags) < 0) { err = -errno; printk(UM_KERN_ERR "os_set_fd_async : failed to set O_ASYNC " "and O_NONBLOCK on fd # %d, errno = %d\n", fd, errno); return err; } if ((fcntl(fd, F_SETSIG, SIGIO) < 0) || (fcntl(fd, F_SETOWN, os_getpid()) < 0)) { err = -errno; printk(UM_KERN_ERR "os_set_fd_async : Failed to fcntl F_SETOWN " "(or F_SETSIG) fd %d, errno = %d\n", fd, errno); return err; } return 0; } int os_clear_fd_async(int fd) { int flags; flags = fcntl(fd, F_GETFL); if (flags < 0) return -errno; flags &= ~(O_ASYNC | O_NONBLOCK); if (fcntl(fd, F_SETFL, flags) < 0) return -errno; return 0; } int os_set_fd_block(int fd, int blocking) { int flags; flags = fcntl(fd, F_GETFL); if (flags < 0) return -errno; if (blocking) flags &= ~O_NONBLOCK; else flags |= O_NONBLOCK; if (fcntl(fd, F_SETFL, flags) < 0) return -errno; return 0; } int os_accept_connection(int fd) { int new; new = accept(fd, NULL, 0); if (new < 0) return -errno; return new; } #ifndef SHUT_RD #define SHUT_RD 0 #endif #ifndef SHUT_WR #define SHUT_WR 1 #endif #ifndef SHUT_RDWR #define SHUT_RDWR 2 #endif int os_shutdown_socket(int fd, int r, int w) { int what, err; if (r && w) what = SHUT_RDWR; else if (r) what = SHUT_RD; else if (w) what = SHUT_WR; else return -EINVAL; err = shutdown(fd, what); if (err < 0) return -errno; return 0; } int os_rcv_fd(int fd, int *helper_pid_out) { int new, n; char buf[CMSG_SPACE(sizeof(new))]; struct msghdr msg; struct cmsghdr *cmsg; 
struct iovec iov; msg.msg_name = NULL; msg.msg_namelen = 0; iov = ((struct iovec) { .iov_base = helper_pid_out, .iov_len = sizeof(*helper_pid_out) }); msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = buf; msg.msg_controllen = sizeof(buf); msg.msg_flags = 0; n = recvmsg(fd, &msg, 0); if (n < 0) return -errno; else if (n != iov.iov_len) *helper_pid_out = -1; cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { printk(UM_KERN_ERR "rcv_fd didn't receive anything, " "error = %d\n", errno); return -1; } if ((cmsg->cmsg_level != SOL_SOCKET) || (cmsg->cmsg_type != SCM_RIGHTS)) { printk(UM_KERN_ERR "rcv_fd didn't receive a descriptor\n"); return -1; } new = ((int *) CMSG_DATA(cmsg))[0]; return new; } int os_create_unix_socket(const char *file, int len, int close_on_exec) { struct sockaddr_un addr; int sock, err; sock = socket(PF_UNIX, SOCK_DGRAM, 0); if (sock < 0) return -errno; if (close_on_exec) { err = os_set_exec_close(sock); if (err < 0) printk(UM_KERN_ERR "create_unix_socket : " "close_on_exec failed, err = %d", -err); } addr.sun_family = AF_UNIX; snprintf(addr.sun_path, len, "%s", file); err = bind(sock, (struct sockaddr *) &addr, sizeof(addr)); if (err < 0) return -errno; return sock; } void os_flush_stdout(void) { fflush(stdout); } int os_lock_file(int fd, int excl) { int type = excl ? F_WRLCK : F_RDLCK; struct flock lock = ((struct flock) { .l_type = type, .l_whence = SEEK_SET, .l_start = 0, .l_len = 0 } ); int err, save; err = fcntl(fd, F_SETLK, &lock); if (!err) goto out; save = -errno; err = fcntl(fd, F_GETLK, &lock); if (err) { err = -errno; goto out; } printk(UM_KERN_ERR "F_SETLK failed, file already locked by pid %d\n", lock.l_pid); err = save; out: return err; }
gpl-2.0
geiti94/NEMESIS_KERNEL_N5
arch/powerpc/kernel/ptrace32.c
2087
8718
/* * ptrace for 32-bit processes running on a 64-bit kernel. * * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Derived from "arch/m68k/kernel/ptrace.c" * Copyright (C) 1994 by Hamish Macdonald * Taken from linux/kernel/ptrace.c and modified for M680x0. * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds * * Modified by Cort Dougan (cort@hq.fsmlabs.com) * and Paul Mackerras (paulus@samba.org). * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of * this archive for more details. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/regset.h> #include <linux/user.h> #include <linux/security.h> #include <linux/signal.h> #include <linux/compat.h> #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/switch_to.h> /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ /* Macros to workout the correct index for the FPR in the thread struct */ #define FPRNUMBER(i) (((i) - PT_FPR0) >> 1) #define FPRHALF(i) (((i) - PT_FPR0) & 1) #define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i) #define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0)) long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) { unsigned long addr = caddr; unsigned long data = cdata; int ret; switch (request) { /* * Read 4 bytes of the other process' storage * data is a pointer specifying where the user wants the * 4 bytes copied into * addr is a pointer in the user's storage that contains an 8 byte * address in the other process of the 4 bytes that is to be read * (this is run in a 32-bit process looking at a 64-bit process) * when I and D space are separate, these will need to be fixed. 
*/ case PPC_PTRACE_PEEKTEXT_3264: case PPC_PTRACE_PEEKDATA_3264: { u32 tmp; int copied; u32 __user * addrOthers; ret = -EIO; /* Get the addr in the other process that we want to read */ if (get_user(addrOthers, (u32 __user * __user *)addr) != 0) break; copied = access_process_vm(child, (u64)addrOthers, &tmp, sizeof(tmp), 0); if (copied != sizeof(tmp)) break; ret = put_user(tmp, (u32 __user *)data); break; } /* Read a register (specified by ADDR) out of the "user area" */ case PTRACE_PEEKUSR: { int index; unsigned long tmp; ret = -EIO; /* convert to index and check */ index = (unsigned long) addr >> 2; if ((addr & 3) || (index > PT_FPSCR32)) break; CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { ret = ptrace_get_reg(child, index, &tmp); if (ret) break; } else { flush_fp_to_thread(child); /* * the user space code considers the floating point * to be an array of unsigned int (32 bits) - the * index passed in is based on this assumption. */ tmp = ((unsigned int *)child->thread.fpr) [FPRINDEX(index)]; } ret = put_user((unsigned int)tmp, (u32 __user *)data); break; } /* * Read 4 bytes out of the other process' pt_regs area * data is a pointer specifying where the user wants the * 4 bytes copied into * addr is the offset into the other process' pt_regs structure * that is to be read * (this is run in a 32-bit process looking at a 64-bit process) */ case PPC_PTRACE_PEEKUSR_3264: { u32 index; u32 reg32bits; u64 tmp; u32 numReg; u32 part; ret = -EIO; /* Determine which register the user wants */ index = (u64)addr >> 2; numReg = index / 2; /* Determine which part of the register the user wants */ if (index % 2) part = 1; /* want the 2nd half of the register (right-most). */ else part = 0; /* want the 1st half of the register (left-most). 
*/ /* Validate the input - check to see if address is on the wrong boundary * or beyond the end of the user area */ if ((addr & 3) || numReg > PT_FPSCR) break; CHECK_FULL_REGS(child->thread.regs); if (numReg >= PT_FPR0) { flush_fp_to_thread(child); /* get 64 bit FPR */ tmp = ((u64 *)child->thread.fpr) [FPRINDEX_3264(numReg)]; } else { /* register within PT_REGS struct */ unsigned long tmp2; ret = ptrace_get_reg(child, numReg, &tmp2); if (ret) break; tmp = tmp2; } reg32bits = ((u32*)&tmp)[part]; ret = put_user(reg32bits, (u32 __user *)data); break; } /* * Write 4 bytes into the other process' storage * data is the 4 bytes that the user wants written * addr is a pointer in the user's storage that contains an * 8 byte address in the other process where the 4 bytes * that is to be written * (this is run in a 32-bit process looking at a 64-bit process) * when I and D space are separate, these will need to be fixed. */ case PPC_PTRACE_POKETEXT_3264: case PPC_PTRACE_POKEDATA_3264: { u32 tmp = data; u32 __user * addrOthers; /* Get the addr in the other process that we want to write into */ ret = -EIO; if (get_user(addrOthers, (u32 __user * __user *)addr) != 0) break; ret = 0; if (access_process_vm(child, (u64)addrOthers, &tmp, sizeof(tmp), 1) == sizeof(tmp)) break; ret = -EIO; break; } /* write the word at location addr in the USER area */ case PTRACE_POKEUSR: { unsigned long index; ret = -EIO; /* convert to index and check */ index = (unsigned long) addr >> 2; if ((addr & 3) || (index > PT_FPSCR32)) break; CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { ret = ptrace_put_reg(child, index, data); } else { flush_fp_to_thread(child); /* * the user space code considers the floating point * to be an array of unsigned int (32 bits) - the * index passed in is based on this assumption. 
*/ ((unsigned int *)child->thread.fpr) [FPRINDEX(index)] = data; ret = 0; } break; } /* * Write 4 bytes into the other process' pt_regs area * data is the 4 bytes that the user wants written * addr is the offset into the other process' pt_regs structure * that is to be written into * (this is run in a 32-bit process looking at a 64-bit process) */ case PPC_PTRACE_POKEUSR_3264: { u32 index; u32 numReg; ret = -EIO; /* Determine which register the user wants */ index = (u64)addr >> 2; numReg = index / 2; /* * Validate the input - check to see if address is on the * wrong boundary or beyond the end of the user area */ if ((addr & 3) || (numReg > PT_FPSCR)) break; CHECK_FULL_REGS(child->thread.regs); if (numReg < PT_FPR0) { unsigned long freg; ret = ptrace_get_reg(child, numReg, &freg); if (ret) break; if (index % 2) freg = (freg & ~0xfffffffful) | (data & 0xfffffffful); else freg = (freg & 0xfffffffful) | (data << 32); ret = ptrace_put_reg(child, numReg, freg); } else { u64 *tmp; flush_fp_to_thread(child); /* get 64 bit FPR ... */ tmp = &(((u64 *)child->thread.fpr) [FPRINDEX_3264(numReg)]); /* ... write the 32 bit part we want */ ((u32 *)tmp)[index % 2] = data; ret = 0; } break; } case PTRACE_GET_DEBUGREG: { #ifndef CONFIG_PPC_ADV_DEBUG_REGS unsigned long dabr_fake; #endif ret = -EINVAL; /* We only support one DABR and no IABRS at the moment */ if (addr > 0) break; #ifdef CONFIG_PPC_ADV_DEBUG_REGS ret = put_user(child->thread.dac1, (u32 __user *)data); #else dabr_fake = ( (child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) | (child->thread.hw_brk.type & HW_BRK_TYPE_DABR)); ret = put_user(dabr_fake, (u32 __user *)data); #endif break; } case PTRACE_GETREGS: /* Get all pt_regs from the child. */ return copy_regset_to_user( child, task_user_regset_view(current), 0, 0, PT_REGS_COUNT * sizeof(compat_long_t), compat_ptr(data)); case PTRACE_SETREGS: /* Set all gp regs in the child. 
*/ return copy_regset_from_user( child, task_user_regset_view(current), 0, 0, PT_REGS_COUNT * sizeof(compat_long_t), compat_ptr(data)); case PTRACE_GETFPREGS: case PTRACE_SETFPREGS: case PTRACE_GETVRREGS: case PTRACE_SETVRREGS: case PTRACE_GETVSRREGS: case PTRACE_SETVSRREGS: case PTRACE_GETREGS64: case PTRACE_SETREGS64: case PTRACE_KILL: case PTRACE_SINGLESTEP: case PTRACE_DETACH: case PTRACE_SET_DEBUGREG: case PTRACE_SYSCALL: case PTRACE_CONT: case PPC_PTRACE_GETHWDBGINFO: case PPC_PTRACE_SETHWDEBUG: case PPC_PTRACE_DELHWDEBUG: ret = arch_ptrace(child, request, addr, data); break; default: ret = compat_ptrace_request(child, request, addr, data); break; } return ret; }
gpl-2.0
jetonbacaj/SomeKernel_G920P_PB6
drivers/staging/keucr/scsiglue.c
2087
11823
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include <linux/module.h> #include <linux/mutex.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_devinfo.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include "usb.h" #include "scsiglue.h" #include "transport.h" /* Host functions */ /* * host_info() */ static const char *host_info(struct Scsi_Host *host) { /* pr_info("scsiglue --- host_info\n"); */ return "SCSI emulation for USB Mass Storage devices"; } /* * slave_alloc() */ static int slave_alloc(struct scsi_device *sdev) { struct us_data *us = host_to_us(sdev->host); /* pr_info("scsiglue --- slave_alloc\n"); */ sdev->inquiry_len = 36; blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); if (us->subclass == USB_SC_UFI) sdev->sdev_target->pdt_1f_for_no_lun = 1; return 0; } /* * slave_configure() */ static int slave_configure(struct scsi_device *sdev) { struct us_data *us = host_to_us(sdev->host); /* pr_info("scsiglue --- slave_configure\n"); */ if (us->fflags & (US_FL_MAX_SECTORS_64 | US_FL_MAX_SECTORS_MIN)) { unsigned int max_sectors = 64; if (us->fflags & US_FL_MAX_SECTORS_MIN) max_sectors = PAGE_CACHE_SIZE >> 9; if (queue_max_sectors(sdev->request_queue) > max_sectors) blk_queue_max_hw_sectors(sdev->request_queue, max_sectors); } if (sdev->type == TYPE_DISK) { if (us->subclass != USB_SC_SCSI && us->subclass != USB_SC_CYP_ATACB) sdev->use_10_for_ms = 1; sdev->use_192_bytes_for_3f = 1; if (us->fflags & US_FL_NO_WP_DETECT) sdev->skip_ms_page_3f = 1; sdev->skip_ms_page_8 = 1; if (us->fflags & US_FL_FIX_CAPACITY) sdev->fix_capacity = 1; if (us->fflags & US_FL_CAPACITY_HEURISTICS) sdev->guess_capacity = 1; if (sdev->scsi_level > SCSI_2) sdev->sdev_target->scsi_level = sdev->scsi_level = SCSI_2; sdev->retry_hwerror = 1; sdev->allow_restart = 1; sdev->last_sector_bug = 1; } else { sdev->use_10_for_ms = 1; } if ((us->protocol == USB_PR_CB || us->protocol == USB_PR_CBI) && sdev->scsi_level == SCSI_UNKNOWN) 
us->max_lun = 0; if (us->fflags & US_FL_NOT_LOCKABLE) sdev->lockable = 0; return 0; } /* This is always called with scsi_lock(host) held */ /* * queuecommand() */ static int queuecommand_lck(struct scsi_cmnd *srb, void (*done)(struct scsi_cmnd *)) { struct us_data *us = host_to_us(srb->device->host); /* pr_info("scsiglue --- queuecommand\n"); */ /* check for state-transition errors */ if (us->srb != NULL) { /* pr_info("Error in %s: us->srb = %p\n" __FUNCTION__, us->srb); */ return SCSI_MLQUEUE_HOST_BUSY; } /* fail the command if we are disconnecting */ if (test_bit(US_FLIDX_DISCONNECTING, &us->dflags)) { pr_info("Fail command during disconnect\n"); srb->result = DID_NO_CONNECT << 16; done(srb); return 0; } /* enqueue the command and wake up the control thread */ srb->scsi_done = done; us->srb = srb; complete(&us->cmnd_ready); return 0; } static DEF_SCSI_QCMD(queuecommand) /*********************************************************************** * Error handling functions ***********************************************************************/ /* Command timeout and abort */ /* * command_abort() */ static int command_abort(struct scsi_cmnd *srb) { struct us_data *us = host_to_us(srb->device->host); /* pr_info("scsiglue --- command_abort\n"); */ scsi_lock(us_to_host(us)); if (us->srb != srb) { scsi_unlock(us_to_host(us)); printk("-- nothing to abort\n"); return FAILED; } set_bit(US_FLIDX_TIMED_OUT, &us->dflags); if (!test_bit(US_FLIDX_RESETTING, &us->dflags)) { set_bit(US_FLIDX_ABORTING, &us->dflags); usb_stor_stop_transport(us); } scsi_unlock(us_to_host(us)); /* Wait for the aborted command to finish */ wait_for_completion(&us->notify); return SUCCESS; } /* This invokes the transport reset mechanism to reset the state of the * device. 
*/ /* * device_reset() */ static int device_reset(struct scsi_cmnd *srb) { struct us_data *us = host_to_us(srb->device->host); int result; /* pr_info("scsiglue --- device_reset\n"); */ /* lock the device pointers and do the reset */ mutex_lock(&(us->dev_mutex)); result = us->transport_reset(us); mutex_unlock(&us->dev_mutex); return result < 0 ? FAILED : SUCCESS; } /* * bus_reset() */ static int bus_reset(struct scsi_cmnd *srb) { struct us_data *us = host_to_us(srb->device->host); int result; /* pr_info("scsiglue --- bus_reset\n"); */ result = usb_stor_port_reset(us); return result < 0 ? FAILED : SUCCESS; } /* * usb_stor_report_device_reset() */ void usb_stor_report_device_reset(struct us_data *us) { int i; struct Scsi_Host *host = us_to_host(us); /* pr_info("scsiglue --- usb_stor_report_device_reset\n"); */ scsi_report_device_reset(host, 0, 0); if (us->fflags & US_FL_SCM_MULT_TARG) { for (i = 1; i < host->max_id; ++i) scsi_report_device_reset(host, 0, i); } } /* * usb_stor_report_bus_reset() */ void usb_stor_report_bus_reset(struct us_data *us) { struct Scsi_Host *host = us_to_host(us); /* pr_info("scsiglue --- usb_stor_report_bus_reset\n"); */ scsi_lock(host); scsi_report_bus_reset(host, 0); scsi_unlock(host); } /*********************************************************************** * /proc/scsi/ functions ***********************************************************************/ /* we use this macro to help us write into the buffer */ #undef SPRINTF #define SPRINTF(args...) 
seq_printf(m, ##args) static int write_info(struct Scsi_Host *host, char *buffer, int length) { return length; } static int show_info(struct seq_file *m, struct Scsi_Host *host) { struct us_data *us = host_to_us(host); const char *string; /* print the controller name */ SPRINTF(" Host scsi%d: usb-storage\n", host->host_no); /* print product, vendor, and serial number strings */ if (us->pusb_dev->manufacturer) string = us->pusb_dev->manufacturer; else if (us->unusual_dev->vendorName) string = us->unusual_dev->vendorName; else string = "Unknown"; SPRINTF(" Vendor: %s\n", string); if (us->pusb_dev->product) string = us->pusb_dev->product; else if (us->unusual_dev->productName) string = us->unusual_dev->productName; else string = "Unknown"; SPRINTF(" Product: %s\n", string); if (us->pusb_dev->serial) string = us->pusb_dev->serial; else string = "None"; SPRINTF("Serial Number: %s\n", string); /* show the protocol and transport */ SPRINTF(" Protocol: %s\n", us->protocol_name); SPRINTF(" Transport: %s\n", us->transport_name); /* show the device flags */ SPRINTF(" Quirks:"); #define US_FLAG(name, value) \ do { \ if (us->fflags & value) \ SPRINTF(" " #name); \ } while (0); US_DO_ALL_FLAGS #undef US_FLAG seq_putc(m, '\n'); return 0; } /*********************************************************************** * Sysfs interface ***********************************************************************/ /* Output routine for the sysfs max_sectors file */ /* * show_max_sectors() */ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); /* pr_info("scsiglue --- ssize_t show_max_sectors\n"); */ return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue)); } /* Input routine for the sysfs max_sectors file */ /* * store_max_sectors() */ static ssize_t store_max_sectors(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = 
to_scsi_device(dev); unsigned short ms; /* pr_info("scsiglue --- ssize_t store_max_sectors\n"); */ if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) { blk_queue_max_hw_sectors(sdev->request_queue, ms); return strlen(buf); } return -EINVAL; } static DEVICE_ATTR(max_sectors, S_IRUGO | S_IWUSR, show_max_sectors, store_max_sectors); static struct device_attribute *sysfs_device_attr_list[] = {&dev_attr_max_sectors, NULL, }; /* this defines our host template, with which we'll allocate hosts */ /* * usb_stor_host_template() */ struct scsi_host_template usb_stor_host_template = { /* basic userland interface stuff */ .name = "eucr-storage", .proc_name = "eucr-storage", .write_info = write_info, .show_info = show_info, .info = host_info, /* command interface -- queued only */ .queuecommand = queuecommand, /* error and abort handlers */ .eh_abort_handler = command_abort, .eh_device_reset_handler = device_reset, .eh_bus_reset_handler = bus_reset, /* queue commands only, only one command per LUN */ .can_queue = 1, .cmd_per_lun = 1, /* unknown initiator id */ .this_id = -1, .slave_alloc = slave_alloc, .slave_configure = slave_configure, /* lots of sg segments can be handled */ .sg_tablesize = SG_ALL, /* limit the total size of a transfer to 120 KB */ .max_sectors = 240, /* merge commands... this seems to help performance, but * periodically someone should test to see which setting is more * optimal. 
*/ .use_clustering = 1, /* emulated HBA */ .emulated = 1, /* we do our own delay after a device or bus reset */ .skip_settle_delay = 1, /* sysfs device attributes */ .sdev_attrs = sysfs_device_attr_list, /* module management */ .module = THIS_MODULE }; /* To Report "Illegal Request: Invalid Field in CDB */ unsigned char usb_stor_sense_invalidCDB[18] = { [0] = 0x70, /* current error */ [2] = ILLEGAL_REQUEST, /* Illegal Request = 0x05 */ [7] = 0x0a, /* additional length */ [12] = 0x24 /* Invalid Field in CDB */ }; /*********************************************************************** * Scatter-gather transfer buffer access routines ***********************************************************************/ /* * usb_stor_access_xfer_buf() */ unsigned int usb_stor_access_xfer_buf(struct us_data *us, unsigned char *buffer, unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **sgptr, unsigned int *offset, enum xfer_buf_dir dir) { unsigned int cnt; /* pr_info("transport --- usb_stor_access_xfer_buf\n"); */ struct scatterlist *sg = *sgptr; if (!sg) sg = scsi_sglist(srb); cnt = 0; while (cnt < buflen && sg) { struct page *page = sg_page(sg) + ((sg->offset + *offset) >> PAGE_SHIFT); unsigned int poff = (sg->offset + *offset) & (PAGE_SIZE-1); unsigned int sglen = sg->length - *offset; if (sglen > buflen - cnt) { /* Transfer ends within this s-g entry */ sglen = buflen - cnt; *offset += sglen; } else { /* Transfer continues to next s-g entry */ *offset = 0; sg = sg_next(sg); } while (sglen > 0) { unsigned int plen = min(sglen, (unsigned int)PAGE_SIZE - poff); unsigned char *ptr = kmap(page); if (dir == TO_XFER_BUF) memcpy(ptr + poff, buffer + cnt, plen); else memcpy(buffer + cnt, ptr + poff, plen); kunmap(page); /* Start at the beginning of the next page */ poff = 0; ++page; cnt += plen; sglen -= plen; } } *sgptr = sg; /* Return the amount actually transferred */ return cnt; } /* * Store the contents of buffer into srb's transfer * buffer and set the SCSI residue. 
*/ /* * usb_stor_set_xfer_buf() */ void usb_stor_set_xfer_buf(struct us_data *us, unsigned char *buffer, unsigned int buflen, struct scsi_cmnd *srb, unsigned int dir) { unsigned int offset = 0; struct scatterlist *sg = NULL; /* pr_info("transport --- usb_stor_set_xfer_buf\n"); */ /* TO_XFER_BUF = 0, FROM_XFER_BUF = 1 */ buflen = min(buflen, scsi_bufflen(srb)); buflen = usb_stor_access_xfer_buf(us, buffer, buflen, srb, &sg, &offset, dir); if (buflen < scsi_bufflen(srb)) scsi_set_resid(srb, scsi_bufflen(srb) - buflen); }
gpl-2.0
MoKee/android_kernel_samsung_lentislte
drivers/net/ethernet/sun/sunqe.c
2087
25758
/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver. * Once again I am out to prove that every ethernet * controller out there can be most efficiently programmed * if you make it look like a LANCE. * * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/crc32.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> #include <asm/idprom.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/auxio.h> #include <asm/pgtable.h> #include <asm/irq.h> #include "sunqe.h" #define DRV_NAME "sunqe" #define DRV_VERSION "4.1" #define DRV_RELDATE "August 27, 2008" #define DRV_AUTHOR "David S. 
Miller (davem@davemloft.net)" static char version[] = DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver"); MODULE_LICENSE("GPL"); static struct sunqec *root_qec_dev; static void qe_set_multicast(struct net_device *dev); #define QEC_RESET_TRIES 200 static inline int qec_global_reset(void __iomem *gregs) { int tries = QEC_RESET_TRIES; sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL); while (--tries) { u32 tmp = sbus_readl(gregs + GLOB_CTRL); if (tmp & GLOB_CTRL_RESET) { udelay(20); continue; } break; } if (tries) return 0; printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n"); return -1; } #define MACE_RESET_RETRIES 200 #define QE_RESET_RETRIES 200 static inline int qe_stop(struct sunqe *qep) { void __iomem *cregs = qep->qcregs; void __iomem *mregs = qep->mregs; int tries; /* Reset the MACE, then the QEC channel. */ sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG); tries = MACE_RESET_RETRIES; while (--tries) { u8 tmp = sbus_readb(mregs + MREGS_BCONFIG); if (tmp & MREGS_BCONFIG_RESET) { udelay(20); continue; } break; } if (!tries) { printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n"); return -1; } sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL); tries = QE_RESET_RETRIES; while (--tries) { u32 tmp = sbus_readl(cregs + CREG_CTRL); if (tmp & CREG_CTRL_RESET) { udelay(20); continue; } break; } if (!tries) { printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n"); return -1; } return 0; } static void qe_init_rings(struct sunqe *qep) { struct qe_init_block *qb = qep->qe_block; struct sunqe_buffers *qbufs = qep->buffers; __u32 qbufs_dvma = qep->buffers_dvma; int i; qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; memset(qb, 0, sizeof(struct qe_init_block)); memset(qbufs, 0, sizeof(struct sunqe_buffers)); for (i = 0; i < RX_RING_SIZE; i++) { qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i); 
qb->qe_rxd[i].rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH)); } } static int qe_init(struct sunqe *qep, int from_irq) { struct sunqec *qecp = qep->parent; void __iomem *cregs = qep->qcregs; void __iomem *mregs = qep->mregs; void __iomem *gregs = qecp->gregs; unsigned char *e = &qep->dev->dev_addr[0]; u32 tmp; int i; /* Shut it up. */ if (qe_stop(qep)) return -EAGAIN; /* Setup initial rx/tx init block pointers. */ sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); /* Enable/mask the various irq's. */ sbus_writel(0, cregs + CREG_RIMASK); sbus_writel(1, cregs + CREG_TIMASK); sbus_writel(0, cregs + CREG_QMASK); sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK); /* Setup the FIFO pointers into QEC local memory. */ tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE); sbus_writel(tmp, cregs + CREG_RXRBUFPTR); sbus_writel(tmp, cregs + CREG_RXWBUFPTR); tmp = sbus_readl(cregs + CREG_RXRBUFPTR) + sbus_readl(gregs + GLOB_RSIZE); sbus_writel(tmp, cregs + CREG_TXRBUFPTR); sbus_writel(tmp, cregs + CREG_TXWBUFPTR); /* Clear the channel collision counter. */ sbus_writel(0, cregs + CREG_CCNT); /* For 10baseT, inter frame space nor throttle seems to be necessary. */ sbus_writel(0, cregs + CREG_PIPG); /* Now dork with the AMD MACE. */ sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG); sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL); sbus_writeb(0, mregs + MREGS_RXFCNTL); /* The QEC dma's the rx'd packets from local memory out to main memory, * and therefore it interrupts when the packet reception is "complete". * So don't listen for the MACE talking about it. 
*/ sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK); sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG); sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 | MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU), mregs + MREGS_FCONFIG); /* Only usable interface on QuadEther is twisted pair. */ sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG); /* Tell MACE we are changing the ether address. */ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET, mregs + MREGS_IACONFIG); while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); sbus_writeb(e[0], mregs + MREGS_ETHADDR); sbus_writeb(e[1], mregs + MREGS_ETHADDR); sbus_writeb(e[2], mregs + MREGS_ETHADDR); sbus_writeb(e[3], mregs + MREGS_ETHADDR); sbus_writeb(e[4], mregs + MREGS_ETHADDR); sbus_writeb(e[5], mregs + MREGS_ETHADDR); /* Clear out the address filter. */ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, mregs + MREGS_IACONFIG); while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); for (i = 0; i < 8; i++) sbus_writeb(0, mregs + MREGS_FILTER); /* Address changes are now complete. */ sbus_writeb(0, mregs + MREGS_IACONFIG); qe_init_rings(qep); /* Wait a little bit for the link to come up... */ mdelay(5); if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) { int tries = 50; while (--tries) { u8 tmp; mdelay(5); barrier(); tmp = sbus_readb(mregs + MREGS_PHYCONFIG); if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0) break; } if (tries == 0) printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name); } /* Missed packet counter is cleared on a read. */ sbus_readb(mregs + MREGS_MPCNT); /* Reload multicast information, this will enable the receiver * and transmitter. */ qe_set_multicast(qep->dev); /* QEC should now start to show interrupts. */ return 0; } /* Grrr, certain error conditions completely lock up the AMD MACE, * so when we get these we _must_ reset the chip. 
 */
/* Decode the per-channel error bits of @qe_status, bump the matching
 * netdev statistics, and decide whether the MACE must be reset.
 *
 * Returns nonzero (and reinitializes the channel via qe_init()) when one
 * of the error conditions that wedges the AMD MACE was seen, zero when
 * the errors were only statistical.  The *COFLOW bits signal an 8-bit
 * hardware counter overflow, hence the fixed "+= 256" accounting.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	/* Transmit-side conditions. */
	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		dev->stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

	/* Collision counter overflowed (8-bit hardware counter). */
	if (qe_status & CREG_STAT_CCOFLOW) {
		dev->stats.tx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	/* Receive-side conditions; the *COFLOW blocks are counter overflows. */
	if (qe_status & CREG_STAT_RCCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;
		dev->stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		dev->stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	/* A full channel reinit is the only recovery for the wedge cases. */
	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */
/* Drain descriptors the hardware has handed back (RXD_OWN clear),
 * copy each frame out of the fixed per-channel ring buffer into a fresh
 * skb, and re-arm the slot RX_RING_SIZE entries ahead so the hardware
 * always has buffers available.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct net_device *dev = qep->dev;
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new;
	u32 flags;

	this = &rxbase[elem];
	/* RXD_OWN set means the descriptor still belongs to the chip. */
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		/* Slot to re-arm: one full ring ahead of the one just used. */
		struct qe_rxd *end_rxd =
			&rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
		} else {
			/* +2 so the IP header lands 4-byte aligned. */
			skb = netdev_alloc_skb(dev, len + 2);
			if (skb == NULL) {
				dev->stats.rx_dropped++;
			} else {
				skb_reserve(skb, 2);
				skb_put(skb, len);
				skb_copy_to_linear_data(skb, this_qbuf,
						 len);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
			}
		}
		/* Give the look-ahead descriptor back to the hardware. */
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
}

static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
	struct sunqec *qecp = dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now.  Each channel owns one nibble of the
	 * global status word.
	 */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				/* Nonzero return means the channel was reset;
				 * skip normal servicing this pass.
				 */
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
	next:
			;
		}
		qec_status >>= 4;
		channel++;
	}

	return IRQ_HANDLED;
}

/* ndo_open: program the MACE config (tx/rx enable) and bring the
 * channel up via qe_init().
 */
static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}

/* ndo_stop: quiesce the channel. */
static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qe_stop(qep);
	return 0;
}

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	int elem = qep->tx_old;

	/* Advance tx_old past every descriptor the chip has released
	 * (TXD_OWN cleared); stop at the first one still in flight.
	 */
	while (elem != qep->tx_new) {
		u32 flags = txbase[elem].tx_flags;

		if (flags & TXD_OWN)
			break;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}

/* ndo_tx_timeout: reclaim first; only reset the channel when the ring
 * is genuinely still full afterwards.
 */
static void qe_tx_timeout(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (! tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}

/* Get a packet queued to go onto the wire.
 */
/* ndo_start_xmit: copy the skb into the channel's fixed DMA bounce
 * buffer, hand the descriptor to the hardware, and kick the transmitter.
 * The skb is always consumed.  When the ring fills we stop the queue and
 * unmask the TX interrupt so the IRQ handler can restart it.
 */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	skb_copy_from_linear_data(skb, txbuf, len);

	/* Address must be written before ownership is flipped to the chip. */
	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/* ndo_set_rx_mode: program the MACE logical-address (multicast) filter.
 * Three cases: accept-all-multicast (filter all ones), promiscuous
 * (config bit only), or a 64-bit hash filter built from the multicast
 * list.  Filter bytes are loaded through the single MREGS_FILTER port,
 * eight writes in sequence.
 */
static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 new_mconfig = qep->mconfig;
	int i;
	u32 crc;

	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		/* Wait for the MACE to acknowledge the address change. */
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, dev) {
			/* Top 6 bits of the CRC select one of 64 filter bits. */
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		/* Program the qe with the new filter value. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver.  So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network.  Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}

/* Ethtool support...
*/ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { const struct linux_prom_registers *regs; struct sunqe *qep = netdev_priv(dev); struct platform_device *op; strlcpy(info->driver, "sunqe", sizeof(info->driver)); strlcpy(info->version, "3.0", sizeof(info->version)); op = qep->op; regs = of_get_property(op->dev.of_node, "reg", NULL); if (regs) snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d", regs->which_io); } static u32 qe_get_link(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); void __iomem *mregs = qep->mregs; u8 phyconfig; spin_lock_irq(&qep->lock); phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG); spin_unlock_irq(&qep->lock); return phyconfig & MREGS_PHYCONFIG_LSTAT; } static const struct ethtool_ops qe_ethtool_ops = { .get_drvinfo = qe_get_drvinfo, .get_link = qe_get_link, }; /* This is only called once at boot time for each card probed. */ static void qec_init_once(struct sunqec *qecp, struct platform_device *op) { u8 bsizes = qecp->qec_bursts; if (sbus_can_burst64() && (bsizes & DMA_BURST64)) { sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL); } else if (bsizes & DMA_BURST32) { sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL); } else { sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL); } /* Packetsize only used in 100baseT BigMAC configurations, * set it to zero just to be on the safe side. */ sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE); /* Set the local memsize register, divided up to one piece per QE channel. */ sbus_writel((resource_size(&op->resource[1]) >> 2), qecp->gregs + GLOB_MSIZE); /* Divide up the local QEC memory amongst the 4 QE receiver and * transmitter FIFOs. Basically it is (total / 2 / num_channels). 
*/ sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1, qecp->gregs + GLOB_TSIZE); sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1, qecp->gregs + GLOB_RSIZE); } static u8 qec_get_burst(struct device_node *dp) { u8 bsizes, bsizes_more; /* Find and set the burst sizes for the QEC, since it * does the actual dma for all 4 channels. */ bsizes = of_getintprop_default(dp, "burst-sizes", 0xff); bsizes &= 0xff; bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff); if (bsizes_more != 0xff) bsizes &= bsizes_more; if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 || (bsizes & DMA_BURST32)==0) bsizes = (DMA_BURST32 - 1); return bsizes; } static struct sunqec *get_qec(struct platform_device *child) { struct platform_device *op = to_platform_device(child->dev.parent); struct sunqec *qecp; qecp = dev_get_drvdata(&op->dev); if (!qecp) { qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL); if (qecp) { u32 ctrl; qecp->op = op; qecp->gregs = of_ioremap(&op->resource[0], 0, GLOB_REG_SIZE, "QEC Global Registers"); if (!qecp->gregs) goto fail; /* Make sure the QEC is in MACE mode. 
*/ ctrl = sbus_readl(qecp->gregs + GLOB_CTRL); ctrl &= 0xf0000000; if (ctrl != GLOB_CTRL_MMODE) { printk(KERN_ERR "qec: Not in MACE mode!\n"); goto fail; } if (qec_global_reset(qecp->gregs)) goto fail; qecp->qec_bursts = qec_get_burst(op->dev.of_node); qec_init_once(qecp, op); if (request_irq(op->archdata.irqs[0], qec_interrupt, IRQF_SHARED, "qec", (void *) qecp)) { printk(KERN_ERR "qec: Can't register irq.\n"); goto fail; } dev_set_drvdata(&op->dev, qecp); qecp->next_module = root_qec_dev; root_qec_dev = qecp; } } return qecp; fail: if (qecp->gregs) of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE); kfree(qecp); return NULL; } static const struct net_device_ops qec_ops = { .ndo_open = qe_open, .ndo_stop = qe_close, .ndo_start_xmit = qe_start_xmit, .ndo_set_rx_mode = qe_set_multicast, .ndo_tx_timeout = qe_tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int qec_ether_init(struct platform_device *op) { static unsigned version_printed; struct net_device *dev; struct sunqec *qecp; struct sunqe *qe; int i, res; if (version_printed++ == 0) printk(KERN_INFO "%s", version); dev = alloc_etherdev(sizeof(struct sunqe)); if (!dev) return -ENOMEM; memcpy(dev->dev_addr, idprom->id_ethaddr, 6); qe = netdev_priv(dev); res = -ENODEV; i = of_getintprop_default(op->dev.of_node, "channel#", -1); if (i == -1) goto fail; qe->channel = i; spin_lock_init(&qe->lock); qecp = get_qec(op); if (!qecp) goto fail; qecp->qes[qe->channel] = qe; qe->dev = dev; qe->parent = qecp; qe->op = op; res = -ENOMEM; qe->qcregs = of_ioremap(&op->resource[0], 0, CREG_REG_SIZE, "QEC Channel Registers"); if (!qe->qcregs) { printk(KERN_ERR "qe: Cannot map channel registers.\n"); goto fail; } qe->mregs = of_ioremap(&op->resource[1], 0, MREGS_REG_SIZE, "QE MACE Registers"); if (!qe->mregs) { printk(KERN_ERR "qe: Cannot map MACE registers.\n"); goto fail; } qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE, 
&qe->qblock_dvma, GFP_ATOMIC); qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers), &qe->buffers_dvma, GFP_ATOMIC); if (qe->qe_block == NULL || qe->qblock_dvma == 0 || qe->buffers == NULL || qe->buffers_dvma == 0) goto fail; /* Stop this QE. */ qe_stop(qe); SET_NETDEV_DEV(dev, &op->dev); dev->watchdog_timeo = 5*HZ; dev->irq = op->archdata.irqs[0]; dev->dma = 0; dev->ethtool_ops = &qe_ethtool_ops; dev->netdev_ops = &qec_ops; res = register_netdev(dev); if (res) goto fail; dev_set_drvdata(&op->dev, qe); printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel, dev->dev_addr); return 0; fail: if (qe->qcregs) of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE); if (qe->mregs) of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE); if (qe->qe_block) dma_free_coherent(&op->dev, PAGE_SIZE, qe->qe_block, qe->qblock_dvma); if (qe->buffers) dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers), qe->buffers, qe->buffers_dvma); free_netdev(dev); return res; } static int qec_sbus_probe(struct platform_device *op) { return qec_ether_init(op); } static int qec_sbus_remove(struct platform_device *op) { struct sunqe *qp = dev_get_drvdata(&op->dev); struct net_device *net_dev = qp->dev; unregister_netdev(net_dev); of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE); of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE); dma_free_coherent(&op->dev, PAGE_SIZE, qp->qe_block, qp->qblock_dvma); dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers), qp->buffers, qp->buffers_dvma); free_netdev(net_dev); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id qec_sbus_match[] = { { .name = "qe", }, {}, }; MODULE_DEVICE_TABLE(of, qec_sbus_match); static struct platform_driver qec_sbus_driver = { .driver = { .name = "qec", .owner = THIS_MODULE, .of_match_table = qec_sbus_match, }, .probe = qec_sbus_probe, .remove = qec_sbus_remove, }; static int __init qec_init(void) { return platform_driver_register(&qec_sbus_driver); } 
static void __exit qec_exit(void) { platform_driver_unregister(&qec_sbus_driver); while (root_qec_dev) { struct sunqec *next = root_qec_dev->next_module; struct platform_device *op = root_qec_dev->op; free_irq(op->archdata.irqs[0], (void *) root_qec_dev); of_iounmap(&op->resource[0], root_qec_dev->gregs, GLOB_REG_SIZE); kfree(root_qec_dev); root_qec_dev = next; } } module_init(qec_init); module_exit(qec_exit);
gpl-2.0
Zenfone2-Dev/kernel_4.3.y
tools/usb/ffs-aio-example/simple/host_app/test.c
2087
4554
/*
 * This is free and unencumbered software released into the public domain.
 *
 * Anyone is free to copy, modify, publish, use, compile, sell, or
 * distribute this software, either in source code form or as a compiled
 * binary, for any purpose, commercial or non-commercial, and by any
 * means.
 *
 * In jurisdictions that recognize copyright laws, the author or authors
 * of this software dedicate any and all copyright interest in the
 * software to the public domain. We make this dedication for the benefit
 * of the public at large and to the detriment of our heirs and
 * successors. We intend this dedication to be an overt act of
 * relinquishment in perpetuity of all present and future rights to this
 * software under copyright law.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * For more information, please refer to <http://unlicense.org/>
 */

#include <libusb.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define VENDOR	0x1d6b
#define PRODUCT	0x0105

#define BUF_LEN		8192

/*
 * struct test_state - describes test program state
 * @found: pointer to struct describing tested device
 * @ctx: context, set to NULL
 * @handle: handle of tested device
 * @attached: indicates that device was attached to kernel, and has to be
 *            reattached at the end of test program
 */
struct test_state {
	libusb_device *found;
	libusb_context *ctx;
	libusb_device_handle *handle;
	int attached;
};

/*
 * test_init - initialize test program
 *
 * Locates the gadget by VENDOR:PRODUCT, opens it, and claims interface 0
 * (detaching any kernel driver if needed).  Returns 0 on success, 1 on
 * failure.  On success @state owns the libusb context and open handle;
 * release them with test_exit().
 */
int test_init(struct test_state *state)
{
	int i, ret;
	ssize_t cnt;
	libusb_device **list;

	state->found = NULL;
	state->ctx = NULL;
	state->handle = NULL;
	state->attached = 0;

	ret = libusb_init(&state->ctx);
	if (ret) {
		printf("cannot init libusb: %s\n", libusb_error_name(ret));
		return 1;
	}

	cnt = libusb_get_device_list(state->ctx, &list);
	if (cnt <= 0) {
		printf("no devices found\n");
		goto error1;
	}

	for (i = 0; i < cnt; ++i) {
		libusb_device *dev = list[i];
		struct libusb_device_descriptor desc;
		ret = libusb_get_device_descriptor(dev, &desc);
		if (ret) {
			printf("unable to get device descriptor: %s\n",
			       libusb_error_name(ret));
			goto error2;
		}
		if (desc.idVendor == VENDOR && desc.idProduct == PRODUCT) {
			state->found = dev;
			break;
		}
	}

	if (!state->found) {
		printf("no devices found\n");
		goto error2;
	}

	ret = libusb_open(state->found, &state->handle);
	if (ret) {
		printf("cannot open device: %s\n", libusb_error_name(ret));
		goto error2;
	}

	if (libusb_claim_interface(state->handle, 0)) {
		ret = libusb_detach_kernel_driver(state->handle, 0);
		if (ret) {
			printf("unable to detach kernel driver: %s\n",
			       libusb_error_name(ret));
			goto error3;
		}
		state->attached = 1;
		ret = libusb_claim_interface(state->handle, 0);
		if (ret) {
			printf("cannot claim interface: %s\n",
			       libusb_error_name(ret));
			goto error4;
		}
	}

	/* libusb_open() took its own reference on the device, so the list
	 * (and the references it holds) can be released here without
	 * invalidating state->found while the handle stays open.  The
	 * original code leaked the list on this success path.
	 */
	libusb_free_device_list(list, 1);

	return 0;

error4:
	if (state->attached == 1)
		libusb_attach_kernel_driver(state->handle, 0);

error3:
	libusb_close(state->handle);

error2:
	libusb_free_device_list(list, 1);

error1:
	libusb_exit(state->ctx);

	return 1;
}

/*
 * test_exit - cleanup test program
 *
 * Releases the claimed interface, reattaches the kernel driver if one
 * was detached, and closes the handle and context opened by test_init().
 */
void test_exit(struct test_state *state)
{
	libusb_release_interface(state->handle, 0);
	if (state->attached == 1)
		libusb_attach_kernel_driver(state->handle, 0);
	libusb_close(state->handle);
	libusb_exit(state->ctx);
}

int main(void)
{
	struct test_state state;
	struct libusb_config_descriptor *conf;
	struct libusb_interface_descriptor const *iface;
	unsigned char in_addr, out_addr;

	if (test_init(&state))
		return 1;

	/* Check the result instead of dereferencing an unset pointer. */
	if (libusb_get_config_descriptor(state.found, 0, &conf)) {
		printf("cannot get config descriptor\n");
		test_exit(&state);
		return 1;
	}
	iface = &conf->interface[0].altsetting[0];
	in_addr = iface->endpoint[0].bEndpointAddress;
	out_addr = iface->endpoint[1].bEndpointAddress;
	/* Endpoint addresses are copied out; free the descriptor instead
	 * of leaking it.
	 */
	libusb_free_config_descriptor(conf);

	while (1) {
		static unsigned char buffer[BUF_LEN];
		int bytes;
		libusb_bulk_transfer(state.handle, in_addr, buffer, BUF_LEN,
				     &bytes, 500);
		libusb_bulk_transfer(state.handle, out_addr, buffer, BUF_LEN,
				     &bytes, 500);
	}
	test_exit(&state);
}
gpl-2.0
primiano/edison-kernel
fs/hppfs/hppfs.c
2087
16815
/* * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <linux/ctype.h> #include <linux/dcache.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/slab.h> #include <linux/statfs.h> #include <linux/types.h> #include <linux/pid_namespace.h> #include <linux/namei.h> #include <asm/uaccess.h> #include <os.h> static struct inode *get_inode(struct super_block *, struct dentry *); struct hppfs_data { struct list_head list; char contents[PAGE_SIZE - sizeof(struct list_head)]; }; struct hppfs_private { struct file *proc_file; int host_fd; loff_t len; struct hppfs_data *contents; }; struct hppfs_inode_info { struct dentry *proc_dentry; struct inode vfs_inode; }; static inline struct hppfs_inode_info *HPPFS_I(struct inode *inode) { return container_of(inode, struct hppfs_inode_info, vfs_inode); } #define HPPFS_SUPER_MAGIC 0xb00000ee static const struct super_operations hppfs_sbops; static int is_pid(struct dentry *dentry) { struct super_block *sb; int i; sb = dentry->d_sb; if (dentry->d_parent != sb->s_root) return 0; for (i = 0; i < dentry->d_name.len; i++) { if (!isdigit(dentry->d_name.name[i])) return 0; } return 1; } static char *dentry_name(struct dentry *dentry, int extra) { struct dentry *parent; char *root, *name; const char *seg_name; int len, seg_len; len = 0; parent = dentry; while (parent->d_parent != parent) { if (is_pid(parent)) len += strlen("pid") + 1; else len += parent->d_name.len + 1; parent = parent->d_parent; } root = "proc"; len += strlen(root); name = kmalloc(len + extra + 1, GFP_KERNEL); if (name == NULL) return NULL; name[len] = '\0'; parent = dentry; while (parent->d_parent != parent) { if (is_pid(parent)) { seg_name = "pid"; seg_len = strlen("pid"); } else { seg_name = parent->d_name.name; seg_len = parent->d_name.len; } len -= seg_len + 1; name[len] = '/'; 
strncpy(&name[len + 1], seg_name, seg_len); parent = parent->d_parent; } strncpy(name, root, strlen(root)); return name; } static int file_removed(struct dentry *dentry, const char *file) { char *host_file; int extra, fd; extra = 0; if (file != NULL) extra += strlen(file) + 1; host_file = dentry_name(dentry, extra + strlen("/remove")); if (host_file == NULL) { printk(KERN_ERR "file_removed : allocation failed\n"); return -ENOMEM; } if (file != NULL) { strcat(host_file, "/"); strcat(host_file, file); } strcat(host_file, "/remove"); fd = os_open_file(host_file, of_read(OPENFLAGS()), 0); kfree(host_file); if (fd > 0) { os_close_file(fd); return 1; } return 0; } static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, unsigned int flags) { struct dentry *proc_dentry, *parent; struct qstr *name = &dentry->d_name; struct inode *inode; int err, deleted; deleted = file_removed(dentry, NULL); if (deleted < 0) return ERR_PTR(deleted); else if (deleted) return ERR_PTR(-ENOENT); parent = HPPFS_I(ino)->proc_dentry; mutex_lock(&parent->d_inode->i_mutex); proc_dentry = lookup_one_len(name->name, parent, name->len); mutex_unlock(&parent->d_inode->i_mutex); if (IS_ERR(proc_dentry)) return proc_dentry; err = -ENOMEM; inode = get_inode(ino->i_sb, proc_dentry); if (!inode) goto out; d_add(dentry, inode); return NULL; out: return ERR_PTR(err); } static const struct inode_operations hppfs_file_iops = { }; static ssize_t read_proc(struct file *file, char __user *buf, ssize_t count, loff_t *ppos, int is_user) { ssize_t (*read)(struct file *, char __user *, size_t, loff_t *); ssize_t n; read = file_inode(file)->i_fop->read; if (!is_user) set_fs(KERNEL_DS); n = (*read)(file, buf, count, &file->f_pos); if (!is_user) set_fs(USER_DS); if (ppos) *ppos = file->f_pos; return n; } static ssize_t hppfs_read_file(int fd, char __user *buf, ssize_t count) { ssize_t n; int cur, err; char *new_buf; n = -ENOMEM; new_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (new_buf == NULL) { 
printk(KERN_ERR "hppfs_read_file : kmalloc failed\n"); goto out; } n = 0; while (count > 0) { cur = min_t(ssize_t, count, PAGE_SIZE); err = os_read_file(fd, new_buf, cur); if (err < 0) { printk(KERN_ERR "hppfs_read : read failed, " "errno = %d\n", err); n = err; goto out_free; } else if (err == 0) break; if (copy_to_user(buf, new_buf, err)) { n = -EFAULT; goto out_free; } n += err; count -= err; } out_free: kfree(new_buf); out: return n; } static ssize_t hppfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct hppfs_private *hppfs = file->private_data; struct hppfs_data *data; loff_t off; int err; if (hppfs->contents != NULL) { int rem; if (*ppos >= hppfs->len) return 0; data = hppfs->contents; off = *ppos; while (off >= sizeof(data->contents)) { data = list_entry(data->list.next, struct hppfs_data, list); off -= sizeof(data->contents); } if (off + count > hppfs->len) count = hppfs->len - off; rem = copy_to_user(buf, &data->contents[off], count); *ppos += count - rem; if (rem > 0) return -EFAULT; } else if (hppfs->host_fd != -1) { err = os_seek_file(hppfs->host_fd, *ppos); if (err) { printk(KERN_ERR "hppfs_read : seek failed, " "errno = %d\n", err); return err; } err = hppfs_read_file(hppfs->host_fd, buf, count); if (err < 0) { printk(KERN_ERR "hppfs_read: read failed: %d\n", err); return err; } count = err; if (count > 0) *ppos += count; } else count = read_proc(hppfs->proc_file, buf, count, ppos, 1); return count; } static ssize_t hppfs_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { struct hppfs_private *data = file->private_data; struct file *proc_file = data->proc_file; ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *); write = file_inode(proc_file)->i_fop->write; return (*write)(proc_file, buf, len, ppos); } static int open_host_sock(char *host_file, int *filter_out) { char *end; int fd; end = &host_file[strlen(host_file)]; strcpy(end, "/rw"); *filter_out = 1; fd = 
os_connect_socket(host_file); if (fd > 0) return fd; strcpy(end, "/r"); *filter_out = 0; fd = os_connect_socket(host_file); return fd; } static void free_contents(struct hppfs_data *head) { struct hppfs_data *data; struct list_head *ele, *next; if (head == NULL) return; list_for_each_safe(ele, next, &head->list) { data = list_entry(ele, struct hppfs_data, list); kfree(data); } kfree(head); } static struct hppfs_data *hppfs_get_data(int fd, int filter, struct file *proc_file, struct file *hppfs_file, loff_t *size_out) { struct hppfs_data *data, *new, *head; int n, err; err = -ENOMEM; data = kmalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) { printk(KERN_ERR "hppfs_get_data : head allocation failed\n"); goto failed; } INIT_LIST_HEAD(&data->list); head = data; *size_out = 0; if (filter) { while ((n = read_proc(proc_file, data->contents, sizeof(data->contents), NULL, 0)) > 0) os_write_file(fd, data->contents, n); err = os_shutdown_socket(fd, 0, 1); if (err) { printk(KERN_ERR "hppfs_get_data : failed to shut down " "socket\n"); goto failed_free; } } while (1) { n = os_read_file(fd, data->contents, sizeof(data->contents)); if (n < 0) { err = n; printk(KERN_ERR "hppfs_get_data : read failed, " "errno = %d\n", err); goto failed_free; } else if (n == 0) break; *size_out += n; if (n < sizeof(data->contents)) break; new = kmalloc(sizeof(*data), GFP_KERNEL); if (new == 0) { printk(KERN_ERR "hppfs_get_data : data allocation " "failed\n"); err = -ENOMEM; goto failed_free; } INIT_LIST_HEAD(&new->list); list_add(&new->list, &data->list); data = new; } return head; failed_free: free_contents(head); failed: return ERR_PTR(err); } static struct hppfs_private *hppfs_data(void) { struct hppfs_private *data; data = kmalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) return data; *data = ((struct hppfs_private ) { .host_fd = -1, .len = -1, .contents = NULL } ); return data; } static int file_mode(int fmode) { if (fmode == (FMODE_READ | FMODE_WRITE)) return O_RDWR; if (fmode == 
FMODE_READ) return O_RDONLY; if (fmode == FMODE_WRITE) return O_WRONLY; return 0; } static int hppfs_open(struct inode *inode, struct file *file) { const struct cred *cred = file->f_cred; struct hppfs_private *data; struct path path; char *host_file; int err, fd, type, filter; err = -ENOMEM; data = hppfs_data(); if (data == NULL) goto out; host_file = dentry_name(file->f_path.dentry, strlen("/rw")); if (host_file == NULL) goto out_free2; path.mnt = inode->i_sb->s_fs_info; path.dentry = HPPFS_I(inode)->proc_dentry; data->proc_file = dentry_open(&path, file_mode(file->f_mode), cred); err = PTR_ERR(data->proc_file); if (IS_ERR(data->proc_file)) goto out_free1; type = os_file_type(host_file); if (type == OS_TYPE_FILE) { fd = os_open_file(host_file, of_read(OPENFLAGS()), 0); if (fd >= 0) data->host_fd = fd; else printk(KERN_ERR "hppfs_open : failed to open '%s', " "errno = %d\n", host_file, -fd); data->contents = NULL; } else if (type == OS_TYPE_DIR) { fd = open_host_sock(host_file, &filter); if (fd > 0) { data->contents = hppfs_get_data(fd, filter, data->proc_file, file, &data->len); if (!IS_ERR(data->contents)) data->host_fd = fd; } else printk(KERN_ERR "hppfs_open : failed to open a socket " "in '%s', errno = %d\n", host_file, -fd); } kfree(host_file); file->private_data = data; return 0; out_free1: kfree(host_file); out_free2: free_contents(data->contents); kfree(data); out: return err; } static int hppfs_dir_open(struct inode *inode, struct file *file) { const struct cred *cred = file->f_cred; struct hppfs_private *data; struct path path; int err; err = -ENOMEM; data = hppfs_data(); if (data == NULL) goto out; path.mnt = inode->i_sb->s_fs_info; path.dentry = HPPFS_I(inode)->proc_dentry; data->proc_file = dentry_open(&path, file_mode(file->f_mode), cred); err = PTR_ERR(data->proc_file); if (IS_ERR(data->proc_file)) goto out_free; file->private_data = data; return 0; out_free: kfree(data); out: return err; } static loff_t hppfs_llseek(struct file *file, loff_t off, 
int where) { struct hppfs_private *data = file->private_data; struct file *proc_file = data->proc_file; loff_t (*llseek)(struct file *, loff_t, int); loff_t ret; llseek = file_inode(proc_file)->i_fop->llseek; if (llseek != NULL) { ret = (*llseek)(proc_file, off, where); if (ret < 0) return ret; } return default_llseek(file, off, where); } static int hppfs_release(struct inode *inode, struct file *file) { struct hppfs_private *data = file->private_data; struct file *proc_file = data->proc_file; if (proc_file) fput(proc_file); kfree(data); return 0; } static const struct file_operations hppfs_file_fops = { .owner = NULL, .llseek = hppfs_llseek, .read = hppfs_read, .write = hppfs_write, .open = hppfs_open, .release = hppfs_release, }; struct hppfs_dirent { void *vfs_dirent; filldir_t filldir; struct dentry *dentry; }; static int hppfs_filldir(void *d, const char *name, int size, loff_t offset, u64 inode, unsigned int type) { struct hppfs_dirent *dirent = d; if (file_removed(dirent->dentry, name)) return 0; return (*dirent->filldir)(dirent->vfs_dirent, name, size, offset, inode, type); } static int hppfs_readdir(struct file *file, void *ent, filldir_t filldir) { struct hppfs_private *data = file->private_data; struct file *proc_file = data->proc_file; int (*readdir)(struct file *, void *, filldir_t); struct hppfs_dirent dirent = ((struct hppfs_dirent) { .vfs_dirent = ent, .filldir = filldir, .dentry = file->f_path.dentry }); int err; readdir = file_inode(proc_file)->i_fop->readdir; proc_file->f_pos = file->f_pos; err = (*readdir)(proc_file, &dirent, hppfs_filldir); file->f_pos = proc_file->f_pos; return err; } static const struct file_operations hppfs_dir_fops = { .owner = NULL, .readdir = hppfs_readdir, .open = hppfs_dir_open, .llseek = default_llseek, .release = hppfs_release, }; static int hppfs_statfs(struct dentry *dentry, struct kstatfs *sf) { sf->f_blocks = 0; sf->f_bfree = 0; sf->f_bavail = 0; sf->f_files = 0; sf->f_ffree = 0; sf->f_type = HPPFS_SUPER_MAGIC; 
return 0; } static struct inode *hppfs_alloc_inode(struct super_block *sb) { struct hppfs_inode_info *hi; hi = kmalloc(sizeof(*hi), GFP_KERNEL); if (!hi) return NULL; hi->proc_dentry = NULL; inode_init_once(&hi->vfs_inode); return &hi->vfs_inode; } void hppfs_evict_inode(struct inode *ino) { clear_inode(ino); dput(HPPFS_I(ino)->proc_dentry); mntput(ino->i_sb->s_fs_info); } static void hppfs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kfree(HPPFS_I(inode)); } static void hppfs_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, hppfs_i_callback); } static const struct super_operations hppfs_sbops = { .alloc_inode = hppfs_alloc_inode, .destroy_inode = hppfs_destroy_inode, .evict_inode = hppfs_evict_inode, .statfs = hppfs_statfs, }; static int hppfs_readlink(struct dentry *dentry, char __user *buffer, int buflen) { struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; return proc_dentry->d_inode->i_op->readlink(proc_dentry, buffer, buflen); } static void *hppfs_follow_link(struct dentry *dentry, struct nameidata *nd) { struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; return proc_dentry->d_inode->i_op->follow_link(proc_dentry, nd); } static void hppfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) { struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; if (proc_dentry->d_inode->i_op->put_link) proc_dentry->d_inode->i_op->put_link(proc_dentry, nd, cookie); } static const struct inode_operations hppfs_dir_iops = { .lookup = hppfs_lookup, }; static const struct inode_operations hppfs_link_iops = { .readlink = hppfs_readlink, .follow_link = hppfs_follow_link, .put_link = hppfs_put_link, }; static struct inode *get_inode(struct super_block *sb, struct dentry *dentry) { struct inode *proc_ino = dentry->d_inode; struct inode *inode = new_inode(sb); if (!inode) { dput(dentry); return NULL; } if (S_ISDIR(dentry->d_inode->i_mode)) { inode->i_op = 
&hppfs_dir_iops; inode->i_fop = &hppfs_dir_fops; } else if (S_ISLNK(dentry->d_inode->i_mode)) { inode->i_op = &hppfs_link_iops; inode->i_fop = &hppfs_file_fops; } else { inode->i_op = &hppfs_file_iops; inode->i_fop = &hppfs_file_fops; } HPPFS_I(inode)->proc_dentry = dentry; inode->i_uid = proc_ino->i_uid; inode->i_gid = proc_ino->i_gid; inode->i_atime = proc_ino->i_atime; inode->i_mtime = proc_ino->i_mtime; inode->i_ctime = proc_ino->i_ctime; inode->i_ino = proc_ino->i_ino; inode->i_mode = proc_ino->i_mode; set_nlink(inode, proc_ino->i_nlink); inode->i_size = proc_ino->i_size; inode->i_blocks = proc_ino->i_blocks; return inode; } static int hppfs_fill_super(struct super_block *sb, void *d, int silent) { struct inode *root_inode; struct vfsmount *proc_mnt; int err = -ENOENT; proc_mnt = mntget(task_active_pid_ns(current)->proc_mnt); if (IS_ERR(proc_mnt)) goto out; sb->s_blocksize = 1024; sb->s_blocksize_bits = 10; sb->s_magic = HPPFS_SUPER_MAGIC; sb->s_op = &hppfs_sbops; sb->s_fs_info = proc_mnt; err = -ENOMEM; root_inode = get_inode(sb, dget(proc_mnt->mnt_root)); sb->s_root = d_make_root(root_inode); if (!sb->s_root) goto out_mntput; return 0; out_mntput: mntput(proc_mnt); out: return(err); } static struct dentry *hppfs_read_super(struct file_system_type *type, int flags, const char *dev_name, void *data) { return mount_nodev(type, flags, data, hppfs_fill_super); } static struct file_system_type hppfs_type = { .owner = THIS_MODULE, .name = "hppfs", .mount = hppfs_read_super, .kill_sb = kill_anon_super, .fs_flags = 0, }; MODULE_ALIAS_FS("hppfs"); static int __init init_hppfs(void) { return register_filesystem(&hppfs_type); } static void __exit exit_hppfs(void) { unregister_filesystem(&hppfs_type); } module_init(init_hppfs) module_exit(exit_hppfs) MODULE_LICENSE("GPL");
gpl-2.0
gromaudio/linux-imx6
drivers/i2c/busses/i2c-piix4.c
4135
16006
/* Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl> and Philip Edelbrock <phil@netroedge.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Supports: Intel PIIX4, 440MX Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100 ATI IXP200, IXP300, IXP400, SB600, SB700, SB800 AMD Hudson-2 SMSC Victory66 Note: we assume there can only be one device, with one SMBus interface. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/dmi.h> #include <linux/acpi.h> #include <linux/io.h> /* PIIX4 SMBus address offsets */ #define SMBHSTSTS (0 + piix4_smba) #define SMBHSLVSTS (1 + piix4_smba) #define SMBHSTCNT (2 + piix4_smba) #define SMBHSTCMD (3 + piix4_smba) #define SMBHSTADD (4 + piix4_smba) #define SMBHSTDAT0 (5 + piix4_smba) #define SMBHSTDAT1 (6 + piix4_smba) #define SMBBLKDAT (7 + piix4_smba) #define SMBSLVCNT (8 + piix4_smba) #define SMBSHDWCMD (9 + piix4_smba) #define SMBSLVEVT (0xA + piix4_smba) #define SMBSLVDAT (0xC + piix4_smba) /* count for request_region */ #define SMBIOSIZE 8 /* PCI Address Constants */ #define SMBBA 0x090 #define SMBHSTCFG 0x0D2 #define SMBSLVC 0x0D3 #define SMBSHDW1 0x0D4 #define SMBSHDW2 0x0D5 #define SMBREV 0x0D6 /* Other settings */ #define 
MAX_TIMEOUT 500 #define ENABLE_INT9 0 /* PIIX4 constants */ #define PIIX4_QUICK 0x00 #define PIIX4_BYTE 0x04 #define PIIX4_BYTE_DATA 0x08 #define PIIX4_WORD_DATA 0x0C #define PIIX4_BLOCK_DATA 0x14 /* insmod parameters */ /* If force is set to anything different from 0, we forcibly enable the PIIX4. DANGEROUS! */ static int force; module_param (force, int, 0); MODULE_PARM_DESC(force, "Forcibly enable the PIIX4. DANGEROUS!"); /* If force_addr is set to anything different from 0, we forcibly enable the PIIX4 at the given address. VERY DANGEROUS! */ static int force_addr; module_param (force_addr, int, 0); MODULE_PARM_DESC(force_addr, "Forcibly enable the PIIX4 at the given address. " "EXTREMELY DANGEROUS!"); static unsigned short piix4_smba; static int srvrworks_csb5_delay; static struct pci_driver piix4_driver; static struct i2c_adapter piix4_adapter; static struct dmi_system_id __devinitdata piix4_dmi_blacklist[] = { { .ident = "Sapphire AM2RD790", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "SAPPHIRE Inc."), DMI_MATCH(DMI_BOARD_NAME, "PC-AM2RD790"), }, }, { .ident = "DFI Lanparty UT 790FX", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "DFI Inc."), DMI_MATCH(DMI_BOARD_NAME, "LP UT 790FX"), }, }, { } }; /* The IBM entry is in a separate table because we only check it on Intel-based systems */ static struct dmi_system_id __devinitdata piix4_dmi_ibm[] = { { .ident = "IBM", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, }, { }, }; static int __devinit piix4_setup(struct pci_dev *PIIX4_dev, const struct pci_device_id *id) { unsigned char temp; if ((PIIX4_dev->vendor == PCI_VENDOR_ID_SERVERWORKS) && (PIIX4_dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5)) srvrworks_csb5_delay = 1; /* On some motherboards, it was reported that accessing the SMBus caused severe hardware problems */ if (dmi_check_system(piix4_dmi_blacklist)) { dev_err(&PIIX4_dev->dev, "Accessing the SMBus on this system is unsafe!\n"); return -EPERM; } /* Don't access SMBus on IBM systems which get corrupted 
eeproms */ if (dmi_check_system(piix4_dmi_ibm) && PIIX4_dev->vendor == PCI_VENDOR_ID_INTEL) { dev_err(&PIIX4_dev->dev, "IBM system detected; this module " "may corrupt your serial eeprom! Refusing to load " "module!\n"); return -EPERM; } /* Determine the address of the SMBus areas */ if (force_addr) { piix4_smba = force_addr & 0xfff0; force = 0; } else { pci_read_config_word(PIIX4_dev, SMBBA, &piix4_smba); piix4_smba &= 0xfff0; if(piix4_smba == 0) { dev_err(&PIIX4_dev->dev, "SMBus base address " "uninitialized - upgrade BIOS or use " "force_addr=0xaddr\n"); return -ENODEV; } } if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) return -ENODEV; if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", piix4_smba); return -EBUSY; } pci_read_config_byte(PIIX4_dev, SMBHSTCFG, &temp); /* If force_addr is set, we program the new address here. Just to make sure, we disable the PIIX4 first. */ if (force_addr) { pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp & 0xfe); pci_write_config_word(PIIX4_dev, SMBBA, piix4_smba); pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp | 0x01); dev_info(&PIIX4_dev->dev, "WARNING: SMBus interface set to " "new address %04x!\n", piix4_smba); } else if ((temp & 1) == 0) { if (force) { /* This should never need to be done, but has been * noted that many Dell machines have the SMBus * interface on the PIIX4 disabled!? NOTE: This assumes * I/O space and other allocations WERE done by the * Bios! Don't complain if your hardware does weird * things after enabling this. :') Check for Bios * updates before resorting to this. 
*/ pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp | 1); dev_printk(KERN_NOTICE, &PIIX4_dev->dev, "WARNING: SMBus interface has been " "FORCEFULLY ENABLED!\n"); } else { dev_err(&PIIX4_dev->dev, "Host SMBus controller not enabled!\n"); release_region(piix4_smba, SMBIOSIZE); piix4_smba = 0; return -ENODEV; } } if (((temp & 0x0E) == 8) || ((temp & 0x0E) == 2)) dev_dbg(&PIIX4_dev->dev, "Using Interrupt 9 for SMBus.\n"); else if ((temp & 0x0E) == 0) dev_dbg(&PIIX4_dev->dev, "Using Interrupt SMI# for SMBus.\n"); else dev_err(&PIIX4_dev->dev, "Illegal Interrupt configuration " "(or code out of date)!\n"); pci_read_config_byte(PIIX4_dev, SMBREV, &temp); dev_info(&PIIX4_dev->dev, "SMBus Host Controller at 0x%x, revision %d\n", piix4_smba, temp); return 0; } static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev, const struct pci_device_id *id) { unsigned short smba_idx = 0xcd6; u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en = 0x2c; /* SB800 and later SMBus does not support forcing address */ if (force || force_addr) { dev_err(&PIIX4_dev->dev, "SMBus does not support " "forcing address!\n"); return -EINVAL; } /* Determine the address of the SMBus areas */ if (!request_region(smba_idx, 2, "smba_idx")) { dev_err(&PIIX4_dev->dev, "SMBus base address index region " "0x%x already in use!\n", smba_idx); return -EBUSY; } outb_p(smb_en, smba_idx); smba_en_lo = inb_p(smba_idx + 1); outb_p(smb_en + 1, smba_idx); smba_en_hi = inb_p(smba_idx + 1); release_region(smba_idx, 2); if ((smba_en_lo & 1) == 0) { dev_err(&PIIX4_dev->dev, "Host SMBus controller not enabled!\n"); return -ENODEV; } piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0; if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) return -ENODEV; if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", piix4_smba); return -EBUSY; } /* Request the SMBus I2C bus config region */ if (!request_region(piix4_smba + 
i2ccfg_offset, 1, "i2ccfg")) { dev_err(&PIIX4_dev->dev, "SMBus I2C bus config region " "0x%x already in use!\n", piix4_smba + i2ccfg_offset); release_region(piix4_smba, SMBIOSIZE); piix4_smba = 0; return -EBUSY; } i2ccfg = inb_p(piix4_smba + i2ccfg_offset); release_region(piix4_smba + i2ccfg_offset, 1); if (i2ccfg & 1) dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus.\n"); else dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus.\n"); dev_info(&PIIX4_dev->dev, "SMBus Host Controller at 0x%x, revision %d\n", piix4_smba, i2ccfg >> 4); return 0; } static int piix4_transaction(void) { int temp; int result = 0; int timeout = 0; dev_dbg(&piix4_adapter.dev, "Transaction (pre): CNT=%02x, CMD=%02x, " "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); /* Make sure the SMBus host is ready to start transmitting */ if ((temp = inb_p(SMBHSTSTS)) != 0x00) { dev_dbg(&piix4_adapter.dev, "SMBus busy (%02x). " "Resetting...\n", temp); outb_p(temp, SMBHSTSTS); if ((temp = inb_p(SMBHSTSTS)) != 0x00) { dev_err(&piix4_adapter.dev, "Failed! (%02x)\n", temp); return -EBUSY; } else { dev_dbg(&piix4_adapter.dev, "Successful!\n"); } } /* start the transaction by setting bit 6 */ outb_p(inb(SMBHSTCNT) | 0x040, SMBHSTCNT); /* We will always wait for a fraction of a second! (See PIIX4 docs errata) */ if (srvrworks_csb5_delay) /* Extra delay for SERVERWORKS_CSB5 */ msleep(2); else msleep(1); while ((++timeout < MAX_TIMEOUT) && ((temp = inb_p(SMBHSTSTS)) & 0x01)) msleep(1); /* If the SMBus is still busy, we give up */ if (timeout == MAX_TIMEOUT) { dev_err(&piix4_adapter.dev, "SMBus Timeout!\n"); result = -ETIMEDOUT; } if (temp & 0x10) { result = -EIO; dev_err(&piix4_adapter.dev, "Error: Failed bus transaction\n"); } if (temp & 0x08) { result = -EIO; dev_dbg(&piix4_adapter.dev, "Bus collision! SMBus may be " "locked until next hard reset. 
(sorry!)\n"); /* Clock stops and slave is stuck in mid-transmission */ } if (temp & 0x04) { result = -ENXIO; dev_dbg(&piix4_adapter.dev, "Error: no response!\n"); } if (inb_p(SMBHSTSTS) != 0x00) outb_p(inb(SMBHSTSTS), SMBHSTSTS); if ((temp = inb_p(SMBHSTSTS)) != 0x00) { dev_err(&piix4_adapter.dev, "Failed reset at end of " "transaction (%02x)\n", temp); } dev_dbg(&piix4_adapter.dev, "Transaction (post): CNT=%02x, CMD=%02x, " "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1)); return result; } /* Return negative errno on error. */ static s32 piix4_access(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { int i, len; int status; switch (size) { case I2C_SMBUS_QUICK: outb_p((addr << 1) | read_write, SMBHSTADD); size = PIIX4_QUICK; break; case I2C_SMBUS_BYTE: outb_p((addr << 1) | read_write, SMBHSTADD); if (read_write == I2C_SMBUS_WRITE) outb_p(command, SMBHSTCMD); size = PIIX4_BYTE; break; case I2C_SMBUS_BYTE_DATA: outb_p((addr << 1) | read_write, SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) outb_p(data->byte, SMBHSTDAT0); size = PIIX4_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: outb_p((addr << 1) | read_write, SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { outb_p(data->word & 0xff, SMBHSTDAT0); outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1); } size = PIIX4_WORD_DATA; break; case I2C_SMBUS_BLOCK_DATA: outb_p((addr << 1) | read_write, SMBHSTADD); outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) return -EINVAL; outb_p(len, SMBHSTDAT0); i = inb_p(SMBHSTCNT); /* Reset SMBBLKDAT */ for (i = 1; i <= len; i++) outb_p(data->block[i], SMBBLKDAT); } size = PIIX4_BLOCK_DATA; break; default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } outb_p((size & 
0x1C) + (ENABLE_INT9 & 1), SMBHSTCNT); status = piix4_transaction(); if (status) return status; if ((read_write == I2C_SMBUS_WRITE) || (size == PIIX4_QUICK)) return 0; switch (size) { case PIIX4_BYTE: case PIIX4_BYTE_DATA: data->byte = inb_p(SMBHSTDAT0); break; case PIIX4_WORD_DATA: data->word = inb_p(SMBHSTDAT0) + (inb_p(SMBHSTDAT1) << 8); break; case PIIX4_BLOCK_DATA: data->block[0] = inb_p(SMBHSTDAT0); if (data->block[0] == 0 || data->block[0] > I2C_SMBUS_BLOCK_MAX) return -EPROTO; i = inb_p(SMBHSTCNT); /* Reset SMBBLKDAT */ for (i = 1; i <= data->block[0]; i++) data->block[i] = inb_p(SMBBLKDAT); break; } return 0; } static u32 piix4_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = piix4_access, .functionality = piix4_func, }; static struct i2c_adapter piix4_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, }; static const struct pci_device_id piix4_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) }, { PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 
PCI_DEVICE_ID_SERVERWORKS_HT1100LD) }, { 0, } }; MODULE_DEVICE_TABLE (pci, piix4_ids); static int __devinit piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) { int retval; if ((dev->vendor == PCI_VENDOR_ID_ATI && dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && dev->revision >= 0x40) || dev->vendor == PCI_VENDOR_ID_AMD) /* base address location etc changed in SB800 */ retval = piix4_setup_sb800(dev, id); else retval = piix4_setup(dev, id); if (retval) return retval; /* set up the sysfs linkage to our parent device */ piix4_adapter.dev.parent = &dev->dev; snprintf(piix4_adapter.name, sizeof(piix4_adapter.name), "SMBus PIIX4 adapter at %04x", piix4_smba); if ((retval = i2c_add_adapter(&piix4_adapter))) { dev_err(&dev->dev, "Couldn't register adapter!\n"); release_region(piix4_smba, SMBIOSIZE); piix4_smba = 0; } return retval; } static void __devexit piix4_remove(struct pci_dev *dev) { if (piix4_smba) { i2c_del_adapter(&piix4_adapter); release_region(piix4_smba, SMBIOSIZE); piix4_smba = 0; } } static struct pci_driver piix4_driver = { .name = "piix4_smbus", .id_table = piix4_ids, .probe = piix4_probe, .remove = __devexit_p(piix4_remove), }; static int __init i2c_piix4_init(void) { return pci_register_driver(&piix4_driver); } static void __exit i2c_piix4_exit(void) { pci_unregister_driver(&piix4_driver); } MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " "Philip Edelbrock <phil@netroedge.com>"); MODULE_DESCRIPTION("PIIX4 SMBus driver"); MODULE_LICENSE("GPL"); module_init(i2c_piix4_init); module_exit(i2c_piix4_exit);
gpl-2.0
Jackeagle/htc_butterfly_asia_india_4.4.2_kernel
arch/arm/mach-omap2/sdram-nokia.c
4903
6412
/* * SDRC register values for Nokia boards * * Copyright (C) 2008, 2010-2011 Nokia Corporation * * Lauri Leukkunen <lauri.leukkunen@nokia.com> * * Original code by Juha Yrjola <juha.yrjola@solidboot.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include "common.h" #include <plat/clock.h> #include <plat/sdrc.h> #include "sdram-nokia.h" /* In picoseconds, except for tREF (ns), tXP, tCKE, tWTR (clks) */ struct sdram_timings { u32 casl; u32 tDAL; u32 tDPL; u32 tRRD; u32 tRCD; u32 tRP; u32 tRAS; u32 tRC; u32 tRFC; u32 tXSR; u32 tREF; /* in ns */ u32 tXP; u32 tCKE; u32 tWTR; }; static const struct sdram_timings nokia_97dot6mhz_timings[] = { { .casl = 3, .tDAL = 30725, .tDPL = 15362, .tRRD = 10241, .tRCD = 20483, .tRP = 15362, .tRAS = 40967, .tRC = 56330, .tRFC = 138266, .tXSR = 204839, .tREF = 7798, .tXP = 2, .tCKE = 4, .tWTR = 2, }, }; static const struct sdram_timings nokia_166mhz_timings[] = { { .casl = 3, .tDAL = 33000, .tDPL = 15000, .tRRD = 12000, .tRCD = 22500, .tRP = 18000, .tRAS = 42000, .tRC = 66000, .tRFC = 138000, .tXSR = 200000, .tREF = 7800, .tXP = 2, .tCKE = 2, .tWTR = 2 }, }; static const struct sdram_timings nokia_195dot2mhz_timings[] = { { .casl = 3, .tDAL = 30725, .tDPL = 15362, .tRRD = 10241, .tRCD = 20483, .tRP = 15362, .tRAS = 40967, .tRC = 56330, .tRFC = 138266, .tXSR = 204839, .tREF = 7752, .tXP = 2, .tCKE = 4, .tWTR = 2, }, }; static const struct sdram_timings nokia_200mhz_timings[] = { { .casl = 3, .tDAL = 30000, .tDPL = 15000, .tRRD = 10000, .tRCD = 20000, .tRP = 15000, .tRAS = 40000, .tRC = 55000, .tRFC = 140000, .tXSR = 200000, .tREF = 7800, .tXP = 2, .tCKE = 4, .tWTR = 2 }, }; static const struct { long rate; struct sdram_timings const *data; } nokia_timings[] = { { 83000000, nokia_166mhz_timings }, { 
97600000, nokia_97dot6mhz_timings }, { 100000000, nokia_200mhz_timings }, { 166000000, nokia_166mhz_timings }, { 195200000, nokia_195dot2mhz_timings }, { 200000000, nokia_200mhz_timings }, }; static struct omap_sdrc_params nokia_sdrc_params[ARRAY_SIZE(nokia_timings) + 1]; static unsigned long sdrc_get_fclk_period(long rate) { /* In picoseconds */ return 1000000000 / rate; } static unsigned int sdrc_ps_to_ticks(unsigned int time_ps, long rate) { unsigned long tick_ps; /* Calculate in picosecs to yield more exact results */ tick_ps = sdrc_get_fclk_period(rate); return (time_ps + tick_ps - 1) / tick_ps; } #undef DEBUG #ifdef DEBUG static int set_sdrc_timing_regval(u32 *regval, int st_bit, int end_bit, int ticks, long rate, const char *name) #else static int set_sdrc_timing_regval(u32 *regval, int st_bit, int end_bit, int ticks) #endif { int mask, nr_bits; nr_bits = end_bit - st_bit + 1; if (ticks >= 1 << nr_bits) return -1; mask = (1 << nr_bits) - 1; *regval &= ~(mask << st_bit); *regval |= ticks << st_bit; #ifdef DEBUG printk(KERN_INFO "SDRC %s: %i ticks %i ns\n", name, ticks, (unsigned int)sdrc_get_fclk_period(rate) * ticks / 1000); #endif return 0; } #ifdef DEBUG #define SDRC_SET_ONE(reg, st, end, field, rate) \ if (set_sdrc_timing_regval((reg), (st), (end), \ memory_timings->field, (rate), #field) < 0) \ err = -1; #else #define SDRC_SET_ONE(reg, st, end, field, rate) \ if (set_sdrc_timing_regval((reg), (st), (end), \ memory_timings->field) < 0) \ err = -1; #endif #ifdef DEBUG static int set_sdrc_timing_regval_ps(u32 *regval, int st_bit, int end_bit, int time, long rate, const char *name) #else static int set_sdrc_timing_regval_ps(u32 *regval, int st_bit, int end_bit, int time, long rate) #endif { int ticks, ret; ret = 0; if (time == 0) ticks = 0; else ticks = sdrc_ps_to_ticks(time, rate); #ifdef DEBUG ret = set_sdrc_timing_regval(regval, st_bit, end_bit, ticks, rate, name); #else ret = set_sdrc_timing_regval(regval, st_bit, end_bit, ticks); #endif return ret; } 
#ifdef DEBUG #define SDRC_SET_ONE_PS(reg, st, end, field, rate) \ if (set_sdrc_timing_regval_ps((reg), (st), (end), \ memory_timings->field, \ (rate), #field) < 0) \ err = -1; #else #define SDRC_SET_ONE_PS(reg, st, end, field, rate) \ if (set_sdrc_timing_regval_ps((reg), (st), (end), \ memory_timings->field, (rate)) < 0) \ err = -1; #endif static int sdrc_timings(int id, long rate, const struct sdram_timings *memory_timings) { u32 ticks_per_ms; u32 rfr, l; u32 actim_ctrla = 0, actim_ctrlb = 0; u32 rfr_ctrl; int err = 0; long l3_rate = rate / 1000; SDRC_SET_ONE_PS(&actim_ctrla, 0, 4, tDAL, l3_rate); SDRC_SET_ONE_PS(&actim_ctrla, 6, 8, tDPL, l3_rate); SDRC_SET_ONE_PS(&actim_ctrla, 9, 11, tRRD, l3_rate); SDRC_SET_ONE_PS(&actim_ctrla, 12, 14, tRCD, l3_rate); SDRC_SET_ONE_PS(&actim_ctrla, 15, 17, tRP, l3_rate); SDRC_SET_ONE_PS(&actim_ctrla, 18, 21, tRAS, l3_rate); SDRC_SET_ONE_PS(&actim_ctrla, 22, 26, tRC, l3_rate); SDRC_SET_ONE_PS(&actim_ctrla, 27, 31, tRFC, l3_rate); SDRC_SET_ONE_PS(&actim_ctrlb, 0, 7, tXSR, l3_rate); SDRC_SET_ONE(&actim_ctrlb, 8, 10, tXP, l3_rate); SDRC_SET_ONE(&actim_ctrlb, 12, 14, tCKE, l3_rate); SDRC_SET_ONE(&actim_ctrlb, 16, 17, tWTR, l3_rate); ticks_per_ms = l3_rate; rfr = memory_timings[0].tREF * ticks_per_ms / 1000000; if (rfr > 65535 + 50) rfr = 65535; else rfr -= 50; #ifdef DEBUG printk(KERN_INFO "SDRC tREF: %i ticks\n", rfr); #endif l = rfr << 8; rfr_ctrl = l | 0x1; /* autorefresh, reload counter with 1xARCV */ nokia_sdrc_params[id].rate = rate; nokia_sdrc_params[id].actim_ctrla = actim_ctrla; nokia_sdrc_params[id].actim_ctrlb = actim_ctrlb; nokia_sdrc_params[id].rfr_ctrl = rfr_ctrl; nokia_sdrc_params[id].mr = 0x32; nokia_sdrc_params[id + 1].rate = 0; return err; } struct omap_sdrc_params *nokia_get_sdram_timings(void) { int err = 0; int i; for (i = 0; i < ARRAY_SIZE(nokia_timings); i++) { err |= sdrc_timings(i, nokia_timings[i].rate, nokia_timings[i].data); if (err) pr_err("%s: error with rate %ld: %d\n", __func__, nokia_timings[i].rate, 
err); } return err ? NULL : nokia_sdrc_params; }
gpl-2.0
mifl/android_kernel_pantech_p9090
drivers/isdn/hardware/eicon/io.c
5159
27973
/* * Copyright (c) Eicon Networks, 2002. * This source file is supplied for the use with Eicon Networks range of DIVA Server Adapters. * Eicon File Revision : 2.1 * This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include "platform.h" #include "di_defs.h" #include "pc.h" #include "pr_pc.h" #include "divasync.h" #define MIPS_SCOM #include "pkmaint.h" /* pc_main.h, packed in os-dependent fashion */ #include "di.h" #include "mi_pc.h" #include "io.h" extern ADAPTER * adapter[MAX_ADAPTER]; extern PISDN_ADAPTER IoAdapters[MAX_ADAPTER]; void request (PISDN_ADAPTER, ENTITY *); static void pcm_req (PISDN_ADAPTER, ENTITY *); /* -------------------------------------------------------------------------- local functions -------------------------------------------------------------------------- */ #define ReqFunc(N) \ static void Request##N(ENTITY *e) \ { if ( IoAdapters[N] ) (* IoAdapters[N]->DIRequest)(IoAdapters[N], e) ; } ReqFunc(0) ReqFunc(1) ReqFunc(2) ReqFunc(3) ReqFunc(4) ReqFunc(5) ReqFunc(6) ReqFunc(7) ReqFunc(8) ReqFunc(9) ReqFunc(10) ReqFunc(11) ReqFunc(12) ReqFunc(13) ReqFunc(14) ReqFunc(15) IDI_CALL Requests[MAX_ADAPTER] = { &Request0, &Request1, &Request2, &Request3, &Request4, &Request5, &Request6, &Request7, &Request8, &Request9, &Request10, &Request11, &Request12, &Request13, &Request14, &Request15 }; 
/*****************************************************************************/ /* This array should indicate all new services, that this version of XDI is able to provide to his clients */ static byte extended_xdi_features[DIVA_XDI_EXTENDED_FEATURES_MAX_SZ+1] = { (DIVA_XDI_EXTENDED_FEATURES_VALID | DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR | DIVA_XDI_EXTENDED_FEATURE_CAPI_PRMS | #if defined(DIVA_IDI_RX_DMA) DIVA_XDI_EXTENDED_FEATURE_CMA | DIVA_XDI_EXTENDED_FEATURE_RX_DMA | DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA | #endif DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC), 0 }; /*****************************************************************************/ void dump_xlog_buffer (PISDN_ADAPTER IoAdapter, Xdesc *xlogDesc) { dword logLen ; word *Xlog = xlogDesc->buf ; word logCnt = xlogDesc->cnt ; word logOut = xlogDesc->out / sizeof(*Xlog) ; DBG_FTL(("%s: ************* XLOG recovery (%d) *************", &IoAdapter->Name[0], (int)logCnt)) DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0])) for ( ; logCnt > 0 ; --logCnt ) { if ( !GET_WORD(&Xlog[logOut]) ) { if ( --logCnt == 0 ) break ; logOut = 0 ; } if ( GET_WORD(&Xlog[logOut]) <= (logOut * sizeof(*Xlog)) ) { if ( logCnt > 2 ) { DBG_FTL(("Possibly corrupted XLOG: %d entries left", (int)logCnt)) } break ; } logLen = (dword)(GET_WORD(&Xlog[logOut]) - (logOut * sizeof(*Xlog))) ; DBG_FTL_MXLOG(( (char *)&Xlog[logOut + 1], (dword)(logLen - 2) )) logOut = (GET_WORD(&Xlog[logOut]) + 1) / sizeof(*Xlog) ; } DBG_FTL(("%s: ***************** end of XLOG *****************", &IoAdapter->Name[0])) } /*****************************************************************************/ #if defined(XDI_USE_XLOG) static char *(ExceptionCauseTable[]) = { "Interrupt", "TLB mod /IBOUND", "TLB load /DBOUND", "TLB store", "Address error load", "Address error store", "Instruction load bus error", "Data load/store bus error", "Syscall", "Breakpoint", "Reverd instruction", "Coprocessor unusable", "Overflow", "TRAP", "VCEI", "Floating Point Exception", "CP2", 
"Reserved 17", "Reserved 18", "Reserved 19", "Reserved 20", "Reserved 21", "Reserved 22", "WATCH", "Reserved 24", "Reserved 25", "Reserved 26", "Reserved 27", "Reserved 28", "Reserved 29", "Reserved 30", "VCED" } ; #endif void dump_trap_frame (PISDN_ADAPTER IoAdapter, byte __iomem *exceptionFrame) { MP_XCPTC __iomem *xcept = (MP_XCPTC __iomem *)exceptionFrame ; dword __iomem *regs; regs = &xcept->regs[0] ; DBG_FTL(("%s: ***************** CPU TRAPPED *****************", &IoAdapter->Name[0])) DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0])) DBG_FTL(("Cause: %s", ExceptionCauseTable[(READ_DWORD(&xcept->cr) & 0x0000007c) >> 2])) DBG_FTL(("sr 0x%08x cr 0x%08x epc 0x%08x vaddr 0x%08x", READ_DWORD(&xcept->sr), READ_DWORD(&xcept->cr), READ_DWORD(&xcept->epc), READ_DWORD(&xcept->vaddr))) DBG_FTL(("zero 0x%08x at 0x%08x v0 0x%08x v1 0x%08x", READ_DWORD(&regs[ 0]), READ_DWORD(&regs[ 1]), READ_DWORD(&regs[ 2]), READ_DWORD(&regs[ 3]))) DBG_FTL(("a0 0x%08x a1 0x%08x a2 0x%08x a3 0x%08x", READ_DWORD(&regs[ 4]), READ_DWORD(&regs[ 5]), READ_DWORD(&regs[ 6]), READ_DWORD(&regs[ 7]))) DBG_FTL(("t0 0x%08x t1 0x%08x t2 0x%08x t3 0x%08x", READ_DWORD(&regs[ 8]), READ_DWORD(&regs[ 9]), READ_DWORD(&regs[10]), READ_DWORD(&regs[11]))) DBG_FTL(("t4 0x%08x t5 0x%08x t6 0x%08x t7 0x%08x", READ_DWORD(&regs[12]), READ_DWORD(&regs[13]), READ_DWORD(&regs[14]), READ_DWORD(&regs[15]))) DBG_FTL(("s0 0x%08x s1 0x%08x s2 0x%08x s3 0x%08x", READ_DWORD(&regs[16]), READ_DWORD(&regs[17]), READ_DWORD(&regs[18]), READ_DWORD(&regs[19]))) DBG_FTL(("s4 0x%08x s5 0x%08x s6 0x%08x s7 0x%08x", READ_DWORD(&regs[20]), READ_DWORD(&regs[21]), READ_DWORD(&regs[22]), READ_DWORD(&regs[23]))) DBG_FTL(("t8 0x%08x t9 0x%08x k0 0x%08x k1 0x%08x", READ_DWORD(&regs[24]), READ_DWORD(&regs[25]), READ_DWORD(&regs[26]), READ_DWORD(&regs[27]))) DBG_FTL(("gp 0x%08x sp 0x%08x s8 0x%08x ra 0x%08x", READ_DWORD(&regs[28]), READ_DWORD(&regs[29]), READ_DWORD(&regs[30]), READ_DWORD(&regs[31]))) DBG_FTL(("md 0x%08x|%08x resvd 
0x%08x class 0x%08x", READ_DWORD(&xcept->mdhi), READ_DWORD(&xcept->mdlo), READ_DWORD(&xcept->reseverd), READ_DWORD(&xcept->xclass))) } /* -------------------------------------------------------------------------- Real XDI Request function -------------------------------------------------------------------------- */ void request(PISDN_ADAPTER IoAdapter, ENTITY * e) { byte i; diva_os_spin_lock_magic_t irql; /* * if the Req field in the entity structure is 0, * we treat this request as a special function call */ if ( !e->Req ) { IDI_SYNC_REQ *syncReq = (IDI_SYNC_REQ *)e ; switch (e->Rc) { #if defined(DIVA_IDI_RX_DMA) case IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION: { diva_xdi_dma_descriptor_operation_t* pI = \ &syncReq->xdi_dma_descriptor_operation.info; if (!IoAdapter->dma_map) { pI->operation = -1; pI->descriptor_number = -1; return; } diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "dma_op"); if (pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC) { pI->descriptor_number = diva_alloc_dma_map_entry (\ (struct _diva_dma_map_entry*)IoAdapter->dma_map); if (pI->descriptor_number >= 0) { dword dma_magic; void* local_addr; diva_get_dma_map_entry (\ (struct _diva_dma_map_entry*)IoAdapter->dma_map, pI->descriptor_number, &local_addr, &dma_magic); pI->descriptor_address = local_addr; pI->descriptor_magic = dma_magic; pI->operation = 0; } else { pI->operation = -1; } } else if ((pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE) && (pI->descriptor_number >= 0)) { diva_free_dma_map_entry((struct _diva_dma_map_entry*)IoAdapter->dma_map, pI->descriptor_number); pI->descriptor_number = -1; pI->operation = 0; } else { pI->descriptor_number = -1; pI->operation = -1; } diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "dma_op"); } return; #endif case IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER: { diva_xdi_get_logical_adapter_number_s_t *pI = \ &syncReq->xdi_logical_adapter_number.info; pI->logical_adapter_number = IoAdapter->ANum; pI->controller = 
IoAdapter->ControllerNumber; pI->total_controllers = IoAdapter->Properties.Adapters; } return; case IDI_SYNC_REQ_XDI_GET_CAPI_PARAMS: { diva_xdi_get_capi_parameters_t prms, *pI = &syncReq->xdi_capi_prms.info; memset (&prms, 0x00, sizeof(prms)); prms.structure_length = min_t(size_t, sizeof(prms), pI->structure_length); memset (pI, 0x00, pI->structure_length); prms.flag_dynamic_l1_down = (IoAdapter->capi_cfg.cfg_1 & \ DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? 1 : 0; prms.group_optimization_enabled = (IoAdapter->capi_cfg.cfg_1 & \ DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON) ? 1 : 0; memcpy (pI, &prms, prms.structure_length); } return; case IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR: syncReq->xdi_sdram_bar.info.bar = IoAdapter->sdram_bar; return; case IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES: { dword i; diva_xdi_get_extended_xdi_features_t* pI =\ &syncReq->xdi_extended_features.info; pI->buffer_length_in_bytes &= ~0x80000000; if (pI->buffer_length_in_bytes && pI->features) { memset (pI->features, 0x00, pI->buffer_length_in_bytes); } for (i = 0; ((pI->features) && (i < pI->buffer_length_in_bytes) && (i < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ)); i++) { pI->features[i] = extended_xdi_features[i]; } if ((pI->buffer_length_in_bytes < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ) || (!pI->features)) { pI->buffer_length_in_bytes =\ (0x80000000 | DIVA_XDI_EXTENDED_FEATURES_MAX_SZ); } } return; case IDI_SYNC_REQ_XDI_GET_STREAM: if (IoAdapter) { diva_xdi_provide_istream_info (&IoAdapter->a, &syncReq->xdi_stream_info.info); } else { syncReq->xdi_stream_info.info.provided_service = 0; } return; case IDI_SYNC_REQ_GET_NAME: if ( IoAdapter ) { strcpy (&syncReq->GetName.name[0], IoAdapter->Name) ; DBG_TRC(("xdi: Adapter %d / Name '%s'", IoAdapter->ANum, IoAdapter->Name)) return ; } syncReq->GetName.name[0] = '\0' ; break ; case IDI_SYNC_REQ_GET_SERIAL: if ( IoAdapter ) { syncReq->GetSerial.serial = IoAdapter->serialNo ; DBG_TRC(("xdi: Adapter %d / SerialNo %ld", IoAdapter->ANum, IoAdapter->serialNo)) return ; 
} syncReq->GetSerial.serial = 0 ; break ; case IDI_SYNC_REQ_GET_CARDTYPE: if ( IoAdapter ) { syncReq->GetCardType.cardtype = IoAdapter->cardType ; DBG_TRC(("xdi: Adapter %d / CardType %ld", IoAdapter->ANum, IoAdapter->cardType)) return ; } syncReq->GetCardType.cardtype = 0 ; break ; case IDI_SYNC_REQ_GET_XLOG: if ( IoAdapter ) { pcm_req (IoAdapter, e) ; return ; } e->Ind = 0 ; break ; case IDI_SYNC_REQ_GET_DBG_XLOG: if ( IoAdapter ) { pcm_req (IoAdapter, e) ; return ; } e->Ind = 0 ; break ; case IDI_SYNC_REQ_GET_FEATURES: if ( IoAdapter ) { syncReq->GetFeatures.features = (unsigned short)IoAdapter->features ; return ; } syncReq->GetFeatures.features = 0 ; break ; case IDI_SYNC_REQ_PORTDRV_HOOK: if ( IoAdapter ) { DBG_TRC(("Xdi:IDI_SYNC_REQ_PORTDRV_HOOK - ignored")) return ; } break; } if ( IoAdapter ) { return ; } } DBG_TRC(("xdi: Id 0x%x / Req 0x%x / Rc 0x%x", e->Id, e->Req, e->Rc)) if ( !IoAdapter ) { DBG_FTL(("xdi: uninitialized Adapter used - ignore request")) return ; } diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req"); /* * assign an entity */ if ( !(e->Id &0x1f) ) { if ( IoAdapter->e_count >= IoAdapter->e_max ) { DBG_FTL(("xdi: all Ids in use (max=%d) --> Req ignored", IoAdapter->e_max)) diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req"); return ; } /* * find a new free id */ for ( i = 1 ; IoAdapter->e_tbl[i].e ; ++i ) ; IoAdapter->e_tbl[i].e = e ; IoAdapter->e_count++ ; e->No = (byte)i ; e->More = 0 ; e->RCurrent = 0xff ; } else { i = e->No ; } /* * if the entity is still busy, ignore the request call */ if ( e->More & XBUSY ) { DBG_FTL(("xdi: Id 0x%x busy --> Req 0x%x ignored", e->Id, e->Req)) if ( !IoAdapter->trapped && IoAdapter->trapFnc ) { IoAdapter->trapFnc (IoAdapter) ; /* Firs trap, also notify user if supported */ if (IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) { (*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum); } } diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req"); 
return ; } /* * initialize transmit status variables */ e->More |= XBUSY ; e->More &= ~XMOREF ; e->XCurrent = 0 ; e->XOffset = 0 ; /* * queue this entity in the adapter request queue */ IoAdapter->e_tbl[i].next = 0 ; if ( IoAdapter->head ) { IoAdapter->e_tbl[IoAdapter->tail].next = i ; IoAdapter->tail = i ; } else { IoAdapter->head = i ; IoAdapter->tail = i ; } /* * queue the DPC to process the request */ diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr); diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req"); } /* --------------------------------------------------------------------- Main DPC routine --------------------------------------------------------------------- */ void DIDpcRoutine (struct _diva_os_soft_isr* psoft_isr, void* Context) { PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)Context ; ADAPTER* a = &IoAdapter->a ; diva_os_atomic_t* pin_dpc = &IoAdapter->in_dpc; if (diva_os_atomic_increment (pin_dpc) == 1) { do { if ( IoAdapter->tst_irq (a) ) { if ( !IoAdapter->Unavailable ) IoAdapter->dpc (a) ; IoAdapter->clr_irq (a) ; } IoAdapter->out (a) ; } while (diva_os_atomic_decrement (pin_dpc) > 0); /* ---------------------------------------------------------------- Look for XLOG request (cards with indirect addressing) ---------------------------------------------------------------- */ if (IoAdapter->pcm_pending) { struct pc_maint *pcm; diva_os_spin_lock_magic_t OldIrql ; diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &OldIrql, "data_dpc"); pcm = (struct pc_maint *)IoAdapter->pcm_data; switch (IoAdapter->pcm_pending) { case 1: /* ask card for XLOG */ a->ram_out (a, &IoAdapter->pcm->rc, 0) ; a->ram_out (a, &IoAdapter->pcm->req, pcm->req) ; IoAdapter->pcm_pending = 2; break; case 2: /* Try to get XLOG from the card */ if ((int)(a->ram_in (a, &IoAdapter->pcm->rc))) { a->ram_in_buffer (a, IoAdapter->pcm, pcm, sizeof(*pcm)) ; IoAdapter->pcm_pending = 3; } break; case 3: /* let XDI recovery XLOG */ break; } diva_os_leave_spin_lock 
(&IoAdapter->data_spin_lock, &OldIrql, "data_dpc"); } /* ---------------------------------------------------------------- */ } } /* -------------------------------------------------------------------------- XLOG interface -------------------------------------------------------------------------- */ static void pcm_req (PISDN_ADAPTER IoAdapter, ENTITY *e) { diva_os_spin_lock_magic_t OldIrql ; int i, rc ; ADAPTER *a = &IoAdapter->a ; struct pc_maint *pcm = (struct pc_maint *)&e->Ind ; /* * special handling of I/O based card interface * the memory access isn't an atomic operation ! */ if ( IoAdapter->Properties.Card == CARD_MAE ) { diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &OldIrql, "data_pcm_1"); IoAdapter->pcm_data = (void *)pcm; IoAdapter->pcm_pending = 1; diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr); diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &OldIrql, "data_pcm_1"); for ( rc = 0, i = (IoAdapter->trapped ? 3000 : 250) ; !rc && (i > 0) ; --i ) { diva_os_sleep (1) ; if (IoAdapter->pcm_pending == 3) { diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &OldIrql, "data_pcm_3"); IoAdapter->pcm_pending = 0; IoAdapter->pcm_data = NULL ; diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &OldIrql, "data_pcm_3"); return ; } diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &OldIrql, "data_pcm_2"); diva_os_schedule_soft_isr (&IoAdapter->req_soft_isr); diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &OldIrql, "data_pcm_2"); } diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &OldIrql, "data_pcm_4"); IoAdapter->pcm_pending = 0; IoAdapter->pcm_data = NULL ; diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &OldIrql, "data_pcm_4"); goto Trapped ; } /* * memory based shared ram is accessible from different * processors without disturbing concurrent processes. */ a->ram_out (a, &IoAdapter->pcm->rc, 0) ; a->ram_out (a, &IoAdapter->pcm->req, pcm->req) ; for ( i = (IoAdapter->trapped ? 
3000 : 250) ; --i > 0 ; ) { diva_os_sleep (1) ; rc = (int)(a->ram_in (a, &IoAdapter->pcm->rc)) ; if ( rc ) { a->ram_in_buffer (a, IoAdapter->pcm, pcm, sizeof(*pcm)) ; return ; } } Trapped: if ( IoAdapter->trapFnc ) { int trapped = IoAdapter->trapped; IoAdapter->trapFnc (IoAdapter) ; /* Firs trap, also notify user if supported */ if (!trapped && IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) { (*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum); } } } /*------------------------------------------------------------------*/ /* ram access functions for memory mapped cards */ /*------------------------------------------------------------------*/ byte mem_in (ADAPTER *a, void *addr) { byte val; volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); val = READ_BYTE(Base + (unsigned long)addr); DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); return (val); } word mem_inw (ADAPTER *a, void *addr) { word val; volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); val = READ_WORD((Base + (unsigned long)addr)); DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); return (val); } void mem_in_dw (ADAPTER *a, void *addr, dword* data, int dwords) { volatile byte __iomem * Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); while (dwords--) { *data++ = READ_DWORD((Base + (unsigned long)addr)); addr+=4; } DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); } void mem_in_buffer (ADAPTER *a, void *addr, void *buffer, word length) { volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); memcpy_fromio(buffer, (Base + (unsigned long)addr), length); DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); } void mem_look_ahead (ADAPTER *a, PBUFFER *RBuffer, ENTITY *e) { PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io ; IoAdapter->RBuffer.length = mem_inw (a, &RBuffer->length) ; mem_in_buffer (a, RBuffer->P, IoAdapter->RBuffer.P, IoAdapter->RBuffer.length) ; e->RBuffer = (DBUFFER *)&IoAdapter->RBuffer ; } void mem_out 
(ADAPTER *a, void *addr, byte data) { volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); WRITE_BYTE(Base + (unsigned long)addr, data); DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); } void mem_outw (ADAPTER *a, void *addr, word data) { volatile byte __iomem * Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); WRITE_WORD((Base + (unsigned long)addr), data); DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); } void mem_out_dw (ADAPTER *a, void *addr, const dword* data, int dwords) { volatile byte __iomem * Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); while (dwords--) { WRITE_DWORD((Base + (unsigned long)addr), *data); addr+=4; data++; } DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); } void mem_out_buffer (ADAPTER *a, void *addr, void *buffer, word length) { volatile byte __iomem * Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); memcpy_toio((Base + (unsigned long)addr), buffer, length) ; DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); } void mem_inc (ADAPTER *a, void *addr) { volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); byte x = READ_BYTE(Base + (unsigned long)addr); WRITE_BYTE(Base + (unsigned long)addr, x + 1); DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); } /*------------------------------------------------------------------*/ /* ram access functions for io-mapped cards */ /*------------------------------------------------------------------*/ byte io_in(ADAPTER * a, void * adr) { byte val; byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); outppw(Port + 4, (word)(unsigned long)adr); val = inpp(Port); DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); return(val); } word io_inw(ADAPTER * a, void * adr) { word val; byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); outppw(Port + 4, (word)(unsigned long)adr); val = inppw(Port); DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); return(val); } void io_in_buffer(ADAPTER * a, void * adr, void 
* buffer, word len) { byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); byte* P = (byte*)buffer; if ((long)adr & 1) { outppw(Port+4, (word)(unsigned long)adr); *P = inpp(Port); P++; adr = ((byte *) adr) + 1; len--; if (!len) { DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); return; } } outppw(Port+4, (word)(unsigned long)adr); inppw_buffer (Port, P, len+1); DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); } void io_look_ahead(ADAPTER * a, PBUFFER * RBuffer, ENTITY * e) { byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); outppw(Port+4, (word)(unsigned long)RBuffer); ((PISDN_ADAPTER)a->io)->RBuffer.length = inppw(Port); inppw_buffer (Port, ((PISDN_ADAPTER)a->io)->RBuffer.P, ((PISDN_ADAPTER)a->io)->RBuffer.length + 1); e->RBuffer = (DBUFFER *) &(((PISDN_ADAPTER)a->io)->RBuffer); DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); } void io_out(ADAPTER * a, void * adr, byte data) { byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); outppw(Port+4, (word)(unsigned long)adr); outpp(Port, data); DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); } void io_outw(ADAPTER * a, void * adr, word data) { byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); outppw(Port+4, (word)(unsigned long)adr); outppw(Port, data); DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); } void io_out_buffer(ADAPTER * a, void * adr, void * buffer, word len) { byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); byte* P = (byte*)buffer; if ((long)adr & 1) { outppw(Port+4, (word)(unsigned long)adr); outpp(Port, *P); P++; adr = ((byte *) adr) + 1; len--; if (!len) { DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); return; } } outppw(Port+4, (word)(unsigned long)adr); outppw_buffer (Port, P, len+1); DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); } void io_inc(ADAPTER * a, void * adr) { byte x; byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); outppw(Port+4, (word)(unsigned 
long)adr); x = inpp(Port); outppw(Port+4, (word)(unsigned long)adr); outpp(Port, x+1); DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); } /*------------------------------------------------------------------*/ /* OS specific functions related to queuing of entities */ /*------------------------------------------------------------------*/ void free_entity(ADAPTER * a, byte e_no) { PISDN_ADAPTER IoAdapter; diva_os_spin_lock_magic_t irql; IoAdapter = (PISDN_ADAPTER) a->io; diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_free"); IoAdapter->e_tbl[e_no].e = NULL; IoAdapter->e_count--; diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_free"); } void assign_queue(ADAPTER * a, byte e_no, word ref) { PISDN_ADAPTER IoAdapter; diva_os_spin_lock_magic_t irql; IoAdapter = (PISDN_ADAPTER) a->io; diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_assign"); IoAdapter->e_tbl[e_no].assign_ref = ref; IoAdapter->e_tbl[e_no].next = (byte)IoAdapter->assign; IoAdapter->assign = e_no; diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_assign"); } byte get_assign(ADAPTER * a, word ref) { PISDN_ADAPTER IoAdapter; diva_os_spin_lock_magic_t irql; byte e_no; IoAdapter = (PISDN_ADAPTER) a->io; diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_assign_get"); for(e_no = (byte)IoAdapter->assign; e_no && IoAdapter->e_tbl[e_no].assign_ref!=ref; e_no = IoAdapter->e_tbl[e_no].next); diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_assign_get"); return e_no; } void req_queue(ADAPTER * a, byte e_no) { PISDN_ADAPTER IoAdapter; diva_os_spin_lock_magic_t irql; IoAdapter = (PISDN_ADAPTER) a->io; diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_q"); IoAdapter->e_tbl[e_no].next = 0; if(IoAdapter->head) { IoAdapter->e_tbl[IoAdapter->tail].next = e_no; IoAdapter->tail = e_no; } else { IoAdapter->head = e_no; IoAdapter->tail = e_no; } diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, 
"data_req_q"); } byte look_req(ADAPTER * a) { PISDN_ADAPTER IoAdapter; IoAdapter = (PISDN_ADAPTER) a->io; return ((byte)IoAdapter->head) ; } void next_req(ADAPTER * a) { PISDN_ADAPTER IoAdapter; diva_os_spin_lock_magic_t irql; IoAdapter = (PISDN_ADAPTER) a->io; diva_os_enter_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_next"); IoAdapter->head = IoAdapter->e_tbl[IoAdapter->head].next; if(!IoAdapter->head) IoAdapter->tail = 0; diva_os_leave_spin_lock (&IoAdapter->data_spin_lock, &irql, "data_req_next"); } /*------------------------------------------------------------------*/ /* memory map functions */ /*------------------------------------------------------------------*/ ENTITY * entity_ptr(ADAPTER * a, byte e_no) { PISDN_ADAPTER IoAdapter; IoAdapter = (PISDN_ADAPTER) a->io; return (IoAdapter->e_tbl[e_no].e); } void * PTR_X(ADAPTER * a, ENTITY * e) { return ((void *) e->X); } void * PTR_R(ADAPTER * a, ENTITY * e) { return ((void *) e->R); } void * PTR_P(ADAPTER * a, ENTITY * e, void * P) { return P; } void CALLBACK(ADAPTER * a, ENTITY * e) { if ( e && e->callback ) e->callback (e) ; }
gpl-2.0
me4488/NOPE_Kernel_V2
scripts/dtc/libfdt/fdt_rw.c
7207
12294
/* * libfdt - Flat Device Tree manipulation * Copyright (C) 2006 David Gibson, IBM Corporation. * * libfdt is dual licensed: you can use it either under the terms of * the GPL, or the BSD license, at your option. * * a) This library is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, * MA 02110-1301 USA * * Alternatively, * * b) Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "libfdt_env.h" #include <fdt.h> #include <libfdt.h> #include "libfdt_internal.h" static int _fdt_blocks_misordered(const void *fdt, int mem_rsv_size, int struct_size) { return (fdt_off_mem_rsvmap(fdt) < FDT_ALIGN(sizeof(struct fdt_header), 8)) || (fdt_off_dt_struct(fdt) < (fdt_off_mem_rsvmap(fdt) + mem_rsv_size)) || (fdt_off_dt_strings(fdt) < (fdt_off_dt_struct(fdt) + struct_size)) || (fdt_totalsize(fdt) < (fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt))); } static int _fdt_rw_check_header(void *fdt) { FDT_CHECK_HEADER(fdt); if (fdt_version(fdt) < 17) return -FDT_ERR_BADVERSION; if (_fdt_blocks_misordered(fdt, sizeof(struct fdt_reserve_entry), fdt_size_dt_struct(fdt))) return -FDT_ERR_BADLAYOUT; if (fdt_version(fdt) > 17) fdt_set_version(fdt, 17); return 0; } #define FDT_RW_CHECK_HEADER(fdt) \ { \ int err; \ if ((err = _fdt_rw_check_header(fdt)) != 0) \ return err; \ } static inline int _fdt_data_size(void *fdt) { return fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt); } static int _fdt_splice(void *fdt, void *splicepoint, int oldlen, int newlen) { char *p = splicepoint; char *end = (char *)fdt + _fdt_data_size(fdt); if (((p + oldlen) < p) || ((p + oldlen) > end)) return -FDT_ERR_BADOFFSET; if ((end - oldlen + newlen) > ((char *)fdt + fdt_totalsize(fdt))) return -FDT_ERR_NOSPACE; memmove(p + newlen, p + oldlen, end - p - oldlen); return 0; } static int _fdt_splice_mem_rsv(void *fdt, struct fdt_reserve_entry *p, int oldn, int 
newn) { int delta = (newn - oldn) * sizeof(*p); int err; err = _fdt_splice(fdt, p, oldn * sizeof(*p), newn * sizeof(*p)); if (err) return err; fdt_set_off_dt_struct(fdt, fdt_off_dt_struct(fdt) + delta); fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta); return 0; } static int _fdt_splice_struct(void *fdt, void *p, int oldlen, int newlen) { int delta = newlen - oldlen; int err; if ((err = _fdt_splice(fdt, p, oldlen, newlen))) return err; fdt_set_size_dt_struct(fdt, fdt_size_dt_struct(fdt) + delta); fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta); return 0; } static int _fdt_splice_string(void *fdt, int newlen) { void *p = (char *)fdt + fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt); int err; if ((err = _fdt_splice(fdt, p, 0, newlen))) return err; fdt_set_size_dt_strings(fdt, fdt_size_dt_strings(fdt) + newlen); return 0; } static int _fdt_find_add_string(void *fdt, const char *s) { char *strtab = (char *)fdt + fdt_off_dt_strings(fdt); const char *p; char *new; int len = strlen(s) + 1; int err; p = _fdt_find_string(strtab, fdt_size_dt_strings(fdt), s); if (p) /* found it */ return (p - strtab); new = strtab + fdt_size_dt_strings(fdt); err = _fdt_splice_string(fdt, len); if (err) return err; memcpy(new, s, len); return (new - strtab); } int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size) { struct fdt_reserve_entry *re; int err; FDT_RW_CHECK_HEADER(fdt); re = _fdt_mem_rsv_w(fdt, fdt_num_mem_rsv(fdt)); err = _fdt_splice_mem_rsv(fdt, re, 0, 1); if (err) return err; re->address = cpu_to_fdt64(address); re->size = cpu_to_fdt64(size); return 0; } int fdt_del_mem_rsv(void *fdt, int n) { struct fdt_reserve_entry *re = _fdt_mem_rsv_w(fdt, n); int err; FDT_RW_CHECK_HEADER(fdt); if (n >= fdt_num_mem_rsv(fdt)) return -FDT_ERR_NOTFOUND; err = _fdt_splice_mem_rsv(fdt, re, 1, 0); if (err) return err; return 0; } static int _fdt_resize_property(void *fdt, int nodeoffset, const char *name, int len, struct fdt_property **prop) { int oldlen; int 
err; *prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen); if (! (*prop)) return oldlen; if ((err = _fdt_splice_struct(fdt, (*prop)->data, FDT_TAGALIGN(oldlen), FDT_TAGALIGN(len)))) return err; (*prop)->len = cpu_to_fdt32(len); return 0; } static int _fdt_add_property(void *fdt, int nodeoffset, const char *name, int len, struct fdt_property **prop) { int proplen; int nextoffset; int namestroff; int err; if ((nextoffset = _fdt_check_node_offset(fdt, nodeoffset)) < 0) return nextoffset; namestroff = _fdt_find_add_string(fdt, name); if (namestroff < 0) return namestroff; *prop = _fdt_offset_ptr_w(fdt, nextoffset); proplen = sizeof(**prop) + FDT_TAGALIGN(len); err = _fdt_splice_struct(fdt, *prop, 0, proplen); if (err) return err; (*prop)->tag = cpu_to_fdt32(FDT_PROP); (*prop)->nameoff = cpu_to_fdt32(namestroff); (*prop)->len = cpu_to_fdt32(len); return 0; } int fdt_set_name(void *fdt, int nodeoffset, const char *name) { char *namep; int oldlen, newlen; int err; FDT_RW_CHECK_HEADER(fdt); namep = (char *)(uintptr_t)fdt_get_name(fdt, nodeoffset, &oldlen); if (!namep) return oldlen; newlen = strlen(name); err = _fdt_splice_struct(fdt, namep, FDT_TAGALIGN(oldlen+1), FDT_TAGALIGN(newlen+1)); if (err) return err; memcpy(namep, name, newlen+1); return 0; } int fdt_setprop(void *fdt, int nodeoffset, const char *name, const void *val, int len) { struct fdt_property *prop; int err; FDT_RW_CHECK_HEADER(fdt); err = _fdt_resize_property(fdt, nodeoffset, name, len, &prop); if (err == -FDT_ERR_NOTFOUND) err = _fdt_add_property(fdt, nodeoffset, name, len, &prop); if (err) return err; memcpy(prop->data, val, len); return 0; } int fdt_delprop(void *fdt, int nodeoffset, const char *name) { struct fdt_property *prop; int len, proplen; FDT_RW_CHECK_HEADER(fdt); prop = fdt_get_property_w(fdt, nodeoffset, name, &len); if (! 
prop) return len; proplen = sizeof(*prop) + FDT_TAGALIGN(len); return _fdt_splice_struct(fdt, prop, proplen, 0); } int fdt_add_subnode_namelen(void *fdt, int parentoffset, const char *name, int namelen) { struct fdt_node_header *nh; int offset, nextoffset; int nodelen; int err; uint32_t tag; uint32_t *endtag; FDT_RW_CHECK_HEADER(fdt); offset = fdt_subnode_offset_namelen(fdt, parentoffset, name, namelen); if (offset >= 0) return -FDT_ERR_EXISTS; else if (offset != -FDT_ERR_NOTFOUND) return offset; /* Try to place the new node after the parent's properties */ fdt_next_tag(fdt, parentoffset, &nextoffset); /* skip the BEGIN_NODE */ do { offset = nextoffset; tag = fdt_next_tag(fdt, offset, &nextoffset); } while ((tag == FDT_PROP) || (tag == FDT_NOP)); nh = _fdt_offset_ptr_w(fdt, offset); nodelen = sizeof(*nh) + FDT_TAGALIGN(namelen+1) + FDT_TAGSIZE; err = _fdt_splice_struct(fdt, nh, 0, nodelen); if (err) return err; nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE); memset(nh->name, 0, FDT_TAGALIGN(namelen+1)); memcpy(nh->name, name, namelen); endtag = (uint32_t *)((char *)nh + nodelen - FDT_TAGSIZE); *endtag = cpu_to_fdt32(FDT_END_NODE); return offset; } int fdt_add_subnode(void *fdt, int parentoffset, const char *name) { return fdt_add_subnode_namelen(fdt, parentoffset, name, strlen(name)); } int fdt_del_node(void *fdt, int nodeoffset) { int endoffset; FDT_RW_CHECK_HEADER(fdt); endoffset = _fdt_node_end_offset(fdt, nodeoffset); if (endoffset < 0) return endoffset; return _fdt_splice_struct(fdt, _fdt_offset_ptr_w(fdt, nodeoffset), endoffset - nodeoffset, 0); } static void _fdt_packblocks(const char *old, char *new, int mem_rsv_size, int struct_size) { int mem_rsv_off, struct_off, strings_off; mem_rsv_off = FDT_ALIGN(sizeof(struct fdt_header), 8); struct_off = mem_rsv_off + mem_rsv_size; strings_off = struct_off + struct_size; memmove(new + mem_rsv_off, old + fdt_off_mem_rsvmap(old), mem_rsv_size); fdt_set_off_mem_rsvmap(new, mem_rsv_off); memmove(new + struct_off, old + 
fdt_off_dt_struct(old), struct_size); fdt_set_off_dt_struct(new, struct_off); fdt_set_size_dt_struct(new, struct_size); memmove(new + strings_off, old + fdt_off_dt_strings(old), fdt_size_dt_strings(old)); fdt_set_off_dt_strings(new, strings_off); fdt_set_size_dt_strings(new, fdt_size_dt_strings(old)); } int fdt_open_into(const void *fdt, void *buf, int bufsize) { int err; int mem_rsv_size, struct_size; int newsize; const char *fdtstart = fdt; const char *fdtend = fdtstart + fdt_totalsize(fdt); char *tmp; FDT_CHECK_HEADER(fdt); mem_rsv_size = (fdt_num_mem_rsv(fdt)+1) * sizeof(struct fdt_reserve_entry); if (fdt_version(fdt) >= 17) { struct_size = fdt_size_dt_struct(fdt); } else { struct_size = 0; while (fdt_next_tag(fdt, struct_size, &struct_size) != FDT_END) ; } if (!_fdt_blocks_misordered(fdt, mem_rsv_size, struct_size)) { /* no further work necessary */ err = fdt_move(fdt, buf, bufsize); if (err) return err; fdt_set_version(buf, 17); fdt_set_size_dt_struct(buf, struct_size); fdt_set_totalsize(buf, bufsize); return 0; } /* Need to reorder */ newsize = FDT_ALIGN(sizeof(struct fdt_header), 8) + mem_rsv_size + struct_size + fdt_size_dt_strings(fdt); if (bufsize < newsize) return -FDT_ERR_NOSPACE; /* First attempt to build converted tree at beginning of buffer */ tmp = buf; /* But if that overlaps with the old tree... 
*/ if (((tmp + newsize) > fdtstart) && (tmp < fdtend)) { /* Try right after the old tree instead */ tmp = (char *)(uintptr_t)fdtend; if ((tmp + newsize) > ((char *)buf + bufsize)) return -FDT_ERR_NOSPACE; } _fdt_packblocks(fdt, tmp, mem_rsv_size, struct_size); memmove(buf, tmp, newsize); fdt_set_magic(buf, FDT_MAGIC); fdt_set_totalsize(buf, bufsize); fdt_set_version(buf, 17); fdt_set_last_comp_version(buf, 16); fdt_set_boot_cpuid_phys(buf, fdt_boot_cpuid_phys(fdt)); return 0; } int fdt_pack(void *fdt) { int mem_rsv_size; FDT_RW_CHECK_HEADER(fdt); mem_rsv_size = (fdt_num_mem_rsv(fdt)+1) * sizeof(struct fdt_reserve_entry); _fdt_packblocks(fdt, fdt, mem_rsv_size, fdt_size_dt_struct(fdt)); fdt_set_totalsize(fdt, _fdt_data_size(fdt)); return 0; }
gpl-2.0
Split-Screen/android_kernel_motorola_msm8610
drivers/staging/rtl8192u/ieee80211/digest.c
7719
2506
/*
 * Cryptographic API.
 *
 * Digest operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
//#include <linux/crypto.h>
#include "rtl_crypto.h"
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>
#include "internal.h"

/* Reset the digest state via the algorithm's dia_init hook. */
static void init(struct crypto_tfm *tfm)
{
	tfm->__crt_alg->cra_digest.dia_init(crypto_tfm_ctx(tfm));
}

/*
 * Feed a scatterlist into the running digest.  Each scatterlist entry
 * may cross page boundaries, so the inner do/while walks it page by
 * page; every page is mapped with kmap_atomic() only for the duration
 * of the dia_update() call and unmapped immediately after.
 */
static void update(struct crypto_tfm *tfm,
		   struct scatterlist *sg, unsigned int nsg)
{
	unsigned int i;

	for (i = 0; i < nsg; i++) {
		struct page *pg = sg[i].page;
		unsigned int offset = sg[i].offset;
		unsigned int l = sg[i].length;

		do {
			/* Clamp to the bytes left on the current page. */
			unsigned int bytes_from_page = min(l, ((unsigned int)
							      (PAGE_SIZE)) - offset);
			char *p = kmap_atomic(pg) + offset;

			tfm->__crt_alg->cra_digest.dia_update
				(crypto_tfm_ctx(tfm), p,
				 bytes_from_page);
			kunmap_atomic(p);
			crypto_yield(tfm);
			/* Pages after the first start at offset 0. */
			offset = 0;
			pg++;
			l -= bytes_from_page;
		} while (l > 0);
	}
}

/* Finalize the digest and write it to 'out' via dia_final. */
static void final(struct crypto_tfm *tfm, u8 *out)
{
	tfm->__crt_alg->cra_digest.dia_final(crypto_tfm_ctx(tfm), out);
}

/* Set a key for keyed digests (e.g. for HMAC); -ENOSYS when the
 * algorithm provides no dia_setkey hook. */
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	u32 flags;
	if (tfm->__crt_alg->cra_digest.dia_setkey == NULL)
		return -ENOSYS;
	return tfm->__crt_alg->cra_digest.dia_setkey(crypto_tfm_ctx(tfm),
						     key, keylen, &flags);
}

/*
 * One-shot digest: init + update over the scatterlist + final.
 *
 * NOTE(review): unlike update() above, this maps each scatterlist
 * entry exactly once and passes sg[i].length in a single call, so an
 * entry that crosses a page boundary would read past the mapped page —
 * presumably callers only pass single-page entries here; confirm.
 */
static void digest(struct crypto_tfm *tfm,
		   struct scatterlist *sg, unsigned int nsg, u8 *out)
{
	unsigned int i;

	tfm->crt_digest.dit_init(tfm);

	for (i = 0; i < nsg; i++) {
		char *p = kmap_atomic(sg[i].page) + sg[i].offset;
		tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm),
						      p, sg[i].length);
		kunmap_atomic(p);
		crypto_yield(tfm);
	}
	crypto_digest_final(tfm, out);
}

/* Digests define no tfm flags; reject any non-zero request. */
int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
{
	return flags ? -EINVAL : 0;
}

/* Wire the generic digest entry points into the tfm and allocate the
 * scratch block used by HMAC. */
int crypto_init_digest_ops(struct crypto_tfm *tfm)
{
	struct digest_tfm *ops = &tfm->crt_digest;

	ops->dit_init = init;
	ops->dit_update = update;
	ops->dit_final = final;
	ops->dit_digest = digest;
	ops->dit_setkey = setkey;

	return crypto_alloc_hmac_block(tfm);
}

/* Release the HMAC scratch block allocated at init time. */
void crypto_exit_digest_ops(struct crypto_tfm *tfm)
{
	crypto_free_hmac_block(tfm);
}
gpl-2.0
jrior001/android_kernel_samsung_d2
drivers/scsi/scsicam.c
7975
7910
/* * scsicam.c - SCSI CAM support functions, use for HDIO_GETGEO, etc. * * Copyright 1993, 1994 Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * drew@Colorado.EDU * +1 (303) 786-7975 * * For more information, please consult the SCSI-CAM draft. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/genhd.h> #include <linux/kernel.h> #include <linux/blkdev.h> #include <asm/unaligned.h> #include <scsi/scsicam.h> static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds, unsigned int *secs); /** * scsi_bios_ptable - Read PC partition table out of first sector of device. * @dev: from this device * * Description: Reads the first sector from the device and returns %0x42 bytes * starting at offset %0x1be. * Returns: partition table in kmalloc(GFP_KERNEL) memory, or NULL on error. */ unsigned char *scsi_bios_ptable(struct block_device *dev) { unsigned char *res = kmalloc(66, GFP_KERNEL); if (res) { struct block_device *bdev = dev->bd_contains; Sector sect; void *data = read_dev_sector(bdev, 0, &sect); if (data) { memcpy(res, data + 0x1be, 66); put_dev_sector(sect); } else { kfree(res); res = NULL; } } return res; } EXPORT_SYMBOL(scsi_bios_ptable); /** * scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors. * @bdev: which device * @capacity: size of the disk in sectors * @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders * * Description : determine the BIOS mapping/geometry used for a drive in a * SCSI-CAM system, storing the results in ip as required * by the HDIO_GETGEO ioctl(). * * Returns : -1 on failure, 0 on success. 
*/ int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip) { unsigned char *p; u64 capacity64 = capacity; /* Suppress gcc warning */ int ret; p = scsi_bios_ptable(bdev); if (!p) return -1; /* try to infer mapping from partition table */ ret = scsi_partsize(p, (unsigned long)capacity, (unsigned int *)ip + 2, (unsigned int *)ip + 0, (unsigned int *)ip + 1); kfree(p); if (ret == -1 && capacity64 < (1ULL << 32)) { /* pick some standard mapping with at most 1024 cylinders, and at most 62 sectors per track - this works up to 7905 MB */ ret = setsize((unsigned long)capacity, (unsigned int *)ip + 2, (unsigned int *)ip + 0, (unsigned int *)ip + 1); } /* if something went wrong, then apparently we have to return a geometry with more than 1024 cylinders */ if (ret || ip[0] > 255 || ip[1] > 63) { if ((capacity >> 11) > 65534) { ip[0] = 255; ip[1] = 63; } else { ip[0] = 64; ip[1] = 32; } if (capacity > 65535*63*255) ip[2] = 65535; else ip[2] = (unsigned long)capacity / (ip[0] * ip[1]); } return 0; } EXPORT_SYMBOL(scsicam_bios_param); /** * scsi_partsize - Parse cylinders/heads/sectors from PC partition table * @buf: partition table, see scsi_bios_ptable() * @capacity: size of the disk in sectors * @cyls: put cylinders here * @hds: put heads here * @secs: put sectors here * * Description: determine the BIOS mapping/geometry used to create the partition * table, storing the results in *cyls, *hds, and *secs * * Returns: -1 on failure, 0 on success. 
*/ int scsi_partsize(unsigned char *buf, unsigned long capacity, unsigned int *cyls, unsigned int *hds, unsigned int *secs) { struct partition *p = (struct partition *)buf, *largest = NULL; int i, largest_cyl; int cyl, ext_cyl, end_head, end_cyl, end_sector; unsigned int logical_end, physical_end, ext_physical_end; if (*(unsigned short *) (buf + 64) == 0xAA55) { for (largest_cyl = -1, i = 0; i < 4; ++i, ++p) { if (!p->sys_ind) continue; #ifdef DEBUG printk("scsicam_bios_param : partition %d has system \n", i); #endif cyl = p->cyl + ((p->sector & 0xc0) << 2); if (cyl > largest_cyl) { largest_cyl = cyl; largest = p; } } } if (largest) { end_cyl = largest->end_cyl + ((largest->end_sector & 0xc0) << 2); end_head = largest->end_head; end_sector = largest->end_sector & 0x3f; if (end_head + 1 == 0 || end_sector == 0) return -1; #ifdef DEBUG printk("scsicam_bios_param : end at h = %d, c = %d, s = %d\n", end_head, end_cyl, end_sector); #endif physical_end = end_cyl * (end_head + 1) * end_sector + end_head * end_sector + end_sector; /* This is the actual _sector_ number at the end */ logical_end = get_unaligned(&largest->start_sect) + get_unaligned(&largest->nr_sects); /* This is for >1023 cylinders */ ext_cyl = (logical_end - (end_head * end_sector + end_sector)) / (end_head + 1) / end_sector; ext_physical_end = ext_cyl * (end_head + 1) * end_sector + end_head * end_sector + end_sector; #ifdef DEBUG printk("scsicam_bios_param : logical_end=%d physical_end=%d ext_physical_end=%d ext_cyl=%d\n" ,logical_end, physical_end, ext_physical_end, ext_cyl); #endif if ((logical_end == physical_end) || (end_cyl == 1023 && ext_physical_end == logical_end)) { *secs = end_sector; *hds = end_head + 1; *cyls = capacity / ((end_head + 1) * end_sector); return 0; } #ifdef DEBUG printk("scsicam_bios_param : logical (%u) != physical (%u)\n", logical_end, physical_end); #endif } return -1; } EXPORT_SYMBOL(scsi_partsize); /* * Function : static int setsize(unsigned long capacity,unsigned int 
*cyls, * unsigned int *hds, unsigned int *secs); * * Purpose : to determine a near-optimal int 0x13 mapping for a * SCSI disk in terms of lost space of size capacity, storing * the results in *cyls, *hds, and *secs. * * Returns : -1 on failure, 0 on success. * * Extracted from * * WORKING X3T9.2 * DRAFT 792D * see http://www.t10.org/ftp/t10/drafts/cam/cam-r12b.pdf * * Revision 6 * 10-MAR-94 * Information technology - * SCSI-2 Common access method * transport and SCSI interface module * * ANNEX A : * * setsize() converts a read capacity value to int 13h * head-cylinder-sector requirements. It minimizes the value for * number of heads and maximizes the number of cylinders. This * will support rather large disks before the number of heads * will not fit in 4 bits (or 6 bits). This algorithm also * minimizes the number of sectors that will be unused at the end * of the disk while allowing for very large disks to be * accommodated. This algorithm does not use physical geometry. */ static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds, unsigned int *secs) { unsigned int rv = 0; unsigned long heads, sectors, cylinders, temp; cylinders = 1024L; /* Set number of cylinders to max */ sectors = 62L; /* Maximize sectors per track */ temp = cylinders * sectors; /* Compute divisor for heads */ heads = capacity / temp; /* Compute value for number of heads */ if (capacity % temp) { /* If no remainder, done! */ heads++; /* Else, increment number of heads */ temp = cylinders * heads; /* Compute divisor for sectors */ sectors = capacity / temp; /* Compute value for sectors per track */ if (capacity % temp) { /* If no remainder, done! 
*/ sectors++; /* Else, increment number of sectors */ temp = heads * sectors; /* Compute divisor for cylinders */ cylinders = capacity / temp; /* Compute number of cylinders */ } } if (cylinders == 0) rv = (unsigned) -1; /* Give error if 0 cylinders */ *cyls = (unsigned int) cylinders; /* Stuff return values */ *secs = (unsigned int) sectors; *hds = (unsigned int) heads; return (rv); }
gpl-2.0
mi3-dev/android_kernel_xiaomi_msm8x74pro
drivers/isdn/hisax/st5481_b.c
9511
9957
/*
 * Driver for ST5481 USB ISDN modem
 *
 * Author       Frode Isaksen
 * Copyright    2001 by Frode Isaksen      <fisaksen@bewan.com>
 *              2001 by Kai Germaschewski  <kai.germaschewski@gmx.de>
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 */

#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/usb.h>
#include <linux/netdevice.h>
#include <linux/bitrev.h>
#include "st5481.h"

/* Deliver a primitive from the B-channel layer 1 up to layer 2. */
static inline void B_L1L2(struct st5481_bcs *bcs, int pr, void *arg)
{
	struct hisax_if *ifc = (struct hisax_if *) &bcs->b_if;

	ifc->l1l2(ifc, pr, arg);
}

/*
 * Encode and transmit next frame.
 *
 * Fills the isochronous URB 'buf_nr' of this B channel with either
 * transparent audio data (bit-reversed), HDLC-encoded frame data, or
 * idle fill (0xff / HDLC flags), then submits it.  The busy bit per
 * URB prevents double submission.
 */
static void usb_b_out(struct st5481_bcs *bcs, int buf_nr)
{
	struct st5481_b_out *b_out = &bcs->b_out;
	struct st5481_adapter *adapter = bcs->adapter;
	struct urb *urb;
	unsigned int packet_size, offset;
	int len, buf_size, bytes_sent;
	int i;
	struct sk_buff *skb;

	if (test_and_set_bit(buf_nr, &b_out->busy)) {
		DBG(4, "ep %d urb %d busy", (bcs->channel + 1) * 2, buf_nr);
		return;
	}
	urb = b_out->urb[buf_nr];

	// Adjust isoc buffer size according to flow state
	if (b_out->flow_event & (OUT_DOWN | OUT_UNDERRUN)) {
		/* Hardware FIFO draining too fast: send extra bytes. */
		buf_size = NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT + B_FLOW_ADJUST;
		packet_size = SIZE_ISO_PACKETS_B_OUT + B_FLOW_ADJUST;
		DBG(4, "B%d,adjust flow,add %d bytes", bcs->channel + 1, B_FLOW_ADJUST);
	} else if (b_out->flow_event & OUT_UP) {
		/* Hardware FIFO filling up: send fewer bytes. */
		buf_size = NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT - B_FLOW_ADJUST;
		packet_size = SIZE_ISO_PACKETS_B_OUT - B_FLOW_ADJUST;
		DBG(4, "B%d,adjust flow,remove %d bytes", bcs->channel + 1, B_FLOW_ADJUST);
	} else {
		buf_size = NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT;
		packet_size = 8;
	}
	b_out->flow_event = 0;

	len = 0;
	while (len < buf_size) {
		if ((skb = b_out->tx_skb)) {
			DBG_SKB(0x100, skb);
			DBG(4, "B%d,len=%d", bcs->channel + 1, skb->len);

			if (bcs->mode == L1_MODE_TRANS) {
				bytes_sent = buf_size - len;
				if (skb->len < bytes_sent)
					bytes_sent = skb->len;
				{	/* swap tx bytes to get hearable audio data */
					register unsigned char *src = skb->data;
					register unsigned char *dest = urb->transfer_buffer + len;
					register unsigned int count;
					for (count = 0; count < bytes_sent; count++)
						*dest++ = bitrev8(*src++);
				}
				len += bytes_sent;
			} else {
				/* HDLC: encoder consumes bytes_sent bytes of
				 * the skb and emits into the URB buffer. */
				len += isdnhdlc_encode(&b_out->hdlc_state,
						       skb->data, skb->len, &bytes_sent,
						       urb->transfer_buffer + len, buf_size-len);
			}

			skb_pull(skb, bytes_sent);

			if (!skb->len) {
				// Frame sent
				b_out->tx_skb = NULL;
				B_L1L2(bcs, PH_DATA | CONFIRM, (void *)(unsigned long) skb->truesize);
				dev_kfree_skb_any(skb);

/* 				if (!(bcs->tx_skb = skb_dequeue(&bcs->sq))) { */
/* 					st5481B_sched_event(bcs, B_XMTBUFREADY); */
/* 				} */
			}
		} else {
			/* No frame queued: keep the ISO stream fed with
			 * idle pattern (audio) or HDLC flags. */
			if (bcs->mode == L1_MODE_TRANS) {
				memset(urb->transfer_buffer + len, 0xff, buf_size-len);
				len = buf_size;
			} else {
				// Send flags
				len += isdnhdlc_encode(&b_out->hdlc_state,
						       NULL, 0, &bytes_sent,
						       urb->transfer_buffer + len, buf_size-len);
			}
		}
	}

	// Prepare the URB
	/* The first packet carries the flow-adjusted size; every
	 * following packet reverts to the nominal size. */
	for (i = 0, offset = 0; offset < len; i++) {
		urb->iso_frame_desc[i].offset = offset;
		urb->iso_frame_desc[i].length = packet_size;
		offset += packet_size;
		packet_size = SIZE_ISO_PACKETS_B_OUT;
	}
	urb->transfer_buffer_length = len;
	urb->number_of_packets = i;
	urb->dev = adapter->usb_dev;

	DBG_ISO_PACKET(0x200, urb);

	SUBMIT_URB(urb, GFP_NOIO);
}

/*
 * Start transferring (flags or data) on the B channel, since
 * FIFO counters has been set to a non-zero value.
 */
static void st5481B_start_xfer(void *context)
{
	struct st5481_bcs *bcs = context;

	DBG(4, "B%d", bcs->channel + 1);

	// Start transmitting (flags or data) on B channel
	/* Prime both double-buffered URBs. */
	usb_b_out(bcs, 0);
	usb_b_out(bcs, 1);
}

/*
 * If the adapter has only 2 LEDs, the green
 * LED will blink with a rate depending
 * on the number of channels opened.
 */
static void led_blink(struct st5481_adapter *adapter)
{
	u_char leds = adapter->leds;

	// 50 frames/sec for each channel
	if (++adapter->led_counter % 50) {
		return;
	}

	if (adapter->led_counter % 100) {
		leds |= GREEN_LED;
	} else {
		leds &= ~GREEN_LED;
	}

	st5481_usb_device_ctrl_msg(adapter, GPIO_OUT, leds, NULL, NULL);
}

/* ISO OUT completion: requeue the finished URB (or reset the pipe on
 * error) and blink the LED on 2-LED adapters. */
static void usb_b_out_complete(struct urb *urb)
{
	struct st5481_bcs *bcs = urb->context;
	struct st5481_b_out *b_out = &bcs->b_out;
	struct st5481_adapter *adapter = bcs->adapter;
	int buf_nr;

	buf_nr = get_buf_nr(b_out->urb, urb);
	test_and_clear_bit(buf_nr, &b_out->busy);

	if (unlikely(urb->status < 0)) {
		switch (urb->status) {
		case -ENOENT:
		case -ESHUTDOWN:
		case -ECONNRESET:
			DBG(4, "urb killed status %d", urb->status);
			return; // Give up
		default:
			WARNING("urb status %d", urb->status);
			/* Only reset once both URBs are idle. */
			if (b_out->busy == 0) {
				st5481_usb_pipe_reset(adapter, (bcs->channel + 1) * 2 | USB_DIR_OUT, NULL, NULL);
			}
			break;
		}
	}

	usb_b_out(bcs, buf_nr);

	if (adapter->number_of_leds == 2)
		led_blink(adapter);
}

/*
 * Start or stop the transfer on the B channel.
 */
static void st5481B_mode(struct st5481_bcs *bcs, int mode)
{
	struct st5481_b_out *b_out = &bcs->b_out;
	struct st5481_adapter *adapter = bcs->adapter;

	DBG(4, "B%d,mode=%d", bcs->channel + 1, mode);

	if (bcs->mode == mode)
		return;

	bcs->mode = mode;

	// Cancel all USB transfers on this B channel
	usb_unlink_urb(b_out->urb[0]);
	usb_unlink_urb(b_out->urb[1]);
	b_out->busy = 0;

	st5481_in_mode(&bcs->b_in, mode);
	if (bcs->mode != L1_MODE_NULL) {
		// Open the B channel
		if (bcs->mode != L1_MODE_TRANS) {
			u32 features = HDLC_BITREVERSE;
			if (bcs->mode == L1_MODE_HDLC_56K)
				features |= HDLC_56KBIT;
			isdnhdlc_out_init(&b_out->hdlc_state, features);
		}
		st5481_usb_pipe_reset(adapter, (bcs->channel + 1) * 2, NULL, NULL);

		// Enable B channel interrupts
		st5481_usb_device_ctrl_msg(adapter, FFMSK_B1 + (bcs->channel * 2),
					   OUT_UP + OUT_DOWN + OUT_UNDERRUN, NULL, NULL);

		// Enable B channel FIFOs
		st5481_usb_device_ctrl_msg(adapter, OUT_B1_COUNTER+(bcs->channel * 2), 32, st5481B_start_xfer, bcs);
		if (adapter->number_of_leds == 4) {
			if (bcs->channel == 0) {
				adapter->leds |= B1_LED;
			} else {
				adapter->leds |= B2_LED;
			}
		}
	} else {
		// Disable B channel interrupts
		st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL);

		// Disable B channel FIFOs
		st5481_usb_device_ctrl_msg(adapter, OUT_B1_COUNTER+(bcs->channel * 2), 0, NULL, NULL);

		if (adapter->number_of_leds == 4) {
			if (bcs->channel == 0) {
				adapter->leds &= ~B1_LED;
			} else {
				adapter->leds &= ~B2_LED;
			}
		} else {
			st5481_usb_device_ctrl_msg(adapter, GPIO_OUT, adapter->leds, NULL, NULL);
		}
		if (b_out->tx_skb) {
			dev_kfree_skb_any(b_out->tx_skb);
			b_out->tx_skb = NULL;
		}
	}
}

/* Locate the OUT endpoint for this channel in altsetting 3 and set up
 * the pair of isochronous URBs used for transmission. */
static int st5481_setup_b_out(struct st5481_bcs *bcs)
{
	struct usb_device *dev = bcs->adapter->usb_dev;
	struct usb_interface *intf;
	struct usb_host_interface *altsetting = NULL;
	struct usb_host_endpoint *endpoint;
	struct st5481_b_out *b_out = &bcs->b_out;

	DBG(4, "");

	intf = usb_ifnum_to_if(dev, 0);
	if (intf)
		altsetting = usb_altnum_to_altsetting(intf, 3);
	if (!altsetting)
		return -ENXIO;

	// Allocate URBs and buffers for the B channel out
	endpoint = &altsetting->endpoint[EP_B1_OUT - 1 + bcs->channel * 2];

	DBG(4, "endpoint address=%02x,packet size=%d",
	    endpoint->desc.bEndpointAddress, le16_to_cpu(endpoint->desc.wMaxPacketSize));

	// Allocate memory for 8000bytes/sec + extra bytes if underrun
	return st5481_setup_isocpipes(b_out->urb, dev,
				      usb_sndisocpipe(dev, endpoint->desc.bEndpointAddress),
				      NUM_ISO_PACKETS_B, SIZE_ISO_PACKETS_B_OUT,
				      NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT + B_FLOW_ADJUST,
				      usb_b_out_complete, bcs);
}

/* Free the ISO OUT URBs for this channel. */
static void st5481_release_b_out(struct st5481_bcs *bcs)
{
	struct st5481_b_out *b_out = &bcs->b_out;

	DBG(4, "");

	st5481_release_isocpipes(b_out->urb);
}

/* Set up both directions of one B channel; tears down the OUT side
 * again if the IN side fails. */
int st5481_setup_b(struct st5481_bcs *bcs)
{
	int retval;

	DBG(4, "");

	retval = st5481_setup_b_out(bcs);
	if (retval)
		goto err;
	bcs->b_in.bufsize = HSCX_BUFMAX;
	bcs->b_in.num_packets = NUM_ISO_PACKETS_B;
	bcs->b_in.packet_size = SIZE_ISO_PACKETS_B_IN;
	bcs->b_in.ep = (bcs->channel ? EP_B2_IN : EP_B1_IN) | USB_DIR_IN;
	bcs->b_in.counter = bcs->channel ? IN_B2_COUNTER : IN_B1_COUNTER;
	bcs->b_in.adapter = bcs->adapter;
	bcs->b_in.hisax_if = &bcs->b_if.ifc;
	retval = st5481_setup_in(&bcs->b_in);
	if (retval)
		goto err_b_out;

	return 0;

err_b_out:
	st5481_release_b_out(bcs);
err:
	return retval;
}

/*
 * Release buffers and URBs for the B channels
 */
void st5481_release_b(struct st5481_bcs *bcs)
{
	DBG(4, "");

	st5481_release_in(&bcs->b_in);
	st5481_release_b_out(bcs);
}

/*
 * st5481_b_l2l1 is the entry point for upper layer routines that want to
 * transmit on the B channel.  PH_DATA | REQUEST is a normal packet that
 * we either start transmitting (if idle) or queue (if busy).
 * PH_PULL | REQUEST can be called to request a callback message
 * (PH_PULL | CONFIRM)
 * once the link is idle.  After a "pull" callback, the upper layer
 * routines can use PH_PULL | INDICATION to send data.
 */
void st5481_b_l2l1(struct hisax_if *ifc, int pr, void *arg)
{
	struct st5481_bcs *bcs = ifc->priv;
	struct sk_buff *skb = arg;
	long mode;

	DBG(4, "");

	switch (pr) {
	case PH_DATA | REQUEST:
		BUG_ON(bcs->b_out.tx_skb);
		bcs->b_out.tx_skb = skb;
		break;
	case PH_ACTIVATE | REQUEST:
		mode = (long) arg;
		DBG(4, "B%d,PH_ACTIVATE_REQUEST %ld", bcs->channel + 1, mode);
		st5481B_mode(bcs, mode);
		B_L1L2(bcs, PH_ACTIVATE | INDICATION, NULL);
		break;
	case PH_DEACTIVATE | REQUEST:
		DBG(4, "B%d,PH_DEACTIVATE_REQUEST", bcs->channel + 1);
		st5481B_mode(bcs, L1_MODE_NULL);
		B_L1L2(bcs, PH_DEACTIVATE | INDICATION, NULL);
		break;
	default:
		WARNING("pr %#x\n", pr);
	}
}
gpl-2.0
tzanussi/linux-yocto-micro-3.19
arch/arm/nwfpe/fpa11.c
9767
3118
/*
    NetWinder Floating Point Emulator
    (c) Rebel.COM, 1998,1999
    (c) Philip Blundell, 2001

    Direct questions, comments to Scott Bambrough <scottb@netwinder.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "fpa11.h"
#include "fpopcode.h"

#include "fpmodule.h"
#include "fpmodule.inl"

#include <linux/compiler.h>
#include <linux/string.h>

/* Put the emulated FPA11 back into its power-on state: no register
   holds a value, and the FPSR advertises the software emulator. */
static void resetFPA11(void)
{
	FPA11 *fpa11 = GET_FPA11();
	int reg;

	/* Mark all eight FP registers as holding no value. */
	for (reg = 0; reg < 8; reg++)
		fpa11->fType[reg] = typeNone;

	/* FPSR: system id = FP_EMULATOR, AC set, all other bits clear. */
	fpa11->fpsr = FP_EMULATOR | BIT_AC;
}

/* Map the rounding-mode field of an FPA opcode onto the softfloat
   rounding constant.  Unrecognised values fall back to round-to-nearest. */
int8 SetRoundingMode(const unsigned int opcode)
{
	const unsigned int mode = opcode & MASK_ROUNDING_MODE;

	if (mode == ROUND_TO_PLUS_INFINITY)
		return float_round_up;
	if (mode == ROUND_TO_MINUS_INFINITY)
		return float_round_down;
	if (mode == ROUND_TO_ZERO)
		return float_round_to_zero;
	/* ROUND_TO_NEAREST and anything unexpected. */
	return float_round_nearest_even;
}

/* Map the rounding-precision field of an FPA opcode onto a bit width.
   Without extended-precision support everything is treated as 80-bit. */
int8 SetRoundingPrecision(const unsigned int opcode)
{
#ifdef CONFIG_FPE_NWFPE_XP
	switch (opcode & MASK_ROUNDING_PRECISION) {
	case ROUND_SINGLE:
		return 32;
	case ROUND_DOUBLE:
		return 64;
	case ROUND_EXTENDED:
	default:
		return 80;
	}
#endif
	return 80;
}

/* Zero the per-thread emulator state and bring it to the reset state. */
void nwfpe_init_fpa(union fp_state *fp)
{
	FPA11 *fpa11 = (FPA11 *)fp;
#ifdef NWFPE_DEBUG
	printk("NWFPE: setting up state.\n");
#endif
	memset(fpa11, 0, sizeof(FPA11));
	resetFPA11();
	fpa11->initflag = 1;
}

/* Decode and emulate one FPA11 instruction.  Returns non-zero on
   success, 0 when the opcode is not an FPA11 instruction. */
unsigned int EmulateAll(unsigned int opcode)
{
	unsigned int cp;

#ifdef NWFPE_DEBUG
	printk("NWFPE: emulating opcode %08x\n", opcode);
#endif

	/* Only coprocessor numbers 1 and 2 belong to the FPA11. */
	cp = opcode & 0x00000f00;
	if (cp != 0x00000100 && cp != 0x00000200)
		return 0;

	switch (opcode & 0x0e000000) {
	case 0x0e000000:
		/* CPRT (transfer/compare/convert) vs CPDO (arithmetic). */
		if (opcode & 0x00000010)
			return EmulateCPRT(opcode);
		return EmulateCPDO(opcode);
	case 0x0c000000:
		/* CPDT: load/store (multiple) opcodes. */
		return EmulateCPDT(opcode);
	default:
		/* Invalid instruction detected.  Return FALSE. */
		return 0;
	}
}
gpl-2.0
Evervolv/android_kernel_samsung_manta
arch/arm/nwfpe/fpa11.c
9767
3118
/*
    NetWinder Floating Point Emulator
    (c) Rebel.COM, 1998,1999
    (c) Philip Blundell, 2001

    Direct questions, comments to Scott Bambrough <scottb@netwinder.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include "fpa11.h"
#include "fpopcode.h"

#include "fpmodule.h"
#include "fpmodule.inl"

#include <linux/compiler.h>
#include <linux/string.h>

/* Reset the FPA11 chip.  Called to initialize and reset the emulator. */
static void resetFPA11(void)
{
	int i;
	FPA11 *fpa11 = GET_FPA11();

	/* initialize the register type array */
	for (i = 0; i <= 7; i++) {
		fpa11->fType[i] = typeNone;
	}

	/* FPSR: set system id to FP_EMULATOR, set AC, clear all other bits */
	fpa11->fpsr = FP_EMULATOR | BIT_AC;
}

/* Translate the opcode's rounding-mode field into the softfloat
 * rounding constant; unknown values default to round-to-nearest. */
int8 SetRoundingMode(const unsigned int opcode)
{
	switch (opcode & MASK_ROUNDING_MODE) {
	default:
	case ROUND_TO_NEAREST:
		return float_round_nearest_even;

	case ROUND_TO_PLUS_INFINITY:
		return float_round_up;

	case ROUND_TO_MINUS_INFINITY:
		return float_round_down;

	case ROUND_TO_ZERO:
		return float_round_to_zero;
	}
}

/* Translate the opcode's rounding-precision field into a bit width;
 * without CONFIG_FPE_NWFPE_XP everything is 80-bit extended. */
int8 SetRoundingPrecision(const unsigned int opcode)
{
#ifdef CONFIG_FPE_NWFPE_XP
	switch (opcode & MASK_ROUNDING_PRECISION) {
	case ROUND_SINGLE:
		return 32;

	case ROUND_DOUBLE:
		return 64;

	case ROUND_EXTENDED:
		return 80;

	default:
		return 80;
	}
#endif
	return 80;
}

/* Clear the per-thread emulator state and mark it initialised. */
void nwfpe_init_fpa(union fp_state *fp)
{
	FPA11 *fpa11 = (FPA11 *)fp;
#ifdef NWFPE_DEBUG
	printk("NWFPE: setting up state.\n");
#endif
	memset(fpa11, 0, sizeof(FPA11));
	resetFPA11();
	fpa11->initflag = 1;
}

/* Emulate the instruction in the opcode.
 * Returns 0 when the opcode is not a (coprocessor 1/2) FPA11
 * instruction; otherwise dispatches to the CPRT/CPDO/CPDT emulators. */
unsigned int EmulateAll(unsigned int opcode)
{
	unsigned int code;

#ifdef NWFPE_DEBUG
	printk("NWFPE: emulating opcode %08x\n", opcode);
#endif
	code = opcode & 0x00000f00;
	if (code == 0x00000100 || code == 0x00000200) {
		/* For coprocessor 1 or 2 (FPA11) */
		code = opcode & 0x0e000000;
		if (code == 0x0e000000) {
			if (opcode & 0x00000010) {
				/* Emulate conversion opcodes. */
				/* Emulate register transfer opcodes. */
				/* Emulate comparison opcodes. */
				return EmulateCPRT(opcode);
			} else {
				/* Emulate monadic arithmetic opcodes. */
				/* Emulate dyadic arithmetic opcodes. */
				return EmulateCPDO(opcode);
			}
		} else if (code == 0x0c000000) {
			/* Emulate load/store opcodes. */
			/* Emulate load/store multiple opcodes. */
			return EmulateCPDT(opcode);
		}
	}

	/* Invalid instruction detected.  Return FALSE. */
	return 0;
}
gpl-2.0
CyanogenMod/android_kernel_samsung_smdk4210
arch/powerpc/platforms/chrp/nvram.c
10023
2253
/*
 *  c 2001 PPC 64 Team, IBM Corp
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * /dev/nvram driver for PPC
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <asm/uaccess.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include "chrp.h"

/* Size in bytes reported by the device tree; 0 until chrp_nvram_init(). */
static unsigned int nvram_size;
/* One-byte RTAS transfer buffer (padded); protected by nvram_lock. */
static unsigned char nvram_buf[4];
static DEFINE_SPINLOCK(nvram_lock);

/* Read one byte of NVRAM at 'addr' via the RTAS "nvram-fetch" call.
 * Returns 0xff on out-of-range address or RTAS failure. */
static unsigned char chrp_nvram_read(int addr)
{
	unsigned int done;
	unsigned long flags;
	unsigned char ret;

	if (addr >= nvram_size) {
		printk(KERN_DEBUG "%s: read addr %d > nvram_size %u\n",
		       current->comm, addr, nvram_size);
		return 0xff;
	}
	/* nvram_buf is shared; serialize the RTAS call and the copy. */
	spin_lock_irqsave(&nvram_lock, flags);
	if ((rtas_call(rtas_token("nvram-fetch"), 3, 2, &done, addr,
		       __pa(nvram_buf), 1) != 0) || 1 != done)
		ret = 0xff;
	else
		ret = nvram_buf[0];
	spin_unlock_irqrestore(&nvram_lock, flags);

	return ret;
}

/* Write one byte of NVRAM at 'addr' via the RTAS "nvram-store" call.
 * Out-of-range addresses and RTAS errors are logged and dropped. */
static void chrp_nvram_write(int addr, unsigned char val)
{
	unsigned int done;
	unsigned long flags;

	if (addr >= nvram_size) {
		printk(KERN_DEBUG "%s: write addr %d > nvram_size %u\n",
		       current->comm, addr, nvram_size);
		return;
	}
	spin_lock_irqsave(&nvram_lock, flags);
	nvram_buf[0] = val;
	if ((rtas_call(rtas_token("nvram-store"), 3, 2, &done, addr,
		       __pa(nvram_buf), 1) != 0) || 1 != done)
		printk(KERN_DEBUG "rtas IO error storing 0x%02x at %d", val, addr);
	spin_unlock_irqrestore(&nvram_lock, flags);
}

/* Look up the "nvram" device-tree node, record its "#bytes" size, and
 * hook the byte accessors into ppc_md.  Silently does nothing if the
 * node or property is missing/malformed. */
void __init chrp_nvram_init(void)
{
	struct device_node *nvram;
	const unsigned int *nbytes_p;
	unsigned int proplen;

	nvram = of_find_node_by_type(NULL, "nvram");
	if (nvram == NULL)
		return;

	nbytes_p = of_get_property(nvram, "#bytes", &proplen);
	if (nbytes_p == NULL || proplen != sizeof(unsigned int)) {
		of_node_put(nvram);
		return;
	}

	nvram_size = *nbytes_p;

	printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size);
	of_node_put(nvram);

	ppc_md.nvram_read_val = chrp_nvram_read;
	ppc_md.nvram_write_val = chrp_nvram_write;

	return;
}
gpl-2.0
obsolete-ra/kernel_motorola_msm8226
crypto/rmd128.c
10535
10434
/*
 * Cryptographic API.
 *
 * RIPEMD-128 - RACE Integrity Primitives Evaluation Message Digest.
 *
 * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
 *
 * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#include "ripemd.h"

struct rmd128_ctx {
	u64 byte_count;		/* total message length fed so far */
	u32 state[4];		/* chaining variables A, B, C, D */
	__le32 buffer[16];	/* partial 64-byte input block */
};

/* Round constants for the left (K*) and right (KK*) lanes. */
#define K1  RMD_K1
#define K2  RMD_K2
#define K3  RMD_K3
#define K4  RMD_K4
#define KK1 RMD_K6
#define KK2 RMD_K7
#define KK3 RMD_K8
#define KK4 RMD_K1

/* Round boolean functions. */
#define F1(x, y, z) (x ^ y ^ z)		/* XOR */
#define F2(x, y, z) (z ^ (x & (y ^ z)))	/* x ? y : z */
#define F3(x, y, z) ((x | ~y) ^ z)
#define F4(x, y, z) (y ^ (z & (x ^ y)))	/* z ? x : y */

/* One RIPEMD step: a = rol(a + f(b,c,d) + x + k, s). */
#define ROUND(a, b, c, d, f, k, x, s)  { \
	(a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
	(a) = rol32((a), (s)); \
}

/* Compress one 64-byte block (16 little-endian words) into 'state'.
 * Two independent lanes of 4 x 16 steps are computed and then folded
 * together per the RIPEMD-128 specification. */
static void rmd128_transform(u32 *state, const __le32 *in)
{
	u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;

	/* Initialize left lane */
	aa = state[0];
	bb = state[1];
	cc = state[2];
	dd = state[3];

	/* Initialize right lane */
	aaa = state[0];
	bbb = state[1];
	ccc = state[2];
	ddd = state[3];

	/* round 1: left lane */
	ROUND(aa, bb, cc, dd, F1, K1, in[0], 11);
	ROUND(dd, aa, bb, cc, F1, K1, in[1], 14);
	ROUND(cc, dd, aa, bb, F1, K1, in[2], 15);
	ROUND(bb, cc, dd, aa, F1, K1, in[3], 12);
	ROUND(aa, bb, cc, dd, F1, K1, in[4], 5);
	ROUND(dd, aa, bb, cc, F1, K1, in[5], 8);
	ROUND(cc, dd, aa, bb, F1, K1, in[6], 7);
	ROUND(bb, cc, dd, aa, F1, K1, in[7], 9);
	ROUND(aa, bb, cc, dd, F1, K1, in[8], 11);
	ROUND(dd, aa, bb, cc, F1, K1, in[9], 13);
	ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
	ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
	ROUND(aa, bb, cc, dd, F1, K1, in[12], 6);
	ROUND(dd, aa, bb, cc, F1, K1, in[13], 7);
	ROUND(cc, dd, aa, bb, F1, K1, in[14], 9);
	ROUND(bb, cc, dd, aa, F1, K1, in[15], 8);

	/* round 2: left lane */
	ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
	ROUND(dd, aa, bb, cc, F2, K2, in[4], 6);
	ROUND(cc, dd, aa, bb, F2, K2, in[13], 8);
	ROUND(bb, cc, dd, aa, F2, K2, in[1], 13);
	ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
	ROUND(dd, aa, bb, cc, F2, K2, in[6], 9);
	ROUND(cc, dd, aa, bb, F2, K2, in[15], 7);
	ROUND(bb, cc, dd, aa, F2, K2, in[3], 15);
	ROUND(aa, bb, cc, dd, F2, K2, in[12], 7);
	ROUND(dd, aa, bb, cc, F2, K2, in[0], 12);
	ROUND(cc, dd, aa, bb, F2, K2, in[9], 15);
	ROUND(bb, cc, dd, aa, F2, K2, in[5], 9);
	ROUND(aa, bb, cc, dd, F2, K2, in[2], 11);
	ROUND(dd, aa, bb, cc, F2, K2, in[14], 7);
	ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
	ROUND(bb, cc, dd, aa, F2, K2, in[8], 12);

	/* round 3: left lane */
	ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
	ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
	ROUND(cc, dd, aa, bb, F3, K3, in[14], 6);
	ROUND(bb, cc, dd, aa, F3, K3, in[4], 7);
	ROUND(aa, bb, cc, dd, F3, K3, in[9], 14);
	ROUND(dd, aa, bb, cc, F3, K3, in[15], 9);
	ROUND(cc, dd, aa, bb, F3, K3, in[8], 13);
	ROUND(bb, cc, dd, aa, F3, K3, in[1], 15);
	ROUND(aa, bb, cc, dd, F3, K3, in[2], 14);
	ROUND(dd, aa, bb, cc, F3, K3, in[7], 8);
	ROUND(cc, dd, aa, bb, F3, K3, in[0], 13);
	ROUND(bb, cc, dd, aa, F3, K3, in[6], 6);
	ROUND(aa, bb, cc, dd, F3, K3, in[13], 5);
	ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
	ROUND(cc, dd, aa, bb, F3, K3, in[5], 7);
	ROUND(bb, cc, dd, aa, F3, K3, in[12], 5);

	/* round 4: left lane */
	ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
	ROUND(dd, aa, bb, cc, F4, K4, in[9], 12);
	ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
	ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
	ROUND(aa, bb, cc, dd, F4, K4, in[0], 14);
	ROUND(dd, aa, bb, cc, F4, K4, in[8], 15);
	ROUND(cc, dd, aa, bb, F4, K4, in[12], 9);
	ROUND(bb, cc, dd, aa, F4, K4, in[4], 8);
	ROUND(aa, bb, cc, dd, F4, K4, in[13], 9);
	ROUND(dd, aa, bb, cc, F4, K4, in[3], 14);
	ROUND(cc, dd, aa, bb, F4, K4, in[7], 5);
	ROUND(bb, cc, dd, aa, F4, K4, in[15], 6);
	ROUND(aa, bb, cc, dd, F4, K4, in[14], 8);
	ROUND(dd, aa, bb, cc, F4, K4, in[5], 6);
	ROUND(cc, dd, aa, bb, F4, K4, in[6], 5);
	ROUND(bb, cc, dd, aa, F4, K4, in[2], 12);

	/* round 1: right lane */
	ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8);
	ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9);
	ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9);
	ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11);
	ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13);
	ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15);
	ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
	ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5);
	ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7);
	ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7);
	ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8);
	ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11);
	ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14);
	ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
	ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12);
	ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);

	/* round 2: right lane */
	ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9);
	ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
	ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15);
	ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7);
	ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12);
	ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8);
	ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9);
	ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
	ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7);
	ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7);
	ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12);
	ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7);
	ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6);
	ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15);
	ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13);
	ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);

	/* round 3: right lane */
	ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9);
	ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7);
	ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15);
	ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11);
	ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8);
	ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6);
	ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6);
	ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14);
	ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
	ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13);
	ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5);
	ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14);
	ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
	ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13);
	ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7);
	ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);

	/* round 4: right lane */
	ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15);
	ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5);
	ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8);
	ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11);
	ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14);
	ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
	ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6);
	ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14);
	ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6);
	ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9);
	ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12);
	ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9);
	ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12);
	ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5);
	ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
	ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);

	/* combine results - cyclic rotation of the two lanes into state */
	ddd += cc + state[1];		/* final result for state[0] */
	state[1] = state[2] + dd + aaa;
	state[2] = state[3] + aa + bbb;
	state[3] = state[0] + bb + ccc;
	state[0] = ddd;

	return;
}

/* Initialise the digest state with the RIPEMD IV. */
static int rmd128_init(struct shash_desc *desc)
{
	struct rmd128_ctx *rctx = shash_desc_ctx(desc);

	rctx->byte_count = 0;

	rctx->state[0] = RMD_H0;
	rctx->state[1] = RMD_H1;
	rctx->state[2] = RMD_H2;
	rctx->state[3] = RMD_H3;

	memset(rctx->buffer, 0, sizeof(rctx->buffer));

	return 0;
}

/* Buffer incoming data and run the compression function for every
 * complete 64-byte block. */
static int rmd128_update(struct shash_desc *desc, const u8 *data,
			 unsigned int len)
{
	struct rmd128_ctx *rctx = shash_desc_ctx(desc);
	const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);

	rctx->byte_count += len;

	/* Enough space in buffer? If so copy and we're done */
	if (avail > len) {
		memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
		       data, len);
		goto out;
	}

	/* Fill the buffer to a full block and compress it. */
	memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
	       data, avail);

	rmd128_transform(rctx->state, rctx->buffer);
	data += avail;
	len -= avail;

	/* Compress remaining full blocks directly from the input. */
	while (len >= sizeof(rctx->buffer)) {
		memcpy(rctx->buffer, data, sizeof(rctx->buffer));
		rmd128_transform(rctx->state, rctx->buffer);
		data += sizeof(rctx->buffer);
		len -= sizeof(rctx->buffer);
	}

	/* Stash the tail for the next update/final. */
	memcpy(rctx->buffer, data, len);

out:
	return 0;
}

/* Add padding and return the message digest. */
static int rmd128_final(struct shash_desc *desc, u8 *out)
{
	struct rmd128_ctx *rctx = shash_desc_ctx(desc);
	u32 i, index, padlen;
	__le64 bits;
	__le32 *dst = (__le32 *)out;
	static const u8 padding[64] = { 0x80, };

	/* Bit length of the message, little-endian, appended last. */
	bits = cpu_to_le64(rctx->byte_count << 3);

	/* Pad out to 56 mod 64 */
	index = rctx->byte_count & 0x3f;
	padlen = (index < 56) ? (56 - index) : ((64+56) - index);
	rmd128_update(desc, padding, padlen);

	/* Append length */
	rmd128_update(desc, (const u8 *)&bits, sizeof(bits));

	/* Store state in digest */
	for (i = 0; i < 4; i++)
		dst[i] = cpu_to_le32p(&rctx->state[i]);

	/* Wipe context */
	memset(rctx, 0, sizeof(*rctx));

	return 0;
}

static struct shash_alg alg = {
	.digestsize	=	RMD128_DIGEST_SIZE,
	.init		=	rmd128_init,
	.update		=	rmd128_update,
	.final		=	rmd128_final,
	.descsize	=	sizeof(struct rmd128_ctx),
	.base		=	{
		.cra_name	 =	"rmd128",
		.cra_flags	 =	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	 =	RMD128_BLOCK_SIZE,
		.cra_module	 =	THIS_MODULE,
	}
};

static int __init rmd128_mod_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit rmd128_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(rmd128_mod_init);
module_exit(rmd128_mod_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
gpl-2.0
AppliedMicro/ENGLinuxLatest
crypto/rmd128.c
10535
10434
/*
 * Cryptographic API.
 *
 * RIPEMD-128 - RACE Integrity Primitives Evaluation Message Digest.
 *
 * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
 *
 * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#include "ripemd.h"

/*
 * Hash state: total message length in bytes, the four-word chaining
 * value, and a 64-byte buffer holding the current partial block.
 */
struct rmd128_ctx {
	u64 byte_count;
	u32 state[4];
	__le32 buffer[16];
};

/* Additive round constants: K* for the left lane, KK* for the right. */
#define K1 RMD_K1
#define K2 RMD_K2
#define K3 RMD_K3
#define K4 RMD_K4
#define KK1 RMD_K6
#define KK2 RMD_K7
#define KK3 RMD_K8
#define KK4 RMD_K1

/* The four nonlinear round functions of RIPEMD-128. */
#define F1(x, y, z) (x ^ y ^ z)		/* XOR */
#define F2(x, y, z) (z ^ (x & (y ^ z)))	/* x ? y : z */
#define F3(x, y, z) ((x | ~y) ^ z)
#define F4(x, y, z) (y ^ (z & (x ^ y)))	/* z ? x : y */

/* One step: add f(b,c,d), a message word and a constant, then rotate. */
#define ROUND(a, b, c, d, f, k, x, s) { \
	(a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
	(a) = rol32((a), (s)); \
}

/*
 * Compress one 64-byte block into the chaining value.  Two independent
 * "lanes" (left and right) each run four 16-step rounds over the same
 * message words with different orderings, rotations and constants; the
 * results are then folded back into state[].
 */
static void rmd128_transform(u32 *state, const __le32 *in)
{
	u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;

	/* Both lanes start from the current chaining value. */
	aa = state[0];
	bb = state[1];
	cc = state[2];
	dd = state[3];

	aaa = state[0];
	bbb = state[1];
	ccc = state[2];
	ddd = state[3];

	/* round 1: left lane */
	ROUND(aa, bb, cc, dd, F1, K1, in[0], 11);
	ROUND(dd, aa, bb, cc, F1, K1, in[1], 14);
	ROUND(cc, dd, aa, bb, F1, K1, in[2], 15);
	ROUND(bb, cc, dd, aa, F1, K1, in[3], 12);
	ROUND(aa, bb, cc, dd, F1, K1, in[4], 5);
	ROUND(dd, aa, bb, cc, F1, K1, in[5], 8);
	ROUND(cc, dd, aa, bb, F1, K1, in[6], 7);
	ROUND(bb, cc, dd, aa, F1, K1, in[7], 9);
	ROUND(aa, bb, cc, dd, F1, K1, in[8], 11);
	ROUND(dd, aa, bb, cc, F1, K1, in[9], 13);
	ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
	ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
	ROUND(aa, bb, cc, dd, F1, K1, in[12], 6);
	ROUND(dd, aa, bb, cc, F1, K1, in[13], 7);
	ROUND(cc, dd, aa, bb, F1, K1, in[14], 9);
	ROUND(bb, cc, dd, aa, F1, K1, in[15], 8);

	/* round 2: left lane */
	ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
	ROUND(dd, aa, bb, cc, F2, K2, in[4], 6);
	ROUND(cc, dd, aa, bb, F2, K2, in[13], 8);
	ROUND(bb, cc, dd, aa, F2, K2, in[1], 13);
	ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
	ROUND(dd, aa, bb, cc, F2, K2, in[6], 9);
	ROUND(cc, dd, aa, bb, F2, K2, in[15], 7);
	ROUND(bb, cc, dd, aa, F2, K2, in[3], 15);
	ROUND(aa, bb, cc, dd, F2, K2, in[12], 7);
	ROUND(dd, aa, bb, cc, F2, K2, in[0], 12);
	ROUND(cc, dd, aa, bb, F2, K2, in[9], 15);
	ROUND(bb, cc, dd, aa, F2, K2, in[5], 9);
	ROUND(aa, bb, cc, dd, F2, K2, in[2], 11);
	ROUND(dd, aa, bb, cc, F2, K2, in[14], 7);
	ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
	ROUND(bb, cc, dd, aa, F2, K2, in[8], 12);

	/* round 3: left lane */
	ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
	ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
	ROUND(cc, dd, aa, bb, F3, K3, in[14], 6);
	ROUND(bb, cc, dd, aa, F3, K3, in[4], 7);
	ROUND(aa, bb, cc, dd, F3, K3, in[9], 14);
	ROUND(dd, aa, bb, cc, F3, K3, in[15], 9);
	ROUND(cc, dd, aa, bb, F3, K3, in[8], 13);
	ROUND(bb, cc, dd, aa, F3, K3, in[1], 15);
	ROUND(aa, bb, cc, dd, F3, K3, in[2], 14);
	ROUND(dd, aa, bb, cc, F3, K3, in[7], 8);
	ROUND(cc, dd, aa, bb, F3, K3, in[0], 13);
	ROUND(bb, cc, dd, aa, F3, K3, in[6], 6);
	ROUND(aa, bb, cc, dd, F3, K3, in[13], 5);
	ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
	ROUND(cc, dd, aa, bb, F3, K3, in[5], 7);
	ROUND(bb, cc, dd, aa, F3, K3, in[12], 5);

	/* round 4: left lane */
	ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
	ROUND(dd, aa, bb, cc, F4, K4, in[9], 12);
	ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
	ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
	ROUND(aa, bb, cc, dd, F4, K4, in[0], 14);
	ROUND(dd, aa, bb, cc, F4, K4, in[8], 15);
	ROUND(cc, dd, aa, bb, F4, K4, in[12], 9);
	ROUND(bb, cc, dd, aa, F4, K4, in[4], 8);
	ROUND(aa, bb, cc, dd, F4, K4, in[13], 9);
	ROUND(dd, aa, bb, cc, F4, K4, in[3], 14);
	ROUND(cc, dd, aa, bb, F4, K4, in[7], 5);
	ROUND(bb, cc, dd, aa, F4, K4, in[15], 6);
	ROUND(aa, bb, cc, dd, F4, K4, in[14], 8);
	ROUND(dd, aa, bb, cc, F4, K4, in[5], 6);
	ROUND(cc, dd, aa, bb, F4, K4, in[6], 5);
	ROUND(bb, cc, dd, aa, F4, K4, in[2], 12);

	/* round 1: right lane */
	ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8);
	ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9);
	ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9);
	ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11);
	ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13);
	ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15);
	ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
	ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5);
	ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7);
	ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7);
	ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8);
	ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11);
	ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14);
	ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
	ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12);
	ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);

	/* round 2: right lane */
	ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9);
	ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
	ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15);
	ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7);
	ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12);
	ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8);
	ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9);
	ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
	ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7);
	ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7);
	ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12);
	ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7);
	ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6);
	ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15);
	ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13);
	ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);

	/* round 3: right lane */
	ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9);
	ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7);
	ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15);
	ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11);
	ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8);
	ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6);
	ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6);
	ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14);
	ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
	ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13);
	ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5);
	ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14);
	ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
	ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13);
	ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7);
	ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);

	/* round 4: right lane */
	ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15);
	ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5);
	ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8);
	ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11);
	ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14);
	ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
	ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6);
	ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14);
	ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6);
	ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9);
	ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12);
	ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9);
	ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12);
	ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5);
	ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
	ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);

	/* combine results */
	ddd += cc + state[1];		/* final result for state[0] */
	state[1] = state[2] + dd + aaa;
	state[2] = state[3] + aa + bbb;
	state[3] = state[0] + bb + ccc;
	state[0] = ddd;
}

/* Reset the context to the standard RIPEMD-128 initial chaining value. */
static int rmd128_init(struct shash_desc *desc)
{
	struct rmd128_ctx *rctx = shash_desc_ctx(desc);

	rctx->byte_count = 0;

	rctx->state[0] = RMD_H0;
	rctx->state[1] = RMD_H1;
	rctx->state[2] = RMD_H2;
	rctx->state[3] = RMD_H3;

	memset(rctx->buffer, 0, sizeof(rctx->buffer));
	return 0;
}

/*
 * Absorb @len bytes of message data, compressing each full 64-byte
 * block as it becomes available and stashing the remainder.
 */
static int rmd128_update(struct shash_desc *desc, const u8 *data,
			 unsigned int len)
{
	struct rmd128_ctx *rctx = shash_desc_ctx(desc);
	const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);

	rctx->byte_count += len;

	/* Enough space in buffer? If so copy and we're done */
	if (avail > len) {
		memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
		       data, len);
		return 0;
	}

	/* Top up the partial block and compress it. */
	memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
	       data, avail);

	rmd128_transform(rctx->state, rctx->buffer);
	data += avail;
	len -= avail;

	/* Compress any further whole blocks straight from the input. */
	while (len >= sizeof(rctx->buffer)) {
		memcpy(rctx->buffer, data, sizeof(rctx->buffer));
		rmd128_transform(rctx->state, rctx->buffer);
		data += sizeof(rctx->buffer);
		len -= sizeof(rctx->buffer);
	}

	/* Keep the tail for the next call. */
	memcpy(rctx->buffer, data, len);
	return 0;
}

/* Add padding and return the message digest. */
static int rmd128_final(struct shash_desc *desc, u8 *out)
{
	struct rmd128_ctx *rctx = shash_desc_ctx(desc);
	u32 i, index, padlen;
	__le64 bits;
	__le32 *dst = (__le32 *)out;
	static const u8 padding[64] = { 0x80, };

	/* Snapshot the length in bits before padding mutates byte_count. */
	bits = cpu_to_le64(rctx->byte_count << 3);

	/* Pad out to 56 mod 64 */
	index = rctx->byte_count & 0x3f;
	if (index < 56)
		padlen = 56 - index;
	else
		padlen = (64 + 56) - index;
	rmd128_update(desc, padding, padlen);

	/* Append length */
	rmd128_update(desc, (const u8 *)&bits, sizeof(bits));

	/* Store state in digest */
	for (i = 0; i < 4; i++)
		dst[i] = cpu_to_le32p(&rctx->state[i]);

	/* Wipe context */
	memset(rctx, 0, sizeof(*rctx));

	return 0;
}

static struct shash_alg alg = {
	.digestsize	=	RMD128_DIGEST_SIZE,
	.init		=	rmd128_init,
	.update		=	rmd128_update,
	.final		=	rmd128_final,
	.descsize	=	sizeof(struct rmd128_ctx),
	.base		=	{
		.cra_name	 =	"rmd128",
		.cra_flags	 =	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	 =	RMD128_BLOCK_SIZE,
		.cra_module	 =	THIS_MODULE,
	}
};

static int __init rmd128_mod_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit rmd128_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(rmd128_mod_init);
module_exit(rmd128_mod_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
gpl-2.0
Lloir/android_kernel_htc_enrc2b-bladev2
drivers/net/chelsio/mv88x201x.c
12327
8786
/*****************************************************************************
 *                                                                           *
 * File: mv88x201x.c                                                         *
 * $Revision: 1.12 $                                                         *
 * $Date: 2005/04/15 19:27:14 $                                              *
 * Description:                                                              *
 *  Marvell PHY (mv88x201x) functionality.                                   *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify      *
 * it under the terms of the GNU General Public License, version 2, as       *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along   *
 * with this program; if not, write to the Free Software Foundation, Inc.,   *
 * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "cphy.h"
#include "elmer0.h"

/*
 * The 88x2010 Rev C. requires some link status registers * to be read
 * twice in order to get the right values. Future * revisions will fix
 * this problem and then this macro * can disappear.
 */
#define MV88x2010_LINK_STATUS_BUGS    1

/*
 * Map LED control onto another register so the LEDs can be driven by
 * software (mmd 0x1, addr 0x7 per the original driver notes).
 */
static int led_init(struct cphy *cphy)
{
	cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8304, 0xdddd);
	return 0;
}

/* Turn the link LED on or off depending on @do_enable's LSB. */
static int led_link(struct cphy *cphy, u32 do_enable)
{
	u32 led = 0;
#define LINK_ENABLE_BIT 0x1

	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, &led);

	if (do_enable & LINK_ENABLE_BIT)
		led |= LINK_ENABLE_BIT;
	else
		led &= ~LINK_ENABLE_BIT;
	cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led);
	return 0;
}

/* Port Reset */
static int mv88x201x_reset(struct cphy *cphy, int wait)
{
	/* This can be done through registers.  It is not required since
	 * a full chip reset is used.
	 */
	return 0;
}

/* Unmask PHY LASI interrupts and route them through Elmer0 (ASIC only). */
static int mv88x201x_interrupt_enable(struct cphy *cphy)
{
	/* Enable PHY LASI interrupts. */
	cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
			MDIO_PMA_LASI_LSALARM);

	/* Enable Marvell interrupts through Elmer0. */
	if (t1_is_asic(cphy->adapter)) {
		u32 elmer;

		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
		elmer |= ELMER0_GP_BIT6;
		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
	}
	return 0;
}

/* Mask PHY LASI interrupts and block them at Elmer0 (ASIC only). */
static int mv88x201x_interrupt_disable(struct cphy *cphy)
{
	/* Disable PHY LASI interrupts. */
	cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0x0);

	/* Disable Marvell interrupts through Elmer0. */
	if (t1_is_asic(cphy->adapter)) {
		u32 elmer;

		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
		elmer &= ~ELMER0_GP_BIT6;
		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
	}
	return 0;
}

/*
 * Acknowledge pending PHY interrupts.  The exact read sequence,
 * including the duplicated reads, works around the Rev C erratum
 * (see MV88x2010_LINK_STATUS_BUGS) and must not be reordered.
 */
static int mv88x201x_interrupt_clear(struct cphy *cphy)
{
	u32 elmer;
	u32 val;

#ifdef MV88x2010_LINK_STATUS_BUGS
	/* Required to read twice before clear takes affect. */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);

	/* Read this register after the others above it else
	 * the register doesn't clear correctly.
	 */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
#endif

	/* Clear link status. */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
	/* Clear PHY LASI interrupts. */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);

#ifdef MV88x2010_LINK_STATUS_BUGS
	/* Do it again. */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
#endif

	/* Clear Marvell interrupts through Elmer0. */
	if (t1_is_asic(cphy->adapter)) {
		t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
		elmer |= ELMER0_GP_BIT6;
		t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
	}
	return 0;
}

static int mv88x201x_interrupt_handler(struct cphy *cphy)
{
	/* Clear interrupts */
	mv88x201x_interrupt_clear(cphy);

	/* We have only enabled link change interrupts and so
	 * cphy_cause must be a link change interrupt.
	 */
	return cphy_cause_link_change;
}

static int mv88x201x_set_loopback(struct cphy *cphy, int on)
{
	return 0;
}

/*
 * Report link state.  Speed/duplex/flow control are fixed for this
 * 10G PHY; only the link bit is actually sampled from hardware.
 */
static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
				     int *speed, int *duplex, int *fc)
{
	u32 val = 0;

	if (link_ok) {
		/* Read link status. */
		cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
		*link_ok = !!(val & MDIO_STAT1_LSTATUS);
		/* Turn on/off Link LED */
		led_link(cphy, *link_ok);
	}
	if (speed)
		*speed = SPEED_10000;
	if (duplex)
		*duplex = DUPLEX_FULL;
	if (fc)
		*fc = PAUSE_RX | PAUSE_TX;
	return 0;
}

static void mv88x201x_destroy(struct cphy *cphy)
{
	kfree(cphy);
}

static struct cphy_ops mv88x201x_ops = {
	.destroy           = mv88x201x_destroy,
	.reset             = mv88x201x_reset,
	.interrupt_enable  = mv88x201x_interrupt_enable,
	.interrupt_disable = mv88x201x_interrupt_disable,
	.interrupt_clear   = mv88x201x_interrupt_clear,
	.interrupt_handler = mv88x201x_interrupt_handler,
	.get_link_status   = mv88x201x_get_link_status,
	.set_loopback      = mv88x201x_set_loopback,
	.mmds              = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS |
			      MDIO_DEVS_PHYXS | MDIO_DEVS_WIS),
};

/* Allocate and initialize a PHY instance for one port. */
static struct cphy *mv88x201x_phy_create(struct net_device *dev, int phy_addr,
					 const struct mdio_ops *mdio_ops)
{
	u32 val;
	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);

	if (!cphy)
		return NULL;

	cphy_init(cphy, dev, phy_addr, &mv88x201x_ops, mdio_ops);

	/* Commands the PHY to enable XFP's clock. */
	cphy_mdio_read(cphy, MDIO_MMD_PCS, 0x8300, &val);
	cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8300, val | 1);

	/* Clear link status. Required because of a bug in the PHY.  */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT2, &val);
	cphy_mdio_read(cphy, MDIO_MMD_PCS, MDIO_STAT2, &val);

	/* Allows for Link,Ack LED turn on/off */
	led_init(cphy);
	return cphy;
}

/* Chip Reset */
static int mv88x201x_phy_reset(adapter_t *adapter)
{
	u32 val;

	/* Pulse the reset GPIO, then re-enable the laser after settling. */
	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~4;
	t1_tpi_write(adapter, A_ELMER0_GPO, val);
	msleep(100);

	t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
	msleep(1000);

	/* Now lets enable the Laser. Delay 100us */
	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= 0x8000;
	t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(100);
	return 0;
}

const struct gphy t1_mv88x201x_ops = {
	.create = mv88x201x_phy_create,
	.reset  = mv88x201x_phy_reset
};
gpl-2.0
wanahmadzainie/linux-mainline
drivers/scsi/device_handler/scsi_dh_alua.c
40
31200
/* * Generic SCSI-3 ALUA SCSI Device Handler * * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #include <linux/slab.h> #include <linux/delay.h> #include <linux/module.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_proto.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dh.h> #define ALUA_DH_NAME "alua" #define ALUA_DH_VER "2.0" #define TPGS_SUPPORT_NONE 0x00 #define TPGS_SUPPORT_OPTIMIZED 0x01 #define TPGS_SUPPORT_NONOPTIMIZED 0x02 #define TPGS_SUPPORT_STANDBY 0x04 #define TPGS_SUPPORT_UNAVAILABLE 0x08 #define TPGS_SUPPORT_LBA_DEPENDENT 0x10 #define TPGS_SUPPORT_OFFLINE 0x40 #define TPGS_SUPPORT_TRANSITION 0x80 #define RTPG_FMT_MASK 0x70 #define RTPG_FMT_EXT_HDR 0x10 #define TPGS_MODE_UNINITIALIZED -1 #define TPGS_MODE_NONE 0x0 #define TPGS_MODE_IMPLICIT 0x1 #define TPGS_MODE_EXPLICIT 0x2 #define ALUA_RTPG_SIZE 128 #define ALUA_FAILOVER_TIMEOUT 60 #define ALUA_FAILOVER_RETRIES 5 #define ALUA_RTPG_DELAY_MSECS 5 /* device handler flags */ #define ALUA_OPTIMIZE_STPG 0x01 #define ALUA_RTPG_EXT_HDR_UNSUPP 0x02 #define ALUA_SYNC_STPG 0x04 /* State machine flags */ #define ALUA_PG_RUN_RTPG 0x10 #define ALUA_PG_RUN_STPG 0x20 #define ALUA_PG_RUNNING 0x40 static uint optimize_stpg; 
module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0."); static LIST_HEAD(port_group_list); static DEFINE_SPINLOCK(port_group_lock); static struct workqueue_struct *kaluad_wq; static struct workqueue_struct *kaluad_sync_wq; struct alua_port_group { struct kref kref; struct rcu_head rcu; struct list_head node; struct list_head dh_list; unsigned char device_id_str[256]; int device_id_len; int group_id; int tpgs; int state; int pref; unsigned flags; /* used for optimizing STPG */ unsigned char transition_tmo; unsigned long expiry; unsigned long interval; struct delayed_work rtpg_work; spinlock_t lock; struct list_head rtpg_list; struct scsi_device *rtpg_sdev; }; struct alua_dh_data { struct list_head node; struct alua_port_group *pg; int group_id; spinlock_t pg_lock; struct scsi_device *sdev; int init_error; struct mutex init_mutex; }; struct alua_queue_data { struct list_head entry; activate_complete callback_fn; void *callback_data; }; #define ALUA_POLICY_SWITCH_CURRENT 0 #define ALUA_POLICY_SWITCH_ALL 1 static void alua_rtpg_work(struct work_struct *work); static void alua_rtpg_queue(struct alua_port_group *pg, struct scsi_device *sdev, struct alua_queue_data *qdata, bool force); static void alua_check(struct scsi_device *sdev, bool force); static void release_port_group(struct kref *kref) { struct alua_port_group *pg; pg = container_of(kref, struct alua_port_group, kref); if (pg->rtpg_sdev) flush_delayed_work(&pg->rtpg_work); spin_lock(&port_group_lock); list_del(&pg->node); spin_unlock(&port_group_lock); kfree_rcu(pg, rcu); } /* * submit_rtpg - Issue a REPORT TARGET GROUP STATES command * @sdev: sdev the command should be sent to */ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff, int bufflen, struct scsi_sense_hdr *sshdr, int flags) { u8 cdb[COMMAND_SIZE(MAINTENANCE_IN)]; int req_flags = 
REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; /* Prepare the command. */ memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_IN)); cdb[0] = MAINTENANCE_IN; if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP)) cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT; else cdb[1] = MI_REPORT_TARGET_PGS; put_unaligned_be32(bufflen, &cdb[6]); return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE, buff, bufflen, sshdr, ALUA_FAILOVER_TIMEOUT * HZ, ALUA_FAILOVER_RETRIES, NULL, req_flags); } /* * submit_stpg - Issue a SET TARGET PORT GROUP command * * Currently we're only setting the current target port group state * to 'active/optimized' and let the array firmware figure out * the states of the remaining groups. */ static int submit_stpg(struct scsi_device *sdev, int group_id, struct scsi_sense_hdr *sshdr) { u8 cdb[COMMAND_SIZE(MAINTENANCE_OUT)]; unsigned char stpg_data[8]; int stpg_len = 8; int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; /* Prepare the data buffer */ memset(stpg_data, 0, stpg_len); stpg_data[4] = SCSI_ACCESS_STATE_OPTIMAL; put_unaligned_be16(group_id, &stpg_data[6]); /* Prepare the command. 
*/ memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_OUT)); cdb[0] = MAINTENANCE_OUT; cdb[1] = MO_SET_TARGET_PGS; put_unaligned_be32(stpg_len, &cdb[6]); return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, stpg_data, stpg_len, sshdr, ALUA_FAILOVER_TIMEOUT * HZ, ALUA_FAILOVER_RETRIES, NULL, req_flags); } static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, int group_id) { struct alua_port_group *pg; if (!id_str || !id_size || !strlen(id_str)) return NULL; list_for_each_entry(pg, &port_group_list, node) { if (pg->group_id != group_id) continue; if (!pg->device_id_len || pg->device_id_len != id_size) continue; if (strncmp(pg->device_id_str, id_str, id_size)) continue; if (!kref_get_unless_zero(&pg->kref)) continue; return pg; } return NULL; } /* * alua_alloc_pg - Allocate a new port_group structure * @sdev: scsi device * @h: alua device_handler data * @group_id: port group id * * Allocate a new port_group structure for a given * device. */ static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev, int group_id, int tpgs) { struct alua_port_group *pg, *tmp_pg; pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL); if (!pg) return ERR_PTR(-ENOMEM); pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str, sizeof(pg->device_id_str)); if (pg->device_id_len <= 0) { /* * TPGS supported but no device identification found. * Generate private device identification. 
*/ sdev_printk(KERN_INFO, sdev, "%s: No device descriptors found\n", ALUA_DH_NAME); pg->device_id_str[0] = '\0'; pg->device_id_len = 0; } pg->group_id = group_id; pg->tpgs = tpgs; pg->state = SCSI_ACCESS_STATE_OPTIMAL; if (optimize_stpg) pg->flags |= ALUA_OPTIMIZE_STPG; kref_init(&pg->kref); INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work); INIT_LIST_HEAD(&pg->rtpg_list); INIT_LIST_HEAD(&pg->node); INIT_LIST_HEAD(&pg->dh_list); spin_lock_init(&pg->lock); spin_lock(&port_group_lock); tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len, group_id); if (tmp_pg) { spin_unlock(&port_group_lock); kfree(pg); return tmp_pg; } list_add(&pg->node, &port_group_list); spin_unlock(&port_group_lock); return pg; } /* * alua_check_tpgs - Evaluate TPGS setting * @sdev: device to be checked * * Examine the TPGS setting of the sdev to find out if ALUA * is supported. */ static int alua_check_tpgs(struct scsi_device *sdev) { int tpgs = TPGS_MODE_NONE; /* * ALUA support for non-disk devices is fraught with * difficulties, so disable it for now. 
*/ if (sdev->type != TYPE_DISK) { sdev_printk(KERN_INFO, sdev, "%s: disable for non-disk devices\n", ALUA_DH_NAME); return tpgs; } tpgs = scsi_device_tpgs(sdev); switch (tpgs) { case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT: sdev_printk(KERN_INFO, sdev, "%s: supports implicit and explicit TPGS\n", ALUA_DH_NAME); break; case TPGS_MODE_EXPLICIT: sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n", ALUA_DH_NAME); break; case TPGS_MODE_IMPLICIT: sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n", ALUA_DH_NAME); break; case TPGS_MODE_NONE: sdev_printk(KERN_INFO, sdev, "%s: not supported\n", ALUA_DH_NAME); break; default: sdev_printk(KERN_INFO, sdev, "%s: unsupported TPGS setting %d\n", ALUA_DH_NAME, tpgs); tpgs = TPGS_MODE_NONE; break; } return tpgs; } /* * alua_check_vpd - Evaluate INQUIRY vpd page 0x83 * @sdev: device to be checked * * Extract the relative target port and the target port group * descriptor from the list of identificators. */ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, int tpgs) { int rel_port = -1, group_id; struct alua_port_group *pg, *old_pg = NULL; bool pg_updated = false; unsigned long flags; group_id = scsi_vpd_tpg_id(sdev, &rel_port); if (group_id < 0) { /* * Internal error; TPGS supported but required * VPD identification descriptors not present. 
* Disable ALUA support */ sdev_printk(KERN_INFO, sdev, "%s: No target port descriptors found\n", ALUA_DH_NAME); return SCSI_DH_DEV_UNSUPP; } pg = alua_alloc_pg(sdev, group_id, tpgs); if (IS_ERR(pg)) { if (PTR_ERR(pg) == -ENOMEM) return SCSI_DH_NOMEM; return SCSI_DH_DEV_UNSUPP; } if (pg->device_id_len) sdev_printk(KERN_INFO, sdev, "%s: device %s port group %x rel port %x\n", ALUA_DH_NAME, pg->device_id_str, group_id, rel_port); else sdev_printk(KERN_INFO, sdev, "%s: port group %x rel port %x\n", ALUA_DH_NAME, group_id, rel_port); /* Check for existing port group references */ spin_lock(&h->pg_lock); old_pg = h->pg; if (old_pg != pg) { /* port group has changed. Update to new port group */ if (h->pg) { spin_lock_irqsave(&old_pg->lock, flags); list_del_rcu(&h->node); spin_unlock_irqrestore(&old_pg->lock, flags); } rcu_assign_pointer(h->pg, pg); pg_updated = true; } spin_lock_irqsave(&pg->lock, flags); if (sdev->synchronous_alua) pg->flags |= ALUA_SYNC_STPG; if (pg_updated) list_add_rcu(&h->node, &pg->dh_list); spin_unlock_irqrestore(&pg->lock, flags); alua_rtpg_queue(h->pg, sdev, NULL, true); spin_unlock(&h->pg_lock); if (old_pg) kref_put(&old_pg->kref, release_port_group); return SCSI_DH_OK; } static char print_alua_state(unsigned char state) { switch (state) { case SCSI_ACCESS_STATE_OPTIMAL: return 'A'; case SCSI_ACCESS_STATE_ACTIVE: return 'N'; case SCSI_ACCESS_STATE_STANDBY: return 'S'; case SCSI_ACCESS_STATE_UNAVAILABLE: return 'U'; case SCSI_ACCESS_STATE_LBA: return 'L'; case SCSI_ACCESS_STATE_OFFLINE: return 'O'; case SCSI_ACCESS_STATE_TRANSITIONING: return 'T'; default: return 'X'; } } static int alua_check_sense(struct scsi_device *sdev, struct scsi_sense_hdr *sense_hdr) { switch (sense_hdr->sense_key) { case NOT_READY: if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) { /* * LUN Not Accessible - ALUA state transition */ alua_check(sdev, false); return NEEDS_RETRY; } break; case UNIT_ATTENTION: if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) { /* 
* Power On, Reset, or Bus Device Reset. * Might have obscured a state transition, * so schedule a recheck. */ alua_check(sdev, true); return ADD_TO_MLQUEUE; } if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04) /* * Device internal reset */ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01) /* * Mode Parameters Changed */ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) { /* * ALUA state changed */ alua_check(sdev, true); return ADD_TO_MLQUEUE; } if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) { /* * Implicit ALUA state transition failed */ alua_check(sdev, true); return ADD_TO_MLQUEUE; } if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03) /* * Inquiry data has changed */ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e) /* * REPORTED_LUNS_DATA_HAS_CHANGED is reported * when switching controllers on targets like * Intel Multi-Flex. We can just retry. */ return ADD_TO_MLQUEUE; break; } return SCSI_RETURN_NOT_HANDLED; } /* * alua_tur - Send a TEST UNIT READY * @sdev: device to which the TEST UNIT READY command should be send * * Send a TEST UNIT READY to @sdev to figure out the device state * Returns SCSI_DH_RETRY if the sense code is NOT READY/ALUA TRANSITIONING, * SCSI_DH_OK if no error occurred, and SCSI_DH_IO otherwise. */ static int alua_tur(struct scsi_device *sdev) { struct scsi_sense_hdr sense_hdr; int retval; retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ, ALUA_FAILOVER_RETRIES, &sense_hdr); if (sense_hdr.sense_key == NOT_READY && sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) return SCSI_DH_RETRY; else if (retval) return SCSI_DH_IO; else return SCSI_DH_OK; } /* * alua_rtpg - Evaluate REPORT TARGET GROUP STATES * @sdev: the device to be evaluated. * * Evaluate the Target Port Group State. * Returns SCSI_DH_DEV_OFFLINED if the path is * found to be unusable. 
 */
static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
	struct scsi_sense_hdr sense_hdr;
	struct alua_port_group *tmp_pg;
	int len, k, off, valid_states = 0, bufflen = ALUA_RTPG_SIZE;
	unsigned char *desc, *buff;
	unsigned err, retval;
	unsigned int tpg_desc_tbl_off;
	unsigned char orig_transition_tmo;
	unsigned long flags;

	/*
	 * Arm the retry deadline on the first attempt; subsequent retries
	 * reuse the same expiry so a transitioning target cannot stall us
	 * forever.
	 */
	if (!pg->expiry) {
		unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;

		if (pg->transition_tmo)
			transition_tmo = pg->transition_tmo * HZ;

		pg->expiry = round_jiffies_up(jiffies + transition_tmo);
	}

	buff = kzalloc(bufflen, GFP_KERNEL);
	if (!buff)
		return SCSI_DH_DEV_TEMP_BUSY;

 retry:
	err = 0;
	retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);

	if (retval) {
		/* No sense data means a transport/driver level failure */
		if (!scsi_sense_valid(&sense_hdr)) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: rtpg failed, result %d\n",
				    ALUA_DH_NAME, retval);
			kfree(buff);
			if (driver_byte(retval) == DRIVER_ERROR)
				return SCSI_DH_DEV_TEMP_BUSY;
			return SCSI_DH_IO;
		}

		/*
		 * submit_rtpg() has failed on existing arrays
		 * when requesting extended header info, and
		 * the array doesn't support extended headers,
		 * even though it shouldn't according to T10.
		 * The retry without rtpg_ext_hdr_req set
		 * handles this.
		 */
		if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
		    sense_hdr.sense_key == ILLEGAL_REQUEST &&
		    sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
			pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
			goto retry;
		}
		/*
		 * Retry on ALUA state transition or if any
		 * UNIT ATTENTION occurred.
		 */
		if (sense_hdr.sense_key == NOT_READY &&
		    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
			err = SCSI_DH_RETRY;
		else if (sense_hdr.sense_key == UNIT_ATTENTION)
			err = SCSI_DH_RETRY;
		if (err == SCSI_DH_RETRY &&
		    pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
			sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
				    ALUA_DH_NAME);
			scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
			kfree(buff);
			return err;
		}
		sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",
			    ALUA_DH_NAME);
		scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
		kfree(buff);
		pg->expiry = 0;
		return SCSI_DH_IO;
	}

	/* RETURN DATA LENGTH (bytes 0-3) excludes its own 4-byte header */
	len = get_unaligned_be32(&buff[0]) + 4;

	if (len > bufflen) {
		/* Resubmit with the correct length */
		kfree(buff);
		bufflen = len;
		buff = kmalloc(bufflen, GFP_KERNEL);
		if (!buff) {
			sdev_printk(KERN_WARNING, sdev,
				    "%s: kmalloc buffer failed\n", __func__);
			/* Temporary failure, bypass */
			pg->expiry = 0;
			return SCSI_DH_DEV_TEMP_BUSY;
		}
		goto retry;
	}

	/*
	 * With the extended header format the target may report its own
	 * implicit transition timeout (byte 5, in seconds); otherwise fall
	 * back to the driver default.
	 */
	orig_transition_tmo = pg->transition_tmo;
	if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && buff[5] != 0)
		pg->transition_tmo = buff[5];
	else
		pg->transition_tmo = ALUA_FAILOVER_TIMEOUT;

	if (orig_transition_tmo != pg->transition_tmo) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: transition timeout set to %d seconds\n",
			    ALUA_DH_NAME, pg->transition_tmo);
		pg->expiry = jiffies + pg->transition_tmo * HZ;
	}

	if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
		tpg_desc_tbl_off = 8;
	else
		tpg_desc_tbl_off = 4;

	/*
	 * Walk every target port group descriptor and propagate the
	 * reported state to all matching port groups on this target,
	 * not only the one we issued the RTPG through.
	 */
	for (k = tpg_desc_tbl_off, desc = buff + tpg_desc_tbl_off;
	     k < len;
	     k += off, desc += off) {
		u16 group_id = get_unaligned_be16(&desc[2]);

		spin_lock_irqsave(&port_group_lock, flags);
		tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
					  group_id);
		spin_unlock_irqrestore(&port_group_lock, flags);
		if (tmp_pg) {
			/*
			 * trylock only: a group whose own worker holds the
			 * lock is busy updating itself, so skip it here.
			 */
			if (spin_trylock_irqsave(&tmp_pg->lock, flags)) {
				if ((tmp_pg == pg) ||
				    !(tmp_pg->flags & ALUA_PG_RUNNING)) {
					struct alua_dh_data *h;

					tmp_pg->state = desc[0] & 0x0f;
					tmp_pg->pref = desc[0] >> 7;
					rcu_read_lock();
					list_for_each_entry_rcu(h,
						&tmp_pg->dh_list, node) {
						/* h->sdev should always be valid */
						BUG_ON(!h->sdev);
						h->sdev->access_state = desc[0];
					}
					rcu_read_unlock();
				}
				if (tmp_pg == pg)
					valid_states = desc[1];
				spin_unlock_irqrestore(&tmp_pg->lock, flags);
			}
			kref_put(&tmp_pg->kref, release_port_group);
		}
		/* 8-byte fixed part plus desc[7] four-byte port entries */
		off = 8 + (desc[7] * 4);
	}

	spin_lock_irqsave(&pg->lock, flags);
	sdev_printk(KERN_INFO, sdev,
		    "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
		    ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
		    pg->pref ? "preferred" : "non-preferred",
		    valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
		    valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
		    valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
		    valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
		    valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
		    valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
		    valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');

	switch (pg->state) {
	case SCSI_ACCESS_STATE_TRANSITIONING:
		if (time_before(jiffies, pg->expiry)) {
			/* State transition, retry */
			pg->interval = 2;
			err = SCSI_DH_RETRY;
		} else {
			struct alua_dh_data *h;

			/* Transitioning time exceeded, set port to standby */
			err = SCSI_DH_IO;
			pg->state = SCSI_ACCESS_STATE_STANDBY;
			pg->expiry = 0;
			rcu_read_lock();
			list_for_each_entry_rcu(h, &pg->dh_list, node) {
				BUG_ON(!h->sdev);
				h->sdev->access_state =
					(pg->state & SCSI_ACCESS_STATE_MASK);
				if (pg->pref)
					h->sdev->access_state |=
						SCSI_ACCESS_STATE_PREFERRED;
			}
			rcu_read_unlock();
		}
		break;
	case SCSI_ACCESS_STATE_OFFLINE:
		/* Path unusable */
		err = SCSI_DH_DEV_OFFLINED;
		pg->expiry = 0;
		break;
	default:
		/* Useable path if active */
		err = SCSI_DH_OK;
		pg->expiry = 0;
		break;
	}
	spin_unlock_irqrestore(&pg->lock, flags);
	kfree(buff);
	return err;
}

/*
 * alua_stpg - Issue a SET TARGET PORT GROUP command
 *
 * Issue a SET TARGET PORT GROUP command and evaluate the
 * response. Returns SCSI_DH_RETRY per default to trigger
 * a re-evaluation of the target group state or SCSI_DH_OK
 * if no further action needs to be taken.
 */
static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
	int retval;
	struct scsi_sense_hdr sense_hdr;

	if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) {
		/* Only implicit ALUA supported, retry */
		return SCSI_DH_RETRY;
	}
	/* Decide from the current state whether an explicit STPG is needed */
	switch (pg->state) {
	case SCSI_ACCESS_STATE_OPTIMAL:
		return SCSI_DH_OK;
	case SCSI_ACCESS_STATE_ACTIVE:
		/*
		 * Non-preferred paths on implicit+explicit targets may stay
		 * active/non-optimized when the optimize flag is set.
		 */
		if ((pg->flags & ALUA_OPTIMIZE_STPG) &&
		    !pg->pref &&
		    (pg->tpgs & TPGS_MODE_IMPLICIT))
			return SCSI_DH_OK;
		break;
	case SCSI_ACCESS_STATE_STANDBY:
	case SCSI_ACCESS_STATE_UNAVAILABLE:
		break;
	case SCSI_ACCESS_STATE_OFFLINE:
		return SCSI_DH_IO;
	case SCSI_ACCESS_STATE_TRANSITIONING:
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "%s: stpg failed, unhandled TPGS state %d",
			    ALUA_DH_NAME, pg->state);
		return SCSI_DH_NOSYS;
	}
	retval = submit_stpg(sdev, pg->group_id, &sense_hdr);

	if (retval) {
		if (!scsi_sense_valid(&sense_hdr)) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: stpg failed, result %d",
				    ALUA_DH_NAME, retval);
			if (driver_byte(retval) == DRIVER_ERROR)
				return SCSI_DH_DEV_TEMP_BUSY;
		} else {
			sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n",
				    ALUA_DH_NAME);
			scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
		}
	}
	/* Retry RTPG */
	return SCSI_DH_RETRY;
}

/*
 * alua_rtpg_work - delayed-work handler driving RTPG/STPG for a port group
 *
 * Runs with a kref and a scsi_device reference handed over by
 * alua_rtpg_queue(); both are dropped before returning (or kept when the
 * work re-queues itself for a retry). ALUA_PG_RUNNING marks the group as
 * busy so concurrent queuers do not start a second worker.
 */
static void alua_rtpg_work(struct work_struct *work)
{
	struct alua_port_group *pg =
		container_of(work, struct alua_port_group, rtpg_work.work);
	struct scsi_device *sdev;
	LIST_HEAD(qdata_list);
	int err = SCSI_DH_OK;
	struct alua_queue_data *qdata, *tmp;
	unsigned long flags;
	struct workqueue_struct *alua_wq = kaluad_wq;

	spin_lock_irqsave(&pg->lock, flags);
	sdev = pg->rtpg_sdev;
	if (!sdev) {
		WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
		WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
		spin_unlock_irqrestore(&pg->lock, flags);
		kref_put(&pg->kref, release_port_group);
		return;
	}
	if (pg->flags & ALUA_SYNC_STPG)
		alua_wq = kaluad_sync_wq;
	pg->flags |= ALUA_PG_RUNNING;
	if (pg->flags & ALUA_PG_RUN_RTPG) {
		int state = pg->state;

		pg->flags &= ~ALUA_PG_RUN_RTPG;
		spin_unlock_irqrestore(&pg->lock, flags);
		if (state == SCSI_ACCESS_STATE_TRANSITIONING) {
			/* Probe with TUR first; retry later while transitioning */
			if (alua_tur(sdev) == SCSI_DH_RETRY) {
				spin_lock_irqsave(&pg->lock, flags);
				pg->flags &= ~ALUA_PG_RUNNING;
				pg->flags |= ALUA_PG_RUN_RTPG;
				spin_unlock_irqrestore(&pg->lock, flags);
				queue_delayed_work(alua_wq, &pg->rtpg_work,
						   pg->interval * HZ);
				return;
			}
			/* Send RTPG on failure or if TUR indicates SUCCESS */
		}
		err = alua_rtpg(sdev, pg);
		spin_lock_irqsave(&pg->lock, flags);
		if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
			pg->flags &= ~ALUA_PG_RUNNING;
			pg->flags |= ALUA_PG_RUN_RTPG;
			spin_unlock_irqrestore(&pg->lock, flags);
			queue_delayed_work(alua_wq, &pg->rtpg_work,
					   pg->interval * HZ);
			return;
		}
		if (err != SCSI_DH_OK)
			pg->flags &= ~ALUA_PG_RUN_STPG;
	}
	if (pg->flags & ALUA_PG_RUN_STPG) {
		pg->flags &= ~ALUA_PG_RUN_STPG;
		spin_unlock_irqrestore(&pg->lock, flags);
		err = alua_stpg(sdev, pg);
		spin_lock_irqsave(&pg->lock, flags);
		if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
			pg->flags |= ALUA_PG_RUN_RTPG;
			pg->interval = 0;
			pg->flags &= ~ALUA_PG_RUNNING;
			spin_unlock_irqrestore(&pg->lock, flags);
			queue_delayed_work(alua_wq, &pg->rtpg_work,
					   pg->interval * HZ);
			return;
		}
	}
	/* Complete all queued activation requests with the final status */
	list_splice_init(&pg->rtpg_list, &qdata_list);
	pg->rtpg_sdev = NULL;
	spin_unlock_irqrestore(&pg->lock, flags);

	list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) {
		list_del(&qdata->entry);
		if (qdata->callback_fn)
			qdata->callback_fn(qdata->callback_data, err);
		kfree(qdata);
	}
	spin_lock_irqsave(&pg->lock, flags);
	pg->flags &= ~ALUA_PG_RUNNING;
	spin_unlock_irqrestore(&pg->lock, flags);
	scsi_device_put(sdev);
	kref_put(&pg->kref, release_port_group);
}

/*
 * alua_rtpg_queue - schedule a (delayed) RTPG for a port group
 * @pg: the port group to be evaluated (may be NULL, then a no-op)
 * @sdev: device to issue the RTPG through
 * @qdata: optional completion data for an activation request; queuing it
 *         also requests an STPG and forces scheduling
 * @force: schedule even if a worker is not yet pending
 *
 * Takes a pg kref and a scsi_device reference for the worker; both are
 * released here if queue_delayed_work() reports the work already pending.
 */
static void alua_rtpg_queue(struct alua_port_group *pg,
			    struct scsi_device *sdev,
			    struct alua_queue_data *qdata, bool force)
{
	int start_queue = 0;
	unsigned long flags;
	struct workqueue_struct *alua_wq = kaluad_wq;

	if (!pg)
		return;

	spin_lock_irqsave(&pg->lock, flags);
	if (qdata) {
		list_add_tail(&qdata->entry, &pg->rtpg_list);
		pg->flags |= ALUA_PG_RUN_STPG;
		force = true;
	}
	if (pg->rtpg_sdev == NULL) {
		pg->interval = 0;
		pg->flags |= ALUA_PG_RUN_RTPG;
		kref_get(&pg->kref);
		pg->rtpg_sdev = sdev;
		scsi_device_get(sdev);
		start_queue = 1;
	} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
		pg->flags |= ALUA_PG_RUN_RTPG;
		/* Do not queue if the worker is already running */
		if (!(pg->flags & ALUA_PG_RUNNING)) {
			kref_get(&pg->kref);
			/* worker reuses the sdev it already holds */
			sdev = NULL;
			start_queue = 1;
		}
	}

	if (pg->flags & ALUA_SYNC_STPG)
		alua_wq = kaluad_sync_wq;
	spin_unlock_irqrestore(&pg->lock, flags);

	if (start_queue &&
	    !queue_delayed_work(alua_wq, &pg->rtpg_work,
				msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
		/* work was already pending: drop the references taken above */
		if (sdev)
			scsi_device_put(sdev);
		kref_put(&pg->kref, release_port_group);
	}
}

/*
 * alua_initialize - Initialize ALUA state
 * @sdev: the device to be initialized
 *
 * For the prep_fn to work correctly we have
 * to initialize the ALUA state for the device.
 */
static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
{
	int err = SCSI_DH_DEV_UNSUPP, tpgs;

	mutex_lock(&h->init_mutex);
	tpgs = alua_check_tpgs(sdev);
	if (tpgs != TPGS_MODE_NONE)
		err = alua_check_vpd(sdev, h, tpgs);
	h->init_error = err;
	mutex_unlock(&h->init_mutex);
	return err;
}

/*
 * alua_set_params - set/unset the optimize flag
 * @sdev: device on the path to be activated
 * params - parameters in the following format
 * "no_of_params\0param1\0param2\0param3\0...\0"
 * For example, to set the flag pass the following parameters
 * from multipath.conf
 * hardware_handler "2 alua 1"
 */
static int alua_set_params(struct scsi_device *sdev, const char *params)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group __rcu *pg = NULL;
	unsigned int optimize = 0, argc;
	const char *p = params;
	int result = SCSI_DH_OK;
	unsigned long flags;

	if ((sscanf(params, "%u", &argc) != 1) || (argc != 1))
		return -EINVAL;

	/* skip past the NUL-terminated argument count */
	while (*p++)
		;
	if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
		return -EINVAL;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg) {
		rcu_read_unlock();
		return -ENXIO;
	}
	spin_lock_irqsave(&pg->lock, flags);
	if (optimize)
		pg->flags |= ALUA_OPTIMIZE_STPG;
	else
		pg->flags &= ~ALUA_OPTIMIZE_STPG;
	spin_unlock_irqrestore(&pg->lock, flags);
	rcu_read_unlock();

	return result;
}

/*
 * alua_activate - activate a path
 * @sdev: device on the path to be activated
 *
 * We're currently switching the port group to be activated only and
 * let the array figure out the rest.
 * There may be other arrays which require us to switch all port groups
 * based on a certain policy. But until we actually encounter them it
 * should be okay.
 */
static int alua_activate(struct scsi_device *sdev,
			activate_complete fn, void *data)
{
	struct alua_dh_data *h = sdev->handler_data;
	int err = SCSI_DH_OK;
	struct alua_queue_data *qdata;
	struct alua_port_group __rcu *pg;

	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
	if (!qdata) {
		err = SCSI_DH_RES_TEMP_UNAVAIL;
		goto out;
	}
	qdata->callback_fn = fn;
	qdata->callback_data = data;

	mutex_lock(&h->init_mutex);
	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg || !kref_get_unless_zero(&pg->kref)) {
		rcu_read_unlock();
		kfree(qdata);
		err = h->init_error;
		mutex_unlock(&h->init_mutex);
		goto out;
	}
	/* qdata owns the callback now; worker will invoke it */
	fn = NULL;
	rcu_read_unlock();
	mutex_unlock(&h->init_mutex);

	alua_rtpg_queue(pg, sdev, qdata, true);
	kref_put(&pg->kref, release_port_group);
out:
	if (fn)
		fn(data, err);
	return 0;
}

/*
 * alua_check - check path status
 * @sdev: device on the path to be checked
 *
 * Check the device status
 */
static void alua_check(struct scsi_device *sdev, bool force)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg || !kref_get_unless_zero(&pg->kref)) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	alua_rtpg_queue(pg, sdev, NULL, force);
	kref_put(&pg->kref, release_port_group);
}

/*
 * alua_prep_fn - request callback
 *
 * Fail I/O to all paths not in state
 * active/optimized or active/non-optimized.
 */
static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group __rcu *pg;
	unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
	int ret = BLKPREP_OK;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (pg)
		state = pg->state;
	rcu_read_unlock();
	if (state == SCSI_ACCESS_STATE_TRANSITIONING)
		ret = BLKPREP_DEFER;
	else if (state != SCSI_ACCESS_STATE_OPTIMAL &&
		 state != SCSI_ACCESS_STATE_ACTIVE &&
		 state != SCSI_ACCESS_STATE_LBA) {
		ret = BLKPREP_KILL;
		req->cmd_flags |= REQ_QUIET;
	}
	return ret;
}

/*
 * alua_rescan - re-evaluate ALUA support after a device rescan
 * @sdev: device to re-evaluate
 */
static void alua_rescan(struct scsi_device *sdev)
{
	struct alua_dh_data *h = sdev->handler_data;

	alua_initialize(sdev, h);
}

/*
 * alua_bus_attach - Attach device handler
 * @sdev: device to be attached to
 */
static int alua_bus_attach(struct scsi_device *sdev)
{
	struct alua_dh_data *h;
	int err, ret = -EINVAL;

	h = kzalloc(sizeof(*h) , GFP_KERNEL);
	if (!h)
		return -ENOMEM;
	spin_lock_init(&h->pg_lock);
	rcu_assign_pointer(h->pg, NULL);
	h->init_error = SCSI_DH_OK;
	h->sdev = sdev;
	INIT_LIST_HEAD(&h->node);

	mutex_init(&h->init_mutex);
	err = alua_initialize(sdev, h);
	if (err == SCSI_DH_NOMEM)
		ret = -ENOMEM;
	if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
		goto failed;

	sdev->handler_data = h;
	return 0;
failed:
	kfree(h);
	return ret;
}

/*
 * alua_bus_detach - Detach device handler
 * @sdev: device to be detached from
 */
static void alua_bus_detach(struct scsi_device *sdev)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;

	spin_lock(&h->pg_lock);
	pg = h->pg;
	rcu_assign_pointer(h->pg, NULL);
	h->sdev = NULL;
	spin_unlock(&h->pg_lock);
	if (pg) {
		spin_lock_irq(&pg->lock);
		list_del_rcu(&h->node);
		spin_unlock_irq(&pg->lock);
		kref_put(&pg->kref, release_port_group);
	}
	sdev->handler_data = NULL;
	kfree(h);
}

/* scsi_dh entry points for the ALUA handler */
static struct scsi_device_handler alua_dh = {
	.name = ALUA_DH_NAME,
	.module = THIS_MODULE,
	.attach = alua_bus_attach,
	.detach = alua_bus_detach,
	.prep_fn = alua_prep_fn,
	.check_sense = alua_check_sense,
	.activate = alua_activate,
	.rescan = alua_rescan,
	.set_params = alua_set_params,
};

/*
 * alua_init - module init: create work queues, register the handler.
 *
 * NOTE(review): on workqueue-allocation failure this returns
 * SCSI_DH_DEV_TEMP_BUSY (a positive SCSI_DH status) from a module init
 * function, where a negative errno (-ENOMEM) is the convention — verify
 * against the scsi_dh framework before changing.
 */
static int __init alua_init(void)
{
	int r;

	kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
	if (!kaluad_wq) {
		/* Temporary failure, bypass */
		return SCSI_DH_DEV_TEMP_BUSY;
	}
	kaluad_sync_wq = create_workqueue("kaluad_sync");
	if (!kaluad_sync_wq) {
		destroy_workqueue(kaluad_wq);
		return SCSI_DH_DEV_TEMP_BUSY;
	}
	r = scsi_register_device_handler(&alua_dh);
	if (r != 0) {
		printk(KERN_ERR "%s: Failed to register scsi device handler",
			ALUA_DH_NAME);
		destroy_workqueue(kaluad_sync_wq);
		destroy_workqueue(kaluad_wq);
	}
	return r;
}

/* alua_exit - module exit: unregister the handler, tear down work queues */
static void __exit alua_exit(void)
{
	scsi_unregister_device_handler(&alua_dh);
	destroy_workqueue(kaluad_sync_wq);
	destroy_workqueue(kaluad_wq);
}

module_init(alua_init);
module_exit(alua_exit);

MODULE_DESCRIPTION("DM Multipath ALUA support");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(ALUA_DH_VER);
gpl-2.0
nimengyu2/dm37x-cus-mini8510d-linux-2.6.32-sbc8100_plus
net/bluetooth/rfcomm/sock.c
40
24025
/* RFCOMM implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* * RFCOMM sockets. 
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/device.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/rfcomm.h>

static const struct proto_ops rfcomm_sock_ops;

/* Global list of all RFCOMM sockets, guarded by its rwlock */
static struct bt_sock_list rfcomm_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(rfcomm_sk_list.lock)
};

static void rfcomm_sock_close(struct sock *sk);
static void rfcomm_sock_kill(struct sock *sk);

/* ---- DLC callbacks ----
 *
 * called under rfcomm_dlc_lock()
 */

/* Queue an incoming skb on the socket and throttle the DLC when the
 * receive buffer is full. */
static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
{
	struct sock *sk = d->owner;
	if (!sk)
		return;

	atomic_add(skb->len, &sk->sk_rmem_alloc);
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		rfcomm_dlc_throttle(d);
}

/* Mirror DLC state changes onto the owning socket and wake waiters /
 * the listening parent as appropriate. */
static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
{
	struct sock *sk = d->owner, *parent;
	if (!sk)
		return;

	BT_DBG("dlc %p state %ld err %d", d, d->state, err);

	bh_lock_sock(sk);

	if (err)
		sk->sk_err = err;

	sk->sk_state = d->state;

	parent = bt_sk(sk)->parent;
	if (parent) {
		if (d->state == BT_CLOSED) {
			sock_set_flag(sk, SOCK_ZAPPED);
			bt_accept_unlink(sk);
		}
		parent->sk_data_ready(parent, 0);
	} else {
		if (d->state == BT_CONNECTED)
			rfcomm_session_getaddr(d->session,
					       &bt_sk(sk)->src, NULL);
		sk->sk_state_change(sk);
	}

	bh_unlock_sock(sk);

	if (parent && sock_flag(sk, SOCK_ZAPPED)) {
		/* We have to drop DLC lock here, otherwise
		 * rfcomm_sock_destruct() will dead lock.
		 */
		rfcomm_dlc_unlock(d);
		rfcomm_sock_kill(sk);
		rfcomm_dlc_lock(d);
	}
}

/* ---- Socket functions ---- */

/* Find a socket bound to exactly this (channel, src) pair; caller must
 * hold rfcomm_sk_list.lock. */
static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
{
	struct sock *sk = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &rfcomm_sk_list.head) {
		if (rfcomm_pi(sk)->channel == channel &&
				!bacmp(&bt_sk(sk)->src, src))
			break;
	}

	return node ? sk : NULL;
}

/* Find socket with channel and source bdaddr.
 * Returns closest match.
 */
static struct sock *__rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &rfcomm_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (rfcomm_pi(sk)->channel == channel) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match: a BDADDR_ANY binding is kept as
			 * fallback in case no exact source match exists. */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	return node ? sk : sk1;
}

/* Find socket with given address (channel, src).
 * Returns locked socket */
static inline struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
{
	struct sock *s;

	read_lock(&rfcomm_sk_list.lock);
	s = __rfcomm_get_sock_by_channel(state, channel, src);
	if (s)
		bh_lock_sock(s);
	read_unlock(&rfcomm_sk_list.lock);

	return s;
}

/* Final teardown: purge queues and detach/release the underlying DLC. */
static void rfcomm_sock_destruct(struct sock *sk)
{
	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;

	BT_DBG("sk %p dlc %p", sk, d);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	rfcomm_dlc_lock(d);
	rfcomm_pi(sk)->dlc = NULL;

	/* Detach DLC if it's owned by this socket */
	if (d->owner == sk)
		d->owner = NULL;
	rfcomm_dlc_unlock(d);

	rfcomm_dlc_put(d);
}

/* Close and kill all not-yet-accepted children of a listening socket. */
static void rfcomm_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted dlcs */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		rfcomm_sock_close(sk);
		rfcomm_sock_kill(sk);
	}

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
static void rfcomm_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));

	/* Kill poor orphan */
	bt_sock_unlink(&rfcomm_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* State-dependent close: tear down listeners, close live DLCs, zap the
 * rest. Caller holds the socket lock. */
static void __rfcomm_sock_close(struct sock *sk)
{
	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;

	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		rfcomm_sock_cleanup_listen(sk);
		break;

	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
	case BT_CONNECTED:
		rfcomm_dlc_close(d, 0);
		/* fall through: connected states get zapped as well */

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}

/* Close socket.
 * Must be called on unlocked socket.
 */
static void rfcomm_sock_close(struct sock *sk)
{
	lock_sock(sk);
	__rfcomm_sock_close(sk);
	release_sock(sk);
}

/* Initialize a fresh socket, inheriting security settings from the
 * listening parent when one exists. */
static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
{
	struct rfcomm_pinfo *pi = rfcomm_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		pi->dlc->defer_setup = bt_sk(parent)->defer_setup;

		pi->sec_level = rfcomm_pi(parent)->sec_level;
		pi->role_switch = rfcomm_pi(parent)->role_switch;
	} else {
		pi->dlc->defer_setup = 0;

		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
	}

	pi->dlc->sec_level = pi->sec_level;
	pi->dlc->role_switch = pi->role_switch;
}

static struct proto rfcomm_proto = {
	.name		= "RFCOMM",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct rfcomm_pinfo)
};

/* Allocate a socket together with its backing DLC and link it into the
 * global socket list. Returns NULL on allocation failure. */
static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct rfcomm_dlc *d;
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	d = rfcomm_dlc_alloc(prio);
	if (!d) {
		sk_free(sk);
		return NULL;
	}

	d->data_ready   = rfcomm_sk_data_ready;
	d->state_change = rfcomm_sk_state_change;

	rfcomm_pi(sk)->dlc = d;
	d->owner = sk;

	sk->sk_destruct = rfcomm_sock_destruct;
	sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT;

	sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
	sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state    = BT_OPEN;

	bt_sock_link(&rfcomm_sk_list, sk);

	BT_DBG("sk %p", sk);
	return sk;
}

/* socket(2) backend: only SOCK_STREAM and SOCK_RAW are supported. */
static int rfcomm_sock_create(struct net *net, struct socket *sock,
			      int protocol, int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rfcomm_sock_ops;

	sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	rfcomm_sock_init(sk, NULL);
	return 0;
}

/* bind(2) backend: claim (channel, bdaddr) if not already in use. */
static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p %s", sk, batostr(&sa->rc_bdaddr));

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (sk->sk_type != SOCK_STREAM) {
		err = -EINVAL;
		goto done;
	}

	write_lock_bh(&rfcomm_sk_list.lock);

	if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr);
		rfcomm_pi(sk)->channel = sa->rc_channel;
		sk->sk_state = BT_BOUND;
	}

	write_unlock_bh(&rfcomm_sk_list.lock);

done:
	release_sock(sk);
	return err;
}

/* connect(2) backend: open the DLC and, unless non-blocking, wait for
 * BT_CONNECTED. */
static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
	struct sock *sk = sock->sk;
	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
	int err = 0;

	BT_DBG("sk %p", sk);

	if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_rc))
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	if (sk->sk_type != SOCK_STREAM) {
		err = -EINVAL;
		goto done;
	}

	sk->sk_state = BT_CONNECT;
	bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr);
	rfcomm_pi(sk)->channel = sa->rc_channel;

	d->sec_level = rfcomm_pi(sk)->sec_level;
	d->role_switch = rfcomm_pi(sk)->role_switch;

	err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel);
	if (!err)
		err = bt_sock_wait_state(sk, BT_CONNECTED,
				sock_sndtimeo(sk, flags & O_NONBLOCK));

done:
	release_sock(sk);
	return err;
}

/* listen(2) backend: auto-assign a free channel (1-30) when the socket
 * was bound to channel 0. */
static int rfcomm_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	if (sk->sk_type != SOCK_STREAM) {
		err = -EINVAL;
		goto done;
	}

	if (!rfcomm_pi(sk)->channel) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u8 channel;

		err = -EINVAL;

		write_lock_bh(&rfcomm_sk_list.lock);

		for (channel = 1; channel < 31; channel++)
			if (!__rfcomm_get_sock_by_addr(channel, src)) {
				rfcomm_pi(sk)->channel = channel;
				err = 0;
				break;
			}

		write_unlock_bh(&rfcomm_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}

/* accept(2) backend: block (honoring the rcv timeout) until a child
 * connection is available on the accept queue. */
static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock(sk);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	if (sk->sk_type != SOCK_STREAM) {
		err = -EINVAL;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one).
	 */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}

/* getsockname(2)/getpeername(2) backend (peer selects dst vs src). */
static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
	struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	sa->rc_family  = AF_BLUETOOTH;
	sa->rc_channel = rfcomm_pi(sk)->channel;
	if (peer)
		bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst);
	else
		bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src);

	*len = sizeof(struct sockaddr_rc);
	return 0;
}

/* sendmsg(2) backend: fragment the payload into MTU-sized skbs and hand
 * them to the DLC; returns bytes sent or the first error if nothing was
 * sent. */
static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
	struct sk_buff *skb;
	int sent = 0;

	if (test_bit(RFCOMM_DEFER_SETUP, &d->flags))
		return -ENOTCONN;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		return -EPIPE;

	BT_DBG("sock %p, sk %p", sock, sk);

	lock_sock(sk);

	while (len) {
		size_t size = min_t(size_t, len, d->mtu);
		int err;

		skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE,
				msg->msg_flags & MSG_DONTWAIT, &err);
		if (!skb) {
			if (sent == 0)
				sent = err;
			break;
		}
		skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			if (sent == 0)
				sent = err;
			break;
		}

		err = rfcomm_dlc_send(d, skb);
		if (err < 0) {
			kfree_skb(skb);
			if (sent == 0)
				sent = err;
			break;
		}

		sent += size;
		len  -= size;
	}

	release_sock(sk);

	return sent;
}

/* Sleep until data, error, shutdown, signal, or timeout; the socket lock
 * is dropped while sleeping. Returns the remaining timeout. */
static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk->sk_sleep, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return timeo;
}

/* recvmsg(2) backend: drain queued skbs (honoring MSG_PEEK/rcvlowat)
 * and unthrottle the DLC once the receive buffer drops below 1/4. */
static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
	int err = 0;
	size_t target, copied = 0;
	long timeo;

	/* First read on a deferred socket accepts the pending connection */
	if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
		rfcomm_dlc_accept(d);
		return 0;
	}

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	msg->msg_namelen = 0;

	BT_DBG("sk %p size %zu", sk, size);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb;
		int chunk;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (!skb) {
			if (copied >= target)
				break;

			if ((err = sock_error(sk)) != 0)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			err = -EAGAIN;
			if (!timeo)
				break;

			timeo = rfcomm_sock_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			continue;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (!copied)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size   -= chunk;

		sock_recv_ts_and_drops(msg, sk, skb);

		if (!(flags & MSG_PEEK)) {
			atomic_sub(chunk, &sk->sk_rmem_alloc);

			skb_pull(skb, chunk);
			if (skb->len) {
				/* partial read: keep remainder queued */
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

out:
	if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
		rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);

	release_sock(sk);
	return copied ? : err;
}

/* Legacy SOL_RFCOMM setsockopt: map RFCOMM_LM bits onto sec_level and
 * role_switch. */
static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case RFCOMM_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		if (opt & RFCOMM_LM_AUTH)
			rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & RFCOMM_LM_ENCRYPT)
			rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & RFCOMM_LM_SECURE)
			rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH;

		rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

/* setsockopt(2) backend: SOL_BLUETOOTH options; SOL_RFCOMM is delegated
 * to the legacy handler. */
static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_RFCOMM)
		return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_STREAM) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		rfcomm_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

/* Legacy SOL_RFCOMM getsockopt.
 *
 * NOTE(review): `cinfo` is copied to userspace without a prior
 * memset(&cinfo, 0, sizeof(cinfo)); if the struct has padding this can
 * leak kernel stack bytes — upstream later added the memset; verify
 * against the struct layout before relying on this.
 */
static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct sock *l2cap_sk;
	struct rfcomm_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case RFCOMM_LM:
		switch (rfcomm_pi(sk)->sec_level) {
		case BT_SECURITY_LOW:
			opt = RFCOMM_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE;
			break;
		default:
			opt = 0;
			break;
		}

		if (rfcomm_pi(sk)->role_switch)
			opt |= RFCOMM_LM_MASTER;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case RFCOMM_CONNINFO:
		if (sk->sk_state != BT_CONNECTED &&
					!rfcomm_pi(sk)->dlc->defer_setup) {
			err = -ENOTCONN;
			break;
		}

		l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;

		cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

/* getsockopt(2) backend: SOL_BLUETOOTH options; SOL_RFCOMM delegated to
 * the legacy handler. */
static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_RFCOMM)
		return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_STREAM) {
			err = -EINVAL;
			break;
		}

		sec.level = rfcomm_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

/* ioctl(2) backend: try generic Bluetooth ioctls first, then the RFCOMM
 * TTY device ioctls when that support is compiled in. */
static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk __maybe_unused = sock->sk;
	int err;

	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);

	err = bt_sock_ioctl(sock, cmd, arg);

	if (err == -ENOIOCTLCMD) {
#ifdef CONFIG_BT_RFCOMM_TTY
		lock_sock(sk);
		err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg);
		release_sock(sk);
#else
		err = -EOPNOTSUPP;
#endif
	}

	return err;
}

/* shutdown(2) backend: closes in both directions regardless of `how`,
 * optionally lingering until BT_CLOSED. */
static int rfcomm_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		sk->sk_shutdown = SHUTDOWN_MASK;
		__rfcomm_sock_close(sk);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}

/* close(2)/release backend: shut down, orphan and kill the socket. */
static int rfcomm_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = rfcomm_sock_shutdown(sock, 2);

	sock_orphan(sk);
	rfcomm_sock_kill(sk);
	return err;
}

/* ---- RFCOMM core layer callbacks ----
 *
 * called under rfcomm_lock()
 */
int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d)
{
	struct sock *sk, *parent;
	bdaddr_t src, dst;
	int result = 0;

	BT_DBG("session %p channel %d", s, channel);

	rfcomm_session_getaddr(s, &src, &dst);

	/* Check if we have socket listening on channel */
	parent = rfcomm_get_sock_by_channel(BT_LISTEN, channel, &src);
	if (!parent)
		return 0;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto done;
	}

	sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC);
	if (!sk)
		goto done;

	rfcomm_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, &src);
	bacpy(&bt_sk(sk)->dst, &dst);
	rfcomm_pi(sk)->channel = channel;

	sk->sk_state = BT_CONFIG;
	bt_accept_enqueue(parent, sk);

	/* Accept connection and return socket DLC */
	*d = rfcomm_pi(sk)->dlc;
	result = 1;

done:
bh_unlock_sock(parent); if (bt_sk(parent)->defer_setup) parent->sk_state_change(parent); return result; } static ssize_t rfcomm_sock_sysfs_show(struct class *dev, char *buf) { struct sock *sk; struct hlist_node *node; char *str = buf; read_lock_bh(&rfcomm_sk_list.lock); sk_for_each(sk, node, &rfcomm_sk_list.head) { str += sprintf(str, "%s %s %d %d\n", batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), sk->sk_state, rfcomm_pi(sk)->channel); } read_unlock_bh(&rfcomm_sk_list.lock); return (str - buf); } static CLASS_ATTR(rfcomm, S_IRUGO, rfcomm_sock_sysfs_show, NULL); static const struct proto_ops rfcomm_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = rfcomm_sock_release, .bind = rfcomm_sock_bind, .connect = rfcomm_sock_connect, .listen = rfcomm_sock_listen, .accept = rfcomm_sock_accept, .getname = rfcomm_sock_getname, .sendmsg = rfcomm_sock_sendmsg, .recvmsg = rfcomm_sock_recvmsg, .shutdown = rfcomm_sock_shutdown, .setsockopt = rfcomm_sock_setsockopt, .getsockopt = rfcomm_sock_getsockopt, .ioctl = rfcomm_sock_ioctl, .poll = bt_sock_poll, .socketpair = sock_no_socketpair, .mmap = sock_no_mmap }; static const struct net_proto_family rfcomm_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = rfcomm_sock_create }; int __init rfcomm_init_sockets(void) { int err; err = proto_register(&rfcomm_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops); if (err < 0) goto error; if (class_create_file(bt_class, &class_attr_rfcomm) < 0) BT_ERR("Failed to create RFCOMM info file"); BT_INFO("RFCOMM socket layer initialized"); return 0; error: BT_ERR("RFCOMM socket layer registration failed"); proto_unregister(&rfcomm_proto); return err; } void rfcomm_cleanup_sockets(void) { class_remove_file(bt_class, &class_attr_rfcomm); if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) BT_ERR("RFCOMM socket layer unregistration failed"); proto_unregister(&rfcomm_proto); }
gpl-2.0
gompa/linux
arch/powerpc/kvm/e500_mmu_host.c
296
21764
/* * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved. * * Author: Yu Liu, yu.liu@freescale.com * Scott Wood, scottwood@freescale.com * Ashish Kalra, ashish.kalra@freescale.com * Varun Sethi, varun.sethi@freescale.com * Alexander Graf, agraf@suse.de * * Description: * This file is based on arch/powerpc/kvm/44x_tlb.c, * by Hollis Blanchard <hollisb@us.ibm.com>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/highmem.h> #include <linux/log2.h> #include <linux/uaccess.h> #include <linux/sched.h> #include <linux/rwsem.h> #include <linux/vmalloc.h> #include <linux/hugetlb.h> #include <asm/kvm_ppc.h> #include "e500.h" #include "timing.h" #include "e500_mmu_host.h" #include "trace_booke.h" #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; static inline unsigned int tlb1_max_shadow_size(void) { /* reserve one entry for magic page */ return host_tlb_params[1].entries - tlbcam_index - 1; } static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) { /* Mask off reserved bits. */ mas3 &= MAS3_ATTRIB_MASK; #ifndef CONFIG_KVM_BOOKE_HV if (!usermode) { /* Guest is in supervisor mode, * so we need to translate guest * supervisor permissions into user permissions. 
*/ mas3 &= ~E500_TLB_USER_PERM_MASK; mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; } mas3 |= E500_TLB_SUPER_PERM_MASK; #endif return mas3; } /* * writing shadow tlb entry to host TLB */ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe, uint32_t mas0, uint32_t lpid) { unsigned long flags; local_irq_save(flags); mtspr(SPRN_MAS0, mas0); mtspr(SPRN_MAS1, stlbe->mas1); mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); #ifdef CONFIG_KVM_BOOKE_HV mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid)); #endif asm volatile("isync; tlbwe" : : : "memory"); #ifdef CONFIG_KVM_BOOKE_HV /* Must clear mas8 for other host tlbwe's */ mtspr(SPRN_MAS8, 0); isync(); #endif local_irq_restore(flags); trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, stlbe->mas2, stlbe->mas7_3); } /* * Acquire a mas0 with victim hint, as if we just took a TLB miss. * * We don't care about the address we're searching for, other than that it's * in the right set and is not present in the TLB. Using a zero PID and a * userspace address means we don't have to set and then restore MAS5, or * calculate a proper MAS6 value. 
*/ static u32 get_host_mas0(unsigned long eaddr) { unsigned long flags; u32 mas0; u32 mas4; local_irq_save(flags); mtspr(SPRN_MAS6, 0); mas4 = mfspr(SPRN_MAS4); mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK); asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET)); mas0 = mfspr(SPRN_MAS0); mtspr(SPRN_MAS4, mas4); local_irq_restore(flags); return mas0; } /* sesel is for tlb1 only */ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe) { u32 mas0; if (tlbsel == 0) { mas0 = get_host_mas0(stlbe->mas2); __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid); } else { __write_host_tlbe(stlbe, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(sesel)), vcpu_e500->vcpu.kvm->arch.lpid); } } /* sesel is for tlb1 only */ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, struct kvm_book3e_206_tlb_entry *gtlbe, struct kvm_book3e_206_tlb_entry *stlbe, int stlbsel, int sesel) { int stid; preempt_disable(); stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); stlbe->mas1 |= MAS1_TID(stid); write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); preempt_enable(); } #ifdef CONFIG_KVM_E500V2 /* XXX should be a hook in the gva2hpa translation */ void kvmppc_map_magic(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); struct kvm_book3e_206_tlb_entry magic; ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; unsigned int stid; kvm_pfn_t pfn; pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT; get_page(pfn_to_page(pfn)); preempt_disable(); stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0); magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) | MAS1_TSIZE(BOOK3E_PAGESZ_4K); magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M; magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) | MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR; magic.mas8 = 0; __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0); preempt_enable(); } #endif void inval_gtlbe_on_host(struct 
kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int esel) { struct kvm_book3e_206_tlb_entry *gtlbe = get_entry(vcpu_e500, tlbsel, esel); struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; /* Don't bother with unmapped entries */ if (!(ref->flags & E500_TLB_VALID)) { WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), "%s: flags %x\n", __func__, ref->flags); WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]); } if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; int hw_tlb_indx; unsigned long flags; local_irq_save(flags); while (tmp) { hw_tlb_indx = __ilog2_u64(tmp & -tmp); mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(hw_tlb_indx))); mtspr(SPRN_MAS1, 0); asm volatile("tlbwe"); vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0; tmp &= tmp - 1; } mb(); vcpu_e500->g2h_tlb1_map[esel] = 0; ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); local_irq_restore(flags); } if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) { /* * TLB1 entry is backed by 4k pages. This should happen * rarely and is not worth optimizing. Invalidate everything. 
*/ kvmppc_e500_tlbil_all(vcpu_e500); ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); } /* * If TLB entry is still valid then it's a TLB0 entry, and thus * backed by at most one host tlbe per shadow pid */ if (ref->flags & E500_TLB_VALID) kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); /* Mark the TLB as not backed by the host anymore */ ref->flags = 0; } static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) { return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); } static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, struct kvm_book3e_206_tlb_entry *gtlbe, kvm_pfn_t pfn, unsigned int wimg) { ref->pfn = pfn; ref->flags = E500_TLB_VALID; /* Use guest supplied MAS2_G and MAS2_E */ ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; /* Mark the page accessed */ kvm_set_pfn_accessed(pfn); if (tlbe_is_writable(gtlbe)) kvm_set_pfn_dirty(pfn); } static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) { if (ref->flags & E500_TLB_VALID) { /* FIXME: don't log bogus pfn for TLB1 */ trace_kvm_booke206_ref_release(ref->pfn, ref->flags); ref->flags = 0; } } static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) { if (vcpu_e500->g2h_tlb1_map) memset(vcpu_e500->g2h_tlb1_map, 0, sizeof(u64) * vcpu_e500->gtlb_params[1].entries); if (vcpu_e500->h2g_tlb1_rmap) memset(vcpu_e500->h2g_tlb1_rmap, 0, sizeof(unsigned int) * host_tlb_params[1].entries); } static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) { int tlbsel; int i; for (tlbsel = 0; tlbsel <= 1; tlbsel++) { for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][i].ref; kvmppc_e500_ref_release(ref); } } } void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); kvmppc_e500_tlbil_all(vcpu_e500); clear_tlb_privs(vcpu_e500); clear_tlb1_bitmap(vcpu_e500); } /* TID must be supplied by the caller */ static void kvmppc_e500_setup_stlbe( struct kvm_vcpu *vcpu, struct 
kvm_book3e_206_tlb_entry *gtlbe, int tsize, struct tlbe_ref *ref, u64 gvaddr, struct kvm_book3e_206_tlb_entry *stlbe) { kvm_pfn_t pfn = ref->pfn; u32 pr = vcpu->arch.shared->msr & MSR_PR; BUG_ON(!(ref->flags & E500_TLB_VALID)); /* Force IPROT=0 for all guest mappings. */ stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR); stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); } static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, struct tlbe_ref *ref) { struct kvm_memory_slot *slot; unsigned long pfn = 0; /* silence GCC warning */ unsigned long hva; int pfnmap = 0; int tsize = BOOK3E_PAGESZ_4K; int ret = 0; unsigned long mmu_seq; struct kvm *kvm = vcpu_e500->vcpu.kvm; unsigned long tsize_pages = 0; pte_t *ptep; unsigned int wimg = 0; pgd_t *pgdir; unsigned long flags; /* used to check for invalidations in progress */ mmu_seq = kvm->mmu_notifier_seq; smp_rmb(); /* * Translate guest physical to true physical, acquiring * a page reference if it is normal, non-reserved memory. * * gfn_to_memslot() must succeed because otherwise we wouldn't * have gotten this far. Eventually we should just pass the slot * pointer through from the first lookup. */ slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); hva = gfn_to_hva_memslot(slot, gfn); if (tlbsel == 1) { struct vm_area_struct *vma; down_read(&current->mm->mmap_sem); vma = find_vma(current->mm, hva); if (vma && hva >= vma->vm_start && (vma->vm_flags & VM_PFNMAP)) { /* * This VMA is a physically contiguous region (e.g. * /dev/mem) that bypasses normal Linux page * management. Find the overlap between the * vma and the memslot. 
*/ unsigned long start, end; unsigned long slot_start, slot_end; pfnmap = 1; start = vma->vm_pgoff; end = start + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT); slot_start = pfn - (gfn - slot->base_gfn); slot_end = slot_start + slot->npages; if (start < slot_start) start = slot_start; if (end > slot_end) end = slot_end; tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; /* * e500 doesn't implement the lowest tsize bit, * or 1K pages. */ tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); /* * Now find the largest tsize (up to what the guest * requested) that will cover gfn, stay within the * range, and for which gfn and pfn are mutually * aligned. */ for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { unsigned long gfn_start, gfn_end; tsize_pages = 1UL << (tsize - 2); gfn_start = gfn & ~(tsize_pages - 1); gfn_end = gfn_start + tsize_pages; if (gfn_start + pfn - gfn < start) continue; if (gfn_end + pfn - gfn > end) continue; if ((gfn & (tsize_pages - 1)) != (pfn & (tsize_pages - 1))) continue; gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); pfn &= ~(tsize_pages - 1); break; } } else if (vma && hva >= vma->vm_start && (vma->vm_flags & VM_HUGETLB)) { unsigned long psize = vma_kernel_pagesize(vma); tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; /* * Take the largest page size that satisfies both host * and guest mapping */ tsize = min(__ilog2(psize) - 10, tsize); /* * e500 doesn't implement the lowest tsize bit, * or 1K pages. 
*/ tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); } up_read(&current->mm->mmap_sem); } if (likely(!pfnmap)) { tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT); pfn = gfn_to_pfn_memslot(slot, gfn); if (is_error_noslot_pfn(pfn)) { if (printk_ratelimit()) pr_err("%s: real page not found for gfn %lx\n", __func__, (long)gfn); return -EINVAL; } /* Align guest and physical address to page map boundaries */ pfn &= ~(tsize_pages - 1); gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); } spin_lock(&kvm->mmu_lock); if (mmu_notifier_retry(kvm, mmu_seq)) { ret = -EAGAIN; goto out; } pgdir = vcpu_e500->vcpu.arch.pgdir; /* * We are just looking at the wimg bits, so we don't * care much about the trans splitting bit. * We are holding kvm->mmu_lock so a notifier invalidate * can't run hence pfn won't change. */ local_irq_save(flags); ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL, NULL); if (ptep) { pte_t pte = READ_ONCE(*ptep); if (pte_present(pte)) { wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK; local_irq_restore(flags); } else { local_irq_restore(flags); pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n", __func__, (long)gfn, pfn); ret = -EINVAL; goto out; } } kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, ref, gvaddr, stlbe); /* Clear i-cache for new pages */ kvmppc_mmu_flush_icache(pfn); out: spin_unlock(&kvm->mmu_lock); /* Drop refcount on page, so that mmu notifiers can clear it */ kvm_release_pfn_clean(pfn); return ret; } /* XXX only map the one-one case, for now use TLB0 */ static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel, struct kvm_book3e_206_tlb_entry *stlbe) { struct kvm_book3e_206_tlb_entry *gtlbe; struct tlbe_ref *ref; int stlbsel = 0; int sesel = 0; int r; gtlbe = get_entry(vcpu_e500, 0, esel); ref = &vcpu_e500->gtlb_priv[0][esel].ref; r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), get_tlb_raddr(gtlbe) >> PAGE_SHIFT, gtlbe, 0, stlbe, ref); if (r) return r; 
write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel); return 0; } static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, struct tlbe_ref *ref, int esel) { unsigned int sesel = vcpu_e500->host_tlb1_nv++; if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) vcpu_e500->host_tlb1_nv = 0; if (vcpu_e500->h2g_tlb1_rmap[sesel]) { unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1; vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); } vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; WARN_ON(!(ref->flags & E500_TLB_VALID)); return sesel; } /* Caller must ensure that the specified guest TLB entry is safe to insert into * the shadow TLB. */ /* For both one-one and one-to-many */ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, struct kvm_book3e_206_tlb_entry *stlbe, int esel) { struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref; int sesel; int r; r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref); if (r) return r; /* Use TLB0 when we can only map a page with 4k */ if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) { vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0; write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0); return 0; } /* Otherwise map into TLB1 */ sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel); write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); return 0; } void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, unsigned int index) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); struct tlbe_priv *priv; struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; int tlbsel = tlbsel_of(index); int esel = esel_of(index); gtlbe = get_entry(vcpu_e500, tlbsel, esel); switch (tlbsel) { case 0: priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; /* Triggers after clear_tlb_privs or on initial mapping */ if (!(priv->ref.flags & 
E500_TLB_VALID)) { kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); } else { kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, &priv->ref, eaddr, &stlbe); write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0); } break; case 1: { gfn_t gfn = gpaddr >> PAGE_SHIFT; kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe, esel); break; } default: BUG(); break; } } #ifdef CONFIG_KVM_BOOKE_HV int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, u32 *instr) { gva_t geaddr; hpa_t addr; hfn_t pfn; hva_t eaddr; u32 mas1, mas2, mas3; u64 mas7_mas3; struct page *page; unsigned int addr_space, psize_shift; bool pr; unsigned long flags; /* Search TLB for guest pc to get the real address */ geaddr = kvmppc_get_pc(vcpu); addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG; local_irq_save(flags); mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space); mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu)); asm volatile("tlbsx 0, %[geaddr]\n" : : [geaddr] "r" (geaddr)); mtspr(SPRN_MAS5, 0); mtspr(SPRN_MAS8, 0); mas1 = mfspr(SPRN_MAS1); mas2 = mfspr(SPRN_MAS2); mas3 = mfspr(SPRN_MAS3); #ifdef CONFIG_64BIT mas7_mas3 = mfspr(SPRN_MAS7_MAS3); #else mas7_mas3 = ((u64)mfspr(SPRN_MAS7) << 32) | mas3; #endif local_irq_restore(flags); /* * If the TLB entry for guest pc was evicted, return to the guest. * There are high chances to find a valid TLB entry next time. */ if (!(mas1 & MAS1_VALID)) return EMULATE_AGAIN; /* * Another thread may rewrite the TLB entry in parallel, don't * execute from the address if the execute permission is not set */ pr = vcpu->arch.shared->msr & MSR_PR; if (unlikely((pr && !(mas3 & MAS3_UX)) || (!pr && !(mas3 & MAS3_SX)))) { pr_err_ratelimited( "%s: Instruction emulation from guest address %08lx without execute permission\n", __func__, geaddr); return EMULATE_AGAIN; } /* * The real address will be mapped by a cacheable, memory coherent, * write-back page. Check for mismatches when LRAT is used. 
*/ if (has_feature(vcpu, VCPU_FTR_MMU_V2) && unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) { pr_err_ratelimited( "%s: Instruction emulation from guest address %08lx mismatches storage attributes\n", __func__, geaddr); return EMULATE_AGAIN; } /* Get pfn */ psize_shift = MAS1_GET_TSIZE(mas1) + 10; addr = (mas7_mas3 & (~0ULL << psize_shift)) | (geaddr & ((1ULL << psize_shift) - 1ULL)); pfn = addr >> PAGE_SHIFT; /* Guard against emulation from devices area */ if (unlikely(!page_is_ram(pfn))) { pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n", __func__, addr); return EMULATE_AGAIN; } /* Map a page and get guest's instruction */ page = pfn_to_page(pfn); eaddr = (unsigned long)kmap_atomic(page); *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK)); kunmap_atomic((u32 *)eaddr); return EMULATE_DONE; } #else int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, u32 *instr) { return EMULATE_AGAIN; } #endif /************* MMU Notifiers *************/ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) { trace_kvm_unmap_hva(hva); /* * Flush all shadow tlb entries everywhere. 
This is slow, but * we are 100% sure that we catch the to be unmapped page */ kvm_flush_remote_tlbs(kvm); return 0; } int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) { /* kvm_unmap_hva flushes everything anyways */ kvm_unmap_hva(kvm, start); return 0; } int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) { /* XXX could be more clever ;) */ return 0; } int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) { /* XXX could be more clever ;) */ return 0; } void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) { /* The page will get remapped properly on its next fault */ kvm_unmap_hva(kvm, hva); } /*****************************************/ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) { host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY; host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; /* * This should never happen on real e500 hardware, but is * architecturally possible -- e.g. in some weird nested * virtualization case. 
*/ if (host_tlb_params[0].entries == 0 || host_tlb_params[1].entries == 0) { pr_err("%s: need to know host tlb size\n", __func__); return -ENODEV; } host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >> TLBnCFG_ASSOC_SHIFT; host_tlb_params[1].ways = host_tlb_params[1].entries; if (!is_power_of_2(host_tlb_params[0].entries) || !is_power_of_2(host_tlb_params[0].ways) || host_tlb_params[0].entries < host_tlb_params[0].ways || host_tlb_params[0].ways == 0) { pr_err("%s: bad tlb0 host config: %u entries %u ways\n", __func__, host_tlb_params[0].entries, host_tlb_params[0].ways); return -ENODEV; } host_tlb_params[0].sets = host_tlb_params[0].entries / host_tlb_params[0].ways; host_tlb_params[1].sets = 1; vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * host_tlb_params[1].entries, GFP_KERNEL); if (!vcpu_e500->h2g_tlb1_rmap) return -EINVAL; return 0; } void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) { kfree(vcpu_e500->h2g_tlb1_rmap); }
gpl-2.0
somya-anand/linux-staging
drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
552
4008
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <engine/sw.h> #include <engine/fifo.h> struct nv04_sw_priv { struct nvkm_sw base; }; struct nv04_sw_chan { struct nvkm_sw_chan base; }; /******************************************************************************* * software object classes ******************************************************************************/ static int nv04_sw_set_ref(struct nvkm_object *object, u32 mthd, void *data, u32 size) { struct nvkm_object *channel = (void *)nv_engctx(object->parent); struct nvkm_fifo_chan *fifo = (void *)channel->parent; atomic_set(&fifo->refcnt, *(u32*)data); return 0; } static int nv04_sw_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size) { struct nv04_sw_chan *chan = (void *)nv_engctx(object->parent); if (chan->base.flip) return chan->base.flip(chan->base.flip_data); return -EINVAL; } static struct nvkm_omthds nv04_sw_omthds[] = { { 0x0150, 0x0150, nv04_sw_set_ref }, { 0x0500, 0x0500, nv04_sw_flip }, {} }; static struct nvkm_oclass nv04_sw_sclass[] = { { 0x006e, &nvkm_object_ofuncs, nv04_sw_omthds }, {} }; /******************************************************************************* * software context ******************************************************************************/ static int nv04_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { struct nv04_sw_chan *chan; int ret; ret = nvkm_sw_context_create(parent, engine, oclass, &chan); *pobject = nv_object(chan); if (ret) return ret; return 0; } static struct nvkm_oclass nv04_sw_cclass = { .handle = NV_ENGCTX(SW, 0x04), .ofuncs = &(struct nvkm_ofuncs) { .ctor = nv04_sw_context_ctor, .dtor = _nvkm_sw_context_dtor, .init = _nvkm_sw_context_init, .fini = _nvkm_sw_context_fini, }, }; /******************************************************************************* * software engine/subdev functions 
******************************************************************************/ void nv04_sw_intr(struct nvkm_subdev *subdev) { nv_mask(subdev, 0x000100, 0x80000000, 0x00000000); } static int nv04_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { struct nv04_sw_priv *priv; int ret; ret = nvkm_sw_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_engine(priv)->cclass = &nv04_sw_cclass; nv_engine(priv)->sclass = nv04_sw_sclass; nv_subdev(priv)->intr = nv04_sw_intr; return 0; } struct nvkm_oclass * nv04_sw_oclass = &(struct nvkm_oclass) { .handle = NV_ENGINE(SW, 0x04), .ofuncs = &(struct nvkm_ofuncs) { .ctor = nv04_sw_ctor, .dtor = _nvkm_sw_dtor, .init = _nvkm_sw_init, .fini = _nvkm_sw_fini, }, };
gpl-2.0
VRToxin-AOSP/android_kernel_moto_shamu
fs/splice.c
1064
48729
/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
#include <linux/security.h>
#include <linux/gfp.h>
#include <linux/socket.h>
#include <linux/compat.h>
#include "internal.h"

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 *
 * Returns 0 if the page was stolen (caller may reuse it), 1 on failure.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping;

	lock_page(page);

	mapping = page_mapping(page);
	if (mapping) {
		WARN_ON(!PageUptodate(page));

		/*
		 * At least for ext2 with nobh option, we need to wait on
		 * writeback completing on this page, since we'll remove it
		 * from the pagecache.  Otherwise truncate won't wait on the
		 * page, allowing the disk blocks to be reused by someone else
		 * before we actually wrote our data to them. fs corruption
		 * ensues.
		 */
		wait_on_page_writeback(page);

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL))
			goto out_unlock;

		/*
		 * If we succeeded in removing the mapping, set LRU flag
		 * and return good.
		 */
		if (remove_mapping(mapping, page)) {
			buf->flags |= PIPE_BUF_FLAG_LRU;
			return 0;
		}
	}

	/*
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
	 */
out_unlock:
	unlock_page(page);
	return 1;
}

/* Drop the page-cache reference taken when the buffer was filled. */
static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
}

/*
 * Check whether the contents of buf is OK to access. Since the content
 * is a page cache page, IO may be in flight.
 */
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
				       struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok afterall, we are done.
		 */
		unlock_page(page);
	}

	return 0;
error:
	unlock_page(page);
	return err;
}

const struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = page_cache_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/* Only gifted (SPLICE_F_GIFT) pages may be stolen from a user-backed buffer. */
static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);
}

static const struct pipe_buf_operations user_page_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/* Wake any sleeping pipe readers and notify async (SIGIO) waiters. */
static void wakeup_pipe_readers(struct pipe_inode_info *pipe)
{
	smp_mb();
	if (waitqueue_active(&pipe->wait))
		wake_up_interruptible(&pipe->wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}

/**
 * splice_to_pipe - fill passed data into a pipe
 * @pipe:	pipe to fill
 * @spd:	data to fill
 *
 * Description:
 *    @spd contains a map of pages and len/offset tuples, along with
 *    the struct pipe_buf_operations associated with these pages. This
 *    function will link that data to the pipe.
 *
 */
ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
		       struct splice_pipe_desc *spd)
{
	unsigned int spd_pages = spd->nr_pages;
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	pipe_lock(pipe);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			/* Link the next spd page into the pipe's ring. */
			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->private = spd->partial[page_nr].private;
			buf->ops = spd->ops;
			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;

			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->files)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < pipe->buffers)
				continue;

			break;
		}

		/* Pipe is full: either bail out or wait for a reader. */
		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	pipe_unlock(pipe);

	if (do_wakeup)
		wakeup_pipe_readers(pipe);

	/* Release any spd pages we did not manage to link into the pipe. */
	while (page_nr < spd_pages)
		spd->spd_release(spd, page_nr++);

	return ret;
}

/* Default spd_release callback: drop the page-cache reference on page @i. */
void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
{
	page_cache_release(spd->pages[i]);
}

/*
 * Check if we need to grow the arrays holding pages and partial page
 * descriptions.
 */
int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
	unsigned int buffers = ACCESS_ONCE(pipe->buffers);

	spd->nr_pages_max = buffers;
	/* The on-stack PIPE_DEF_BUFFERS arrays are big enough; nothing to do. */
	if (buffers <= PIPE_DEF_BUFFERS)
		return 0;

	spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
	spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);

	if (spd->pages && spd->partial)
		return 0;

	kfree(spd->pages);
	kfree(spd->partial);
	return -ENOMEM;
}

/* Undo splice_grow_spd(): free the arrays if they were kmalloc'ed. */
void splice_shrink_spd(struct splice_pipe_desc *spd)
{
	if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
		return;

	kfree(spd->pages);
	kfree(spd->partial);
}

/*
 * Fill a pipe with up to @len bytes of page-cache pages from @in starting
 * at *@ppos, starting reads for pages not yet uptodate.
 */
static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, spd.nr_pages_max);

	/*
	 * Lookup the (hopefully) full range of pages we need.
	 */
	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages);
	index += spd.nr_pages;

	/*
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * readahead/allocate the rest and fill in the holes.
	 */
	if (spd.nr_pages < nr_pages)
		page_cache_sync_readahead(mapping, &in->f_ra, in,
				index, req_pages - spd.nr_pages);

	error = 0;
	while (spd.nr_pages < nr_pages) {
		/*
		 * Page could be there, find_get_pages_contig() breaks on
		 * the first hole.
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * page didn't exist, allocate one.
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						GFP_KERNEL);
			if (unlikely(error)) {
				page_cache_release(page);
				/* -EEXIST: someone else added it; retry lookup. */
				if (error == -EEXIST)
					continue;
				break;
			}
			/*
			 * add_to_page_cache() locks the page, unlock it
			 * to avoid convoluting the logic below even more.
			 */
			unlock_page(page);
		}

		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	/*
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	 */
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;
	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (PageReadahead(page))
			page_cache_async_readahead(mapping, &in->f_ra, in,
					page, index, req_pages - page_nr);

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			lock_page(page);

			/*
			 * Page was truncated, or invalidated by the
			 * filesystem.  Redo the find/create, but this time the
			 * page is kept locked, so there's no chance of another
			 * race with truncate/invalidate.
			 */
			if (!page->mapping) {
				unlock_page(page);
				page = find_or_create_page(mapping, index,
						mapping_gfp_mask(mapping));

				if (!page) {
					error = -ENOMEM;
					break;
				}
				page_cache_release(spd.pages[page_nr]);
				spd.pages[page_nr] = page;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				/*
				 * We really should re-lookup the page here,
				 * but it complicates things a lot. Instead
				 * lets just do what we already stored, and
				 * we'll get it the next time we are called.
				 */
				if (error == AOP_TRUNCATED_PAGE)
					error = 0;

				break;
			}
		}
fill_it:
		/*
		 * i_size must be checked after PageUptodate.
		 */
		isize = i_size_read(mapping->host);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		/*
		 * if this is the last page, see if we need to shrink
		 * the length and stop
		 */
		if (end_index == index) {
			unsigned int plen;

			/*
			 * max good bytes in this page
			 */
			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			/*
			 * force quit after adding this page
			 */
			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	/*
	 * Release any pages at the end, if we quit early. 'page_nr' is how far
	 * we got, 'nr_pages' is how many pages are in the map.
	 */
	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);
	in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);
	return error;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @ppos:	position in @in
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given file and fill them into a pipe. Can be
 *    used as long as the address_space operations for the source implements
 *    a readpage() hook.
 *
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	loff_t isize, left;
	int ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	/* Clamp the request so we never splice past EOF. */
	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
	if (ret > 0) {
		*ppos += ret;
		file_accessed(in);
	}

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);

static const struct pipe_buf_operations default_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/* Always refuse to give up the page: return 1 (steal failure). */
static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	return 1;
}

/* Pipe buffer operations for a socket and similar. */
const struct pipe_buf_operations nosteal_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_nosteal,
	.get = generic_pipe_buf_get,
};
EXPORT_SYMBOL(nosteal_pipe_buf_ops);

/* readv() into kernel memory at @offset, via a temporary KERNEL_DS segment. */
static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
			    unsigned long vlen, loff_t offset)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	ssize_t res;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
	set_fs(old_fs);

	return res;
}

/* write() from a kernel buffer at @pos, via a temporary KERNEL_DS segment. */
ssize_t kernel_write(struct file *file, const char *buf, size_t count,
		     loff_t pos)
{
	mm_segment_t old_fs;
	ssize_t res;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	res = vfs_write(file, (__force const char __user *)buf, count, &pos);
	set_fs(old_fs);

	return res;
}
EXPORT_SYMBOL(kernel_write);

/*
 * Fallback splice-read: allocate pages, read into them with kernel_readv()
 * and link them into the pipe. Used when the file has no ->splice_read().
 */
ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	unsigned int nr_pages;
	unsigned int nr_freed;
	size_t offset;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct iovec *vec, __vec[PIPE_DEF_BUFFERS];
	ssize_t res;
	size_t this_len;
	int error;
	int i;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &default_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	res = -ENOMEM;
	vec = __vec;
	if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
		vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
		if (!vec)
			goto shrink_ret;
	}

	offset = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
		struct page *page;

		page = alloc_page(GFP_USER);
		error = -ENOMEM;
		if (!page)
			goto err;

		this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
		vec[i].iov_base = (void __user *) page_address(page);
		vec[i].iov_len = this_len;
		spd.pages[i] = page;
		spd.nr_pages++;
		len -= this_len;
		offset = 0;
	}

	res = kernel_readv(in, vec, spd.nr_pages, *ppos);
	if (res < 0) {
		error = res;
		goto err;
	}

	error = 0;
	if (!res)
		goto err;

	/* Short read: trim the partial map and free pages past what we got. */
	nr_freed = 0;
	for (i = 0; i < spd.nr_pages; i++) {
		this_len = min_t(size_t, vec[i].iov_len, res);
		spd.partial[i].offset = 0;
		spd.partial[i].len = this_len;
		if (!this_len) {
			__free_page(spd.pages[i]);
			spd.pages[i] = NULL;
			nr_freed++;
		}
		res -= this_len;
	}
	spd.nr_pages -= nr_freed;

	res = splice_to_pipe(pipe, &spd);
	if (res > 0)
		*ppos += res;

shrink_ret:
	if (vec != __vec)
		kfree(vec);
	splice_shrink_spd(&spd);
	return res;

err:
	for (i = 0; i < spd.nr_pages; i++)
		__free_page(spd.pages[i]);

	res = error;
	goto shrink_ret;
}
EXPORT_SYMBOL(default_file_splice_read);

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
*/ static int pipe_to_sendpage(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { struct file *file = sd->u.file; loff_t pos = sd->pos; int more; if (!likely(file->f_op && file->f_op->sendpage)) return -EINVAL; more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0; if (sd->len < sd->total_len && pipe->nrbufs > 1) more |= MSG_SENDPAGE_NOTLAST; return file->f_op->sendpage(file, buf->page, buf->offset, sd->len, &pos, more); } /* * This is a little more tricky than the file -> pipe splicing. There are * basically three cases: * * - Destination page already exists in the address space and there * are users of it. For that case we have no other option that * copying the data. Tough luck. * - Destination page already exists in the address space, but there * are no users of it. Make sure it's uptodate, then drop it. Fall * through to last case. * - Destination page does not exist, we can add the pipe page to * the page cache and avoid the copy. * * If asked to move pages to the output file (SPLICE_F_MOVE is set in * sd->flags), we attempt to migrate pages from the pipe to the output * file address space page cache. This is possible if no one else has * the pipe page referenced outside of the pipe and page cache. If * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create * a new page in the output file page cache and fill/dirty that. 
 */
int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		 struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	struct address_space *mapping = file->f_mapping;
	unsigned int offset, this_len;
	struct page *page;
	void *fsdata;
	int ret;

	offset = sd->pos & ~PAGE_CACHE_MASK;

	/* Never cross a page boundary in one go. */
	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

	ret = pagecache_write_begin(file, mapping, sd->pos, this_len,
				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (unlikely(ret))
		goto out;

	/* If write_begin didn't hand us the pipe page itself, copy the data. */
	if (buf->page != page) {
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst);
		buf->ops->unmap(pipe, buf, src);
	}
	ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
				page, fsdata);
out:
	return ret;
}
EXPORT_SYMBOL(pipe_to_file);

/* Wake any sleeping pipe writers and notify async (SIGIO) waiters. */
static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
{
	smp_mb();
	if (waitqueue_active(&pipe->wait))
		wake_up_interruptible(&pipe->wait);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}

/**
 * splice_from_pipe_feed - feed available data from a pipe to a file
 * @pipe:	pipe to splice from
 * @sd:		information to @actor
 * @actor:	handler that splices the data
 *
 * Description:
 *    This function loops over the pipe and calls @actor to do the
 *    actual moving of a single struct pipe_buffer to the desired
 *    destination.  It returns when there's no more buffers left in
 *    the pipe or if the requested number of bytes (@sd->total_len)
 *    have been copied.  It returns a positive number (one) if the
 *    pipe needs to be filled with more data, zero if the required
 *    number of bytes have been copied and -errno on error.
 *
 *    This, together with splice_from_pipe_{begin,end,next}, may be
 *    used to implement the functionality of __splice_from_pipe() when
 *    locking is required around copying the pipe buffers to the
 *    destination.
 */
int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
			  splice_actor *actor)
{
	int ret;

	while (pipe->nrbufs) {
		struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
		/* Cache ops: buf->ops is cleared below before release(). */
		const struct pipe_buf_operations *ops = buf->ops;

		sd->len = buf->len;
		if (sd->len > sd->total_len)
			sd->len = sd->total_len;

		ret = buf->ops->confirm(pipe, buf);
		if (unlikely(ret)) {
			if (ret == -ENODATA)
				ret = 0;
			return ret;
		}

		ret = actor(pipe, buf, sd);
		if (ret <= 0)
			return ret;

		buf->offset += ret;
		buf->len -= ret;

		sd->num_spliced += ret;
		sd->len -= ret;
		sd->pos += ret;
		sd->total_len -= ret;

		/* Buffer fully consumed: release it and advance the ring. */
		if (!buf->len) {
			buf->ops = NULL;
			ops->release(pipe, buf);
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
			if (pipe->files)
				sd->need_wakeup = true;
		}

		if (!sd->total_len)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL(splice_from_pipe_feed);

/**
 * splice_from_pipe_next - wait for some data to splice from
 * @pipe:	pipe to splice from
 * @sd:		information about the splice operation
 *
 * Description:
 *    This function will wait for some data and return a positive
 *    value (one) if pipe buffers are available.  It will return zero
 *    or -errno if no more data needs to be spliced.
 */
int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	while (!pipe->nrbufs) {
		if (!pipe->writers)
			return 0;

		if (!pipe->waiting_writers && sd->num_spliced)
			return 0;

		if (sd->flags & SPLICE_F_NONBLOCK)
			return -EAGAIN;

		if (signal_pending(current))
			return -ERESTARTSYS;

		if (sd->need_wakeup) {
			wakeup_pipe_writers(pipe);
			sd->need_wakeup = false;
		}

		pipe_wait(pipe);
	}

	return 1;
}
EXPORT_SYMBOL(splice_from_pipe_next);

/**
 * splice_from_pipe_begin - start splicing from pipe
 * @sd:		information about the splice operation
 *
 * Description:
 *    This function should be called before a loop containing
 *    splice_from_pipe_next() and splice_from_pipe_feed() to
 *    initialize the necessary fields of @sd.
 */
void splice_from_pipe_begin(struct splice_desc *sd)
{
	sd->num_spliced = 0;
	sd->need_wakeup = false;
}
EXPORT_SYMBOL(splice_from_pipe_begin);

/**
 * splice_from_pipe_end - finish splicing from pipe
 * @pipe:	pipe to splice from
 * @sd:		information about the splice operation
 *
 * Description:
 *    This function will wake up pipe writers if necessary.  It should
 *    be called after a loop containing splice_from_pipe_next() and
 *    splice_from_pipe_feed().
 */
void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	if (sd->need_wakeup)
		wakeup_pipe_writers(pipe);
}
EXPORT_SYMBOL(splice_from_pipe_end);

/**
 * __splice_from_pipe - splice data from a pipe to given actor
 * @pipe:	pipe to splice from
 * @sd:		information to @actor
 * @actor:	handler that splices the data
 *
 * Description:
 *    This function does little more than loop over the pipe and call
 *    @actor to do the actual moving of a single struct pipe_buffer to
 *    the desired destination. See pipe_to_file, pipe_to_sendpage, or
 *    pipe_to_user.
 *
 */
ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
			   splice_actor *actor)
{
	int ret;

	splice_from_pipe_begin(sd);
	do {
		ret = splice_from_pipe_next(pipe, sd);
		if (ret > 0)
			ret = splice_from_pipe_feed(pipe, sd, actor);
	} while (ret > 0);
	splice_from_pipe_end(pipe, sd);

	/* Report bytes moved if any; otherwise propagate the last status. */
	return sd->num_spliced ? sd->num_spliced : ret;
}
EXPORT_SYMBOL(__splice_from_pipe);

/**
 * splice_from_pipe - splice data from a pipe to a file
 * @pipe:	pipe to splice from
 * @out:	file to splice to
 * @ppos:	position in @out
 * @len:	how many bytes to splice
 * @flags:	splice modifier flags
 * @actor:	handler that splices the data
 *
 * Description:
 *    See __splice_from_pipe. This function locks the pipe inode,
 *    otherwise it's identical to __splice_from_pipe().
 *
 */
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	ssize_t ret;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};

	pipe_lock(pipe);
	ret = __splice_from_pipe(pipe, &sd, actor);
	pipe_unlock(pipe);

	return ret;
}

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will either move or copy pages (determined by @flags options) from
 *    the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};
	ssize_t ret;

	pipe_lock(pipe);

	splice_from_pipe_begin(&sd);
	do {
		ret = splice_from_pipe_next(pipe, &sd);
		if (ret <= 0)
			break;

		/* i_mutex nests inside the pipe lock here (I_MUTEX_CHILD). */
		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
		ret = file_remove_suid(out);
		if (!ret) {
			ret = file_update_time(out);
			if (!ret)
				ret = splice_from_pipe_feed(pipe, &sd,
							    pipe_to_file);
		}
		mutex_unlock(&inode->i_mutex);
	} while (ret > 0);
	splice_from_pipe_end(pipe, &sd);

	pipe_unlock(pipe);

	if (sd.num_spliced)
		ret = sd.num_spliced;

	if (ret > 0) {
		int err;

		err = generic_write_sync(out, *ppos, ret);
		if (err)
			ret = err;
		else
			*ppos += ret;
		balance_dirty_pages_ratelimited(mapping);
	}

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_write);

/* Actor: write one pipe buffer to sd->u.file with __kernel_write(). */
static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			  struct splice_desc *sd)
{
	int ret;
	void *data;
	loff_t tmp = sd->pos;

	data = buf->ops->map(pipe, buf, 0);
	ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
	buf->ops->unmap(pipe, buf, data);

	return ret;
}

/* Fallback splice-write for files without a ->splice_write() hook. */
static ssize_t
default_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf);
	if (ret > 0)
		*ppos += ret;

	return ret;
}

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will send @len bytes from the pipe to a network socket. No data copying
 *    is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}
EXPORT_SYMBOL(generic_splice_sendpage);

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
				loff_t *, size_t, unsigned int);
	int ret;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	if (unlikely(out->f_flags & O_APPEND))
		return -EINVAL;

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	if (out->f_op && out->f_op->splice_write)
		splice_write = out->f_op->splice_write;
	else
		splice_write = default_file_splice_write;

	file_start_write(out);
	ret = splice_write(pipe, out, ppos, len, flags);
	file_end_write(out);
	return ret;
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	ssize_t (*splice_read)(struct file *, loff_t *,
			       struct pipe_inode_info *, size_t, unsigned int);
	int ret;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	if (in->f_op && in->f_op->splice_read)
		splice_read = in->f_op->splice_read;
	else
		splice_read = default_file_splice_read;

	return splice_read(in, ppos, pipe, len, flags);
}

/**
 * splice_direct_to_actor - splices data directly between two non-pipes
 * @in:		file to splice from
 * @sd:		actor information on where to splice to
 * @actor:	handles the data splicing
 *
 * Description:
 *    This is a special case helper to splice directly between two
 *    points, without requiring an explicit pipe. Internally an allocated
 *    pipe is cached in the process, and reused during the lifetime of
 *    that process.
 *
 */
ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
			       splice_direct_actor *actor)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	umode_t i_mode;
	size_t len;
	int i, flags;

	/*
	 * We require the input being a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = file_inode(in)->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;
	len = sd->total_len;
	flags = sd->flags;

	/*
	 * Don't block on output, we have to drain the direct pipe.
	 */
	sd->flags &= ~SPLICE_F_NONBLOCK;

	while (len) {
		size_t read_len;
		loff_t pos = sd->pos, prev_pos = pos;

		ret = do_splice_to(in, &pos, pipe, len, flags);
		if (unlikely(ret <= 0))
			goto out_release;

		read_len = ret;
		sd->total_len = read_len;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = actor(pipe, sd);
		if (unlikely(ret <= 0)) {
			sd->pos = prev_pos;
			goto out_release;
		}

		bytes += ret;
		len -= ret;
		sd->pos = pos;

		/* Partial drain: rewind position to what was consumed. */
		if (ret < read_len) {
			sd->pos = prev_pos + ret;
			goto out_release;
		}
	}

done:
	pipe->nrbufs = pipe->curbuf = 0;
	file_accessed(in);
	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}

	if (!bytes)
		bytes = ret;

	goto done;
}
EXPORT_SYMBOL(splice_direct_to_actor);

/* Actor for do_splice_direct(): drain the internal pipe into sd->u.file. */
static int direct_splice_actor(struct pipe_inode_info *pipe,
			       struct splice_desc *sd)
{
	struct file *file = sd->u.file;

	return do_splice_from(pipe, file, sd->opos, sd->total_len,
			      sd->flags);
}

/**
 * do_splice_direct - splices data directly between two files
 * @in:		file to splice from
 * @ppos:	input file offset
 * @out:	file to splice to
 * @opos:	output file offset
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    For use by do_sendfile(). splice can easily emulate sendfile, but
 *    doing it in the application would incur an extra system call
 *    (splice in + splice out, as compared to just sendfile()). So this helper
 *    can splice directly through a process-private pipe.
 *
 */
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      loff_t *opos, size_t len, unsigned int flags)
{
	struct splice_desc sd = {
		.len		= len,
		.total_len	= len,
		.flags		= flags,
		.pos		= *ppos,
		.u.file		= out,
		.opos		= opos,
	};
	long ret;

	ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
	if (ret > 0)
		*ppos = sd.pos;

	return ret;
}

static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
			       struct pipe_inode_info *opipe,
			       size_t len, unsigned int flags);

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *ipipe;
	struct pipe_inode_info *opipe;
	loff_t offset;
	long ret;

	ipipe = get_pipe_info(in);
	opipe = get_pipe_info(out);

	/* pipe -> pipe */
	if (ipipe && opipe) {
		if (off_in || off_out)
			return -ESPIPE;

		if (!(in->f_mode & FMODE_READ))
			return -EBADF;

		if (!(out->f_mode & FMODE_WRITE))
			return -EBADF;

		/* Splicing to self would be fun, but... */
		if (ipipe == opipe)
			return -EINVAL;

		return splice_pipe_to_pipe(ipipe, opipe, len, flags);
	}

	/* pipe -> file: take the offset from off_out or the file position */
	if (ipipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (!(out->f_mode & FMODE_PWRITE))
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
		} else {
			offset = out->f_pos;
		}

		ret = do_splice_from(ipipe, out, &offset, len, flags);

		if (!off_out)
			out->f_pos = offset;
		else if (copy_to_user(off_out, &offset, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	/* file -> pipe: take the offset from off_in or the file position */
	if (opipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (!(in->f_mode & FMODE_PREAD))
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
		} else {
			offset = in->f_pos;
		}

		ret = do_splice_to(in, &offset, opipe, len, flags);

		if (!off_in)
			in->f_pos = offset;
		else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	return -EINVAL;
}

/*
 * Map an iov into an array of pages and offset/length tuples.
 * With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial, bool aligned,
				unsigned int pipe_buffers)
{
	int buffers = 0, error = 0;

	while (nr_vecs) {
		unsigned long off, npages;
		struct iovec entry;
		void __user *base;
		size_t len;
		int i;

		error = -EFAULT;
		if (copy_from_user(&entry, iov, sizeof(entry)))
			break;

		base = entry.iov_base;
		len = entry.iov_len;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		error = 0;
		if (unlikely(!len))
			break;
		error = -EFAULT;
		if (!access_ok(VERIFY_READ, base, len))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;

		/*
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		 */
		error = -EINVAL;
		if (aligned && (off || len & ~PAGE_MASK))
			break;

		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > pipe_buffers - buffers)
			npages = pipe_buffers - buffers;

		error = get_user_pages_fast((unsigned long)base, npages,
					0, &pages[buffers]);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 * 'error' here is the number of pages actually mapped.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == pipe_buffers)
			break;

		nr_vecs--;
		iov++;
	}

	if (buffers)
		return buffers;

	return error;
}

/* Actor: copy one pipe buffer to the user address in sd->u.userptr. */
static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	char *src;
	int ret;

	/*
	 * See if we can use the atomic maps, by prefaulting in the
	 * pages and doing an atomic copy
	 */
	if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
		src = buf->ops->map(pipe, buf, 1);
		ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
							sd->len);
		buf->ops->unmap(pipe, buf, src);
		if (!ret) {
			ret = sd->len;
			goto out;
		}
	}

	/*
	 * No dice, use slow non-atomic map and copy
	 */
	src = buf->ops->map(pipe, buf, 0);

	ret = sd->len;
	if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
		ret = -EFAULT;

	buf->ops->unmap(pipe, buf, src);
out:
	if (ret > 0)
		sd->u.userptr += ret;
	return ret;
}

/*
 * For lack of a better implementation, implement vmsplice() to userspace
 * as a simple copy of the pipes pages to the user iov.
 */
static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct splice_desc sd;
	ssize_t size;
	int error;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	pipe_lock(pipe);

	error = ret = 0;
	while (nr_segs) {
		void __user *base;
		size_t len;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		if (unlikely(!base)) {
			error = -EFAULT;
			break;
		}

		if (unlikely(!access_ok(VERIFY_WRITE, base, len))) {
			error = -EFAULT;
			break;
		}

		sd.len = 0;
		sd.total_len = len;
		sd.flags = flags;
		sd.u.userptr = base;
		sd.pos = 0;

		size = __splice_from_pipe(pipe, &sd, pipe_to_user);
		if (size < 0) {
			if (!ret)
				ret = size;

			break;
		}

		ret += size;

		/* Short copy into this segment: pipe is drained, stop. */
		if (size < len)
			break;

		nr_segs--;
		iov++;
	}

	pipe_unlock(pipe);

	if (!ret)
		ret = error;

	return ret;
}

/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 */
static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &user_page_pipe_buf_ops,
		.spd_release = spd_release_page,
	};
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
					    spd.partial, false,
					    spd.nr_pages_max);
	if (spd.nr_pages <= 0)
		ret = spd.nr_pages;
	else
		ret = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);
	return ret;
}

/*
 * Note that vmsplice only really supports true splicing _from_ user memory
 * to a pipe, not the other way around. Splicing from user memory is a simple
 * operation that can be supported without any funky alignment restrictions
 * or nasty vm tricks. We simply map in the user memory and fill them into
 * a pipe. The reverse isn't quite as easy, though. There are two possible
 * solutions for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
* - Lots of nasty vm tricks, that are neither fast nor flexible (it * has restriction limitations on both ends of the pipe). * * Currently we punt and implement it as a normal copy, see pipe_to_user(). * */ SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov, unsigned long, nr_segs, unsigned int, flags) { struct fd f; long error; if (unlikely(nr_segs > UIO_MAXIOV)) return -EINVAL; else if (unlikely(!nr_segs)) return 0; error = -EBADF; f = fdget(fd); if (f.file) { if (f.file->f_mode & FMODE_WRITE) error = vmsplice_to_pipe(f.file, iov, nr_segs, flags); else if (f.file->f_mode & FMODE_READ) error = vmsplice_to_user(f.file, iov, nr_segs, flags); fdput(f); } return error; } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32, unsigned int, nr_segs, unsigned int, flags) { unsigned i; struct iovec __user *iov; if (nr_segs > UIO_MAXIOV) return -EINVAL; iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec)); for (i = 0; i < nr_segs; i++) { struct compat_iovec v; if (get_user(v.iov_base, &iov32[i].iov_base) || get_user(v.iov_len, &iov32[i].iov_len) || put_user(compat_ptr(v.iov_base), &iov[i].iov_base) || put_user(v.iov_len, &iov[i].iov_len)) return -EFAULT; } return sys_vmsplice(fd, iov, nr_segs, flags); } #endif SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags) { struct fd in, out; long error; if (unlikely(!len)) return 0; error = -EBADF; in = fdget(fd_in); if (in.file) { if (in.file->f_mode & FMODE_READ) { out = fdget(fd_out); if (out.file) { if (out.file->f_mode & FMODE_WRITE) error = do_splice(in.file, off_in, out.file, off_out, len, flags); fdput(out); } } fdput(in); } return error; } /* * Make sure there's data to read. Wait for input if we can, otherwise * return an appropriate error. 
*/ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) { int ret; /* * Check ->nrbufs without the inode lock first. This function * is speculative anyways, so missing one is ok. */ if (pipe->nrbufs) return 0; ret = 0; pipe_lock(pipe); while (!pipe->nrbufs) { if (signal_pending(current)) { ret = -ERESTARTSYS; break; } if (!pipe->writers) break; if (!pipe->waiting_writers) { if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } } pipe_wait(pipe); } pipe_unlock(pipe); return ret; } /* * Make sure there's writeable room. Wait for room if we can, otherwise * return an appropriate error. */ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) { int ret; /* * Check ->nrbufs without the inode lock first. This function * is speculative anyways, so missing one is ok. */ if (pipe->nrbufs < pipe->buffers) return 0; ret = 0; pipe_lock(pipe); while (pipe->nrbufs >= pipe->buffers) { if (!pipe->readers) { send_sig(SIGPIPE, current, 0); ret = -EPIPE; break; } if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } if (signal_pending(current)) { ret = -ERESTARTSYS; break; } pipe->waiting_writers++; pipe_wait(pipe); pipe->waiting_writers--; } pipe_unlock(pipe); return ret; } /* * Splice contents of ipipe to opipe. */ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags) { struct pipe_buffer *ibuf, *obuf; int ret = 0, nbuf; bool input_wakeup = false; retry: ret = ipipe_prep(ipipe, flags); if (ret) return ret; ret = opipe_prep(opipe, flags); if (ret) return ret; /* * Potential ABBA deadlock, work around it by ordering lock * grabbing by pipe info address. Otherwise two different processes * could deadlock (one doing tee from A -> B, the other from B -> A). 
*/ pipe_double_lock(ipipe, opipe); do { if (!opipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } if (!ipipe->nrbufs && !ipipe->writers) break; /* * Cannot make any progress, because either the input * pipe is empty or the output pipe is full. */ if (!ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) { /* Already processed some buffers, break */ if (ret) break; if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } /* * We raced with another reader/writer and haven't * managed to process any buffers. A zero return * value means EOF, so retry instead. */ pipe_unlock(ipipe); pipe_unlock(opipe); goto retry; } ibuf = ipipe->bufs + ipipe->curbuf; nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1); obuf = opipe->bufs + nbuf; if (len >= ibuf->len) { /* * Simply move the whole buffer from ipipe to opipe */ *obuf = *ibuf; ibuf->ops = NULL; opipe->nrbufs++; ipipe->curbuf = (ipipe->curbuf + 1) & (ipipe->buffers - 1); ipipe->nrbufs--; input_wakeup = true; } else { /* * Get a reference to this pipe buffer, * so we can copy the contents over. */ ibuf->ops->get(ipipe, ibuf); *obuf = *ibuf; /* * Don't inherit the gift flag, we need to * prevent multiple steals of this page. */ obuf->flags &= ~PIPE_BUF_FLAG_GIFT; obuf->len = len; opipe->nrbufs++; ibuf->offset += obuf->len; ibuf->len -= obuf->len; } ret += obuf->len; len -= obuf->len; } while (len); pipe_unlock(ipipe); pipe_unlock(opipe); /* * If we put data in the output pipe, wakeup any potential readers. */ if (ret > 0) wakeup_pipe_readers(opipe); if (input_wakeup) wakeup_pipe_writers(ipipe); return ret; } /* * Link contents of ipipe to opipe. */ static int link_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags) { struct pipe_buffer *ibuf, *obuf; int ret = 0, i = 0, nbuf; /* * Potential ABBA deadlock, work around it by ordering lock * grabbing by pipe info address. 
Otherwise two different processes * could deadlock (one doing tee from A -> B, the other from B -> A). */ pipe_double_lock(ipipe, opipe); do { if (!opipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } /* * If we have iterated all input buffers or ran out of * output room, break. */ if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) break; ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1)); nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1); /* * Get a reference to this pipe buffer, * so we can copy the contents over. */ ibuf->ops->get(ipipe, ibuf); obuf = opipe->bufs + nbuf; *obuf = *ibuf; /* * Don't inherit the gift flag, we need to * prevent multiple steals of this page. */ obuf->flags &= ~PIPE_BUF_FLAG_GIFT; if (obuf->len > len) obuf->len = len; opipe->nrbufs++; ret += obuf->len; len -= obuf->len; i++; } while (len); /* * return EAGAIN if we have the potential of some data in the * future, otherwise just return 0 */ if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) ret = -EAGAIN; pipe_unlock(ipipe); pipe_unlock(opipe); /* * If we put data in the output pipe, wakeup any potential readers. */ if (ret > 0) wakeup_pipe_readers(opipe); return ret; } /* * This is a tee(1) implementation that works on pipes. It doesn't copy * any data, it simply references the 'in' pages on the 'out' pipe. * The 'flags' used are the SPLICE_F_* variants, currently the only * applicable one is SPLICE_F_NONBLOCK. */ static long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags) { struct pipe_inode_info *ipipe = get_pipe_info(in); struct pipe_inode_info *opipe = get_pipe_info(out); int ret = -EINVAL; /* * Duplicate the contents of ipipe to opipe without actually * copying the data. */ if (ipipe && opipe && ipipe != opipe) { /* * Keep going, unless we encounter an error. The ipipe/opipe * ordering doesn't really matter. 
*/ ret = ipipe_prep(ipipe, flags); if (!ret) { ret = opipe_prep(opipe, flags); if (!ret) ret = link_pipe(ipipe, opipe, len, flags); } } return ret; } SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags) { struct fd in; int error; if (unlikely(!len)) return 0; error = -EBADF; in = fdget(fdin); if (in.file) { if (in.file->f_mode & FMODE_READ) { struct fd out = fdget(fdout); if (out.file) { if (out.file->f_mode & FMODE_WRITE) error = do_tee(in.file, out.file, len, flags); fdput(out); } } fdput(in); } return error; }
gpl-2.0
ea4862/ArchiKernel_cm12.1
drivers/video/omap2/dss/overlay.c
2088
16889
/* * linux/drivers/video/omap2/dss/overlay.c * * Copyright (C) 2009 Nokia Corporation * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> * * Some code and ideas taken from drivers/video/omap/ driver * by Imre Deak. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #define DSS_SUBSYS_NAME "OVERLAY" #include <linux/kernel.h> #include <linux/module.h> #include <linux/err.h> #include <linux/sysfs.h> #include <linux/kobject.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/slab.h> #include <video/omapdss.h> #include <plat/cpu.h> #include "dss.h" #include "dss_features.h" static int num_overlays; static struct list_head overlay_list; static ssize_t overlay_name_show(struct omap_overlay *ovl, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", ovl->name); } static ssize_t overlay_manager_show(struct omap_overlay *ovl, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", ovl->manager ? 
ovl->manager->name : "<none>"); } static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf, size_t size) { int i, r; struct omap_overlay_manager *mgr = NULL; struct omap_overlay_manager *old_mgr; int len = size; if (buf[size-1] == '\n') --len; if (len > 0) { for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) { mgr = omap_dss_get_overlay_manager(i); if (sysfs_streq(buf, mgr->name)) break; mgr = NULL; } } if (len > 0 && mgr == NULL) return -EINVAL; if (mgr) DSSDBG("manager %s found\n", mgr->name); if (mgr == ovl->manager) return size; old_mgr = ovl->manager; /* detach old manager */ if (old_mgr) { r = ovl->unset_manager(ovl); if (r) { DSSERR("detach failed\n"); return r; } r = old_mgr->apply(old_mgr); if (r) return r; } if (mgr) { r = ovl->set_manager(ovl, mgr); if (r) { DSSERR("Failed to attach overlay\n"); return r; } r = mgr->apply(mgr); if (r) return r; } return size; } static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf) { return snprintf(buf, PAGE_SIZE, "%d,%d\n", ovl->info.width, ovl->info.height); } static ssize_t overlay_screen_width_show(struct omap_overlay *ovl, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.screen_width); } static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf) { return snprintf(buf, PAGE_SIZE, "%d,%d\n", ovl->info.pos_x, ovl->info.pos_y); } static ssize_t overlay_position_store(struct omap_overlay *ovl, const char *buf, size_t size) { int r; char *last; struct omap_overlay_info info; ovl->get_overlay_info(ovl, &info); info.pos_x = simple_strtoul(buf, &last, 10); ++last; if (last - buf >= size) return -EINVAL; info.pos_y = simple_strtoul(last, &last, 10); r = ovl->set_overlay_info(ovl, &info); if (r) return r; if (ovl->manager) { r = ovl->manager->apply(ovl->manager); if (r) return r; } return size; } static ssize_t overlay_output_size_show(struct omap_overlay *ovl, char *buf) { return snprintf(buf, PAGE_SIZE, "%d,%d\n", ovl->info.out_width, 
ovl->info.out_height); } static ssize_t overlay_output_size_store(struct omap_overlay *ovl, const char *buf, size_t size) { int r; char *last; struct omap_overlay_info info; ovl->get_overlay_info(ovl, &info); info.out_width = simple_strtoul(buf, &last, 10); ++last; if (last - buf >= size) return -EINVAL; info.out_height = simple_strtoul(last, &last, 10); r = ovl->set_overlay_info(ovl, &info); if (r) return r; if (ovl->manager) { r = ovl->manager->apply(ovl->manager); if (r) return r; } return size; } static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.enabled); } static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf, size_t size) { int r, enable; struct omap_overlay_info info; ovl->get_overlay_info(ovl, &info); r = kstrtoint(buf, 0, &enable); if (r) return r; info.enabled = !!enable; r = ovl->set_overlay_info(ovl, &info); if (r) return r; if (ovl->manager) { r = ovl->manager->apply(ovl->manager); if (r) return r; } return size; } static ssize_t overlay_global_alpha_show(struct omap_overlay *ovl, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.global_alpha); } static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl, const char *buf, size_t size) { int r; u8 alpha; struct omap_overlay_info info; r = kstrtou8(buf, 0, &alpha); if (r) return r; ovl->get_overlay_info(ovl, &info); /* Video1 plane does not support global alpha * to always make it 255 completely opaque */ if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) && ovl->id == OMAP_DSS_VIDEO1) info.global_alpha = 255; else info.global_alpha = alpha; r = ovl->set_overlay_info(ovl, &info); if (r) return r; if (ovl->manager) { r = ovl->manager->apply(ovl->manager); if (r) return r; } return size; } static ssize_t overlay_pre_mult_alpha_show(struct omap_overlay *ovl, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.pre_mult_alpha); } static ssize_t overlay_pre_mult_alpha_store(struct 
omap_overlay *ovl, const char *buf, size_t size) { int r; u8 alpha; struct omap_overlay_info info; r = kstrtou8(buf, 0, &alpha); if (r) return r; ovl->get_overlay_info(ovl, &info); /* only GFX and Video2 plane support pre alpha multiplied * set zero for Video1 plane */ if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) && ovl->id == OMAP_DSS_VIDEO1) info.pre_mult_alpha = 0; else info.pre_mult_alpha = alpha; r = ovl->set_overlay_info(ovl, &info); if (r) return r; if (ovl->manager) { r = ovl->manager->apply(ovl->manager); if (r) return r; } return size; } struct overlay_attribute { struct attribute attr; ssize_t (*show)(struct omap_overlay *, char *); ssize_t (*store)(struct omap_overlay *, const char *, size_t); }; #define OVERLAY_ATTR(_name, _mode, _show, _store) \ struct overlay_attribute overlay_attr_##_name = \ __ATTR(_name, _mode, _show, _store) static OVERLAY_ATTR(name, S_IRUGO, overlay_name_show, NULL); static OVERLAY_ATTR(manager, S_IRUGO|S_IWUSR, overlay_manager_show, overlay_manager_store); static OVERLAY_ATTR(input_size, S_IRUGO, overlay_input_size_show, NULL); static OVERLAY_ATTR(screen_width, S_IRUGO, overlay_screen_width_show, NULL); static OVERLAY_ATTR(position, S_IRUGO|S_IWUSR, overlay_position_show, overlay_position_store); static OVERLAY_ATTR(output_size, S_IRUGO|S_IWUSR, overlay_output_size_show, overlay_output_size_store); static OVERLAY_ATTR(enabled, S_IRUGO|S_IWUSR, overlay_enabled_show, overlay_enabled_store); static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR, overlay_global_alpha_show, overlay_global_alpha_store); static OVERLAY_ATTR(pre_mult_alpha, S_IRUGO|S_IWUSR, overlay_pre_mult_alpha_show, overlay_pre_mult_alpha_store); static struct attribute *overlay_sysfs_attrs[] = { &overlay_attr_name.attr, &overlay_attr_manager.attr, &overlay_attr_input_size.attr, &overlay_attr_screen_width.attr, &overlay_attr_position.attr, &overlay_attr_output_size.attr, &overlay_attr_enabled.attr, &overlay_attr_global_alpha.attr, &overlay_attr_pre_mult_alpha.attr, NULL 
}; static ssize_t overlay_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct omap_overlay *overlay; struct overlay_attribute *overlay_attr; overlay = container_of(kobj, struct omap_overlay, kobj); overlay_attr = container_of(attr, struct overlay_attribute, attr); if (!overlay_attr->show) return -ENOENT; return overlay_attr->show(overlay, buf); } static ssize_t overlay_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t size) { struct omap_overlay *overlay; struct overlay_attribute *overlay_attr; overlay = container_of(kobj, struct omap_overlay, kobj); overlay_attr = container_of(attr, struct overlay_attribute, attr); if (!overlay_attr->store) return -ENOENT; return overlay_attr->store(overlay, buf, size); } static const struct sysfs_ops overlay_sysfs_ops = { .show = overlay_attr_show, .store = overlay_attr_store, }; static struct kobj_type overlay_ktype = { .sysfs_ops = &overlay_sysfs_ops, .default_attrs = overlay_sysfs_attrs, }; /* Check if overlay parameters are compatible with display */ int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev) { struct omap_overlay_info *info; u16 outw, outh; u16 dw, dh; if (!dssdev) return 0; if (!ovl->info.enabled) return 0; info = &ovl->info; if (info->paddr == 0) { DSSDBG("check_overlay failed: paddr 0\n"); return -EINVAL; } dssdev->driver->get_resolution(dssdev, &dw, &dh); DSSDBG("check_overlay %d: (%d,%d %dx%d -> %dx%d) disp (%dx%d)\n", ovl->id, info->pos_x, info->pos_y, info->width, info->height, info->out_width, info->out_height, dw, dh); if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { outw = info->width; outh = info->height; } else { if (info->out_width == 0) outw = info->width; else outw = info->out_width; if (info->out_height == 0) outh = info->height; else outh = info->out_height; } if (dw < info->pos_x + outw) { DSSDBG("check_overlay failed 1: %d < %d + %d\n", dw, info->pos_x, outw); return -EINVAL; } if (dh < info->pos_y + outh) { 
DSSDBG("check_overlay failed 2: %d < %d + %d\n", dh, info->pos_y, outh); return -EINVAL; } if ((ovl->supported_modes & info->color_mode) == 0) { DSSERR("overlay doesn't support mode %d\n", info->color_mode); return -EINVAL; } return 0; } static int dss_ovl_set_overlay_info(struct omap_overlay *ovl, struct omap_overlay_info *info) { int r; struct omap_overlay_info old_info; old_info = ovl->info; ovl->info = *info; if (ovl->manager) { r = dss_check_overlay(ovl, ovl->manager->device); if (r) { ovl->info = old_info; return r; } } ovl->info_dirty = true; return 0; } static void dss_ovl_get_overlay_info(struct omap_overlay *ovl, struct omap_overlay_info *info) { *info = ovl->info; } static int dss_ovl_wait_for_go(struct omap_overlay *ovl) { return dss_mgr_wait_for_go_ovl(ovl); } static int omap_dss_set_manager(struct omap_overlay *ovl, struct omap_overlay_manager *mgr) { if (!mgr) return -EINVAL; if (ovl->manager) { DSSERR("overlay '%s' already has a manager '%s'\n", ovl->name, ovl->manager->name); return -EINVAL; } if (ovl->info.enabled) { DSSERR("overlay has to be disabled to change the manager\n"); return -EINVAL; } ovl->manager = mgr; dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); /* XXX: When there is an overlay on a DSI manual update display, and * the overlay is first disabled, then moved to tv, and enabled, we * seem to get SYNC_LOST_DIGIT error. * * Waiting doesn't seem to help, but updating the manual update display * after disabling the overlay seems to fix this. This hints that the * overlay is perhaps somehow tied to the LCD output until the output * is updated. * * Userspace workaround for this is to update the LCD after disabling * the overlay, but before moving the overlay to TV. 
*/ dispc_set_channel_out(ovl->id, mgr->id); dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); return 0; } static int omap_dss_unset_manager(struct omap_overlay *ovl) { int r; if (!ovl->manager) { DSSERR("failed to detach overlay: manager not set\n"); return -EINVAL; } if (ovl->info.enabled) { DSSERR("overlay has to be disabled to unset the manager\n"); return -EINVAL; } r = ovl->wait_for_go(ovl); if (r) return r; ovl->manager = NULL; return 0; } int omap_dss_get_num_overlays(void) { return num_overlays; } EXPORT_SYMBOL(omap_dss_get_num_overlays); struct omap_overlay *omap_dss_get_overlay(int num) { int i = 0; struct omap_overlay *ovl; list_for_each_entry(ovl, &overlay_list, list) { if (i++ == num) return ovl; } return NULL; } EXPORT_SYMBOL(omap_dss_get_overlay); static void omap_dss_add_overlay(struct omap_overlay *overlay) { ++num_overlays; list_add_tail(&overlay->list, &overlay_list); } static struct omap_overlay *dispc_overlays[MAX_DSS_OVERLAYS]; void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr) { mgr->num_overlays = dss_feat_get_num_ovls(); mgr->overlays = dispc_overlays; } #ifdef L4_EXAMPLE static struct omap_overlay *l4_overlays[1]; void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr) { mgr->num_overlays = 1; mgr->overlays = l4_overlays; } #endif void dss_init_overlays(struct platform_device *pdev) { int i, r; INIT_LIST_HEAD(&overlay_list); num_overlays = 0; for (i = 0; i < dss_feat_get_num_ovls(); ++i) { struct omap_overlay *ovl; ovl = kzalloc(sizeof(*ovl), GFP_KERNEL); BUG_ON(ovl == NULL); switch (i) { case 0: ovl->name = "gfx"; ovl->id = OMAP_DSS_GFX; ovl->caps = OMAP_DSS_OVL_CAP_DISPC; ovl->info.global_alpha = 255; break; case 1: ovl->name = "vid1"; ovl->id = OMAP_DSS_VIDEO1; ovl->caps = OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_DISPC; ovl->info.global_alpha = 255; break; case 2: ovl->name = "vid2"; ovl->id = OMAP_DSS_VIDEO2; ovl->caps = OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_DISPC; ovl->info.global_alpha = 255; break; } 
ovl->set_manager = &omap_dss_set_manager; ovl->unset_manager = &omap_dss_unset_manager; ovl->set_overlay_info = &dss_ovl_set_overlay_info; ovl->get_overlay_info = &dss_ovl_get_overlay_info; ovl->wait_for_go = &dss_ovl_wait_for_go; ovl->supported_modes = dss_feat_get_supported_color_modes(ovl->id); omap_dss_add_overlay(ovl); r = kobject_init_and_add(&ovl->kobj, &overlay_ktype, &pdev->dev.kobj, "overlay%d", i); if (r) { DSSERR("failed to create sysfs file\n"); continue; } dispc_overlays[i] = ovl; } #ifdef L4_EXAMPLE { struct omap_overlay *ovl; ovl = kzalloc(sizeof(*ovl), GFP_KERNEL); BUG_ON(ovl == NULL); ovl->name = "l4"; ovl->supported_modes = OMAP_DSS_COLOR_RGB24U; ovl->set_manager = &omap_dss_set_manager; ovl->unset_manager = &omap_dss_unset_manager; ovl->set_overlay_info = &dss_ovl_set_overlay_info; ovl->get_overlay_info = &dss_ovl_get_overlay_info; omap_dss_add_overlay(ovl); r = kobject_init_and_add(&ovl->kobj, &overlay_ktype, &pdev->dev.kobj, "overlayl4"); if (r) DSSERR("failed to create sysfs file\n"); l4_overlays[0] = ovl; } #endif } /* connect overlays to the new device, if not already connected. if force * selected, connect always. 
*/ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force) { int i; struct omap_overlay_manager *lcd_mgr; struct omap_overlay_manager *tv_mgr; struct omap_overlay_manager *lcd2_mgr = NULL; struct omap_overlay_manager *mgr = NULL; lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD); tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV); if (dss_has_feature(FEAT_MGR_LCD2)) lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD2); if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) { if (!lcd2_mgr->device || force) { if (lcd2_mgr->device) lcd2_mgr->unset_device(lcd2_mgr); lcd2_mgr->set_device(lcd2_mgr, dssdev); mgr = lcd2_mgr; } } else if (dssdev->type != OMAP_DISPLAY_TYPE_VENC && dssdev->type != OMAP_DISPLAY_TYPE_HDMI) { if (!lcd_mgr->device || force) { if (lcd_mgr->device) lcd_mgr->unset_device(lcd_mgr); lcd_mgr->set_device(lcd_mgr, dssdev); mgr = lcd_mgr; } } if (dssdev->type == OMAP_DISPLAY_TYPE_VENC || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) { if (!tv_mgr->device || force) { if (tv_mgr->device) tv_mgr->unset_device(tv_mgr); tv_mgr->set_device(tv_mgr, dssdev); mgr = tv_mgr; } } if (mgr) { for (i = 0; i < dss_feat_get_num_ovls(); i++) { struct omap_overlay *ovl; ovl = omap_dss_get_overlay(i); if (!ovl->manager || force) { if (ovl->manager) omap_dss_unset_manager(ovl); omap_dss_set_manager(ovl, mgr); } } } } void dss_uninit_overlays(struct platform_device *pdev) { struct omap_overlay *ovl; while (!list_empty(&overlay_list)) { ovl = list_first_entry(&overlay_list, struct omap_overlay, list); list_del(&ovl->list); kobject_del(&ovl->kobj); kobject_put(&ovl->kobj); kfree(ovl); } num_overlays = 0; }
gpl-2.0
myjang0507/updatesource
sound/soc/cirrus/ep93xx-pcm.c
2088
5488
/* * linux/sound/arm/ep93xx-pcm.c - EP93xx ALSA PCM interface * * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> * Copyright (C) 2006 Applied Data Systems * * Rewritten for the SoC audio subsystem (Based on PXA2xx code): * Copyright (c) 2008 Ryan Mallon * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/dmaengine_pcm.h> #include <linux/platform_data/dma-ep93xx.h> #include <mach/hardware.h> #include <mach/ep93xx-regs.h> static const struct snd_pcm_hardware ep93xx_pcm_hardware = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER), .rates = SNDRV_PCM_RATE_8000_192000, .rate_min = SNDRV_PCM_RATE_8000, .rate_max = SNDRV_PCM_RATE_192000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE), .buffer_bytes_max = 131072, .period_bytes_min = 32, .period_bytes_max = 32768, .periods_min = 1, .periods_max = 32, .fifo_size = 32, }; static bool ep93xx_pcm_dma_filter(struct dma_chan *chan, void *filter_param) { struct ep93xx_dma_data *data = filter_param; if (data->direction == ep93xx_dma_chan_direction(chan)) { chan->private = data; return true; } return false; } static int ep93xx_pcm_open(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; snd_soc_set_runtime_hwparams(substream, &ep93xx_pcm_hardware); return snd_dmaengine_pcm_open_request_chan(substream, ep93xx_pcm_dma_filter, snd_soc_dai_get_dma_data(rtd->cpu_dai, substream)); } static int ep93xx_pcm_hw_params(struct snd_pcm_substream *substream, struct 
snd_pcm_hw_params *params) { snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); return 0; } static int ep93xx_pcm_hw_free(struct snd_pcm_substream *substream) { snd_pcm_set_runtime_buffer(substream, NULL); return 0; } static int ep93xx_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { struct snd_pcm_runtime *runtime = substream->runtime; return dma_mmap_writecombine(substream->pcm->card->dev, vma, runtime->dma_area, runtime->dma_addr, runtime->dma_bytes); } static struct snd_pcm_ops ep93xx_pcm_ops = { .open = ep93xx_pcm_open, .close = snd_dmaengine_pcm_close_release_chan, .ioctl = snd_pcm_lib_ioctl, .hw_params = ep93xx_pcm_hw_params, .hw_free = ep93xx_pcm_hw_free, .trigger = snd_dmaengine_pcm_trigger, .pointer = snd_dmaengine_pcm_pointer_no_residue, .mmap = ep93xx_pcm_mmap, }; static int ep93xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) { struct snd_pcm_substream *substream = pcm->streams[stream].substream; struct snd_dma_buffer *buf = &substream->dma_buffer; size_t size = ep93xx_pcm_hardware.buffer_bytes_max; buf->dev.type = SNDRV_DMA_TYPE_DEV; buf->dev.dev = pcm->card->dev; buf->private_data = NULL; buf->area = dma_alloc_writecombine(pcm->card->dev, size, &buf->addr, GFP_KERNEL); buf->bytes = size; return (buf->area == NULL) ? 
-ENOMEM : 0; } static void ep93xx_pcm_free_dma_buffers(struct snd_pcm *pcm) { struct snd_pcm_substream *substream; struct snd_dma_buffer *buf; int stream; for (stream = 0; stream < 2; stream++) { substream = pcm->streams[stream].substream; if (!substream) continue; buf = &substream->dma_buffer; if (!buf->area) continue; dma_free_writecombine(pcm->card->dev, buf->bytes, buf->area, buf->addr); buf->area = NULL; } } static u64 ep93xx_pcm_dmamask = DMA_BIT_MASK(32); static int ep93xx_pcm_new(struct snd_soc_pcm_runtime *rtd) { struct snd_card *card = rtd->card->snd_card; struct snd_pcm *pcm = rtd->pcm; int ret = 0; if (!card->dev->dma_mask) card->dev->dma_mask = &ep93xx_pcm_dmamask; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = DMA_BIT_MASK(32); if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { ret = ep93xx_pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_PLAYBACK); if (ret) return ret; } if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) { ret = ep93xx_pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_CAPTURE); if (ret) return ret; } return 0; } static struct snd_soc_platform_driver ep93xx_soc_platform = { .ops = &ep93xx_pcm_ops, .pcm_new = &ep93xx_pcm_new, .pcm_free = &ep93xx_pcm_free_dma_buffers, }; static int ep93xx_soc_platform_probe(struct platform_device *pdev) { return snd_soc_register_platform(&pdev->dev, &ep93xx_soc_platform); } static int ep93xx_soc_platform_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static struct platform_driver ep93xx_pcm_driver = { .driver = { .name = "ep93xx-pcm-audio", .owner = THIS_MODULE, }, .probe = ep93xx_soc_platform_probe, .remove = ep93xx_soc_platform_remove, }; module_platform_driver(ep93xx_pcm_driver); MODULE_AUTHOR("Ryan Mallon"); MODULE_DESCRIPTION("EP93xx ALSA PCM interface"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ep93xx-pcm-audio");
gpl-2.0
ShadySquirrel/e980-zeKrnl
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
3368
26640
/* * CAN driver for PEAK System PCAN-USB Pro adapter * Derived from the PCAN project file driver/src/pcan_usbpro.c * * Copyright (C) 2003-2011 PEAK System-Technik GmbH * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published * by the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/module.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include "pcan_usb_core.h" #include "pcan_usb_pro.h" MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro adapter"); /* PCAN-USB Pro Endpoints */ #define PCAN_USBPRO_EP_CMDOUT 1 #define PCAN_USBPRO_EP_CMDIN (PCAN_USBPRO_EP_CMDOUT | USB_DIR_IN) #define PCAN_USBPRO_EP_MSGOUT_0 2 #define PCAN_USBPRO_EP_MSGIN (PCAN_USBPRO_EP_MSGOUT_0 | USB_DIR_IN) #define PCAN_USBPRO_EP_MSGOUT_1 3 #define PCAN_USBPRO_EP_UNUSED (PCAN_USBPRO_EP_MSGOUT_1 | USB_DIR_IN) #define PCAN_USBPRO_CHANNEL_COUNT 2 /* PCAN-USB Pro adapter internal clock (MHz) */ #define PCAN_USBPRO_CRYSTAL_HZ 56000000 /* PCAN-USB Pro command timeout (ms.) 
 */
#define PCAN_USBPRO_COMMAND_TIMEOUT	1000

/* PCAN-USB Pro rx/tx buffers size */
#define PCAN_USBPRO_RX_BUFFER_SIZE	1024
#define PCAN_USBPRO_TX_BUFFER_SIZE	64

/* every bulk message starts with a 4-byte header holding the record count */
#define PCAN_USBPRO_MSG_HEADER_LEN	4

/* some commands responses need to be re-submitted */
#define PCAN_USBPRO_RSP_SUBMIT_MAX	2

/* per-record CAN frame flags (remote request / extended id) */
#define PCAN_USBPRO_RTR			0x01
#define PCAN_USBPRO_EXT			0x02

#define PCAN_USBPRO_CMD_BUFFER_SIZE	512

/* handle device specific info used by the netdevices */
struct pcan_usb_pro_interface {
	/* one peak_usb_device per CAN channel of the adapter */
	struct peak_usb_device *dev[PCAN_USBPRO_CHANNEL_COUNT];
	/* timestamp reference shared by both channels */
	struct peak_time_ref time_ref;
	/* nb of timestamp msgs still to ignore after open (clock settling) */
	int cm_ignore_count;
	/* how many of the channel netdevs are currently opened */
	int dev_opened_count;
};

/* device information */
struct pcan_usb_pro_device {
	/* common peak_usb device part (container_of anchor) */
	struct peak_usb_device dev;
	/* per-adapter state shared by the sibling channels */
	struct pcan_usb_pro_interface *usb_if;
	/* last CCBT bittiming value, replayed before the next bus-on */
	u32 cached_ccbt;
};

/* internal structure used to handle messages sent to bulk urb */
struct pcan_usb_pro_msg {
	u8 *rec_ptr;		/* where the next record is read/written */
	int rec_buffer_size;	/* total size of the underlying buffer */
	int rec_buffer_len;	/* bytes currently used (header + records) */
	union {
		u16 *rec_cnt_rd;	/* record count, device->host msgs */
		u32 *rec_cnt;		/* record count, host->device msgs */
		u8 *rec_buffer;		/* raw buffer base address */
	} u;
};

/* records sizes table indexed on message id.
(8-bits value) */ static u16 pcan_usb_pro_sizeof_rec[256] = { [PCAN_USBPRO_SETBTR] = sizeof(struct pcan_usb_pro_btr), [PCAN_USBPRO_SETBUSACT] = sizeof(struct pcan_usb_pro_busact), [PCAN_USBPRO_SETSILENT] = sizeof(struct pcan_usb_pro_silent), [PCAN_USBPRO_SETFILTR] = sizeof(struct pcan_usb_pro_filter), [PCAN_USBPRO_SETTS] = sizeof(struct pcan_usb_pro_setts), [PCAN_USBPRO_GETDEVID] = sizeof(struct pcan_usb_pro_devid), [PCAN_USBPRO_SETLED] = sizeof(struct pcan_usb_pro_setled), [PCAN_USBPRO_RXMSG8] = sizeof(struct pcan_usb_pro_rxmsg), [PCAN_USBPRO_RXMSG4] = sizeof(struct pcan_usb_pro_rxmsg) - 4, [PCAN_USBPRO_RXMSG0] = sizeof(struct pcan_usb_pro_rxmsg) - 8, [PCAN_USBPRO_RXRTR] = sizeof(struct pcan_usb_pro_rxmsg) - 8, [PCAN_USBPRO_RXSTATUS] = sizeof(struct pcan_usb_pro_rxstatus), [PCAN_USBPRO_RXTS] = sizeof(struct pcan_usb_pro_rxts), [PCAN_USBPRO_TXMSG8] = sizeof(struct pcan_usb_pro_txmsg), [PCAN_USBPRO_TXMSG4] = sizeof(struct pcan_usb_pro_txmsg) - 4, [PCAN_USBPRO_TXMSG0] = sizeof(struct pcan_usb_pro_txmsg) - 8, }; /* * initialize PCAN-USB Pro message data structure */ static u8 *pcan_msg_init(struct pcan_usb_pro_msg *pm, void *buffer_addr, int buffer_size) { if (buffer_size < PCAN_USBPRO_MSG_HEADER_LEN) return NULL; pm->u.rec_buffer = (u8 *)buffer_addr; pm->rec_buffer_size = pm->rec_buffer_len = buffer_size; pm->rec_ptr = pm->u.rec_buffer + PCAN_USBPRO_MSG_HEADER_LEN; return pm->rec_ptr; } static u8 *pcan_msg_init_empty(struct pcan_usb_pro_msg *pm, void *buffer_addr, int buffer_size) { u8 *pr = pcan_msg_init(pm, buffer_addr, buffer_size); if (pr) { pm->rec_buffer_len = PCAN_USBPRO_MSG_HEADER_LEN; *pm->u.rec_cnt = 0; } return pr; } /* * add one record to a message being built */ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) 
{ int len, i; u8 *pc; va_list ap; va_start(ap, id); pc = pm->rec_ptr + 1; i = 0; switch (id) { case PCAN_USBPRO_TXMSG8: i += 4; case PCAN_USBPRO_TXMSG4: i += 4; case PCAN_USBPRO_TXMSG0: *pc++ = va_arg(ap, int); *pc++ = va_arg(ap, int); *pc++ = va_arg(ap, int); *(u32 *)pc = cpu_to_le32(va_arg(ap, u32)); pc += 4; memcpy(pc, va_arg(ap, int *), i); pc += i; break; case PCAN_USBPRO_SETBTR: case PCAN_USBPRO_GETDEVID: *pc++ = va_arg(ap, int); pc += 2; *(u32 *)pc = cpu_to_le32(va_arg(ap, u32)); pc += 4; break; case PCAN_USBPRO_SETFILTR: case PCAN_USBPRO_SETBUSACT: case PCAN_USBPRO_SETSILENT: *pc++ = va_arg(ap, int); *(u16 *)pc = cpu_to_le16(va_arg(ap, int)); pc += 2; break; case PCAN_USBPRO_SETLED: *pc++ = va_arg(ap, int); *(u16 *)pc = cpu_to_le16(va_arg(ap, int)); pc += 2; *(u32 *)pc = cpu_to_le32(va_arg(ap, u32)); pc += 4; break; case PCAN_USBPRO_SETTS: pc++; *(u16 *)pc = cpu_to_le16(va_arg(ap, int)); pc += 2; break; default: pr_err("%s: %s(): unknown data type %02Xh (%d)\n", PCAN_USB_DRIVER_NAME, __func__, id, id); pc--; break; } len = pc - pm->rec_ptr; if (len > 0) { *pm->u.rec_cnt = cpu_to_le32(*pm->u.rec_cnt+1); *pm->rec_ptr = id; pm->rec_ptr = pc; pm->rec_buffer_len += len; } va_end(ap); return len; } /* * send PCAN-USB Pro command synchronously */ static int pcan_usb_pro_send_cmd(struct peak_usb_device *dev, struct pcan_usb_pro_msg *pum) { int actual_length; int err; /* usb device unregistered? */ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT), pum->u.rec_buffer, pum->rec_buffer_len, &actual_length, PCAN_USBPRO_COMMAND_TIMEOUT); if (err) netdev_err(dev->netdev, "sending command failure: %d\n", err); return err; } /* * wait for PCAN-USB Pro command response */ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev, struct pcan_usb_pro_msg *pum) { u8 req_data_type, req_channel; int actual_length; int i, err = 0; /* usb device unregistered? 
*/ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; req_data_type = pum->u.rec_buffer[4]; req_channel = pum->u.rec_buffer[5]; *pum->u.rec_cnt = 0; for (i = 0; !err && i < PCAN_USBPRO_RSP_SUBMIT_MAX; i++) { struct pcan_usb_pro_msg rsp; union pcan_usb_pro_rec *pr; u32 r, rec_cnt; u16 rec_len; u8 *pc; err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDIN), pum->u.rec_buffer, pum->rec_buffer_len, &actual_length, PCAN_USBPRO_COMMAND_TIMEOUT); if (err) { netdev_err(dev->netdev, "waiting rsp error %d\n", err); break; } if (actual_length == 0) continue; err = -EBADMSG; if (actual_length < PCAN_USBPRO_MSG_HEADER_LEN) { netdev_err(dev->netdev, "got abnormal too small rsp (len=%d)\n", actual_length); break; } pc = pcan_msg_init(&rsp, pum->u.rec_buffer, actual_length); rec_cnt = le32_to_cpu(*rsp.u.rec_cnt); /* loop on records stored into message */ for (r = 0; r < rec_cnt; r++) { pr = (union pcan_usb_pro_rec *)pc; rec_len = pcan_usb_pro_sizeof_rec[pr->data_type]; if (!rec_len) { netdev_err(dev->netdev, "got unprocessed record in msg\n"); dump_mem("rcvd rsp msg", pum->u.rec_buffer, actual_length); break; } /* check if response corresponds to request */ if (pr->data_type != req_data_type) netdev_err(dev->netdev, "got unwanted rsp %xh: ignored\n", pr->data_type); /* check if channel in response corresponds too */ else if ((req_channel != 0xff) && \ (pr->bus_act.channel != req_channel)) netdev_err(dev->netdev, "got rsp %xh but on chan%u: ignored\n", req_data_type, pr->bus_act.channel); /* got the response */ else return 0; /* otherwise, go on with next record in message */ pc += rec_len; } } return (i >= PCAN_USBPRO_RSP_SUBMIT_MAX) ? -ERANGE : err; } static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id, int req_value, void *req_addr, int req_size) { int err; u8 req_type; unsigned int p; /* usb device unregistered? 
*/ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; memset(req_addr, '\0', req_size); req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER; switch (req_id) { case PCAN_USBPRO_REQ_FCT: p = usb_sndctrlpipe(dev->udev, 0); break; default: p = usb_rcvctrlpipe(dev->udev, 0); req_type |= USB_DIR_IN; break; } err = usb_control_msg(dev->udev, p, req_id, req_type, req_value, 0, req_addr, req_size, 2 * USB_CTRL_GET_TIMEOUT); if (err < 0) { netdev_info(dev->netdev, "unable to request usb[type=%d value=%d] err=%d\n", req_id, req_value, err); return err; } return 0; } static int pcan_usb_pro_set_ts(struct peak_usb_device *dev, u16 onoff) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETTS, onoff); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_bitrate(struct peak_usb_device *dev, u32 ccbt) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETBTR, dev->ctrl_idx, ccbt); /* cache the CCBT value to reuse it before next buson */ pdev->cached_ccbt = ccbt; return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_bus(struct peak_usb_device *dev, u8 onoff) { struct pcan_usb_pro_msg um; /* if bus=on, be sure the bitrate being set before! 
*/ if (onoff) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); pcan_usb_pro_set_bitrate(dev, pdev->cached_ccbt); } pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETBUSACT, dev->ctrl_idx, onoff); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_silent(struct peak_usb_device *dev, u8 onoff) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETSILENT, dev->ctrl_idx, onoff); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_filter(struct peak_usb_device *dev, u16 filter_mode) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETFILTR, dev->ctrl_idx, filter_mode); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_led(struct peak_usb_device *dev, u8 mode, u32 timeout) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETLED, dev->ctrl_idx, mode, timeout); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev, u32 *device_id) { struct pcan_usb_pro_devid *pdn; struct pcan_usb_pro_msg um; int err; u8 *pc; pc = pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_GETDEVID, dev->ctrl_idx); err = pcan_usb_pro_send_cmd(dev, &um); if (err) return err; err = pcan_usb_pro_wait_rsp(dev, &um); if (err) return err; pdn = (struct pcan_usb_pro_devid *)pc; if (device_id) *device_id = le32_to_cpu(pdn->serial_num); return err; } static int pcan_usb_pro_set_bittiming(struct peak_usb_device *dev, struct can_bittiming *bt) { u32 ccbt; ccbt = (dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 
0x00800000 : 0; ccbt |= (bt->sjw - 1) << 24; ccbt |= (bt->phase_seg2 - 1) << 20; ccbt |= (bt->prop_seg + bt->phase_seg1 - 1) << 16; /* = tseg1 */ ccbt |= bt->brp - 1; netdev_info(dev->netdev, "setting ccbt=0x%08x\n", ccbt); return pcan_usb_pro_set_bitrate(dev, ccbt); } static void pcan_usb_pro_restart_complete(struct urb *urb) { /* can delete usb resources */ peak_usb_async_complete(urb); /* notify candev and netdev */ peak_usb_restart_complete(urb->context); } /* * handle restart but in asynchronously way */ static int pcan_usb_pro_restart_async(struct peak_usb_device *dev, struct urb *urb, u8 *buf) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETBUSACT, dev->ctrl_idx, 1); usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT), buf, PCAN_USB_MAX_CMD_LEN, pcan_usb_pro_restart_complete, dev); return usb_submit_urb(urb, GFP_ATOMIC); } static void pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded) { u8 buffer[16]; buffer[0] = 0; buffer[1] = !!loaded; pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT, PCAN_USBPRO_FCT_DRVLD, buffer, sizeof(buffer)); } static inline struct pcan_usb_pro_interface *pcan_usb_pro_dev_if(struct peak_usb_device *dev) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); return pdev->usb_if; } static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, struct pcan_usb_pro_rxmsg *rx) { const unsigned int ctrl_idx = (rx->len >> 4) & 0x0f; struct peak_usb_device *dev = usb_if->dev[ctrl_idx]; struct net_device *netdev = dev->netdev; struct can_frame *can_frame; struct sk_buff *skb; struct timeval tv; skb = alloc_can_skb(netdev, &can_frame); if (!skb) return -ENOMEM; can_frame->can_id = le32_to_cpu(rx->id); can_frame->can_dlc = rx->len & 0x0f; if (rx->flags & PCAN_USBPRO_EXT) can_frame->can_id |= CAN_EFF_FLAG; if (rx->flags & PCAN_USBPRO_RTR) can_frame->can_id |= CAN_RTR_FLAG; else 
memcpy(can_frame->data, rx->data, can_frame->can_dlc); peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv); skb->tstamp = timeval_to_ktime(tv); netif_rx(skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; return 0; } static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, struct pcan_usb_pro_rxstatus *er) { const u32 raw_status = le32_to_cpu(er->status); const unsigned int ctrl_idx = (er->channel >> 4) & 0x0f; struct peak_usb_device *dev = usb_if->dev[ctrl_idx]; struct net_device *netdev = dev->netdev; struct can_frame *can_frame; enum can_state new_state = CAN_STATE_ERROR_ACTIVE; u8 err_mask = 0; struct sk_buff *skb; struct timeval tv; /* nothing should be sent while in BUS_OFF state */ if (dev->can.state == CAN_STATE_BUS_OFF) return 0; if (!raw_status) { /* no error bit (back to active state) */ dev->can.state = CAN_STATE_ERROR_ACTIVE; return 0; } if (raw_status & (PCAN_USBPRO_STATUS_OVERRUN | PCAN_USBPRO_STATUS_QOVERRUN)) { /* trick to bypass next comparison and process other errors */ new_state = CAN_STATE_MAX; } if (raw_status & PCAN_USBPRO_STATUS_BUS) { new_state = CAN_STATE_BUS_OFF; } else if (raw_status & PCAN_USBPRO_STATUS_ERROR) { u32 rx_err_cnt = (le32_to_cpu(er->err_frm) & 0x00ff0000) >> 16; u32 tx_err_cnt = (le32_to_cpu(er->err_frm) & 0xff000000) >> 24; if (rx_err_cnt > 127) err_mask |= CAN_ERR_CRTL_RX_PASSIVE; else if (rx_err_cnt > 96) err_mask |= CAN_ERR_CRTL_RX_WARNING; if (tx_err_cnt > 127) err_mask |= CAN_ERR_CRTL_TX_PASSIVE; else if (tx_err_cnt > 96) err_mask |= CAN_ERR_CRTL_TX_WARNING; if (err_mask & (CAN_ERR_CRTL_RX_WARNING | CAN_ERR_CRTL_TX_WARNING)) new_state = CAN_STATE_ERROR_WARNING; else if (err_mask & (CAN_ERR_CRTL_RX_PASSIVE | CAN_ERR_CRTL_TX_PASSIVE)) new_state = CAN_STATE_ERROR_PASSIVE; } /* donot post any error if current state didn't change */ if (dev->can.state == new_state) return 0; /* allocate an skb to store the error frame */ skb = alloc_can_err_skb(netdev, 
&can_frame); if (!skb) return -ENOMEM; switch (new_state) { case CAN_STATE_BUS_OFF: can_frame->can_id |= CAN_ERR_BUSOFF; can_bus_off(netdev); break; case CAN_STATE_ERROR_PASSIVE: can_frame->can_id |= CAN_ERR_CRTL; can_frame->data[1] |= err_mask; dev->can.can_stats.error_passive++; break; case CAN_STATE_ERROR_WARNING: can_frame->can_id |= CAN_ERR_CRTL; can_frame->data[1] |= err_mask; dev->can.can_stats.error_warning++; break; case CAN_STATE_ERROR_ACTIVE: break; default: /* CAN_STATE_MAX (trick to handle other errors) */ if (raw_status & PCAN_USBPRO_STATUS_OVERRUN) { can_frame->can_id |= CAN_ERR_PROT; can_frame->data[2] |= CAN_ERR_PROT_OVERLOAD; netdev->stats.rx_over_errors++; netdev->stats.rx_errors++; } if (raw_status & PCAN_USBPRO_STATUS_QOVERRUN) { can_frame->can_id |= CAN_ERR_CRTL; can_frame->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; netdev->stats.rx_over_errors++; netdev->stats.rx_errors++; } new_state = CAN_STATE_ERROR_ACTIVE; break; } dev->can.state = new_state; peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); skb->tstamp = timeval_to_ktime(tv); netif_rx(skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; return 0; } static void pcan_usb_pro_handle_ts(struct pcan_usb_pro_interface *usb_if, struct pcan_usb_pro_rxts *ts) { /* should wait until clock is stabilized */ if (usb_if->cm_ignore_count > 0) usb_if->cm_ignore_count--; else peak_usb_set_ts_now(&usb_if->time_ref, le32_to_cpu(ts->ts64[1])); } /* * callback for bulk IN urb */ static int pcan_usb_pro_decode_buf(struct peak_usb_device *dev, struct urb *urb) { struct pcan_usb_pro_interface *usb_if = pcan_usb_pro_dev_if(dev); struct net_device *netdev = dev->netdev; struct pcan_usb_pro_msg usb_msg; u8 *rec_ptr, *msg_end; u16 rec_cnt; int err = 0; rec_ptr = pcan_msg_init(&usb_msg, urb->transfer_buffer, urb->actual_length); if (!rec_ptr) { netdev_err(netdev, "bad msg hdr len %d\n", urb->actual_length); return -EINVAL; } /* loop reading all the records from the incoming 
message */ msg_end = urb->transfer_buffer + urb->actual_length; rec_cnt = le16_to_cpu(*usb_msg.u.rec_cnt_rd); for (; rec_cnt > 0; rec_cnt--) { union pcan_usb_pro_rec *pr = (union pcan_usb_pro_rec *)rec_ptr; u16 sizeof_rec = pcan_usb_pro_sizeof_rec[pr->data_type]; if (!sizeof_rec) { netdev_err(netdev, "got unsupported rec in usb msg:\n"); err = -ENOTSUPP; break; } /* check if the record goes out of current packet */ if (rec_ptr + sizeof_rec > msg_end) { netdev_err(netdev, "got frag rec: should inc usb rx buf size\n"); err = -EBADMSG; break; } switch (pr->data_type) { case PCAN_USBPRO_RXMSG8: case PCAN_USBPRO_RXMSG4: case PCAN_USBPRO_RXMSG0: case PCAN_USBPRO_RXRTR: err = pcan_usb_pro_handle_canmsg(usb_if, &pr->rx_msg); if (err < 0) goto fail; break; case PCAN_USBPRO_RXSTATUS: err = pcan_usb_pro_handle_error(usb_if, &pr->rx_status); if (err < 0) goto fail; break; case PCAN_USBPRO_RXTS: pcan_usb_pro_handle_ts(usb_if, &pr->rx_ts); break; default: netdev_err(netdev, "unhandled rec type 0x%02x (%d): ignored\n", pr->data_type, pr->data_type); break; } rec_ptr += sizeof_rec; } fail: if (err) dump_mem("received msg", urb->transfer_buffer, urb->actual_length); return err; } static int pcan_usb_pro_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb, u8 *obuf, size_t *size) { struct can_frame *cf = (struct can_frame *)skb->data; u8 data_type, len, flags; struct pcan_usb_pro_msg usb_msg; pcan_msg_init_empty(&usb_msg, obuf, *size); if ((cf->can_id & CAN_RTR_FLAG) || (cf->can_dlc == 0)) data_type = PCAN_USBPRO_TXMSG0; else if (cf->can_dlc <= 4) data_type = PCAN_USBPRO_TXMSG4; else data_type = PCAN_USBPRO_TXMSG8; len = (dev->ctrl_idx << 4) | (cf->can_dlc & 0x0f); flags = 0; if (cf->can_id & CAN_EFF_FLAG) flags |= 0x02; if (cf->can_id & CAN_RTR_FLAG) flags |= 0x01; pcan_msg_add_rec(&usb_msg, data_type, 0, flags, len, cf->can_id, cf->data); *size = usb_msg.rec_buffer_len; return 0; } static int pcan_usb_pro_start(struct peak_usb_device *dev) { struct pcan_usb_pro_device 
*pdev = container_of(dev, struct pcan_usb_pro_device, dev); int err; err = pcan_usb_pro_set_silent(dev, dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY); if (err) return err; /* filter mode: 0-> All OFF; 1->bypass */ err = pcan_usb_pro_set_filter(dev, 1); if (err) return err; /* opening first device: */ if (pdev->usb_if->dev_opened_count == 0) { /* reset time_ref */ peak_usb_init_time_ref(&pdev->usb_if->time_ref, &pcan_usb_pro); /* ask device to send ts messages */ err = pcan_usb_pro_set_ts(dev, 1); } pdev->usb_if->dev_opened_count++; return err; } /* * stop interface * (last chance before set bus off) */ static int pcan_usb_pro_stop(struct peak_usb_device *dev) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); /* turn off ts msgs for that interface if no other dev opened */ if (pdev->usb_if->dev_opened_count == 1) pcan_usb_pro_set_ts(dev, 0); pdev->usb_if->dev_opened_count--; return 0; } /* * called when probing to initialize a device object. */ static int pcan_usb_pro_init(struct peak_usb_device *dev) { struct pcan_usb_pro_interface *usb_if; struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); /* do this for 1st channel only */ if (!dev->prev_siblings) { struct pcan_usb_pro_fwinfo fi; struct pcan_usb_pro_blinfo bi; int err; /* allocate netdevices common structure attached to first one */ usb_if = kzalloc(sizeof(struct pcan_usb_pro_interface), GFP_KERNEL); if (!usb_if) return -ENOMEM; /* number of ts msgs to ignore before taking one into account */ usb_if->cm_ignore_count = 5; /* * explicit use of dev_xxx() instead of netdev_xxx() here: * information displayed are related to the device itself, not * to the canx netdevices. 
*/ err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, PCAN_USBPRO_INFO_FW, &fi, sizeof(fi)); if (err) { kfree(usb_if); dev_err(dev->netdev->dev.parent, "unable to read %s firmware info (err %d)\n", pcan_usb_pro.name, err); return err; } err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, PCAN_USBPRO_INFO_BL, &bi, sizeof(bi)); if (err) { kfree(usb_if); dev_err(dev->netdev->dev.parent, "unable to read %s bootloader info (err %d)\n", pcan_usb_pro.name, err); return err; } dev_info(dev->netdev->dev.parent, "PEAK-System %s hwrev %u serial %08X.%08X (%u channels)\n", pcan_usb_pro.name, bi.hw_rev, bi.serial_num_hi, bi.serial_num_lo, pcan_usb_pro.ctrl_count); /* tell the device the can driver is running */ pcan_usb_pro_drv_loaded(dev, 1); } else { usb_if = pcan_usb_pro_dev_if(dev->prev_siblings); } pdev->usb_if = usb_if; usb_if->dev[dev->ctrl_idx] = dev; /* set LED in default state (end of init phase) */ pcan_usb_pro_set_led(dev, 0, 1); return 0; } static void pcan_usb_pro_exit(struct peak_usb_device *dev) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); /* * when rmmod called before unplug and if down, should reset things * before leaving */ if (dev->can.state != CAN_STATE_STOPPED) { /* set bus off on the corresponding channel */ pcan_usb_pro_set_bus(dev, 0); } /* if channel #0 (only) */ if (dev->ctrl_idx == 0) { /* turn off calibration message if any device were opened */ if (pdev->usb_if->dev_opened_count > 0) pcan_usb_pro_set_ts(dev, 0); /* tell the PCAN-USB Pro device the driver is being unloaded */ pcan_usb_pro_drv_loaded(dev, 0); } } /* * called when PCAN-USB Pro adapter is unplugged */ static void pcan_usb_pro_free(struct peak_usb_device *dev) { /* last device: can free pcan_usb_pro_interface object now */ if (!dev->prev_siblings && !dev->next_siblings) kfree(pcan_usb_pro_dev_if(dev)); } /* * probe function for new PCAN-USB Pro usb interface */ static int pcan_usb_pro_probe(struct usb_interface *intf) { struct 
usb_host_interface *if_desc; int i; if_desc = intf->altsetting; /* check interface endpoint addresses */ for (i = 0; i < if_desc->desc.bNumEndpoints; i++) { struct usb_endpoint_descriptor *ep = &if_desc->endpoint[i].desc; /* * below is the list of valid ep addreses. Any other ep address * is considered as not-CAN interface address => no dev created */ switch (ep->bEndpointAddress) { case PCAN_USBPRO_EP_CMDOUT: case PCAN_USBPRO_EP_CMDIN: case PCAN_USBPRO_EP_MSGOUT_0: case PCAN_USBPRO_EP_MSGOUT_1: case PCAN_USBPRO_EP_MSGIN: case PCAN_USBPRO_EP_UNUSED: break; default: return -ENODEV; } } return 0; } /* * describe the PCAN-USB Pro adapter */ struct peak_usb_adapter pcan_usb_pro = { .name = "PCAN-USB Pro", .device_id = PCAN_USBPRO_PRODUCT_ID, .ctrl_count = PCAN_USBPRO_CHANNEL_COUNT, .clock = { .freq = PCAN_USBPRO_CRYSTAL_HZ, }, .bittiming_const = { .name = "pcan_usb_pro", .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, }, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_pro_device), /* timestamps usage */ .ts_used_bits = 32, .ts_period = 1000000, /* calibration period in ts. 
*/ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, /* give here messages in/out endpoints */ .ep_msg_in = PCAN_USBPRO_EP_MSGIN, .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1}, /* size of rx/tx usb buffers */ .rx_buffer_size = PCAN_USBPRO_RX_BUFFER_SIZE, .tx_buffer_size = PCAN_USBPRO_TX_BUFFER_SIZE, /* device callbacks */ .intf_probe = pcan_usb_pro_probe, .dev_init = pcan_usb_pro_init, .dev_exit = pcan_usb_pro_exit, .dev_free = pcan_usb_pro_free, .dev_set_bus = pcan_usb_pro_set_bus, .dev_set_bittiming = pcan_usb_pro_set_bittiming, .dev_get_device_id = pcan_usb_pro_get_device_id, .dev_decode_buf = pcan_usb_pro_decode_buf, .dev_encode_msg = pcan_usb_pro_encode_msg, .dev_start = pcan_usb_pro_start, .dev_stop = pcan_usb_pro_stop, .dev_restart_async = pcan_usb_pro_restart_async, };
gpl-2.0
M1cha/lge-kernel-lproj
drivers/ide/piix.c
5160
14392
/* * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2003 Red Hat * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com> * * May be copied or modified under the terms of the GNU General Public License * * Documentation: * * Publicly available from Intel web site. Errata documentation * is also publicly available. As an aide to anyone hacking on this * driver the list of errata that are relevant is below.going back to * PIIX4. Older device documentation is now a bit tricky to find. * * Errata of note: * * Unfixable * PIIX4 errata #9 - Only on ultra obscure hw * ICH3 errata #13 - Not observed to affect real hw * by Intel * * Things we must deal with * PIIX4 errata #10 - BM IDE hang with non UDMA * (must stop/start dma to recover) * 440MX errata #15 - As PIIX4 errata #10 * PIIX4 errata #15 - Must not read control registers * during a PIO transfer * 440MX errata #13 - As PIIX4 errata #15 * ICH2 errata #21 - DMA mode 0 doesn't work right * ICH0/1 errata #55 - As ICH2 errata #21 * ICH2 spec c #9 - Extra operations needed to handle * drive hotswap [NOT YET SUPPORTED] * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary * and must be dword aligned * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3 * * Should have been BIOS fixed: * 450NX: errata #19 - DMA hangs on old 450NX * 450NX: errata #20 - DMA hangs on old 450NX * 450NX: errata #25 - Corruption with DMA on old 450NX * ICH3 errata #15 - IDE deadlock under high load * (BIOS must set dev 31 fn 0 bit 23) * ICH3 errata #18 - Don't use native mode */ #include <linux/types.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/io.h> #define DRV_NAME "piix" static int no_piix_dma; /** * piix_set_pio_mode - set host controller for PIO mode * @port: port * @drive: drive * * Set the interface PIO mode based upon 
the settings done by AMI BIOS. */ static void piix_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(hwif->dev); int is_slave = drive->dn & 1; int master_port = hwif->channel ? 0x42 : 0x40; int slave_port = 0x44; unsigned long flags; u16 master_data; u8 slave_data; static DEFINE_SPINLOCK(tune_lock); int control = 0; const u8 pio = drive->pio_mode - XFER_PIO_0; /* ISP RTC */ static const u8 timings[][2]= { { 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 }, }; /* * Master vs slave is synchronized above us but the slave register is * shared by the two hwifs so the corner case of two slave timeouts in * parallel must be locked. */ spin_lock_irqsave(&tune_lock, flags); pci_read_config_word(dev, master_port, &master_data); if (pio > 1) control |= 1; /* Programmable timing on */ if (drive->media == ide_disk) control |= 4; /* Prefetch, post write */ if (ide_pio_need_iordy(drive, pio)) control |= 2; /* IORDY */ if (is_slave) { master_data |= 0x4000; master_data &= ~0x0070; if (pio > 1) { /* Set PPE, IE and TIME */ master_data |= control << 4; } pci_read_config_byte(dev, slave_port, &slave_data); slave_data &= hwif->channel ? 0x0f : 0xf0; slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 4 : 0); } else { master_data &= ~0x3307; if (pio > 1) { /* enable PPE, IE and TIME */ master_data |= control; } master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8); } pci_write_config_word(dev, master_port, master_data); if (is_slave) pci_write_config_byte(dev, slave_port, slave_data); spin_unlock_irqrestore(&tune_lock, flags); } /** * piix_set_dma_mode - set host controller for DMA mode * @hwif: port * @drive: drive * * Set a PIIX host controller to the desired DMA mode. This involves * programming the right timing data into the PCI configuration space. */ static void piix_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(hwif->dev); u8 maslave = hwif->channel ? 
0x42 : 0x40; int a_speed = 3 << (drive->dn * 4); int u_flag = 1 << drive->dn; int v_flag = 0x01 << drive->dn; int w_flag = 0x10 << drive->dn; int u_speed = 0; int sitre; u16 reg4042, reg4a; u8 reg48, reg54, reg55; const u8 speed = drive->dma_mode; pci_read_config_word(dev, maslave, &reg4042); sitre = (reg4042 & 0x4000) ? 1 : 0; pci_read_config_byte(dev, 0x48, &reg48); pci_read_config_word(dev, 0x4a, &reg4a); pci_read_config_byte(dev, 0x54, &reg54); pci_read_config_byte(dev, 0x55, &reg55); if (speed >= XFER_UDMA_0) { u8 udma = speed - XFER_UDMA_0; u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4); if (!(reg48 & u_flag)) pci_write_config_byte(dev, 0x48, reg48 | u_flag); if (speed == XFER_UDMA_5) { pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag); } else { pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); } if ((reg4a & a_speed) != u_speed) pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed); if (speed > XFER_UDMA_2) { if (!(reg54 & v_flag)) pci_write_config_byte(dev, 0x54, reg54 | v_flag); } else pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); } else { const u8 mwdma_to_pio[] = { 0, 3, 4 }; if (reg48 & u_flag) pci_write_config_byte(dev, 0x48, reg48 & ~u_flag); if (reg4a & a_speed) pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); if (reg54 & v_flag) pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); if (reg55 & w_flag) pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); if (speed >= XFER_MW_DMA_0) drive->pio_mode = mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0; else drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */ piix_set_pio_mode(hwif, drive); } } /** * init_chipset_ich - set up the ICH chipset * @dev: PCI device to set up * * Initialize the PCI device as required. For the ICH this turns * out to be nice and simple. 
*/

/*
 * init_chipset_ich - one-time chipset setup for ICH controllers
 * @dev: PCI device for the IDE controller
 *
 * Reads PCI config dword 0x54 and ORs in bit 10 (0x400).
 * NOTE(review): the meaning of bit 10 at offset 0x54 is not visible from
 * this file — presumably an IDE decode/config enable per the ICH datasheet;
 * confirm against the chipset documentation before changing.
 *
 * Always returns 0 (success).
 */
static int init_chipset_ich(struct pci_dev *dev)
{
	u32 extra = 0;

	pci_read_config_dword(dev, 0x54, &extra);
	pci_write_config_dword(dev, 0x54, extra | 0x400);

	return 0;
}

/**
 *	ich_clear_irq	-	clear BMDMA status
 *	@drive: IDE drive
 *
 *	ICHx controllers set DMA INTR no matter DMA or PIO.
 *	BMDMA status might need to be cleared even for
 *	PIO interrupts to prevent spurious/lost IRQ.
 */
static void ich_clear_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat;

	/*
	 * ide_dma_end() needs BMDMA status for error checking.
	 * So, skip clearing BMDMA status here and leave it
	 * to ide_dma_end() if this is DMA interrupt.
	 */
	if (drive->waiting_for_dma || hwif->dma_base == 0)
		return;

	/* clear the INTR & ERROR bits */
	dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
	/* Should we force the bit as well ? */
	outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
}

/* One quirk-table entry: PCI device ID plus subsystem vendor/device IDs. */
struct ich_laptop {
	u16 device;		/* PCI device ID of the IDE controller */
	u16 subvendor;		/* PCI subsystem vendor ID (the laptop OEM) */
	u16 subdevice;		/* PCI subsystem device ID (the laptop model) */
};

/*
 *	List of laptops that use short cables rather than 80 wire
 *	(these report as 80-wire capable but must be limited to the
 *	short-cable UDMA speeds; see piix_cable_detect() below).
 */
static const struct ich_laptop ich_laptop[] = {
	/* devid, subvendor, subdev */
	{ 0x27DF, 0x1025, 0x0102 },	/* ICH7 on Acer 5602aWLMi */
	{ 0x27DF, 0x0005, 0x0280 },	/* ICH7 on Acer 5602WLMi */
	{ 0x27DF, 0x1025, 0x0110 },	/* ICH7 on Acer 3682WLMi */
	{ 0x27DF, 0x1043, 0x1267 },	/* ICH7 on Asus W5F */
	{ 0x27DF, 0x103C, 0x30A1 },	/* ICH7 on HP Compaq nc2400 */
	{ 0x27DF, 0x1071, 0xD221 },	/* ICH7 on Hercules EC-900 */
	{ 0x24CA, 0x1025, 0x0061 },	/* ICH4 on Acer Aspire 2023WLMi */
	{ 0x24CA, 0x1025, 0x003d },	/* ICH4 on ACER TM290 */
	{ 0x266F, 0x1025, 0x0066 },	/* ICH6 on ACER Aspire 1694WLMi */
	{ 0x2653, 0x1043, 0x82D8 },	/* ICH6M on Asus Eee 701 */
	{ 0x27df, 0x104d, 0x900e },	/* ICH7 on Sony TZ-90 */
	/* end marker */
	{ 0, }
};

/*
 * piix_cable_detect - work out the IDE cable type for a channel
 * @hwif: IDE interface to probe
 *
 * First checks the laptop quirk table above: a match means the machine
 * uses a short 40-wire cable despite any 80-wire indication, so
 * ATA_CBL_PATA40_SHORT is returned.  Otherwise reads PCI config byte
 * 0x54 and tests the per-channel cable-detect bits (0xc0 for the
 * secondary channel, 0x30 for the primary); a set bit indicates an
 * 80-wire cable.
 */
static u8 piix_cable_detect(ide_hwif_t *hwif)
{
	struct pci_dev *pdev = to_pci_dev(hwif->dev);
	const struct ich_laptop *lap = &ich_laptop[0];
	u8 reg54h = 0, mask = hwif->channel ? 0xc0 : 0x30;

	/* check for specials */
	while (lap->device) {
		if (lap->device == pdev->device &&
		    lap->subvendor == pdev->subsystem_vendor &&
		    lap->subdevice == pdev->subsystem_device) {
			return ATA_CBL_PATA40_SHORT;
		}
		lap++;
	}

	pci_read_config_byte(pdev, 0x54, &reg54h);

	return (reg54h & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
}

/**
 *	init_hwif_piix		-	fill in the hwif for the PIIX
 *	@hwif: IDE interface
 *
 *	Set up the ide_hwif_t for the PIIX interface according to the
 *	capabilities of the hardware.  If the 450NX errata check
 *	(piix_check_450nx below) flagged the system, all DMA mode masks
 *	are cleared so the interface runs PIO-only.
 */
static void __devinit init_hwif_piix(ide_hwif_t *hwif)
{
	if (!hwif->dma_base)
		return;

	/* no_piix_dma is set by piix_check_450nx() at module init */
	if (no_piix_dma)
		hwif->ultra_mask = hwif->mwdma_mask = hwif->swdma_mask = 0;
}

/* Port operations for plain PIIX parts (no IRQ-clear quirk needed). */
static const struct ide_port_ops piix_port_ops = {
	.set_pio_mode		= piix_set_pio_mode,
	.set_dma_mode		= piix_set_dma_mode,
	.cable_detect		= piix_cable_detect,
};

/* Port operations for ICH parts: adds the BMDMA-status clear on IRQ. */
static const struct ide_port_ops ich_port_ops = {
	.set_pio_mode		= piix_set_pio_mode,
	.set_dma_mode		= piix_set_dma_mode,
	.clear_irq		= ich_clear_irq,
	.cable_detect		= piix_cable_detect,
};

/*
 * Template for a PIIX-family ide_port_info entry; only the UDMA mask
 * varies between generations.
 */
#define DECLARE_PIIX_DEV(udma) \
	{ \
		.name		= DRV_NAME, \
		.init_hwif	= init_hwif_piix, \
		.enablebits	= {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
		.port_ops	= &piix_port_ops, \
		.pio_mask	= ATA_PIO4, \
		.swdma_mask	= ATA_SWDMA2_ONLY, \
		.mwdma_mask	= ATA_MWDMA12_ONLY, \
		.udma_mask	= udma, \
	}

/*
 * Template for an ICH-family ide_port_info entry; ICH parts get the
 * chipset init hook and ich_port_ops, and both MWDMA and UDMA masks
 * vary between generations.
 */
#define DECLARE_ICH_DEV(mwdma, udma) \
	{ \
		.name		= DRV_NAME, \
		.init_chipset	= init_chipset_ich, \
		.init_hwif	= init_hwif_piix, \
		.enablebits	= {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
		.port_ops	= &ich_port_ops, \
		.pio_mask	= ATA_PIO4, \
		.swdma_mask	= ATA_SWDMA2_ONLY, \
		.mwdma_mask	= mwdma, \
		.udma_mask	= udma, \
	}

/*
 * Per-variant port info; indexed by the driver_data field of the
 * piix_pci_tbl entries below.
 */
static const struct ide_port_info piix_pci_info[] __devinitdata = {
	/* 0: MPIIX */
	{	/*
		 * MPIIX actually has only a single IDE channel mapped to
		 * the primary or secondary ports depending on the value
		 * of the bit 14 of the IDETIM register at offset 0x6c
		 */
		.name		= DRV_NAME,
		.enablebits	= {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}},
		.host_flags	= IDE_HFLAG_ISA_PORTS |
				  IDE_HFLAG_NO_DMA,
		.pio_mask	= ATA_PIO4,
		/* This is a painful system best to let it self tune for now */
	},
	/* 1: PIIXa/PIIXb/PIIX3 */
	DECLARE_PIIX_DEV(0x00), /* no udma */
	/* 2: PIIX4 */
	DECLARE_PIIX_DEV(ATA_UDMA2),
	/* 3: ICH0 */
	DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA2),
	/* 4: ICH */
	DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA4),
	/* 5: PIIX4 */
	DECLARE_PIIX_DEV(ATA_UDMA4),
	/* 6: ICH[2-6]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
	DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA5),
	/* 7: ICH7/7-R, no MWDMA1 */
	DECLARE_ICH_DEV(ATA_MWDMA2_ONLY, ATA_UDMA5),
};

/**
 *	piix_init_one	-	called when a PIIX is found
 *	@dev: the piix device
 *	@id: the matching pci id
 *
 *	Called when the PCI registration layer (or the IDE initialization)
 *	finds a device matching our IDE device tables.  id->driver_data
 *	indexes piix_pci_info[] to select the variant-specific setup.
 */
static int __devinit piix_init_one(struct pci_dev *dev,
				   const struct pci_device_id *id)
{
	return ide_pci_init_one(dev, &piix_pci_info[id->driver_data], NULL);
}

/**
 *	piix_check_450nx	-	Check for problem 450NX setup
 *
 *	Check for the present of 450NX errata #19 and errata #25. If
 *	they are found, disable use of DMA IDE.  Sets the module-level
 *	no_piix_dma flag consumed later by init_hwif_piix(): 1 means the
 *	original-revision hang errata, 2 means the PXB bus-lock errata
 *	(which a BIOS update may fix).
 */
static void __devinit piix_check_450nx(void)
{
	struct pci_dev *pdev = NULL;
	u16 cfg;

	/* Scan every 82454NX host bridge in the system. */
	while((pdev=pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev))!=NULL)
	{
		/* Look for 450NX PXB. Check for problem configurations
		   A PCI quirk checks bit 6 already */
		pci_read_config_word(pdev, 0x41, &cfg);
		/* Only on the original revision: IDE DMA can hang */
		if (pdev->revision == 0x00)
			no_piix_dma = 1;
		/* On all revisions below 5 PXB bus lock must be disabled for IDE */
		else if (cfg & (1<<14) && pdev->revision < 5)
			no_piix_dma = 2;
	}
	if(no_piix_dma)
		printk(KERN_WARNING DRV_NAME ": 450NX errata present, disabling IDE DMA.\n");
	if(no_piix_dma == 2)
		printk(KERN_WARNING DRV_NAME ": A BIOS update may resolve this.\n");
}

/*
 * PCI IDs this driver binds to.  The trailing number is driver_data:
 * an index into piix_pci_info[] selecting the variant description.
 */
static const struct pci_device_id piix_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_0),  1 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_1),  1 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX),    0 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371SB_1),  1 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371AB),    2 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AB_1),  3 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82443MX_1),  2 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AA_1),  4 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82372FB_1),  5 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82451NX),    2 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_9),  6 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_8),  6 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_10), 6 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_11), 6 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_11), 6 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_11), 6 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801E_11),  6 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_10), 6 },
#ifdef CONFIG_BLK_DEV_IDE_SATA
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_1),  6 },
#endif
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2),      6 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19),    6 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21),    7 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1),  6 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18),    7 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6),     6 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, piix_pci_tbl);

static struct pci_driver piix_pci_driver = {
	.name		= "PIIX_IDE",
	.id_table	= piix_pci_tbl,
	.probe		= piix_init_one,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};

/*
 * Module entry: run the 450NX errata check once, then register the PCI
 * driver (probe callbacks fire for each matching device).
 */
static int __init piix_ide_init(void)
{
	piix_check_450nx();
	return ide_pci_register_driver(&piix_pci_driver);
}

/* Module exit: unregister the PCI driver. */
static void __exit piix_ide_exit(void)
{
	pci_unregister_driver(&piix_pci_driver);
}

module_init(piix_ide_init);
module_exit(piix_ide_exit);

MODULE_AUTHOR("Andre Hedrick, Andrzej Krzysztofowicz");
MODULE_DESCRIPTION("PCI driver module for Intel PIIX IDE");
MODULE_LICENSE("GPL");
gpl-2.0