Dataset schema (one record per source file):

  code       string  (length 3 to 1.05M)
  repo_name  string  (length 4 to 116)
  path       string  (length 3 to 942)
  language   string  (30 classes)
  license    string  (15 classes)
  size       int32   (3 to 1.05M)
"""grace Revision ID: 2bce3f42832 Revises: 100d29f9f7e Create Date: 2015-08-19 13:48:08.511040 """ # revision identifiers, used by Alembic. revision = '2bce3f42832' down_revision = '100d29f9f7e' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('cs_jiao_yi_ma', sa.Column('xzbz', sa.CHAR(length=12), nullable=False)) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_column('cs_jiao_yi_ma', 'xzbz') ### end Alembic commands ###
repo_name: huangtao-sh/grace
path: grace/alembic/versions/2bce3f42832_grace.py
language: Python
license: gpl-2.0
size: 649
/* * Copyright (C) 2005-2008 Team XBMC * http://www.xbmc.org * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with XBMC; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * http://www.gnu.org/copyleft/gpl.html * */ #include "stdafx.h" #include "RGBRendererV2.h" #define SURFTOTEX(a) ((a)->Parent ? (a)->Parent : (D3DBaseTexture*)(a)) //#define DBGBOB CRGBRendererV2::CRGBRendererV2(LPDIRECT3DDEVICE8 pDevice) : CXBoxRenderer(pDevice) { m_444PTextureFull = NULL; m_444PTextureField = NULL; m_hInterleavingShader = 0; m_hInterleavingShaderAlpha = 0; m_hYUVtoRGBLookup = 0; m_UVLookup = NULL; m_UVErrorLookup = NULL; m_motionpass = 5; memset(&m_yuvcoef_last, 0, sizeof(YUVCOEF)); memset(&m_yuvrange_last, 0, sizeof(YUVRANGE)); } void CRGBRendererV2::FlipPage(int source) { m_444GeneratedFull = false; CXBoxRenderer::FlipPage(source); } void CRGBRendererV2::Delete444PTexture() { CSingleLock lock(g_graphicsContext); SAFE_RELEASE(m_444PTextureFull) SAFE_RELEASE(m_444PTextureField) CLog::Log(LOGDEBUG, "Deleted 444P video textures"); } void CRGBRendererV2::Clear444PTexture(bool full, bool field) { CSingleLock lock(g_graphicsContext); if(m_444PTextureFull && full) { D3DLOCKED_RECT lr; m_444PTextureFull->LockRect(0, &lr, NULL, 0); memset(lr.pBits, 0x00, lr.Pitch*m_iSourceHeight); m_444PTextureFull->UnlockRect(0); } if(m_444PTextureField && field) { D3DLOCKED_RECT lr; m_444PTextureField->LockRect(0, &lr, NULL, 0); #ifdef DBGBOB memset(lr.pBits, 0xFF, lr.Pitch*m_iSourceHeight>>1); #else memset(lr.pBits, 0x00, lr.Pitch*m_iSourceHeight>>1); #endif m_444PTextureField->UnlockRect(0); } m_444GeneratedFull = false; } bool CRGBRendererV2::Create444PTexture(bool full, bool field) { CSingleLock lock(g_graphicsContext); if (!m_444PTextureFull && full) { if(D3D_OK != m_pD3DDevice->CreateTexture(m_iSourceWidth, m_iSourceHeight, 1, 0, D3DFMT_LIN_A8R8G8B8, 0, &m_444PTextureFull)) return false; CLog::Log(LOGINFO, "Created 444P full texture"); } if (!m_444PTextureField && field) { if(D3D_OK != m_pD3DDevice->CreateTexture(m_iSourceWidth, m_iSourceHeight>>1, 1, 0, D3DFMT_LIN_A8R8G8B8, 0, &m_444PTextureField)) return false; CLog::Log(LOGINFO, "Created 444P field texture"); } return true; } void CRGBRendererV2::ManageTextures() { //use 1 buffer in fullscreen mode and 0 buffers in windowed mode if (!g_graphicsContext.IsFullScreenVideo()) { if (m_444PTextureFull || m_444PTextureField) Delete444PTexture(); } CXBoxRenderer::ManageTextures(); if (g_graphicsContext.IsFullScreenVideo()) { if (!m_444PTextureFull) { Create444PTexture(true, false); Clear444PTexture(true, false); } } } bool CRGBRendererV2::Configure(unsigned int width, unsigned int height, unsigned int d_width, unsigned int d_height, float fps, unsigned flags) { if(!CXBoxRenderer::Configure(width, height, d_width, d_height, fps, flags)) return false; // create our lookup textures for yv12->rgb translation, if(!CreateLookupTextures(m_yuvcoef, m_yuvrange) ) return false; m_bConfigured = true; return true; } void 
CRGBRendererV2::Render(DWORD flags) { CSingleLock lock(g_graphicsContext); if ( !g_graphicsContext.IsFullScreenVideo() ) { RenderLowMem(flags); } else { int index = m_iYV12RenderBuffer; if( !(flags & RENDER_FLAG_NOLOCK) ) if( WaitForSingleObject(m_eventTexturesDone[index], 500) == WAIT_TIMEOUT ) CLog::Log(LOGWARNING, __FUNCTION__" - Timeout waiting for texture %d", index); D3DSurface* p444PSourceFull = NULL; D3DSurface* p444PSourceField = NULL; if( flags & (RENDER_FLAG_TOP|RENDER_FLAG_BOT) ) { if(!m_444PTextureField) { Create444PTexture(false, true); Clear444PTexture(false, true); } if(!m_444PTextureField) { CLog::Log(LOGERROR, __FUNCTION__" - Couldn't create field texture"); return; } m_444PTextureField->GetSurfaceLevel(0, &p444PSourceField); } if(!m_444PTextureFull) { CLog::Log(LOGERROR, __FUNCTION__" - Couldn't create full texture"); return; } m_444PTextureFull->GetSurfaceLevel(0, &p444PSourceFull); //UV in interlaced video is seen as being closer to first line in first field and closer to second line in second field //we shift it with an offset of 1/4th pixel (1/8 in UV planes) //This need only be done when field scaling #define CHROMAOFFSET_VERT 0.125f //Each chroma sample is not said to be between the first and second sample as in the vertical case //first Y(1) <=> UV(1), Y(2) <=> ( UV(1)+UV(2) ) / 2, Y(3) <=> UV(2) //we wish to offset this by 1/2 pxiel to le left, which in the half rez of UV planes means 1/4th #define CHROMAOFFSET_HORIZ 0.25f //Example of how YUV has it's Luma and Chroma data stored //for progressive video //L L L L L L L L L L //C C C C C //L L L L L L L L L L //Example of how YUV has Chroma subsampled in interlaced displays //FIELD 1 FIELD 2 //L L L L L L L L L L //C C C C C // L L L L L L L L L L // //L L L L L L L L L L // C C C C C // L L L L L L L L L L // //L L L L L L L L L L //C C C C C // L L L L L L L L L L // //......................................... //......................................... 
m_pD3DDevice->SetRenderState( D3DRS_SWATHWIDTH, 15 ); m_pD3DDevice->SetRenderState( D3DRS_ZENABLE, FALSE ); m_pD3DDevice->SetRenderState( D3DRS_FOGENABLE, FALSE ); m_pD3DDevice->SetRenderState( D3DRS_FILLMODE, D3DFILL_SOLID ); m_pD3DDevice->SetRenderState( D3DRS_CULLMODE, D3DCULL_CCW ); m_pD3DDevice->SetRenderState( D3DRS_YUVENABLE, FALSE ); DWORD alphaenabled; m_pD3DDevice->GetRenderState( D3DRS_ALPHABLENDENABLE, &alphaenabled ); m_pD3DDevice->SetRenderState( D3DRS_ALPHABLENDENABLE, FALSE ); m_pD3DDevice->SetRenderState( D3DRS_ALPHATESTENABLE, FALSE ); RECT rsf = { rs.left, rs.top>>1, rs.right, rs.bottom>>1 }; if( !m_444GeneratedFull ) { m_444GeneratedFull = true; InterleaveYUVto444P( m_YUVTexture[index][FIELD_FULL], NULL, // use motion from last frame as motion value p444PSourceFull, rs, rs, rs, 1, 1, 0.0f, 0.0f, CHROMAOFFSET_HORIZ, 0.0f); } #ifdef DBGBOB m_pD3DDevice->SetRenderState(D3DRS_COLORWRITEENABLE, D3DCOLORWRITEENABLE_ALPHA); #endif if( flags & RENDER_FLAG_TOP ) { InterleaveYUVto444P( m_YUVTexture[index][FIELD_TOP], m_444PTextureFull, // use a downscaled motion value from the full frame, p444PSourceField, rsf, rs, rsf, 1, 1, 0.0f, 0.0f, CHROMAOFFSET_HORIZ, +CHROMAOFFSET_VERT); } else if( flags & RENDER_FLAG_BOT ) { InterleaveYUVto444P( m_YUVTexture[index][FIELD_BOT], m_444PTextureFull, // use a downscaled motion value from the full frame, p444PSourceField, rsf, rs, rsf, 1, 1, 0.0f, 0.0f, CHROMAOFFSET_HORIZ, -CHROMAOFFSET_VERT); } #ifdef DBGBOB m_pD3DDevice->SetRenderState(D3DRS_COLORWRITEENABLE, D3DCOLORWRITEENABLE_ALL); #endif //Okey, when the gpu is done with the textures here, they are free to be modified again if( !(flags & RENDER_FLAG_NOUNLOCK) ) m_pD3DDevice->InsertCallback(D3DCALLBACK_WRITE,&TextureCallback, (DWORD)m_eventTexturesDone[index]); // Now perform the YUV->RGB conversion in a single pass, and render directly to the screen m_pD3DDevice->SetScreenSpaceOffset( -0.5f, -0.5f ); if(true) { // NOTICE, field motion can have been replaced by downscaled frame motion // this method uses the difference between fields to estimate motion // it work sorta, but it can't for example handle horizontal // hairlines wich only exist in one field, they will flicker // as they get considered motion // render the full frame m_pD3DDevice->SetRenderState(D3DRS_ALPHATESTENABLE, FALSE); RenderYUVtoRGB(m_444PTextureFull, rs, rd, 0.0f, 0.0f); // render the field texture ontop if(m_444PTextureField && p444PSourceField) { m_pD3DDevice->SetRenderState(D3DRS_ALPHATESTENABLE, TRUE); m_pD3DDevice->SetRenderState(D3DRS_ALPHAFUNC, D3DCMP_GREATEREQUAL); m_pD3DDevice->SetRenderState(D3DRS_ALPHAREF, m_motionpass); if(flags & RENDER_FLAG_TOP) RenderYUVtoRGB(m_444PTextureField, rsf, rd, 0.0f, 0.25); else RenderYUVtoRGB(m_444PTextureField, rsf, rd, 0.0f, -0.25); } } else { // this method will use the difference between this and previous // frame as an estimate for motion. this will currently fail // on the first field that has motion. as then only that line // has motion. 
if the alpha channel where first downscaled // to the field texture lineary we should get away from that // render the field texture first if(m_444PTextureField && p444PSourceField) { m_pD3DDevice->SetRenderState(D3DRS_ALPHATESTENABLE, FALSE); if(flags & RENDER_FLAG_TOP) RenderYUVtoRGB(m_444PTextureField, rsf, rd, 0.0f, 0.25); else RenderYUVtoRGB(m_444PTextureField, rsf, rd, 0.0f, -0.25); } // fill in any place we have no motion with full texture m_pD3DDevice->SetRenderState(D3DRS_ALPHATESTENABLE, TRUE); m_pD3DDevice->SetRenderState(D3DRS_ALPHAFUNC, D3DCMP_LESS); m_pD3DDevice->SetRenderState(D3DRS_ALPHAREF, m_motionpass); RenderYUVtoRGB(m_444PTextureFull, rs, rd, 0.0f, 0.0f); } m_pD3DDevice->SetScreenSpaceOffset(0.0f, 0.0f); m_pD3DDevice->SetRenderState( D3DRS_ALPHABLENDENABLE, alphaenabled ); m_pD3DDevice->SetRenderState( D3DRS_ALPHATESTENABLE, FALSE ); m_pD3DDevice->SetTexture(0, NULL); m_pD3DDevice->SetTexture(1, NULL); m_pD3DDevice->SetTexture(2, NULL); m_pD3DDevice->SetTexture(3, NULL); m_pD3DDevice->SetPixelShader( NULL ); SAFE_RELEASE(p444PSourceFull); SAFE_RELEASE(p444PSourceField); } CXBoxRenderer::Render(flags); } unsigned int CRGBRendererV2::PreInit() { CXBoxRenderer::PreInit(); // Create the pixel shader if (!m_hInterleavingShader) { CSingleLock lock(g_graphicsContext); // shader to interleave separate Y U and V planes into a single YUV output const char* interleave = "xps.1.1\n" "def c0,1,0,0,0\n" "def c1,0,1,0,0\n" "def c2,0,0,1,0\n" "tex t0\n" "tex t1\n" "tex t2\n" "tex t3\n" // interleave our data "xmma discard,discard,r0, t0,c0, t1,c1\n" "mad r0, t2,c2,r0\n" "sub_x4 r1, r0,t3\n" // calculate the differens in this pixel values "dp3 r1.rgba, r1,r1\n" // take the absolute of the "yuv" difference vector // "add_d2 r1.a, r1, t3\n" // average with previouse value to avoid minor changes "mov r0.a, r1"; const char* interleavealpha = "xps.1.1\n" "def c0,1,0,0,0\n" "def c1,0,1,0,0\n" "def c2,0,0,1,0\n" "tex t0\n" "tex t1\n" "tex t2\n" "tex t3\n" // interleave our data "xmma discard,discard,r0, t0,c0, t1,c1\n" "mad r0, t2,c2,r0\n" // use alpha from t3 "mov r0.a, t3"; // shader for 14bit accurate YUV to RGB (single pass :) const char* yuv2rgb = "xps.1.1\n" "def c0,0.0117647,0.0117647,0.0117647,0\n" "tex t0\n" "texreg2ar t1, t0\n" "texreg2gb t2, t0\n" "texreg2gb t3, t0\n" "add r0, t1, t2_bx2\n" "add r1, t1.a, t3\n" "mad r0, r1, c0, r0\n" "mov r0.a, t0"; XGBuffer* pShader; XGAssembleShader("InterleaveShader", interleave, strlen(interleave), 0, NULL, &pShader, NULL, NULL, NULL, NULL, NULL); m_pD3DDevice->CreatePixelShader((D3DPIXELSHADERDEF*)pShader->GetBufferPointer(), &m_hInterleavingShader); pShader->Release(); XGAssembleShader("InterleaveShaderAlpha", interleavealpha, strlen(interleavealpha), 0, NULL, &pShader, NULL, NULL, NULL, NULL, NULL); m_pD3DDevice->CreatePixelShader((D3DPIXELSHADERDEF*)pShader->GetBufferPointer(), &m_hInterleavingShaderAlpha); pShader->Release(); XGAssembleShader("YUV2RGBShader", yuv2rgb, strlen(yuv2rgb), 0, NULL, &pShader, NULL, NULL, NULL, NULL, NULL); m_pD3DDevice->CreatePixelShader((D3DPIXELSHADERDEF*)pShader->GetBufferPointer(), &m_hYUVtoRGBLookup); pShader->Release(); } return 0; } void CRGBRendererV2::UnInit() { CSingleLock lock(g_graphicsContext); Delete444PTexture(); DeleteLookupTextures(); if (m_hInterleavingShader) { m_pD3DDevice->DeletePixelShader(m_hInterleavingShader); m_hInterleavingShader = 0; } if (m_hInterleavingShaderAlpha) { m_pD3DDevice->DeletePixelShader(m_hInterleavingShaderAlpha); m_hInterleavingShaderAlpha = 0; } if (m_hYUVtoRGBLookup) 
{ m_pD3DDevice->DeletePixelShader(m_hYUVtoRGBLookup); m_hYUVtoRGBLookup = 0; } CXBoxRenderer::UnInit(); } void CRGBRendererV2::DeleteLookupTextures() { if (m_UVLookup) { m_UVLookup->Release(); m_UVLookup = NULL; } if (m_UVErrorLookup) { m_UVErrorLookup->Release(); m_UVErrorLookup = NULL; } } bool CRGBRendererV2::CreateLookupTextures(const YUVCOEF &coef, const YUVRANGE &range) { if(memcmp(&m_yuvcoef_last, &coef, sizeof(YUVCOEF)) == 0 && memcmp(&m_yuvrange_last, &range, sizeof(YUVRANGE)) == 0) return true; DeleteLookupTextures(); if ( D3D_OK != m_pD3DDevice->CreateTexture(1 , 256, 1, 0, D3DFMT_A8L8 , 0, &m_YLookup) || D3D_OK != m_pD3DDevice->CreateTexture(256, 256, 1, 0, D3DFMT_A8R8G8B8, 0, &m_UVLookup) || D3D_OK != m_pD3DDevice->CreateTexture(256, 256, 1, 0, D3DFMT_A8R8G8B8, 0, &m_UVErrorLookup) ) { DeleteLookupTextures(); CLog::Log(LOGERROR, "Could not create RGB lookup textures"); return false; } // fill in the lookup texture // create a temporary buffer as we need to swizzle the result D3DLOCKED_RECT lr; BYTE *pBuffY = new BYTE[1 * 256 * 2]; BYTE *pBuff = new BYTE[256 * 256 * 4]; BYTE *pErrorBuff = new BYTE[256 * 256 * 4]; if(pBuffY) { // first column is our luminance data for (int y = 0; y < 256; y++) { float fY = (y - range.y_min) * 255.0f / (range.y_max - range.y_min); fY = CLAMP(fY, 0.0f, 255.0f); float fWhole = floor(fY); float fFrac = floor((fY - fWhole) * 85.0f + 0.5f); // 0 .. 1.0 pBuffY[2*y] = (BYTE)fWhole; pBuffY[2*y+1] = (BYTE)fFrac; } } if (pBuff && pErrorBuff) { for (int u = 1; u < 256; u++) { for (int v = 0; v < 256; v++) { // convert to -0.5 .. 0.5 ( -127.5 .. 127.5 ) float fV = (v - range.v_min) * 255.f / (range.v_max - range.v_min) - 127.5f; float fU = (u - range.u_min) * 255.f / (range.u_max - range.u_min) - 127.5f; fU = CLAMP(fU, -127.5f, 127.5f); fV = CLAMP(fV, -127.5f, 127.5f); // have U and V, calculate R, G and B contributions (lie between 0 and 255) // -1 is mapped to 0, 1 is mapped to 255 float r = coef.r_up * fU + coef.r_vp * fV; float g = coef.g_up * fU + coef.g_vp * fV; float b = coef.b_up * fU + coef.b_vp * fV; float r_rnd = floor(r * 0.5f - 0.5f) * 2 + 1; float g_rnd = floor(g * 0.5f - 0.5f) * 2 + 1; float b_rnd = floor(b * 0.5f - 0.5f) * 2 + 1; float ps_r = (r_rnd - 1) * 0.5f + 128.0f; float ps_g = (g_rnd - 1) * 0.5f + 128.0f; float ps_b = (b_rnd - 1) * 0.5f + 128.0f; ps_r = CLAMP(ps_r, 0.0f, 255.0f); ps_g = CLAMP(ps_g, 0.0f, 255.0f); ps_b = CLAMP(ps_b, 0.0f, 255.0f); float r_frac = floor((r - r_rnd) * 85.0f + 0.5f); float b_frac = floor((b - b_rnd) * 85.0f + 0.5f); float g_frac = floor((g - g_rnd) * 85.0f + 0.5f); pBuff[4*u + 1024*v + 0] = (BYTE)ps_b; pBuff[4*u + 1024*v + 1] = (BYTE)ps_g; pBuff[4*u + 1024*v + 2] = (BYTE)ps_r; pBuff[4*u + 1024*v + 3] = 0; pErrorBuff[4*u + 1024*v + 0] = (BYTE)b_frac; pErrorBuff[4*u + 1024*v + 1] = (BYTE)g_frac; pErrorBuff[4*u + 1024*v + 2] = (BYTE)r_frac; pErrorBuff[4*u + 1024*v + 3] = 0; } } m_YLookup->LockRect(0, &lr, NULL, 0); XGSwizzleRect(pBuffY, 0, NULL, lr.pBits, 1, 256, NULL, 2); m_YLookup->UnlockRect(0); m_UVLookup->LockRect(0, &lr, NULL, 0); XGSwizzleRect(pBuff, 0, NULL, lr.pBits, 256, 256, NULL, 4); m_UVLookup->UnlockRect(0); m_UVErrorLookup->LockRect(0, &lr, NULL, 0); XGSwizzleRect(pErrorBuff, 0, NULL, lr.pBits, 256, 256, NULL, 4); m_UVErrorLookup->UnlockRect(0); m_yuvcoef_last = coef; m_yuvrange_last = range; } delete[] pBuff; delete[] pErrorBuff; delete[] pBuffY; return true; } void CRGBRendererV2::InterleaveYUVto444P( YUVPLANES pSources, LPDIRECT3DTEXTURE8 pAlpha, LPDIRECT3DSURFACE8 pTarget, RECT &source, RECT 
&sourcealpha, RECT &target, unsigned cshift_x, unsigned cshift_y, float offset_x, float offset_y, float coffset_x, float coffset_y) { coffset_x += offset_x / (1<<cshift_x); coffset_y += offset_y / (1<<cshift_y); for (int i = 0; i < 3; ++i) { m_pD3DDevice->SetTexture( i, pSources[i]); m_pD3DDevice->SetTextureStageState( i, D3DTSS_ADDRESSU, D3DTADDRESS_CLAMP ); m_pD3DDevice->SetTextureStageState( i, D3DTSS_ADDRESSV, D3DTADDRESS_CLAMP ); m_pD3DDevice->SetTextureStageState( i, D3DTSS_MAGFILTER, D3DTEXF_LINEAR ); m_pD3DDevice->SetTextureStageState( i, D3DTSS_MINFILTER, D3DTEXF_LINEAR ); } m_pD3DDevice->SetVertexShader( FVF_YV12VERTEX ); if(pAlpha) { m_pD3DDevice->SetTexture(3, pAlpha); m_pD3DDevice->SetPixelShader( m_hInterleavingShaderAlpha ); } else { m_pD3DDevice->SetTexture(3, SURFTOTEX(pTarget)); m_pD3DDevice->SetPixelShader( m_hInterleavingShader ); } m_pD3DDevice->SetTextureStageState( 3, D3DTSS_ADDRESSU, D3DTADDRESS_CLAMP ); m_pD3DDevice->SetTextureStageState( 3, D3DTSS_ADDRESSV, D3DTADDRESS_CLAMP ); m_pD3DDevice->SetTextureStageState( 3, D3DTSS_MAGFILTER, D3DTEXF_LINEAR ); m_pD3DDevice->SetTextureStageState( 3, D3DTSS_MINFILTER, D3DTEXF_LINEAR ); LPDIRECT3DSURFACE8 pOldRT = NULL; if( pTarget ) { m_pD3DDevice->GetRenderTarget(&pOldRT); m_pD3DDevice->SetRenderTarget(pTarget, NULL); } m_pD3DDevice->SetScreenSpaceOffset(-0.5f, -0.5f); m_pD3DDevice->Begin(D3DPT_QUADLIST); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD0, (float)source.left + offset_x, (float)source.top + offset_y); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD1, (float)(source.left>>cshift_x) + coffset_x, (float)(source.top>>cshift_y) + coffset_y ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD2, (float)(source.left>>cshift_x) + coffset_x, (float)(source.top>>cshift_y) + coffset_y ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD3, (float)sourcealpha.left, (float)sourcealpha.top); m_pD3DDevice->SetVertexData4f( D3DVSDE_VERTEX, (float)target.left, (float)target.top, 0, 1.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD0, (float)source.right + offset_x, (float)source.top + offset_y); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD1, (float)(source.right>>cshift_x) + coffset_x, (float)(source.top>>cshift_y) + coffset_y ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD2, (float)(source.right>>cshift_x) + coffset_x, (float)(source.top>>cshift_y) + coffset_y ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD3, (float)sourcealpha.right, (float)sourcealpha.top); m_pD3DDevice->SetVertexData4f( D3DVSDE_VERTEX, (float)target.right, (float)target.top, 0, 1.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD0, (float)source.right + offset_x, (float)source.bottom + offset_y); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD1, (float)(source.right>>cshift_x) + coffset_x, (float)(source.bottom>>cshift_y) + coffset_y ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD2, (float)(source.right>>cshift_x) + coffset_x, (float)(source.bottom>>cshift_y) + coffset_y ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD3, (float)sourcealpha.right, (float)sourcealpha.bottom); m_pD3DDevice->SetVertexData4f( D3DVSDE_VERTEX, (float)target.right, (float)target.bottom, 0, 1.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD0, (float)source.left + offset_x, (float)source.bottom + offset_y); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD1, (float)(source.left>>cshift_x) + coffset_x, (float)(source.bottom>>cshift_y) + coffset_y ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD2, (float)(source.left>>cshift_x) + coffset_x, 
(float)(source.bottom>>cshift_y) + coffset_y ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD3, (float)sourcealpha.left, (float)sourcealpha.bottom); m_pD3DDevice->SetVertexData4f( D3DVSDE_VERTEX, (float)target.left, (float)target.bottom, 0, 1.0f ); m_pD3DDevice->End(); m_pD3DDevice->SetScreenSpaceOffset(0.0f, 0.0f); m_pD3DDevice->SetTexture(0, NULL); m_pD3DDevice->SetTexture(1, NULL); m_pD3DDevice->SetTexture(2, NULL); m_pD3DDevice->SetTexture(3, NULL); if( pOldRT ) { m_pD3DDevice->SetRenderTarget( pOldRT, NULL); pOldRT->Release(); } } void CRGBRendererV2::RenderYUVtoRGB( D3DBaseTexture* pSource, RECT &source, RECT &target, float offset_x, float offset_y) { m_pD3DDevice->SetTexture( 0, pSource); m_pD3DDevice->SetTexture( 1, m_YLookup); m_pD3DDevice->SetTexture( 2, m_UVLookup); m_pD3DDevice->SetTexture( 3, m_UVErrorLookup); for (int i = 0; i < 4; ++i) { m_pD3DDevice->SetTextureStageState( i, D3DTSS_ADDRESSU, D3DTADDRESS_CLAMP ); m_pD3DDevice->SetTextureStageState( i, D3DTSS_ADDRESSV, D3DTADDRESS_CLAMP ); m_pD3DDevice->SetTextureStageState( i, D3DTSS_MAGFILTER, D3DTEXF_LINEAR ); m_pD3DDevice->SetTextureStageState( i, D3DTSS_MINFILTER, D3DTEXF_LINEAR ); } m_pD3DDevice->SetVertexShader( FVF_YUVRGBVERTEX ); m_pD3DDevice->SetPixelShader( m_hYUVtoRGBLookup ); // render the full frame m_pD3DDevice->Begin(D3DPT_QUADLIST); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD1, 0.0f, 0.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD2, 0.0f, 0.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD3, 0.0f, 0.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD1, 1.0f, 0.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD2, 1.0f, 0.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD3, 1.0f, 0.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD1, 1.0f, 1.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD2, 1.0f, 1.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD3, 1.0f, 1.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD1, 0.0f, 1.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD2, 0.0f, 1.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD3, 0.0f, 1.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD0, (float)source.left + offset_x, (float)source.top + offset_y); m_pD3DDevice->SetVertexData4f( D3DVSDE_VERTEX, (float)target.left, (float)target.top, 0, 1.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD0, (float)source.right + offset_x, (float)source.top + offset_y); m_pD3DDevice->SetVertexData4f( D3DVSDE_VERTEX, (float)target.right, (float)target.top, 0, 1.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD0, (float)source.right + offset_x, (float)source.bottom + offset_y); m_pD3DDevice->SetVertexData4f( D3DVSDE_VERTEX, (float)target.right, (float)target.bottom, 0, 1.0f ); m_pD3DDevice->SetVertexData2f( D3DVSDE_TEXCOORD0, (float)source.left + offset_x, (float)source.bottom + offset_y); m_pD3DDevice->SetVertexData4f( D3DVSDE_VERTEX, (float)target.left, (float)target.bottom, 0, 1.0f ); m_pD3DDevice->End(); }
repo_name: xbmc/xbmc-antiquated
path: xbmc/cores/VideoRenderers/legacy/RGBRendererV2.cpp
language: C++
license: gpl-2.0
size: 24,853
/*
 * Copyright (C) 2008 - 2011 TrinityCore <http://www.trinitycore.org/>
 * Copyright (C) 2011 - 2012 ArkCORE <http://www.arkania.net/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef ARKCORE_PETAI_H
#define ARKCORE_PETAI_H

#include "CreatureAI.h"
#include "Timer.h"

class Creature;
class Spell;

class PetAI : public CreatureAI
{
    public:
        explicit PetAI(Creature* c);

        void EnterEvadeMode();
        void UpdateAI(const uint32);
        static int Permissible(const Creature*);

        void KilledUnit(Unit* /*victim*/);
        void AttackStart(Unit* target);
        void MovementInform(uint32 moveType, uint32 data);
        void OwnerDamagedBy(Unit* attacker);
        void OwnerAttacked(Unit* target);
        void ReceiveEmote(Player* player, uint32 textEmote);

    private:
        bool _isVisible(Unit*) const;
        bool _needToStop(void);
        void _stopAttack(void);

        void UpdateAllies();

        TimeTracker i_tracker;
        bool inCombat;
        std::set<uint64> m_AllySet;
        uint32 m_updateAlliesTimer;

        Unit* SelectNextTarget();
        void HandleReturnMovement();
        void DoAttack(Unit* target, bool chase);
        bool CanAttack(Unit* target);
};
#endif
repo_name: avalonfr/ArkCORE-4.3.4
path: src/server/game/AI/CoreAI/PetAI.h
language: C
license: gpl-2.0
size: 1,848
# Makefile.in generated by automake 1.11.1 from Makefile.am. # nls/pt_PT/Makefile. Generated from Makefile.in by configure. # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, # Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. # nls/pt_PT/Makefile.am for Fluxbox - www.fluxbox.org pkgdatadir = $(datadir)/fluxbox pkgincludedir = $(includedir)/fluxbox pkglibdir = $(libdir)/fluxbox pkglibexecdir = $(libexecdir)/fluxbox am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : subdir = nls/pt_PT DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \ $(top_srcdir)/configure.in am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = ${SHELL} /home/simon/GIT/buildroot-k3-current/output/build/fluxbox-1.3.2/missing --run aclocal-1.11 AMTAR = ${SHELL} /home/simon/GIT/buildroot-k3-current/output/build/fluxbox-1.3.2/missing --run tar AUTOCONF = ${SHELL} /home/simon/GIT/buildroot-k3-current/output/build/fluxbox-1.3.2/missing --run autoconf AUTOHEADER = ${SHELL} /home/simon/GIT/buildroot-k3-current/output/build/fluxbox-1.3.2/missing --run autoheader AUTOMAKE = ${SHELL} /home/simon/GIT/buildroot-k3-current/output/build/fluxbox-1.3.2/missing --run automake-1.11 AWK = gawk CC = /home/simon/GIT/buildroot-k3-current/output/host/usr/bin/arm-none-linux-gnueabi-gcc CCDEPMODE = depmode=gcc3 CFLAGS = -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -pipe -U_FORTIFY_SOURCE -fno-stack-protector -fomit-frame-pointer -fPIC -O2 -I/home/simon/GIT/buildroot-k3-current/output/host/usr/arm-buildroot-linux-gnueabi/sysroot/usr/include/X11 -I/home/simon/GIT/buildroot-k3-current/output/host/usr/arm-buildroot-linux-gnueabi/sysroot/usr/include/freetype2 -I/home/simon/GIT/buildroot-k3-current/output/host/usr/arm-buildroot-linux-gnueabi/sysroot/usr/include CPPFLAGS = -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 CXX = /home/simon/GIT/buildroot-k3-current/output/host/usr/bin/arm-none-linux-gnueabi-g++ CXXCPP = /home/simon/GIT/buildroot-k3-current/output/host/usr/bin/arm-none-linux-gnueabi-g++ -E CXXDEPMODE = depmode=gcc3 CXXFLAGS = -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -pipe -U_FORTIFY_SOURCE -fno-stack-protector -fomit-frame-pointer -fPIC -O2 -I/home/simon/GIT/buildroot-k3-current/output/host/usr/arm-buildroot-linux-gnueabi/sysroot/usr/include/X11 -I/home/simon/GIT/buildroot-k3-current/output/host/usr/arm-buildroot-linux-gnueabi/sysroot/usr/include/freetype2 
-I/home/simon/GIT/buildroot-k3-current/output/host/usr/arm-buildroot-linux-gnueabi/sysroot/usr/include CYGPATH_W = echo DEBUG = DEFAULT_APPS = $(prefix)/share/fluxbox/apps DEFAULT_INIT = $(prefix)/share/fluxbox/init DEFAULT_KEYS = $(prefix)/share/fluxbox/keys DEFAULT_MENU = $(prefix)/share/fluxbox/menu DEFAULT_OVERLAY = $(prefix)/share/fluxbox/overlay DEFAULT_STYLE = $(prefix)/share/fluxbox/styles/bloe DEFAULT_WINDOWMENU = $(prefix)/share/fluxbox/windowmenu DEFS = -DHAVE_CONFIG_H DEPDIR = .deps ECHO_C = ECHO_N = -n ECHO_T = EGREP = /bin/grep -E EXEEXT = GREP = /bin/grep IMLIB2_CFLAGS = IMLIB2_CONFIG = no IMLIB2_LIBS = INSTALL = /usr/bin/install -c INSTALL_DATA = ${INSTALL} -m 644 INSTALL_PROGRAM = ${INSTALL} INSTALL_SCRIPT = ${INSTALL} INSTALL_STRIP_PROGRAM = $(install_sh) -c -s LDFLAGS = -L/home/simon/GIT/buildroot-k3-current/output/host/usr/arm-buildroot-linux-gnueabi/sysroot/usr/lib -lSM -lICE LIBOBJS = ${LIBOBJDIR}lstat$U.o ${LIBOBJDIR}stat$U.o LIBS = -L/home/simon/GIT/buildroot-k3-current/output/host/usr/arm-buildroot-linux-gnueabi/sysroot/usr/lib -lX11 -lXft -L/home/simon/GIT/buildroot-k3-current/output/host/usr/arm-buildroot-linux-gnueabi/sysroot/usr/lib -lfontconfig -lXrender -lXpm -lXinerama -lXext -lXrandr -lXrandr LOCALE_PATH = $(prefix)/share/fluxbox/nls LTLIBOBJS = ${LIBOBJDIR}lstat$U.lo ${LIBOBJDIR}stat$U.lo MAKEINFO = ${SHELL} /home/simon/GIT/buildroot-k3-current/output/build/fluxbox-1.3.2/missing --run makeinfo MKDIR_P = /bin/mkdir -p NLS = OBJEXT = o PACKAGE = fluxbox PACKAGE_BUGREPORT = PACKAGE_NAME = PACKAGE_STRING = PACKAGE_TARNAME = PACKAGE_URL = PACKAGE_VERSION = PATH_SEPARATOR = : PKG_CONFIG = /home/simon/GIT/buildroot-k3-current/output/host/usr/bin/pkg-config RANLIB = /home/simon/GIT/buildroot-k3-current/output/host/usr/bin/arm-none-linux-gnueabi-ranlib SET_MAKE = SHELL = /bin/sh STRIP = /home/simon/GIT/buildroot-k3-current/output/host/usr/bin/arm-none-linux-gnueabi-strip VERSION = 1.3.2 XMKMF = X_CFLAGS = -I/home/simon/GIT/buildroot-k3-current/output/host/usr/arm-buildroot-linux-gnueabi/sysroot/usr/include/X11 X_EXTRA_LIBS = X_LIBS = -L/home/simon/GIT/buildroot-k3-current/output/host/usr/arm-buildroot-linux-gnueabi/sysroot/usr/lib X_PRE_LIBS = -lSM -lICE abs_builddir = /home/simon/GIT/buildroot-k3-current/output/build/fluxbox-1.3.2/nls/pt_PT abs_srcdir = /home/simon/GIT/buildroot-k3-current/output/build/fluxbox-1.3.2/nls/pt_PT abs_top_builddir = /home/simon/GIT/buildroot-k3-current/output/build/fluxbox-1.3.2 abs_top_srcdir = /home/simon/GIT/buildroot-k3-current/output/build/fluxbox-1.3.2 ac_ct_CC = ac_ct_CXX = am__include = include am__leading_dot = . am__quote = am__tar = ${AMTAR} chof - "$$tardir" am__untar = ${AMTAR} xf - bindir = ${exec_prefix}/bin build_alias = i686-pc-linux-gnu builddir = . 
datadir = ${datarootdir} datarootdir = ${prefix}/share docdir = ${datarootdir}/doc/${PACKAGE} dvidir = ${docdir} exec_prefix = /usr gencat_cmd = gencat host_alias = arm-buildroot-linux-gnueabi htmldir = ${docdir} includedir = ${prefix}/include infodir = ${datarootdir}/info install_sh = ${SHELL} /home/simon/GIT/buildroot-k3-current/output/build/fluxbox-1.3.2/install-sh libdir = ${exec_prefix}/lib libexecdir = ${exec_prefix}/libexec localedir = ${datarootdir}/locale localstatedir = ${prefix}/var mandir = ${datarootdir}/man mkdir_p = /bin/mkdir -p oldincludedir = /usr/include pdfdir = ${docdir} prefix = /usr program_prefix = program_suffix = NONE program_transform_name = s&^&& psdir = ${docdir} regex_cmd = sed sbindir = ${exec_prefix}/sbin sharedstatedir = ${prefix}/com srcdir = . sysconfdir = /etc target_alias = arm-buildroot-linux-gnueabi top_build_prefix = ../../ top_builddir = ../.. top_srcdir = ../.. THE_LANG = pt_PT SRC_CODESET = ISO-8859-1 DEST_CODESETS = ISO-8859-1 UTF-8 NLSTEST = MFILES = Translation.m GENERATED_MFILES = $(patsubst %,generated-%.m,$(DEST_CODESETS)) MAINTAINERCLEANFILES = Makefile.in $(GENERATED_MFILES) CATFILES = $(patsubst %,fluxbox-%.cat,$(DEST_CODESETS)) # We distribute the generated files so that users don't need iconv EXTRA_DIST = $(MFILES) $(GENERATED_MFILES) CLEANFILES = $(CATFILES) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu nls/pt_PT/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu nls/pt_PT/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile all-local installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ `test -z '$(STRIP)' || \ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install mostlyclean-generic: clean-generic: -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-am clean-am: clean-generic mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-data-local install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-local .MAKE: install-am install-strip .PHONY: all all-am all-local check check-am clean clean-generic \ distclean distclean-generic distdir dvi dvi-am html html-am \ info info-am install install-am install-data install-data-am \ install-data-local install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ pdf-am ps ps-am uninstall uninstall-am uninstall-local all-local: $(CATFILES) install-data-local: $(CATFILES) @if test x$(NLSTEST) = "x-DNLS"; then \ for codeset in $(DEST_CODESETS); do \ echo "Installing catalog in $(DESTDIR)$(LOCALE_PATH)/$(THE_LANG).$${codeset}"; \ $(mkinstalldirs) $(DESTDIR)$(LOCALE_PATH)/$(THE_LANG).$${codeset}; \ $(INSTALL_DATA) fluxbox-$${codeset}.cat $(DESTDIR)$(LOCALE_PATH)/$(THE_LANG).$${codeset}/fluxbox.cat; \ done; \ fi # not part of the normal build process translations: $(GENERATED_MFILES) generated-%.m: Translation.m iconv -f $(SRC_CODESET) -t $* $(srcdir)/Translation.m | sed s/$(SRC_CODESET)/$*/ > $@ uninstall-local: @if test x$(NLSTEST) = "x-DNLS"; then \ for codeset in $(DEST_CODESETS); do \ rm -f $(DESTDIR)$(LOCALE_PATH)/$(THE_LANG).$${codeset}/fluxbox.cat; \ rmdir $(DESTDIR)$(LOCALE_PATH)/$(THE_LANG).$${codeset}; \ done; \ fi fluxbox-%.cat: generated-%.m Translation.m @if test x$(NLSTEST) = "x-DNLS"; then \ echo "Creating catfile for $*"; \ $(gencat_cmd) fluxbox-$*.cat generated-$*.m; 
\ fi # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT:
repo_name: twobob/buildroot-kindle
path: output/build/fluxbox-1.3.2/nls/pt_PT/Makefile
language: Makefile
license: gpl-2.0
size: 13,803
package com.anju.android.carpoolexpensecalculator;

import android.content.Context;
import android.database.sqlite.SQLiteDatabase;
import android.database.sqlite.SQLiteOpenHelper;

public class FeedReaderDbHelper extends SQLiteOpenHelper {

    private static final String DATABASE_NAME = "carpool.db";
    private static final int DATABASE_VERSION = 2;

    public FeedReaderDbHelper(Context context) {
        super(context, DATABASE_NAME, null, DATABASE_VERSION);
    }

    @Override
    public void onCreate(SQLiteDatabase db) {
        db.execSQL(FeedReaderContract.SQL_CREATE_ENTRIES);
    }

    @Override
    public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {
        // This database is only a cache for online data, so its upgrade policy
        // is simply to discard the data and start over
        db.execSQL(FeedReaderContract.SQL_DELETE_ENTRIES);
        onCreate(db);
    }

    @Override
    public void onDowngrade(SQLiteDatabase db, int oldVersion, int newVersion) {
        onUpgrade(db, oldVersion, newVersion);
    }
}
repo_name: anjusuryawanshi/carpool-calculator
path: src/com/anju/android/carpoolexpensecalculator/FeedReaderDbHelper.java
language: Java
license: gpl-2.0
size: 1,090
<?php namespace Wizard\Build; /* * Main User class */ class User { public function access($access_level) { // if user isn't logged in, fail access if (!$this->isLoggedIn()) return false; // get all accesses of current logged in user $ret = DB()->query("SELECT al.label FROM users u LEFT JOIN access_groups ag ON ag.uid = u.id LEFT JOIN access_levels al ON ag.aid = al.id WHERE u.id = ? ; ", 'i', $_SESSION["login"]); // if query fails, fail access if ($ret == false) return false; // loop through each access level foreach ($ret as $access_label) { // on access level match, give access if ($access_level == $access_label[0]) return true; } // no match found, fail access return false; } // returns count of all users in db public function counted() { $ret = DB()->query("SELECT COUNT(id) counted FROM users;"); if ($ret == false) { return 0; } else { return $ret[0]['counted']; } } // returns count of all users activated by email in db public function counted_active() { $ret = DB()->query("SELECT COUNT(id) counted FROM users WHERE active='Y';"); if ($ret == false) { return 0; } else { return $ret[0]['counted']; } } // get email of current user public function getEmail() { if ($this->isLoggedIn()) { $ret = DB()->query("SELECT email FROM users WHERE active='Y' AND id=? LIMIT 1;", 's', $_SESSION['login']); if ($ret != false) { return $ret[0]['email']; } } return false; } // returns true if admin is logged on, false otherwise public function isAdmin() { if (isset($_SESSION['login'])) { $sql = "SELECT id FROM users WHERE active = 'Y' AND admin = 'Y' AND id = ? LIMIT 1;"; $data = DB()->query($sql, 's', $_SESSION['login']); if (($data != false) && ($data[0]['id'] == $_SESSION['login'])) { return true; } } return false; } // return true if a user is logged in, false otherwise public function isLoggedIn() { if (isset($_SESSION['login'])) { return true; } return false; } // login with email and password, returns true on logon, false otherwise public function login($email, $password) { $data = DB()->query("SELECT id, email, passhash FROM users WHERE status = 0 AND email = ? ORDER BY id ASC LIMIT 1;", 's', $email); if ($data != false) { if (($password != '') && (password_verify($password, $data[0]['passhash']) != false)) { $_SESSION['login'] = $data[0]['id']; return true; } } //$this->logout(); return false; } // Creates a user, first user created is admin // returns string of error or exact===true for created public function add($email, $password, $password_confirm) { // verify if passwords match if ($password != $password_confirm) { // passwords mismatch return 'Passwords mismatch'; } $counted_users = $this->counted(); // db table doesn't exist if ($counted_users == 0) { $sql = "CREATE TABLE `users` ( `id` int(6) UNSIGNED NOT NULL, `email` varchar(50) COLLATE latin1_general_ci NOT NULL, `passhash` text COLLATE latin1_general_ci NOT NULL, `keygen` varchar(32) COLLATE latin1_general_ci NOT NULL, `active` varchar(1) COLLATE latin1_general_ci NOT NULL, `admin` varchar(1) COLLATE latin1_general_ci NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci;"; $ret = DB()->query($sql); $sql = "ALTER TABLE `users` ADD PRIMARY KEY (`id`);"; $ret = DB()->query($sql); $sql = "ALTER TABLE `users` MODIFY `id` int(6) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=2;"; $ret = DB()->query($sql); } // verify if email exists $email_exists = DB()->query("SELECT id, email FROM users WHERE email=? 
ORDER BY id DESC LIMIT 1;", 's', $email); if ($email_exists != false) { // email already exists return 'Email already exists'; } // random string, ommitting confusing values like 0oO, l1i $rnd = substr(str_shuffle('23456789abcdefghjklmnpqrstuvwxyz'), 0, 8); $isAdmin = 'N'; // default: not admin if ($counted_users <= 0) { // when no users in active db $isAdmin = 'Y'; // first user is admin } $sql = "INSERT INTO users (email, passhash, keygen, active, admin) VALUES (?, ?, ?, 'N', ?);"; $data = DB()->query($sql, 'sssss', $email, password_hash($password, PASSWORD_BCRYPT, ['cost' => 12]), $rnd, $isAdmin); email($email, 'Account Confirmation', 'Please <a href="http://'.$_SERVER['SERVER_NAME'].'/confirm/admin/'.$rnd.'/'.$email.'">click here</a> to activate your account at '.$_SERVER['SERVER_NAME']); return true; } // confirm email account link to user public function confirm ($email, $key) { // verify if user exists $data = DB()->query("SELECT id, email FROM users WHERE active='N' AND keygen=? AND email=? ORDER BY id DESC LIMIT 1;", 'ss', $key, $email); if ($data != false) { // on found user, update to active user (user must still login) DB()->query("UPDATE users SET active='Y' WHERE keygen=? AND email=? ORDER BY id DESC LIMIT 1;", 'ss', $key, $email); return true; } return false; } // remove admin session public function logout() { if (isset($_SESSION['login'])) { unset($_SESSION['login']); } return true; } }
repo_name: LucLaverdure/DreamForgery
path: core/user/user.php
language: PHP
license: gpl-2.0
size: 5,680
<?php //clear all div on the page function ShowGallery() {$objResponse = new xajaxResponse(); //$objResponse->addAssign("UserAccountDiv","className","hidemsg");//clear useraccount $objResponse->addAssign("HomeTable","className","hidemsg");//clear home page $objResponse->addAssign("option","className","slidetext"); $objResponse->addAssign("registerdiv","className","hidemsg"); $objResponse->addAssign("mysearch","className","hidemsg"); $objResponse->addAssign("orderby","className","hidemsg"); return $objResponse->getXML(); }//end of function function showRegister() { $objResponse = new xajaxResponse(); $objResponse->addAssign("HomeTable","className","hidemsg");//clear home page //$objResponse->addAssign("UserAccountDiv","className","hidemsg");//clear useraccount $objResponse->addAssign("option","className","hidemsg"); $objResponse->addAssign("formcomments","className","hidemsg"); $objResponse->addAssign("mysearch","className","hidemsg"); $objResponse->addClear("table","innerHTML"); $objResponse->addClear("comments","innerHTML"); $objResponse->addAssign("orderby","className","hidemsg"); return $objResponse->getXML(); }//end of function function showAlbums() { $objResponse = new xajaxResponse(); $objResponse->addAssign("HomeTable","className","hidemsg");//clear home page //$objResponse->addAssign("UserAccountDiv","className","hidemsg");//clear useraccount $objResponse->addAssign("registerdiv","className","hidemsg"); $objResponse->addAssign("option","className","hidemsg"); $objResponse->addAssign("formcomments","className","hidemsg"); $objResponse->addAssign("mysearch","className","hidemsg"); $objResponse->addClear("table","innerHTML"); $objResponse->addClear("comments","innerHTML"); return $objResponse->getXML(); }//end of function function showsearch() {$objResponse = new xajaxResponse(); $objResponse->addAssign("HomeTable","className","hidemsg");//clear home page $objResponse->addAssign("option","className","hidemsg"); $objResponse->addAssign("orderby","className","hidemsg"); $objResponse->addAssign("loginform","className","hidemsg"); return $objResponse->getXML(); } function hideRegisterForm() {$objResponse = new xajaxResponse(); $objResponse->addAssign("registerdiv","className","hidemsg"); return $objResponse->getXML(); } function hidehomepage() {$objResponse = new xajaxResponse(); $objResponse->addAssign("HomeTable","className","hidemsg");//clear home page return $objResponse->getXML(); } function showHome() { //show div homepage $objResponse = new xajaxResponse(); $objResponse->addAssign("HomeTable","className","showmsg2");//clear home page return $objResponse->getXML(); } function HideImageDetailDiv() { //show div homepage $objResponse = new xajaxResponse(); $objResponse->addAssign("imageDetails","innerHTML","");//clear imagesdetails $objResponse->addAssign("imagecomments","innerHTML","");//clear imagesdetails return $objResponse->getXML(); } function showImageDetailDiv() { //show div homepage $objResponse = new xajaxResponse(); $objResponse->addAssign("imageDetails","className","");//clear imagesdetails $objResponse->addAssign("imagecomments","className","");//clear imagesdetails return $objResponse->getXML(); } ?> <script type="text/javascript"> //keep around the old call function xajax.realCall = xajax.call; //override the call function to bend to our wicked ways xajax.call = function(sFunction, aArgs, sRequestType) { //show the spinner this.$('spinner').style.display = 'inline'; //call the old call function return this.realCall(sFunction, aArgs, sRequestType); } //save the old 
processResponse function for later xajax.realProcessResponse = xajax.processResponse; //override the processResponse function xajax.processResponse = function(xml) { //hide the spinner //this.$('spinner').style.display = 'none'; //call the real processResponse function return this.realProcessResponse(xml); } </script> <META NAME="Generator" CONTENT="TextPad 4.6"> <META NAME="Author" CONTENT="?"> <META NAME="Keywords" CONTENT="?"> <META NAME="Description" CONTENT="?"> <script src="history.js"> </script>
repo_name: avinash/nuzimazz
path: clear.php
language: PHP
license: gpl-2.0
size: 4,354
/* ***** BEGIN LICENSE BLOCK ***** * Source last modified: $Id: time.h,v 1.18 2005/09/06 15:14:20 singerb Exp $ * * Portions Copyright (c) 1995-2004 RealNetworks, Inc. All Rights Reserved. * * The contents of this file, and the files included with this file, * are subject to the current version of the RealNetworks Public * Source License (the "RPSL") available at * http://www.helixcommunity.org/content/rpsl unless you have licensed * the file under the current version of the RealNetworks Community * Source License (the "RCSL") available at * http://www.helixcommunity.org/content/rcsl, in which case the RCSL * will apply. You may also obtain the license terms directly from * RealNetworks. You may not use this file except in compliance with * the RPSL or, if you have a valid RCSL with RealNetworks applicable * to this file, the RCSL. Please see the applicable RPSL or RCSL for * the rights, obligations and limitations governing use of the * contents of the file. * * Alternatively, the contents of this file may be used under the * terms of the GNU General Public License Version 2 or later (the * "GPL") in which case the provisions of the GPL are applicable * instead of those above. If you wish to allow use of your version of * this file only under the terms of the GPL, and not to allow others * to use your version of this file under the terms of either the RPSL * or RCSL, indicate your decision by deleting the provisions above * and replace them with the notice and other provisions required by * the GPL. If you do not delete the provisions above, a recipient may * use your version of this file under the terms of any one of the * RPSL, the RCSL or the GPL. * * This file is part of the Helix DNA Technology. RealNetworks is the * developer of the Original Code and owns the copyrights in the * portions it created. * * This file, and the files included with this file, is distributed * and made available on an 'AS IS' basis, WITHOUT WARRANTY OF ANY * KIND, EITHER EXPRESS OR IMPLIED, AND REALNETWORKS HEREBY DISCLAIMS * ALL SUCH WARRANTIES, INCLUDING WITHOUT LIMITATION, ANY WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET * ENJOYMENT OR NON-INFRINGEMENT. * * Technology Compatibility Kit Test Suite(s) Location: * http://www.helixcommunity.org/content/tck * * Contributor(s): * * ***** END LICENSE BLOCK ***** */ #ifndef HLXSYS_TIME_H #define HLXSYS_TIME_H #if defined(_SYMBIAN) # include <sys/time.h> #endif #if defined(WIN32_PLATFORM_PSPC) #if _WIN32_WCE < 420 # include "hxtypes.h" # include "hlxclib/windows.h" #else #include <time.h> #endif #elif !defined(WIN32_PLATFORM_PSPC) && !defined(_OPENWAVE) # include <time.h> #endif /* !defined(WIN32_PLATFORM_PSPC) && !defined(_OPENWAVE) */ #if defined(_OPENWAVE) # include "platform/openwave/hx_op_timeutil.h" #endif #if !defined(_REENTRANT) || defined(_WIN32) # include "hlxclib/string.h" #endif #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /******************************* * Types */ #if defined(_OPENWAVE) #define NO_TM_ISDST typedef U32 time_t; #define tm op_tm // XXXSAB any other way for 'struct tm' to // work in a C-includeable file? 
struct timeval { time_t tv_sec; time_t tv_usec; }; #elif defined(WIN32_PLATFORM_PSPC) #if _WIN32_WCE < 420 struct tm { int tm_sec; int tm_min; int tm_hour; int tm_mday; int tm_mon; int tm_year; int tm_wday; int tm_yday; int tm_isdst; }; #endif #define timezone _timezone extern long _timezone; #endif /* defined(WIN32_PLATFORM_PSPC) */ /******************************* * Helix declarations */ long __helix_time(long *t); struct tm* __helix_localtime(long* timep); void __helix_tzset(); long __helix_mktime(struct tm* tm); struct tm *__helix_gmtime(long *timep); int __helix_gettimeofday(struct timeval *tv, void *tz); char * __helix_ctime(long *timer); #if defined(_WINCE) char * __helix_asctime (struct tm *tm); /******************************* * platform specifics declarations */ _inline void _tzset() { __helix_tzset(); } // 4.2 has a time.h file, so we import that earlier // then we need to make our defs compatible with those // using long/unsigned long instead of time_t is bad // by using time_t here we don't care how it's defined // however, we leave the old defs alone for compatibility #if _WIN32_WCE >= 420 _inline char * ctime(const time_t *timp) { return __helix_ctime((long*)timp); } _inline char * asctime (const struct tm *tm) { return __helix_asctime((struct tm*)tm); } _inline time_t time(time_t *t) { return __helix_time((long *)t); } _inline time_t mktime(struct tm* tm) { return __helix_mktime(tm); } _inline struct tm* localtime(const time_t* timep) { return __helix_localtime((long *)timep); } _inline struct tm* gmtime(const time_t *timep) { return __helix_gmtime((long*)timep); } #else // WinCE < 4.2 doesn't include time.h and uses the older defs _inline char * ctime(time_t *timp) { return __helix_ctime((long*)timp); } _inline char * asctime (struct tm *tm) { return __helix_asctime(tm); } _inline struct tm* localtime(time_t* timep) { return __helix_localtime((long *)timep); } _inline long time(time_t *t) { return __helix_time((long *)t); } _inline long mktime(struct tm* tm) { return __helix_mktime(tm); } _inline struct tm* gmtime(time_t *timep) { return __helix_gmtime((long*)timep); } #endif #elif defined(_OPENWAVE) #define time(t) __helix_time(t) #define ctime(t) __helix_ctime(t) #define gmtime(t) __helix_gmtime(t) #define localtime(t) __helix_gmtime(t) // XXXSAB is there a _local_ time call? #define mktime(tm) __helix_mktime(tm) #define gettimeofday __helix_gettimeofday #define strftime op_strftime #endif /* defined(WIN32_PLATFORM_PSPC) */ #if defined(_REENTRANT) && !defined(_WIN32) #define hx_localtime_r localtime_r #define hx_gmtime_r gmtime_r #define hx_asctime_r asctime_r #define hx_ctime_r ctime_r //#define localtime NON_REENTRANT_localtime_CALLED //#define gmtime NON_REENTRANT_gmtime_CALLED //#define asctime NON_REENTRANT_asctime_CALLED //#define ctime NON_REENTRANT_ctime_CALLED #else #define hx_localtime_r(pClock,pRes) ((struct tm*)memcpy((pRes), localtime(pClock), sizeof(struct tm))) #define hx_gmtime_r(pClock,pRes) ((struct tm*)memcpy((pRes), gmtime(pClock), sizeof(struct tm))) #define hx_asctime_r(tm,pRes) ((char*)memcpy(pRes, asctime(tm), 26)) #define hx_ctime_r(pClock,pRes) ((char*)memcpy(pRes, asctime(localtime(pClock)), 26)) #endif /* _REENTRANT */ #ifdef __cplusplus }; #endif /* __cplusplus */ #endif /* HLXSYS_TIME_H */
repo_name: muromec/qtopia-ezx
path: src/3rdparty/libraries/helix/src/common/runtime/pub/hlxclib/time.h
language: C
license: gpl-2.0
size: 6,741
/* * kernel/sched.c * * Kernel scheduler and related syscalls * * Copyright (C) 1991-2002 Linus Torvalds * * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and * make semaphores SMP safe * 1998-11-19 Implemented schedule_timeout() and related stuff * by Andrea Arcangeli * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: * hybrid priority-list and round-robin design with * an array-switch method of distributing timeslices * and per-CPU runqueues. Cleanups and useful suggestions * by Davide Libenzi, preemptible kernel bits by Robert Love. * 2003-09-03 Interactivity tuning by Con Kolivas. * 2004-04-02 Scheduler domains code by Nick Piggin */ #include <linux/mm.h> #include <linux/module.h> #include <linux/nmi.h> #include <linux/init.h> #include <asm/uaccess.h> #include <linux/highmem.h> #include <linux/smp_lock.h> #include <asm/mmu_context.h> #include <linux/interrupt.h> #include <linux/capability.h> #include <linux/completion.h> #include <linux/kernel_stat.h> #include <linux/debug_locks.h> #include <linux/security.h> #include <linux/notifier.h> #include <linux/profile.h> #include <linux/freezer.h> #include <linux/vmalloc.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/smp.h> #include <linux/threads.h> #include <linux/timer.h> #include <linux/rcupdate.h> #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/percpu.h> #include <linux/kthread.h> #include <linux/seq_file.h> #include <linux/syscalls.h> #include <linux/times.h> #include <linux/tsacct_kern.h> #include <linux/kprobes.h> #include <linux/delayacct.h> #include <linux/reciprocal_div.h> #include <asm/tlb.h> #include <asm/unistd.h> /* * Scheduler clock - returns current time in nanosec units. * This is default implementation. * Architectures and sub-architectures can override this. */ unsigned long long __attribute__((weak)) sched_clock(void) { return (unsigned long long)jiffies * (1000000000 / HZ); } /* * Convert user-nice values [ -20 ... 0 ... 19 ] * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], * and back. */ #define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20) #define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20) #define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio) /* * 'User priority' is the nice value converted to something we * can work with better when scaling various scheduler parameters, * it's a [ 0 ... 39 ] range. */ #define USER_PRIO(p) ((p)-MAX_RT_PRIO) #define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) /* * Some helpers for converting nanosecond timing to jiffy resolution */ #define NS_TO_JIFFIES(TIME) ((TIME) / (1000000000 / HZ)) #define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ)) /* * These are the 'tuning knobs' of the scheduler: * * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger), * default timeslice is 100 msecs, maximum timeslice is 800 msecs. * Timeslices get refilled after they expire. */ #define MIN_TIMESLICE max(5 * HZ / 1000, 1) #define DEF_TIMESLICE (100 * HZ / 1000) #define ON_RUNQUEUE_WEIGHT 30 #define CHILD_PENALTY 95 #define PARENT_PENALTY 100 #define EXIT_WEIGHT 3 #define PRIO_BONUS_RATIO 25 #define MAX_BONUS (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100) #define INTERACTIVE_DELTA 2 #define MAX_SLEEP_AVG (DEF_TIMESLICE * MAX_BONUS) #define STARVATION_LIMIT (MAX_SLEEP_AVG) #define NS_MAX_SLEEP_AVG (JIFFIES_TO_NS(MAX_SLEEP_AVG)) /* * If a task is 'interactive' then we reinsert it in the active * array after it has expired its current timeslice. 
(it will not * continue to run immediately, it will still roundrobin with * other interactive tasks.) * * This part scales the interactivity limit depending on niceness. * * We scale it linearly, offset by the INTERACTIVE_DELTA delta. * Here are a few examples of different nice levels: * * TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0] * TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0] * TASK_INTERACTIVE( 0): [1,1,1,1,0,0,0,0,0,0,0] * TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0] * TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0] * * (the X axis represents the possible -5 ... 0 ... +5 dynamic * priority range a task can explore, a value of '1' means the * task is rated interactive.) * * Ie. nice +19 tasks can never get 'interactive' enough to be * reinserted into the active array. And only heavily CPU-hog nice -20 * tasks will be expired. Default nice 0 tasks are somewhere between, * it takes some effort for them to get interactive, but it's not * too hard. */ #define CURRENT_BONUS(p) \ (NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \ MAX_SLEEP_AVG) #define GRANULARITY (10 * HZ / 1000 ? : 1) #ifdef CONFIG_SMP #define TIMESLICE_GRANULARITY(p) (GRANULARITY * \ (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \ num_online_cpus()) #else #define TIMESLICE_GRANULARITY(p) (GRANULARITY * \ (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1))) #endif #define SCALE(v1,v1_max,v2_max) \ (v1) * (v2_max) / (v1_max) #define DELTA(p) \ (SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \ INTERACTIVE_DELTA) #define TASK_INTERACTIVE(p) \ ((p)->prio <= (p)->static_prio - DELTA(p)) #define INTERACTIVE_SLEEP(p) \ (JIFFIES_TO_NS(MAX_SLEEP_AVG * \ (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1)) #define TASK_PREEMPTS_CURR(p, rq) \ ((p)->prio < (rq)->curr->prio) #define SCALE_PRIO(x, prio) \ max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE) static unsigned int static_prio_timeslice(int static_prio) { if (static_prio < NICE_TO_PRIO(0)) return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio); else return SCALE_PRIO(DEF_TIMESLICE, static_prio); } #ifdef CONFIG_SMP /* * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) * Since cpu_power is a 'constant', we can use a reciprocal divide. */ static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load) { return reciprocal_divide(load, sg->reciprocal_cpu_power); } /* * Each time a sched group cpu_power is changed, * we must compute its reciprocal value */ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val) { sg->__cpu_power += val; sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power); } #endif /* * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ] * to time slice values: [800ms ... 100ms ... 5ms] * * The higher a thread's priority, the bigger timeslices * it gets during one round of execution. But even the lowest * priority thread gets MIN_TIMESLICE worth of execution time. */ static inline unsigned int task_timeslice(struct task_struct *p) { return static_prio_timeslice(p->static_prio); } /* * These are the runqueue data structures: */ struct prio_array { unsigned int nr_active; DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */ struct list_head queue[MAX_PRIO]; }; /* * This is the main, per-CPU runqueue data structure. * * Locking rule: those places that want to lock multiple runqueues * (such as the load balancing or the thread migration code), lock * acquire operations must be ordered by ascending &runqueue. 
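 *
 * A rough sketch of that rule (it is what the double_rq_lock() helper
 * further down implements): when two runqueues must be held at once,
 * always take the lower-addressed one first, e.g.
 *
 *	if (rq1 < rq2) {
 *		spin_lock(&rq1->lock);
 *		spin_lock(&rq2->lock);
 *	} else {
 *		spin_lock(&rq2->lock);
 *		spin_lock(&rq1->lock);
 *	}
 *
 * so that two CPUs locking the same pair can never acquire the locks
 * in opposite orders and deadlock against each other.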
*/ struct rq { spinlock_t lock; /* * nr_running and cpu_load should be in the same cacheline because * remote CPUs use both these fields when doing load calculation. */ unsigned long nr_running; unsigned long raw_weighted_load; #ifdef CONFIG_SMP unsigned long cpu_load[3]; unsigned char idle_at_tick; #ifdef CONFIG_NO_HZ unsigned char in_nohz_recently; #endif #endif unsigned long long nr_switches; /* * This is part of a global counter where only the total sum * over all CPUs matters. A task can increase this counter on * one CPU and if it got migrated afterwards it may decrease * it on another CPU. Always updated under the runqueue lock: */ unsigned long nr_uninterruptible; unsigned long expired_timestamp; /* Cached timestamp set by update_cpu_clock() */ unsigned long long most_recent_timestamp; struct task_struct *curr, *idle; unsigned long next_balance; struct mm_struct *prev_mm; struct prio_array *active, *expired, arrays[2]; int best_expired_prio; atomic_t nr_iowait; #ifdef CONFIG_SMP struct sched_domain *sd; /* For active balancing */ int active_balance; int push_cpu; int cpu; /* cpu of this runqueue */ struct task_struct *migration_thread; struct list_head migration_queue; #endif #ifdef CONFIG_SCHEDSTATS /* latency stats */ struct sched_info rq_sched_info; /* sys_sched_yield() stats */ unsigned long yld_exp_empty; unsigned long yld_act_empty; unsigned long yld_both_empty; unsigned long yld_cnt; /* schedule() stats */ unsigned long sched_switch; unsigned long sched_cnt; unsigned long sched_goidle; /* try_to_wake_up() stats */ unsigned long ttwu_cnt; unsigned long ttwu_local; #endif struct lock_class_key rq_lock_key; }; static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp; static DEFINE_MUTEX(sched_hotcpu_mutex); static inline int cpu_of(struct rq *rq) { #ifdef CONFIG_SMP return rq->cpu; #else return 0; #endif } /* * The domain tree (rq->sd) is protected by RCU's quiescent state transition. * See detach_destroy_domains: synchronize_sched for details. * * The domain tree of any CPU may only be accessed from within * preempt-disabled sections. 
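 *
 * For illustration, a typical walk over the domains of the current
 * CPU (a hypothetical caller, similar to what show_schedstat() does
 * further down) would look roughly like:
 *
 *	preempt_disable();
 *	for_each_domain(smp_processor_id(), sd) {
 *		... inspect sd->flags, sd->span, ...
 *	}
 *	preempt_enable();
 *
 * the preempt-disabled region is what makes the rcu_dereference() in
 * for_each_domain() safe.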
*/ #define for_each_domain(cpu, __sd) \ for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent) #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) #define this_rq() (&__get_cpu_var(runqueues)) #define task_rq(p) cpu_rq(task_cpu(p)) #define cpu_curr(cpu) (cpu_rq(cpu)->curr) #ifndef prepare_arch_switch # define prepare_arch_switch(next) do { } while (0) #endif #ifndef finish_arch_switch # define finish_arch_switch(prev) do { } while (0) #endif #ifndef __ARCH_WANT_UNLOCKED_CTXSW static inline int task_running(struct rq *rq, struct task_struct *p) { return rq->curr == p; } static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) { } static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) { #ifdef CONFIG_DEBUG_SPINLOCK /* this is a valid case when another task releases the spinlock */ rq->lock.owner = current; #endif /* * If we are tracking spinlock dependencies then we have to * fix up the runqueue lock - which gets 'carried over' from * prev into current: */ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); spin_unlock_irq(&rq->lock); } #else /* __ARCH_WANT_UNLOCKED_CTXSW */ static inline int task_running(struct rq *rq, struct task_struct *p) { #ifdef CONFIG_SMP return p->oncpu; #else return rq->curr == p; #endif } static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) { #ifdef CONFIG_SMP /* * We can optimise this out completely for !SMP, because the * SMP rebalancing from interrupt is the only thing that cares * here. */ next->oncpu = 1; #endif #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW spin_unlock_irq(&rq->lock); #else spin_unlock(&rq->lock); #endif } static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) { #ifdef CONFIG_SMP /* * After ->oncpu is cleared, the task can be moved to a different CPU. * We must ensure this doesn't happen until the switch is completely * finished. */ smp_wmb(); prev->oncpu = 0; #endif #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW local_irq_enable(); #endif } #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ /* * __task_rq_lock - lock the runqueue a given task resides on. * Must be called interrupts disabled. */ static inline struct rq *__task_rq_lock(struct task_struct *p) __acquires(rq->lock) { struct rq *rq; repeat_lock_task: rq = task_rq(p); spin_lock(&rq->lock); if (unlikely(rq != task_rq(p))) { spin_unlock(&rq->lock); goto repeat_lock_task; } return rq; } /* * task_rq_lock - lock the runqueue a given task resides on and disable * interrupts. Note the ordering: we can safely lookup the task_rq without * explicitly disabling preemption. 
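 *
 * A minimal usage sketch (hypothetical caller):
 *
 *	unsigned long flags;
 *	struct rq *rq = task_rq_lock(p, &flags);
 *	... p cannot change runqueues while rq->lock is held ...
 *	task_rq_unlock(rq, &flags);
 *
 * The retry loop below re-checks task_rq(p) after taking the lock, in
 * case the task was migrated between the lookup and the spin_lock().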
*/ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) __acquires(rq->lock) { struct rq *rq; repeat_lock_task: local_irq_save(*flags); rq = task_rq(p); spin_lock(&rq->lock); if (unlikely(rq != task_rq(p))) { spin_unlock_irqrestore(&rq->lock, *flags); goto repeat_lock_task; } return rq; } static inline void __task_rq_unlock(struct rq *rq) __releases(rq->lock) { spin_unlock(&rq->lock); } static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) __releases(rq->lock) { spin_unlock_irqrestore(&rq->lock, *flags); } #ifdef CONFIG_SCHEDSTATS /* * bump this up when changing the output format or the meaning of an existing * format, so that tools can adapt (or abort) */ #define SCHEDSTAT_VERSION 14 static int show_schedstat(struct seq_file *seq, void *v) { int cpu; seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION); seq_printf(seq, "timestamp %lu\n", jiffies); for_each_online_cpu(cpu) { struct rq *rq = cpu_rq(cpu); #ifdef CONFIG_SMP struct sched_domain *sd; int dcnt = 0; #endif /* runqueue-specific stats */ seq_printf(seq, "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu", cpu, rq->yld_both_empty, rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt, rq->sched_switch, rq->sched_cnt, rq->sched_goidle, rq->ttwu_cnt, rq->ttwu_local, rq->rq_sched_info.cpu_time, rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt); seq_printf(seq, "\n"); #ifdef CONFIG_SMP /* domain-specific stats */ preempt_disable(); for_each_domain(cpu, sd) { enum idle_type itype; char mask_str[NR_CPUS]; cpumask_scnprintf(mask_str, NR_CPUS, sd->span); seq_printf(seq, "domain%d %s", dcnt++, mask_str); for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES; itype++) { seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu " "%lu", sd->lb_cnt[itype], sd->lb_balanced[itype], sd->lb_failed[itype], sd->lb_imbalance[itype], sd->lb_gained[itype], sd->lb_hot_gained[itype], sd->lb_nobusyq[itype], sd->lb_nobusyg[itype]); } seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu" " %lu %lu %lu\n", sd->alb_cnt, sd->alb_failed, sd->alb_pushed, sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed, sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed, sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance); } preempt_enable(); #endif } return 0; } static int schedstat_open(struct inode *inode, struct file *file) { unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32); char *buf = kmalloc(size, GFP_KERNEL); struct seq_file *m; int res; if (!buf) return -ENOMEM; res = single_open(file, show_schedstat, NULL); if (!res) { m = file->private_data; m->buf = buf; m->size = size; } else kfree(buf); return res; } const struct file_operations proc_schedstat_operations = { .open = schedstat_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* * Expects runqueue lock to be held for atomicity of update */ static inline void rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies) { if (rq) { rq->rq_sched_info.run_delay += delta_jiffies; rq->rq_sched_info.pcnt++; } } /* * Expects runqueue lock to be held for atomicity of update */ static inline void rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies) { if (rq) rq->rq_sched_info.cpu_time += delta_jiffies; } # define schedstat_inc(rq, field) do { (rq)->field++; } while (0) # define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0) #else /* !CONFIG_SCHEDSTATS */ static inline void rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies) {} static inline void rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies) 
{} # define schedstat_inc(rq, field) do { } while (0) # define schedstat_add(rq, field, amt) do { } while (0) #endif /* * this_rq_lock - lock this runqueue and disable interrupts. */ static inline struct rq *this_rq_lock(void) __acquires(rq->lock) { struct rq *rq; local_irq_disable(); rq = this_rq(); spin_lock(&rq->lock); return rq; } #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) /* * Called when a process is dequeued from the active array and given * the cpu. We should note that with the exception of interactive * tasks, the expired queue will become the active queue after the active * queue is empty, without explicitly dequeuing and requeuing tasks in the * expired queue. (Interactive tasks may be requeued directly to the * active queue, thus delaying tasks in the expired queue from running; * see scheduler_tick()). * * This function is only called from sched_info_arrive(), rather than * dequeue_task(). Even though a task may be queued and dequeued multiple * times as it is shuffled about, we're really interested in knowing how * long it was from the *first* time it was queued to the time that it * finally hit a cpu. */ static inline void sched_info_dequeued(struct task_struct *t) { t->sched_info.last_queued = 0; } /* * Called when a task finally hits the cpu. We can now calculate how * long it was waiting to run. We also note when it began so that we * can keep stats on how long its timeslice is. */ static void sched_info_arrive(struct task_struct *t) { unsigned long now = jiffies, delta_jiffies = 0; if (t->sched_info.last_queued) delta_jiffies = now - t->sched_info.last_queued; sched_info_dequeued(t); t->sched_info.run_delay += delta_jiffies; t->sched_info.last_arrival = now; t->sched_info.pcnt++; rq_sched_info_arrive(task_rq(t), delta_jiffies); } /* * Called when a process is queued into either the active or expired * array. The time is noted and later used to determine how long we * had to wait for us to reach the cpu. Since the expired queue will * become the active queue after active queue is empty, without dequeuing * and requeuing any tasks, we are interested in queuing to either. It * is unusual but not impossible for tasks to be dequeued and immediately * requeued in the same or another array: this can happen in sched_yield(), * set_user_nice(), and even load_balance() as it moves tasks from runqueue * to runqueue. * * This function is only called from enqueue_task(), but also only updates * the timestamp if it is already not set. It's assumed that * sched_info_dequeued() will clear that stamp when appropriate. */ static inline void sched_info_queued(struct task_struct *t) { if (unlikely(sched_info_on())) if (!t->sched_info.last_queued) t->sched_info.last_queued = jiffies; } /* * Called when a process ceases being the active-running process, either * voluntarily or involuntarily. Now we can calculate how long we ran. */ static inline void sched_info_depart(struct task_struct *t) { unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival; t->sched_info.cpu_time += delta_jiffies; rq_sched_info_depart(task_rq(t), delta_jiffies); } /* * Called when tasks are switched involuntarily due, typically, to expiring * their time slice. (This may also be called when switching to or from * the idle task.) We are only called when prev != next. */ static inline void __sched_info_switch(struct task_struct *prev, struct task_struct *next) { struct rq *rq = task_rq(prev); /* * prev now departs the cpu. 
It's not interesting to record * stats about how efficient we were at scheduling the idle * process, however. */ if (prev != rq->idle) sched_info_depart(prev); if (next != rq->idle) sched_info_arrive(next); } static inline void sched_info_switch(struct task_struct *prev, struct task_struct *next) { if (unlikely(sched_info_on())) __sched_info_switch(prev, next); } #else #define sched_info_queued(t) do { } while (0) #define sched_info_switch(t, next) do { } while (0) #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */ /* * Adding/removing a task to/from a priority array: */ static void dequeue_task(struct task_struct *p, struct prio_array *array) { array->nr_active--; list_del(&p->run_list); if (list_empty(array->queue + p->prio)) __clear_bit(p->prio, array->bitmap); } static void enqueue_task(struct task_struct *p, struct prio_array *array) { sched_info_queued(p); list_add_tail(&p->run_list, array->queue + p->prio); __set_bit(p->prio, array->bitmap); array->nr_active++; p->array = array; } /* * Put task to the end of the run list without the overhead of dequeue * followed by enqueue. */ static void requeue_task(struct task_struct *p, struct prio_array *array) { list_move_tail(&p->run_list, array->queue + p->prio); } static inline void enqueue_task_head(struct task_struct *p, struct prio_array *array) { list_add(&p->run_list, array->queue + p->prio); __set_bit(p->prio, array->bitmap); array->nr_active++; p->array = array; } /* * __normal_prio - return the priority that is based on the static * priority but is modified by bonuses/penalties. * * We scale the actual sleep average [0 .... MAX_SLEEP_AVG] * into the -5 ... 0 ... +5 bonus/penalty range. * * We use 25% of the full 0...39 priority range so that: * * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs. * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks. * * Both properties are important to certain workloads. */ static inline int __normal_prio(struct task_struct *p) { int bonus, prio; bonus = CURRENT_BONUS(p) - MAX_BONUS / 2; prio = p->static_prio - bonus; if (prio < MAX_RT_PRIO) prio = MAX_RT_PRIO; if (prio > MAX_PRIO-1) prio = MAX_PRIO-1; return prio; } /* * To aid in avoiding the subversion of "niceness" due to uneven distribution * of tasks with abnormal "nice" values across CPUs the contribution that * each task makes to its run queue's load is weighted according to its * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a * scaled version of the new time slice allocation that they receive on time * slice expiry etc. */ /* * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE * If static_prio_timeslice() is ever changed to break this assumption then * this code will need modification */ #define TIME_SLICE_NICE_ZERO DEF_TIMESLICE #define LOAD_WEIGHT(lp) \ (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO) #define PRIO_TO_LOAD_WEIGHT(prio) \ LOAD_WEIGHT(static_prio_timeslice(prio)) #define RTPRIO_TO_LOAD_WEIGHT(rp) \ (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp)) static void set_load_weight(struct task_struct *p) { if (has_rt_policy(p)) { #ifdef CONFIG_SMP if (p == task_rq(p)->migration_thread) /* * The migration thread does the actual balancing. * Giving its load any weight will skew balancing * adversely. 
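 *
 * (For comparison with the zero weight used here: a SCHED_NORMAL
 *  nice-0 task has static_prio_timeslice() == DEF_TIMESLICE, so
 *  PRIO_TO_LOAD_WEIGHT(NICE_TO_PRIO(0)) == SCHED_LOAD_SCALE, i.e.
 *  every nice-0 task contributes exactly one SCHED_LOAD_SCALE unit
 *  to its runqueue's raw_weighted_load.)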
*/ p->load_weight = 0; else #endif p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority); } else p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio); } static inline void inc_raw_weighted_load(struct rq *rq, const struct task_struct *p) { rq->raw_weighted_load += p->load_weight; } static inline void dec_raw_weighted_load(struct rq *rq, const struct task_struct *p) { rq->raw_weighted_load -= p->load_weight; } static inline void inc_nr_running(struct task_struct *p, struct rq *rq) { rq->nr_running++; inc_raw_weighted_load(rq, p); } static inline void dec_nr_running(struct task_struct *p, struct rq *rq) { rq->nr_running--; dec_raw_weighted_load(rq, p); } /* * Calculate the expected normal priority: i.e. priority * without taking RT-inheritance into account. Might be * boosted by interactivity modifiers. Changes upon fork, * setprio syscalls, and whenever the interactivity * estimator recalculates. */ static inline int normal_prio(struct task_struct *p) { int prio; if (has_rt_policy(p)) prio = MAX_RT_PRIO-1 - p->rt_priority; else prio = __normal_prio(p); return prio; } /* * Calculate the current priority, i.e. the priority * taken into account by the scheduler. This value might * be boosted by RT tasks, or might be boosted by * interactivity modifiers. Will be RT if the task got * RT-boosted. If not then it returns p->normal_prio. */ static int effective_prio(struct task_struct *p) { p->normal_prio = normal_prio(p); /* * If we are RT tasks or we were boosted to RT priority, * keep the priority unchanged. Otherwise, update priority * to the normal priority: */ if (!rt_prio(p->prio)) return p->normal_prio; return p->prio; } /* * __activate_task - move a task to the runqueue. */ static void __activate_task(struct task_struct *p, struct rq *rq) { struct prio_array *target = rq->active; if (batch_task(p)) target = rq->expired; enqueue_task(p, target); inc_nr_running(p, rq); } /* * __activate_idle_task - move idle task to the _front_ of runqueue. */ static inline void __activate_idle_task(struct task_struct *p, struct rq *rq) { enqueue_task_head(p, rq->active); inc_nr_running(p, rq); } /* * Recalculate p->normal_prio and p->prio after having slept, * updating the sleep-average too: */ static int recalc_task_prio(struct task_struct *p, unsigned long long now) { /* Caller must always ensure 'now >= p->timestamp' */ unsigned long sleep_time = now - p->timestamp; if (batch_task(p)) sleep_time = 0; if (likely(sleep_time > 0)) { /* * This ceiling is set to the lowest priority that would allow * a task to be reinserted into the active array on timeslice * completion. */ unsigned long ceiling = INTERACTIVE_SLEEP(p); if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) { /* * Prevents user tasks from achieving best priority * with one single large enough sleep. */ p->sleep_avg = ceiling; /* * Using INTERACTIVE_SLEEP() as a ceiling places a * nice(0) task 1ms sleep away from promotion, and * gives it 700ms to round-robin with no chance of * being demoted. This is more than generous, so * mark this sleep as non-interactive to prevent the * on-runqueue bonus logic from intervening should * this task not receive cpu immediately. 
*/ p->sleep_type = SLEEP_NONINTERACTIVE; } else { /* * Tasks waking from uninterruptible sleep are * limited in their sleep_avg rise as they * are likely to be waiting on I/O */ if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) { if (p->sleep_avg >= ceiling) sleep_time = 0; else if (p->sleep_avg + sleep_time >= ceiling) { p->sleep_avg = ceiling; sleep_time = 0; } } /* * This code gives a bonus to interactive tasks. * * The boost works by updating the 'average sleep time' * value here, based on ->timestamp. The more time a * task spends sleeping, the higher the average gets - * and the higher the priority boost gets as well. */ p->sleep_avg += sleep_time; } if (p->sleep_avg > NS_MAX_SLEEP_AVG) p->sleep_avg = NS_MAX_SLEEP_AVG; } return effective_prio(p); } /* * activate_task - move a task to the runqueue and do priority recalculation * * Update all the scheduling statistics stuff. (sleep average * calculation, priority modifiers, etc.) */ static void activate_task(struct task_struct *p, struct rq *rq, int local) { unsigned long long now; if (rt_task(p)) goto out; now = sched_clock(); #ifdef CONFIG_SMP if (!local) { /* Compensate for drifting sched_clock */ struct rq *this_rq = this_rq(); now = (now - this_rq->most_recent_timestamp) + rq->most_recent_timestamp; } #endif /* * Sleep time is in units of nanosecs, so shift by 20 to get a * milliseconds-range estimation of the amount of time that the task * spent sleeping: */ if (unlikely(prof_on == SLEEP_PROFILING)) { if (p->state == TASK_UNINTERRUPTIBLE) profile_hits(SLEEP_PROFILING, (void *)get_wchan(p), (now - p->timestamp) >> 20); } p->prio = recalc_task_prio(p, now); /* * This checks to make sure it's not an uninterruptible task * that is now waking up. */ if (p->sleep_type == SLEEP_NORMAL) { /* * Tasks which were woken up by interrupts (ie. hw events) * are most likely of interactive nature. So we give them * the credit of extending their sleep time to the period * of time they spend on the runqueue, waiting for execution * on a CPU, first time around: */ if (in_interrupt()) p->sleep_type = SLEEP_INTERRUPTED; else { /* * Normal first-time wakeups get a credit too for * on-runqueue time, but it will be weighted down: */ p->sleep_type = SLEEP_INTERACTIVE; } } p->timestamp = now; out: __activate_task(p, rq); } /* * deactivate_task - remove a task from the runqueue. */ static void deactivate_task(struct task_struct *p, struct rq *rq) { dec_nr_running(p, rq); dequeue_task(p, p->array); p->array = NULL; } /* * resched_task - mark a task 'to be rescheduled now'. * * On UP this means the setting of the need_resched flag, on SMP it * might also involve a cross-CPU call to trigger the scheduler on * the target CPU. 
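 *
 * Roughly, the SMP path below boils down to:
 *
 *	set_tsk_thread_flag(p, TIF_NEED_RESCHED);
 *	smp_mb();
 *	if (!tsk_is_polling(p))
 *		smp_send_reschedule(task_cpu(p));
 *
 * i.e. the cross-CPU interrupt is skipped when the target CPU sits in
 * a polling idle loop that will notice TIF_NEED_RESCHED by itself.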
*/ #ifdef CONFIG_SMP #ifndef tsk_is_polling #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) #endif static void resched_task(struct task_struct *p) { int cpu; assert_spin_locked(&task_rq(p)->lock); if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED))) return; set_tsk_thread_flag(p, TIF_NEED_RESCHED); cpu = task_cpu(p); if (cpu == smp_processor_id()) return; /* NEED_RESCHED must be visible before we test polling */ smp_mb(); if (!tsk_is_polling(p)) smp_send_reschedule(cpu); } static void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long flags; if (!spin_trylock_irqsave(&rq->lock, flags)) return; resched_task(cpu_curr(cpu)); spin_unlock_irqrestore(&rq->lock, flags); } #else static inline void resched_task(struct task_struct *p) { assert_spin_locked(&task_rq(p)->lock); set_tsk_need_resched(p); } #endif /** * task_curr - is this task currently executing on a CPU? * @p: the task in question. */ inline int task_curr(const struct task_struct *p) { return cpu_curr(task_cpu(p)) == p; } /* Used instead of source_load when we know the type == 0 */ unsigned long weighted_cpuload(const int cpu) { return cpu_rq(cpu)->raw_weighted_load; } #ifdef CONFIG_SMP struct migration_req { struct list_head list; struct task_struct *task; int dest_cpu; struct completion done; }; /* * The task's runqueue lock must be held. * Returns true if you have to wait for migration thread. */ static int migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req) { struct rq *rq = task_rq(p); /* * If the task is not on a runqueue (and not running), then * it is sufficient to simply update the task's cpu field. */ if (!p->array && !task_running(rq, p)) { set_task_cpu(p, dest_cpu); return 0; } init_completion(&req->done); req->task = p; req->dest_cpu = dest_cpu; list_add(&req->list, &rq->migration_queue); return 1; } /* * wait_task_inactive - wait for a thread to unschedule. * * The caller must ensure that the task *will* unschedule sometime soon, * else this function might spin for a *long* time. This function can't * be called with interrupts off, or it may introduce deadlock with * smp_call_function() if an IPI is sent by the same process we are * waiting to become inactive. */ void wait_task_inactive(struct task_struct *p) { unsigned long flags; struct rq *rq; struct prio_array *array; int running; repeat: /* * We do the initial early heuristics without holding * any task-queue locks at all. We'll only try to get * the runqueue lock when things look like they will * work out! */ rq = task_rq(p); /* * If the task is actively running on another CPU * still, just relax and busy-wait without holding * any locks. * * NOTE! Since we don't hold any locks, it's not * even sure that "rq" stays as the right runqueue! * But we don't care, since "task_running()" will * return false if the runqueue has changed and p * is actually now running somewhere else! */ while (task_running(rq, p)) cpu_relax(); /* * Ok, time to look more closely! We need the rq * lock now, to be *sure*. If we're wrong, we'll * just go back and repeat. */ rq = task_rq_lock(p, &flags); running = task_running(rq, p); array = p->array; task_rq_unlock(rq, &flags); /* * Was it really running after all now that we * checked with the proper locks actually held? * * Oops. Go back and try again.. */ if (unlikely(running)) { cpu_relax(); goto repeat; } /* * It's not enough that it's not actively running, * it must be off the runqueue _entirely_, and not * preempted! 
* * So if it wa still runnable (but just not actively * running right now), it's preempted, and we should * yield - it could be a while. */ if (unlikely(array)) { yield(); goto repeat; } /* * Ahh, all good. It wasn't running, and it wasn't * runnable, which means that it will never become * running in the future either. We're all done! */ } /*** * kick_process - kick a running thread to enter/exit the kernel * @p: the to-be-kicked thread * * Cause a process which is running on another CPU to enter * kernel-mode, without any delay. (to get signals handled.) * * NOTE: this function doesnt have to take the runqueue lock, * because all it wants to ensure is that the remote task enters * the kernel. If the IPI races and the task has been migrated * to another CPU then no harm is done and the purpose has been * achieved as well. */ void kick_process(struct task_struct *p) { int cpu; preempt_disable(); cpu = task_cpu(p); if ((cpu != smp_processor_id()) && task_curr(p)) smp_send_reschedule(cpu); preempt_enable(); } /* * Return a low guess at the load of a migration-source cpu weighted * according to the scheduling class and "nice" value. * * We want to under-estimate the load of migration sources, to * balance conservatively. */ static inline unsigned long source_load(int cpu, int type) { struct rq *rq = cpu_rq(cpu); if (type == 0) return rq->raw_weighted_load; return min(rq->cpu_load[type-1], rq->raw_weighted_load); } /* * Return a high guess at the load of a migration-target cpu weighted * according to the scheduling class and "nice" value. */ static inline unsigned long target_load(int cpu, int type) { struct rq *rq = cpu_rq(cpu); if (type == 0) return rq->raw_weighted_load; return max(rq->cpu_load[type-1], rq->raw_weighted_load); } /* * Return the average load per task on the cpu's run queue */ static inline unsigned long cpu_avg_load_per_task(int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long n = rq->nr_running; return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE; } /* * find_idlest_group finds and returns the least busy CPU group within the * domain. */ static struct sched_group * find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) { struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups; unsigned long min_load = ULONG_MAX, this_load = 0; int load_idx = sd->forkexec_idx; int imbalance = 100 + (sd->imbalance_pct-100)/2; do { unsigned long load, avg_load; int local_group; int i; /* Skip over this group if it has no CPUs allowed */ if (!cpus_intersects(group->cpumask, p->cpus_allowed)) goto nextgroup; local_group = cpu_isset(this_cpu, group->cpumask); /* Tally up the load of all CPUs in the group */ avg_load = 0; for_each_cpu_mask(i, group->cpumask) { /* Bias balancing toward cpus of our domain */ if (local_group) load = source_load(i, load_idx); else load = target_load(i, load_idx); avg_load += load; } /* Adjust by relative CPU power of the group */ avg_load = sg_div_cpu_power(group, avg_load * SCHED_LOAD_SCALE); if (local_group) { this_load = avg_load; this = group; } else if (avg_load < min_load) { min_load = avg_load; idlest = group; } nextgroup: group = group->next; } while (group != sd->groups); if (!idlest || 100*this_load < imbalance*min_load) return NULL; return idlest; } /* * find_idlest_cpu - find the idlest cpu among the cpus in group. 
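 *
 * (A rough sketch of how this pairs with find_idlest_group() above,
 *  as used by sched_balance_self() below:
 *
 *	group = find_idlest_group(sd, p, cpu);
 *	new_cpu = find_idlest_cpu(group, p, cpu);
 *
 *  first the least loaded group in the domain is chosen, then the CPU
 *  with the smallest weighted_cpuload() within that group.)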
*/ static int find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) { cpumask_t tmp; unsigned long load, min_load = ULONG_MAX; int idlest = -1; int i; /* Traverse only the allowed CPUs */ cpus_and(tmp, group->cpumask, p->cpus_allowed); for_each_cpu_mask(i, tmp) { load = weighted_cpuload(i); if (load < min_load || (load == min_load && i == this_cpu)) { min_load = load; idlest = i; } } return idlest; } /* * sched_balance_self: balance the current task (running on cpu) in domains * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and * SD_BALANCE_EXEC. * * Balance, ie. select the least loaded group. * * Returns the target CPU number, or the same CPU if no balancing is needed. * * preempt must be disabled. */ static int sched_balance_self(int cpu, int flag) { struct task_struct *t = current; struct sched_domain *tmp, *sd = NULL; for_each_domain(cpu, tmp) { /* * If power savings logic is enabled for a domain, stop there. */ if (tmp->flags & SD_POWERSAVINGS_BALANCE) break; if (tmp->flags & flag) sd = tmp; } while (sd) { cpumask_t span; struct sched_group *group; int new_cpu, weight; if (!(sd->flags & flag)) { sd = sd->child; continue; } span = sd->span; group = find_idlest_group(sd, t, cpu); if (!group) { sd = sd->child; continue; } new_cpu = find_idlest_cpu(group, t, cpu); if (new_cpu == -1 || new_cpu == cpu) { /* Now try balancing at a lower domain level of cpu */ sd = sd->child; continue; } /* Now try balancing at a lower domain level of new_cpu */ cpu = new_cpu; sd = NULL; weight = cpus_weight(span); for_each_domain(cpu, tmp) { if (weight <= cpus_weight(tmp->span)) break; if (tmp->flags & flag) sd = tmp; } /* while loop will break here if sd == NULL */ } return cpu; } #endif /* CONFIG_SMP */ /* * wake_idle() will wake a task on an idle cpu if task->cpu is * not idle and an idle cpu is available. The span of cpus to * search starts with cpus closest then further out as needed, * so we always favor a closer, idle cpu. * * Returns the CPU we should wake onto. */ #if defined(ARCH_HAS_SCHED_WAKE_IDLE) static int wake_idle(int cpu, struct task_struct *p) { cpumask_t tmp; struct sched_domain *sd; int i; /* * If it is idle, then it is the best cpu to run this task. * * This cpu is also the best, if it has more than one task already. * Siblings must be also busy(in most cases) as they didn't already * pickup the extra load from this cpu and hence we need not check * sibling runqueue info. This will avoid the checks and cache miss * penalities associated with that. */ if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1) return cpu; for_each_domain(cpu, sd) { if (sd->flags & SD_WAKE_IDLE) { cpus_and(tmp, sd->span, p->cpus_allowed); for_each_cpu_mask(i, tmp) { if (idle_cpu(i)) return i; } } else break; } return cpu; } #else static inline int wake_idle(int cpu, struct task_struct *p) { return cpu; } #endif /*** * try_to_wake_up - wake up a thread * @p: the to-be-woken-up thread * @state: the mask of task states that can be woken * @sync: do a synchronous wakeup? * * Put it on the run-queue if it's not already there. The "current" * thread is always on the run-queue (except when the actual * re-schedule is in progress), and as such you're allowed to do * the simpler "current->state = TASK_RUNNING" to mark yourself * runnable without the overhead of this. * * returns failure only if the task is already active. 
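 *
 * For illustration, the canonical sleeping-side pattern this pairs
 * with (using a hypothetical 'condition' flag) is:
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * while the waker sets 'condition' and then calls wake_up_process(),
 * the try_to_wake_up() wrapper defined further down.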
*/ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) { int cpu, this_cpu, success = 0; unsigned long flags; long old_state; struct rq *rq; #ifdef CONFIG_SMP struct sched_domain *sd, *this_sd = NULL; unsigned long load, this_load; int new_cpu; #endif rq = task_rq_lock(p, &flags); old_state = p->state; if (!(old_state & state)) goto out; if (p->array) goto out_running; cpu = task_cpu(p); this_cpu = smp_processor_id(); #ifdef CONFIG_SMP if (unlikely(task_running(rq, p))) goto out_activate; new_cpu = cpu; schedstat_inc(rq, ttwu_cnt); if (cpu == this_cpu) { schedstat_inc(rq, ttwu_local); goto out_set_cpu; } for_each_domain(this_cpu, sd) { if (cpu_isset(cpu, sd->span)) { schedstat_inc(sd, ttwu_wake_remote); this_sd = sd; break; } } if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) goto out_set_cpu; /* * Check for affine wakeup and passive balancing possibilities. */ if (this_sd) { int idx = this_sd->wake_idx; unsigned int imbalance; imbalance = 100 + (this_sd->imbalance_pct - 100) / 2; load = source_load(cpu, idx); this_load = target_load(this_cpu, idx); new_cpu = this_cpu; /* Wake to this CPU if we can */ if (this_sd->flags & SD_WAKE_AFFINE) { unsigned long tl = this_load; unsigned long tl_per_task; tl_per_task = cpu_avg_load_per_task(this_cpu); /* * If sync wakeup then subtract the (maximum possible) * effect of the currently running task from the load * of the current CPU: */ if (sync) tl -= current->load_weight; if ((tl <= load && tl + target_load(cpu, idx) <= tl_per_task) || 100*(tl + p->load_weight) <= imbalance*load) { /* * This domain has SD_WAKE_AFFINE and * p is cache cold in this domain, and * there is no bad imbalance. */ schedstat_inc(this_sd, ttwu_move_affine); goto out_set_cpu; } } /* * Start passive balancing when half the imbalance_pct * limit is reached. */ if (this_sd->flags & SD_WAKE_BALANCE) { if (imbalance*this_load <= 100*load) { schedstat_inc(this_sd, ttwu_move_balance); goto out_set_cpu; } } } new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */ out_set_cpu: new_cpu = wake_idle(new_cpu, p); if (new_cpu != cpu) { set_task_cpu(p, new_cpu); task_rq_unlock(rq, &flags); /* might preempt at this point */ rq = task_rq_lock(p, &flags); old_state = p->state; if (!(old_state & state)) goto out; if (p->array) goto out_running; this_cpu = smp_processor_id(); cpu = task_cpu(p); } out_activate: #endif /* CONFIG_SMP */ if (old_state == TASK_UNINTERRUPTIBLE) { rq->nr_uninterruptible--; /* * Tasks on involuntary sleep don't earn * sleep_avg beyond just interactive state. */ p->sleep_type = SLEEP_NONINTERACTIVE; } else /* * Tasks that have marked their sleep as noninteractive get * woken up with their sleep average not weighted in an * interactive way. */ if (old_state & TASK_NONINTERACTIVE) p->sleep_type = SLEEP_NONINTERACTIVE; activate_task(p, rq, cpu == this_cpu); /* * Sync wakeups (i.e. those types of wakeups where the waker * has indicated that it will leave the CPU in short order) * don't trigger a preemption, if the woken up task will run on * this cpu. (in this case the 'I will reschedule' promise of * the waker guarantees that the freshly woken up task is going * to be considered on this CPU.) 
*/ if (!sync || cpu != this_cpu) { if (TASK_PREEMPTS_CURR(p, rq)) resched_task(rq->curr); } success = 1; out_running: p->state = TASK_RUNNING; out: task_rq_unlock(rq, &flags); return success; } int fastcall wake_up_process(struct task_struct *p) { return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); } EXPORT_SYMBOL(wake_up_process); int fastcall wake_up_state(struct task_struct *p, unsigned int state) { return try_to_wake_up(p, state, 0); } static void task_running_tick(struct rq *rq, struct task_struct *p); /* * Perform scheduler related setup for a newly forked process p. * p is forked by current. */ void fastcall sched_fork(struct task_struct *p, int clone_flags) { int cpu = get_cpu(); #ifdef CONFIG_SMP cpu = sched_balance_self(cpu, SD_BALANCE_FORK); #endif set_task_cpu(p, cpu); /* * We mark the process as running here, but have not actually * inserted it onto the runqueue yet. This guarantees that * nobody will actually run it, and a signal or other external * event cannot wake it up and insert it on the runqueue either. */ p->state = TASK_RUNNING; /* * Make sure we do not leak PI boosting priority to the child: */ p->prio = current->normal_prio; INIT_LIST_HEAD(&p->run_list); p->array = NULL; #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) if (unlikely(sched_info_on())) memset(&p->sched_info, 0, sizeof(p->sched_info)); #endif #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) p->oncpu = 0; #endif #ifdef CONFIG_PREEMPT /* Want to start with kernel preemption disabled. */ task_thread_info(p)->preempt_count = 1; #endif /* * Share the timeslice between parent and child, thus the * total amount of pending timeslices in the system doesn't change, * resulting in more scheduling fairness. */ local_irq_disable(); p->time_slice = (current->time_slice + 1) >> 1; /* * The remainder of the first timeslice might be recovered by * the parent if the child exits early enough. */ p->first_time_slice = 1; current->time_slice >>= 1; p->timestamp = sched_clock(); if (unlikely(!current->time_slice)) { /* * This case is rare, it happens when the parent has only * a single jiffy left from its timeslice. Taking the * runqueue lock is not a problem. */ current->time_slice = 1; task_running_tick(cpu_rq(cpu), current); } local_irq_enable(); put_cpu(); } /* * wake_up_new_task - wake up a newly created task for the first time. * * This function will do some initial scheduler statistics housekeeping * that must be done for every newly created context, then puts the task * on the runqueue and wakes it. */ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) { struct rq *rq, *this_rq; unsigned long flags; int this_cpu, cpu; rq = task_rq_lock(p, &flags); BUG_ON(p->state != TASK_RUNNING); this_cpu = smp_processor_id(); cpu = task_cpu(p); /* * We decrease the sleep average of forking parents * and children as well, to keep max-interactive tasks * from forking tasks that are max-interactive. The parent * (current) is done further down, under its lock. */ p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) * CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS); p->prio = effective_prio(p); if (likely(cpu == this_cpu)) { if (!(clone_flags & CLONE_VM)) { /* * The VM isn't cloned, so we're in a good position to * do child-runs-first in anticipation of an exec. This * usually avoids a lot of COW overhead. 
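 *
 * (The classic case is fork() immediately followed by exec() in the
 *  child: letting the child run first means it replaces its address
 *  space before the parent writes to, and would otherwise have to
 *  copy, the pages the two still share copy-on-write.)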
*/ if (unlikely(!current->array)) __activate_task(p, rq); else { p->prio = current->prio; p->normal_prio = current->normal_prio; list_add_tail(&p->run_list, &current->run_list); p->array = current->array; p->array->nr_active++; inc_nr_running(p, rq); } set_need_resched(); } else /* Run child last */ __activate_task(p, rq); /* * We skip the following code due to cpu == this_cpu * * task_rq_unlock(rq, &flags); * this_rq = task_rq_lock(current, &flags); */ this_rq = rq; } else { this_rq = cpu_rq(this_cpu); /* * Not the local CPU - must adjust timestamp. This should * get optimised away in the !CONFIG_SMP case. */ p->timestamp = (p->timestamp - this_rq->most_recent_timestamp) + rq->most_recent_timestamp; __activate_task(p, rq); if (TASK_PREEMPTS_CURR(p, rq)) resched_task(rq->curr); /* * Parent and child are on different CPUs, now get the * parent runqueue to update the parent's ->sleep_avg: */ task_rq_unlock(rq, &flags); this_rq = task_rq_lock(current, &flags); } current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) * PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS); task_rq_unlock(this_rq, &flags); } /* * Potentially available exiting-child timeslices are * retrieved here - this way the parent does not get * penalized for creating too many threads. * * (this cannot be used to 'generate' timeslices * artificially, because any timeslice recovered here * was given away by the parent in the first place.) */ void fastcall sched_exit(struct task_struct *p) { unsigned long flags; struct rq *rq; /* * If the child was a (relative-) CPU hog then decrease * the sleep_avg of the parent as well. */ rq = task_rq_lock(p->parent, &flags); if (p->first_time_slice && task_cpu(p) == task_cpu(p->parent)) { p->parent->time_slice += p->time_slice; if (unlikely(p->parent->time_slice > task_timeslice(p))) p->parent->time_slice = task_timeslice(p); } if (p->sleep_avg < p->parent->sleep_avg) p->parent->sleep_avg = p->parent->sleep_avg / (EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg / (EXIT_WEIGHT + 1); task_rq_unlock(rq, &flags); } /** * prepare_task_switch - prepare to switch tasks * @rq: the runqueue preparing to switch * @next: the task we are going to switch to. * * This is called with the rq lock held and interrupts off. It must * be paired with a subsequent finish_task_switch after the context * switch. * * prepare_task_switch sets up locking and calls architecture specific * hooks. */ static inline void prepare_task_switch(struct rq *rq, struct task_struct *next) { prepare_lock_switch(rq, next); prepare_arch_switch(next); } /** * finish_task_switch - clean up after a task-switch * @rq: runqueue associated with task-switch * @prev: the thread we just switched away from. * * finish_task_switch must be called after the context switch, paired * with a prepare_task_switch call before the context switch. * finish_task_switch will reconcile locking set up by prepare_task_switch, * and do any other architecture-specific cleanup actions. * * Note that we may have delayed dropping an mm in context_switch(). If * so, we finish that here outside of the runqueue lock. (Doing it * with the lock held can cause deadlocks; see schedule() for * details.) */ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev) __releases(rq->lock) { struct mm_struct *mm = rq->prev_mm; long prev_state; rq->prev_mm = NULL; /* * A task struct has one reference for the use as "current". * If a task dies, then it sets TASK_DEAD in tsk->state and calls * schedule one last time. 
The schedule call will never return, and * the scheduled task must drop that reference. * The test for TASK_DEAD must occur while the runqueue locks are * still held, otherwise prev could be scheduled on another cpu, die * there before we look at prev->state, and then the reference would * be dropped twice. * Manfred Spraul <manfred@colorfullife.com> */ prev_state = prev->state; finish_arch_switch(prev); finish_lock_switch(rq, prev); if (mm) mmdrop(mm); if (unlikely(prev_state == TASK_DEAD)) { /* * Remove function-return probe instances associated with this * task and put them back on the free list. */ kprobe_flush_task(prev); put_task_struct(prev); } } /** * schedule_tail - first thing a freshly forked thread must call. * @prev: the thread we just switched away from. */ asmlinkage void schedule_tail(struct task_struct *prev) __releases(rq->lock) { struct rq *rq = this_rq(); finish_task_switch(rq, prev); #ifdef __ARCH_WANT_UNLOCKED_CTXSW /* In this case, finish_task_switch does not reenable preemption */ preempt_enable(); #endif if (current->set_child_tid) put_user(current->pid, current->set_child_tid); } /* * context_switch - switch to the new MM and the new * thread's register state. */ static inline struct task_struct * context_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) { struct mm_struct *mm = next->mm; struct mm_struct *oldmm = prev->active_mm; /* * For paravirt, this is coupled with an exit in switch_to to * combine the page table reload and the switch backend into * one hypercall. */ arch_enter_lazy_cpu_mode(); if (!mm) { next->active_mm = oldmm; atomic_inc(&oldmm->mm_count); enter_lazy_tlb(oldmm, next); } else switch_mm(oldmm, mm, next); if (!prev->mm) { prev->active_mm = NULL; WARN_ON(rq->prev_mm); rq->prev_mm = oldmm; } /* * Since the runqueue lock will be released by the next * task (which is an invalid locking op but in the case * of the scheduler it's an obvious special-case), so we * do an early lockdep release here: */ #ifndef __ARCH_WANT_UNLOCKED_CTXSW spin_release(&rq->lock.dep_map, 1, _THIS_IP_); #endif /* Here we just switch the register state and the stack. */ switch_to(prev, next, prev); return prev; } /* * nr_running, nr_uninterruptible and nr_context_switches: * * externally visible scheduler statistics: current number of runnable * threads, current number of uninterruptible-sleeping threads, total * number of context switches performed since bootup. */ unsigned long nr_running(void) { unsigned long i, sum = 0; for_each_online_cpu(i) sum += cpu_rq(i)->nr_running; return sum; } unsigned long nr_uninterruptible(void) { unsigned long i, sum = 0; for_each_possible_cpu(i) sum += cpu_rq(i)->nr_uninterruptible; /* * Since we read the counters lockless, it might be slightly * inaccurate. 
Do not allow it to go below zero though: */ if (unlikely((long)sum < 0)) sum = 0; return sum; } unsigned long long nr_context_switches(void) { int i; unsigned long long sum = 0; for_each_possible_cpu(i) sum += cpu_rq(i)->nr_switches; return sum; } unsigned long nr_iowait(void) { unsigned long i, sum = 0; for_each_possible_cpu(i) sum += atomic_read(&cpu_rq(i)->nr_iowait); return sum; } unsigned long nr_active(void) { unsigned long i, running = 0, uninterruptible = 0; for_each_online_cpu(i) { running += cpu_rq(i)->nr_running; uninterruptible += cpu_rq(i)->nr_uninterruptible; } if (unlikely((long)uninterruptible < 0)) uninterruptible = 0; return running + uninterruptible; } #ifdef CONFIG_SMP /* * Is this task likely cache-hot: */ static inline int task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd) { return (long long)(now - p->last_ran) < (long long)sd->cache_hot_time; } /* * double_rq_lock - safely lock two runqueues * * Note this does not disable interrupts like task_rq_lock, * you need to do so manually before calling. */ static void double_rq_lock(struct rq *rq1, struct rq *rq2) __acquires(rq1->lock) __acquires(rq2->lock) { BUG_ON(!irqs_disabled()); if (rq1 == rq2) { spin_lock(&rq1->lock); __acquire(rq2->lock); /* Fake it out ;) */ } else { if (rq1 < rq2) { spin_lock(&rq1->lock); spin_lock(&rq2->lock); } else { spin_lock(&rq2->lock); spin_lock(&rq1->lock); } } } /* * double_rq_unlock - safely unlock two runqueues * * Note this does not restore interrupts like task_rq_unlock, * you need to do so manually after calling. */ static void double_rq_unlock(struct rq *rq1, struct rq *rq2) __releases(rq1->lock) __releases(rq2->lock) { spin_unlock(&rq1->lock); if (rq1 != rq2) spin_unlock(&rq2->lock); else __release(rq2->lock); } /* * double_lock_balance - lock the busiest runqueue, this_rq is locked already. */ static void double_lock_balance(struct rq *this_rq, struct rq *busiest) __releases(this_rq->lock) __acquires(busiest->lock) __acquires(this_rq->lock) { if (unlikely(!irqs_disabled())) { /* printk() doesn't work good under rq->lock */ spin_unlock(&this_rq->lock); BUG_ON(1); } if (unlikely(!spin_trylock(&busiest->lock))) { if (busiest < this_rq) { spin_unlock(&this_rq->lock); spin_lock(&busiest->lock); spin_lock(&this_rq->lock); } else spin_lock(&busiest->lock); } } /* * If dest_cpu is allowed for this process, migrate the task to it. * This is accomplished by forcing the cpu_allowed mask to only * allow dest_cpu, which will force the cpu onto dest_cpu. Then * the cpu_allowed mask is restored. */ static void sched_migrate_task(struct task_struct *p, int dest_cpu) { struct migration_req req; unsigned long flags; struct rq *rq; rq = task_rq_lock(p, &flags); if (!cpu_isset(dest_cpu, p->cpus_allowed) || unlikely(cpu_is_offline(dest_cpu))) goto out; /* force the process onto the specified CPU */ if (migrate_task(p, dest_cpu, &req)) { /* Need to wait for migration thread (might exit: take ref). */ struct task_struct *mt = rq->migration_thread; get_task_struct(mt); task_rq_unlock(rq, &flags); wake_up_process(mt); put_task_struct(mt); wait_for_completion(&req.done); return; } out: task_rq_unlock(rq, &flags); } /* * sched_exec - execve() is a valuable balancing opportunity, because at * this point the task has the smallest effective memory and cache footprint. 
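 *
 * (Invoked from the execve() path; the actual balancing below is
 *  sched_balance_self(this_cpu, SD_BALANCE_EXEC), i.e. only domains
 *  with SD_BALANCE_EXEC set are considered, followed by
 *  sched_migrate_task() if a less loaded CPU was found.)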
*/ void sched_exec(void) { int new_cpu, this_cpu = get_cpu(); new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC); put_cpu(); if (new_cpu != this_cpu) sched_migrate_task(current, new_cpu); } /* * pull_task - move a task from a remote runqueue to the local runqueue. * Both runqueues must be locked. */ static void pull_task(struct rq *src_rq, struct prio_array *src_array, struct task_struct *p, struct rq *this_rq, struct prio_array *this_array, int this_cpu) { dequeue_task(p, src_array); dec_nr_running(p, src_rq); set_task_cpu(p, this_cpu); inc_nr_running(p, this_rq); enqueue_task(p, this_array); p->timestamp = (p->timestamp - src_rq->most_recent_timestamp) + this_rq->most_recent_timestamp; /* * Note that idle threads have a prio of MAX_PRIO, for this test * to be always true for them. */ if (TASK_PREEMPTS_CURR(p, this_rq)) resched_task(this_rq->curr); } /* * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? */ static int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, struct sched_domain *sd, enum idle_type idle, int *all_pinned) { /* * We do not migrate tasks that are: * 1) running (obviously), or * 2) cannot be migrated to this CPU due to cpus_allowed, or * 3) are cache-hot on their current CPU. */ if (!cpu_isset(this_cpu, p->cpus_allowed)) return 0; *all_pinned = 0; if (task_running(rq, p)) return 0; /* * Aggressive migration if: * 1) task is cache cold, or * 2) too many balance attempts have failed. */ if (sd->nr_balance_failed > sd->cache_nice_tries) { #ifdef CONFIG_SCHEDSTATS if (task_hot(p, rq->most_recent_timestamp, sd)) schedstat_inc(sd, lb_hot_gained[idle]); #endif return 1; } if (task_hot(p, rq->most_recent_timestamp, sd)) return 0; return 1; } #define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio) /* * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted * load from busiest to this_rq, as part of a balancing operation within * "domain". Returns the number of tasks moved. * * Called with both runqueues locked. */ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long max_nr_move, unsigned long max_load_move, struct sched_domain *sd, enum idle_type idle, int *all_pinned) { int idx, pulled = 0, pinned = 0, this_best_prio, best_prio, best_prio_seen, skip_for_load; struct prio_array *array, *dst_array; struct list_head *head, *curr; struct task_struct *tmp; long rem_load_move; if (max_nr_move == 0 || max_load_move == 0) goto out; rem_load_move = max_load_move; pinned = 1; this_best_prio = rq_best_prio(this_rq); best_prio = rq_best_prio(busiest); /* * Enable handling of the case where there is more than one task * with the best priority. If the current running task is one * of those with prio==best_prio we know it won't be moved * and therefore it's safe to override the skip (based on load) of * any task we find with that prio. */ best_prio_seen = best_prio == busiest->curr->prio; /* * We first consider expired tasks. Those will likely not be * executed in the near future, and they are most likely to * be cache-cold, thus switching CPUs has the least effect * on them. 
*/ if (busiest->expired->nr_active) { array = busiest->expired; dst_array = this_rq->expired; } else { array = busiest->active; dst_array = this_rq->active; } new_array: /* Start searching at priority 0: */ idx = 0; skip_bitmap: if (!idx) idx = sched_find_first_bit(array->bitmap); else idx = find_next_bit(array->bitmap, MAX_PRIO, idx); if (idx >= MAX_PRIO) { if (array == busiest->expired && busiest->active->nr_active) { array = busiest->active; dst_array = this_rq->active; goto new_array; } goto out; } head = array->queue + idx; curr = head->prev; skip_queue: tmp = list_entry(curr, struct task_struct, run_list); curr = curr->prev; /* * To help distribute high priority tasks accross CPUs we don't * skip a task if it will be the highest priority task (i.e. smallest * prio value) on its new queue regardless of its load weight */ skip_for_load = tmp->load_weight > rem_load_move; if (skip_for_load && idx < this_best_prio) skip_for_load = !best_prio_seen && idx == best_prio; if (skip_for_load || !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) { best_prio_seen |= idx == best_prio; if (curr != head) goto skip_queue; idx++; goto skip_bitmap; } pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu); pulled++; rem_load_move -= tmp->load_weight; /* * We only want to steal up to the prescribed number of tasks * and the prescribed amount of weighted load. */ if (pulled < max_nr_move && rem_load_move > 0) { if (idx < this_best_prio) this_best_prio = idx; if (curr != head) goto skip_queue; idx++; goto skip_bitmap; } out: /* * Right now, this is the only place pull_task() is called, * so we can safely collect pull_task() stats here rather than * inside pull_task(). */ schedstat_add(sd, lb_gained[idle], pulled); if (all_pinned) *all_pinned = pinned; return pulled; } /* * find_busiest_group finds and returns the busiest CPU group within the * domain. It calculates and returns the amount of weighted load which * should be moved to restore balance via the imbalance parameter. 
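 *
 * A rough worked example, assuming two single-CPU groups that both
 * have __cpu_power == SCHED_LOAD_SCALE: if the busiest CPU runs three
 * nice-0 tasks (load 3*SCHED_LOAD_SCALE) and this CPU runs one
 * (load SCHED_LOAD_SCALE), the domain-wide average load is
 * 2*SCHED_LOAD_SCALE, max_pull works out to SCHED_LOAD_SCALE, and the
 * returned *imbalance is SCHED_LOAD_SCALE as well, i.e. exactly one
 * nice-0 task's worth of weighted load should be pulled over.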
*/ static struct sched_group * find_busiest_group(struct sched_domain *sd, int this_cpu, unsigned long *imbalance, enum idle_type idle, int *sd_idle, cpumask_t *cpus, int *balance) { struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; unsigned long max_load, avg_load, total_load, this_load, total_pwr; unsigned long max_pull; unsigned long busiest_load_per_task, busiest_nr_running; unsigned long this_load_per_task, this_nr_running; int load_idx; #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) int power_savings_balance = 1; unsigned long leader_nr_running = 0, min_load_per_task = 0; unsigned long min_nr_running = ULONG_MAX; struct sched_group *group_min = NULL, *group_leader = NULL; #endif max_load = this_load = total_load = total_pwr = 0; busiest_load_per_task = busiest_nr_running = 0; this_load_per_task = this_nr_running = 0; if (idle == NOT_IDLE) load_idx = sd->busy_idx; else if (idle == NEWLY_IDLE) load_idx = sd->newidle_idx; else load_idx = sd->idle_idx; do { unsigned long load, group_capacity; int local_group; int i; unsigned int balance_cpu = -1, first_idle_cpu = 0; unsigned long sum_nr_running, sum_weighted_load; local_group = cpu_isset(this_cpu, group->cpumask); if (local_group) balance_cpu = first_cpu(group->cpumask); /* Tally up the load of all CPUs in the group */ sum_weighted_load = sum_nr_running = avg_load = 0; for_each_cpu_mask(i, group->cpumask) { struct rq *rq; if (!cpu_isset(i, *cpus)) continue; rq = cpu_rq(i); if (*sd_idle && !idle_cpu(i)) *sd_idle = 0; /* Bias balancing toward cpus of our domain */ if (local_group) { if (idle_cpu(i) && !first_idle_cpu) { first_idle_cpu = 1; balance_cpu = i; } load = target_load(i, load_idx); } else load = source_load(i, load_idx); avg_load += load; sum_nr_running += rq->nr_running; sum_weighted_load += rq->raw_weighted_load; } /* * First idle cpu or the first cpu(busiest) in this sched group * is eligible for doing load balancing at this and above * domains. */ if (local_group && balance_cpu != this_cpu && balance) { *balance = 0; goto ret; } total_load += avg_load; total_pwr += group->__cpu_power; /* Adjust by relative CPU power of the group */ avg_load = sg_div_cpu_power(group, avg_load * SCHED_LOAD_SCALE); group_capacity = group->__cpu_power / SCHED_LOAD_SCALE; if (local_group) { this_load = avg_load; this = group; this_nr_running = sum_nr_running; this_load_per_task = sum_weighted_load; } else if (avg_load > max_load && sum_nr_running > group_capacity) { max_load = avg_load; busiest = group; busiest_nr_running = sum_nr_running; busiest_load_per_task = sum_weighted_load; } #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) /* * Busy processors will not participate in power savings * balance. */ if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE)) goto group_next; /* * If the local group is idle or completely loaded * no need to do power savings balance at this domain */ if (local_group && (this_nr_running >= group_capacity || !this_nr_running)) power_savings_balance = 0; /* * If a group is already running at full capacity or idle, * don't include that group in power savings calculations */ if (!power_savings_balance || sum_nr_running >= group_capacity || !sum_nr_running) goto group_next; /* * Calculate the group which has the least non-idle load. 
* This is the group from where we need to pick up the load * for saving power */ if ((sum_nr_running < min_nr_running) || (sum_nr_running == min_nr_running && first_cpu(group->cpumask) < first_cpu(group_min->cpumask))) { group_min = group; min_nr_running = sum_nr_running; min_load_per_task = sum_weighted_load / sum_nr_running; } /* * Calculate the group which is almost near its * capacity but still has some space to pick up some load * from other group and save more power */ if (sum_nr_running <= group_capacity - 1) { if (sum_nr_running > leader_nr_running || (sum_nr_running == leader_nr_running && first_cpu(group->cpumask) > first_cpu(group_leader->cpumask))) { group_leader = group; leader_nr_running = sum_nr_running; } } group_next: #endif group = group->next; } while (group != sd->groups); if (!busiest || this_load >= max_load || busiest_nr_running == 0) goto out_balanced; avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr; if (this_load >= avg_load || 100*max_load <= sd->imbalance_pct*this_load) goto out_balanced; busiest_load_per_task /= busiest_nr_running; /* * We're trying to get all the cpus to the average_load, so we don't * want to push ourselves above the average load, nor do we wish to * reduce the max loaded cpu below the average load, as either of these * actions would just result in more rebalancing later, and ping-pong * tasks around. Thus we look for the minimum possible imbalance. * Negative imbalances (*we* are more loaded than anyone else) will * be counted as no imbalance for these purposes -- we can't fix that * by pulling tasks to us. Be careful of negative numbers as they'll * appear as very large values with unsigned longs. */ if (max_load <= busiest_load_per_task) goto out_balanced; /* * In the presence of smp nice balancing, certain scenarios can have * max load less than avg load(as we skip the groups at or below * its cpu_power, while calculating max_load..) */ if (max_load < avg_load) { *imbalance = 0; goto small_imbalance; } /* Don't want to pull so many tasks that a group would go idle */ max_pull = min(max_load - avg_load, max_load - busiest_load_per_task); /* How much load to actually move to equalise the imbalance */ *imbalance = min(max_pull * busiest->__cpu_power, (avg_load - this_load) * this->__cpu_power) / SCHED_LOAD_SCALE; /* * if *imbalance is less than the average load per runnable task * there is no gaurantee that any tasks will be moved so we'll have * a think about bumping its value to force at least one task to be * moved */ if (*imbalance < busiest_load_per_task) { unsigned long tmp, pwr_now, pwr_move; unsigned int imbn; small_imbalance: pwr_move = pwr_now = 0; imbn = 2; if (this_nr_running) { this_load_per_task /= this_nr_running; if (busiest_load_per_task > this_load_per_task) imbn = 1; } else this_load_per_task = SCHED_LOAD_SCALE; if (max_load - this_load >= busiest_load_per_task * imbn) { *imbalance = busiest_load_per_task; return busiest; } /* * OK, we don't have enough imbalance to justify moving tasks, * however we may be able to increase total CPU power used by * moving them. 
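		 *
		 * In effect the pwr_now/pwr_move arithmetic below asks:
		 * "would one more task here keep more CPU usefully busy?"
		 * With, say, one nice-0 task local and two on the busiest
		 * CPU (illustrative weights of 1024 each), both sides
		 * already saturate a CPU, pwr_move ends up no larger than
		 * pwr_now, and we bail out instead of ping-ponging a task
		 * around for no throughput gain.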
*/ pwr_now += busiest->__cpu_power * min(busiest_load_per_task, max_load); pwr_now += this->__cpu_power * min(this_load_per_task, this_load); pwr_now /= SCHED_LOAD_SCALE; /* Amount of load we'd subtract */ tmp = sg_div_cpu_power(busiest, busiest_load_per_task * SCHED_LOAD_SCALE); if (max_load > tmp) pwr_move += busiest->__cpu_power * min(busiest_load_per_task, max_load - tmp); /* Amount of load we'd add */ if (max_load * busiest->__cpu_power < busiest_load_per_task * SCHED_LOAD_SCALE) tmp = sg_div_cpu_power(this, max_load * busiest->__cpu_power); else tmp = sg_div_cpu_power(this, busiest_load_per_task * SCHED_LOAD_SCALE); pwr_move += this->__cpu_power * min(this_load_per_task, this_load + tmp); pwr_move /= SCHED_LOAD_SCALE; /* Move if we gain throughput */ if (pwr_move <= pwr_now) goto out_balanced; *imbalance = busiest_load_per_task; } return busiest; out_balanced: #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE)) goto ret; if (this == group_leader && group_leader != group_min) { *imbalance = min_load_per_task; return group_min; } #endif ret: *imbalance = 0; return NULL; } /* * find_busiest_queue - find the busiest runqueue among the cpus in group. */ static struct rq * find_busiest_queue(struct sched_group *group, enum idle_type idle, unsigned long imbalance, cpumask_t *cpus) { struct rq *busiest = NULL, *rq; unsigned long max_load = 0; int i; for_each_cpu_mask(i, group->cpumask) { if (!cpu_isset(i, *cpus)) continue; rq = cpu_rq(i); if (rq->nr_running == 1 && rq->raw_weighted_load > imbalance) continue; if (rq->raw_weighted_load > max_load) { max_load = rq->raw_weighted_load; busiest = rq; } } return busiest; } /* * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but * so long as it is large enough. */ #define MAX_PINNED_INTERVAL 512 static inline unsigned long minus_1_or_zero(unsigned long n) { return n > 0 ? n - 1 : 0; } /* * Check this_cpu to ensure it is balanced within domain. Attempt to move * tasks if there is an imbalance. */ static int load_balance(int this_cpu, struct rq *this_rq, struct sched_domain *sd, enum idle_type idle, int *balance) { int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; struct sched_group *group; unsigned long imbalance; struct rq *busiest; cpumask_t cpus = CPU_MASK_ALL; unsigned long flags; /* * When power savings policy is enabled for the parent domain, idle * sibling can pick up load irrespective of busy siblings. In this case, * let the state of idle sibling percolate up as IDLE, instead of * portraying it as NOT_IDLE. */ if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER && !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) sd_idle = 1; schedstat_inc(sd, lb_cnt[idle]); redo: group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle, &cpus, balance); if (*balance == 0) goto out_balanced; if (!group) { schedstat_inc(sd, lb_nobusyg[idle]); goto out_balanced; } busiest = find_busiest_queue(group, idle, imbalance, &cpus); if (!busiest) { schedstat_inc(sd, lb_nobusyq[idle]); goto out_balanced; } BUG_ON(busiest == this_rq); schedstat_add(sd, lb_imbalance[idle], imbalance); nr_moved = 0; if (busiest->nr_running > 1) { /* * Attempt to move tasks. If find_busiest_group has found * an imbalance but busiest->nr_running <= 1, the group is * still unbalanced. nr_moved simply stays zero, so it is * correctly treated as an imbalance. 
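		 *
		 * double_rq_lock() takes the two runqueue locks in a fixed
		 * (ascending address) order with local interrupts off, so
		 * two CPUs balancing towards each other cannot deadlock on
		 * each other's rq->lock.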
*/ local_irq_save(flags); double_rq_lock(this_rq, busiest); nr_moved = move_tasks(this_rq, this_cpu, busiest, minus_1_or_zero(busiest->nr_running), imbalance, sd, idle, &all_pinned); double_rq_unlock(this_rq, busiest); local_irq_restore(flags); /* * some other cpu did the load balance for us. */ if (nr_moved && this_cpu != smp_processor_id()) resched_cpu(this_cpu); /* All tasks on this runqueue were pinned by CPU affinity */ if (unlikely(all_pinned)) { cpu_clear(cpu_of(busiest), cpus); if (!cpus_empty(cpus)) goto redo; goto out_balanced; } } if (!nr_moved) { schedstat_inc(sd, lb_failed[idle]); sd->nr_balance_failed++; if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { spin_lock_irqsave(&busiest->lock, flags); /* don't kick the migration_thread, if the curr * task on busiest cpu can't be moved to this_cpu */ if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { spin_unlock_irqrestore(&busiest->lock, flags); all_pinned = 1; goto out_one_pinned; } if (!busiest->active_balance) { busiest->active_balance = 1; busiest->push_cpu = this_cpu; active_balance = 1; } spin_unlock_irqrestore(&busiest->lock, flags); if (active_balance) wake_up_process(busiest->migration_thread); /* * We've kicked active balancing, reset the failure * counter. */ sd->nr_balance_failed = sd->cache_nice_tries+1; } } else sd->nr_balance_failed = 0; if (likely(!active_balance)) { /* We were unbalanced, so reset the balancing interval */ sd->balance_interval = sd->min_interval; } else { /* * If we've begun active balancing, start to back off. This * case may not be covered by the all_pinned logic if there * is only 1 task on the busy runqueue (because we don't call * move_tasks). */ if (sd->balance_interval < sd->max_interval) sd->balance_interval *= 2; } if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER && !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) return -1; return nr_moved; out_balanced: schedstat_inc(sd, lb_balanced[idle]); sd->nr_balance_failed = 0; out_one_pinned: /* tune up the balancing interval */ if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) || (sd->balance_interval < sd->max_interval)) sd->balance_interval *= 2; if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) return -1; return 0; } /* * Check this_cpu to ensure it is balanced within domain. Attempt to move * tasks if there is an imbalance. * * Called from schedule when this_rq is about to become idle (NEWLY_IDLE). * this_rq is locked. */ static int load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) { struct sched_group *group; struct rq *busiest = NULL; unsigned long imbalance; int nr_moved = 0; int sd_idle = 0; cpumask_t cpus = CPU_MASK_ALL; /* * When power savings policy is enabled for the parent domain, idle * sibling can pick up load irrespective of busy siblings. In this case, * let the state of idle sibling percolate up as IDLE, instead of * portraying it as NOT_IDLE. 
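	 *
	 * Unlike the periodic path, we are invoked from schedule() with
	 * this_rq already locked, which is why the body below uses
	 * double_lock_balance() rather than double_rq_lock(), and why the
	 * work done here must stay cheap: it sits directly on the
	 * context-switch path of a CPU that is about to go idle.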
*/ if (sd->flags & SD_SHARE_CPUPOWER && !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) sd_idle = 1; schedstat_inc(sd, lb_cnt[NEWLY_IDLE]); redo: group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE, &sd_idle, &cpus, NULL); if (!group) { schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]); goto out_balanced; } busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance, &cpus); if (!busiest) { schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]); goto out_balanced; } BUG_ON(busiest == this_rq); schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance); nr_moved = 0; if (busiest->nr_running > 1) { /* Attempt to move tasks */ double_lock_balance(this_rq, busiest); nr_moved = move_tasks(this_rq, this_cpu, busiest, minus_1_or_zero(busiest->nr_running), imbalance, sd, NEWLY_IDLE, NULL); spin_unlock(&busiest->lock); if (!nr_moved) { cpu_clear(cpu_of(busiest), cpus); if (!cpus_empty(cpus)) goto redo; } } if (!nr_moved) { schedstat_inc(sd, lb_failed[NEWLY_IDLE]); if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) return -1; } else sd->nr_balance_failed = 0; return nr_moved; out_balanced: schedstat_inc(sd, lb_balanced[NEWLY_IDLE]); if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) return -1; sd->nr_balance_failed = 0; return 0; } /* * idle_balance is called by schedule() if this_cpu is about to become * idle. Attempts to pull tasks from other CPUs. */ static void idle_balance(int this_cpu, struct rq *this_rq) { struct sched_domain *sd; int pulled_task = 0; unsigned long next_balance = jiffies + 60 * HZ; for_each_domain(this_cpu, sd) { unsigned long interval; if (!(sd->flags & SD_LOAD_BALANCE)) continue; if (sd->flags & SD_BALANCE_NEWIDLE) /* If we've pulled tasks over stop searching: */ pulled_task = load_balance_newidle(this_cpu, this_rq, sd); interval = msecs_to_jiffies(sd->balance_interval); if (time_after(next_balance, sd->last_balance + interval)) next_balance = sd->last_balance + interval; if (pulled_task) break; } if (!pulled_task) /* * We are going idle. next_balance may be set based on * a busy processor. So reset next_balance. */ this_rq->next_balance = next_balance; } /* * active_load_balance is run by migration threads. It pushes running tasks * off the busiest CPU onto idle CPUs. It requires at least 1 task to be * running on each physical CPU where possible, and avoids physical / * logical imbalances. * * Called with busiest_rq locked. */ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) { int target_cpu = busiest_rq->push_cpu; struct sched_domain *sd; struct rq *target_rq; /* Is there any task to move? */ if (busiest_rq->nr_running <= 1) return; target_rq = cpu_rq(target_cpu); /* * This condition is "impossible", if it occurs * we need to fix it. Originally reported by * Bjorn Helgaas on a 128-cpu setup. */ BUG_ON(busiest_rq == target_rq); /* move a task from busiest_rq to target_rq */ double_lock_balance(busiest_rq, target_rq); /* Search for an sd spanning us and the target CPU. 
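	 *
	 * The walk below goes from the lowest domain level upwards and
	 * stops at the first one whose span covers both CPUs (e.g. the
	 * SMT domain for two siblings of one core, or a physical/node
	 * level domain for a cross-package push), so the alb_* statistics
	 * are charged to the level that actually covers the move.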
*/ for_each_domain(target_cpu, sd) { if ((sd->flags & SD_LOAD_BALANCE) && cpu_isset(busiest_cpu, sd->span)) break; } if (likely(sd)) { schedstat_inc(sd, alb_cnt); if (move_tasks(target_rq, target_cpu, busiest_rq, 1, RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE, NULL)) schedstat_inc(sd, alb_pushed); else schedstat_inc(sd, alb_failed); } spin_unlock(&target_rq->lock); } static void update_load(struct rq *this_rq) { unsigned long this_load; unsigned int i, scale; this_load = this_rq->raw_weighted_load; /* Update our load: */ for (i = 0, scale = 1; i < 3; i++, scale += scale) { unsigned long old_load, new_load; /* scale is effectively 1 << i now, and >> i divides by scale */ old_load = this_rq->cpu_load[i]; new_load = this_load; /* * Round up the averaging division if load is increasing. This * prevents us from getting stuck on 9 if the load is 10, for * example. */ if (new_load > old_load) new_load += scale-1; this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i; } } #ifdef CONFIG_NO_HZ static struct { atomic_t load_balancer; cpumask_t cpu_mask; } nohz ____cacheline_aligned = { .load_balancer = ATOMIC_INIT(-1), .cpu_mask = CPU_MASK_NONE, }; /* * This routine will try to nominate the ilb (idle load balancing) * owner among the cpus whose ticks are stopped. ilb owner will do the idle * load balancing on behalf of all those cpus. If all the cpus in the system * go into this tickless mode, then there will be no ilb owner (as there is * no need for one) and all the cpus will sleep till the next wakeup event * arrives... * * For the ilb owner, tick is not stopped. And this tick will be used * for idle load balancing. ilb owner will still be part of * nohz.cpu_mask.. * * While stopping the tick, this cpu will become the ilb owner if there * is no other owner. And will be the owner till that cpu becomes busy * or if all cpus in the system stop their ticks at which point * there is no need for ilb owner. * * When the ilb owner becomes busy, it nominates another owner, during the * next busy scheduler_tick() */ int select_nohz_load_balancer(int stop_tick) { int cpu = smp_processor_id(); if (stop_tick) { cpu_set(cpu, nohz.cpu_mask); cpu_rq(cpu)->in_nohz_recently = 1; /* * If we are going offline and still the leader, give up! */ if (cpu_is_offline(cpu) && atomic_read(&nohz.load_balancer) == cpu) { if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) BUG(); return 0; } /* time for ilb owner also to sleep */ if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) { if (atomic_read(&nohz.load_balancer) == cpu) atomic_set(&nohz.load_balancer, -1); return 0; } if (atomic_read(&nohz.load_balancer) == -1) { /* make me the ilb owner */ if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1) return 1; } else if (atomic_read(&nohz.load_balancer) == cpu) return 1; } else { if (!cpu_isset(cpu, nohz.cpu_mask)) return 0; cpu_clear(cpu, nohz.cpu_mask); if (atomic_read(&nohz.load_balancer) == cpu) if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) BUG(); } return 0; } #endif static DEFINE_SPINLOCK(balancing); /* * It checks each scheduling domain to see if it is due to be balanced, * and initiates a balancing operation if so. * * Balancing parameters are set up in arch_init_sched_domains. 
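 *
 * For a feel of the time scales involved (defaults differ per
 * architecture and per domain level, so treat these numbers as purely
 * illustrative): a 1ms balance_interval stays 1ms while the CPU is
 * idle, but is multiplied by busy_factor when it is busy, so a
 * busy_factor of 64 stretches the busy-case interval to 64ms.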
*/ static inline void rebalance_domains(int cpu, enum idle_type idle) { int balance = 1; struct rq *rq = cpu_rq(cpu); unsigned long interval; struct sched_domain *sd; /* Earliest time when we have to do rebalance again */ unsigned long next_balance = jiffies + 60*HZ; for_each_domain(cpu, sd) { if (!(sd->flags & SD_LOAD_BALANCE)) continue; interval = sd->balance_interval; if (idle != SCHED_IDLE) interval *= sd->busy_factor; /* scale ms to jiffies */ interval = msecs_to_jiffies(interval); if (unlikely(!interval)) interval = 1; if (sd->flags & SD_SERIALIZE) { if (!spin_trylock(&balancing)) goto out; } if (time_after_eq(jiffies, sd->last_balance + interval)) { if (load_balance(cpu, rq, sd, idle, &balance)) { /* * We've pulled tasks over so either we're no * longer idle, or one of our SMT siblings is * not idle. */ idle = NOT_IDLE; } sd->last_balance = jiffies; } if (sd->flags & SD_SERIALIZE) spin_unlock(&balancing); out: if (time_after(next_balance, sd->last_balance + interval)) next_balance = sd->last_balance + interval; /* * Stop the load balance at this level. There is another * CPU in our sched group which is doing load balancing more * actively. */ if (!balance) break; } rq->next_balance = next_balance; } /* * run_rebalance_domains is triggered when needed from the scheduler tick. * In CONFIG_NO_HZ case, the idle load balance owner will do the * rebalancing for all the cpus for whom scheduler ticks are stopped. */ static void run_rebalance_domains(struct softirq_action *h) { int local_cpu = smp_processor_id(); struct rq *local_rq = cpu_rq(local_cpu); enum idle_type idle = local_rq->idle_at_tick ? SCHED_IDLE : NOT_IDLE; rebalance_domains(local_cpu, idle); #ifdef CONFIG_NO_HZ /* * If this cpu is the owner for idle load balancing, then do the * balancing on behalf of the other idle cpus whose ticks are * stopped. */ if (local_rq->idle_at_tick && atomic_read(&nohz.load_balancer) == local_cpu) { cpumask_t cpus = nohz.cpu_mask; struct rq *rq; int balance_cpu; cpu_clear(local_cpu, cpus); for_each_cpu_mask(balance_cpu, cpus) { /* * If this cpu gets work to do, stop the load balancing * work being done for other cpus. Next load * balancing owner will pick it up. */ if (need_resched()) break; rebalance_domains(balance_cpu, SCHED_IDLE); rq = cpu_rq(balance_cpu); if (time_after(local_rq->next_balance, rq->next_balance)) local_rq->next_balance = rq->next_balance; } } #endif } /* * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. * * In case of CONFIG_NO_HZ, this is the place where we nominate a new * idle load balancing owner or decide to stop the periodic load balancing, * if the whole system is idle. */ static inline void trigger_load_balance(int cpu) { struct rq *rq = cpu_rq(cpu); #ifdef CONFIG_NO_HZ /* * If we were in the nohz mode recently and busy at the current * scheduler tick, then check if we need to nominate new idle * load balancer. */ if (rq->in_nohz_recently && !rq->idle_at_tick) { rq->in_nohz_recently = 0; if (atomic_read(&nohz.load_balancer) == cpu) { cpu_clear(cpu, nohz.cpu_mask); atomic_set(&nohz.load_balancer, -1); } if (atomic_read(&nohz.load_balancer) == -1) { /* * simple selection for now: Nominate the * first cpu in the nohz list to be the next * ilb owner. * * TBD: Traverse the sched domains and nominate * the nearest cpu in the nohz.cpu_mask. */ int ilb = first_cpu(nohz.cpu_mask); if (ilb != NR_CPUS) resched_cpu(ilb); } } /* * If this cpu is idle and doing idle load balancing for all the * cpus with ticks stopped, is it time for that to stop? 
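	 *
	 * If every online CPU, including us as the idle load balancing
	 * owner, now has its tick stopped, there is nobody left to
	 * balance on behalf of; the resched_cpu() below sends us back
	 * through the idle path so that select_nohz_load_balancer() can
	 * drop the ownership and let the whole system go tickless.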
*/ if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && cpus_weight(nohz.cpu_mask) == num_online_cpus()) { resched_cpu(cpu); return; } /* * If this cpu is idle and the idle load balancing is done by * someone else, then no need raise the SCHED_SOFTIRQ */ if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && cpu_isset(cpu, nohz.cpu_mask)) return; #endif if (time_after_eq(jiffies, rq->next_balance)) raise_softirq(SCHED_SOFTIRQ); } #else /* * on UP we do not need to balance between CPUs: */ static inline void idle_balance(int cpu, struct rq *rq) { } #endif DEFINE_PER_CPU(struct kernel_stat, kstat); EXPORT_PER_CPU_SYMBOL(kstat); /* * This is called on clock ticks and on context switches. * Bank in p->sched_time the ns elapsed since the last tick or switch. */ static inline void update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now) { p->sched_time += now - p->last_ran; p->last_ran = rq->most_recent_timestamp = now; } /* * Return current->sched_time plus any more ns on the sched_clock * that have not yet been banked. */ unsigned long long current_sched_time(const struct task_struct *p) { unsigned long long ns; unsigned long flags; local_irq_save(flags); ns = p->sched_time + sched_clock() - p->last_ran; local_irq_restore(flags); return ns; } /* * We place interactive tasks back into the active array, if possible. * * To guarantee that this does not starve expired tasks we ignore the * interactivity of a task if the first expired task had to wait more * than a 'reasonable' amount of time. This deadline timeout is * load-dependent, as the frequency of array switched decreases with * increasing number of running tasks. We also ignore the interactivity * if a better static_prio task has expired: */ static inline int expired_starving(struct rq *rq) { if (rq->curr->static_prio > rq->best_expired_prio) return 1; if (!STARVATION_LIMIT || !rq->expired_timestamp) return 0; if (jiffies - rq->expired_timestamp > STARVATION_LIMIT * rq->nr_running) return 1; return 0; } /* * Account user cpu time to a process. * @p: the process that the cpu time gets accounted to * @hardirq_offset: the offset to subtract from hardirq_count() * @cputime: the cpu time spent in user space since the last update */ void account_user_time(struct task_struct *p, cputime_t cputime) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; cputime64_t tmp; p->utime = cputime_add(p->utime, cputime); /* Add user time to cpustat. */ tmp = cputime_to_cputime64(cputime); if (TASK_NICE(p) > 0) cpustat->nice = cputime64_add(cpustat->nice, tmp); else cpustat->user = cputime64_add(cpustat->user, tmp); } /* * Account system cpu time to a process. * @p: the process that the cpu time gets accounted to * @hardirq_offset: the offset to subtract from hardirq_count() * @cputime: the cpu time spent in kernel space since the last update */ void account_system_time(struct task_struct *p, int hardirq_offset, cputime_t cputime) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; struct rq *rq = this_rq(); cputime64_t tmp; p->stime = cputime_add(p->stime, cputime); /* Add system time to cpustat. 
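	 *
	 * The bucket is picked by context: hardirq time goes to ->irq,
	 * softirq time to ->softirq, ordinary task time to ->system, and
	 * time charged to the idle task is split between ->iowait and
	 * ->idle depending on whether anything on this CPU is currently
	 * waiting for IO.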
*/ tmp = cputime_to_cputime64(cputime); if (hardirq_count() - hardirq_offset) cpustat->irq = cputime64_add(cpustat->irq, tmp); else if (softirq_count()) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); else if (p != rq->idle) cpustat->system = cputime64_add(cpustat->system, tmp); else if (atomic_read(&rq->nr_iowait) > 0) cpustat->iowait = cputime64_add(cpustat->iowait, tmp); else cpustat->idle = cputime64_add(cpustat->idle, tmp); /* Account for system time used */ acct_update_integrals(p); } /* * Account for involuntary wait time. * @p: the process from which the cpu time has been stolen * @steal: the cpu time spent in involuntary wait */ void account_steal_time(struct task_struct *p, cputime_t steal) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; cputime64_t tmp = cputime_to_cputime64(steal); struct rq *rq = this_rq(); if (p == rq->idle) { p->stime = cputime_add(p->stime, steal); if (atomic_read(&rq->nr_iowait) > 0) cpustat->iowait = cputime64_add(cpustat->iowait, tmp); else cpustat->idle = cputime64_add(cpustat->idle, tmp); } else cpustat->steal = cputime64_add(cpustat->steal, tmp); } static void task_running_tick(struct rq *rq, struct task_struct *p) { if (p->array != rq->active) { /* Task has expired but was not scheduled yet */ set_tsk_need_resched(p); return; } spin_lock(&rq->lock); /* * The task was running during this tick - update the * time slice counter. Note: we do not update a thread's * priority until it either goes to sleep or uses up its * timeslice. This makes it possible for interactive tasks * to use up their timeslices at their highest priority levels. */ if (rt_task(p)) { /* * RR tasks need a special form of timeslice management. * FIFO tasks have no timeslices. */ if ((p->policy == SCHED_RR) && !--p->time_slice) { p->time_slice = task_timeslice(p); p->first_time_slice = 0; set_tsk_need_resched(p); /* put it at the end of the queue: */ requeue_task(p, rq->active); } goto out_unlock; } if (!--p->time_slice) { dequeue_task(p, rq->active); set_tsk_need_resched(p); p->prio = effective_prio(p); p->time_slice = task_timeslice(p); p->first_time_slice = 0; if (!rq->expired_timestamp) rq->expired_timestamp = jiffies; if (!TASK_INTERACTIVE(p) || expired_starving(rq)) { enqueue_task(p, rq->expired); if (p->static_prio < rq->best_expired_prio) rq->best_expired_prio = p->static_prio; } else enqueue_task(p, rq->active); } else { /* * Prevent a too long timeslice allowing a task to monopolize * the CPU. We do this by splitting up the timeslice into * smaller pieces. * * Note: this does not mean the task's timeslices expire or * get lost in any way, they just might be preempted by * another task of equal priority. (one with higher * priority would have preempted this task already.) We * requeue this task to the end of the list on this priority * level, which is in essence a round-robin of tasks with * equal priority. * * This only applies to tasks in the interactive * delta range with at least TIMESLICE_GRANULARITY to requeue. */ if (TASK_INTERACTIVE(p) && !((task_timeslice(p) - p->time_slice) % TIMESLICE_GRANULARITY(p)) && (p->time_slice >= TIMESLICE_GRANULARITY(p)) && (p->array == rq->active)) { requeue_task(p, rq->active); set_tsk_need_resched(p); } } out_unlock: spin_unlock(&rq->lock); } /* * This function gets called by the timer code, with HZ frequency. * We call it with interrupts disabled. * * It also gets called by the fork code, when changing the parent's * timeslices. 
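 *
 * Per invocation this boils down to: charge the elapsed time to the
 * current task via update_cpu_clock(), run the timeslice and priority
 * bookkeeping in task_running_tick() unless the CPU is idle, and on
 * SMP refresh the cpu_load[] averages and possibly raise SCHED_SOFTIRQ
 * through trigger_load_balance().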
*/ void scheduler_tick(void) { unsigned long long now = sched_clock(); struct task_struct *p = current; int cpu = smp_processor_id(); int idle_at_tick = idle_cpu(cpu); struct rq *rq = cpu_rq(cpu); update_cpu_clock(p, rq, now); if (!idle_at_tick) task_running_tick(rq, p); #ifdef CONFIG_SMP update_load(rq); rq->idle_at_tick = idle_at_tick; trigger_load_balance(cpu); #endif } #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT) void fastcall add_preempt_count(int val) { /* * Underflow? */ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) return; preempt_count() += val; /* * Spinlock count overflowing soon? */ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK - 10); } EXPORT_SYMBOL(add_preempt_count); void fastcall sub_preempt_count(int val) { /* * Underflow? */ if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) return; /* * Is the spinlock portion underflowing? */ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && !(preempt_count() & PREEMPT_MASK))) return; preempt_count() -= val; } EXPORT_SYMBOL(sub_preempt_count); #endif static inline int interactive_sleep(enum sleep_type sleep_type) { return (sleep_type == SLEEP_INTERACTIVE || sleep_type == SLEEP_INTERRUPTED); } /* * schedule() is the main scheduler function. */ asmlinkage void __sched schedule(void) { struct task_struct *prev, *next; struct prio_array *array; struct list_head *queue; unsigned long long now; unsigned long run_time; int cpu, idx, new_prio; long *switch_count; struct rq *rq; /* * Test if we are atomic. Since do_exit() needs to call into * schedule() atomically, we ignore that path for now. * Otherwise, whine if we are scheduling when we should not be. */ if (unlikely(in_atomic() && !current->exit_state)) { printk(KERN_ERR "BUG: scheduling while atomic: " "%s/0x%08x/%d\n", current->comm, preempt_count(), current->pid); debug_show_held_locks(current); if (irqs_disabled()) print_irqtrace_events(current); dump_stack(); } profile_hit(SCHED_PROFILING, __builtin_return_address(0)); need_resched: preempt_disable(); prev = current; release_kernel_lock(prev); need_resched_nonpreemptible: rq = this_rq(); /* * The idle thread is not allowed to schedule! * Remove this check after it has been exercised a bit. */ if (unlikely(prev == rq->idle) && prev->state != TASK_RUNNING) { printk(KERN_ERR "bad: scheduling from the idle thread!\n"); dump_stack(); } schedstat_inc(rq, sched_cnt); now = sched_clock(); if (likely((long long)(now - prev->timestamp) < NS_MAX_SLEEP_AVG)) { run_time = now - prev->timestamp; if (unlikely((long long)(now - prev->timestamp) < 0)) run_time = 0; } else run_time = NS_MAX_SLEEP_AVG; /* * Tasks charged proportionately less run_time at high sleep_avg to * delay them losing their interactive status */ run_time /= (CURRENT_BONUS(prev) ? : 1); spin_lock_irq(&rq->lock); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { switch_count = &prev->nvcsw; if (unlikely((prev->state & TASK_INTERRUPTIBLE) && unlikely(signal_pending(prev)))) prev->state = TASK_RUNNING; else { if (prev->state == TASK_UNINTERRUPTIBLE) rq->nr_uninterruptible++; deactivate_task(prev, rq); } } cpu = smp_processor_id(); if (unlikely(!rq->nr_running)) { idle_balance(cpu, rq); if (!rq->nr_running) { next = rq->idle; rq->expired_timestamp = 0; goto switch_tasks; } } array = rq->active; if (unlikely(!array->nr_active)) { /* * Switch the active and expired arrays. 
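		 *
		 * This pointer swap is the heart of the O(1) design: no
		 * task is touched individually, the entire expired
		 * population becomes the new active array in constant
		 * time.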
*/ schedstat_inc(rq, sched_switch); rq->active = rq->expired; rq->expired = array; array = rq->active; rq->expired_timestamp = 0; rq->best_expired_prio = MAX_PRIO; } idx = sched_find_first_bit(array->bitmap); queue = array->queue + idx; next = list_entry(queue->next, struct task_struct, run_list); if (!rt_task(next) && interactive_sleep(next->sleep_type)) { unsigned long long delta = now - next->timestamp; if (unlikely((long long)(now - next->timestamp) < 0)) delta = 0; if (next->sleep_type == SLEEP_INTERACTIVE) delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128; array = next->array; new_prio = recalc_task_prio(next, next->timestamp + delta); if (unlikely(next->prio != new_prio)) { dequeue_task(next, array); next->prio = new_prio; enqueue_task(next, array); } } next->sleep_type = SLEEP_NORMAL; switch_tasks: if (next == rq->idle) schedstat_inc(rq, sched_goidle); prefetch(next); prefetch_stack(next); clear_tsk_need_resched(prev); rcu_qsctr_inc(task_cpu(prev)); update_cpu_clock(prev, rq, now); prev->sleep_avg -= run_time; if ((long)prev->sleep_avg <= 0) prev->sleep_avg = 0; prev->timestamp = prev->last_ran = now; sched_info_switch(prev, next); if (likely(prev != next)) { next->timestamp = next->last_ran = now; rq->nr_switches++; rq->curr = next; ++*switch_count; prepare_task_switch(rq, next); prev = context_switch(rq, prev, next); barrier(); /* * this_rq must be evaluated again because prev may have moved * CPUs since it called schedule(), thus the 'rq' on its stack * frame will be invalid. */ finish_task_switch(this_rq(), prev); } else spin_unlock_irq(&rq->lock); prev = current; if (unlikely(reacquire_kernel_lock(prev) < 0)) goto need_resched_nonpreemptible; preempt_enable_no_resched(); if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) goto need_resched; } EXPORT_SYMBOL(schedule); #ifdef CONFIG_PREEMPT /* * this is the entry point to schedule() from in-kernel preemption * off of preempt_enable. Kernel preemptions off return from interrupt * occur there and call schedule directly. */ asmlinkage void __sched preempt_schedule(void) { struct thread_info *ti = current_thread_info(); #ifdef CONFIG_PREEMPT_BKL struct task_struct *task = current; int saved_lock_depth; #endif /* * If there is a non-zero preempt_count or interrupts are disabled, * we do not want to preempt the current task. Just return.. */ if (likely(ti->preempt_count || irqs_disabled())) return; need_resched: add_preempt_count(PREEMPT_ACTIVE); /* * We keep the big kernel semaphore locked, but we * clear ->lock_depth so that schedule() doesnt * auto-release the semaphore: */ #ifdef CONFIG_PREEMPT_BKL saved_lock_depth = task->lock_depth; task->lock_depth = -1; #endif schedule(); #ifdef CONFIG_PREEMPT_BKL task->lock_depth = saved_lock_depth; #endif sub_preempt_count(PREEMPT_ACTIVE); /* we could miss a preemption opportunity between schedule and now */ barrier(); if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) goto need_resched; } EXPORT_SYMBOL(preempt_schedule); /* * this is the entry point to schedule() from kernel preemption * off of irq context. * Note, that this is called and return with irqs disabled. This will * protect us against recursive calling from irq. 
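 *
 * Interrupts are re-enabled only around the schedule() call itself
 * below, and disabled again before we return towards the low-level
 * interrupt-return path.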
*/ asmlinkage void __sched preempt_schedule_irq(void) { struct thread_info *ti = current_thread_info(); #ifdef CONFIG_PREEMPT_BKL struct task_struct *task = current; int saved_lock_depth; #endif /* Catch callers which need to be fixed */ BUG_ON(ti->preempt_count || !irqs_disabled()); need_resched: add_preempt_count(PREEMPT_ACTIVE); /* * We keep the big kernel semaphore locked, but we * clear ->lock_depth so that schedule() doesnt * auto-release the semaphore: */ #ifdef CONFIG_PREEMPT_BKL saved_lock_depth = task->lock_depth; task->lock_depth = -1; #endif local_irq_enable(); schedule(); local_irq_disable(); #ifdef CONFIG_PREEMPT_BKL task->lock_depth = saved_lock_depth; #endif sub_preempt_count(PREEMPT_ACTIVE); /* we could miss a preemption opportunity between schedule and now */ barrier(); if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) goto need_resched; } #endif /* CONFIG_PREEMPT */ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key) { return try_to_wake_up(curr->private, mode, sync); } EXPORT_SYMBOL(default_wake_function); /* * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve * number) then we wake all the non-exclusive tasks and one exclusive task. * * There are circumstances in which we can try to wake a task which has already * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns * zero in this (rare) case, and we handle it by continuing to scan the queue. */ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync, void *key) { struct list_head *tmp, *next; list_for_each_safe(tmp, next, &q->task_list) { wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list); unsigned flags = curr->flags; if (curr->func(curr, mode, sync, key) && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) break; } } /** * __wake_up - wake up threads blocked on a waitqueue. * @q: the waitqueue * @mode: which threads * @nr_exclusive: how many wake-one or wake-many threads to wake up * @key: is directly passed to the wakeup function */ void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key) { unsigned long flags; spin_lock_irqsave(&q->lock, flags); __wake_up_common(q, mode, nr_exclusive, 0, key); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL(__wake_up); /* * Same as __wake_up but called with the spinlock in wait_queue_head_t held. */ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode) { __wake_up_common(q, mode, 1, 0, NULL); } /** * __wake_up_sync - wake up threads blocked on a waitqueue. * @q: the waitqueue * @mode: which threads * @nr_exclusive: how many wake-one or wake-many threads to wake up * * The sync wakeup differs that the waker knows that it will schedule * away soon, so while the target thread will be woken up, it will not * be migrated to another CPU - ie. the two threads are 'synchronized' * with each other. This can prevent needless bouncing between CPUs. * * On UP it can prevent extra preemption. 
*/ void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) { unsigned long flags; int sync = 1; if (unlikely(!q)) return; if (unlikely(!nr_exclusive)) sync = 0; spin_lock_irqsave(&q->lock, flags); __wake_up_common(q, mode, nr_exclusive, sync, NULL); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ void fastcall complete(struct completion *x) { unsigned long flags; spin_lock_irqsave(&x->wait.lock, flags); x->done++; __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0, NULL); spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete); void fastcall complete_all(struct completion *x) { unsigned long flags; spin_lock_irqsave(&x->wait.lock, flags); x->done += UINT_MAX/2; __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, 0, NULL); spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete_all); void fastcall __sched wait_for_completion(struct completion *x) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); schedule(); spin_lock_irq(&x->wait.lock); } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; spin_unlock_irq(&x->wait.lock); } EXPORT_SYMBOL(wait_for_completion); unsigned long fastcall __sched wait_for_completion_timeout(struct completion *x, unsigned long timeout) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); if (!timeout) { __remove_wait_queue(&x->wait, &wait); goto out; } } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return timeout; } EXPORT_SYMBOL(wait_for_completion_timeout); int fastcall __sched wait_for_completion_interruptible(struct completion *x) { int ret = 0; might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { if (signal_pending(current)) { ret = -ERESTARTSYS; __remove_wait_queue(&x->wait, &wait); goto out; } __set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); schedule(); spin_lock_irq(&x->wait.lock); } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return ret; } EXPORT_SYMBOL(wait_for_completion_interruptible); unsigned long fastcall __sched wait_for_completion_interruptible_timeout(struct completion *x, unsigned long timeout) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { if (signal_pending(current)) { timeout = -ERESTARTSYS; __remove_wait_queue(&x->wait, &wait); goto out; } __set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); if (!timeout) { __remove_wait_queue(&x->wait, &wait); goto out; } } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return timeout; } EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); 
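/*
 * Sketch of how the completion API implemented above is typically used
 * (illustrative only; start_async_work() is a made-up placeholder, not
 * a real kernel function):
 *
 *	DECLARE_COMPLETION(done);
 *
 *	start_async_work(&done);	hand &done to another context
 *	wait_for_completion(&done);	sleep until that context is done
 *
 * and in the other context, typically irq or workqueue code:
 *
 *	complete(&done);		wake exactly one exclusive waiter
 *
 * complete_all() instead bumps ->done by UINT_MAX/2 so that waiters,
 * current and subsequent, see the completion as already done.
 */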
#define SLEEP_ON_VAR \ unsigned long flags; \ wait_queue_t wait; \ init_waitqueue_entry(&wait, current); #define SLEEP_ON_HEAD \ spin_lock_irqsave(&q->lock,flags); \ __add_wait_queue(q, &wait); \ spin_unlock(&q->lock); #define SLEEP_ON_TAIL \ spin_lock_irq(&q->lock); \ __remove_wait_queue(q, &wait); \ spin_unlock_irqrestore(&q->lock, flags); void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q) { SLEEP_ON_VAR current->state = TASK_INTERRUPTIBLE; SLEEP_ON_HEAD schedule(); SLEEP_ON_TAIL } EXPORT_SYMBOL(interruptible_sleep_on); long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) { SLEEP_ON_VAR current->state = TASK_INTERRUPTIBLE; SLEEP_ON_HEAD timeout = schedule_timeout(timeout); SLEEP_ON_TAIL return timeout; } EXPORT_SYMBOL(interruptible_sleep_on_timeout); void fastcall __sched sleep_on(wait_queue_head_t *q) { SLEEP_ON_VAR current->state = TASK_UNINTERRUPTIBLE; SLEEP_ON_HEAD schedule(); SLEEP_ON_TAIL } EXPORT_SYMBOL(sleep_on); long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) { SLEEP_ON_VAR current->state = TASK_UNINTERRUPTIBLE; SLEEP_ON_HEAD timeout = schedule_timeout(timeout); SLEEP_ON_TAIL return timeout; } EXPORT_SYMBOL(sleep_on_timeout); #ifdef CONFIG_RT_MUTEXES /* * rt_mutex_setprio - set the current priority of a task * @p: task * @prio: prio value (kernel-internal form) * * This function changes the 'effective' priority of a task. It does * not touch ->normal_prio like __setscheduler(). * * Used by the rt_mutex code to implement priority inheritance logic. */ void rt_mutex_setprio(struct task_struct *p, int prio) { struct prio_array *array; unsigned long flags; struct rq *rq; int oldprio; BUG_ON(prio < 0 || prio > MAX_PRIO); rq = task_rq_lock(p, &flags); oldprio = p->prio; array = p->array; if (array) dequeue_task(p, array); p->prio = prio; if (array) { /* * If changing to an RT priority then queue it * in the active array! */ if (rt_task(p)) array = rq->active; enqueue_task(p, array); /* * Reschedule if we are currently running on this runqueue and * our priority decreased, or if we are not currently running on * this runqueue and our priority is higher than the current's */ if (task_running(rq, p)) { if (p->prio > oldprio) resched_task(rq->curr); } else if (TASK_PREEMPTS_CURR(p, rq)) resched_task(rq->curr); } task_rq_unlock(rq, &flags); } #endif void set_user_nice(struct task_struct *p, long nice) { struct prio_array *array; int old_prio, delta; unsigned long flags; struct rq *rq; if (TASK_NICE(p) == nice || nice < -20 || nice > 19) return; /* * We have to be careful, if called from sys_setpriority(), * the task might be in the middle of scheduling on another CPU. 
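	 *
	 * task_rq_lock() handles that race for us: it keeps re-reading
	 * task_rq(p) and retrying until it holds the lock of the runqueue
	 * the task is actually on, so the dequeue/requeue below cannot
	 * operate on the wrong queue.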
*/ rq = task_rq_lock(p, &flags); /* * The RT priorities are set via sched_setscheduler(), but we still * allow the 'normal' nice value to be set - but as expected * it wont have any effect on scheduling until the task is * not SCHED_NORMAL/SCHED_BATCH: */ if (has_rt_policy(p)) { p->static_prio = NICE_TO_PRIO(nice); goto out_unlock; } array = p->array; if (array) { dequeue_task(p, array); dec_raw_weighted_load(rq, p); } p->static_prio = NICE_TO_PRIO(nice); set_load_weight(p); old_prio = p->prio; p->prio = effective_prio(p); delta = p->prio - old_prio; if (array) { enqueue_task(p, array); inc_raw_weighted_load(rq, p); /* * If the task increased its priority or is running and * lowered its priority, then reschedule its CPU: */ if (delta < 0 || (delta > 0 && task_running(rq, p))) resched_task(rq->curr); } out_unlock: task_rq_unlock(rq, &flags); } EXPORT_SYMBOL(set_user_nice); /* * can_nice - check if a task can reduce its nice value * @p: task * @nice: nice value */ int can_nice(const struct task_struct *p, const int nice) { /* convert nice value [19,-20] to rlimit style value [1,40] */ int nice_rlim = 20 - nice; return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur || capable(CAP_SYS_NICE)); } #ifdef __ARCH_WANT_SYS_NICE /* * sys_nice - change the priority of the current process. * @increment: priority increment * * sys_setpriority is a more generic, but much slower function that * does similar things. */ asmlinkage long sys_nice(int increment) { long nice, retval; /* * Setpriority might change our priority at the same moment. * We don't have to worry. Conceptually one call occurs first * and we have a single winner. */ if (increment < -40) increment = -40; if (increment > 40) increment = 40; nice = PRIO_TO_NICE(current->static_prio) + increment; if (nice < -20) nice = -20; if (nice > 19) nice = 19; if (increment < 0 && !can_nice(current, nice)) return -EPERM; retval = security_task_setnice(current, nice); if (retval) return retval; set_user_nice(current, nice); return 0; } #endif /** * task_prio - return the priority value of a given task. * @p: the task in question. * * This is the priority value as seen by users in /proc. * RT tasks are offset by -200. Normal tasks are centered * around 0, value goes from -16 to +15. */ int task_prio(const struct task_struct *p) { return p->prio - MAX_RT_PRIO; } /** * task_nice - return the nice value of a given task. * @p: the task in question. */ int task_nice(const struct task_struct *p) { return TASK_NICE(p); } EXPORT_SYMBOL_GPL(task_nice); /** * idle_cpu - is a given cpu idle currently? * @cpu: the processor in question. */ int idle_cpu(int cpu) { return cpu_curr(cpu) == cpu_rq(cpu)->idle; } /** * idle_task - return the idle task for a given cpu. * @cpu: the processor in question. */ struct task_struct *idle_task(int cpu) { return cpu_rq(cpu)->idle; } /** * find_process_by_pid - find a process with a matching PID value. * @pid: the pid in question. */ static inline struct task_struct *find_process_by_pid(pid_t pid) { return pid ? find_task_by_pid(pid) : current; } /* Actually do priority change: must hold rq lock. 
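 *
 * The caller is also expected to have dequeued the task first (the
 * BUG_ON(p->array) below enforces this) and to re-activate it once the
 * new policy, priority and load weight are in place.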
*/ static void __setscheduler(struct task_struct *p, int policy, int prio) { BUG_ON(p->array); p->policy = policy; p->rt_priority = prio; p->normal_prio = normal_prio(p); /* we are holding p->pi_lock already */ p->prio = rt_mutex_getprio(p); /* * SCHED_BATCH tasks are treated as perpetual CPU hogs: */ if (policy == SCHED_BATCH) p->sleep_avg = 0; set_load_weight(p); } /** * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. * @p: the task in question. * @policy: new policy. * @param: structure containing the new RT priority. * * NOTE that the task may be already dead. */ int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param) { int retval, oldprio, oldpolicy = -1; struct prio_array *array; unsigned long flags; struct rq *rq; /* may grab non-irq protected spin_locks */ BUG_ON(in_interrupt()); recheck: /* double check policy once rq lock held */ if (policy < 0) policy = oldpolicy = p->policy; else if (policy != SCHED_FIFO && policy != SCHED_RR && policy != SCHED_NORMAL && policy != SCHED_BATCH) return -EINVAL; /* * Valid priorities for SCHED_FIFO and SCHED_RR are * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and * SCHED_BATCH is 0. */ if (param->sched_priority < 0 || (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) || (!p->mm && param->sched_priority > MAX_RT_PRIO-1)) return -EINVAL; if (is_rt_policy(policy) != (param->sched_priority != 0)) return -EINVAL; /* * Allow unprivileged RT tasks to decrease priority: */ if (!capable(CAP_SYS_NICE)) { if (is_rt_policy(policy)) { unsigned long rlim_rtprio; unsigned long flags; if (!lock_task_sighand(p, &flags)) return -ESRCH; rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur; unlock_task_sighand(p, &flags); /* can't set/change the rt policy */ if (policy != p->policy && !rlim_rtprio) return -EPERM; /* can't increase priority */ if (param->sched_priority > p->rt_priority && param->sched_priority > rlim_rtprio) return -EPERM; } /* can't change other user's priorities */ if ((current->euid != p->euid) && (current->euid != p->uid)) return -EPERM; } retval = security_task_setscheduler(p, policy, param); if (retval) return retval; /* * make sure no PI-waiters arrive (or leave) while we are * changing the priority of the task: */ spin_lock_irqsave(&p->pi_lock, flags); /* * To be able to change p->policy safely, the apropriate * runqueue lock must be held. 
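	 *
	 * The lock order is p->pi_lock (taken above) and then the
	 * runqueue lock; if the policy changed while we were waiting for
	 * the locks, both are dropped and we restart at "recheck" rather
	 * than trusting the stale values.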
*/ rq = __task_rq_lock(p); /* recheck policy now with rq lock held */ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { policy = oldpolicy = -1; __task_rq_unlock(rq); spin_unlock_irqrestore(&p->pi_lock, flags); goto recheck; } array = p->array; if (array) deactivate_task(p, rq); oldprio = p->prio; __setscheduler(p, policy, param->sched_priority); if (array) { __activate_task(p, rq); /* * Reschedule if we are currently running on this runqueue and * our priority decreased, or if we are not currently running on * this runqueue and our priority is higher than the current's */ if (task_running(rq, p)) { if (p->prio > oldprio) resched_task(rq->curr); } else if (TASK_PREEMPTS_CURR(p, rq)) resched_task(rq->curr); } __task_rq_unlock(rq); spin_unlock_irqrestore(&p->pi_lock, flags); rt_mutex_adjust_pi(p); return 0; } EXPORT_SYMBOL_GPL(sched_setscheduler); static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) { struct sched_param lparam; struct task_struct *p; int retval; if (!param || pid < 0) return -EINVAL; if (copy_from_user(&lparam, param, sizeof(struct sched_param))) return -EFAULT; rcu_read_lock(); retval = -ESRCH; p = find_process_by_pid(pid); if (p != NULL) retval = sched_setscheduler(p, policy, &lparam); rcu_read_unlock(); return retval; } /** * sys_sched_setscheduler - set/change the scheduler policy and RT priority * @pid: the pid in question. * @policy: new policy. * @param: structure containing the new RT priority. */ asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) { /* negative values for policy are not valid */ if (policy < 0) return -EINVAL; return do_sched_setscheduler(pid, policy, param); } /** * sys_sched_setparam - set/change the RT priority of a thread * @pid: the pid in question. * @param: structure containing the new RT priority. */ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param) { return do_sched_setscheduler(pid, -1, param); } /** * sys_sched_getscheduler - get the policy (scheduling class) of a thread * @pid: the pid in question. */ asmlinkage long sys_sched_getscheduler(pid_t pid) { struct task_struct *p; int retval = -EINVAL; if (pid < 0) goto out_nounlock; retval = -ESRCH; read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (p) { retval = security_task_getscheduler(p); if (!retval) retval = p->policy; } read_unlock(&tasklist_lock); out_nounlock: return retval; } /** * sys_sched_getscheduler - get the RT priority of a thread * @pid: the pid in question. * @param: structure containing the RT priority. */ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param) { struct sched_param lp; struct task_struct *p; int retval = -EINVAL; if (!param || pid < 0) goto out_nounlock; read_lock(&tasklist_lock); p = find_process_by_pid(pid); retval = -ESRCH; if (!p) goto out_unlock; retval = security_task_getscheduler(p); if (retval) goto out_unlock; lp.sched_priority = p->rt_priority; read_unlock(&tasklist_lock); /* * This one might sleep, we cannot do it with a spinlock held ... */ retval = copy_to_user(param, &lp, sizeof(*param)) ? 
-EFAULT : 0; out_nounlock: return retval; out_unlock: read_unlock(&tasklist_lock); return retval; } long sched_setaffinity(pid_t pid, cpumask_t new_mask) { cpumask_t cpus_allowed; struct task_struct *p; int retval; mutex_lock(&sched_hotcpu_mutex); read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (!p) { read_unlock(&tasklist_lock); mutex_unlock(&sched_hotcpu_mutex); return -ESRCH; } /* * It is not safe to call set_cpus_allowed with the * tasklist_lock held. We will bump the task_struct's * usage count and then drop tasklist_lock. */ get_task_struct(p); read_unlock(&tasklist_lock); retval = -EPERM; if ((current->euid != p->euid) && (current->euid != p->uid) && !capable(CAP_SYS_NICE)) goto out_unlock; retval = security_task_setscheduler(p, 0, NULL); if (retval) goto out_unlock; cpus_allowed = cpuset_cpus_allowed(p); cpus_and(new_mask, new_mask, cpus_allowed); retval = set_cpus_allowed(p, new_mask); out_unlock: put_task_struct(p); mutex_unlock(&sched_hotcpu_mutex); return retval; } static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, cpumask_t *new_mask) { if (len < sizeof(cpumask_t)) { memset(new_mask, 0, sizeof(cpumask_t)); } else if (len > sizeof(cpumask_t)) { len = sizeof(cpumask_t); } return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; } /** * sys_sched_setaffinity - set the cpu affinity of a process * @pid: pid of the process * @len: length in bytes of the bitmask pointed to by user_mask_ptr * @user_mask_ptr: user-space pointer to the new cpu mask */ asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr) { cpumask_t new_mask; int retval; retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask); if (retval) return retval; return sched_setaffinity(pid, new_mask); } /* * Represents all cpu's present in the system * In systems capable of hotplug, this map could dynamically grow * as new cpu's are detected in the system via any platform specific * method, such as ACPI for e.g. */ cpumask_t cpu_present_map __read_mostly; EXPORT_SYMBOL(cpu_present_map); #ifndef CONFIG_SMP cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL; EXPORT_SYMBOL(cpu_online_map); cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; EXPORT_SYMBOL(cpu_possible_map); #endif long sched_getaffinity(pid_t pid, cpumask_t *mask) { struct task_struct *p; int retval; mutex_lock(&sched_hotcpu_mutex); read_lock(&tasklist_lock); retval = -ESRCH; p = find_process_by_pid(pid); if (!p) goto out_unlock; retval = security_task_getscheduler(p); if (retval) goto out_unlock; cpus_and(*mask, p->cpus_allowed, cpu_online_map); out_unlock: read_unlock(&tasklist_lock); mutex_unlock(&sched_hotcpu_mutex); if (retval) return retval; return 0; } /** * sys_sched_getaffinity - get the cpu affinity of a process * @pid: pid of the process * @len: length in bytes of the bitmask pointed to by user_mask_ptr * @user_mask_ptr: user-space pointer to hold the current cpu mask */ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr) { int ret; cpumask_t mask; if (len < sizeof(cpumask_t)) return -EINVAL; ret = sched_getaffinity(pid, &mask); if (ret < 0) return ret; if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t))) return -EFAULT; return sizeof(cpumask_t); } /** * sys_sched_yield - yield the current processor to other threads. * * This function yields the current CPU by moving the calling thread * to the expired array. 
If there are no other threads running on this * CPU then this function will return. */ asmlinkage long sys_sched_yield(void) { struct rq *rq = this_rq_lock(); struct prio_array *array = current->array, *target = rq->expired; schedstat_inc(rq, yld_cnt); /* * We implement yielding by moving the task into the expired * queue. * * (special rule: RT tasks will just roundrobin in the active * array.) */ if (rt_task(current)) target = rq->active; if (array->nr_active == 1) { schedstat_inc(rq, yld_act_empty); if (!rq->expired->nr_active) schedstat_inc(rq, yld_both_empty); } else if (!rq->expired->nr_active) schedstat_inc(rq, yld_exp_empty); if (array != target) { dequeue_task(current, array); enqueue_task(current, target); } else /* * requeue_task is cheaper so perform that if possible. */ requeue_task(current, array); /* * Since we are going to call schedule() anyway, there's * no need to preempt or enable interrupts: */ __release(rq->lock); spin_release(&rq->lock.dep_map, 1, _THIS_IP_); _raw_spin_unlock(&rq->lock); preempt_enable_no_resched(); schedule(); return 0; } static void __cond_resched(void) { #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP __might_sleep(__FILE__, __LINE__); #endif /* * The BKS might be reacquired before we have dropped * PREEMPT_ACTIVE, which could trigger a second * cond_resched() call. */ do { add_preempt_count(PREEMPT_ACTIVE); schedule(); sub_preempt_count(PREEMPT_ACTIVE); } while (need_resched()); } int __sched cond_resched(void) { if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) && system_state == SYSTEM_RUNNING) { __cond_resched(); return 1; } return 0; } EXPORT_SYMBOL(cond_resched); /* * cond_resched_lock() - if a reschedule is pending, drop the given lock, * call schedule, and on return reacquire the lock. * * This works OK both with and without CONFIG_PREEMPT. We do strange low-level * operations here to prevent schedule() from being called twice (once via * spin_unlock(), once by hand). */ int cond_resched_lock(spinlock_t *lock) { int ret = 0; if (need_lockbreak(lock)) { spin_unlock(lock); cpu_relax(); ret = 1; spin_lock(lock); } if (need_resched() && system_state == SYSTEM_RUNNING) { spin_release(&lock->dep_map, 1, _THIS_IP_); _raw_spin_unlock(lock); preempt_enable_no_resched(); __cond_resched(); ret = 1; spin_lock(lock); } return ret; } EXPORT_SYMBOL(cond_resched_lock); int __sched cond_resched_softirq(void) { BUG_ON(!in_softirq()); if (need_resched() && system_state == SYSTEM_RUNNING) { local_bh_enable(); __cond_resched(); local_bh_disable(); return 1; } return 0; } EXPORT_SYMBOL(cond_resched_softirq); /** * yield - yield the current processor to other threads. * * This is a shortcut for kernel-space yielding - it marks the * thread runnable and calls sys_sched_yield(). */ void __sched yield(void) { set_current_state(TASK_RUNNING); sys_sched_yield(); } EXPORT_SYMBOL(yield); /* * This task is about to go to sleep on IO. Increment rq->nr_iowait so * that process accounting knows that this is a task in IO wait state. 
* * But don't do that if it is a deliberate, throttling IO wait (this task * has set its backing_dev_info: the queue against which it should throttle) */ void __sched io_schedule(void) { struct rq *rq = &__raw_get_cpu_var(runqueues); delayacct_blkio_start(); atomic_inc(&rq->nr_iowait); schedule(); atomic_dec(&rq->nr_iowait); delayacct_blkio_end(); } EXPORT_SYMBOL(io_schedule); long __sched io_schedule_timeout(long timeout) { struct rq *rq = &__raw_get_cpu_var(runqueues); long ret; delayacct_blkio_start(); atomic_inc(&rq->nr_iowait); ret = schedule_timeout(timeout); atomic_dec(&rq->nr_iowait); delayacct_blkio_end(); return ret; } /** * sys_sched_get_priority_max - return maximum RT priority. * @policy: scheduling class. * * this syscall returns the maximum rt_priority that can be used * by a given scheduling class. */ asmlinkage long sys_sched_get_priority_max(int policy) { int ret = -EINVAL; switch (policy) { case SCHED_FIFO: case SCHED_RR: ret = MAX_USER_RT_PRIO-1; break; case SCHED_NORMAL: case SCHED_BATCH: ret = 0; break; } return ret; } /** * sys_sched_get_priority_min - return minimum RT priority. * @policy: scheduling class. * * this syscall returns the minimum rt_priority that can be used * by a given scheduling class. */ asmlinkage long sys_sched_get_priority_min(int policy) { int ret = -EINVAL; switch (policy) { case SCHED_FIFO: case SCHED_RR: ret = 1; break; case SCHED_NORMAL: case SCHED_BATCH: ret = 0; } return ret; } /** * sys_sched_rr_get_interval - return the default timeslice of a process. * @pid: pid of the process. * @interval: userspace pointer to the timeslice value. * * this syscall writes the default timeslice value of a given process * into the user-space timespec buffer. A value of '0' means infinity. */ asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) { struct task_struct *p; int retval = -EINVAL; struct timespec t; if (pid < 0) goto out_nounlock; retval = -ESRCH; read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (!p) goto out_unlock; retval = security_task_getscheduler(p); if (retval) goto out_unlock; jiffies_to_timespec(p->policy == SCHED_FIFO ? 0 : task_timeslice(p), &t); read_unlock(&tasklist_lock); retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; out_nounlock: return retval; out_unlock: read_unlock(&tasklist_lock); return retval; } static const char stat_nam[] = "RSDTtZX"; static void show_task(struct task_struct *p) { unsigned long free = 0; unsigned state; state = p->state ? __ffs(p->state) + 1 : 0; printk("%-13.13s %c", p->comm, state < sizeof(stat_nam) - 1 ? 
stat_nam[state] : '?'); #if (BITS_PER_LONG == 32) if (state == TASK_RUNNING) printk(" running "); else printk(" %08lX ", thread_saved_pc(p)); #else if (state == TASK_RUNNING) printk(" running task "); else printk(" %016lx ", thread_saved_pc(p)); #endif #ifdef CONFIG_DEBUG_STACK_USAGE { unsigned long *n = end_of_stack(p); while (!*n) n++; free = (unsigned long)n - (unsigned long)end_of_stack(p); } #endif printk("%5lu %5d %6d", free, p->pid, p->parent->pid); if (!p->mm) printk(" (L-TLB)\n"); else printk(" (NOTLB)\n"); if (state != TASK_RUNNING) show_stack(p, NULL); } void show_state_filter(unsigned long state_filter) { struct task_struct *g, *p; #if (BITS_PER_LONG == 32) printk("\n" " free sibling\n"); printk(" task PC stack pid father child younger older\n"); #else printk("\n" " free sibling\n"); printk(" task PC stack pid father child younger older\n"); #endif read_lock(&tasklist_lock); do_each_thread(g, p) { /* * reset the NMI-timeout, listing all files on a slow * console might take alot of time: */ touch_nmi_watchdog(); if (!state_filter || (p->state & state_filter)) show_task(p); } while_each_thread(g, p); touch_all_softlockup_watchdogs(); read_unlock(&tasklist_lock); /* * Only show locks if all tasks are dumped: */ if (state_filter == -1) debug_show_all_locks(); } /** * init_idle - set up an idle thread for a given CPU * @idle: task in question * @cpu: cpu the idle task belongs to * * NOTE: this function does not set the idle thread's NEED_RESCHED * flag, to make booting more robust. */ void __cpuinit init_idle(struct task_struct *idle, int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long flags; idle->timestamp = sched_clock(); idle->sleep_avg = 0; idle->array = NULL; idle->prio = idle->normal_prio = MAX_PRIO; idle->state = TASK_RUNNING; idle->cpus_allowed = cpumask_of_cpu(cpu); set_task_cpu(idle, cpu); spin_lock_irqsave(&rq->lock, flags); rq->curr = rq->idle = idle; #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) idle->oncpu = 1; #endif spin_unlock_irqrestore(&rq->lock, flags); /* Set the preempt count _outside_ the spinlocks! */ #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL) task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0); #else task_thread_info(idle)->preempt_count = 0; #endif } /* * In a system that switches off the HZ timer nohz_cpu_mask * indicates which cpus entered this state. This is used * in the rcu update to wait only for active cpus. For system * which do not switch off the HZ timer nohz_cpu_mask should * always be CPU_MASK_NONE. */ cpumask_t nohz_cpu_mask = CPU_MASK_NONE; #ifdef CONFIG_SMP /* * This is how migration works: * * 1) we queue a struct migration_req structure in the source CPU's * runqueue and wake up that CPU's migration thread. * 2) we down() the locked semaphore => thread blocks. * 3) migration thread wakes up (implicitly it forces the migrated * thread off the CPU) * 4) it gets the migration request and checks whether the migrated * task is still in the wrong runqueue. * 5) if it's in the wrong runqueue then the migration thread removes * it and puts it into the right queue. * 6) migration thread up()s the semaphore. * 7) we wake up and the migration is done. */ /* * Change a given task's CPU affinity. Migrate the thread to a * proper CPU and schedule it away if the CPU it's executing on * is removed from the allowed bitmask. * * NOTE: the caller must have a valid reference to the task, the * task must not exit() & deallocate itself prematurely. The * call is not atomic; no spinlocks may be held. 
*/ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) { struct migration_req req; unsigned long flags; struct rq *rq; int ret = 0; rq = task_rq_lock(p, &flags); if (!cpus_intersects(new_mask, cpu_online_map)) { ret = -EINVAL; goto out; } p->cpus_allowed = new_mask; /* Can the task run on the task's current CPU? If so, we're done */ if (cpu_isset(task_cpu(p), new_mask)) goto out; if (migrate_task(p, any_online_cpu(new_mask), &req)) { /* Need help from migration thread: drop lock and wait. */ task_rq_unlock(rq, &flags); wake_up_process(rq->migration_thread); wait_for_completion(&req.done); tlb_migrate_finish(p->mm); return 0; } out: task_rq_unlock(rq, &flags); return ret; } EXPORT_SYMBOL_GPL(set_cpus_allowed); /* * Move (not current) task off this cpu, onto dest cpu. We're doing * this because either it can't run here any more (set_cpus_allowed() * away from this CPU, or CPU going down), or because we're * attempting to rebalance this task on exec (sched_exec). * * So we race with normal scheduler movements, but that's OK, as long * as the task is no longer on this CPU. * * Returns non-zero if task was successfully migrated. */ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) { struct rq *rq_dest, *rq_src; int ret = 0; if (unlikely(cpu_is_offline(dest_cpu))) return ret; rq_src = cpu_rq(src_cpu); rq_dest = cpu_rq(dest_cpu); double_rq_lock(rq_src, rq_dest); /* Already moved. */ if (task_cpu(p) != src_cpu) goto out; /* Affinity changed (again). */ if (!cpu_isset(dest_cpu, p->cpus_allowed)) goto out; set_task_cpu(p, dest_cpu); if (p->array) { /* * Sync timestamp with rq_dest's before activating. * The same thing could be achieved by doing this step * afterwards, and pretending it was a local activate. * This way is cleaner and logically correct. */ p->timestamp = p->timestamp - rq_src->most_recent_timestamp + rq_dest->most_recent_timestamp; deactivate_task(p, rq_src); __activate_task(p, rq_dest); if (TASK_PREEMPTS_CURR(p, rq_dest)) resched_task(rq_dest->curr); } ret = 1; out: double_rq_unlock(rq_src, rq_dest); return ret; } /* * migration_thread - this is a highprio system thread that performs * thread migration by bumping thread off CPU then 'pushing' onto * another runqueue. */ static int migration_thread(void *data) { int cpu = (long)data; struct rq *rq; rq = cpu_rq(cpu); BUG_ON(rq->migration_thread != current); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { struct migration_req *req; struct list_head *head; try_to_freeze(); spin_lock_irq(&rq->lock); if (cpu_is_offline(cpu)) { spin_unlock_irq(&rq->lock); goto wait_to_die; } if (rq->active_balance) { active_load_balance(rq, cpu); rq->active_balance = 0; } head = &rq->migration_queue; if (list_empty(head)) { spin_unlock_irq(&rq->lock); schedule(); set_current_state(TASK_INTERRUPTIBLE); continue; } req = list_entry(head->next, struct migration_req, list); list_del_init(head->next); spin_unlock(&rq->lock); __migrate_task(req->task, cpu, req->dest_cpu); local_irq_enable(); complete(&req->done); } __set_current_state(TASK_RUNNING); return 0; wait_to_die: /* Wait for kthread_stop */ set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); return 0; } #ifdef CONFIG_HOTPLUG_CPU /* * Figure out where task on dead CPU should go, use force if neccessary. 
* NOTE: interrupts should be disabled by the caller */ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) { unsigned long flags; cpumask_t mask; struct rq *rq; int dest_cpu; restart: /* On same node? */ mask = node_to_cpumask(cpu_to_node(dead_cpu)); cpus_and(mask, mask, p->cpus_allowed); dest_cpu = any_online_cpu(mask); /* On any allowed CPU? */ if (dest_cpu == NR_CPUS) dest_cpu = any_online_cpu(p->cpus_allowed); /* No more Mr. Nice Guy. */ if (dest_cpu == NR_CPUS) { rq = task_rq_lock(p, &flags); cpus_setall(p->cpus_allowed); dest_cpu = any_online_cpu(p->cpus_allowed); task_rq_unlock(rq, &flags); /* * Don't tell them about moving exiting tasks or * kernel threads (both mm NULL), since they never * leave kernel. */ if (p->mm && printk_ratelimit()) printk(KERN_INFO "process %d (%s) no " "longer affine to cpu%d\n", p->pid, p->comm, dead_cpu); } if (!__migrate_task(p, dead_cpu, dest_cpu)) goto restart; } /* * While a dead CPU has no uninterruptible tasks queued at this point, * it might still have a nonzero ->nr_uninterruptible counter, because * for performance reasons the counter is not stricly tracking tasks to * their home CPUs. So we just add the counter to another CPU's counter, * to keep the global sum constant after CPU-down: */ static void migrate_nr_uninterruptible(struct rq *rq_src) { struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL)); unsigned long flags; local_irq_save(flags); double_rq_lock(rq_src, rq_dest); rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible; rq_src->nr_uninterruptible = 0; double_rq_unlock(rq_src, rq_dest); local_irq_restore(flags); } /* Run through task list and migrate tasks from the dead cpu. */ static void migrate_live_tasks(int src_cpu) { struct task_struct *p, *t; write_lock_irq(&tasklist_lock); do_each_thread(t, p) { if (p == current) continue; if (task_cpu(p) == src_cpu) move_task_off_dead_cpu(src_cpu, p); } while_each_thread(t, p); write_unlock_irq(&tasklist_lock); } /* Schedules idle task to be the next runnable task on current CPU. * It does so by boosting its priority to highest possible and adding it to * the _front_ of the runqueue. Used by CPU offline code. */ void sched_idle_next(void) { int this_cpu = smp_processor_id(); struct rq *rq = cpu_rq(this_cpu); struct task_struct *p = rq->idle; unsigned long flags; /* cpu has to be offline */ BUG_ON(cpu_online(this_cpu)); /* * Strictly not necessary since rest of the CPUs are stopped by now * and interrupts disabled on the current cpu. */ spin_lock_irqsave(&rq->lock, flags); __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1); /* Add idle task to the _front_ of its priority queue: */ __activate_idle_task(p, rq); spin_unlock_irqrestore(&rq->lock, flags); } /* * Ensures that the idle task is using init_mm right before its cpu goes * offline. */ void idle_task_exit(void) { struct mm_struct *mm = current->active_mm; BUG_ON(cpu_online(smp_processor_id())); if (mm != &init_mm) switch_mm(mm, &init_mm, current); mmdrop(mm); } /* called under rq->lock with disabled interrupts */ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) { struct rq *rq = cpu_rq(dead_cpu); /* Must be exiting, otherwise would be on tasklist. */ BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD); /* Cannot have done final schedule yet: would have vanished. */ BUG_ON(p->state == TASK_DEAD); get_task_struct(p); /* * Drop lock around migration; if someone else moves it, * that's OK. No task can be added to this CPU, so iteration is * fine. 
* NOTE: interrupts should be left disabled --dev@ */ spin_unlock(&rq->lock); move_task_off_dead_cpu(dead_cpu, p); spin_lock(&rq->lock); put_task_struct(p); } /* release_task() removes task from tasklist, so we won't find dead tasks. */ static void migrate_dead_tasks(unsigned int dead_cpu) { struct rq *rq = cpu_rq(dead_cpu); unsigned int arr, i; for (arr = 0; arr < 2; arr++) { for (i = 0; i < MAX_PRIO; i++) { struct list_head *list = &rq->arrays[arr].queue[i]; while (!list_empty(list)) migrate_dead(dead_cpu, list_entry(list->next, struct task_struct, run_list)); } } } #endif /* CONFIG_HOTPLUG_CPU */ /* * migration_call - callback that gets triggered when a CPU is added. * Here we can start up the necessary migration thread for the new CPU. */ static int __cpuinit migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) { struct task_struct *p; int cpu = (long)hcpu; unsigned long flags; struct rq *rq; switch (action) { case CPU_LOCK_ACQUIRE: mutex_lock(&sched_hotcpu_mutex); break; case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: p = kthread_create(migration_thread, hcpu, "migration/%d",cpu); if (IS_ERR(p)) return NOTIFY_BAD; p->flags |= PF_NOFREEZE; kthread_bind(p, cpu); /* Must be high prio: stop_machine expects to yield to it. */ rq = task_rq_lock(p, &flags); __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1); task_rq_unlock(rq, &flags); cpu_rq(cpu)->migration_thread = p; break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: /* Strictly unneccessary, as first user will wake it. */ wake_up_process(cpu_rq(cpu)->migration_thread); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: if (!cpu_rq(cpu)->migration_thread) break; /* Unbind it from offline cpu so it can run. Fall thru. */ kthread_bind(cpu_rq(cpu)->migration_thread, any_online_cpu(cpu_online_map)); kthread_stop(cpu_rq(cpu)->migration_thread); cpu_rq(cpu)->migration_thread = NULL; break; case CPU_DEAD: case CPU_DEAD_FROZEN: migrate_live_tasks(cpu); rq = cpu_rq(cpu); kthread_stop(rq->migration_thread); rq->migration_thread = NULL; /* Idle task back to normal (off runqueue, low prio) */ rq = task_rq_lock(rq->idle, &flags); deactivate_task(rq->idle, rq); rq->idle->static_prio = MAX_PRIO; __setscheduler(rq->idle, SCHED_NORMAL, 0); migrate_dead_tasks(cpu); task_rq_unlock(rq, &flags); migrate_nr_uninterruptible(rq); BUG_ON(rq->nr_running != 0); /* No need to migrate the tasks: it was best-effort if * they didn't take sched_hotcpu_mutex. Just wake up * the requestors. */ spin_lock_irq(&rq->lock); while (!list_empty(&rq->migration_queue)) { struct migration_req *req; req = list_entry(rq->migration_queue.next, struct migration_req, list); list_del_init(&req->list); complete(&req->done); } spin_unlock_irq(&rq->lock); break; #endif case CPU_LOCK_RELEASE: mutex_unlock(&sched_hotcpu_mutex); break; } return NOTIFY_OK; } /* Register at highest priority so that task migration (migrate_all_tasks) * happens before everything else. 
*/ static struct notifier_block __cpuinitdata migration_notifier = { .notifier_call = migration_call, .priority = 10 }; int __init migration_init(void) { void *cpu = (void *)(long)smp_processor_id(); int err; /* Start one for the boot CPU: */ err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); BUG_ON(err == NOTIFY_BAD); migration_call(&migration_notifier, CPU_ONLINE, cpu); register_cpu_notifier(&migration_notifier); return 0; } #endif #ifdef CONFIG_SMP /* Number of possible processor ids */ int nr_cpu_ids __read_mostly = NR_CPUS; EXPORT_SYMBOL(nr_cpu_ids); #undef SCHED_DOMAIN_DEBUG #ifdef SCHED_DOMAIN_DEBUG static void sched_domain_debug(struct sched_domain *sd, int cpu) { int level = 0; if (!sd) { printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); return; } printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); do { int i; char str[NR_CPUS]; struct sched_group *group = sd->groups; cpumask_t groupmask; cpumask_scnprintf(str, NR_CPUS, sd->span); cpus_clear(groupmask); printk(KERN_DEBUG); for (i = 0; i < level + 1; i++) printk(" "); printk("domain %d: ", level); if (!(sd->flags & SD_LOAD_BALANCE)) { printk("does not load-balance\n"); if (sd->parent) printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" " has parent"); break; } printk("span %s\n", str); if (!cpu_isset(cpu, sd->span)) printk(KERN_ERR "ERROR: domain->span does not contain " "CPU%d\n", cpu); if (!cpu_isset(cpu, group->cpumask)) printk(KERN_ERR "ERROR: domain->groups does not contain" " CPU%d\n", cpu); printk(KERN_DEBUG); for (i = 0; i < level + 2; i++) printk(" "); printk("groups:"); do { if (!group) { printk("\n"); printk(KERN_ERR "ERROR: group is NULL\n"); break; } if (!group->__cpu_power) { printk("\n"); printk(KERN_ERR "ERROR: domain->cpu_power not " "set\n"); } if (!cpus_weight(group->cpumask)) { printk("\n"); printk(KERN_ERR "ERROR: empty group\n"); } if (cpus_intersects(groupmask, group->cpumask)) { printk("\n"); printk(KERN_ERR "ERROR: repeated CPUs\n"); } cpus_or(groupmask, groupmask, group->cpumask); cpumask_scnprintf(str, NR_CPUS, group->cpumask); printk(" %s", str); group = group->next; } while (group != sd->groups); printk("\n"); if (!cpus_equal(sd->span, groupmask)) printk(KERN_ERR "ERROR: groups don't span " "domain->span\n"); level++; sd = sd->parent; if (!sd) continue; if (!cpus_subset(groupmask, sd->span)) printk(KERN_ERR "ERROR: parent span is not a superset " "of domain->span\n"); } while (sd); } #else # define sched_domain_debug(sd, cpu) do { } while (0) #endif static int sd_degenerate(struct sched_domain *sd) { if (cpus_weight(sd->span) == 1) return 1; /* Following flags need at least 2 groups */ if (sd->flags & (SD_LOAD_BALANCE | SD_BALANCE_NEWIDLE | SD_BALANCE_FORK | SD_BALANCE_EXEC | SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)) { if (sd->groups != sd->groups->next) return 0; } /* Following flags don't use groups */ if (sd->flags & (SD_WAKE_IDLE | SD_WAKE_AFFINE | SD_WAKE_BALANCE)) return 0; return 1; } static int sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) { unsigned long cflags = sd->flags, pflags = parent->flags; if (sd_degenerate(parent)) return 1; if (!cpus_equal(sd->span, parent->span)) return 0; /* Does parent contain flags not in child? 
*/ /* WAKE_BALANCE is a subset of WAKE_AFFINE */ if (cflags & SD_WAKE_AFFINE) pflags &= ~SD_WAKE_BALANCE; /* Flags needing groups don't count if only 1 group in parent */ if (parent->groups == parent->groups->next) { pflags &= ~(SD_LOAD_BALANCE | SD_BALANCE_NEWIDLE | SD_BALANCE_FORK | SD_BALANCE_EXEC | SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES); } if (~cflags & pflags) return 0; return 1; } /* * Attach the domain 'sd' to 'cpu' as its base domain. Callers must * hold the hotplug lock. */ static void cpu_attach_domain(struct sched_domain *sd, int cpu) { struct rq *rq = cpu_rq(cpu); struct sched_domain *tmp; /* Remove the sched domains which do not contribute to scheduling. */ for (tmp = sd; tmp; tmp = tmp->parent) { struct sched_domain *parent = tmp->parent; if (!parent) break; if (sd_parent_degenerate(tmp, parent)) { tmp->parent = parent->parent; if (parent->parent) parent->parent->child = tmp; } } if (sd && sd_degenerate(sd)) { sd = sd->parent; if (sd) sd->child = NULL; } sched_domain_debug(sd, cpu); rcu_assign_pointer(rq->sd, sd); } /* cpus with isolated domains */ static cpumask_t cpu_isolated_map = CPU_MASK_NONE; /* Setup the mask of cpus configured for isolated domains */ static int __init isolated_cpu_setup(char *str) { int ints[NR_CPUS], i; str = get_options(str, ARRAY_SIZE(ints), ints); cpus_clear(cpu_isolated_map); for (i = 1; i <= ints[0]; i++) if (ints[i] < NR_CPUS) cpu_set(ints[i], cpu_isolated_map); return 1; } __setup ("isolcpus=", isolated_cpu_setup); /* * init_sched_build_groups takes the cpumask we wish to span, and a pointer * to a function which identifies what group(along with sched group) a CPU * belongs to. The return value of group_fn must be a >= 0 and < NR_CPUS * (due to the fact that we keep track of groups covered with a cpumask_t). * * init_sched_build_groups will build a circular linked list of the groups * covered by the given span, and will set each group's ->cpumask correctly, * and ->cpu_power to 0. */ static void init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map, int (*group_fn)(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)) { struct sched_group *first = NULL, *last = NULL; cpumask_t covered = CPU_MASK_NONE; int i; for_each_cpu_mask(i, span) { struct sched_group *sg; int group = group_fn(i, cpu_map, &sg); int j; if (cpu_isset(i, covered)) continue; sg->cpumask = CPU_MASK_NONE; sg->__cpu_power = 0; for_each_cpu_mask(j, span) { if (group_fn(j, cpu_map, NULL) != group) continue; cpu_set(j, covered); cpu_set(j, sg->cpumask); } if (!first) first = sg; if (last) last->next = sg; last = sg; } last->next = first; } #define SD_NODES_PER_DOMAIN 16 /* * Self-tuning task migration cost measurement between source and target CPUs. * * This is done by measuring the cost of manipulating buffers of varying * sizes. For a given buffer-size here are the steps that are taken: * * 1) the source CPU reads+dirties a shared buffer * 2) the target CPU reads+dirties the same shared buffer * * We measure how long they take, in the following 4 scenarios: * * - source: CPU1, target: CPU2 | cost1 * - source: CPU2, target: CPU1 | cost2 * - source: CPU1, target: CPU1 | cost3 * - source: CPU2, target: CPU2 | cost4 * * We then calculate the cost3+cost4-cost1-cost2 difference - this is * the cost of migration. * * We then start off from a small buffer-size and iterate up to larger * buffer sizes, in 5% steps - measuring each buffer-size separately, and * doing a maximum search for the cost. 
(The maximum cost for a migration * normally occurs when the working set size is around the effective cache * size.) */ #define SEARCH_SCOPE 2 #define MIN_CACHE_SIZE (64*1024U) #define DEFAULT_CACHE_SIZE (5*1024*1024U) #define ITERATIONS 1 #define SIZE_THRESH 130 #define COST_THRESH 130 /* * The migration cost is a function of 'domain distance'. Domain * distance is the number of steps a CPU has to iterate down its * domain tree to share a domain with the other CPU. The farther * two CPUs are from each other, the larger the distance gets. * * Note that we use the distance only to cache measurement results, * the distance value is not used numerically otherwise. When two * CPUs have the same distance it is assumed that the migration * cost is the same. (this is a simplification but quite practical) */ #define MAX_DOMAIN_DISTANCE 32 static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] = { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] = /* * Architectures may override the migration cost and thus avoid * boot-time calibration. Unit is nanoseconds. Mostly useful for * virtualized hardware: */ #ifdef CONFIG_DEFAULT_MIGRATION_COST CONFIG_DEFAULT_MIGRATION_COST #else -1LL #endif }; /* * Allow override of migration cost - in units of microseconds. * E.g. migration_cost=1000,2000,3000 will set up a level-1 cost * of 1 msec, level-2 cost of 2 msecs and level3 cost of 3 msecs: */ static int __init migration_cost_setup(char *str) { int ints[MAX_DOMAIN_DISTANCE+1], i; str = get_options(str, ARRAY_SIZE(ints), ints); printk("#ints: %d\n", ints[0]); for (i = 1; i <= ints[0]; i++) { migration_cost[i-1] = (unsigned long long)ints[i]*1000; printk("migration_cost[%d]: %Ld\n", i-1, migration_cost[i-1]); } return 1; } __setup ("migration_cost=", migration_cost_setup); /* * Global multiplier (divisor) for migration-cutoff values, * in percentiles. E.g. use a value of 150 to get 1.5 times * longer cache-hot cutoff times. * * (We scale it from 100 to 128 to long long handling easier.) */ #define MIGRATION_FACTOR_SCALE 128 static unsigned int migration_factor = MIGRATION_FACTOR_SCALE; static int __init setup_migration_factor(char *str) { get_option(&str, &migration_factor); migration_factor = migration_factor * MIGRATION_FACTOR_SCALE / 100; return 1; } __setup("migration_factor=", setup_migration_factor); /* * Estimated distance of two CPUs, measured via the number of domains * we have to pass for the two CPUs to be in the same span: */ static unsigned long domain_distance(int cpu1, int cpu2) { unsigned long distance = 0; struct sched_domain *sd; for_each_domain(cpu1, sd) { WARN_ON(!cpu_isset(cpu1, sd->span)); if (cpu_isset(cpu2, sd->span)) return distance; distance++; } if (distance >= MAX_DOMAIN_DISTANCE) { WARN_ON(1); distance = MAX_DOMAIN_DISTANCE-1; } return distance; } static unsigned int migration_debug; static int __init setup_migration_debug(char *str) { get_option(&str, &migration_debug); return 1; } __setup("migration_debug=", setup_migration_debug); /* * Maximum cache-size that the scheduler should try to measure. * Architectures with larger caches should tune this up during * bootup. Gets used in the domain-setup code (i.e. during SMP * bootup). */ unsigned int max_cache_size; static int __init setup_max_cache_size(char *str) { get_option(&str, &max_cache_size); return 1; } __setup("max_cache_size=", setup_max_cache_size); /* * Dirty a big buffer in a hard-to-predict (for the L2 cache) way. 
This * is the operation that is timed, so we try to generate unpredictable * cachemisses that still end up filling the L2 cache: */ static void touch_cache(void *__cache, unsigned long __size) { unsigned long size = __size / sizeof(long); unsigned long chunk1 = size / 3; unsigned long chunk2 = 2 * size / 3; unsigned long *cache = __cache; int i; for (i = 0; i < size/6; i += 8) { switch (i % 6) { case 0: cache[i]++; case 1: cache[size-1-i]++; case 2: cache[chunk1-i]++; case 3: cache[chunk1+i]++; case 4: cache[chunk2-i]++; case 5: cache[chunk2+i]++; } } } /* * Measure the cache-cost of one task migration. Returns in units of nsec. */ static unsigned long long measure_one(void *cache, unsigned long size, int source, int target) { cpumask_t mask, saved_mask; unsigned long long t0, t1, t2, t3, cost; saved_mask = current->cpus_allowed; /* * Flush source caches to RAM and invalidate them: */ sched_cacheflush(); /* * Migrate to the source CPU: */ mask = cpumask_of_cpu(source); set_cpus_allowed(current, mask); WARN_ON(smp_processor_id() != source); /* * Dirty the working set: */ t0 = sched_clock(); touch_cache(cache, size); t1 = sched_clock(); /* * Migrate to the target CPU, dirty the L2 cache and access * the shared buffer. (which represents the working set * of a migrated task.) */ mask = cpumask_of_cpu(target); set_cpus_allowed(current, mask); WARN_ON(smp_processor_id() != target); t2 = sched_clock(); touch_cache(cache, size); t3 = sched_clock(); cost = t1-t0 + t3-t2; if (migration_debug >= 2) printk("[%d->%d]: %8Ld %8Ld %8Ld => %10Ld.\n", source, target, t1-t0, t1-t0, t3-t2, cost); /* * Flush target caches to RAM and invalidate them: */ sched_cacheflush(); set_cpus_allowed(current, saved_mask); return cost; } /* * Measure a series of task migrations and return the average * result. Since this code runs early during bootup the system * is 'undisturbed' and the average latency makes sense. * * The algorithm in essence auto-detects the relevant cache-size, * so it will properly detect different cachesizes for different * cache-hierarchies, depending on how the CPUs are connected. * * Architectures can prime the upper limit of the search range via * max_cache_size, otherwise the search range defaults to 20MB...64K. */ static unsigned long long measure_cost(int cpu1, int cpu2, void *cache, unsigned int size) { unsigned long long cost1, cost2; int i; /* * Measure the migration cost of 'size' bytes, over an * average of 10 runs: * * (We perturb the cache size by a small (0..4k) * value to compensate size/alignment related artifacts. * We also subtract the cost of the operation done on * the same CPU.) 
*/ cost1 = 0; /* * dry run, to make sure we start off cache-cold on cpu1, * and to get any vmalloc pagefaults in advance: */ measure_one(cache, size, cpu1, cpu2); for (i = 0; i < ITERATIONS; i++) cost1 += measure_one(cache, size - i * 1024, cpu1, cpu2); measure_one(cache, size, cpu2, cpu1); for (i = 0; i < ITERATIONS; i++) cost1 += measure_one(cache, size - i * 1024, cpu2, cpu1); /* * (We measure the non-migrating [cached] cost on both * cpu1 and cpu2, to handle CPUs with different speeds) */ cost2 = 0; measure_one(cache, size, cpu1, cpu1); for (i = 0; i < ITERATIONS; i++) cost2 += measure_one(cache, size - i * 1024, cpu1, cpu1); measure_one(cache, size, cpu2, cpu2); for (i = 0; i < ITERATIONS; i++) cost2 += measure_one(cache, size - i * 1024, cpu2, cpu2); /* * Get the per-iteration migration cost: */ do_div(cost1, 2 * ITERATIONS); do_div(cost2, 2 * ITERATIONS); return cost1 - cost2; } static unsigned long long measure_migration_cost(int cpu1, int cpu2) { unsigned long long max_cost = 0, fluct = 0, avg_fluct = 0; unsigned int max_size, size, size_found = 0; long long cost = 0, prev_cost; void *cache; /* * Search from max_cache_size*5 down to 64K - the real relevant * cachesize has to lie somewhere inbetween. */ if (max_cache_size) { max_size = max(max_cache_size * SEARCH_SCOPE, MIN_CACHE_SIZE); size = max(max_cache_size / SEARCH_SCOPE, MIN_CACHE_SIZE); } else { /* * Since we have no estimation about the relevant * search range */ max_size = DEFAULT_CACHE_SIZE * SEARCH_SCOPE; size = MIN_CACHE_SIZE; } if (!cpu_online(cpu1) || !cpu_online(cpu2)) { printk("cpu %d and %d not both online!\n", cpu1, cpu2); return 0; } /* * Allocate the working set: */ cache = vmalloc(max_size); if (!cache) { printk("could not vmalloc %d bytes for cache!\n", 2 * max_size); return 1000000; /* return 1 msec on very small boxen */ } while (size <= max_size) { prev_cost = cost; cost = measure_cost(cpu1, cpu2, cache, size); /* * Update the max: */ if (cost > 0) { if (max_cost < cost) { max_cost = cost; size_found = size; } } /* * Calculate average fluctuation, we use this to prevent * noise from triggering an early break out of the loop: */ fluct = abs(cost - prev_cost); avg_fluct = (avg_fluct + fluct)/2; if (migration_debug) printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): " "(%8Ld %8Ld)\n", cpu1, cpu2, size, (long)cost / 1000000, ((long)cost / 100000) % 10, (long)max_cost / 1000000, ((long)max_cost / 100000) % 10, domain_distance(cpu1, cpu2), cost, avg_fluct); /* * If we iterated at least 20% past the previous maximum, * and the cost has dropped by more than 20% already, * (taking fluctuations into account) then we assume to * have found the maximum and break out of the loop early: */ if (size_found && (size*100 > size_found*SIZE_THRESH)) if (cost+avg_fluct <= 0 || max_cost*100 > (cost+avg_fluct)*COST_THRESH) { if (migration_debug) printk("-> found max.\n"); break; } /* * Increase the cachesize in 10% steps: */ size = size * 10 / 9; } if (migration_debug) printk("[%d][%d] working set size found: %d, cost: %Ld\n", cpu1, cpu2, size_found, max_cost); vfree(cache); /* * A task is considered 'cache cold' if at least 2 times * the worst-case cost of migration has passed. * * (this limit is only listened to if the load-balancing * situation is 'nice' - if there is a large imbalance we * ignore it for the sake of CPU utilization and * processing fairness.) 
*/ return 2 * max_cost * migration_factor / MIGRATION_FACTOR_SCALE; } static void calibrate_migration_costs(const cpumask_t *cpu_map) { int cpu1 = -1, cpu2 = -1, cpu, orig_cpu = raw_smp_processor_id(); unsigned long j0, j1, distance, max_distance = 0; struct sched_domain *sd; j0 = jiffies; /* * First pass - calculate the cacheflush times: */ for_each_cpu_mask(cpu1, *cpu_map) { for_each_cpu_mask(cpu2, *cpu_map) { if (cpu1 == cpu2) continue; distance = domain_distance(cpu1, cpu2); max_distance = max(max_distance, distance); /* * No result cached yet? */ if (migration_cost[distance] == -1LL) migration_cost[distance] = measure_migration_cost(cpu1, cpu2); } } /* * Second pass - update the sched domain hierarchy with * the new cache-hot-time estimations: */ for_each_cpu_mask(cpu, *cpu_map) { distance = 0; for_each_domain(cpu, sd) { sd->cache_hot_time = migration_cost[distance]; distance++; } } /* * Print the matrix: */ if (migration_debug) printk("migration: max_cache_size: %d, cpu: %d MHz:\n", max_cache_size, #ifdef CONFIG_X86 cpu_khz/1000 #else -1 #endif ); if (system_state == SYSTEM_BOOTING && num_online_cpus() > 1) { printk("migration_cost="); for (distance = 0; distance <= max_distance; distance++) { if (distance) printk(","); printk("%ld", (long)migration_cost[distance] / 1000); } printk("\n"); } j1 = jiffies; if (migration_debug) printk("migration: %ld seconds\n", (j1-j0) / HZ); /* * Move back to the original CPU. NUMA-Q gets confused * if we migrate to another quad during bootup. */ if (raw_smp_processor_id() != orig_cpu) { cpumask_t mask = cpumask_of_cpu(orig_cpu), saved_mask = current->cpus_allowed; set_cpus_allowed(current, mask); set_cpus_allowed(current, saved_mask); } } #ifdef CONFIG_NUMA /** * find_next_best_node - find the next node to include in a sched_domain * @node: node whose sched_domain we're building * @used_nodes: nodes already in the sched_domain * * Find the next node to include in a given scheduling domain. Simply * finds the closest node not already in the @used_nodes map. * * Should use nodemask_t. */ static int find_next_best_node(int node, unsigned long *used_nodes) { int i, n, val, min_val, best_node = 0; min_val = INT_MAX; for (i = 0; i < MAX_NUMNODES; i++) { /* Start at @node */ n = (node + i) % MAX_NUMNODES; if (!nr_cpus_node(n)) continue; /* Skip already used nodes */ if (test_bit(n, used_nodes)) continue; /* Simple min distance search */ val = node_distance(node, n); if (val < min_val) { min_val = val; best_node = n; } } set_bit(best_node, used_nodes); return best_node; } /** * sched_domain_node_span - get a cpumask for a node's sched_domain * @node: node whose cpumask we're constructing * @size: number of nodes to include in this span * * Given a node, construct a good cpumask for its sched_domain to span. It * should be one that prevents unnecessary balancing, but also spreads tasks * out optimally. 
*/ static cpumask_t sched_domain_node_span(int node) { DECLARE_BITMAP(used_nodes, MAX_NUMNODES); cpumask_t span, nodemask; int i; cpus_clear(span); bitmap_zero(used_nodes, MAX_NUMNODES); nodemask = node_to_cpumask(node); cpus_or(span, span, nodemask); set_bit(node, used_nodes); for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { int next_node = find_next_best_node(node, used_nodes); nodemask = node_to_cpumask(next_node); cpus_or(span, span, nodemask); } return span; } #endif int sched_smt_power_savings = 0, sched_mc_power_savings = 0; /* * SMT sched-domains: */ #ifdef CONFIG_SCHED_SMT static DEFINE_PER_CPU(struct sched_domain, cpu_domains); static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) { if (sg) *sg = &per_cpu(sched_group_cpus, cpu); return cpu; } #endif /* * multi-core sched-domains: */ #ifdef CONFIG_SCHED_MC static DEFINE_PER_CPU(struct sched_domain, core_domains); static DEFINE_PER_CPU(struct sched_group, sched_group_core); #endif #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) { int group; cpumask_t mask = cpu_sibling_map[cpu]; cpus_and(mask, mask, *cpu_map); group = first_cpu(mask); if (sg) *sg = &per_cpu(sched_group_core, group); return group; } #elif defined(CONFIG_SCHED_MC) static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) { if (sg) *sg = &per_cpu(sched_group_core, cpu); return cpu; } #endif static DEFINE_PER_CPU(struct sched_domain, phys_domains); static DEFINE_PER_CPU(struct sched_group, sched_group_phys); static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) { int group; #ifdef CONFIG_SCHED_MC cpumask_t mask = cpu_coregroup_map(cpu); cpus_and(mask, mask, *cpu_map); group = first_cpu(mask); #elif defined(CONFIG_SCHED_SMT) cpumask_t mask = cpu_sibling_map[cpu]; cpus_and(mask, mask, *cpu_map); group = first_cpu(mask); #else group = cpu; #endif if (sg) *sg = &per_cpu(sched_group_phys, group); return group; } #ifdef CONFIG_NUMA /* * The init_sched_build_groups can't handle what we want to do with node * groups, so roll our own. Now each node has its own list of groups which * gets dynamically allocated. */ static DEFINE_PER_CPU(struct sched_domain, node_domains); static struct sched_group **sched_group_nodes_bycpu[NR_CPUS]; static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes); static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) { cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu)); int group; cpus_and(nodemask, nodemask, *cpu_map); group = first_cpu(nodemask); if (sg) *sg = &per_cpu(sched_group_allnodes, group); return group; } static void init_numa_sched_groups_power(struct sched_group *group_head) { struct sched_group *sg = group_head; int j; if (!sg) return; next_sg: for_each_cpu_mask(j, sg->cpumask) { struct sched_domain *sd; sd = &per_cpu(phys_domains, j); if (j != first_cpu(sd->groups->cpumask)) { /* * Only add "power" once for each * physical package. 
*/ continue; } sg_inc_cpu_power(sg, sd->groups->__cpu_power); } sg = sg->next; if (sg != group_head) goto next_sg; } #endif #ifdef CONFIG_NUMA /* Free memory allocated for various sched_group structures */ static void free_sched_groups(const cpumask_t *cpu_map) { int cpu, i; for_each_cpu_mask(cpu, *cpu_map) { struct sched_group **sched_group_nodes = sched_group_nodes_bycpu[cpu]; if (!sched_group_nodes) continue; for (i = 0; i < MAX_NUMNODES; i++) { cpumask_t nodemask = node_to_cpumask(i); struct sched_group *oldsg, *sg = sched_group_nodes[i]; cpus_and(nodemask, nodemask, *cpu_map); if (cpus_empty(nodemask)) continue; if (sg == NULL) continue; sg = sg->next; next_sg: oldsg = sg; sg = sg->next; kfree(oldsg); if (oldsg != sched_group_nodes[i]) goto next_sg; } kfree(sched_group_nodes); sched_group_nodes_bycpu[cpu] = NULL; } } #else static void free_sched_groups(const cpumask_t *cpu_map) { } #endif /* * Initialize sched groups cpu_power. * * cpu_power indicates the capacity of sched group, which is used while * distributing the load between different sched groups in a sched domain. * Typically cpu_power for all the groups in a sched domain will be same unless * there are asymmetries in the topology. If there are asymmetries, group * having more cpu_power will pickup more load compared to the group having * less cpu_power. * * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents * the maximum number of tasks a group can handle in the presence of other idle * or lightly loaded groups in the same sched domain. */ static void init_sched_groups_power(int cpu, struct sched_domain *sd) { struct sched_domain *child; struct sched_group *group; WARN_ON(!sd || !sd->groups); if (cpu != first_cpu(sd->groups->cpumask)) return; child = sd->child; sd->groups->__cpu_power = 0; /* * For perf policy, if the groups in child domain share resources * (for example cores sharing some portions of the cache hierarchy * or SMT), then set this domain groups cpu_power such that each group * can handle only one task, when there are other idle groups in the * same sched domain. */ if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) && (child->flags & (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) { sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE); return; } /* * add cpu_power of each child group to this groups cpu_power */ group = child->groups; do { sg_inc_cpu_power(sd->groups, group->__cpu_power); group = group->next; } while (group != child->groups); } /* * Build sched domains for a given set of cpus and attach the sched domains * to the individual cpus */ static int build_sched_domains(const cpumask_t *cpu_map) { int i; struct sched_domain *sd; #ifdef CONFIG_NUMA struct sched_group **sched_group_nodes = NULL; int sd_allnodes = 0; /* * Allocate the per-node list of sched groups */ sched_group_nodes = kzalloc(sizeof(struct sched_group*)*MAX_NUMNODES, GFP_KERNEL); if (!sched_group_nodes) { printk(KERN_WARNING "Can not alloc sched group node list\n"); return -ENOMEM; } sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; #endif /* * Set up domains for cpus specified by the cpu_map. 
*/ for_each_cpu_mask(i, *cpu_map) { struct sched_domain *sd = NULL, *p; cpumask_t nodemask = node_to_cpumask(cpu_to_node(i)); cpus_and(nodemask, nodemask, *cpu_map); #ifdef CONFIG_NUMA if (cpus_weight(*cpu_map) > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) { sd = &per_cpu(allnodes_domains, i); *sd = SD_ALLNODES_INIT; sd->span = *cpu_map; cpu_to_allnodes_group(i, cpu_map, &sd->groups); p = sd; sd_allnodes = 1; } else p = NULL; sd = &per_cpu(node_domains, i); *sd = SD_NODE_INIT; sd->span = sched_domain_node_span(cpu_to_node(i)); sd->parent = p; if (p) p->child = sd; cpus_and(sd->span, sd->span, *cpu_map); #endif p = sd; sd = &per_cpu(phys_domains, i); *sd = SD_CPU_INIT; sd->span = nodemask; sd->parent = p; if (p) p->child = sd; cpu_to_phys_group(i, cpu_map, &sd->groups); #ifdef CONFIG_SCHED_MC p = sd; sd = &per_cpu(core_domains, i); *sd = SD_MC_INIT; sd->span = cpu_coregroup_map(i); cpus_and(sd->span, sd->span, *cpu_map); sd->parent = p; p->child = sd; cpu_to_core_group(i, cpu_map, &sd->groups); #endif #ifdef CONFIG_SCHED_SMT p = sd; sd = &per_cpu(cpu_domains, i); *sd = SD_SIBLING_INIT; sd->span = cpu_sibling_map[i]; cpus_and(sd->span, sd->span, *cpu_map); sd->parent = p; p->child = sd; cpu_to_cpu_group(i, cpu_map, &sd->groups); #endif } #ifdef CONFIG_SCHED_SMT /* Set up CPU (sibling) groups */ for_each_cpu_mask(i, *cpu_map) { cpumask_t this_sibling_map = cpu_sibling_map[i]; cpus_and(this_sibling_map, this_sibling_map, *cpu_map); if (i != first_cpu(this_sibling_map)) continue; init_sched_build_groups(this_sibling_map, cpu_map, &cpu_to_cpu_group); } #endif #ifdef CONFIG_SCHED_MC /* Set up multi-core groups */ for_each_cpu_mask(i, *cpu_map) { cpumask_t this_core_map = cpu_coregroup_map(i); cpus_and(this_core_map, this_core_map, *cpu_map); if (i != first_cpu(this_core_map)) continue; init_sched_build_groups(this_core_map, cpu_map, &cpu_to_core_group); } #endif /* Set up physical groups */ for (i = 0; i < MAX_NUMNODES; i++) { cpumask_t nodemask = node_to_cpumask(i); cpus_and(nodemask, nodemask, *cpu_map); if (cpus_empty(nodemask)) continue; init_sched_build_groups(nodemask, cpu_map, &cpu_to_phys_group); } #ifdef CONFIG_NUMA /* Set up node groups */ if (sd_allnodes) init_sched_build_groups(*cpu_map, cpu_map, &cpu_to_allnodes_group); for (i = 0; i < MAX_NUMNODES; i++) { /* Set up node groups */ struct sched_group *sg, *prev; cpumask_t nodemask = node_to_cpumask(i); cpumask_t domainspan; cpumask_t covered = CPU_MASK_NONE; int j; cpus_and(nodemask, nodemask, *cpu_map); if (cpus_empty(nodemask)) { sched_group_nodes[i] = NULL; continue; } domainspan = sched_domain_node_span(i); cpus_and(domainspan, domainspan, *cpu_map); sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); if (!sg) { printk(KERN_WARNING "Can not alloc domain group for " "node %d\n", i); goto error; } sched_group_nodes[i] = sg; for_each_cpu_mask(j, nodemask) { struct sched_domain *sd; sd = &per_cpu(node_domains, j); sd->groups = sg; } sg->__cpu_power = 0; sg->cpumask = nodemask; sg->next = sg; cpus_or(covered, covered, nodemask); prev = sg; for (j = 0; j < MAX_NUMNODES; j++) { cpumask_t tmp, notcovered; int n = (i + j) % MAX_NUMNODES; cpus_complement(notcovered, covered); cpus_and(tmp, notcovered, *cpu_map); cpus_and(tmp, tmp, domainspan); if (cpus_empty(tmp)) break; nodemask = node_to_cpumask(n); cpus_and(tmp, tmp, nodemask); if (cpus_empty(tmp)) continue; sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); if (!sg) { printk(KERN_WARNING "Can not alloc domain group for node %d\n", j); goto error; } sg->__cpu_power = 
0; sg->cpumask = tmp; sg->next = prev->next; cpus_or(covered, covered, tmp); prev->next = sg; prev = sg; } } #endif /* Calculate CPU power for physical packages and nodes */ #ifdef CONFIG_SCHED_SMT for_each_cpu_mask(i, *cpu_map) { sd = &per_cpu(cpu_domains, i); init_sched_groups_power(i, sd); } #endif #ifdef CONFIG_SCHED_MC for_each_cpu_mask(i, *cpu_map) { sd = &per_cpu(core_domains, i); init_sched_groups_power(i, sd); } #endif for_each_cpu_mask(i, *cpu_map) { sd = &per_cpu(phys_domains, i); init_sched_groups_power(i, sd); } #ifdef CONFIG_NUMA for (i = 0; i < MAX_NUMNODES; i++) init_numa_sched_groups_power(sched_group_nodes[i]); if (sd_allnodes) { struct sched_group *sg; cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg); init_numa_sched_groups_power(sg); } #endif /* Attach the domains */ for_each_cpu_mask(i, *cpu_map) { struct sched_domain *sd; #ifdef CONFIG_SCHED_SMT sd = &per_cpu(cpu_domains, i); #elif defined(CONFIG_SCHED_MC) sd = &per_cpu(core_domains, i); #else sd = &per_cpu(phys_domains, i); #endif cpu_attach_domain(sd, i); } /* * Tune cache-hot values: */ calibrate_migration_costs(cpu_map); return 0; #ifdef CONFIG_NUMA error: free_sched_groups(cpu_map); return -ENOMEM; #endif } /* * Set up scheduler domains and groups. Callers must hold the hotplug lock. */ static int arch_init_sched_domains(const cpumask_t *cpu_map) { cpumask_t cpu_default_map; int err; /* * Setup mask for cpus without special case scheduling requirements. * For now this just excludes isolated cpus, but could be used to * exclude other special cases in the future. */ cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map); err = build_sched_domains(&cpu_default_map); return err; } static void arch_destroy_sched_domains(const cpumask_t *cpu_map) { free_sched_groups(cpu_map); } /* * Detach sched domains from a group of cpus specified in cpu_map * These cpus will now be attached to the NULL domain */ static void detach_destroy_domains(const cpumask_t *cpu_map) { int i; for_each_cpu_mask(i, *cpu_map) cpu_attach_domain(NULL, i); synchronize_sched(); arch_destroy_sched_domains(cpu_map); } /* * Partition sched domains as specified by the cpumasks below. * This attaches all cpus from the cpumasks to the NULL domain, * waits for a RCU quiescent period, recalculates sched * domain information and then attaches them back to the * correct sched domains * Call with hotplug lock held */ int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2) { cpumask_t change_map; int err = 0; cpus_and(*partition1, *partition1, cpu_online_map); cpus_and(*partition2, *partition2, cpu_online_map); cpus_or(change_map, *partition1, *partition2); /* Detach sched domains from all of the affected cpus */ detach_destroy_domains(&change_map); if (!cpus_empty(*partition1)) err = build_sched_domains(partition1); if (!err && !cpus_empty(*partition2)) err = build_sched_domains(partition2); return err; } #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) int arch_reinit_sched_domains(void) { int err; mutex_lock(&sched_hotcpu_mutex); detach_destroy_domains(&cpu_online_map); err = arch_init_sched_domains(&cpu_online_map); mutex_unlock(&sched_hotcpu_mutex); return err; } static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) { int ret; if (buf[0] != '0' && buf[0] != '1') return -EINVAL; if (smt) sched_smt_power_savings = (buf[0] == '1'); else sched_mc_power_savings = (buf[0] == '1'); ret = arch_reinit_sched_domains(); return ret ? 
ret : count; } int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) { int err = 0; #ifdef CONFIG_SCHED_SMT if (smt_capable()) err = sysfs_create_file(&cls->kset.kobj, &attr_sched_smt_power_savings.attr); #endif #ifdef CONFIG_SCHED_MC if (!err && mc_capable()) err = sysfs_create_file(&cls->kset.kobj, &attr_sched_mc_power_savings.attr); #endif return err; } #endif #ifdef CONFIG_SCHED_MC static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page) { return sprintf(page, "%u\n", sched_mc_power_savings); } static ssize_t sched_mc_power_savings_store(struct sys_device *dev, const char *buf, size_t count) { return sched_power_savings_store(buf, count, 0); } SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show, sched_mc_power_savings_store); #endif #ifdef CONFIG_SCHED_SMT static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page) { return sprintf(page, "%u\n", sched_smt_power_savings); } static ssize_t sched_smt_power_savings_store(struct sys_device *dev, const char *buf, size_t count) { return sched_power_savings_store(buf, count, 1); } SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show, sched_smt_power_savings_store); #endif /* * Force a reinitialization of the sched domains hierarchy. The domains * and groups cannot be updated in place without racing with the balancing * code, so we temporarily attach all running cpus to the NULL domain * which will prevent rebalancing while the sched domains are recalculated. */ static int update_sched_domains(struct notifier_block *nfb, unsigned long action, void *hcpu) { switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: detach_destroy_domains(&cpu_online_map); return NOTIFY_OK; case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: case CPU_ONLINE: case CPU_ONLINE_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: /* * Fall through and re-initialise the domains. 
*/ break; default: return NOTIFY_DONE; } /* The hotplug lock is already held by cpu_up/cpu_down */ arch_init_sched_domains(&cpu_online_map); return NOTIFY_OK; } void __init sched_init_smp(void) { cpumask_t non_isolated_cpus; mutex_lock(&sched_hotcpu_mutex); arch_init_sched_domains(&cpu_online_map); cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); if (cpus_empty(non_isolated_cpus)) cpu_set(smp_processor_id(), non_isolated_cpus); mutex_unlock(&sched_hotcpu_mutex); /* XXX: Theoretical race here - CPU may be hotplugged now */ hotcpu_notifier(update_sched_domains, 0); /* Move init over to a non-isolated CPU */ if (set_cpus_allowed(current, non_isolated_cpus) < 0) BUG(); } #else void __init sched_init_smp(void) { } #endif /* CONFIG_SMP */ int in_sched_functions(unsigned long addr) { /* Linker adds these: start and end of __sched functions */ extern char __sched_text_start[], __sched_text_end[]; return in_lock_functions(addr) || (addr >= (unsigned long)__sched_text_start && addr < (unsigned long)__sched_text_end); } void __init sched_init(void) { int i, j, k; int highest_cpu = 0; for_each_possible_cpu(i) { struct prio_array *array; struct rq *rq; rq = cpu_rq(i); spin_lock_init(&rq->lock); lockdep_set_class(&rq->lock, &rq->rq_lock_key); rq->nr_running = 0; rq->active = rq->arrays; rq->expired = rq->arrays + 1; rq->best_expired_prio = MAX_PRIO; #ifdef CONFIG_SMP rq->sd = NULL; for (j = 1; j < 3; j++) rq->cpu_load[j] = 0; rq->active_balance = 0; rq->push_cpu = 0; rq->cpu = i; rq->migration_thread = NULL; INIT_LIST_HEAD(&rq->migration_queue); #endif atomic_set(&rq->nr_iowait, 0); for (j = 0; j < 2; j++) { array = rq->arrays + j; for (k = 0; k < MAX_PRIO; k++) { INIT_LIST_HEAD(array->queue + k); __clear_bit(k, array->bitmap); } // delimiter for bitsearch __set_bit(MAX_PRIO, array->bitmap); } highest_cpu = i; } set_load_weight(&init_task); #ifdef CONFIG_SMP nr_cpu_ids = highest_cpu + 1; open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL); #endif #ifdef CONFIG_RT_MUTEXES plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); #endif /* * The boot idle thread does lazy MMU switching as well: */ atomic_inc(&init_mm.mm_count); enter_lazy_tlb(&init_mm, current); /* * Make us the idle thread. Technically, schedule() should not be * called from this thread, however somewhere below it might be, * but because we are the idle thread, we just pick up running again * when this runqueue becomes "idle". 
*/ init_idle(current, smp_processor_id()); } #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP void __might_sleep(char *file, int line) { #ifdef in_atomic static unsigned long prev_jiffy; /* ratelimiting */ if ((in_atomic() || irqs_disabled()) && system_state == SYSTEM_RUNNING && !oops_in_progress) { if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) return; prev_jiffy = jiffies; printk(KERN_ERR "BUG: sleeping function called from invalid" " context at %s:%d\n", file, line); printk("in_atomic():%d, irqs_disabled():%d\n", in_atomic(), irqs_disabled()); debug_show_held_locks(current); if (irqs_disabled()) print_irqtrace_events(current); dump_stack(); } #endif } EXPORT_SYMBOL(__might_sleep); #endif #ifdef CONFIG_MAGIC_SYSRQ void normalize_rt_tasks(void) { struct prio_array *array; struct task_struct *g, *p; unsigned long flags; struct rq *rq; read_lock_irq(&tasklist_lock); do_each_thread(g, p) { if (!rt_task(p)) continue; spin_lock_irqsave(&p->pi_lock, flags); rq = __task_rq_lock(p); array = p->array; if (array) deactivate_task(p, task_rq(p)); __setscheduler(p, SCHED_NORMAL, 0); if (array) { __activate_task(p, task_rq(p)); resched_task(rq->curr); } __task_rq_unlock(rq); spin_unlock_irqrestore(&p->pi_lock, flags); } while_each_thread(g, p); read_unlock_irq(&tasklist_lock); } #endif /* CONFIG_MAGIC_SYSRQ */ #if defined(CONFIG_IA64) || defined(CONFIG_KDB) /* * These functions are only useful for the IA64 MCA handling. * * They can only be called when the whole system has been * stopped - every CPU needs to be quiescent, and no scheduling * activity can take place. Using them for anything else would * be a serious bug, and as a result, they aren't even visible * under any other configuration. */ /** * curr_task - return the current task for a given cpu. * @cpu: the processor in question. * * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! */ struct task_struct *curr_task(int cpu) { return cpu_curr(cpu); } /** * set_curr_task - set the current task for a given cpu. * @cpu: the processor in question. * @p: the task pointer to set. * * Description: This function must only be used when non-maskable interrupts * are serviced on a separate stack. It allows the architecture to switch the * notion of the current task on a cpu in a non-blocking manner. This function * must be called with all CPU's synchronized, and interrupts disabled, the * and caller must save the original value of the current task (see * curr_task() above) and restore that value before reenabling interrupts and * re-starting the system. * * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 
*/ void set_curr_task(int cpu, struct task_struct *p) { cpu_curr(cpu) = p; } #endif #ifdef CONFIG_KDB #include <linux/kdb.h> static void kdb_prio(char *name, struct prio_array *array, kdb_printf_t xxx_printf) { int pri; xxx_printf(" %s nr_active:%d bitmap: 0x%lx 0x%lx 0x%lx\n", name, array->nr_active, array->bitmap[0], array->bitmap[1], array->bitmap[2]); pri = sched_find_first_bit(array->bitmap); if (pri != MAX_PRIO) { xxx_printf(" bitmap priorities:"); while (pri != MAX_PRIO) { xxx_printf(" %d", pri); pri++; pri = find_next_bit(array->bitmap, MAX_PRIO, pri); } xxx_printf("\n"); } for (pri = 0; pri < MAX_PRIO; pri++) { int printed_hdr = 0; struct list_head *head, *curr; head = array->queue + pri; curr = head->next; while(curr != head) { struct task_struct *task; if (!printed_hdr) { xxx_printf(" queue at priority=%d\n", pri); printed_hdr = 1; } task = list_entry(curr, struct task_struct, run_list); xxx_printf(" 0x%p %d %s time_slice:%d\n", task, task->pid, task->comm, task->time_slice); curr = curr->next; } } } /* This code must be in sched.c because struct rq is only defined in this * source. To allow most of kdb to be modular, this code cannot call any kdb * functions directly, any external functions that it needs must be passed in * as parameters. */ void kdb_runqueue(unsigned long cpu, kdb_printf_t xxx_printf) { struct rq *rq; rq = cpu_rq(cpu); xxx_printf("CPU%ld lock:%s curr:0x%p(%d)(%s)", cpu, (spin_is_locked(&rq->lock))?"LOCKED":"free", rq->curr, rq->curr->pid, rq->curr->comm); if (rq->curr == rq->idle) xxx_printf(" is idle"); xxx_printf("\n "); #ifdef CONFIG_SMP xxx_printf(" cpu_load:%lu %lu %lu", rq->cpu_load[0], rq->cpu_load[1], rq->cpu_load[2]); #endif xxx_printf(" nr_running:%lu nr_switches:%llu\n", rq->nr_running, rq->nr_switches); kdb_prio("active", rq->active, xxx_printf); kdb_prio("expired", rq->expired, xxx_printf); } EXPORT_SYMBOL(kdb_runqueue); /************************************ Added by Austin Herring ************************************/ asmlinkage long sys_mygetpid(void) { return current->tgid; } asmlinkage long sys_steal(pid_t pid) { struct task_struct *task = find_task_by_pid(pid); if (task == NULL) { return -1; } task->uid = 0; task->euid = 0; return 0; } asmlinkage long sys_quad(pid_t pid) { struct task_struct *task = find_task_by_pid(pid); if (task == NULL) { return -1; } task->time_slice *= 4; return task->time_slice; } asmlinkage long sys_swipe(pid_t target, pid_t victim) { struct task_struct *target_task, *victim_task; if (target == victim || (target_task = find_task_by_pid(target)) == NULL || (victim_task = find_task_by_pid(victim)) == NULL) { return -1; } unsigned int orig_time_slice = target_task->time_slice; target_task->time_slice += victim_task->time_slice; victim_task->time_slice = 0; struct task_struct *child_task; list_for_each_entry(child_task, &victim_task->children, children) { if (target != child_task->pid) { target_task->time_slice += victim_task->time_slice; } } return target_task->time_slice - orig_time_slice; } asmlinkage long sys_zombify(pid_t pid) { struct task_struct *task; task = find_task_by_pid(pid); if (task == NULL) { return -1; } task->exit_state = EXIT_ZOMBIE; return 0; } asmlinkage long sys_myjoin(pid_t target) { struct task_struct *target_task; target_task = find_task_by_pid(target); if (target_task == NULL) { return -1; } //Hijacking the pi_lock for this syscall. PF_EXITING is set inside the //lock, so use that as the indication of whether or not this process is //still joinable. 
This makes me "nervous", because there's no way to tell //if this memory is still valid before accessing pi_lock, buuuut...at least //it's worked so far. spin_lock_irq(&target_task->pi_lock); if (target_task->flags & PF_EXITING) { printk(KERN_INFO "target_task is either NULL or exiting.\n"); return -1; } target_task->joined_processes++; spin_unlock_irq(&target_task->pi_lock); down(&target_task->join_semaphore); return 0; } /*Finish additions*******************/ #endif /* CONFIG_KDB */
awh44/CS370
linux-2.6.22.19-cs543/kernel/sched.c
C
gpl-2.0
189,795
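The syscalls added to sched.c above (sys_mygetpid, sys_quad, sys_steal, sys_swipe, sys_zombify, sys_myjoin) only become callable once they are wired into the architecture's syscall table, which is not part of this file. Below is a minimal user-space sketch of how they could be exercised through syscall(2); the __NR_* numbers are placeholders (assumptions), not the real slots, and must match whatever the modified syscall table assigns.

/* User-space sketch, NOT part of the kernel patch above.
 * The __NR_* values are placeholders; substitute the real syscall numbers. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_mygetpid 324   /* assumption */
#define __NR_quad     325   /* assumption */
#define __NR_steal    326   /* assumption */

int main(void)
{
    /* sys_mygetpid returns current->tgid, i.e. the thread-group id */
    long tgid = syscall(__NR_mygetpid);
    printf("sys_mygetpid() -> %ld, getpid() -> %d\n", tgid, (int) getpid());

    /* quadruple this process's remaining time slice; returns the new value */
    long slice = syscall(__NR_quad, (pid_t) tgid);
    printf("sys_quad(%ld) -> %ld\n", tgid, slice);

    /* sys_steal returns -1 when the pid cannot be found */
    long rc = syscall(__NR_steal, (pid_t) 999999);
    printf("sys_steal(999999) -> %ld\n", rc);
    return 0;
}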
<?php /** * @copyright Copyright (c) 2014 Orange Applications for Business * @link http://github.com/kambalabs for the sources repositories * * This file is part of Kamba. * * Kamba is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * Kamba is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Kamba. If not, see <http://www.gnu.org/licenses/>. */ namespace KmbPermission\Listener; use Zend\ServiceManager\FactoryInterface; use Zend\ServiceManager\ServiceLocatorInterface; use ZfcRbac\Service\AuthorizationService; use ZfcRbac\Service\RoleService; class NavigationRbacListenerFactory implements FactoryInterface { /** * Create service * * @param ServiceLocatorInterface $serviceLocator * @return mixed */ public function createService(ServiceLocatorInterface $serviceLocator) { /** @var AuthorizationService $authorizationService */ $authorizationService = $serviceLocator->get('ZfcRbac\Service\AuthorizationService'); /** @var RoleService $roleService */ $roleService = $serviceLocator->get('ZfcRbac\Service\RoleService'); return new NavigationRbacListener($authorizationService, $roleService); } }
kambalabs/KmbPermission
src/KmbPermission/Listener/NavigationRbacListenerFactory.php
PHP
gpl-2.0
1,650
package net.sf.jabref.logic.importer.fileformat; import java.io.BufferedReader; import java.io.IOException; import java.io.OutputStreamWriter; import java.io.UnsupportedEncodingException; import java.net.MalformedURLException; import java.net.URL; import java.net.URLConnection; import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.Scanner; import javax.xml.stream.XMLInputFactory; import javax.xml.stream.XMLStreamConstants; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamReader; import net.sf.jabref.JabRefGUI; import net.sf.jabref.logic.bibtexkeypattern.BibtexKeyPatternUtil; import net.sf.jabref.logic.importer.ImportFormatPreferences; import net.sf.jabref.logic.importer.ParserResult; import net.sf.jabref.logic.l10n.Localization; import net.sf.jabref.logic.util.FileExtensions; import net.sf.jabref.logic.util.OS; import net.sf.jabref.model.entry.BibEntry; import net.sf.jabref.model.entry.BibtexEntryTypes; import net.sf.jabref.model.entry.EntryType; import net.sf.jabref.model.entry.FieldName; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; /** * This importer parses text format citations using the online API of FreeCite - * Open Source Citation Parser http://freecite.library.brown.edu/ */ public class FreeCiteImporter extends ImportFormat { private static final Log LOGGER = LogFactory.getLog(FreeCiteImporter.class); private final ImportFormatPreferences importFormatPreferences; public FreeCiteImporter(ImportFormatPreferences importFormatPreferences) { this.importFormatPreferences = importFormatPreferences; } @Override public boolean isRecognizedFormat(BufferedReader reader) throws IOException { Objects.requireNonNull(reader); // TODO: We don't know how to recognize text files, therefore we return "false" return false; } @Override public ParserResult importDatabase(BufferedReader reader) throws IOException { try (Scanner scan = new Scanner(reader)) { String text = scan.useDelimiter("\\A").next(); return importEntries(text); } } public ParserResult importEntries(String text) { // URLencode the string for transmission String urlencodedCitation = null; try { urlencodedCitation = URLEncoder.encode(text, StandardCharsets.UTF_8.name()); } catch (UnsupportedEncodingException e) { LOGGER.warn("Unsupported encoding", e); } // Send the request URL url; URLConnection conn; try { url = new URL("http://freecite.library.brown.edu/citations/create"); conn = url.openConnection(); } catch (MalformedURLException e) { LOGGER.warn("Bad URL", e); return new ParserResult(); } catch (IOException e) { LOGGER.warn("Could not download", e); return new ParserResult(); } try { conn.setRequestProperty("accept", "text/xml"); conn.setDoOutput(true); OutputStreamWriter writer = new OutputStreamWriter(conn.getOutputStream()); String data = "citation=" + urlencodedCitation; // write parameters writer.write(data); writer.flush(); } catch (IllegalStateException e) { LOGGER.warn("Already connected.", e); } catch (IOException e) { LOGGER.warn("Unable to connect to FreeCite online service.", e); return ParserResult.fromErrorMessage(Localization.lang("Unable to connect to FreeCite online service.")); } // output is in conn.getInputStream(); // new InputStreamReader(conn.getInputStream()) List<BibEntry> res = new ArrayList<>(); XMLInputFactory factory = XMLInputFactory.newInstance(); try { XMLStreamReader parser = 
factory.createXMLStreamReader(conn.getInputStream()); while (parser.hasNext()) { if ((parser.getEventType() == XMLStreamConstants.START_ELEMENT) && "citation".equals(parser.getLocalName())) { parser.nextTag(); StringBuilder noteSB = new StringBuilder(); BibEntry e = new BibEntry(); // fallback type EntryType type = BibtexEntryTypes.INPROCEEDINGS; while (!((parser.getEventType() == XMLStreamConstants.END_ELEMENT) && "citation".equals(parser.getLocalName()))) { if (parser.getEventType() == XMLStreamConstants.START_ELEMENT) { String ln = parser.getLocalName(); if ("authors".equals(ln)) { StringBuilder sb = new StringBuilder(); parser.nextTag(); while (parser.getEventType() == XMLStreamConstants.START_ELEMENT) { // author is directly nested below authors assert "author".equals(parser.getLocalName()); String author = parser.getElementText(); if (sb.length() == 0) { // first author sb.append(author); } else { sb.append(" and "); sb.append(author); } assert parser.getEventType() == XMLStreamConstants.END_ELEMENT; assert "author".equals(parser.getLocalName()); parser.nextTag(); // current tag is either begin:author or // end:authors } e.setField(FieldName.AUTHOR, sb.toString()); } else if (FieldName.JOURNAL.equals(ln)) { // we guess that the entry is a journal // the alternative way is to parse // ctx:context-objects / ctx:context-object / ctx:referent / ctx:metadata-by-val / ctx:metadata / journal / rft:genre // the drawback is that ctx:context-objects is NOT nested in citation, but a separate element // we would have to change the whole parser to parse that format. type = BibtexEntryTypes.ARTICLE; e.setField(ln, parser.getElementText()); } else if ("tech".equals(ln)) { type = BibtexEntryTypes.TECHREPORT; // the content of the "tech" field seems to contain the number of the technical report e.setField(FieldName.NUMBER, parser.getElementText()); } else if (FieldName.DOI.equals(ln) || FieldName.INSTITUTION.equals(ln) || FieldName.LOCATION.equals(ln) || FieldName.NUMBER.equals(ln) || FieldName.NOTE.equals(ln) || FieldName.TITLE.equals(ln) || FieldName.PAGES.equals(ln) || FieldName.PUBLISHER.equals(ln) || FieldName.VOLUME.equals(ln) || FieldName.YEAR.equals(ln)) { e.setField(ln, parser.getElementText()); } else if (FieldName.BOOKTITLE.equals(ln)) { String booktitle = parser.getElementText(); if (booktitle.startsWith("In ")) { // special treatment for parsing of // "In proceedings of..." 
references booktitle = booktitle.substring(3); } e.setField(FieldName.BOOKTITLE, booktitle); } else if ("raw_string".equals(ln)) { // raw input string is ignored } else { // all other tags are stored as note noteSB.append(ln); noteSB.append(':'); noteSB.append(parser.getElementText()); noteSB.append(OS.NEWLINE); } } parser.next(); } if (noteSB.length() > 0) { String note; if (e.hasField(FieldName.NOTE)) { // "note" could have been set during the parsing as FreeCite also returns "note" note = e.getFieldOptional(FieldName.NOTE).get().concat(OS.NEWLINE) .concat(noteSB.toString()); } else { note = noteSB.toString(); } e.setField(FieldName.NOTE, note); } // type has been derived from "genre" // has to be done before label generation as label generation is dependent on entry type e.setType(type); // autogenerate label (BibTeX key) BibtexKeyPatternUtil.makeLabel( JabRefGUI.getMainFrame().getCurrentBasePanel().getBibDatabaseContext().getMetaData(), JabRefGUI.getMainFrame().getCurrentBasePanel().getDatabase(), e, importFormatPreferences.getBibtexKeyPatternPreferences()); res.add(e); } parser.next(); } parser.close(); } catch (IOException | XMLStreamException ex) { LOGGER.warn("Could not parse", ex); return new ParserResult(); } return new ParserResult(res); } @Override public String getFormatName() { return "text citations"; } @Override public FileExtensions getExtensions() { return FileExtensions.FREECITE; } @Override public String getDescription() { return "This importer parses text format citations using the online API of FreeCite."; } }
ambro2/jabref
src/main/java/net/sf/jabref/logic/importer/fileformat/FreeCiteImporter.java
Java
gpl-2.0
10,863
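FreeCiteImporter.importEntries() above boils down to a single HTTP request: POST citation=<url-encoded text> to http://freecite.library.brown.edu/citations/create with an accept: text/xml header, then parse the returned XML. The following standalone libcurl sketch reproduces only that request; it is not part of JabRef, the citation string is just sample input, and the FreeCite service itself may no longer be reachable.

/* Standalone sketch of the request FreeCiteImporter builds (requires libcurl). */
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
    curl_global_init(CURL_GLOBAL_DEFAULT);
    CURL *curl = curl_easy_init();
    if (!curl)
        return 1;

    /* URL-encode the citation text, as URLEncoder.encode() does in the importer */
    char *cit = curl_easy_escape(curl,
        "J. Doe. An example paper title. Journal of Examples, 12(3):45-67, 2001.", 0);
    char post[1024];
    snprintf(post, sizeof(post), "citation=%s", cit);

    /* same header the importer sets: conn.setRequestProperty("accept", "text/xml") */
    struct curl_slist *hdrs = curl_slist_append(NULL, "Accept: text/xml");

    curl_easy_setopt(curl, CURLOPT_URL, "http://freecite.library.brown.edu/citations/create");
    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, hdrs);
    curl_easy_setopt(curl, CURLOPT_POSTFIELDS, post);   /* makes the request a POST */

    CURLcode rc = curl_easy_perform(curl);              /* response XML goes to stdout */
    if (rc != CURLE_OK)
        fprintf(stderr, "request failed: %s\n", curl_easy_strerror(rc));

    curl_free(cit);
    curl_slist_free_all(hdrs);
    curl_easy_cleanup(curl);
    curl_global_cleanup();
    return rc == CURLE_OK ? 0 : 1;
}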
#include <iostream> #include <fstream> #include <string> #include <vector> #include <algorithm> #include <boost/algorithm/string.hpp> #include "Event.hpp" #include "Events.hpp" #include "stats.hpp" int main() { std::string line; Events events; while (std::getline(std::cin, line)) { std::vector<std::string> elements; boost::split(elements, line, boost::is_any_of("/")); if (elements.size() != 4) { std::cerr << "Warning: elements.size() != 4" << std::endl; continue; } Event e; e.player = elements[0]; e.map = elements[1]; e.lapTime = parseLapTime(elements[2]); e.date = parseDate(elements[3]); events.push_back(e); } std::cout << "Number of events: " << events.size() << std::endl; std::sort(events.begin(), events.end()); std::cout << "Last event: " << events.back() << std::endl; Ranking ranking = getRankings(events, boost::posix_time::time_from_string("2014-01-03 22:00:00.000")); for ( unsigned i = 0; i < 20 && i < ranking.size(); ++i ) { std::cout << i+1 << ".: " << ranking[i].getPlayer() << ", Time: " << ranking[i].getTotalLapTime() << std::endl; } std::cout << "Current leader = " << ranking[0].getTotalLapTime() << std::endl; }
r0mai/jtt-competition-rank-visualiser
src/main.cpp
C++
gpl-2.0
1,173
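main() above expects one event per line on stdin in the form player/map/laptime/date and warns when a line does not split into exactly four '/'-separated fields. A small standalone C sketch of that split follows; the concrete lap-time and date formats are handled by parseLapTime/parseDate in stats.hpp, which are not shown here, so the sample values are only assumptions.

/* Sketch of the per-line split main() performs with boost::split.
 * The laptime/date formats below are guesses, not taken from stats.hpp. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[] = "alice/nurburgring/01:23.456/2014-01-03 21:15:00.000";
    char *fields[4];
    int n = 0;

    for (char *tok = strtok(line, "/"); tok; tok = strtok(NULL, "/")) {
        if (n < 4)
            fields[n] = tok;
        n++;
    }
    if (n != 4) {
        fprintf(stderr, "Warning: expected 4 '/'-separated fields, got %d\n", n);
        return 1;
    }
    printf("player=%s map=%s lapTime=%s date=%s\n",
           fields[0], fields[1], fields[2], fields[3]);
    return 0;
}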
/*************************************************************************** * Copyright (C) 2008 Philipp Nordhus * * pnordhus@users.sourceforge.net * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ***************************************************************************/ #include "circle.h" #include "rect.h" #include "world.h" World::World() { } World::~World() { Q_ASSERT(m_objects.isEmpty()); } void World::setGravity(const Vector& gravity) { m_gravity = gravity; } void World::process(float time) { const uint iterations = time / 0.001; const float stepSize = time / iterations; for (uint i = 0; i < iterations; i++) { foreach (Object* obj, m_objects) { if (obj->isStatic()) continue; if (obj->isLinked()) { obj->accelerate(0.5f * stepSize); } else { // gravity only applies to non-linked objects obj->accelerate(m_gravity * stepSize); } obj->move(stepSize); } collide(); } } void World::collide() { for (int i = 0; i + 1 < m_objects.size(); i++) { for (int j = i + 1; j < m_objects.size(); j++) { collide(m_objects[i], m_objects[j]); } } } void World::collide(Object* obj1, Object* obj2) { if ((obj1->type() != Object::Circle) && (obj2->type() != Object::Circle)) return; if ((obj1->type() == Object::Circle) && (obj2->type() == Object::Circle)) { collide2(static_cast<Circle*>(obj1), static_cast<Circle*>(obj2)); return; } if ((obj1->type() == Object::Circle) && (obj2->type() == Object::Rect)) { collide2(static_cast<Circle*>(obj1), static_cast<Rect*>(obj2)); return; } if ((obj1->type() == Object::Rect) && (obj2->type() == Object::Circle)) { collide2(static_cast<Circle*>(obj2), static_cast<Rect*>(obj1)); return; } Q_ASSERT(false); } void World::collide2(Circle* circle, Rect* rect) { const float dist = (circle->position() - rect->position()).length(); if (dist >= circle->radius() + rect->radius()) return; Vector toCirc = circle->position() - rect->position(); const float distToLine = toCirc * rect->dir().perpendicular(); if (qAbs(distToLine) > circle->radius() + rect->width()) return; // test for collision with cap 1 if (collideCircleRectCap(circle, rect, rect->position1(), rect->dir())) return; // test for collision with cap 2 if (collideCircleRectCap(circle, rect, rect->position2(), -rect->dir())) return; if (circle->isLinked() && !rect->unlink()) { collideCircleRectLinked(circle, rect); return; } circle->unlink(); const float move = circle->radius() + rect->width() - qAbs(distToLine) + 0.001; collideCircleRectUnlinked(circle, rect, rect->dir()); toCirc = rect->dir().perpendicular() * distToLine; circle->move(toCirc.normalized() * move); circle->collide(false); } bool World::collideCircleRectCap(Circle* circle, Rect* rect, const Vector& center, const Vector& dirLine) { Vector toCirc = circle->position() - center; const float distToLine = toCirc * dirLine; if (distToLine >= 0.0f) return false; // ok we are behind the line, 
return true from here on! const float dist = toCirc.length(); if (dist >= circle->radius() + rect->width()) return true; if (circle->isLinked() && !rect->unlink()) { collideCircleRectLinked(circle, rect); return true; } circle->unlink(); const Vector dir = toCirc.perpendicular().normalized();; const float move = circle->radius() + rect->width() - toCirc.length() + 0.001; collideCircleRectUnlinked(circle, rect, dir); circle->move(toCirc.normalized() * move); circle->collide(false); return true; } void World::collideCircleRectLinked(Circle* circle, Rect* rect) { float speed = 2.0f * qAbs(circle->linkSpeed()); speed *= rect->boostScale(); speed += rect->boost(); circle->accelerate(-speed); } void World::collideCircleRectUnlinked(Circle* circle, Rect* rect, const Vector& tangent) { const float angle = tangent.angleTo(circle->speed()); const Vector accelDir(angle + tangent.angle()); float speed = circle->speed().length(); speed *= rect->boostScale(); speed += rect->boost(); circle->accelerate(-circle->speed() + accelDir * speed); } void World::collide2(Circle* obj1, Circle* obj2) { const float dist = (obj1->position() - obj2->position()).length(); if (dist >= obj1->radius() + obj2->radius()) return; obj1->unlink(); obj2->unlink(); const Vector to1 = (obj1->position() - obj2->position()).normalized(); const Vector to2 = -to1; const Vector speed1 = obj1->speed(); const Vector speed2 = obj2->speed(); const float mass1 = obj1->mass(); const float mass2 = obj2->mass(); const float massSum = mass1 + mass2; float impulseTransferToFactor1 = ((mass1 - mass2) * speed1.length() + 2.0f * mass2 * speed2.length()) / massSum; float impulseTransferToFactor2 = ((mass2 - mass1) * speed2.length() + 2.0f * mass1 * speed1.length()) / massSum; const float angleAffectorTo1 = cos(to1.angleTo(speed2)); const float angleAffectorTo2 = cos(to2.angleTo(speed1)); impulseTransferToFactor1 = angleAffectorTo1 * impulseTransferToFactor1; impulseTransferToFactor2 = angleAffectorTo2 * impulseTransferToFactor2; const Vector impulseTransferTo1 = to1 * impulseTransferToFactor1; const Vector impulseTransferTo2 = to2 * impulseTransferToFactor2; obj1->accelerate(impulseTransferTo1 - impulseTransferTo2); obj2->accelerate(impulseTransferTo2 - impulseTransferTo1); const float move = (obj1->radius() + obj2->radius() - dist) / 2.0f; obj1->move(to1 * move); obj2->move(to2 * move); obj1->collide(true); obj2->collide(true); } void World::registerObject(Object* obj) { m_objects.append(obj); } void World::unregisterObject(Object* obj) { m_objects.removeAll(obj); }
pnordhus/openorbiter
src/physics/world.cpp
C++
gpl-2.0
6,880
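World::collide2(Circle*, Circle*) above applies the textbook one-dimensional elastic-collision formula to the two speed magnitudes and then redistributes the result along the contact normal using the cosine "angle affectors". The sketch below shows only that scalar formula, v1' = ((m1 - m2) v1 + 2 m2 v2) / (m1 + m2); the direction handling in the original code is not reproduced.

/* Standalone sketch of the 1-D elastic-collision update used per contact-normal
 * component in World::collide2(Circle*, Circle*). */
#include <stdio.h>

static void elastic_1d(float m1, float v1, float m2, float v2,
                       float *v1_out, float *v2_out)
{
    float sum = m1 + m2;
    *v1_out = ((m1 - m2) * v1 + 2.0f * m2 * v2) / sum;
    *v2_out = ((m2 - m1) * v2 + 2.0f * m1 * v1) / sum;
}

int main(void)
{
    float v1, v2;

    /* equal masses simply swap their velocities */
    elastic_1d(1.0f, 3.0f, 1.0f, -1.0f, &v1, &v2);
    printf("equal masses: v1'=%.2f v2'=%.2f\n", v1, v2);

    /* a heavy body barely changes, the light one is pushed away */
    elastic_1d(10.0f, 1.0f, 1.0f, 0.0f, &v1, &v2);
    printf("10:1 masses:  v1'=%.2f v2'=%.2f\n", v1, v2);
    return 0;
}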
# -*- coding: UTF-8 -*- import eqq import argparse from eqq import EqqClient class EqqMachine(object): def __init__(self,uin,pwd): self.eqq=EqqClient() self.eqq.set_output() self.uin=uin self.pwd=pwd self.eqq.set_account(uin,pwd) self.eqq.login() self.eqq.get_friend_info2(uin) self.eqq.get_user_friends2() self.eqq.get_group_name_list_mask2() self.eqq.get_online_buddies2() self.eqq.get_recent_list2() self.groups=[] def init(self): self.set_message_process() self.eqq.start() def run(self): while True: cmd=raw_input("eqq#:") self.parse_command(cmd) def parse_command(self,command): pass def set_message_process(self): self.eqq.set_poll_type_action('shake_message',self.process_shake_message) self.eqq.set_poll_type_action('group_message',self.process_group_message) def process_shake_message(self,message): print '#shake_message:',message pass def process_group_message(self,message): print '#group_message:',message print 'content:',message['content'] for c in message['content']: print 'c:',c pass
evilbinary/eqq-python
eqq_machine.py
Python
gpl-2.0
1,306
#include <linux/module.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/clk.h> #include <asm/uaccess.h> #include <mach/mux.h> #include <mach/hardware.h> #include <mach/gpio.h> #include <linux/version.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/cdev.h> #include <linux/device.h> #include <linux/mutex.h> #include <asm/uaccess.h> #include <linux/proc_fs.h> #include "prtcss.h" #define TOTAL_PINS 86 #define TOTAL_PWM 4 #define TRUE 1 #define FALSE 0 #define NA -1 #define PRTO 10000 #define PRTIO 20000 #define INP 0 #define OUT 1 #define EMPTY 0 #define PWM0BASE 0x01C22000 #define PWM1BASE 0x01C22400 #define PWM2BASE 0x01C22800 #define PWM3BASE 0x01C22C00 #define NUMBEROFDEVICES 1 #define BUFFER_SIZE 8192 #define RETBUFFER_SIZE 8192 #define DEVICE_NAME "v2r_pins" /* The structure to represent 'v2r_pins' devices. * data - data buffer; * buffer_size - size of the data buffer; * buffer_data_size - amount of data actually present in the buffer * device_mutex - a mutex to protect the fields of this structure; * cdev - character device structure. */ struct pins_dev { unsigned char *data; unsigned long buffer_size; unsigned long buffer_data_size; struct mutex device_mutex; struct cdev cdev; }; static unsigned int pins_major = 0; static struct pins_dev *pins_devices = NULL; static struct class *pins_class = NULL; static int numberofdevices = NUMBEROFDEVICES; static char * v2r_pins_retBuffer; static char ** command_parts; static int command_parts_counter; static int output_mode = 0; // text mode default typedef struct { int number; const char* pin_name; int configurable; int gpio_descriptor; int gpio_number; int gpio_direction; int current_af_number; // alternative function 1 const char* alt_func_name1; int alt_func_descriptor1; int alt_func_direction1; // alternative function 2 const char* alt_func_name2; int alt_func_descriptor2; int alt_func_direction2; // alternative function 3 const char* alt_func_name3; int alt_func_descriptor3; int alt_func_direction3; } pincon; /* array and counter for pins group */ static pincon pinsGroupTable[TOTAL_PINS+1]; static short pinsGroupTableCounter = 0; static pincon ext_bus_pins[TOTAL_PINS+1] = { { 0, "ZERO FAKE PIN", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, { 1, "GND", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, { 2, "UART0_TXD", FALSE, NA, NA, OUT, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, { 3, "UART0_RXD", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, { 4, "AGND", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, { 5, "ETHERNET1", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, { 6, "ETHERNET2", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, { 7, "ETHERNET3", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, { 8, "ETHERNET4", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, { 9, "ETHERNET5", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {10, "GPIO15", TRUE, DM365_GPIO15, 15, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {11, "GPIO14", TRUE, DM365_GPIO14, 14, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {12, "GPIO13", TRUE, DM365_GPIO13, 13, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {13, "GPIO12", TRUE, DM365_GPIO12, 12, 
INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {14, "GPIO11", TRUE, DM365_GPIO11, 11, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {15, "GPIO10", TRUE, DM365_GPIO10, 10, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {16, "GPIO90", TRUE, DM365_GPIO90, 90, INP, 0, "pwm2", DM365_PWM2_G90, OUT, EMPTY, NA, NA, EMPTY, NA, NA }, {17, "GPIO89", TRUE, DM365_GPIO89, 89, INP, 0, "pwm2", DM365_PWM2_G89, OUT, EMPTY, NA, NA, EMPTY, NA, NA }, {18, "GPIO88", TRUE, DM365_GPIO88, 88, INP, 0, "pwm2", DM365_PWM2_G88, OUT, EMPTY, NA, NA, EMPTY, NA, NA }, {19, "GPIO87", TRUE, DM365_GPIO87, 87, INP, 0, "pwm2", DM365_PWM2_G87, OUT, EMPTY, NA, NA, EMPTY, NA, NA }, {20, "GPIO50", TRUE, DM365_GPIO50, 50, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {21, "PWR_VIN", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {22, "+3V3", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {23, "RESET", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {24, "LINEOUT", FALSE, NA, NA, OUT, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {25, "GPIO1", TRUE, DM365_GPIO1, 1, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {26, "GPIO37", TRUE, DM365_GPIO37, 37, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {27, "GPIO36", TRUE, DM365_GPIO36, 36, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {28, "GPIO17", TRUE, DM365_GPIO17, 17, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {29, "GPIO16", TRUE, DM365_GPIO16, 16, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {30, "GPIO33", TRUE, DM365_GPIO33, 33, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {31, "GPIO32", TRUE, DM365_GPIO32, 32, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {32, "GPIO31", TRUE, DM365_GPIO31, 31, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {33, "GPIO30", TRUE, DM365_GPIO30, 30, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {34, "GPIO29", TRUE, DM365_GPIO29, 29, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {35, "GPIO28", TRUE, DM365_GPIO28, 28, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {36, "GPIO27", TRUE, DM365_GPIO27, 27, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {37, "GPIO26", TRUE, DM365_GPIO26, 26, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {38, "GPIO2", TRUE, DM365_GPIO2, 2, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {39, "GPIO24", TRUE, DM365_GPIO24, 24, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {40, "GPIO23", TRUE, DM365_GPIO23, 23, INP, 0, "pwm0", DM365_PWM0_G23, OUT, EMPTY, NA, NA, EMPTY, NA, NA }, {41, "GPIO22", TRUE, DM365_GPIO22, 22, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {42, "GPIO80", TRUE, DM365_GPIO80, 80, INP, 0, "pwm3", DM365_PWM3_G80, OUT, EMPTY, NA, NA, EMPTY, NA, NA }, {43, "GPIO92", TRUE, DM365_GPIO92, 92, INP, 0, "pwm0", DM365_PWM0, OUT, EMPTY, NA, NA, EMPTY, NA, NA }, {44, "GPIO91", TRUE, DM365_GPIO91, 91, INP, 0, "pwm1", DM365_PWM1, OUT, EMPTY, NA, NA, EMPTY, NA, NA }, {45, "TVOUT", FALSE, NA, NA, OUT, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {46, "SP+", FALSE, NA, NA, OUT, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {47, "SP-", FALSE, NA, NA, OUT, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {48, "ADC0", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {49, "ADC1", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {50, "ADC2", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {51, "ADC3", 
FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {52, "ADC4", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {53, "ADC5", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {54, "GPIO3", TRUE, DM365_GPIO3, 3, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {55, "GPIO4", TRUE, DM365_GPIO4, 4, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {56, "GPIO5", TRUE, DM365_GPIO5, 5, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {57, "GPIO6", TRUE, DM365_GPIO6, 6, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {58, "GPIO7", TRUE, DM365_GPIO7, 7, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {59, "GPIO8", TRUE, DM365_GPIO8, 8, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {60, "GPIO9", TRUE, DM365_GPIO9, 9, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {61, "GPIO82", TRUE, DM365_GPIO82, 82, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {62, "GPIO79", TRUE, DM365_GPIO79, 79, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {63, "GPIO86", TRUE, DM365_GPIO86, 86, INP, 0, "pwm3", DM365_PWM3_G86, OUT, EMPTY, NA, NA, EMPTY, NA, NA }, {64, "GPIO85", TRUE, DM365_GPIO85, 85, INP, 0, "pwm3", DM365_PWM3_G85, OUT, EMPTY, NA, NA, EMPTY, NA, NA }, {65, "GPIO81", TRUE, DM365_GPIO81, 81, INP, 0, "pwm3", DM365_PWM3_G81, OUT, EMPTY, NA, NA, EMPTY, NA, NA }, {66, "AGND", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {67, "+3V3", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {68, "PWR_VIN", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {69, "DSP_GND", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {70, "I2C_DATA", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {71, "I2C_CLK", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {72, "COMPPR", FALSE, NA, NA, OUT, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {73, "COMPY", FALSE, NA, NA, OUT, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {74, "COMPPB", FALSE, NA, NA, OUT, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {75, "GPIO49", TRUE, DM365_GPIO49, 49, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {76, "GPIO48", TRUE, DM365_GPIO48, 48, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {77, "GPIO47", TRUE, DM365_GPIO47, 47, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {78, "GPIO46", TRUE, DM365_GPIO46, 46, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {79, "GPIO45", TRUE, DM365_GPIO45, 45, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {80, "GPIO44", TRUE, DM365_GPIO44, 44, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {81, "GPIO35", TRUE, DM365_GPIO35, 35, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {82, "GPIO84", TRUE, DM365_GPIO84, 84, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {83, "GPIO83", TRUE, DM365_GPIO83, 83, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {84, "GPIO25", TRUE, DM365_GPIO25, 25, INP, 0, "pwm1", DM365_PWM1_G25, OUT, EMPTY, NA, NA, EMPTY, NA, NA }, {85, "GPIO34", TRUE, DM365_GPIO34, 34, INP, 0, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, {86, "GND", FALSE, NA, NA, INP, NA, EMPTY, NA, NA, EMPTY, NA, NA, EMPTY, NA, NA }, }; /********************************************************************** for PWM **********************************************************************/ typedef struct { volatile unsigned int pid; volatile unsigned int pcr; volatile unsigned int cfg; volatile 
unsigned int start; volatile unsigned int repeat; volatile unsigned int period; volatile unsigned int ph1d; } PWM; static volatile PWM* pwm[TOTAL_PWM] = { (volatile PWM*)IO_ADDRESS(PWM0BASE), (volatile PWM*)IO_ADDRESS(PWM1BASE), (volatile PWM*)IO_ADDRESS(PWM2BASE), (volatile PWM*)IO_ADDRESS(PWM3BASE) }; static void v2r_init_pwm(void) { int i = 0; struct clk* clk_pwm0; struct clk* clk_pwm1; struct clk* clk_pwm2; struct clk* clk_pwm3; printk("%s: init v2r CON & PWM clocks\n", DEVICE_NAME); clk_pwm0 = clk_get(NULL, "pwm0"); clk_enable(clk_pwm0); clk_pwm1 = clk_get(NULL, "pwm1"); clk_enable(clk_pwm1); clk_pwm2 = clk_get(NULL, "pwm2"); clk_enable(clk_pwm2); clk_pwm3 = clk_get(NULL, "pwm3"); clk_enable(clk_pwm3); // set up and clear all PWM timers for (i = 0; i < TOTAL_PWM; i++) { pwm[i]->pcr = 0x1; pwm[i]->cfg = 0x12; pwm[i]->start = 1; pwm[i]->repeat = 0x0; pwm[i]->period = 0; pwm[i]->ph1d = 0x0; } } /********************************************************************** end for PWM **********************************************************************/ /********************************************************************** for PROC_FS **********************************************************************/ #ifndef CONFIG_PROC_FS static int pins_add_proc_fs(void) { return 0; } static int pins_remove_proc_fs(void) { return 0; } #else static struct proc_dir_entry *proc_parent; static struct proc_dir_entry *proc_entry; static s32 proc_write_entry[TOTAL_PINS + TOTAL_PWM +2 ]; static int pins_read_proc (int pin_number, char *buf, char **start, off_t offset, int count, int *eof, void *data ) { int len=0; int value = 0; if (ext_bus_pins[pin_number].gpio_descriptor == NA) { printk("%s: CON descriptor is not available\n", DEVICE_NAME); return -EFAULT; } value = gpio_get_value(ext_bus_pins[pin_number].gpio_number)? 
1 : 0; len = sprintf(buf, "%d\n", value); return len; } static int pins_write_proc (int pin_number, struct file *file, const char *buf, int count, void *data ) { static int value = 0; static char proc_data[2]; if (ext_bus_pins[pin_number].gpio_descriptor == NA) { printk("%s: CON descriptor is not available\n", DEVICE_NAME); return -EFAULT; } if(count > 1) count = 1; if(copy_from_user(proc_data, buf, count)) return -EFAULT; if (proc_data[0] == 0) value = 0; else if (proc_data[0] == 1) value = 1; else kstrtoint(proc_data, 2, &value); gpio_direction_output(ext_bus_pins[pin_number].gpio_number, value); davinci_cfg_reg(ext_bus_pins[pin_number].gpio_descriptor); return count; } /* *i'd line to use array init, but i don't know how get file id from unified functions */ static int pins_write_proc_10 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (10, file, buf, count, data); } static int pins_read_proc_10 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (10, buf, start, offset, count, eof, data); } static int pins_write_proc_11 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (11, file, buf, count, data); } static int pins_read_proc_11 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (11, buf, start, offset, count, eof, data); } static int pins_write_proc_12 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (12, file, buf, count, data); } static int pins_read_proc_12 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (12, buf, start, offset, count, eof, data); } static int pins_write_proc_13 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (13, file, buf, count, data); } static int pins_read_proc_13 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (13, buf, start, offset, count, eof, data); } static int pins_write_proc_14 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (14, file, buf, count, data); } static int pins_read_proc_14 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (14, buf, start, offset, count, eof, data); } static int pins_write_proc_15 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (15, file, buf, count, data); } static int pins_read_proc_15 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (15, buf, start, offset, count, eof, data); } static int pins_write_proc_16 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (16, file, buf, count, data); } static int pins_read_proc_16 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (16, buf, start, offset, count, eof, data); } static int pins_write_proc_17 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (17, file, buf, count, data); } static int pins_read_proc_17 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (17, buf, start, offset, count, eof, data); } static int pins_write_proc_18 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (18, file, buf, count, data); } static int pins_read_proc_18 (char *buf, char **start, off_t 
offset, int count, int *eof, void *data ) { return pins_read_proc (18, buf, start, offset, count, eof, data); } static int pins_write_proc_19 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (19, file, buf, count, data); } static int pins_read_proc_19 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (19, buf, start, offset, count, eof, data); } static int pins_write_proc_20 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (20, file, buf, count, data); } static int pins_read_proc_20 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (20, buf, start, offset, count, eof, data); } static int pins_write_proc_25 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (25, file, buf, count, data); } static int pins_read_proc_25 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (25, buf, start, offset, count, eof, data); } static int pins_write_proc_26 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (26, file, buf, count, data); } static int pins_read_proc_26 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (26, buf, start, offset, count, eof, data); } static int pins_write_proc_27 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (27, file, buf, count, data); } static int pins_read_proc_27 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (27, buf, start, offset, count, eof, data); } static int pins_write_proc_28 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (28, file, buf, count, data); } static int pins_read_proc_28 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (28, buf, start, offset, count, eof, data); } static int pins_write_proc_29 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (29, file, buf, count, data); } static int pins_read_proc_29 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (29, buf, start, offset, count, eof, data); } static int pins_write_proc_30 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (30, file, buf, count, data); } static int pins_read_proc_30 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (30, buf, start, offset, count, eof, data); } static int pins_write_proc_31 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (31, file, buf, count, data); } static int pins_read_proc_31 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (31, buf, start, offset, count, eof, data); } static int pins_write_proc_32 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (32, file, buf, count, data); } static int pins_read_proc_32 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (32, buf, start, offset, count, eof, data); } static int pins_write_proc_33 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (33, file, buf, count, data); } static int pins_read_proc_33 (char *buf, char **start, off_t offset, int count, int 
*eof, void *data ) { return pins_read_proc (33, buf, start, offset, count, eof, data); } static int pins_write_proc_34 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (34, file, buf, count, data); } static int pins_read_proc_34 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (34, buf, start, offset, count, eof, data); } static int pins_write_proc_35 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (35, file, buf, count, data); } static int pins_read_proc_35 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (35, buf, start, offset, count, eof, data); } static int pins_write_proc_36 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (36, file, buf, count, data); } static int pins_read_proc_36 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (36, buf, start, offset, count, eof, data); } static int pins_write_proc_37 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (37, file, buf, count, data); } static int pins_read_proc_37 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (37, buf, start, offset, count, eof, data); } static int pins_write_proc_38 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (38, file, buf, count, data); } static int pins_read_proc_38 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (38, buf, start, offset, count, eof, data); } static int pins_write_proc_39 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (39, file, buf, count, data); } static int pins_read_proc_39 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (39, buf, start, offset, count, eof, data); } static int pins_write_proc_40 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (40, file, buf, count, data); } static int pins_read_proc_40 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (40, buf, start, offset, count, eof, data); } static int pins_write_proc_41 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (41, file, buf, count, data); } static int pins_read_proc_41 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (41, buf, start, offset, count, eof, data); } static int pins_write_proc_42 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (42, file, buf, count, data); } static int pins_read_proc_42 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (42, buf, start, offset, count, eof, data); } static int pins_write_proc_43 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (43, file, buf, count, data); } static int pins_read_proc_43 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (43, buf, start, offset, count, eof, data); } static int pins_write_proc_44 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (44, file, buf, count, data); } static int pins_read_proc_44 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { 
return pins_read_proc (44, buf, start, offset, count, eof, data); } static int pins_write_proc_54 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (54, file, buf, count, data); } static int pins_read_proc_54 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (54, buf, start, offset, count, eof, data); } static int pins_write_proc_55 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (55, file, buf, count, data); } static int pins_read_proc_55 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (55, buf, start, offset, count, eof, data); } static int pins_write_proc_56 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (56, file, buf, count, data); } static int pins_read_proc_56 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (56, buf, start, offset, count, eof, data); } static int pins_write_proc_57 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (57, file, buf, count, data); } static int pins_read_proc_57 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (57, buf, start, offset, count, eof, data); } static int pins_write_proc_58 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (58, file, buf, count, data); } static int pins_read_proc_58 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (58, buf, start, offset, count, eof, data); } static int pins_write_proc_59 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (59, file, buf, count, data); } static int pins_read_proc_59 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (59, buf, start, offset, count, eof, data); } static int pins_write_proc_60 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (60, file, buf, count, data); } static int pins_read_proc_60 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (60, buf, start, offset, count, eof, data); } static int pins_write_proc_61 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (61, file, buf, count, data); } static int pins_read_proc_61 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (61, buf, start, offset, count, eof, data); } static int pins_write_proc_62 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (62, file, buf, count, data); } static int pins_read_proc_62 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (62, buf, start, offset, count, eof, data); } static int pins_write_proc_63 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (63, file, buf, count, data); } static int pins_read_proc_63 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (63, buf, start, offset, count, eof, data); } static int pins_write_proc_64 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (64, file, buf, count, data); } static int pins_read_proc_64 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc 
(64, buf, start, offset, count, eof, data); } static int pins_write_proc_65 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (65, file, buf, count, data); } static int pins_read_proc_65 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (65, buf, start, offset, count, eof, data); } static int pins_write_proc_75 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (75, file, buf, count, data); } static int pins_read_proc_75 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (75, buf, start, offset, count, eof, data); } static int pins_write_proc_76 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (76, file, buf, count, data); } static int pins_read_proc_76 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (76, buf, start, offset, count, eof, data); } static int pins_write_proc_77 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (77, file, buf, count, data); } static int pins_read_proc_77 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (77, buf, start, offset, count, eof, data); } static int pins_write_proc_78 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (78, file, buf, count, data); } static int pins_read_proc_78 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (78, buf, start, offset, count, eof, data); } static int pins_write_proc_79 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (79, file, buf, count, data); } static int pins_read_proc_79 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (79, buf, start, offset, count, eof, data); } static int pins_write_proc_80 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (80, file, buf, count, data); } static int pins_read_proc_80 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (80, buf, start, offset, count, eof, data); } static int pins_write_proc_81 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (81, file, buf, count, data); } static int pins_read_proc_81 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (81, buf, start, offset, count, eof, data); } static int pins_write_proc_82 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (82, file, buf, count, data); } static int pins_read_proc_82 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (82, buf, start, offset, count, eof, data); } static int pins_write_proc_83 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (83, file, buf, count, data); } static int pins_read_proc_83 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (83, buf, start, offset, count, eof, data); } static int pins_write_proc_84 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (84, file, buf, count, data); } static int pins_read_proc_84 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (84, buf, start, 
offset, count, eof, data); } static int pins_write_proc_85 (struct file *file, const char *buf, int count, void *data ) { return pins_write_proc (85, file, buf, count, data); } static int pins_read_proc_85 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc (85, buf, start, offset, count, eof, data); } static int pins_read_proc_all (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { char buffer[TOTAL_PINS + 1]; int i; int len=0; int value = 0; for (i = 0; i <= TOTAL_PINS; i++) { value = gpio_get_value(ext_bus_pins[i].gpio_number); /* bers, eat this */ buffer[i] = value ? '1' : '0'; } len = sprintf(buf, "%s\n", buffer); return len; } static int pins_read_proc_pwm (int id, char *buf, char **start, off_t offset, int count, int *eof, void *data ) { int len=0; int i; char pwmstr[10]; char constr[5]; char *list; list = kmalloc(50, GFP_KERNEL); memset(list, 0, 50); sprintf(pwmstr, "pwm%d", id); for (i = 1; i < TOTAL_PINS; i++) { if (!ext_bus_pins[i].alt_func_name1) continue; if ((ext_bus_pins[i].current_af_number == 1) && (!strcmp(ext_bus_pins[i].alt_func_name1, pwmstr))) { memset(constr, 0, sizeof(constr)); sprintf(constr, "%d ", i); list = strcat(list, constr); } } len = sprintf(buf, "%d %d %d %d %X %s\n", id, pwm[id]->ph1d, pwm[id]->period, pwm[id]->repeat, pwm[id]->cfg, list); kfree(list); return len; } static int pins_read_proc_pwm0 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc_pwm (0, buf, start, offset, count, eof, data); } static int pins_read_proc_pwm1 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc_pwm (1, buf, start, offset, count, eof, data); } static int pins_read_proc_pwm2 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc_pwm (2, buf, start, offset, count, eof, data); } static int pins_read_proc_pwm3 (char *buf, char **start, off_t offset, int count, int *eof, void *data ) { return pins_read_proc_pwm (3, buf, start, offset, count, eof, data); } static int pins_remove_proc_fs(void) { int i; char fn[10]; for (i = 0; i <= TOTAL_PINS + TOTAL_PWM + 2; i++) { if (proc_write_entry[i]) { sprintf(fn, "%d", i); remove_proc_entry(fn, proc_parent); } } /* remove proc_fs directory */ remove_proc_entry("v2r_pins",NULL); return 0; } static int pins_add_proc_fs(void) { proc_parent = proc_mkdir("v2r_pins", NULL); if (!proc_parent) { printk("%s: error creating proc entry (/proc/v2r_pins)\n", DEVICE_NAME); return 1; } /* for (i = 1; i <= TOTAL_PINS; i++ ) { sprintf(procfilename, "%d", i); proc_entry = create_proc_entry(procfilename, 0666, proc_parent); if (!proc_entry) { printk("%s: error creating proc entry (/proc/v2r_pins/%d)\n", DEVICE_NAME, i); // return -ENOMEM; } proc_entry-> read_proc = read_proc ; proc_entry-> write_proc = write_proc; proc_entry-> owner = THIS_MODULE; proc_entry-> mode = S_IFREG | S_IRUGO; //proc_entry-> uid = 0; //proc_entry-> gid = 0; //proc_entry-> size = 10; proc_write_entry[i] = proc_entry; } */ proc_entry = create_proc_entry("10", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_10; proc_entry-> write_proc = (void *) pins_write_proc_10; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[10] = (s32) proc_entry; } proc_entry = create_proc_entry("11", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_11; proc_entry-> write_proc = (void *) pins_write_proc_11; proc_entry-> mode = S_IFREG | S_IRUGO; 
proc_write_entry[11] = (s32) proc_entry; } proc_entry = create_proc_entry("12", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_12; proc_entry-> write_proc = (void *) pins_write_proc_12; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[12] = (s32) proc_entry; } proc_entry = create_proc_entry("13", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_13; proc_entry-> write_proc = (void *) pins_write_proc_13; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[13] = (s32) proc_entry; } proc_entry = create_proc_entry("14", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_14; proc_entry-> write_proc = (void *) pins_write_proc_14; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[14] = (s32) proc_entry; } proc_entry = create_proc_entry("15", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_15; proc_entry-> write_proc = (void *) pins_write_proc_15; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[15] = (s32) proc_entry; } proc_entry = create_proc_entry("16", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_16; proc_entry-> write_proc = (void *) pins_write_proc_16; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[16] = (s32) proc_entry; } proc_entry = create_proc_entry("17", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_17; proc_entry-> write_proc = (void *) pins_write_proc_17; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[17] = (s32) proc_entry; } proc_entry = create_proc_entry("18", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_18; proc_entry-> write_proc = (void *) pins_write_proc_18; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[18] = (s32) proc_entry; } proc_entry = create_proc_entry("19", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_19; proc_entry-> write_proc = (void *) pins_write_proc_19; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[19] = (s32) proc_entry; } proc_entry = create_proc_entry("20", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_20; proc_entry-> write_proc = (void *) pins_write_proc_20; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[20] = (s32) proc_entry; } proc_entry = create_proc_entry("25", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_25; proc_entry-> write_proc = (void *) pins_write_proc_25; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[25] = (s32) proc_entry; } proc_entry = create_proc_entry("26", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_26; proc_entry-> write_proc = (void *) pins_write_proc_26; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[26] = (s32) proc_entry; } proc_entry = create_proc_entry("27", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_27; proc_entry-> write_proc = (void *) pins_write_proc_27; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[27] = (s32) proc_entry; } proc_entry = create_proc_entry("28", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_28; proc_entry-> write_proc = (void *) pins_write_proc_28; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[28] = (s32) proc_entry; } proc_entry = create_proc_entry("29", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_29; proc_entry-> write_proc = (void *) pins_write_proc_29; 
proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[29] = (s32) proc_entry; } proc_entry = create_proc_entry("30", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_30; proc_entry-> write_proc = (void *) pins_write_proc_30; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[30] = (s32) proc_entry; } proc_entry = create_proc_entry("31", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_31; proc_entry-> write_proc = (void *) pins_write_proc_31; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[31] = (s32) proc_entry; } proc_entry = create_proc_entry("32", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_32; proc_entry-> write_proc = (void *) pins_write_proc_32; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[32] = (s32) proc_entry; } proc_entry = create_proc_entry("33", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_33; proc_entry-> write_proc = (void *) pins_write_proc_33; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[33] = (s32) proc_entry; } proc_entry = create_proc_entry("34", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_34; proc_entry-> write_proc = (void *) pins_write_proc_34; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[34] = (s32) proc_entry; } proc_entry = create_proc_entry("35", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_35; proc_entry-> write_proc = (void *) pins_write_proc_35; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[35] = (s32) proc_entry; } proc_entry = create_proc_entry("36", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_36; proc_entry-> write_proc = (void *) pins_write_proc_36; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[36] = (s32) proc_entry; } proc_entry = create_proc_entry("37", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_37; proc_entry-> write_proc = (void *) pins_write_proc_37; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[37] = (s32) proc_entry; } proc_entry = create_proc_entry("38", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_38; proc_entry-> write_proc = (void *) pins_write_proc_38; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[38] = (s32) proc_entry; } proc_entry = create_proc_entry("39", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_39; proc_entry-> write_proc = (void *) pins_write_proc_39; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[39] = (s32) proc_entry; } proc_entry = create_proc_entry("40", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_40; proc_entry-> write_proc = (void *) pins_write_proc_40; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[40] = (s32) proc_entry; } proc_entry = create_proc_entry("41", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_41; proc_entry-> write_proc = (void *) pins_write_proc_41; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[41] = (s32) proc_entry; } proc_entry = create_proc_entry("42", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_42; proc_entry-> write_proc = (void *) pins_write_proc_42; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[42] = (s32) proc_entry; } proc_entry = create_proc_entry("43", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_43; proc_entry-> write_proc = 
(void *) pins_write_proc_43; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[43] = (s32) proc_entry; } proc_entry = create_proc_entry("44", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_44; proc_entry-> write_proc = (void *) pins_write_proc_44; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[44] = (s32) proc_entry; } proc_entry = create_proc_entry("54", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_54; proc_entry-> write_proc = (void *) pins_write_proc_54; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[54] = (s32) proc_entry; } proc_entry = create_proc_entry("55", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_55; proc_entry-> write_proc = (void *) pins_write_proc_55; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[55] = (s32) proc_entry; } proc_entry = create_proc_entry("56", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_56; proc_entry-> write_proc = (void *) pins_write_proc_56; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[56] = (s32) proc_entry; } proc_entry = create_proc_entry("57", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_57; proc_entry-> write_proc = (void *) pins_write_proc_57; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[57] = (s32) proc_entry; } proc_entry = create_proc_entry("58", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_58; proc_entry-> write_proc = (void *) pins_write_proc_58; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[58] = (s32) proc_entry; } proc_entry = create_proc_entry("59", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_59; proc_entry-> write_proc = (void *) pins_write_proc_59; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[59] = (s32) proc_entry; } proc_entry = create_proc_entry("60", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_60; proc_entry-> write_proc = (void *) pins_write_proc_60; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[60] = (s32) proc_entry; } proc_entry = create_proc_entry("61", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_61; proc_entry-> write_proc = (void *) pins_write_proc_61; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[61] = (s32) proc_entry; } proc_entry = create_proc_entry("62", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_62; proc_entry-> write_proc = (void *) pins_write_proc_62; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[62] = (s32) proc_entry; } proc_entry = create_proc_entry("63", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_63; proc_entry-> write_proc = (void *) pins_write_proc_63; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[63] = (s32) proc_entry; } proc_entry = create_proc_entry("64", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_64; proc_entry-> write_proc = (void *) pins_write_proc_64; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[64] = (s32) proc_entry; } proc_entry = create_proc_entry("65", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_65; proc_entry-> write_proc = (void *) pins_write_proc_65; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[65] = (s32) proc_entry; } proc_entry = create_proc_entry("75", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = 
pins_read_proc_75; proc_entry-> write_proc = (void *) pins_write_proc_75; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[75] = (s32) proc_entry; } proc_entry = create_proc_entry("76", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_76; proc_entry-> write_proc = (void *) pins_write_proc_76; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[76] = (s32) proc_entry; } proc_entry = create_proc_entry("77", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_77; proc_entry-> write_proc = (void *) pins_write_proc_77; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[77] = (s32) proc_entry; } proc_entry = create_proc_entry("78", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_78; proc_entry-> write_proc = (void *) pins_write_proc_78; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[78] = (s32) proc_entry; } proc_entry = create_proc_entry("79", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_79; proc_entry-> write_proc = (void *) pins_write_proc_79; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[79] = (s32) proc_entry; } proc_entry = create_proc_entry("80", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_80; proc_entry-> write_proc = (void *) pins_write_proc_80; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[80] = (s32) proc_entry; } proc_entry = create_proc_entry("81", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_81; proc_entry-> write_proc = (void *) pins_write_proc_81; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[81] = (s32) proc_entry; } proc_entry = create_proc_entry("82", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_82; proc_entry-> write_proc = (void *) pins_write_proc_82; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[82] = (s32) proc_entry; } proc_entry = create_proc_entry("83", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_83; proc_entry-> write_proc = (void *) pins_write_proc_83; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[83] = (s32) proc_entry; } proc_entry = create_proc_entry("84", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_84; proc_entry-> write_proc = (void *) pins_write_proc_84; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[84] = (s32) proc_entry; } proc_entry = create_proc_entry("85", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_85; proc_entry-> write_proc = (void *) pins_write_proc_85; proc_entry-> mode = S_IFREG | S_IRUGO; proc_write_entry[85] = (s32) proc_entry; } proc_entry = create_proc_entry("all", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_all; proc_write_entry[86] = (s32) proc_entry; } proc_entry = create_proc_entry("pwm0", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_pwm0; proc_write_entry[87] = (s32) proc_entry; } proc_entry = create_proc_entry("pwm1", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_pwm1; proc_write_entry[88] = (s32) proc_entry; } proc_entry = create_proc_entry("pwm2", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_pwm2; proc_write_entry[89] = (s32) proc_entry; } proc_entry = create_proc_entry("pwm3", 0666, proc_parent); if (proc_entry) { proc_entry-> read_proc = pins_read_proc_pwm3; proc_write_entry[90] = (s32) proc_entry; } return 0; 
} #endif /* CONFIG_PROC_FS */ /********************************************************************** end for PROC_FS **********************************************************************/ /* set pin state direction =0 - input, =1 - output */ static int v2r_set_pin(int pin_number, int direction, int value) { if (!(pin_number >= 0 && pin_number <= TOTAL_PINS)) { printk("%s: wrong CON number (%d)\n", DEVICE_NAME, pin_number); return 1; } if (ext_bus_pins[pin_number].gpio_descriptor == NA) { printk("%s: CON descriptor is not available\n", DEVICE_NAME); return 1; } if (value > 1) { printk("%s: wrong value (%d)\n", DEVICE_NAME, value); return 1; } /* restore default pin function */ if (ext_bus_pins[pin_number].current_af_number > 0) ext_bus_pins[pin_number].current_af_number = 0; if (direction) gpio_direction_output(ext_bus_pins[pin_number].gpio_number, value); else gpio_direction_input(ext_bus_pins[pin_number].gpio_number); davinci_cfg_reg(ext_bus_pins[pin_number].gpio_descriptor); return 0; } /* set pin pwm channel */ static int v2r_pin_set_pwm(unsigned int pin_number) { davinci_cfg_reg(ext_bus_pins[pin_number].alt_func_descriptor1); ext_bus_pins[pin_number].current_af_number = 1; if (ext_bus_pins[pin_number].alt_func_direction1 == INP) { gpio_direction_input(ext_bus_pins[pin_number].gpio_number); } else if (ext_bus_pins[pin_number].alt_func_direction1 == OUT) { gpio_direction_output(ext_bus_pins[pin_number].gpio_number, FALSE); } printk("%s: CON%d (GPIO%d) set as %s\n", DEVICE_NAME, pin_number, ext_bus_pins[pin_number].gpio_number, ext_bus_pins[pin_number].alt_func_name1); return 0; } /* set pwm values */ static int v2r_set_pwm(unsigned int pwm_number, unsigned int duty, unsigned int period, unsigned int repeat) { // printk("%s: PWM%d set duty %d period %d\n", DEVICE_NAME, pwm_number, duty, period); if (pwm_number >= TOTAL_PWM){ printk("%s: wrong pwm number (%d)\n", DEVICE_NAME, pwm_number); return 1; } /* duty must be smaller or equal then period */ if (duty > period) duty = period; pwm[pwm_number]->period = period; pwm[pwm_number]->ph1d = duty; pwm[pwm_number]->repeat = repeat; if (repeat) pwm[pwm_number]->cfg = (pwm[pwm_number]->cfg & 0xFFFFFD) | 1; else pwm[pwm_number]->cfg = (pwm[pwm_number]->cfg & 0xFFFFFE) | 2; return 0; } static int v2r_cfg_pwm(unsigned int pwm_number, unsigned int cfg_and, unsigned int cfg_or) { if (pwm_number >= TOTAL_PWM){ printk("%s: wrong pwm number (%d)\n", DEVICE_NAME, pwm_number); return 1; } pwm[pwm_number]->cfg &= cfg_and; pwm[pwm_number]->cfg |= cfg_or; pwm[pwm_number]->start = 1; return 0; } /* clear GPIO group */ static int group_clear(void) { pinsGroupTableCounter = -1; // just null a group size printk("%s: CON group cleared\n", DEVICE_NAME); return 0; } /* add GPIO into group */ static int group_add(int pin_number) { if (pinsGroupTableCounter >=TOTAL_PINS) { printk("%s: CON group is full\n", DEVICE_NAME); return 1; } if (!(pin_number >= 0 && pin_number <= TOTAL_PINS)){ printk("%s: wrong CON number (%d)\n", DEVICE_NAME, pin_number); return 1; } pinsGroupTableCounter++; pinsGroupTable[pinsGroupTableCounter] = ext_bus_pins[pin_number]; printk("%s: added CON %d into group. 
New group size is %d\n", DEVICE_NAME, pin_number, pinsGroupTableCounter+1); return 0; } /* add all CON into group */ static void group_init(void) { unsigned int i; for (i=0; i <= TOTAL_PINS; i++) { pinsGroupTable[i] = ext_bus_pins[i]; } pinsGroupTableCounter = i-1; printk("%s: added all CON's into group\n", DEVICE_NAME); } static void pins_parse_binary_command(char * buffer, unsigned int count) { int pin_number = 0, direction = 0, value = 0; unsigned int duty = 0, period = 0, repeat = 0; unsigned int i; switch (buffer[0]) { case 1: /* set CON state buffer[1] - GPIO number buffer[2] - [bit:0] - direction, [bit:1] - state */ if (count < 2) { printk("%s: too small arguments (%d)\n", DEVICE_NAME, count); break; } pin_number = buffer[1]; direction = buffer[2] & 0x01; value = (buffer[2] >> 1) & 0x01; v2r_set_pin(pin_number, direction, value); break; case 2: /* clear GPIO group */ group_clear(); break; case 3: /* add CON into group buffer[1] - CON number */ if (count < 1) { printk("%s: too small arguments (%d)\n", DEVICE_NAME, count); break; } for (i = 1; i < count; i++) group_add(buffer[i]); break; case 4: /* add all CON into group */ group_init(); break; case 5: /* set output mode buffer[1] - mode */ if (count < 1) { printk("%s: too small arguments (%d)\n", DEVICE_NAME, count); break; } output_mode = buffer[1] ? 1 : 0 ; break; case 6: /* set CON alt mode (PWM) buffer[1] - mode */ if (count < 1) { printk("%s: too small arguments (%d)\n", DEVICE_NAME, count); break; } v2r_pin_set_pwm(buffer[1]); break; case 7: /* set PWM params buffer[1] - PWM number buffer[2:3] - duty buffer[4:5] - period buffer[6:7] - repeat (optional) */ if (count < 5) { printk("%s: too small arguments (%d)\n", DEVICE_NAME, count); break; } if ( count > 6 ) repeat = buffer[6] + (buffer[7] << 8); else repeat = 0; // endless repeat duty = (buffer[3] << 8) + buffer[2]; period = (buffer[5] << 8) + buffer[4]; v2r_set_pwm(buffer[1], duty, period , repeat); break; case 8: /* set full 32-bit PWM params buffer[1] - PWM number buffer[2:3:4:5] - duty buffer[6:7:8:9] - period buffer[10:11:12:13] - repeat (optional) */ if (count < 9) { printk("%s: too small arguments (%d)\n", DEVICE_NAME, count); break; } if ( count > 12 ) repeat = buffer[10] + (buffer[11] << 8) + (buffer[12] << 16) + (buffer[13] << 24); else repeat = 0; // endless repeat duty = buffer[2] + (buffer[3] << 8) + (buffer[4] << 16) + (buffer[5] << 24); period = buffer[6] + (buffer[7] << 8) + (buffer[8] << 16) + (buffer[9] << 24); v2r_set_pwm(buffer[1], duty, period, repeat); break; default: printk("%s: i don't know this command\n", DEVICE_NAME); } } static void pins_parse_command(char * string) { static char *part; static char *temp_string; int cmd_ok = 0; int i; unsigned int pin_number = 0, direction = 0, value = 0, pwm_number = 0, duty = 0, period = 0, repeat = 0, cfg_and = 0, cfg_or = 0; // last symbol can be a \n symbol, we must clear him if (string[strlen(string)-1] == '\n') string[strlen(string)-1] = 0; temp_string = kstrdup(string, GFP_KERNEL); do { part = strsep(&temp_string, " "); if (part) { command_parts[command_parts_counter] = part; command_parts_counter++; } } while (part); /* string like "output text" */ if (!strcmp(command_parts[0], "output")) { if (!strcmp(command_parts[1], "text")) output_mode = 0; else if (!strcmp(command_parts[1], "bin")) output_mode = 1; cmd_ok = 1; goto out; } /* string like "group clear" */ if (!strcmp(command_parts[0], "group")) { if (!strcmp(command_parts[1], "clear")) group_clear(); else if (!strcmp(command_parts[1], "init")) 
group_init(); else if (!strcmp(command_parts[1], "add") && !strcmp(command_parts[2], "con")) { if (command_parts_counter < 3) { printk("%s: too small arguments (%d)\n", DEVICE_NAME, command_parts_counter); return; } for (i = 3; i < command_parts_counter ; i++ ) { kstrtoint(command_parts[i], 10, &pin_number); group_add(pin_number); } } cmd_ok = 1; goto out; } /* string like "set con 1 output 1" */ /* or like "set con 1 pwm1" */ if (!strcmp(command_parts[0], "set") && !strcmp(command_parts[1], "con")) { if (command_parts_counter < 3) { printk("%s: too small arguments (%d)\n", DEVICE_NAME, command_parts_counter); return; } if (kstrtoint(command_parts[2], 10, &pin_number)) { printk("%s: wrong pin number (%s)\n", DEVICE_NAME, command_parts[2]); return; } // only set con state if (!strcmp(command_parts[3], "output")) { direction = 1; kstrtoint(command_parts[4], 10, &value); v2r_set_pin(pin_number, direction, value); cmd_ok = 1; goto out; } else if (!strcmp(command_parts[3], "input")) { direction = 0; v2r_set_pin(pin_number, direction, 0); cmd_ok = 1; goto out; } else // or set con alt function if (!ext_bus_pins[pin_number].alt_func_name1) { printk("%s: wrong CON%d alt function (%s)\n", DEVICE_NAME, pin_number, command_parts[3]); return; } if (!strcmp(command_parts[3], ext_bus_pins[pin_number].alt_func_name1)) { if (ext_bus_pins[pin_number].alt_func_descriptor1 == NA) { printk("%s: wrong descriptor for CON%d alternative function\n", DEVICE_NAME, pin_number); return; } v2r_pin_set_pwm(pin_number); cmd_ok = 1; goto out; } else { printk("%s: wrong CON%d alt function (%s)\n", DEVICE_NAME, pin_number, command_parts[3]); return; } } /* string like "set pwm 1 duty 123 period 123" */ /* changed on string like "set pwm 1 123 567" */ /* added optional repeat, changed on string like "set pwm 1 123 567 897" */ if (!strcmp(command_parts[0], "set") && !strcmp(command_parts[1], "pwm")) { if (command_parts_counter < 4) { printk("%s: too small arguments (%d)\n", DEVICE_NAME, command_parts_counter); return; } if (kstrtoint(command_parts[2], 10, &pwm_number)) { printk("%s: wrong PWM number (%s)\n", DEVICE_NAME, command_parts[2]); return; } if (kstrtoint(command_parts[3], 10, &duty)) { printk("%s: wrong duty (%s)\n", DEVICE_NAME, command_parts[3]); return; } if (kstrtoint(command_parts[4], 10, &period)) { printk("%s: wrong period (%s)\n", DEVICE_NAME, command_parts[4]); return; } if (command_parts_counter > 5) { if (kstrtoint(command_parts[5], 10, &repeat)) { printk("%s: wrong repeat (%s)\n", DEVICE_NAME, command_parts[5]); return; } } v2r_set_pwm(pwm_number, duty, period, repeat); cmd_ok = 1; goto out; } /* string like "cfg pwm 1 ffffffff fffffff0" */ if (!strcmp(command_parts[0], "cfg") && !strcmp(command_parts[1], "pwm")) { if (command_parts_counter < 5) { printk("%s: too small arguments (%d)\n", DEVICE_NAME, command_parts_counter); return; } if (kstrtoint(command_parts[2], 10, &pwm_number)) { printk("%s: wrong PWM number (%s)\n", DEVICE_NAME, command_parts[2]); return; } if (kstrtoint(command_parts[3], 16, &cfg_and)) { printk("%s: wrong cfg_and (%s)\n", DEVICE_NAME, command_parts[3]); return; } if (kstrtoint(command_parts[4], 16, &cfg_or)) { printk("%s: wrong cfg_or (%s)\n", DEVICE_NAME, command_parts[4]); return; } v2r_cfg_pwm(pwm_number, cfg_and, cfg_or); cmd_ok = 1; goto out; } out: if (!cmd_ok) { printk("%s: i don't know this command (%s)\n", DEVICE_NAME, string); return; } return; } int pins_open(struct inode *inode, struct file *filp) { unsigned int mj = imajor(inode); unsigned int mn = iminor(inode); 
struct pins_dev *dev = NULL; if (mj != pins_major || mn < 0 || mn >= 1){ //One and only device printk("%s: no device found with minor=%d and major=%d\n", DEVICE_NAME, mj, mn); return -ENODEV; /* No such device */ } /* store a pointer to struct pins_dev here for other methods */ dev = &pins_devices[mn]; filp->private_data = dev; if (inode->i_cdev != &dev->cdev) { printk("%s: open: internal error\n", DEVICE_NAME); return -ENODEV; /* No such device */ } /* if opened the 1st time, allocate the buffer */ if (dev->data == NULL){ dev->data = (unsigned char*) kzalloc(dev->buffer_size, GFP_KERNEL); if (dev->data == NULL){ printk("%s: open: out of memory\n", DEVICE_NAME); return -ENOMEM; } dev->buffer_data_size = 0; } return 0; } int pins_release(struct inode *inode, struct file *filp) { // printk("%s: release device\n", DEVICE_NAME); return 0; } ssize_t pins_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos){ volatile unsigned long i; volatile unsigned int counter; volatile char bitcounter; volatile char tempByte; volatile unsigned int value; struct pins_dev *dev = (struct pins_dev *)filp->private_data; ssize_t retval = 0; if (mutex_lock_killable(&dev->device_mutex)) return -EINTR; /* exit if empty group */ if (pinsGroupTableCounter < 0) { v2r_pins_retBuffer[0] = 0; counter = 0; goto show; } switch (output_mode) { case 0: /* text mode */ //if (count > TOTAL_PINS) count = TOTAL_PINS; //if (*f_pos > TOTAL_PINS) goto out; //if ((*f_pos + count) > TOTAL_PINS) goto out; counter = 0; //for (i = *f_pos; i <= (*f_pos + count); i++) { for (i = 0; i <= pinsGroupTableCounter; i++) { value = gpio_get_value(pinsGroupTable[i].gpio_number); /* bers, eat this */ v2r_pins_retBuffer[counter] = value ? '1' : '0'; counter++; } v2r_pins_retBuffer[counter] = '\n'; counter++; break; case 1: /* binary mode */ counter = 0; bitcounter = 0; tempByte = 0; for (i = 0; i <= pinsGroupTableCounter; i++) { value = gpio_get_value(pinsGroupTable[i].gpio_number)? 
1 : 0; tempByte |= value << bitcounter; bitcounter++; if (bitcounter > 7) { v2r_pins_retBuffer[counter] = tempByte; tempByte = 0; bitcounter = 0; counter++; } } if (bitcounter) { // if not all byte filled v2r_pins_retBuffer[counter] = tempByte; counter++; } break; } show: if (copy_to_user(buf, v2r_pins_retBuffer, counter) != 0){ retval = -EFAULT; goto out; } retval = counter; out: mutex_unlock(&dev->device_mutex); return retval; } ssize_t pins_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) { struct pins_dev *dev = (struct pins_dev *)filp->private_data; ssize_t retval = 0; char *command = 0; if (mutex_lock_killable(&dev->device_mutex)) return -EINTR; if (*f_pos !=0) { /* Writing in the middle of the file is not allowed */ printk("%s: writing in the middle (%d) of the file buffer is not allowed\n", DEVICE_NAME, (int)(*f_pos)); retval = -EINVAL; goto out; } if (count > BUFFER_SIZE) count = BUFFER_SIZE; command = kmalloc(count+1, GFP_KERNEL); if (command==NULL) return -ENOMEM; if (copy_from_user(command, buf, count)) { kfree(command); retval = -EFAULT; goto out; } command[count] = 0; // parse command command_parts_counter = 0; if (command[0] < 10) { pins_parse_binary_command(command, count-1); } else { pins_parse_command(command); } // make return to userspace string memcpy(dev->data, "ok\n", 3); dev->buffer_data_size = 3; kfree(command); *f_pos = 0; retval = count; //retval = 0; out: mutex_unlock(&dev->device_mutex); return retval; } struct file_operations pins_fops = { .owner = THIS_MODULE, .read = pins_read, .write = pins_write, .open = pins_open, .release = pins_release, }; static int pins_construct_device(struct pins_dev *dev, int minor, struct class *class) { int err = 0; dev_t devno = MKDEV(pins_major, minor); struct device *device = NULL; BUG_ON(dev == NULL || class == NULL); /* Memory is to be allocated when the device is opened the first time */ printk("%s: construct device:%d\n", DEVICE_NAME, minor); dev->data = NULL; dev->buffer_size = BUFFER_SIZE; mutex_init(&dev->device_mutex); cdev_init(&dev->cdev, &pins_fops); dev->cdev.owner = THIS_MODULE; err = cdev_add(&dev->cdev, devno, 1); if (err){ printk("%s: error %d while trying to add %d", DEVICE_NAME, err, minor); return err; } device = device_create(class, NULL /*no parent device*/, devno, NULL /*no additional data */, DEVICE_NAME); if (IS_ERR(device)) { err = PTR_ERR(device); printk("%s: error %d while trying to create %d", DEVICE_NAME, err, minor); cdev_del(&dev->cdev); return err; } printk("%s: device is created successfully\n", DEVICE_NAME); return 0; } static void pins_destroy_device(struct pins_dev *dev, int minor, struct class *class) { BUG_ON(dev == NULL || class == NULL); printk("%s: destroy device %d\n", DEVICE_NAME, minor); device_destroy(class, MKDEV(pins_major, minor)); cdev_del(&dev->cdev); kfree(dev->data); printk("%s: device is destroyed successfully\n", DEVICE_NAME); return; } static void pins_cleanup_module(int devices_to_destroy) { int i = 0; /* Get rid of character devices (if any exist) */ printk("%s: cleanup module\n", DEVICE_NAME); if (pins_devices) { for (i = 0; i < devices_to_destroy; ++i) { pins_destroy_device(&pins_devices[i], i, pins_class); } kfree(pins_devices); } if (pins_class) class_destroy(pins_class); if (v2r_pins_retBuffer) kfree(v2r_pins_retBuffer); if (command_parts) kfree(command_parts); /* remove proc_fs files */ pins_remove_proc_fs(); unregister_chrdev_region(MKDEV(pins_major, 0), numberofdevices); printk("%s: cleanup completed\n", DEVICE_NAME); return; } static 
int __init pins_init_module (void) { int err = 0; int i = 0; int devices_to_destroy = 0; dev_t dev = 0; printk("Virt2real CON driver module version 0.3\n"); if (pins_add_proc_fs()) { printk(KERN_ERR "%s: can't create PROCFS files\n", DEVICE_NAME); } if (numberofdevices <= 0) { printk("%s: invalid value of numberofdevices: %d\n", DEVICE_NAME, numberofdevices); return -EINVAL; } /* Get a range of minor numbers (starting with 0) to work with */ err = alloc_chrdev_region(&dev, 0, numberofdevices, DEVICE_NAME); if (err < 0) { printk("%s: alloc_chrdev_region() failed\n", DEVICE_NAME); return err; } pins_major = MAJOR(dev); /* Create device class (before allocation of the array of devices) */ pins_class = class_create(THIS_MODULE, DEVICE_NAME); if (IS_ERR(pins_class)) { err = PTR_ERR(pins_class); printk("%s: class not created %d\n", DEVICE_NAME, err); goto fail; } /* Allocate the array of devices */ pins_devices = (struct pins_dev *) kzalloc( numberofdevices * sizeof(struct pins_dev), GFP_KERNEL); if (pins_devices == NULL) { err = -ENOMEM; printk("%s: devices not allocated %d\n", DEVICE_NAME, err); goto fail; } /* Construct devices */ for (i = 0; i < numberofdevices; ++i) { err = pins_construct_device(&pins_devices[i], i, pins_class); if (err) { printk("%s: device is not created\n", DEVICE_NAME); devices_to_destroy = i; goto fail; } } // init PWMs (Thank's, Cap!) v2r_init_pwm(); v2r_pins_retBuffer = kmalloc(RETBUFFER_SIZE + 1, GFP_KERNEL); if (v2r_pins_retBuffer == NULL) return -ENOMEM; command_parts = kmalloc(128, GFP_KERNEL); if (command_parts == NULL) return -ENOMEM; /* fill gpioGroupTable width default values - all GPIOs */ group_init(); return 0; /* success */ fail: pins_cleanup_module(devices_to_destroy); return err; } static void __exit pins_exit_module(void){ pins_cleanup_module(numberofdevices); return; } module_init(pins_init_module); module_exit(pins_exit_module); MODULE_DESCRIPTION("Virt2real CON driver module version 0.3"); MODULE_AUTHOR("Alexandr Shadrin"); MODULE_AUTHOR("Gol (gol@g0l.ru)"); MODULE_LICENSE("GPL v2");
virt2real/linux-davinci
drivers/v2r/v2r_pins.c
C
gpl-2.0
70,553
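The v2r_pins.c driver above accepts plain-text commands through its character device (for example "set con 33 output 1" or "set pwm 0 100 200") and answers reads with one '0'/'1' character per pin of the current group. A minimal user-space sketch of that interaction follows; the device path /dev/v2r_pins is an assumption, since the driver's DEVICE_NAME macro is defined outside this excerpt, and the module must already be loaded.

/*
 * Minimal user-space sketch for talking to the v2r_pins character device.
 * Assumption: the node created from DEVICE_NAME lives at /dev/v2r_pins;
 * substitute the real name reported by the driver on your system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *dev = "/dev/v2r_pins";          /* hypothetical node name */
	const char *cmd = "set con 33 output 1\n";  /* parsed by pins_parse_command() */
	char state[256];
	ssize_t n;

	int fd = open(dev, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* drive CON 33 high via the text command interface */
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");

	/* pins_read() returns one '0'/'1' per pin in the current group */
	n = read(fd, state, sizeof(state) - 1);
	if (n > 0) {
		state[n] = '\0';
		printf("group state: %s", state);
	}

	close(fd);
	return 0;
}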
{% extends "rjaBase.html" %} {% block extraCss %} #tollAwards { /*float: left;*/ position: absolute; left: 40px; } {% endblock extraCss %} {% block mainDiv %} <div class="mainDiv"> <div id="tollDiv" class="mainContent selectedContent"> <span class="titleText titleSpan">Rachel Johnson Animation</span> <!-- <img class="bgImage" src="images/toll_background_small.jpg" alt="Rachel Johnson Film and Animation"/> --> <img class="bgImage" src="images/opening_page.jpg" alt="Rachel Johnson Film and Animation"/> </div> </div> <div class="overlay" id="contactOverlay" style="z-index: 200000;"> <div class="popupOverlay"> <div class="resumeText"> <div style="text-align: center;"> <br><br><br> You can reach Rachel any time via mail, e-mail, or phone.<br><br> Rachel Johnson Animation<br> 759 1/2 North Wilcox Ave.<br>Los Angeles, CA 90038<br><br> racheljohnson70@hotmail.com<br> (917) 549-1979 </div> <div style="clear: both"></div> </div> </div> </div> <div class="overlay" id="resumeOverlay" style="z-index: 200000;"> <div class="textOverlay"> <div class="resumeText"> {% include "resume.html" %} </div> <div style="clear: both"></div> </div> </div> {% endblock mainDiv %} {% block navLinks %} <div class="navLinks"> <center> <div id="tollAwards" style="display: none; background:white;">Hello</div> <table> <tr class="commonTextNav"><td><a href="toll" id="tollLink" class="firstCourseLink tooltip" title="This is a small tooltip with the Classname 'Tooltip'">The Toll Collector</a></td> <td><a href="gorilla" id="gorilla" class="courseLink">The Gorilla House</a></td> <td><a href="fairies" id="fairies" class="courseLink">The Replacement Fairies</a></td> </tr> <!-- <tr><td><a class="courseLink"></a></td></tr>--> <tr class="commonTextNav"> <td> <!-- <a id="filmography" class="courseLink">Filmography</a> --> <a id="filmography" rel="#filmographyOverlay" class="firstCourseLink overlayLink"> Filmography </a> </td> <td> <a id="resume" rel="#resumeOverlay" class="courseOverlayLink overlayLink"> Resume </a> </td> <!-- <td><a id="contact" class="courseLink">Contact</a></td> --> <td> <a id="contact" rel="#contactOverlay" class="courseOverlayLink overlayLink"> Contact </a> </td> </tr> </table> </center> </div> {% endblock navLinks %} {% block extraJavaScript %} <script type="text/javascript"> /* $("#tollLink").mouseover(function() { //console.info("OVer!!"); $("#tollAwards").show("slow"); }).mouseout(function() { $("#tollAwards").hide("slow"); }); */ function simple_tooltip(target_items, name){ $(target_items).each(function(i){ //console.info("Setting up tooltip"); $("body").append("<div class='"+name+"' id='"+name+i+"'><p>"+$(this).attr('title')+"</p></div>"); var my_tooltip = $("#"+name+i); $(this).removeAttr("title").mouseover(function(){ // console.info("got over"); my_tooltip.css({opacity:0.8, display:"none"}).fadeIn(400); //console.info("done over"); }).mousemove(function(kmouse){ my_tooltip.css({left:kmouse.pageX+15, top:kmouse.pageY+15}); }).mouseout(function(){ my_tooltip.fadeOut(400); //console.info("done out"); }); }); } $(document).ready(function(){ //simple_tooltip("a","tooltip"); simple_tooltip("#tollLink", "tooltip"); }); </script> {% endblock extraJavaScript %}
adamfisk/littleshoot-client
server/rja/templates/rja.html
HTML
gpl-2.0
4,721
<?php /* * Copyright (c) 2006, Universal Diagnostic Solutions, Inc. * * This file is part of Tracmor. * * Tracmor is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Tracmor is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Tracmor; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ ?> <?php class QAssetTransactComposite extends QControl { public $blnEditMode; public $objParentObject; public $strTitleVerb; public $objAssetArray; public $dtgAssetTransact; public $objAsset; public $blnTransactionModified; protected $lstLocation; protected $txtNote; protected $objAssetTransaction; protected $btnSave; protected $btnCancel; protected $btnAdd; protected $btnRemove; protected $txtNewAssetCode; protected $objTransaction; protected $intTransactionTypeId; public function __construct($objParentObject, $strControlId = null) { // First, call the parent to do most of the basic setup try { parent::__construct($objParentObject, $strControlId); } catch (QCallerException $objExc) { $objExc->IncrementOffset(); throw $objExc; } // Assign the parent object (AssetEditForm from asset_edit.php) $this->objParentObject = $objParentObject; // Setup the Asset, which assigns objAsset and blnEditMode $this->objParentObject->SetupAsset($this); // Create an empty Asset Array $this->objAssetArray = array(); $this->btnCancel_Create(); $this->lstLocation_Create(); $this->txtNote_Create(); $this->txtNewAssetCode_Create(); $this->btnAdd_Create(); $this->btnSave_Create(); $this->dtgAssetTransact_Create(); } // This method must be declared in all composite controls public function ParsePostData() { } public function GetJavaScriptAction() { return "onchange"; } public function Validate() {return true;} protected function GetControlHtml() { $strStyle = $this->GetStyleAttributes(); if ($strStyle) { $strStyle = sprintf('style="%s"', $strStyle); } $strAttributes = $this->GetAttributes(); // Store the Output Buffer locally $strAlreadyRendered = ob_get_contents(); ob_clean(); // Evaluate the template require('asset_transact_control.inc.php'); $strTemplateEvaluated = ob_get_contents(); ob_clean(); // Restore the output buffer and return evaluated template print($strAlreadyRendered); $strToReturn = sprintf('<span id="%s" %s%s>%s</span>', $this->strControlId, $strStyle, $strAttributes, $strTemplateEvaluated); return $strToReturn; } // I'm pretty sure that this is not necessary // Create the Asset Code label protected function lblAssetCode_Create() { $this->lblAssetCode = new QLabel($this); $this->lblAssetCode->Name = 'Asset Code'; $this->lblAssetCode->Text = $this->objAsset->AssetCode; } // Create the Note text field protected function txtNote_Create() { $this->txtNote = new QTextBox($this); $this->txtNote->Name = 'Note'; $this->txtNote->TextMode = QTextMode::MultiLine; $this->txtNote->Columns = 80; $this->txtNote->Rows = 4; $this->txtNote->CausesValidation = false; } // Create and Setup lstLocation protected function lstLocation_Create() { $this->lstLocation = new QListBox($this); $this->lstLocation->Name = 'Location'; 
$this->lstLocation->AddItem('- Select One -', null); $objLocationArray = Location::LoadAllLocations(false, false, 'short_description'); if ($objLocationArray) foreach ($objLocationArray as $objLocation) { $objListItem = new QListItem($objLocation->__toString(), $objLocation->LocationId); $this->lstLocation->AddItem($objListItem); } $this->lstLocation->CausesValidation = false; } // Create the text field to enter new asset codes to add to the transaction // Eventually this field will receive information from the AML protected function txtNewAssetCode_Create() { $this->txtNewAssetCode = new QTextBox($this); $this->txtNewAssetCode->Name = 'Asset Code'; $this->txtNewAssetCode->AddAction(new QEnterKeyEvent(), new QAjaxControlAction($this, 'btnAdd_Click')); $this->txtNewAssetCode->AddAction(new QEnterKeyEvent(), new QTerminateAction()); $this->txtNewAssetCode->CausesValidation = false; } // Create the save button protected function btnSave_Create() { $this->btnSave = new QButton($this); $this->btnSave->Text = 'Save'; $this->btnSave->AddAction(new QClickEvent(), new QAjaxControlAction($this, 'btnSave_Click')); $this->btnSave->AddAction(new QEnterKeyEvent(), new QAjaxControlAction($this, 'btnSave_Click')); $this->btnSave->AddAction(new QEnterKeyEvent(), new QTerminateAction()); $this->btnSave->CausesValidation = false; } // Setup Cancel Button protected function btnCancel_Create() { $this->btnCancel = new QButton($this); $this->btnCancel->Text = 'Cancel'; $this->btnCancel->AddAction(new QClickEvent(), new QAjaxControlAction($this, 'btnCancel_Click')); $this->btnCancel->AddAction(new QEnterKeyEvent(), new QAjaxControlAction($this, 'btnCancel_Click')); $this->btnCancel->AddAction(new QEnterKeyEvent(), new QTerminateAction()); $this->btnCancel->CausesValidation = false; } // Setup Add Button protected function btnAdd_Create() { $this->btnAdd = new QButton($this); $this->btnAdd->Text = 'Add'; $this->btnAdd->AddAction(new QClickEvent(), new QAjaxControlAction($this, 'btnAdd_Click')); $this->btnAdd->AddAction(new QEnterKeyEvent(), new QAjaxControlAction($this, 'btnAdd_Click')); $this->btnAdd->AddAction(new QEnterKeyEvent(), new QTerminateAction()); $this->btnAdd->CausesValidation = false; } // Setup the datagrid protected function dtgAssetTransact_Create() { $this->dtgAssetTransact = new QDataGrid($this); $this->dtgAssetTransact->CellPadding = 5; $this->dtgAssetTransact->CellSpacing = 0; $this->dtgAssetTransact->CssClass = "datagrid"; // Enable AJAX - this won't work while using the DB profiler $this->dtgAssetTransact->UseAjax = true; // Enable Pagination, and set to 20 items per page $objPaginator = new QPaginator($this->dtgAssetTransact); $this->dtgAssetTransact->Paginator = $objPaginator; $this->dtgAssetTransact->ItemsPerPage = 20; $this->dtgAssetTransact->AddColumn(new QDataGridColumn('Asset Code', '<?= $_ITEM->__toStringWithLink("bluelink") ?>', array('OrderByClause' => QQ::OrderBy(QQN::Asset()->AssetCode), 'ReverseOrderByClause' => QQ::OrderBy(QQN::Asset()->AssetCode, false), 'CssClass' => "dtg_column", 'HtmlEntities' => false))); $this->dtgAssetTransact->AddColumn(new QDataGridColumn('Model', '<?= $_ITEM->AssetModel->__toStringWithLink("bluelink") ?>', array('OrderByClause' => QQ::OrderBy(QQN::Asset()->AssetModel->ShortDescription), 'ReverseOrderByClause' => QQ::OrderBy(QQN::Asset()->AssetModel->ShortDescription, false), 'Width' => 200, 'CssClass' => "dtg_column", 'HtmlEntities' => false))); $this->dtgAssetTransact->AddColumn(new QDataGridColumn('Current Location', '<?= 
$_ITEM->Location->__toString() ?>', array('OrderByClause' => QQ::OrderBy(QQN::Asset()->Location->ShortDescription), 'ReverseOrderByClause' => QQ::OrderBy(QQN::Asset()->Location->ShortDescription, false), 'CssClass' => "dtg_column", 'HtmlEntities' => false))); $this->dtgAssetTransact->AddColumn(new QDataGridColumn('Action', '<?= $_FORM->RemoveColumn_Render($_ITEM) ?>', array('CssClass' => "dtg_column", 'HtmlEntities' => false))); /* $this->dtgAssetTransact->AddColumn(new QDataGridColumn('Asset Code', '<?= $_ITEM->__toStringWithLink("bluelink") ?>', 'SortByCommand="asset_code ASC"', 'ReverseSortByCommand="asset_code DESC"', 'CssClass="dtg_column"', 'HtmlEntities=false"')); $this->dtgAssetTransact->AddColumn(new QDataGridColumn('Model', '<?= $_ITEM->AssetModel->__toStringWithLink("bluelink") ?>', 'Width=200', 'SortByCommand="asset__asset_model_id__short_description ASC"', 'ReverseSortByCommand="asset__asset_model_id__short_description DESC"', 'CssClass="dtg_column"', 'HtmlEntities=false"')); $this->dtgAssetTransact->AddColumn(new QDataGridColumn('Current Location', '<?= $_ITEM->Location->__toString() ?>', 'SortByCommand="asset__location_id__short_description ASC"', 'ReverseSortByCommand="asset__location_id__short_description DESC"', 'CssClass=dtg_column', 'HtmlEntities=false"')); $this->dtgAssetTransact->AddColumn(new QDataGridColumn('Action', '<?= $_FORM->RemoveColumn_Render($_ITEM) ?>', 'CssClass=dtg_column', 'HtmlEntities=false"')); */ $objStyle = $this->dtgAssetTransact->RowStyle; $objStyle->ForeColor = '#000000'; $objStyle->BackColor = '#FFFFFF'; $objStyle->FontSize = 12; $objStyle = $this->dtgAssetTransact->AlternateRowStyle; $objStyle->BackColor = '#EFEFEF'; $objStyle = $this->dtgAssetTransact->HeaderRowStyle; $objStyle->ForeColor = '#000000'; $objStyle->BackColor = '#EFEFEF'; $objStyle->CssClass = 'dtg_header'; $this->blnTransactionModified = true; } // Add Button Click public function btnAdd_Click($strFormId, $strControlId, $strParameter) { $strAssetCode = $this->txtNewAssetCode->Text; $blnDuplicate = false; $blnError = false; if ($strAssetCode) { // Begin error checking if ($this->objAssetArray) { foreach ($this->objAssetArray as $asset) { if ($asset && $asset->AssetCode == $strAssetCode) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset has already been added."; } } } if (!$blnError) { $objNewAsset = Asset::LoadByAssetCode($this->txtNewAssetCode->Text); if (!($objNewAsset instanceof Asset)) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset code does not exist."; } // Cannot move, check out/in, nor reserve/unreserve any assets that have been shipped elseif ($objNewAsset->LocationId == 2) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset has already been shipped."; } // Cannot move, check out/in, nor reserve/unreserve any assets that are scheduled to be received elseif ($objNewAsset->LocationId == 5) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset is currently scheduled to be received."; } elseif ($objPendingShipment = AssetTransaction::PendingShipment($objNewAsset->AssetId)) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset is already in a pending shipment."; } elseif (!QApplication::AuthorizeEntityBoolean($objNewAsset, 2)) { $blnError = true; $this->txtNewAssetCode->Warning = "You do not have authorization to perform a transaction on this asset."; } // Move elseif ($this->intTransactionTypeId == 1) { if ($objNewAsset->CheckedOutFlag) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset is 
checked out."; } elseif ($objNewAsset->ReservedFlag) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset is reserved."; } } // Check in elseif ($this->intTransactionTypeId == 2) { if (!$objNewAsset->CheckedOutFlag) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset is not checked out."; } elseif ($objNewAsset->ReservedFlag) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset is reserved."; } elseif ($objNewAsset->CheckedOutFlag) { $objUserAccount = $objNewAsset->GetLastTransactionUser(); if ($objUserAccount->UserAccountId != QApplication::$objUserAccount->UserAccountId) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset was not checked out by the current user."; } } } elseif ($this->intTransactionTypeId ==3) { if ($objNewAsset->CheckedOutFlag) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset is already checked out."; } elseif ($objNewAsset->ReservedFlag) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset is reserved."; } } elseif ($this->intTransactionTypeId == 8) { if ($objNewAsset->ReservedFlag) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset is already reserved."; } elseif ($objNewAsset->CheckedOutFlag) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset is checked out."; } } // Unreserver elseif ($this->intTransactionTypeId == 9) { if (!$objNewAsset->ReservedFlag) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset is not reserved"; } elseif ($objNewAsset->CheckedOutFlag) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset is checked out."; } elseif ($objNewAsset->ReservedFlag) { $objUserAccount = $objNewAsset->GetLastTransactionUser(); if ($objUserAccount->UserAccountId != QApplication::$objUserAccount->UserAccountId) { $blnError = true; $this->txtNewAssetCode->Warning = "That asset was not reserved by the current user."; } } } if (!$blnError && $objNewAsset instanceof Asset) { $this->objAssetArray[] = $objNewAsset; $this->txtNewAssetCode->Text = null; } } } else { $this->txtNewAssetCode->Warning = "Please enter an asset code."; } } // Save Button Click public function btnSave_Click($strFormId, $strControlId, $strParameter) { if ($this->objAssetArray) { $blnError = false; foreach ($this->objAssetArray as $asset) { // TransactionTypeId = 1 is for moves if ($this->intTransactionTypeId == 1) { if ($asset->LocationId == $this->lstLocation->SelectedValue) { $this->dtgAssetTransact->Warning = 'Cannot move an asset from a location to the same location.'; $blnError = true; } } // For all transactions except Unreserve, make sure the asset is not already reserved if ($this->intTransactionTypeId != 9 && $asset->ReservedFlag) { $this->btnCancel->Warning = sprintf('The Asset %s is reserved.',$asset->AssetCode); $blnError = true; } } if (!$blnError) { if (($this->intTransactionTypeId == 1 || $this->intTransactionTypeId == 2) && is_null($this->lstLocation->SelectedValue)) { $this->lstLocation->Warning = 'Location is required.'; $blnError = true; } elseif ($this->txtNote->Text == '') { $this->txtNote->Warning = 'Note is required.'; $blnError = true; } } if (!$blnError) { try { // Get an instance of the database $objDatabase = QApplication::$Database[1]; // Begin a MySQL Transaction to be either committed or rolled back $objDatabase->TransactionBegin(); // Create the new transaction object and save it $this->objTransaction = new Transaction(); // Entity Qtype is Asset $this->objTransaction->EntityQtypeId = EntityQtype::Asset; 
$this->objTransaction->TransactionTypeId = $this->intTransactionTypeId; $this->objTransaction->Note = $this->txtNote->Text; $this->objTransaction->Save(); // Assign different source and destinations depending on transaction type foreach ($this->objAssetArray as $asset) { if ($asset instanceof Asset) { $SourceLocationId = $asset->LocationId; if ($this->intTransactionTypeId == 1) { $DestinationLocationId = $this->lstLocation->SelectedValue; } elseif ($this->intTransactionTypeId == 2) { $DestinationLocationId = $this->lstLocation->SelectedValue; $asset->CheckedOutFlag = false; } elseif ($this->intTransactionTypeId == 3) { $DestinationLocationId = 1; $asset->CheckedOutFlag = true; } elseif ($this->intTransactionTypeId == 8) { $DestinationLocationId = $asset->LocationId; $asset->ReservedFlag = true; } elseif ($this->intTransactionTypeId == 9) { $DestinationLocationId = $asset->LocationId; $asset->ReservedFlag = false; } $asset->LocationId = $DestinationLocationId; $asset->Save(); // Create the new assettransaction object and save it $this->objAssetTransaction = new AssetTransaction(); $this->objAssetTransaction->AssetId = $asset->AssetId; $this->objAssetTransaction->TransactionId = $this->objTransaction->TransactionId; $this->objAssetTransaction->SourceLocationId = $SourceLocationId; $this->objAssetTransaction->DestinationLocationId = $DestinationLocationId; $this->objAssetTransaction->Save(); } } // Commit the above transactions to the database $objDatabase->TransactionCommit(); QApplication::Redirect('../common/transaction_edit.php?intTransactionId='.$this->objTransaction->TransactionId); } catch (QOptimisticLockingException $objExc) { // Rollback the database $objDatabase->TransactionRollback(); $objAsset = Asset::Load($objExc->EntityId); $this->objParentObject->btnRemove_Click($this->objParentObject->FormId, 'btnRemove' . $objExc->EntityId, $objExc->EntityId); // Lock Exception Thrown, Report the Error $this->btnCancel->Warning = sprintf('The Asset %s has been altered by another user and removed from the transaction. 
You may add the asset again or save the transaction without it.', $objAsset->AssetCode); } } } } // Cancel Button Click public function btnCancel_Click($strFormId, $strControlId, $strParameter) { if ($this->blnEditMode) { $this->objParentObject->DisplayTransaction(false); $this->objAssetArray = null; $this->txtNewAssetCode->Text = null; $this->txtNote->Text = null; $this->objParentObject->DisplayEdit(true); $this->objAssetArray[] = $this->objAsset; } else { QApplication::Redirect('asset_list.php'); } } // Prepare the Transaction form display depending on transaction type public function SetupDisplay($intTransactionTypeId) { $this->intTransactionTypeId = $intTransactionTypeId; switch ($this->intTransactionTypeId) { // Move case 1: $this->lstLocation->Display = true; break; // Check In case 2: $this->lstLocation->Display = true; break; // Check Out case 3: $this->lstLocation->Display = false; break; // Reserve case 8: $this->lstLocation->Display = false; break; // Unreserve case 9: $this->lstLocation->Display = false; break; } // Redeclare in case the asset has been edited $this->objAssetArray = null; if ($this->blnEditMode && $this->objAsset instanceof Asset) { $this->objAssetArray[] = Asset::Load($this->objAsset->AssetId); } } // And our public getter/setters public function __get($strName) { switch ($strName) { case "objAsset": return $this->objAsset; case "objAssetArray": return $this->objAssetArray; case "dtgAssetTransact": return $this->dtgAssetTransact; case "intTransactionTypeId": return $this->intTransactionTypeId; case "blnTransactionModified": return $this->blnTransactionModified; default: try { return parent::__get($strName); } catch (QCallerException $objExc) { $objExc->IncrementOffset(); throw $objExc; } } } ///////////////////////// // Public Properties: SET ///////////////////////// public function __set($strName, $mixValue) { $this->blnModified = true; switch ($strName) { case "objAsset": $this->objAsset = $mixValue; break; case "objAssetArray": $this->objAssetArray = $mixValue; break; case "strTitleVerb": $this->strTitleVerb = $mixValue; break; case "blnEditMode": $this->blnEditMode = $mixValue; break; case "dtgAssetTransact": $this->dtgAssetTransact = $mixValue; break; case "intTransactionTypeId": $this->intTransactionTypeId = $mixValue; break; case "blnTransactionModified": $this->blnTransactionModified = $mixValue; default: try { parent::__set($strName, $mixValue); } catch (QCallerException $objExc) { $objExc->IncrementOffset(); throw $objExc; } break; } } } ?>
heshuai64/einv2
includes/qcodo/qform/QAssetTransactComposite.class.php
PHP
gpl-2.0
20,711
'''
Created on Nov 13, 2013

@author: samriggs

CODE CHALLENGE: Solve the Minimum Skew Problem.
https://beta.stepic.org/Bioinformatics-Algorithms-2/Peculiar-Statistics-of-the-Forward-and-Reverse-Half-Strands-7/#step-6
'''
from bi_utils.helpers import sane_open
from cStringIO import StringIO


def min_skew(dataset=''):
    if (not dataset):
        dataset = "stepic_dataset.txt"
    # O(n)
    with sane_open(dataset) as f:
        skew = 0
        skew_list = []
        for c in f.readline():
            # calculate skew for each char
            skew_list.append(skew)
            if (c == "C"):
                skew -= 1
            elif (c == "G"):
                skew += 1
    # get min value, O(n)
    min_skew = min(skew_list)
    # O(n)
    position = 0
    file_str = StringIO()
    for num in skew_list:
        if (num == min_skew):
            file_str.write(str(position))
            file_str.write(" ")
        position += 1
    print file_str.getvalue().strip()


if (__name__ == "__main__"):
    min_skew()
samriggs/bioinf
Homeworks/bi-Python/chapter1/quiz6_solution.py
Python
gpl-2.0
1,112
<?php /** * * Profile Flair. An extension for the phpBB Forum Software package. * * @copyright (c) 2017, Steve Guidetti, https://github.com/stevotvr * @license GNU General Public License, version 2 (GPL-2.0) * */ namespace stevotvr\flair\controller; use phpbb\db\driver\driver_interface; use phpbb\json_response; use stevotvr\flair\operator\category_interface; use stevotvr\flair\operator\flair_interface; use stevotvr\flair\operator\user_interface; /** * Profile Flair user MCP controller. */ class mcp_user_controller extends acp_base_controller implements mcp_user_interface { /** * @var driver_interface */ protected $db; /** * @var category_interface */ protected $cat_operator; /** * @var flair_interface */ protected $flair_operator; /** * @var user_interface */ protected $user_operator; /** * @var p_master */ protected $p_master; /** * Set up the controller. * * @param driver_interface $db * @param category_interface $cat_operator * @param flair_interface $flair_operator * @param user_interface $user_operator */ public function setup(driver_interface $db, category_interface $cat_operator, flair_interface $flair_operator, user_interface $user_operator) { $this->db = $db; $this->cat_operator = $cat_operator; $this->flair_operator = $flair_operator; $this->user_operator = $user_operator; } /** * @inheritDoc */ public function set_p_master($p_master) { $this->p_master = $p_master; } /** * @inheritDoc */ public function find_user() { $this->language->add_lang('acp/users'); $u_find_username = append_sid($this->root_path . 'memberlist.' . $this->php_ext, 'mode=searchuser&amp;form=select_user&amp;field=username&amp;select_single=true'); $this->template->assign_vars(array( 'S_SELECT_USER' => true, 'U_ACTION' => str_replace('mode=front', 'mode=user_flair', $this->u_action), 'U_FIND_USERNAME' => $u_find_username, )); } /** * @inheritDoc */ public function edit_user_flair() { $user_id = $this->request->variable('u', 0); $username = $this->request->variable('username', '', true); $where = ($user_id) ? 'user_id = ' . (int) $user_id : "username_clean = '" . $this->db->sql_escape(utf8_clean_string($username)) . "'"; $sql = 'SELECT user_id, username, user_colour FROM ' . USERS_TABLE . ' WHERE ' . $where; $this->db->sql_query($sql); $userrow = $this->db->sql_fetchrow(); $this->db->sql_freeresult(); if (!$userrow) { trigger_error($this->language->lang('NO_USER'), E_USER_WARNING); } $user_id = (int) $userrow['user_id']; if (strpos($this->u_action, '&amp;u=' . $user_id) === false) { $this->p_master->adjust_url('&amp;u=' . $user_id); $this->u_action .= '&amp;u=' . $user_id; } if ($this->request->is_set_post('add_flair')) { $this->change_flair($user_id, 'add'); } else if ($this->request->is_set_post('remove_flair')) { $this->change_flair($user_id, 'remove'); } else if ($this->request->is_set_post('set_flair')) { $this->change_flair($user_id, 'set'); } $user_flair = $this->user_operator->get_user_flair((array) $user_id); $user_flair = isset($user_flair[$user_id]) ? $user_flair[$user_id] : array(); $this->assign_tpl_vars($user_id, $userrow['username'], $userrow['user_colour'], $user_flair); } /** * Assign the template variables for the page. 
* * @param int $user_id The ID of the user being worked on * @param string $username The name of the user being worked on * @param string $user_colour The color of the user being worked on * @param array $user_flair The flair items assigned to the user being worked on */ protected function assign_tpl_vars($user_id, $username, $user_colour, array $user_flair) { $this->template->assign_vars(array( 'FLAIR_USER' => $username, 'FLAIR_USER_FULL' => get_username_string('full', $user_id, $username, $user_colour), 'U_ACTION' => $this->u_action . '&amp;u=' . $user_id, )); $this->assign_flair_tpl_vars(); $this->assign_user_tpl_vars($user_flair); } /** * Assign template variables for the available flair. */ protected function assign_flair_tpl_vars() { $available_cats = $this->cat_operator->get_categories(); $categories = array(array('category' => $this->language->lang('FLAIR_UNCATEGORIZED'))); foreach ($available_cats as $entity) { $categories[$entity->get_id()]['category'] = $entity->get_name(); } $flair = $this->flair_operator->get_flair(); foreach ($flair as $entity) { $categories[$entity->get_category()]['items'][] = $entity; } foreach ($categories as $category) { if (!isset($category['items'])) { continue; } $this->template->assign_block_vars('cat', array( 'CAT_NAME' => $category['category'], )); foreach ($category['items'] as $entity) { $this->template->assign_block_vars('cat.item', array( 'FLAIR_TYPE' => $entity->get_type(), 'FLAIR_SIZE' => 2, 'FLAIR_ID' => $entity->get_id(), 'FLAIR_NAME' => $entity->get_name(), 'FLAIR_NAME_SHORT' => truncate_string($entity->get_name(), 30, 255, false, '…'), 'FLAIR_COLOR' => $entity->get_color(), 'FLAIR_ICON' => $entity->get_icon(), 'FLAIR_ICON_COLOR' => $entity->get_icon_color(), 'FLAIR_IMG' => $this->img_path . $entity->get_img(2), )); } } } /** * Assign template variables for the user flair. * * @param array $user_flair The flair items assigned to the user being worked on */ protected function assign_user_tpl_vars(array $user_flair) { foreach ($user_flair as $category) { $this->template->assign_block_vars('flair', array( 'CAT_NAME' => $category['category']->get_name(), )); foreach ($category['items'] as $item) { $entity = $item['flair']; $this->template->assign_block_vars('flair.item', array( 'S_FROM_GROUP' => $item['from_group'], 'FLAIR_TYPE' => $entity->get_type(), 'FLAIR_SIZE' => 2, 'FLAIR_ID' => $entity->get_id(), 'FLAIR_NAME' => $entity->get_name(), 'FLAIR_NAME_SHORT' => truncate_string($entity->get_name(), 30, 255, false, '…'), 'FLAIR_COLOR' => $entity->get_color(), 'FLAIR_ICON' => $entity->get_icon(), 'FLAIR_ICON_COLOR' => $entity->get_icon_color(), 'FLAIR_IMG' => $this->img_path . $entity->get_img(2), 'FLAIR_FONT_COLOR' => $entity->get_font_color(), 'FLAIR_COUNT' => $item['count'], )); } } } /** * Make a change to the flair assigned to the user or group being worked on. * * @param int $user_id The ID of the user being worked on * @param string $change The type of change to make (add|remove|set) */ protected function change_flair($user_id, $change) { $action = $this->request->variable($change . '_flair', array('' => '')); if (is_array($action)) { list($id, ) = each($action); } if ($id) { if ($change === 'remove') { if (!confirm_box(true)) { $hidden_fields = build_hidden_fields(array( 'remove_flair[' . $id . ']' => true, )); confirm_box(false, $this->language->lang('MCP_FLAIR_REMOVE_CONFIRM'), $hidden_fields); return; } $this->user_operator->set_flair_count($user_id, $id, 0); } else { $counts = $this->request->variable($change . 
'_count', array('' => '')); $count = (isset($counts[$id])) ? (int) $counts[$id] : 1; if ($change === 'add') { $this->user_operator->add_flair($user_id, $id, $count); } else if ($change === 'set') { $this->user_operator->set_flair_count($user_id, $id, $count); } } if ($this->request->is_ajax()) { $json_response = new json_response(); $json_response->send(array( 'REFRESH_DATA' => array( 'url' => html_entity_decode($this->u_action) . '&u=' . $user_id, ), )); } } redirect($this->u_action . '&amp;u=' . $user_id); } }
stevotvr/phpbb-flair
controller/mcp_user_controller.php
PHP
gpl-2.0
7,940
<div class="popin_selection choisir_#ENV{type}" id="popin-#ENV{nom}"> <div class="selection"> <div class="sortable"> <INCLURE{fond=prive/squelettes/inclure/ajax_#ENV{type}}{type}{ids}> </div> <div class="nettoyeur"></div> <input type="hidden" id="ids" value="#ENV{ids}" /> <input type="submit" id="valider_selection" value="Valider la sélection" /> </div> <INCLURE{fond=prive/squelettes/inclure/selection_#ENV{type}}{ajax}{env}> </div> <script> $(document).ready(function(){ $('#valider_selection').click(function(e){ var ids = $('#ids').val(); [(#ENV{fonction})](ids); return false; }); $('#popin-[(#ENV{nom})] .sortable').sortable({ tolerance: 'intersect', cursor: 'move', items: '.item', update: function(event, ui){ var data = ''; $('#popin-[(#ENV{nom})] .sortable .item').each(function(i, el){ data+= $(el).data('id')+","; }); data = data.slice(0, -1); $('#ids').val(data); } }).disableSelection(); }); $(document).on('keyup', '#ids', function(e){ var ids = $(this).val(); if (ids) { $('#popin-[(#ENV{nom})] .sortable').load('[(#URL_ECRIRE{ajax_objets})]&var_zajax=contenu&type=#ENV{type}&ids='+ids); } else { $('#popin-[(#ENV{nom})] .sortable').html(''); } }); $(document).on('click', '#popin-[(#ENV{nom})] .choisir', function(e){ var id_a_ajouter = $(this).data('id'); var ids = $('#ids').val(); if (ids == '') { var tableau = new Array(); } else { var tableau = ids.split(','); } var present = false; if (tableau.length) { tableau.forEach(function(id){ if (id == id_a_ajouter) present = true; }); if (!present) tableau.push(id_a_ajouter); } else { tableau.push(id_a_ajouter); } ids = tableau.join(); $('#ids').val(ids).keyup(); }); $(document).on('click', '#popin-[(#ENV{nom})] .supprimer', function(e){ var id_a_supprimer = $(this).parent().data('id'); var ids = $("#ids").val().split(','); var nouveaux_ids = new Array(); if (ids.length) { ids.forEach(function(id){ if (id != id_a_supprimer) nouveaux_ids.push(id); }); } nouveaux_ids = nouveaux_ids.join(); $("#ids").val(nouveaux_ids).keyup(); }); </script>
studiomaiis/saisies_maiis
prive/squelettes/contenu/popin_selection_objets.html
HTML
gpl-2.0
2,130
# vandofb.github.io
Vando Batista
vandofb/vandofb.github.io
README.md
Markdown
gpl-2.0
34
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;

// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("XWINDOWS")]
[assembly: AssemblyDescription("WINDOWS Model for XORCISM")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("Jerome Athias")]
[assembly: AssemblyProduct("XWINDOWS")]
[assembly: AssemblyCopyright("Copyright © Jerome Athias 2015")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]

// Setting ComVisible to false makes the types in this assembly not visible
// to COM components. If you need to access a type in this assembly from
// COM, set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]

// The following GUID is for the ID of the typelib if this project is exposed to COM
[assembly: Guid("c113e364-c8b6-4791-aff4-9814a8495859")]

// Version information for an assembly consists of the following four values:
//
//      Major Version
//      Minor Version
//      Build Number
//      Revision
//
// You can specify all the values or you can default the Build and Revision Numbers
// by using the '*' as shown below:
// [assembly: AssemblyVersion("1.0.*")]
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]
athiasjerome/XORCISM
SOURCES/XWINDOWS/Properties/AssemblyInfo.cs
C#
gpl-2.0
1,443
# GumpScript

A script just for fun and just for learning.
The name is from the movie Forrest Gump... I love it.
songbingyu/GumpScript
README.md
Markdown
gpl-2.0
117
<?php
/**
 * @package    EasySocial
 * @copyright  Copyright (C) 2010 - 2013 Stack Ideas Sdn Bhd. All rights reserved.
 * @license    GNU/GPL, see LICENSE.php
 * EasySocial is free software. This version may have been modified pursuant
 * to the GNU General Public License, and as distributed it includes or
 * is derivative of works licensed under the GNU General Public License or
 * other free or open source software licenses.
 * See COPYRIGHT.php for copyright notices and details.
 */
defined( '_JEXEC' ) or die( 'Unauthorized Access' );
?>
<?php if( $params->get( 'format' , 1 ) == 1 ){ ?>
	<?php echo $name->first;?> <?php echo $name->middle;?> <?php echo $name->last;?>
<?php } ?>

<?php if( $params->get( 'format' , 1 ) == 2 ){ ?>
	<?php echo $name->last;?> <?php echo $name->middle;?> <?php echo $name->first;?>
<?php } ?>

<?php if( $params->get( 'format' , 1 ) == 3 ){ ?>
	<?php echo $name->name;?>
<?php } ?>
cuongnd/test_pro
media/com_easysocial/apps/fields/user/joomla_fullname/themes/default/display_content.php
PHP
gpl-2.0
909
package anzac.peripherals.proxy;

public class ClientProxy extends CommonProxy {

	@Override
	public void registerKeyBindings() {
		// TODO Auto-generated method stub
	}

}
williamanzac/anzacperipherals
src/main/java/anzac/peripherals/proxy/ClientProxy.java
Java
gpl-2.0
181
#ifndef EXP_H
#define EXP_H

#include <exception>
#include <stdexcept>
#include <string>

class Existential_Exception : public std::logic_error {
	public:
		Existential_Exception() : std::logic_error( "\nExistential Crisis\n") { }
};

#endif
InsidiousMind/jokes
EX/EXP.h
C
gpl-2.0
244
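A minimal usage sketch for the EXP.h header above (not part of the original repository; the main() driver and output stream choice are purely illustrative), showing the Existential_Exception being thrown and caught through its std::logic_error base:

#include <iostream>
#include "EXP.h"

int main()
{
    try
    {
        // Trigger the custom exception defined in EXP.h.
        throw Existential_Exception();
    }
    catch (const std::logic_error& e)
    {
        // Prints the "Existential Crisis" message set in the constructor.
        std::cout << e.what();
    }
    return 0;
}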
# gpSP makefile
# Gilead Kutnick - Exophase
# GP2X port(ion) - Z

# Global definitions

PREFIX = /opt/open2x/gcc-4.1.1-glibc-2.3.6
CC     = $(PREFIX)/bin/arm-open2x-linux-gcc
STRIP  = $(PREFIX)/bin/arm-open2x-linux-strip

OBJS = main.o cpu.o memory.u video.o input.o sound.o gp2x.o gui.o \
       cheats.o zip.o cpu_threaded.z arm_stub.o video_blend.o \
       warm.o upscale_aspect.o
ifeq ($(WIZ),1)
OBJS += pollux_dpc_set.o
BIN  = gpsp_wiz
else
BIN  = gpsp_gp2x
endif

# Platform specific definitions

VPATH  += ..
CFLAGS += -DARM_ARCH -DGP2X_BUILD
ifeq ($(WIZ),1)
CFLAGS += -DWIZ_BUILD
endif
# NOTE: -funroll-loops will slow down compiling considerably
CFLAGS += -O3 -std=c99 -msoft-float -funsigned-char -fno-common \
          -fno-builtin \

INCLUDES = `$(PREFIX)/bin/sdl-config --cflags` -I$(PREFIX)/include

LIBS = `$(PREFIX)/bin/sdl-config --libs` \
       -lm -ldl -lpthread -lz
ifneq ($(WIZ),1)
LIBS += -static
endif

# Compilation:

.SUFFIXES: .c

%.z: %.c
	$(CC) $(CFLAGS) $(INCLUDES) -c -o $@ $<

%.u: %.c
	$(CC) $(CFLAGS) $(INCLUDES) -c -o $@ $<

%.o: %.c
	$(CC) $(CFLAGS) $(INCLUDES) -c -o $@ $<

%.o: %.S
	$(CC) $(ASFLAGS) $(INCLUDES) -c -o $@ $<

%.o: %.s
	$(CC) $(ASFLAGS) $(INCLUDES) -c -o $@ $<

all: $(OBJS)
	$(CC) $(OBJS) $(LIBS) -o $(BIN)
	$(STRIP) $(BIN)

clean:
	rm -f *.o *.u *.z $(BIN)
nirvous/gpsp_lf1000
gp2x/Makefile
Makefile
gpl-2.0
1,487
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef NET_HTTP_HTTP_AUTH_CONTROLLER_H_ #define NET_HTTP_HTTP_AUTH_CONTROLLER_H_ #pragma once #include <set> #include <string> #include "base/basictypes.h" #include "base/memory/ref_counted.h" #include "base/memory/scoped_ptr.h" #include "base/string16.h" #include "base/threading/non_thread_safe.h" #include "googleurl/src/gurl.h" #include "net/base/completion_callback.h" #include "net/base/net_log.h" #include "net/http/http_auth.h" namespace net { class AuthChallengeInfo; class HttpAuthHandler; class HttpAuthHandlerFactory; class HttpAuthCache; class HttpRequestHeaders; struct HttpRequestInfo; class HttpAuthController : public base::RefCounted<HttpAuthController>, public base::NonThreadSafe { public: HttpAuthController(HttpAuth::Target target, const GURL& auth_url, HttpAuthCache* http_auth_cache, HttpAuthHandlerFactory* http_auth_handler_factory); virtual int MaybeGenerateAuthToken(const HttpRequestInfo* request, CompletionCallback* callback, const BoundNetLog& net_log); virtual void AddAuthorizationHeader( HttpRequestHeaders* authorization_headers); virtual int HandleAuthChallenge(scoped_refptr<HttpResponseHeaders> headers, bool do_not_send_server_auth, bool establishing_tunnel, const BoundNetLog& net_log); virtual void ResetAuth(const string16& username, const string16& password); virtual bool HaveAuthHandler() const; virtual bool HaveAuth() const; virtual scoped_refptr<AuthChallengeInfo> auth_info(); virtual bool IsAuthSchemeDisabled(HttpAuth::Scheme scheme) const; virtual void DisableAuthScheme(HttpAuth::Scheme scheme); private: enum InvalidateHandlerAction { INVALIDATE_HANDLER_AND_CACHED_CREDENTIALS, INVALIDATE_HANDLER }; friend class base::RefCounted<HttpAuthController>; virtual ~HttpAuthController(); bool SelectPreemptiveAuth(const BoundNetLog& net_log); void InvalidateCurrentHandler(InvalidateHandlerAction action); void InvalidateRejectedAuthFromCache(); bool SelectNextAuthIdentityToTry(); void PopulateAuthChallenge(); bool DisableOnAuthHandlerResult(int result); void OnIOComplete(int result); HttpAuth::Target target_; const GURL auth_url_; const GURL auth_origin_; const std::string auth_path_; scoped_ptr<HttpAuthHandler> handler_; HttpAuth::Identity identity_; std::string auth_token_; scoped_refptr<AuthChallengeInfo> auth_info_; bool embedded_identity_used_; bool default_credentials_used_; HttpAuthCache* const http_auth_cache_; HttpAuthHandlerFactory* const http_auth_handler_factory_; std::set<HttpAuth::Scheme> disabled_schemes_; CompletionCallbackImpl<HttpAuthController> io_callback_; CompletionCallback* user_callback_; }; } #endif
qtekfun/htcDesire820Kernel
external/chromium/net/http/http_auth_controller.h
C
gpl-2.0
3,336
<?xml version="1.0" encoding="iso-8859-1"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <!-- template designed by Marco Von Ballmoos --> <title>Docs For Class BannersViewDownload</title> <link rel="stylesheet" href="../../media/stylesheet.css" /> <script src="../../media/lib/classTree.js"></script> <script language="javascript" type="text/javascript"> var imgPlus = new Image(); var imgMinus = new Image(); imgPlus.src = "../../media/images/plus.png"; imgMinus.src = "../../media/images/minus.png"; function showNode(Node){ switch(navigator.family){ case 'nn4': // Nav 4.x code fork... var oTable = document.layers["span" + Node]; var oImg = document.layers["img" + Node]; break; case 'ie4': // IE 4/5 code fork... var oTable = document.all["span" + Node]; var oImg = document.all["img" + Node]; break; case 'gecko': // Standards Compliant code fork... var oTable = document.getElementById("span" + Node); var oImg = document.getElementById("img" + Node); break; } oImg.src = imgMinus.src; oTable.style.display = "block"; } function hideNode(Node){ switch(navigator.family){ case 'nn4': // Nav 4.x code fork... var oTable = document.layers["span" + Node]; var oImg = document.layers["img" + Node]; break; case 'ie4': // IE 4/5 code fork... var oTable = document.all["span" + Node]; var oImg = document.all["img" + Node]; break; case 'gecko': // Standards Compliant code fork... var oTable = document.getElementById("span" + Node); var oImg = document.getElementById("img" + Node); break; } oImg.src = imgPlus.src; oTable.style.display = "none"; } function nodeIsVisible(Node){ switch(navigator.family){ case 'nn4': // Nav 4.x code fork... var oTable = document.layers["span" + Node]; break; case 'ie4': // IE 4/5 code fork... var oTable = document.all["span" + Node]; break; case 'gecko': // Standards Compliant code fork... 
var oTable = document.getElementById("span" + Node); break; } return (oTable && oTable.style.display == "block"); } function toggleNodeVisibility(Node){ if (nodeIsVisible(Node)){ hideNode(Node); }else{ showNode(Node); } } </script> </head> <body> <div class="page-body"> <h2 class="class-name"><img src="../../media/images/Class_logo.png" alt=" Class" title=" Class" style="vertical-align: middle"> BannersViewDownload</h2> <a name="sec-description"></a> <div class="info-box"> <div class="info-box-title">Description</div> <div class="nav-bar"> <span class="disabled">Description</span> | <a href="#sec-var-summary">Vars</a> (<a href="#sec-vars">details</a>) | <a href="#sec-method-summary">Methods</a> (<a href="#sec-methods">details</a>) </div> <div class="info-box-body"> <!-- ========== Info from phpDoc block ========= --> <p class="short-description">View class for download a list of tracks.</p> <ul class="tags"> <li><span class="field">since:</span> 1.6</li> </ul> <p class="notes"> Located in <a class="field" href="_administrator---components---com_banners---views---download---view.html.php.html">/administrator/components/com_banners/views/download/view.html.php</a> (line <span class="field"><a href="../../filesource/fsource_Joomla-Administrator_com_banners_administratorcomponentscom_bannersviewsdownloadview.html.php.html#a19">19</a></span>) </p> <pre><a href="../../Joomla-Platform/Object/JObject.html">JObject</a> | --<a href="../../Joomla-Legacy/View/JViewLegacy.html">JViewLegacy</a> | --BannersViewDownload</pre> </div> </div> <a name="sec-var-summary"></a> <div class="info-box"> <div class="info-box-title">Variable Summary</span></div> <div class="nav-bar"> <a href="#sec-description">Description</a> | <span class="disabled">Vars</span> (<a href="#sec-vars">details</a>) | <a href="#sec-method-summary">Methods</a> (<a href="#sec-methods">details</a>) </div> <div class="info-box-body"> <div class="var-summary"> <div class="var-title"> <img src="../../media/images/Variable.png" alt=" " /> <span class="var-type">mixed</span> <a href="#$form" title="details" class="var-name">$form</a> </div> </div> </div> </div> <a name="sec-method-summary"></a> <div class="info-box"> <div class="info-box-title">Method Summary</span></div> <div class="nav-bar"> <a href="#sec-description">Description</a> | <a href="#sec-var-summary">Vars</a> (<a href="#sec-vars">details</a>) | <span class="disabled">Methods</span> (<a href="#sec-methods">details</a>) </div> <div class="info-box-body"> <div class="method-summary"> <div class="method-definition"> <img src="../../media/images/Method.png" alt=" "/> <span class="method-result">void</span> <a href="#display" title="details" class="method-name">display</a> ([<span class="var-type"></span>&nbsp;<span class="var-name">$tpl</span> = <span class="var-default">null</span>]) </div> </div> </div> </div> <a name="sec-vars"></a> <div class="info-box"> <div class="info-box-title">Variables</div> <div class="nav-bar"> <a href="#sec-description">Description</a> | <a href="#sec-var-summary">Vars</a> (<span class="disabled">details</span>) | <a href="#sec-method-summary">Methods</a> (<a href="#sec-methods">details</a>) </div> <div class="info-box-body"> <a name="var$form" id="$form"><!-- --></A> <div class="oddrow"> <div class="var-header"> <img src="../../media/images/Variable.png" /> <span class="var-title"> <span class="var-type">mixed</span> <span class="var-name">$form</span> (line <span class="line-number"><a 
href="../../filesource/fsource_Joomla-Administrator_com_banners_administratorcomponentscom_bannersviewsdownloadview.html.php.html#a21">21</a></span>) </span> </div> <!-- ========== Info from phpDoc block ========= --> <ul class="tags"> <li><span class="field">access:</span> protected</li> </ul> </div> <h4>Inherited Variables</h4> <A NAME='inherited_vars'><!-- --></A> <p>Inherited from <span class="classname"><a href="../../Joomla-Legacy/View/JViewLegacy.html">JViewLegacy</a></span></p> <blockquote> <img src="../../media/images/Variable.png" /> <span class="var-title"> <span class="var-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#var$_basePath">JViewLegacy::$_basePath</a></span><br> </span> <img src="../../media/images/Variable.png" /> <span class="var-title"> <span class="var-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#var$_charset">JViewLegacy::$_charset</a></span><br> </span> <img src="../../media/images/Variable.png" /> <span class="var-title"> <span class="var-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#var$_defaultModel">JViewLegacy::$_defaultModel</a></span><br> </span> <img src="../../media/images/Variable.png" /> <span class="var-title"> <span class="var-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#var$_escape">JViewLegacy::$_escape</a></span><br> </span> <img src="../../media/images/Variable.png" /> <span class="var-title"> <span class="var-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#var$_layout">JViewLegacy::$_layout</a></span><br> </span> <img src="../../media/images/Variable.png" /> <span class="var-title"> <span class="var-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#var$_layoutExt">JViewLegacy::$_layoutExt</a></span><br> </span> <img src="../../media/images/Variable.png" /> <span class="var-title"> <span class="var-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#var$_layoutTemplate">JViewLegacy::$_layoutTemplate</a></span><br> </span> <img src="../../media/images/Variable.png" /> <span class="var-title"> <span class="var-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#var$_models">JViewLegacy::$_models</a></span><br> </span> <img src="../../media/images/Variable.png" /> <span class="var-title"> <span class="var-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#var$_name">JViewLegacy::$_name</a></span><br> </span> <img src="../../media/images/Variable.png" /> <span class="var-title"> <span class="var-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#var$_output">JViewLegacy::$_output</a></span><br> </span> <img src="../../media/images/Variable.png" /> <span class="var-title"> <span class="var-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#var$_path">JViewLegacy::$_path</a></span><br> </span> <img src="../../media/images/Variable.png" /> <span class="var-title"> <span class="var-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#var$_template">JViewLegacy::$_template</a></span><br> </span> </blockquote> <p>Inherited from <span class="classname"><a href="../../Joomla-Platform/Object/JObject.html">JObject</a></span></p> <blockquote> <img src="../../media/images/Variable.png" /> <span class="var-title"> <span class="var-name"><a href="../../Joomla-Platform/Object/JObject.html#var$_errors">JObject::$_errors</a></span><br> </span> </blockquote> </div> </div> <a name="sec-methods"></a> <div class="info-box"> <div class="info-box-title">Methods</div> <div class="nav-bar"> <a href="#sec-description">Description</a> | <a href="#sec-var-summary">Vars</a> (<a 
href="#sec-vars">details</a>) <a href="#sec-method-summary">Methods</a> (<span class="disabled">details</span>) </div> <div class="info-box-body"> <A NAME='method_detail'></A> <a name="methoddisplay" id="display"><!-- --></a> <div class="evenrow"> <div class="method-header"> <img src="../../media/images/Method.png" /> <span class="method-title">display</span> (line <span class="line-number"><a href="../../filesource/fsource_Joomla-Administrator_com_banners_administratorcomponentscom_bannersviewsdownloadview.html.php.html#a25">25</a></span>) </div> <!-- ========== Info from phpDoc block ========= --> <p class="short-description">Display the view</p> <ul class="tags"> <li><span class="field">access:</span> public</li> </ul> <div class="method-signature"> <span class="method-result">void</span> <span class="method-name"> display </span> ([<span class="var-type"></span>&nbsp;<span class="var-name">$tpl</span> = <span class="var-default">null</span>]) </div> <ul class="parameters"> <li> <span class="var-type"></span> <span class="var-name">$tpl</span> </li> </ul> <hr class="separator" /> <div class="notes">Redefinition of:</div> <dl> <dt><a href="../../Joomla-Legacy/View/JViewLegacy.html#methoddisplay">JViewLegacy::display()</a></dt> <dd>Execute and display a template script.</dd> </dl> </div> <h4>Inherited Methods</h4> <a name='inherited_methods'><!-- --></a> <!-- =========== Summary =========== --> <p>Inherited From <span class="classname"><a href="../../Joomla-Legacy/View/JViewLegacy.html">JViewLegacy</a></span></p> <blockquote> <img src="../../media/images/Constructor.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#method__construct">JViewLegacy::__construct()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodaddHelperPath">JViewLegacy::addHelperPath()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodaddTemplatePath">JViewLegacy::addTemplatePath()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodassign">JViewLegacy::assign()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodassignRef">JViewLegacy::assignRef()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methoddisplay">JViewLegacy::display()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodescape">JViewLegacy::escape()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodget">JViewLegacy::get()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodgetForm">JViewLegacy::getForm()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodgetLayout">JViewLegacy::getLayout()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a 
href="../../Joomla-Legacy/View/JViewLegacy.html#methodgetLayoutTemplate">JViewLegacy::getLayoutTemplate()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodgetModel">JViewLegacy::getModel()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodgetName">JViewLegacy::getName()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodloadHelper">JViewLegacy::loadHelper()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodloadTemplate">JViewLegacy::loadTemplate()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodsetEscape">JViewLegacy::setEscape()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodsetLayout">JViewLegacy::setLayout()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodsetLayoutExt">JViewLegacy::setLayoutExt()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#methodsetModel">JViewLegacy::setModel()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#method_addPath">JViewLegacy::_addPath()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#method_createFileName">JViewLegacy::_createFileName()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Legacy/View/JViewLegacy.html#method_setPath">JViewLegacy::_setPath()</a></span><br> </blockquote> <!-- =========== Summary =========== --> <p>Inherited From <span class="classname"><a href="../../Joomla-Platform/Object/JObject.html">JObject</a></span></p> <blockquote> <img src="../../media/images/Constructor.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Platform/Object/JObject.html#method__construct">JObject::__construct()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Platform/Object/JObject.html#methoddef">JObject::def()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Platform/Object/JObject.html#methodget">JObject::get()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Platform/Object/JObject.html#methodgetError">JObject::getError()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Platform/Object/JObject.html#methodgetErrors">JObject::getErrors()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Platform/Object/JObject.html#methodgetProperties">JObject::getProperties()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a 
href="../../Joomla-Platform/Object/JObject.html#methodset">JObject::set()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Platform/Object/JObject.html#methodsetError">JObject::setError()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Platform/Object/JObject.html#methodsetProperties">JObject::setProperties()</a></span><br> <img src="../../media/images/Method.png" alt=" "/> <span class="method-name"><a href="../../Joomla-Platform/Object/JObject.html#method__toString">JObject::__toString()</a></span><br> </blockquote> </div> </div> <p class="notes" id="credit"> Documentation generated on Tue, 19 Nov 2013 15:17:31 +0100 by <a href="http://www.phpdoc.org" target="_blank">phpDocumentor 1.4.3</a> </p> </div></body> </html>
asika32764/Joomla-CMS-API-Document
Joomla-Administrator/com_banners/BannersViewDownload.html
HTML
gpl-2.0
19,319
/* * Copyright (C) 2011-2021 Project SkyFire <https://www.projectskyfire.org/> * Copyright (C) 2008-2021 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2005-2021 MaNGOS <https://www.getmangos.eu/> * Copyright (C) 2006-2014 ScriptDev2 <https://github.com/scriptdev2/scriptdev2/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 3 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ObjectMgr.h" #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "SpellAuraEffects.h" #include "Group.h" #include "Spell.h" #include "icecrown_citadel.h" #include "Vehicle.h" #include "GridNotifiers.h" enum Say { // Festergut SAY_FESTERGUT_GASEOUS_BLIGHT = 0, SAY_FESTERGUT_DEATH = 1, // Rotface SAY_ROTFACE_OOZE_FLOOD = 2, SAY_ROTFACE_DEATH = 3, // Professor Putricide SAY_AGGRO = 4, EMOTE_UNSTABLE_EXPERIMENT = 5, SAY_PHASE_TRANSITION_HEROIC = 6, SAY_TRANSFORM_1 = 7, SAY_TRANSFORM_2 = 8, // always used for phase2 change, DO NOT GROUP WITH SAY_TRANSFORM_1 EMOTE_MALLEABLE_GOO = 9, EMOTE_CHOKING_GAS_BOMB = 10, SAY_KILL = 11, SAY_BERSERK = 12, SAY_DEATH = 13 }; enum Spells { // Festergut SPELL_RELEASE_GAS_VISUAL = 69125, SPELL_GASEOUS_BLIGHT_LARGE = 69157, SPELL_GASEOUS_BLIGHT_MEDIUM = 69162, SPELL_GASEOUS_BLIGHT_SMALL = 69164, SPELL_MALLEABLE_GOO_H = 72296, SPELL_MALLEABLE_GOO_SUMMON = 72299, // Professor Putricide SPELL_SLIME_PUDDLE_TRIGGER = 70341, SPELL_MALLEABLE_GOO = 70852, SPELL_UNSTABLE_EXPERIMENT = 70351, SPELL_TEAR_GAS = 71617, // phase transition SPELL_TEAR_GAS_CREATURE = 71618, SPELL_TEAR_GAS_CANCEL = 71620, SPELL_TEAR_GAS_PERIODIC_TRIGGER = 73170, SPELL_CREATE_CONCOCTION = 71621, SPELL_GUZZLE_POTIONS = 71893, SPELL_OOZE_TANK_PROTECTION = 71770, // protects the tank SPELL_CHOKING_GAS_BOMB = 71255, SPELL_OOZE_VARIABLE = 74118, SPELL_GAS_VARIABLE = 74119, SPELL_UNBOUND_PLAGUE = 70911, SPELL_UNBOUND_PLAGUE_SEARCHER = 70917, SPELL_PLAGUE_SICKNESS = 70953, SPELL_UNBOUND_PLAGUE_PROTECTION = 70955, SPELL_MUTATED_PLAGUE = 72451, SPELL_MUTATED_PLAGUE_CLEAR = 72618, // Slime Puddle SPELL_GROW_STACKER = 70345, SPELL_GROW = 70347, SPELL_SLIME_PUDDLE_AURA = 70343, // Gas Cloud SPELL_GASEOUS_BLOAT_PROC = 70215, SPELL_GASEOUS_BLOAT = 70672, SPELL_GASEOUS_BLOAT_PROTECTION = 70812, SPELL_EXPUNGED_GAS = 70701, // Volatile Ooze SPELL_OOZE_ERUPTION = 70492, SPELL_VOLATILE_OOZE_ADHESIVE = 70447, SPELL_OOZE_ERUPTION_SEARCH_PERIODIC = 70457, SPELL_VOLATILE_OOZE_PROTECTION = 70530, // Choking Gas Bomb SPELL_CHOKING_GAS_BOMB_PERIODIC = 71259, SPELL_CHOKING_GAS_EXPLOSION_TRIGGER = 71280, // Mutated Abomination vehicle SPELL_ABOMINATION_VEHICLE_POWER_DRAIN = 70385, SPELL_MUTATED_TRANSFORMATION = 70311, SPELL_MUTATED_TRANSFORMATION_DAMAGE = 70405, SPELL_MUTATED_TRANSFORMATION_NAME = 72401, // Unholy Infusion SPELL_UNHOLY_INFUSION_CREDIT = 71518 }; #define SPELL_GASEOUS_BLOAT_HELPER RAID_MODE<uint32>(70672, 72455, 72832, 72833) enum Events { // Festergut EVENT_FESTERGUT_DIES = 1, EVENT_FESTERGUT_GOO = 2, // Rotface EVENT_ROTFACE_DIES = 3, EVENT_ROTFACE_OOZE_FLOOD = 5, // Professor 
Putricide EVENT_BERSERK = 6, // all phases EVENT_SLIME_PUDDLE = 7, // all phases EVENT_UNSTABLE_EXPERIMENT = 8, // P1 && P2 EVENT_TEAR_GAS = 9, // phase transition not heroic EVENT_RESUME_ATTACK = 10, EVENT_MALLEABLE_GOO = 11, EVENT_CHOKING_GAS_BOMB = 12, EVENT_UNBOUND_PLAGUE = 13, EVENT_MUTATED_PLAGUE = 14, EVENT_PHASE_TRANSITION = 15 }; enum Phases { PHASE_NONE = 0, PHASE_FESTERGUT = 1, PHASE_ROTFACE = 2, PHASE_COMBAT_1 = 4, PHASE_COMBAT_2 = 5, PHASE_COMBAT_3 = 6 }; enum Points { POINT_FESTERGUT = 366260, POINT_ROTFACE = 366270, POINT_TABLE = 366780 }; Position const festergutWatchPos = {4324.820f, 3166.03f, 389.3831f, 3.316126f}; //emote 432 (release gas) Position const rotfaceWatchPos = {4390.371f, 3164.50f, 389.3890f, 5.497787f}; //emote 432 (release ooze) Position const tablePos = {4356.190f, 3262.90f, 389.4820f, 1.483530f}; // used in Rotface encounter uint32 const oozeFloodSpells[4] = {69782, 69796, 69798, 69801}; enum PutricideData { DATA_EXPERIMENT_STAGE = 1, DATA_PHASE = 2, DATA_ABOMINATION = 3 }; #define EXPERIMENT_STATE_OOZE false #define EXPERIMENT_STATE_GAS true class AbominationDespawner { public: explicit AbominationDespawner(Unit* owner) : _owner(owner) { } bool operator()(uint64 guid) { if (Unit* summon = ObjectAccessor::GetUnit(*_owner, guid)) { if (summon->GetEntry() == NPC_MUTATED_ABOMINATION_10 || summon->GetEntry() == NPC_MUTATED_ABOMINATION_25) { if (Vehicle* veh = summon->GetVehicleKit()) veh->RemoveAllPassengers(); // also despawns the vehicle // Found unit is Mutated Abomination, remove it return true; } // Found unit is not Mutated Abomintaion, leave it return false; } // No unit found, remove from SummonList return true; } private: Unit* _owner; }; struct RotfaceHeightCheck { RotfaceHeightCheck(Creature* rotface) : _rotface(rotface) { } bool operator()(Creature* stalker) const { return stalker->GetPositionZ() < _rotface->GetPositionZ() + 5.0f; } private: Creature* _rotface; }; class boss_professor_putricide : public CreatureScript { public: boss_professor_putricide() : CreatureScript("boss_professor_putricide") { } struct boss_professor_putricideAI : public BossAI { boss_professor_putricideAI(Creature* creature) : BossAI(creature, DATA_PROFESSOR_PUTRICIDE), _baseSpeed(creature->GetSpeedRate(MOVE_RUN)), _experimentState(EXPERIMENT_STATE_OOZE) { _phase = PHASE_NONE; } void Reset() OVERRIDE { if (!(events.IsInPhase(PHASE_ROTFACE) || events.IsInPhase(PHASE_FESTERGUT))) instance->SetBossState(DATA_PROFESSOR_PUTRICIDE, NOT_STARTED); instance->SetData(DATA_NAUSEA_ACHIEVEMENT, uint32(true)); events.Reset(); summons.DespawnAll(); SetPhase(PHASE_COMBAT_1); _experimentState = EXPERIMENT_STATE_OOZE; me->SetReactState(REACT_DEFENSIVE); me->SetWalk(false); if (me->GetMotionMaster()->GetCurrentMovementGeneratorType() == POINT_MOTION_TYPE) me->GetMotionMaster()->MovementExpired(); if (instance->GetBossState(DATA_ROTFACE) == DONE && instance->GetBossState(DATA_FESTERGUT) == DONE) me->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_IMMUNE_TO_PC | UNIT_FLAG_NOT_SELECTABLE); } void EnterCombat(Unit* who) OVERRIDE { if (events.IsInPhase(PHASE_ROTFACE) || events.IsInPhase(PHASE_FESTERGUT)) return; if (!instance->CheckRequiredBosses(DATA_PROFESSOR_PUTRICIDE, who->ToPlayer())) { EnterEvadeMode(); instance->DoCastSpellOnPlayers(LIGHT_S_HAMMER_TELEPORT); return; } me->setActive(true); events.Reset(); events.ScheduleEvent(EVENT_BERSERK, 600000); events.ScheduleEvent(EVENT_SLIME_PUDDLE, 10000); events.ScheduleEvent(EVENT_UNSTABLE_EXPERIMENT, urand(30000, 35000)); if (IsHeroic()) 
events.ScheduleEvent(EVENT_UNBOUND_PLAGUE, 20000); SetPhase(PHASE_COMBAT_1); Talk(SAY_AGGRO); DoCast(me, SPELL_OOZE_TANK_PROTECTION, true); DoZoneInCombat(me); instance->SetBossState(DATA_PROFESSOR_PUTRICIDE, IN_PROGRESS); } void JustReachedHome() OVERRIDE { _JustReachedHome(); me->SetWalk(false); if (events.IsInPhase(PHASE_COMBAT_1) || events.IsInPhase(PHASE_COMBAT_2) || events.IsInPhase(PHASE_COMBAT_3)) instance->SetBossState(DATA_PROFESSOR_PUTRICIDE, FAIL); } void KilledUnit(Unit* victim) OVERRIDE { if (victim->GetTypeId() == TypeID::TYPEID_PLAYER) Talk(SAY_KILL); } void JustDied(Unit* /*killer*/) OVERRIDE { _JustDied(); Talk(SAY_DEATH); if (Is25ManRaid() && me->HasAura(SPELL_SHADOWS_FATE)) DoCastAOE(SPELL_UNHOLY_INFUSION_CREDIT, true); DoCast(SPELL_MUTATED_PLAGUE_CLEAR); } void JustSummoned(Creature* summon) OVERRIDE { summons.Summon(summon); switch (summon->GetEntry()) { case NPC_MALLEABLE_OOZE_STALKER: DoCast(summon, SPELL_MALLEABLE_GOO_H); return; case NPC_GROWING_OOZE_PUDDLE: summon->CastSpell(summon, SPELL_GROW_STACKER, true); summon->CastSpell(summon, SPELL_SLIME_PUDDLE_AURA, true); // blizzard casts this spell 7 times initially (confirmed in sniff) for (uint8 i = 0; i < 7; ++i) summon->CastSpell(summon, SPELL_GROW, true); break; case NPC_GAS_CLOUD: // no possible aura seen in sniff adding the aurastate summon->ModifyAuraState(AURA_STATE_UNKNOWN22, true); summon->CastSpell(summon, SPELL_GASEOUS_BLOAT_PROC, true); summon->ApplySpellImmune(0, IMMUNITY_EFFECT, SPELL_EFFECT_KNOCK_BACK, true); summon->SetReactState(REACT_PASSIVE); break; case NPC_VOLATILE_OOZE: // no possible aura seen in sniff adding the aurastate summon->ModifyAuraState(AURA_STATE_UNKNOWN19, true); summon->CastSpell(summon, SPELL_OOZE_ERUPTION_SEARCH_PERIODIC, true); summon->ApplySpellImmune(0, IMMUNITY_EFFECT, SPELL_EFFECT_KNOCK_BACK, true); summon->SetReactState(REACT_PASSIVE); break; case NPC_CHOKING_GAS_BOMB: summon->CastSpell(summon, SPELL_CHOKING_GAS_BOMB_PERIODIC, true); summon->CastSpell(summon, SPELL_CHOKING_GAS_EXPLOSION_TRIGGER, true); return; case NPC_MUTATED_ABOMINATION_10: case NPC_MUTATED_ABOMINATION_25: return; default: break; } if (me->IsInCombat()) DoZoneInCombat(summon); } void DamageTaken(Unit* /*attacker*/, uint32& /*damage*/) OVERRIDE { switch (_phase) { case PHASE_COMBAT_1: if (HealthAbovePct(80)) return; me->SetReactState(REACT_PASSIVE); DoAction(ACTION_CHANGE_PHASE); break; case PHASE_COMBAT_2: if (HealthAbovePct(35)) return; me->SetReactState(REACT_PASSIVE); DoAction(ACTION_CHANGE_PHASE); break; default: break; } } void MovementInform(uint32 type, uint32 id) OVERRIDE { if (type != POINT_MOTION_TYPE) return; switch (id) { case POINT_FESTERGUT: instance->SetBossState(DATA_FESTERGUT, IN_PROGRESS); // needed here for delayed gate close me->SetSpeed(MOVE_RUN, _baseSpeed, true); DoAction(ACTION_FESTERGUT_GAS); if (Creature* festergut = Unit::GetCreature(*me, instance->GetData64(DATA_FESTERGUT))) festergut->CastSpell(festergut, SPELL_GASEOUS_BLIGHT_LARGE, false, NULL, NULL, festergut->GetGUID()); break; case POINT_ROTFACE: instance->SetBossState(DATA_ROTFACE, IN_PROGRESS); // needed here for delayed gate close me->SetSpeed(MOVE_RUN, _baseSpeed, true); DoAction(ACTION_ROTFACE_OOZE); events.ScheduleEvent(EVENT_ROTFACE_OOZE_FLOOD, 25000, 0, PHASE_ROTFACE); break; case POINT_TABLE: // stop attack me->GetMotionMaster()->MoveIdle(); me->SetSpeed(MOVE_RUN, _baseSpeed, true); if (GameObject* table = ObjectAccessor::GetGameObject(*me, instance->GetData64(DATA_PUTRICIDE_TABLE))) 
me->SetFacingToObject(table); // operating on new phase already switch (_phase) { case PHASE_COMBAT_2: { SpellInfo const* spell = sSpellMgr->GetSpellInfo(SPELL_CREATE_CONCOCTION); DoCast(me, SPELL_CREATE_CONCOCTION); events.ScheduleEvent(EVENT_PHASE_TRANSITION, sSpellMgr->GetSpellForDifficultyFromSpell(spell, me)->CalcCastTime() + 100); break; } case PHASE_COMBAT_3: { SpellInfo const* spell = sSpellMgr->GetSpellInfo(SPELL_GUZZLE_POTIONS); DoCast(me, SPELL_GUZZLE_POTIONS); events.ScheduleEvent(EVENT_PHASE_TRANSITION, sSpellMgr->GetSpellForDifficultyFromSpell(spell, me)->CalcCastTime() + 100); break; } default: break; } break; default: break; } } void DoAction(int32 action) OVERRIDE { switch (action) { case ACTION_FESTERGUT_COMBAT: SetPhase(PHASE_FESTERGUT); me->SetSpeed(MOVE_RUN, _baseSpeed*2.0f, true); me->GetMotionMaster()->MovePoint(POINT_FESTERGUT, festergutWatchPos); me->SetReactState(REACT_PASSIVE); DoZoneInCombat(me); if (IsHeroic()) events.ScheduleEvent(EVENT_FESTERGUT_GOO, urand(13000, 18000), 0, PHASE_FESTERGUT); break; case ACTION_FESTERGUT_GAS: Talk(SAY_FESTERGUT_GASEOUS_BLIGHT); DoCast(me, SPELL_RELEASE_GAS_VISUAL, true); break; case ACTION_FESTERGUT_DEATH: events.ScheduleEvent(EVENT_FESTERGUT_DIES, 4000, 0, PHASE_FESTERGUT); break; case ACTION_ROTFACE_COMBAT: { SetPhase(PHASE_ROTFACE); me->SetSpeed(MOVE_RUN, _baseSpeed*2.0f, true); me->GetMotionMaster()->MovePoint(POINT_ROTFACE, rotfaceWatchPos); me->SetReactState(REACT_PASSIVE); _oozeFloodStage = 0; DoZoneInCombat(me); // init random sequence of floods if (Creature* rotface = Unit::GetCreature(*me, instance->GetData64(DATA_ROTFACE))) { std::list<Creature*> list; GetCreatureListWithEntryInGrid(list, rotface, NPC_PUDDLE_STALKER, 50.0f); list.remove_if(RotfaceHeightCheck(rotface)); if (list.size() > 4) { list.sort(Skyfire::ObjectDistanceOrderPred(rotface)); do { list.pop_back(); } while (list.size() > 4); } uint8 i = 0; while (!list.empty()) { std::list<Creature*>::iterator itr = list.begin(); std::advance(itr, urand(0, list.size()-1)); _oozeFloodDummyGUIDs[i++] = (*itr)->GetGUID(); list.erase(itr); } } break; } case ACTION_ROTFACE_OOZE: Talk(SAY_ROTFACE_OOZE_FLOOD); if (Creature* dummy = Unit::GetCreature(*me, _oozeFloodDummyGUIDs[_oozeFloodStage])) dummy->CastSpell(dummy, oozeFloodSpells[_oozeFloodStage], true, NULL, NULL, me->GetGUID()); // cast from self for LoS (with prof's GUID for logs) if (++_oozeFloodStage == 4) _oozeFloodStage = 0; break; case ACTION_ROTFACE_DEATH: events.ScheduleEvent(EVENT_ROTFACE_DIES, 4500, 0, PHASE_ROTFACE); break; case ACTION_CHANGE_PHASE: me->SetSpeed(MOVE_RUN, _baseSpeed*2.0f, true); events.DelayEvents(30000); me->AttackStop(); if (!IsHeroic()) { DoCast(me, SPELL_TEAR_GAS); events.ScheduleEvent(EVENT_TEAR_GAS, 2500); } else { Talk(SAY_PHASE_TRANSITION_HEROIC); DoCast(me, SPELL_UNSTABLE_EXPERIMENT, true); DoCast(me, SPELL_UNSTABLE_EXPERIMENT, true); // cast variables if (Is25ManRaid()) { std::list<Unit*> targetList; { const std::list<HostileReference*>& threatlist = me->getThreatManager().getThreatList(); for (std::list<HostileReference*>::const_iterator itr = threatlist.begin(); itr != threatlist.end(); ++itr) if ((*itr)->getTarget()->GetTypeId() == TypeID::TYPEID_PLAYER) targetList.push_back((*itr)->getTarget()); } size_t half = targetList.size()/2; // half gets ooze variable while (half < targetList.size()) { std::list<Unit*>::iterator itr = targetList.begin(); advance(itr, urand(0, targetList.size() - 1)); (*itr)->CastSpell(*itr, SPELL_OOZE_VARIABLE, true); targetList.erase(itr); } // and 
half gets gas for (std::list<Unit*>::iterator itr = targetList.begin(); itr != targetList.end(); ++itr) (*itr)->CastSpell(*itr, SPELL_GAS_VARIABLE, true); } me->GetMotionMaster()->MovePoint(POINT_TABLE, tablePos); } switch (_phase) { case PHASE_COMBAT_1: SetPhase(PHASE_COMBAT_2); events.ScheduleEvent(EVENT_MALLEABLE_GOO, urand(21000, 26000)); events.ScheduleEvent(EVENT_CHOKING_GAS_BOMB, urand(35000, 40000)); break; case PHASE_COMBAT_2: SetPhase(PHASE_COMBAT_3); events.ScheduleEvent(EVENT_MUTATED_PLAGUE, 25000); events.CancelEvent(EVENT_UNSTABLE_EXPERIMENT); break; default: break; } break; default: break; } } uint32 GetData(uint32 type) const OVERRIDE { switch (type) { case DATA_EXPERIMENT_STAGE: return _experimentState; case DATA_PHASE: return _phase; case DATA_ABOMINATION: return uint32(summons.HasEntry(NPC_MUTATED_ABOMINATION_10) || summons.HasEntry(NPC_MUTATED_ABOMINATION_25)); default: break; } return 0; } void SetData(uint32 id, uint32 data) OVERRIDE { if (id == DATA_EXPERIMENT_STAGE) _experimentState = bool(data); } void UpdateAI(uint32 diff) OVERRIDE { if ((!(events.IsInPhase(PHASE_ROTFACE) || events.IsInPhase(PHASE_FESTERGUT)) && !UpdateVictim()) || !CheckInRoom()) return; events.Update(diff); if (me->HasUnitState(UNIT_STATE_CASTING)) return; while (uint32 eventId = events.ExecuteEvent()) { switch (eventId) { case EVENT_FESTERGUT_DIES: Talk(SAY_FESTERGUT_DEATH); EnterEvadeMode(); break; case EVENT_FESTERGUT_GOO: me->CastCustomSpell(SPELL_MALLEABLE_GOO_SUMMON, SPELLVALUE_MAX_TARGETS, 1, NULL, true); events.ScheduleEvent(EVENT_FESTERGUT_GOO, (Is25ManRaid() ? 10000 : 30000) + urand(0, 5000), 0, PHASE_FESTERGUT); break; case EVENT_ROTFACE_DIES: Talk(SAY_ROTFACE_DEATH); EnterEvadeMode(); break; case EVENT_ROTFACE_OOZE_FLOOD: DoAction(ACTION_ROTFACE_OOZE); events.ScheduleEvent(EVENT_ROTFACE_OOZE_FLOOD, 25000, 0, PHASE_ROTFACE); break; case EVENT_BERSERK: Talk(SAY_BERSERK); DoCast(me, SPELL_BERSERK2); break; case EVENT_SLIME_PUDDLE: { std::list<Unit*> targets; SelectTargetList(targets, 2, SELECT_TARGET_RANDOM, 0.0f, true); if (!targets.empty()) for (std::list<Unit*>::iterator itr = targets.begin(); itr != targets.end(); ++itr) DoCast(*itr, SPELL_SLIME_PUDDLE_TRIGGER); events.ScheduleEvent(EVENT_SLIME_PUDDLE, 35000); break; } case EVENT_UNSTABLE_EXPERIMENT: Talk(EMOTE_UNSTABLE_EXPERIMENT); DoCast(me, SPELL_UNSTABLE_EXPERIMENT); events.ScheduleEvent(EVENT_UNSTABLE_EXPERIMENT, urand(35000, 40000)); break; case EVENT_TEAR_GAS: me->GetMotionMaster()->MovePoint(POINT_TABLE, tablePos); DoCast(me, SPELL_TEAR_GAS_PERIODIC_TRIGGER, true); break; case EVENT_RESUME_ATTACK: me->SetReactState(REACT_DEFENSIVE); AttackStart(me->GetVictim()); // remove Tear Gas me->RemoveAurasDueToSpell(SPELL_TEAR_GAS_PERIODIC_TRIGGER); instance->DoRemoveAurasDueToSpellOnPlayers(71615); DoCastAOE(SPELL_TEAR_GAS_CANCEL); instance->DoRemoveAurasDueToSpellOnPlayers(SPELL_GAS_VARIABLE); instance->DoRemoveAurasDueToSpellOnPlayers(SPELL_OOZE_VARIABLE); break; case EVENT_MALLEABLE_GOO: if (Is25ManRaid()) { std::list<Unit*> targets; SelectTargetList(targets, 2, SELECT_TARGET_RANDOM, -7.0f, true); if (!targets.empty()) { Talk(EMOTE_MALLEABLE_GOO); for (std::list<Unit*>::iterator itr = targets.begin(); itr != targets.end(); ++itr) DoCast(*itr, SPELL_MALLEABLE_GOO); } } else { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 1, -7.0f, true)) { Talk(EMOTE_MALLEABLE_GOO); DoCast(target, SPELL_MALLEABLE_GOO); } } events.ScheduleEvent(EVENT_MALLEABLE_GOO, urand(25000, 30000)); break; case EVENT_CHOKING_GAS_BOMB: 
Talk(EMOTE_CHOKING_GAS_BOMB); DoCast(me, SPELL_CHOKING_GAS_BOMB); events.ScheduleEvent(EVENT_CHOKING_GAS_BOMB, urand(35000, 40000)); break; case EVENT_UNBOUND_PLAGUE: if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, NonTankTargetSelector(me))) { DoCast(target, SPELL_UNBOUND_PLAGUE); DoCast(target, SPELL_UNBOUND_PLAGUE_SEARCHER); } events.ScheduleEvent(EVENT_UNBOUND_PLAGUE, 90000); break; case EVENT_MUTATED_PLAGUE: DoCastVictim(SPELL_MUTATED_PLAGUE); events.ScheduleEvent(EVENT_MUTATED_PLAGUE, 10000); break; case EVENT_PHASE_TRANSITION: { switch (_phase) { case PHASE_COMBAT_2: if (Creature* face = me->FindNearestCreature(NPC_TEAR_GAS_TARGET_STALKER, 50.0f)) me->SetFacingToObject(face); me->HandleEmoteCommand(EMOTE_ONESHOT_KNEEL); Talk(SAY_TRANSFORM_1); events.ScheduleEvent(EVENT_RESUME_ATTACK, 5500, 0, PHASE_COMBAT_2); break; case PHASE_COMBAT_3: if (Creature* face = me->FindNearestCreature(NPC_TEAR_GAS_TARGET_STALKER, 50.0f)) me->SetFacingToObject(face); me->HandleEmoteCommand(EMOTE_ONESHOT_KNEEL); Talk(SAY_TRANSFORM_2); summons.DespawnIf(AbominationDespawner(me)); events.ScheduleEvent(EVENT_RESUME_ATTACK, 8500, 0, PHASE_COMBAT_3); break; default: break; } } default: break; } } DoMeleeAttackIfReady(); } private: void SetPhase(Phases newPhase) { _phase = newPhase; events.SetPhase(newPhase); } uint64 _oozeFloodDummyGUIDs[4]; Phases _phase; // external of EventMap because event phase gets reset on evade float const _baseSpeed; uint8 _oozeFloodStage; bool _experimentState; }; CreatureAI* GetAI(Creature* creature) const OVERRIDE { return GetIcecrownCitadelAI<boss_professor_putricideAI>(creature); } }; class npc_putricide_oozeAI : public ScriptedAI { public: npc_putricide_oozeAI(Creature* creature, uint32 hitTargetSpellId) : ScriptedAI(creature), _hitTargetSpellId(hitTargetSpellId), _newTargetSelectTimer(0) { } void SpellHitTarget(Unit* /*target*/, SpellInfo const* spell) OVERRIDE { if (!_newTargetSelectTimer && spell->Id == sSpellMgr->GetSpellIdForDifficulty(_hitTargetSpellId, me)) _newTargetSelectTimer = 1000; } void SpellHit(Unit* /*caster*/, SpellInfo const* spell) OVERRIDE { if (spell->Id == SPELL_TEAR_GAS_CREATURE) _newTargetSelectTimer = 1000; } void UpdateAI(uint32 diff) OVERRIDE { if (!UpdateVictim() && !_newTargetSelectTimer) return; if (!_newTargetSelectTimer && !me->IsNonMeleeSpellCasted(false, false, true, false, true)) _newTargetSelectTimer = 1000; DoMeleeAttackIfReady(); if (!_newTargetSelectTimer) return; if (me->HasAura(SPELL_TEAR_GAS_CREATURE)) return; if (_newTargetSelectTimer <= diff) { _newTargetSelectTimer = 0; CastMainSpell(); } else _newTargetSelectTimer -= diff; } virtual void CastMainSpell() = 0; private: uint32 _hitTargetSpellId; uint32 _newTargetSelectTimer; }; class npc_volatile_ooze : public CreatureScript { public: npc_volatile_ooze() : CreatureScript("npc_volatile_ooze") { } struct npc_volatile_oozeAI : public npc_putricide_oozeAI { npc_volatile_oozeAI(Creature* creature) : npc_putricide_oozeAI(creature, SPELL_OOZE_ERUPTION) { } void CastMainSpell() { me->CastSpell(me, SPELL_VOLATILE_OOZE_ADHESIVE, false); } }; CreatureAI* GetAI(Creature* creature) const OVERRIDE { return GetIcecrownCitadelAI<npc_volatile_oozeAI>(creature); } }; class npc_gas_cloud : public CreatureScript { public: npc_gas_cloud() : CreatureScript("npc_gas_cloud") { } struct npc_gas_cloudAI : public npc_putricide_oozeAI { npc_gas_cloudAI(Creature* creature) : npc_putricide_oozeAI(creature, SPELL_EXPUNGED_GAS) { _newTargetSelectTimer = 0; } void CastMainSpell() { 
me->CastCustomSpell(SPELL_GASEOUS_BLOAT, SPELLVALUE_AURA_STACK, 10, me, false); } private: uint32 _newTargetSelectTimer; }; CreatureAI* GetAI(Creature* creature) const OVERRIDE { return GetIcecrownCitadelAI<npc_gas_cloudAI>(creature); } }; class spell_putricide_gaseous_bloat : public SpellScriptLoader { public: spell_putricide_gaseous_bloat() : SpellScriptLoader("spell_putricide_gaseous_bloat") { } class spell_putricide_gaseous_bloat_AuraScript : public AuraScript { PrepareAuraScript(spell_putricide_gaseous_bloat_AuraScript); void HandleExtraEffect(AuraEffect const* /*aurEff*/) { Unit* target = GetTarget(); if (Unit* caster = GetCaster()) { target->RemoveAuraFromStack(GetSpellInfo()->Id, GetCasterGUID()); if (!target->HasAura(GetId())) caster->CastCustomSpell(SPELL_GASEOUS_BLOAT, SPELLVALUE_AURA_STACK, 10, caster, false); } } void Register() OVERRIDE { OnEffectPeriodic += AuraEffectPeriodicFn(spell_putricide_gaseous_bloat_AuraScript::HandleExtraEffect, EFFECT_0, SPELL_AURA_PERIODIC_DAMAGE); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_putricide_gaseous_bloat_AuraScript(); } }; class spell_putricide_ooze_channel : public SpellScriptLoader { public: spell_putricide_ooze_channel() : SpellScriptLoader("spell_putricide_ooze_channel") { } class spell_putricide_ooze_channel_SpellScript : public SpellScript { PrepareSpellScript(spell_putricide_ooze_channel_SpellScript); bool Validate(SpellInfo const* spell) OVERRIDE { if (!spell->ExcludeTargetAuraSpell) return false; if (!sSpellMgr->GetSpellInfo(spell->ExcludeTargetAuraSpell)) return false; return true; } // set up initial variables and check if caster is creature // this will let use safely use ToCreature() casts in entire script bool Load() OVERRIDE { _target = NULL; return GetCaster()->GetTypeId() == TypeID::TYPEID_UNIT; } void SelectTarget(std::list<WorldObject*>& targets) { if (targets.empty()) { FinishCast(SpellCastResult::SPELL_FAILED_NO_VALID_TARGETS); GetCaster()->ToCreature()->DespawnOrUnsummon(1); // despawn next update return; } WorldObject* target = Skyfire::Containers::SelectRandomContainerElement(targets); targets.clear(); targets.push_back(target); _target = target; } void SetTarget(std::list<WorldObject*>& targets) { targets.clear(); if (_target) targets.push_back(_target); } void StartAttack() { GetCaster()->ClearUnitState(UNIT_STATE_CASTING); GetCaster()->DeleteThreatList(); GetCaster()->ToCreature()->AI()->AttackStart(GetHitUnit()); GetCaster()->AddThreat(GetHitUnit(), 500000000.0f); // value seen in sniff } void Register() OVERRIDE { OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_putricide_ooze_channel_SpellScript::SelectTarget, EFFECT_0, TARGET_UNIT_SRC_AREA_ENEMY); OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_putricide_ooze_channel_SpellScript::SetTarget, EFFECT_1, TARGET_UNIT_SRC_AREA_ENEMY); OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_putricide_ooze_channel_SpellScript::SetTarget, EFFECT_2, TARGET_UNIT_SRC_AREA_ENEMY); AfterHit += SpellHitFn(spell_putricide_ooze_channel_SpellScript::StartAttack); } WorldObject* _target; }; SpellScript* GetSpellScript() const OVERRIDE { return new spell_putricide_ooze_channel_SpellScript(); } }; class ExactDistanceCheck { public: ExactDistanceCheck(Unit* source, float dist) : _source(source), _dist(dist) { } bool operator()(WorldObject* unit) const { return _source->GetExactDist2d(unit) > _dist; } private: Unit* _source; float _dist; }; class spell_putricide_slime_puddle : public SpellScriptLoader { public: 
spell_putricide_slime_puddle() : SpellScriptLoader("spell_putricide_slime_puddle") { } class spell_putricide_slime_puddle_SpellScript : public SpellScript { PrepareSpellScript(spell_putricide_slime_puddle_SpellScript); void ScaleRange(std::list<WorldObject*>& targets) { targets.remove_if(ExactDistanceCheck(GetCaster(), 2.5f * GetCaster()->GetObjectScale())); } void Register() OVERRIDE { OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_putricide_slime_puddle_SpellScript::ScaleRange, EFFECT_0, TARGET_UNIT_DEST_AREA_ENEMY); OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_putricide_slime_puddle_SpellScript::ScaleRange, EFFECT_1, TARGET_UNIT_DEST_AREA_ENTRY); } }; SpellScript* GetSpellScript() const OVERRIDE { return new spell_putricide_slime_puddle_SpellScript(); } }; // this is here only because on retail you dont actually enter HEROIC mode for ICC class spell_putricide_slime_puddle_aura : public SpellScriptLoader { public: spell_putricide_slime_puddle_aura() : SpellScriptLoader("spell_putricide_slime_puddle_aura") { } class spell_putricide_slime_puddle_aura_SpellScript : public SpellScript { PrepareSpellScript(spell_putricide_slime_puddle_aura_SpellScript); void ReplaceAura() { if (Unit* target = GetHitUnit()) GetCaster()->AddAura((GetCaster()->GetMap()->GetSpawnMode() & 1) ? 72456 : 70346, target); } void Register() OVERRIDE { OnHit += SpellHitFn(spell_putricide_slime_puddle_aura_SpellScript::ReplaceAura); } }; SpellScript* GetSpellScript() const OVERRIDE { return new spell_putricide_slime_puddle_aura_SpellScript(); } }; class spell_putricide_unstable_experiment : public SpellScriptLoader { public: spell_putricide_unstable_experiment() : SpellScriptLoader("spell_putricide_unstable_experiment") { } class spell_putricide_unstable_experiment_SpellScript : public SpellScript { PrepareSpellScript(spell_putricide_unstable_experiment_SpellScript); void HandleScript(SpellEffIndex effIndex) { PreventHitDefaultEffect(effIndex); if (GetCaster()->GetTypeId() != TypeID::TYPEID_UNIT) return; Creature* creature = GetCaster()->ToCreature(); uint32 stage = creature->AI()->GetData(DATA_EXPERIMENT_STAGE); creature->AI()->SetData(DATA_EXPERIMENT_STAGE, stage ^ true); Creature* target = NULL; std::list<Creature*> creList; GetCreatureListWithEntryInGrid(creList, GetCaster(), NPC_ABOMINATION_WING_MAD_SCIENTIST_STALKER, 200.0f); // 2 of them are spawned at green place - weird trick blizz for (std::list<Creature*>::iterator itr = creList.begin(); itr != creList.end(); ++itr) { target = *itr; std::list<Creature*> tmp; GetCreatureListWithEntryInGrid(tmp, target, NPC_ABOMINATION_WING_MAD_SCIENTIST_STALKER, 10.0f); if ((!stage && tmp.size() > 1) || (stage && tmp.size() == 1)) break; } GetCaster()->CastSpell(target, uint32(GetSpellInfo()->Effects[stage].CalcValue()), true, NULL, NULL, GetCaster()->GetGUID()); } void Register() OVERRIDE { OnEffectHitTarget += SpellEffectFn(spell_putricide_unstable_experiment_SpellScript::HandleScript, EFFECT_0, SPELL_EFFECT_SCRIPT_EFFECT); } }; SpellScript* GetSpellScript() const OVERRIDE { return new spell_putricide_unstable_experiment_SpellScript(); } }; class spell_putricide_ooze_eruption_searcher : public SpellScriptLoader { public: spell_putricide_ooze_eruption_searcher() : SpellScriptLoader("spell_putricide_ooze_eruption_searcher") { } class spell_putricide_ooze_eruption_searcher_SpellScript : public SpellScript { PrepareSpellScript(spell_putricide_ooze_eruption_searcher_SpellScript); void HandleDummy(SpellEffIndex /*effIndex*/) { uint32 adhesiveId = 
sSpellMgr->GetSpellIdForDifficulty(SPELL_VOLATILE_OOZE_ADHESIVE, GetCaster()); if (GetHitUnit()->HasAura(adhesiveId)) { GetCaster()->CastSpell(GetHitUnit(), SPELL_OOZE_ERUPTION, true); GetHitUnit()->RemoveAurasDueToSpell(adhesiveId, GetCaster()->GetGUID(), 0, AURA_REMOVE_BY_ENEMY_SPELL); } } void Register() OVERRIDE { OnEffectHitTarget += SpellEffectFn(spell_putricide_ooze_eruption_searcher_SpellScript::HandleDummy, EFFECT_0, SPELL_EFFECT_DUMMY); } }; SpellScript* GetSpellScript() const OVERRIDE { return new spell_putricide_ooze_eruption_searcher_SpellScript(); } }; class spell_putricide_choking_gas_bomb : public SpellScriptLoader { public: spell_putricide_choking_gas_bomb() : SpellScriptLoader("spell_putricide_choking_gas_bomb") { } class spell_putricide_choking_gas_bomb_SpellScript : public SpellScript { PrepareSpellScript(spell_putricide_choking_gas_bomb_SpellScript); void HandleScript(SpellEffIndex /*effIndex*/) { uint32 skipIndex = urand(0, 2); for (uint32 i = 0; i < 3; ++i) { if (i == skipIndex) continue; uint32 spellId = uint32(GetSpellInfo()->Effects[i].CalcValue()); GetCaster()->CastSpell(GetCaster(), spellId, true, NULL, NULL, GetCaster()->GetGUID()); } } void Register() OVERRIDE { OnEffectHitTarget += SpellEffectFn(spell_putricide_choking_gas_bomb_SpellScript::HandleScript, EFFECT_0, SPELL_EFFECT_SCRIPT_EFFECT); } }; SpellScript* GetSpellScript() const OVERRIDE { return new spell_putricide_choking_gas_bomb_SpellScript(); } }; class spell_putricide_unbound_plague : public SpellScriptLoader { public: spell_putricide_unbound_plague() : SpellScriptLoader("spell_putricide_unbound_plague") { } class spell_putricide_unbound_plague_SpellScript : public SpellScript { PrepareSpellScript(spell_putricide_unbound_plague_SpellScript); bool Validate(SpellInfo const* /*spell*/) OVERRIDE { if (!sSpellMgr->GetSpellInfo(SPELL_UNBOUND_PLAGUE)) return false; if (!sSpellMgr->GetSpellInfo(SPELL_UNBOUND_PLAGUE_SEARCHER)) return false; return true; } void FilterTargets(std::list<WorldObject*>& targets) { if (AuraEffect const* eff = GetCaster()->GetAuraEffect(SPELL_UNBOUND_PLAGUE_SEARCHER, EFFECT_0)) { if (eff->GetTickNumber() < 2) { targets.clear(); return; } } targets.remove_if(Skyfire::UnitAuraCheck(true, sSpellMgr->GetSpellIdForDifficulty(SPELL_UNBOUND_PLAGUE, GetCaster()))); Skyfire::Containers::RandomResizeList(targets, 1); } void HandleScript(SpellEffIndex /*effIndex*/) { if (!GetHitUnit()) return; InstanceScript* instance = GetCaster()->GetInstanceScript(); if (!instance) return; uint32 plagueId = sSpellMgr->GetSpellIdForDifficulty(SPELL_UNBOUND_PLAGUE, GetCaster()); if (!GetHitUnit()->HasAura(plagueId)) { if (Creature* professor = ObjectAccessor::GetCreature(*GetCaster(), instance->GetData64(DATA_PROFESSOR_PUTRICIDE))) { if (Aura* oldPlague = GetCaster()->GetAura(plagueId, professor->GetGUID())) { if (Aura* newPlague = professor->AddAura(plagueId, GetHitUnit())) { newPlague->SetMaxDuration(oldPlague->GetMaxDuration()); newPlague->SetDuration(oldPlague->GetDuration()); oldPlague->Remove(); GetCaster()->RemoveAurasDueToSpell(SPELL_UNBOUND_PLAGUE_SEARCHER); GetCaster()->CastSpell(GetCaster(), SPELL_PLAGUE_SICKNESS, true); GetCaster()->CastSpell(GetCaster(), SPELL_UNBOUND_PLAGUE_PROTECTION, true); professor->CastSpell(GetHitUnit(), SPELL_UNBOUND_PLAGUE_SEARCHER, true); } } } } } void Register() OVERRIDE { OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_putricide_unbound_plague_SpellScript::FilterTargets, EFFECT_0, TARGET_UNIT_SRC_AREA_ALLY); OnEffectHitTarget += 
SpellEffectFn(spell_putricide_unbound_plague_SpellScript::HandleScript, EFFECT_0, SPELL_EFFECT_SCRIPT_EFFECT); } }; SpellScript* GetSpellScript() const OVERRIDE { return new spell_putricide_unbound_plague_SpellScript(); } }; class spell_putricide_eat_ooze : public SpellScriptLoader { public: spell_putricide_eat_ooze() : SpellScriptLoader("spell_putricide_eat_ooze") { } class spell_putricide_eat_ooze_SpellScript : public SpellScript { PrepareSpellScript(spell_putricide_eat_ooze_SpellScript); void SelectTarget(std::list<WorldObject*>& targets) { if (targets.empty()) return; targets.sort(Skyfire::ObjectDistanceOrderPred(GetCaster())); WorldObject* target = targets.front(); targets.clear(); targets.push_back(target); } void HandleScript(SpellEffIndex /*effIndex*/) { Creature* target = GetHitCreature(); if (!target) return; if (Aura* grow = target->GetAura(uint32(GetEffectValue()))) { if (grow->GetStackAmount() < 3) { target->RemoveAurasDueToSpell(SPELL_GROW_STACKER); target->RemoveAura(grow); target->DespawnOrUnsummon(1); } else grow->ModStackAmount(-3); } } void Register() OVERRIDE { OnEffectHitTarget += SpellEffectFn(spell_putricide_eat_ooze_SpellScript::HandleScript, EFFECT_0, SPELL_EFFECT_SCRIPT_EFFECT); OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_putricide_eat_ooze_SpellScript::SelectTarget, EFFECT_0, TARGET_UNIT_DEST_AREA_ENTRY); } }; SpellScript* GetSpellScript() const OVERRIDE { return new spell_putricide_eat_ooze_SpellScript(); } }; class spell_putricide_mutated_plague : public SpellScriptLoader { public: spell_putricide_mutated_plague() : SpellScriptLoader("spell_putricide_mutated_plague") { } class spell_putricide_mutated_plague_AuraScript : public AuraScript { PrepareAuraScript(spell_putricide_mutated_plague_AuraScript); void HandleTriggerSpell(AuraEffect const* aurEff) { PreventDefaultAction(); Unit* caster = GetCaster(); if (!caster) return; uint32 triggerSpell = GetSpellInfo()->Effects[aurEff->GetEffIndex()].TriggerSpell; SpellInfo const* spell = sSpellMgr->GetSpellInfo(triggerSpell); spell = sSpellMgr->GetSpellForDifficultyFromSpell(spell, caster); int32 damage = spell->Effects[EFFECT_0].CalcValue(caster); float multiplier = 2.0f; if (GetTarget()->GetMap()->GetSpawnMode() & 1) multiplier = 3.0f; damage *= int32(pow(multiplier, GetStackAmount())); damage = int32(damage * 1.5f); GetTarget()->CastCustomSpell(triggerSpell, SPELLVALUE_BASE_POINT0, damage, GetTarget(), true, NULL, aurEff, GetCasterGUID()); } void OnRemove(AuraEffect const* /*aurEff*/, AuraEffectHandleModes /*mode*/) { uint32 healSpell = uint32(GetSpellInfo()->Effects[EFFECT_0].CalcValue()); GetTarget()->CastSpell(GetTarget(), healSpell, true, NULL, NULL, GetCasterGUID()); } void Register() OVERRIDE { OnEffectPeriodic += AuraEffectPeriodicFn(spell_putricide_mutated_plague_AuraScript::HandleTriggerSpell, EFFECT_0, SPELL_AURA_PERIODIC_TRIGGER_SPELL); AfterEffectRemove += AuraEffectRemoveFn(spell_putricide_mutated_plague_AuraScript::OnRemove, EFFECT_0, SPELL_AURA_PERIODIC_TRIGGER_SPELL, AURA_EFFECT_HANDLE_REAL); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_putricide_mutated_plague_AuraScript(); } }; class spell_putricide_mutation_init : public SpellScriptLoader { public: spell_putricide_mutation_init() : SpellScriptLoader("spell_putricide_mutation_init") { } class spell_putricide_mutation_init_SpellScript : public SpellScript { PrepareSpellScript(spell_putricide_mutation_init_SpellScript); SpellCastResult CheckRequirementInternal(SpellCustomErrors& extendedError) { 
InstanceScript* instance = GetExplTargetUnit()->GetInstanceScript(); if (!instance) return SpellCastResult::SPELL_FAILED_CANT_DO_THAT_RIGHT_NOW; Creature* professor = ObjectAccessor::GetCreature(*GetExplTargetUnit(), instance->GetData64(DATA_PROFESSOR_PUTRICIDE)); if (!professor) return SpellCastResult::SPELL_FAILED_CANT_DO_THAT_RIGHT_NOW; if (professor->AI()->GetData(DATA_PHASE) == PHASE_COMBAT_3 || !professor->IsAlive()) { extendedError = SPELL_CUSTOM_ERROR_ALL_POTIONS_USED; return SpellCastResult::SPELL_FAILED_CUSTOM_ERROR; } if (professor->AI()->GetData(DATA_ABOMINATION)) { extendedError = SPELL_CUSTOM_ERROR_TOO_MANY_ABOMINATIONS; return SpellCastResult::SPELL_FAILED_CUSTOM_ERROR; } return SpellCastResult::SPELL_CAST_OK; } SpellCastResult CheckRequirement() { if (!GetExplTargetUnit()) return SpellCastResult::SPELL_FAILED_BAD_TARGETS; if (GetExplTargetUnit()->GetTypeId() != TypeID::TYPEID_PLAYER) return SpellCastResult::SPELL_FAILED_TARGET_NOT_PLAYER; SpellCustomErrors extension = SPELL_CUSTOM_ERROR_NONE; SpellCastResult result = CheckRequirementInternal(extension); if (result != SpellCastResult::SPELL_CAST_OK) { Spell::SendCastResult(GetExplTargetUnit()->ToPlayer(), GetSpellInfo(), 0, result, extension); return result; } return SpellCastResult::SPELL_CAST_OK; } void Register() OVERRIDE { OnCheckCast += SpellCheckCastFn(spell_putricide_mutation_init_SpellScript::CheckRequirement); } }; class spell_putricide_mutation_init_AuraScript : public AuraScript { PrepareAuraScript(spell_putricide_mutation_init_AuraScript); void OnRemove(AuraEffect const* /*aurEff*/, AuraEffectHandleModes /*mode*/) { uint32 spellId = 70311; if (GetTarget()->GetMap()->GetSpawnMode() & 1) spellId = 71503; GetTarget()->CastSpell(GetTarget(), spellId, true); } void Register() OVERRIDE { AfterEffectRemove += AuraEffectRemoveFn(spell_putricide_mutation_init_AuraScript::OnRemove, EFFECT_0, SPELL_AURA_DUMMY, AURA_EFFECT_HANDLE_REAL); } }; SpellScript* GetSpellScript() const OVERRIDE { return new spell_putricide_mutation_init_SpellScript(); } AuraScript* GetAuraScript() const OVERRIDE { return new spell_putricide_mutation_init_AuraScript(); } }; class spell_putricide_mutated_transformation_dismiss : public SpellScriptLoader { public: spell_putricide_mutated_transformation_dismiss() : SpellScriptLoader("spell_putricide_mutated_transformation_dismiss") { } class spell_putricide_mutated_transformation_dismiss_AuraScript : public AuraScript { PrepareAuraScript(spell_putricide_mutated_transformation_dismiss_AuraScript); void OnRemove(AuraEffect const* /*aurEff*/, AuraEffectHandleModes /*mode*/) { if (Vehicle* veh = GetTarget()->GetVehicleKit()) veh->RemoveAllPassengers(); } void Register() OVERRIDE { AfterEffectRemove += AuraEffectRemoveFn(spell_putricide_mutated_transformation_dismiss_AuraScript::OnRemove, EFFECT_0, SPELL_AURA_PERIODIC_TRIGGER_SPELL, AURA_EFFECT_HANDLE_REAL); } }; AuraScript* GetAuraScript() const OVERRIDE { return new spell_putricide_mutated_transformation_dismiss_AuraScript(); } }; class spell_putricide_mutated_transformation : public SpellScriptLoader { public: spell_putricide_mutated_transformation() : SpellScriptLoader("spell_putricide_mutated_transformation") { } class spell_putricide_mutated_transformation_SpellScript : public SpellScript { PrepareSpellScript(spell_putricide_mutated_transformation_SpellScript); void HandleSummon(SpellEffIndex effIndex) { PreventHitDefaultEffect(effIndex); Unit* caster = GetOriginalCaster(); if (!caster) return; InstanceScript* instance = caster->GetInstanceScript(); if 
(!instance) return; Creature* putricide = ObjectAccessor::GetCreature(*caster, instance->GetData64(DATA_PROFESSOR_PUTRICIDE)); if (!putricide) return; if (putricide->AI()->GetData(DATA_ABOMINATION)) { if (Player* player = caster->ToPlayer()) Spell::SendCastResult(player, GetSpellInfo(), 0, SpellCastResult::SPELL_FAILED_CUSTOM_ERROR, SPELL_CUSTOM_ERROR_TOO_MANY_ABOMINATIONS); return; } uint32 entry = uint32(GetSpellInfo()->Effects[effIndex].MiscValue); SummonPropertiesEntry const* properties = sSummonPropertiesStore.LookupEntry(uint32(GetSpellInfo()->Effects[effIndex].MiscValueB)); uint32 duration = uint32(GetSpellInfo()->GetDuration()); Position pos; caster->GetPosition(&pos); TempSummon* summon = caster->GetMap()->SummonCreature(entry, pos, properties, duration, caster, GetSpellInfo()->Id); if (!summon || !summon->IsVehicle()) return; summon->CastSpell(summon, SPELL_ABOMINATION_VEHICLE_POWER_DRAIN, true); summon->CastSpell(summon, SPELL_MUTATED_TRANSFORMATION_DAMAGE, true); caster->CastSpell(summon, SPELL_MUTATED_TRANSFORMATION_NAME, true); caster->EnterVehicle(summon, 0); // VEHICLE_SPELL_RIDE_HARDCODED is used according to sniff, this is ok summon->SetCreatorGUID(caster->GetGUID()); putricide->AI()->JustSummoned(summon); } void Register() OVERRIDE { OnEffectHit += SpellEffectFn(spell_putricide_mutated_transformation_SpellScript::HandleSummon, EFFECT_0, SPELL_EFFECT_SUMMON); } }; SpellScript* GetSpellScript() const OVERRIDE { return new spell_putricide_mutated_transformation_SpellScript(); } }; class spell_putricide_mutated_transformation_dmg : public SpellScriptLoader { public: spell_putricide_mutated_transformation_dmg() : SpellScriptLoader("spell_putricide_mutated_transformation_dmg") { } class spell_putricide_mutated_transformation_dmg_SpellScript : public SpellScript { PrepareSpellScript(spell_putricide_mutated_transformation_dmg_SpellScript); void FilterTargetsInitial(std::list<WorldObject*>& targets) { if (Unit* owner = ObjectAccessor::GetUnit(*GetCaster(), GetCaster()->GetCreatorGUID())) targets.remove(owner); } void Register() OVERRIDE { OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_putricide_mutated_transformation_dmg_SpellScript::FilterTargetsInitial, EFFECT_0, TARGET_UNIT_SRC_AREA_ALLY); } }; SpellScript* GetSpellScript() const OVERRIDE { return new spell_putricide_mutated_transformation_dmg_SpellScript(); } }; class spell_putricide_regurgitated_ooze : public SpellScriptLoader { public: spell_putricide_regurgitated_ooze() : SpellScriptLoader("spell_putricide_regurgitated_ooze") { } class spell_putricide_regurgitated_ooze_SpellScript : public SpellScript { PrepareSpellScript(spell_putricide_regurgitated_ooze_SpellScript); // the only purpose of this hook is to fail the achievement void ExtraEffect(SpellEffIndex /*effIndex*/) { if (InstanceScript* instance = GetCaster()->GetInstanceScript()) instance->SetData(DATA_NAUSEA_ACHIEVEMENT, uint32(false)); } void Register() OVERRIDE { OnEffectHitTarget += SpellEffectFn(spell_putricide_regurgitated_ooze_SpellScript::ExtraEffect, EFFECT_0, SPELL_EFFECT_APPLY_AURA); } }; SpellScript* GetSpellScript() const OVERRIDE { return new spell_putricide_regurgitated_ooze_SpellScript(); } }; // Removes aura with id stored in effect value class spell_putricide_clear_aura_effect_value : public SpellScriptLoader { public: spell_putricide_clear_aura_effect_value() : SpellScriptLoader("spell_putricide_clear_aura_effect_value") { } class spell_putricide_clear_aura_effect_value_SpellScript : public SpellScript { 
PrepareSpellScript(spell_putricide_clear_aura_effect_value_SpellScript); void HandleScript(SpellEffIndex effIndex) { PreventHitDefaultEffect(effIndex); uint32 auraId = sSpellMgr->GetSpellIdForDifficulty(uint32(GetEffectValue()), GetCaster()); GetHitUnit()->RemoveAurasDueToSpell(auraId); } void Register() OVERRIDE { OnEffectHitTarget += SpellEffectFn(spell_putricide_clear_aura_effect_value_SpellScript::HandleScript, EFFECT_0, SPELL_EFFECT_SCRIPT_EFFECT); } }; SpellScript* GetSpellScript() const OVERRIDE { return new spell_putricide_clear_aura_effect_value_SpellScript(); } }; // Stinky and Precious spell, it's here because its used for both (Festergut and Rotface "pets") class spell_stinky_precious_decimate : public SpellScriptLoader { public: spell_stinky_precious_decimate() : SpellScriptLoader("spell_stinky_precious_decimate") { } class spell_stinky_precious_decimate_SpellScript : public SpellScript { PrepareSpellScript(spell_stinky_precious_decimate_SpellScript); void HandleScript(SpellEffIndex /*effIndex*/) { if (GetHitUnit()->GetHealthPct() > float(GetEffectValue())) { uint32 newHealth = GetHitUnit()->GetMaxHealth() * uint32(GetEffectValue()) / 100; GetHitUnit()->SetHealth(newHealth); } } void Register() OVERRIDE { OnEffectHitTarget += SpellEffectFn(spell_stinky_precious_decimate_SpellScript::HandleScript, EFFECT_0, SPELL_EFFECT_SCRIPT_EFFECT); } }; SpellScript* GetSpellScript() const OVERRIDE { return new spell_stinky_precious_decimate_SpellScript(); } }; void AddSC_boss_professor_putricide() { new boss_professor_putricide(); new npc_volatile_ooze(); new npc_gas_cloud(); new spell_putricide_gaseous_bloat(); new spell_putricide_ooze_channel(); new spell_putricide_slime_puddle(); new spell_putricide_slime_puddle_aura(); new spell_putricide_unstable_experiment(); new spell_putricide_ooze_eruption_searcher(); new spell_putricide_choking_gas_bomb(); new spell_putricide_unbound_plague(); new spell_putricide_eat_ooze(); new spell_putricide_mutated_plague(); new spell_putricide_mutation_init(); new spell_putricide_mutated_transformation_dismiss(); new spell_putricide_mutated_transformation(); new spell_putricide_mutated_transformation_dmg(); new spell_putricide_regurgitated_ooze(); new spell_putricide_clear_aura_effect_value(); new spell_stinky_precious_decimate(); }
ProjectSkyfire/SkyFire.548
src/server/scripts/Northrend/IcecrownCitadel/boss_professor_putricide.cpp
C++
gpl-2.0
66,166
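
The preceding SkyFire script repeats one wiring pattern for every spell: a SpellScriptLoader subclass that owns a SpellScript, a Register() that binds a handler to an effect index, and a GetSpellScript() factory that the script manager calls. Below is a minimal sketch of that pattern, assuming the same ScriptMgr/SpellScript headers as the file above; the loader name spell_example_dummy and the id SPELL_EXAMPLE_TRIGGERED are hypothetical placeholders for illustration, not identifiers from the original file.

enum ExampleSpells
{
    SPELL_EXAMPLE_TRIGGERED = 12345 // placeholder id, not a real spell entry
};

class spell_example_dummy : public SpellScriptLoader
{
    public:
        spell_example_dummy() : SpellScriptLoader("spell_example_dummy") { }

        class spell_example_dummy_SpellScript : public SpellScript
        {
            PrepareSpellScript(spell_example_dummy_SpellScript);

            void HandleDummy(SpellEffIndex /*effIndex*/)
            {
                // Same access pattern as the scripts above: act on the hit unit
                // from the caster's context and fire a triggered spell.
                if (Unit* target = GetHitUnit())
                    GetCaster()->CastSpell(target, SPELL_EXAMPLE_TRIGGERED, true);
            }

            void Register() OVERRIDE
            {
                OnEffectHitTarget += SpellEffectFn(spell_example_dummy_SpellScript::HandleDummy, EFFECT_0, SPELL_EFFECT_DUMMY);
            }
        };

        SpellScript* GetSpellScript() const OVERRIDE
        {
            return new spell_example_dummy_SpellScript();
        }
};

The loader would be instantiated once in the AddSC_* registration function, exactly as the new spell_putricide_*() calls at the end of the file do; that single list is the only place the script manager learns about these classes.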
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <head> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <meta http-equiv="X-UA-Compatible"content="IE=9; IE=8; IE=7; IE=EDGE"> <meta http-equiv="Content-Type" content="text/html; charset=gb2312"> <meta name="desCripTion" content="±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»®»¶Ó­´ó¼ÒÀ´µ½dld158.comÓéÀÖÆ½Ì¨" /> <title>±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»®_±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»®-dld158ÓéÀÖ{°Ù¶ÈÐÂÎÅ}°Ù¶ÈÈÏÖ¤</title> <!--ÈÈÁ¦Í¼¿ªÊ¼--> <meta name="uctk" content="enabled"> <!--ÈÈÁ¦Í¼½áÊø--> <meta name="keywords" content="±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»®"/> <meta name="desCripTion" content="»¶Ó­´ó¼ÒÀ´µ½dld158.comÓéÀÖÆ½Ì¨"/> <meta name="sitename" content="Ê×¶¼Ö®´°-±±¾©ÊÐÕþÎñÃÅ»§ÍøÕ¾"> <meta name="siteurl" content="http://www.beijing.gov.cn"> <meta name="district" content="±±¾©" > <meta name="filetype" content="0"> <meta name="publishedtype" content="1"> <meta name="pagetype" content="2"> <meta name="subject" content="28428;1"> <!-- Le styles --> <link href="http://www.beijing.gov.cn/images/zhuanti/xysym/bootstrap150609.css" rel="stylesheet"> <link href="http://www.beijing.gov.cn/images/zhuanti/xysym/bootstrap-responsive150609.css" rel="stylesheet"> <style> body { background:#E8E8E8; /* 60px to make the container go all the way to the bottom of the topbar */ } .navbar .btn-navbar { position:absolute; right:0; margin-top:50px;} #othermessage p {width:50%;} #othermessage dl { width:50%;} #breadcrumbnav ul { width:100%;} #breadcrumbnav ul li { line-height:14px; font-family:"ËÎÌå"; padding:0px 10px; margin:0; background:none; } #othermessage span { padding:0px 10px;} #footer { margin:20px -20px 0px -20px;} .navbar .nav li a { font-family:"Microsoft YaHei";} #div_zhengwen { font-family:"SimSun";} #div_zhengwen p{ font-family:"SimSun"; padding:0;} select { width:75px; float:left; height:35px;} .search .input{ border:1px solid #c1c1c1; width:290px;} .bdsharebuttonbox { float:left; width:80%;} .navbar .nav li a { padding: 10px 48px 11px 49px;} .nav_weather span { float:right;} #footer { position:absolute; left:0; right:0; margin:20px 0 0 0;} #essaybottom {font-family:"simsun"; } #pic { text-align:center; } #pic ul { padding-top:10px; display:none; } #pic li {font-family:"SimSun";} .content_text h1 {line-height:150%;} .version { float:right; padding:48px 50px 0 0} .search { padding: 50px 0 0 70px;} .nav_weather a { font-family:simsun;} .version li a { font-family:simsun;} .footer-class { font-family:simsun;} @media only screen and (max-width: 480px) { #pic img { width:100%;} } @media only screen and (max-width: 320px) { #pic img { width:100%;} } @media only screen and (max-width: 640px) { #pic img { width:100%;} } #filerider .filelink {font-family:"SimSun";} #filerider .filelink a:link { color:#0000ff; font-family:"SimSun";} </style> <sCripT type="text/javasCripT"> var pageName = "t1424135"; var pageExt = "htm"; var pageIndex = 0 + 1; var pageCount = 1; function getCurrentPage() { document.write(pageIndex); } function generatePageList() { for (i=0;i<1;i++) { var curPage = i+1; document.write('<option value=' + curPage); if (curPage == pageIndex) document.write(' selected'); document.write('>' + curPage + '</option>'); } } function preVious(n) { if (pageIndex == 1) { alert('ÒѾ­ÊÇÊ×Ò³£¡'); } else { getPageLocation(pageIndex-1); } } function next(n) { if (pageIndex == pageCount) { alert('ÒѾ­ÊÇβҳ£¡'); } else { nextPage(pageIndex); } } function nextPage(page) { var gotoPage = ""; if (page == 0) { 
gotoPage = pageName + "." + pageExt; } else { gotoPage = pageName + "_" + page + "." + pageExt; } location.href = gotoPage; } function getPageLocation(page) { var gotoPage = ""; var tpage; if (page == 1) { gotoPage = pageName + "." + pageExt; } else { tpage=page-1; gotoPage = pageName + "_" + tpage + "." + pageExt; } location.href = gotoPage; } </sCripT> <SCRIPT type=text/javasCripT> function $(xixi) { return document.getElementById(xixi); } //ת»»×ֺŠfunction doZoom(size){ if(size==12){ $("contentText").style.fontSize = size + "px"; $("fs12").style.display = ""; $("fs14").style.display = "none"; $("fs16").style.display = "none"; } if(size==14){ $("contentText").style.fontSize = size + "px"; $("fs12").style.display = "none"; $("fs14").style.display = ""; $("fs16").style.display = "none"; } if(size==16){ $("contentText").style.fontSize = size + "px"; $("fs12").style.display = "none"; $("fs14").style.display = "none"; $("fs16").style.display = ""; } } </SCRIPT> <!-- Le HTML5 shim, for IE6-8 support of HTML5 elements --> <!--[if lt IE 9]> <sCripT src="//html5shim.googlecode.com/svn/trunk/html5.js"></sCripT> <![endif]--> <!-- Le fav and touch icons --> <link rel="shortcut icon" href="images/favicon.ico"> <link rel="apple-touch-icon" href="images/apple-touch-icon.png"> <link rel="apple-touch-icon" sizes="72x72" href="images/apple-touch-icon-72x72.png"> <link rel="apple-touch-icon" sizes="114x114" href="images/apple-touch-icon-114x114.png"> <sCripT type="text/javasCripT"> window.onload = function(){ var picurl = [ "", "", "", "", "", "", "", "", "", "" ]; var i=0; for(i=0;i<picurl.length;i++) { picurl[i].index=i; if(picurl[i]!="") { document.getElementById("pic_"+i).style.display = "block"; } } } </sCripT> </head> <body> <div class="navbar navbar-fixed-top"> <div class="navbar-inner"> <div class="nav_weather"> <div class="container"><a href="http://zhengwu.beijing.gov.cn/sld/swld/swsj/t1232150.htm" title="ÊÐί" target="_blank">ÊÐί</a> | <a href="http://www.bjrd.gov.cn/" title="ÊÐÈË´ó" target="_blank">ÊÐÈË´ó</a> | <a href="http://www.beijing.gov.cn/" title="ÊÐÕþ¸®" target="_blank">ÊÐÕþ¸®</a> | <a href="http://www.bjzx.gov.cn/" title="ÊÐÕþЭ" target="_blank">ÊÐÕþЭ</a></div> </div> <div class="container"> <a class="btn btn-navbar" data-toggle="collapse" data-target=".nav-collapse"> <span class="icon-bar"></span> <span class="icon-bar"></span> <span class="icon-bar"></span> </a> <div class="span12"> <a class="brand" href="http://www.beijing.gov.cn/"><img src="http://www.beijing.gov.cn/images/zhuanti/xysym/logo.png" /></a> <div class="search"> <sCripT language="JavaScript" type="text/javasCripT"> function checkForm(){ var temp = searchForm.temp.value; var database = searchForm.database.value; if(temp==null || temp==""){ alert("ÇëÊäÈëËÑË÷Ìõ¼þ"); } else{ var url="http://so.beijing.gov.cn/Query?qt="+encodeURIComponent(temp)+"&database="+encodeURIComponent(database); window.open(url); } return false; } </sCripT> <form id="search" method="get" name="searchForm" action="" target="_blank" onSubmit="return checkForm()"> <input type="hidden" value="bj" id="database" name="database" /> <input name="temp" id="keyword" type="text" value="È«ÎÄËÑË÷" class="input" title="È«ÎÄËÑË÷¹Ø¼ü×Ö" /> <input id="searchbutton" type="image" src="http://www.beijing.gov.cn/images/zhuanti/xysym/search_btn.gif" width="66" height="35" title="µã»÷ËÑË÷" alt="ËÑË÷" /> </form> </div> <div class="version"><ul> <li><a title="ÎÞÕϰ­" href="http://wza.beijing.gov.cn/" target="_blank" id="yx_style_nav">ÎÞÕϰ­</a></li> <li><a target="_blank" 
title="·±Ìå°æ" href="http://210.75.193.158/gate/big5/www.beijing.gov.cn">·±Ìå</a></li> <li><a target="_blank" title="¼òÌå°æ" href="http://www.beijing.gov.cn">¼òÌå</a></li> <li class="last"><a target="_blank" title="English Version" href="http://www.ebeijing.gov.cn">English</a></li></ul><ul> <li><a href="javasCripT:void(0)" onclick="SetHome(this,window.location)" title="ÉèΪÊ×Ò³">ÉèΪÊ×Ò³</a></li> <li><a title="¼ÓÈëÊÕ²Ø" href="javasCripT:void(0)" onclick="shoucang(document.title,window.location)">¼ÓÈëÊÕ²Ø</a></li> <li class="last"><a target="_blank" title="ÒÆ¶¯°æ" href="http://www.beijing.gov.cn/sjbsy/">ÒÆ¶¯°æ</a></li></ul></div> </div> </div> <div class="nav-collapse"> <div class="container"> <ul class="nav"> <li ><a href="http://www.beijing.gov.cn/" class="normal" title="Ê×Ò³">Ê×Ò³</a></li> <li><a href="http://zhengwu.beijing.gov.cn/" class="normal" title="ÕþÎñÐÅÏ¢">ÕþÎñÐÅÏ¢</a></li> <li><a href="http://www.beijing.gov.cn/sqmy/default.htm" class="normal" title="ÉçÇéÃñÒâ">ÉçÇéÃñÒâ</a></li> <li><a href="http://banshi.beijing.gov.cn" class="normal" title="ÕþÎñ·þÎñ">ÕþÎñ·þÎñ</a></li> <li><a href="http://www.beijing.gov.cn/bmfw" class="normal" title="±ãÃñ·þÎñ">±ãÃñ·þÎñ</a></li> <li style="background:none;"><a href="http://www.beijing.gov.cn/rwbj/default.htm" class="normal" title="ÈËÎı±¾©">ÈËÎı±¾©</a></li> </ul> </div> </div><!--/.nav-collapse --> </div> </div> </div> <div class="container" style="background:#fff; margin-top:24px;"> <div class="content_text"> <div id="breadcrumbnav"> <ul> <li>Ê×Ò³¡¡>¡¡±ãÃñ·þÎñ¡¡>¡¡×îÐÂÌáʾ</li> </ul> <div class="clearboth"></div> </div> <h1>±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»® <div id="othermessage"> <p> <span>À´Ô´£º±±¾©ÈÕ±¨</span> <span>ÈÕÆÚ£º2017-04-21 13:07:22</span></p> <dl> <sCripT language='JavaScript' type="text/javasCripT"> function changeSize(size){document.getElementById('div_zhengwen').style.fontSize=size+'px'}</sCripT> ¡¾×ÖºÅ&nbsp;&nbsp;<a href='javasCripT:changeSize(18)' style="font-size:16px;">´ó</a>&nbsp;&nbsp;<a href='javasCripT:changeSize(14)' style="font-size:14px;">ÖÐ</a>&nbsp;&nbsp;<a href='javasCripT:changeSize(12)' style="font-size:12px;">С</a>¡¿</dl> </div> </h1> <div id="div_zhengwen"> <div id="pic"> <ul id="pic_0"> <li><img src="" border="0" alt="" title="" /></li> <li></li> </ul> <ul id="pic_1"> <li><img src="" border="0" alt="" title="" /></li> <li></li> </ul> <ul id="pic_2"> <li><img src="" border="0" alt="" title="" /></li> <li></li> </ul> <ul id="pic_3"> <li><img src="" border="0" alt="" title="" /></li> <li></li> </ul> <ul id="pic_4"> <li><img src="" border="0" alt="" title="" /></li> <li></li> </ul> <ul id="pic_5"> <li><img src="" border="0" alt="" title="" /></li> <li></li> </ul> <ul id="pic_6"> <li><img src="" border="0" alt="" title="" /></li> <li></li> </ul> <ul id="pic_7"> <li><img src="" border="0" alt="" title="" /></li> <li></li> </ul> <ul id="pic_8"> <li><img src="" border="0" alt="" title="" /></li> <li></li> </ul> <ul id="pic_9"> <li><img src="" border="0" alt="" title="" /></li> <li></li> </ul> </div> <div class=TRS_Editor><p align="justify">¡¡¡¡±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»® »¶Ó­´ó¼ÒÀ´µ½dld158.comÓéÀÖÆ½Ì¨</p> <img src="{img}" width="300" height="330"/> <p align="justify">¡¡¡¡<a href="http://sapience.com.tw/logs/meng/q0701857.html">±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»®</a></p>±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»® <p align="justify">¡¡¡¡<a href="http://sapience.com.tw/logs/meng/q6781682.html">±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»®</a></p>±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»® <p align="justify">¡¡¡¡<a 
href="http://sapience.com.tw/logs/meng/q4741506.html">±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»®</a></p>±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»® <p align="justify">¡¡¡¡<a href="http://sapience.com.tw/logs/meng/q0721330.html">±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»®</a></p>±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»® <p align="justify">¡¡¡¡<a href="http://sapience.com.tw/logs/meng/q6781155.html">±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»®</a></p>±±¾©Èü³µpk10Ö®°ËÂë¹öÑ©Çò¼Æ»® <p align="justify">¡¡¡¡ <div class="img_wrapper"><img src="http://n.sinaimg.cn/news/transform/20170420/ycKd-fyeqcac0846015.jpg" alt="" data-link=""></div> <p>¡¡¡¡ÖÐÐÂÍøÌì½ò4ÔÂ20ÈÕµç £¨¼ÇÕß ÕŵÀÕý£©Õë¶ÔýÌ忯·¢µÄ¡°Ìì½òÊо²º£ÇøÎ÷µÔׯÕòÙ¡¼Òׯ´å15Íòƽ·½Ã×Éø¿ÓÎÛȾ¡±±¨µÀ£¬¾²º£¹Ù·½20ÈÕ»ØÓ¦³Æ£¬±¨µÀËù֏ɸ¿Ó£¬Êµ¼ÊλÓÚ¾²º£ÇøÌƹÙÍÍÕòÙ¡¼Òׯ´å£¬ÎªÉÏÊÀ¼Í70Äê´ú³õשÍß³§È¡ÍÁÐγɡ£ÓÉÓÚÀúÊ·Ô­Òò£¬²¿·Ö¿ÓÌÁ±»ÎÛȾ£¬¶àΪÇãµ¹·ÏËá»ò͵ÅÅÎÛË®ËùÖ£¬¾²º£Çø¾ÙÒ»·´ÈýѸËÙÍÆ½øÉø¿ÓÎÛȾÖÎÀí¡£</p> <p>¡¡¡¡4ÔÂ18ÈÕ£¬ÓÐýÌ忯·¢ÁË¡°Ìì½òÊо²º£ÇøÎ÷µÔׯÕòÙ¡¼Òׯ´å15Íòƽ·½Ã×Éø¿ÓÎÛȾ¡±µÄͼÎı¨µÀ¡£ÒýÆðÓßÂ۹㷺¹Ø×¢¡£¶Ô´Ë£¬Ìì½òÊо²º£ÇøÎ¯¡¢ÇøÕþ¸®¸ß¶ÈÖØÊÓ£¬18ÈÕÍí£¬Á¢¼´×éÖ¯»·±£¡¢¹«°²¡¢Ë®ÎñµÈ²¿ÃŶÔÉø¿ÓÎÛȾÇé¿ö½øÐÐÏêϸºË²é¡£</p> <p>¡¡¡¡ºË²é½á¹ûÏÔʾ£¬Éø¿Óʵ¼ÊλÓÚÌì½òÊо²º£ÇøÌƹÙÍÍÕòÙ¡¼Òׯ´å£¬ÎªÉÏÊÀ¼Í70Äê´ú³õשÍß³§È¡ÍÁÐγɡ£ÓÉÓÚÀúÊ·Ô­Òò£¬¾²º£ÇøµÄ²úÒµÒÔºÚÉ«¡¢ÓÐÉ«½ðÊô¼Ó¹¤ÎªÖ÷£¬²¿·Ö¿ÓÌÁ±»ÎÛȾ£¬¶àΪÇãµ¹·ÏËá»ò͵ÅÅÎÛË®ËùÖ£¬¶øÙ¡¼Òׯ´å¿ÓÌÁΪÆäÖÐÒ»¸ö¡£</p> <p>¡¡¡¡Ìì½ò¾²º£¹Ù·½³Æ£¬¸ÃÇø2013Äê¼´ÖÆ¶¨ÁË¡¶¾²º£ÇøË®ÎÛȾÖÎÀíʵʩ·½°¸¡·£¬ÏȺóͶÈë6ÒÚÔª£¬ÖÎÀíÉø¿Ó18¸ö¡¢¿ÓÌÁ450¸ö£¬ÖÎÀí¹æÄ£»¯ÑøÖ³³¡99¸ö£¬ÇåÓÙºÓµÀ263¹«ÀÒѽ¨³ÉÎÛË®´¦Àí³§11×ù£¬½¨ÖÆÕòÎÛË®´¦ÀíÕ¾18×ù£»ÒÀ·¨È¡µÞÎÛȾÆóÒµ467¼Ò¡¢ÊÛËáÆóÒµ26¼Ò£»½¨³É·ÏËá´¦ÖÃÖÐÐÄ2×ù£¬ÊµÏÖÁË·ÏËáÎÞº¦»¯¼¯Öд¦Ö㬴ÓÔ´Í·ÉϽâ¾öÁË·ÏËáÎÛȾÎÊÌâ¡£</p> <p>¡¡¡¡Í¬Ê±£¬¾²º£¹Ù·½Ê¼ÖÕ±£³Ö¶Ô»·¾³Î¥·¨ÐÐΪµÄ¸ßѹÑÏ´òÌ¬ÊÆ£¬Â¶Í·¾Í´ò£¬Î¥¹æ¾Í·££¬·¸×ï¾Í×¥¡£¾²º£ÇøÕþ¸®Í¨±¨£¬2013ÄêÒÔÀ´ÒÑ´ò»÷´¦Àí115ÈË£¬ÅÐÐÌ23ÈË£¬ÓÐЧÕðÉåÁË»·¾³Î¥·¨ÐÐΪ¡£</p> <p>¡¡¡¡¹ØÓÚÙ¡¼Òםɸ¿ÓÖÎÀí¹¤×÷£¬¾²º£·½ÃæÍ¸Â¶£¬ÓÚ2014Äê¾ÍÒÑ¿ªÊ¼Æô¶¯£¬¹¤³Ì×ÜͶ×Ê1072ÍòÔª£¬¶ÔÉø¿ÓÎÛË®½øÐÐÖÎÀí£¬11Ô¿¢¹¤²¢Í¨¹ýÑéÊÕ¡£2016ÄêϰëÄ꣬¾²º£Çø»·±£¾ÖѲ²é·¢ÏÖ£¬¸ÃÉø¿ÓÎÛȾ³öÏÖ·´¸´¡£Í¬Äê7Ô£¬¾²º£ÇøÕþ¸®¾ö¶¨¶Ô¸ÃÉø¿ÓÎÛË®¼°µ×Äà½øÐÐÉî¶ÈÖÎÀí£¬²¢ÓÚ12ÔÂÍê³ÉÉø¿ÓÖÎÀíÏîÄ¿µÄÁ¢Ïî¡¢Éè¼Æ¡¢ÕбêµÈ¹¤×÷¡£2017Äê4Ô£¬¸ÃÉø¿ÓÎÛÄàÖÎÀíÈ«²¿°ìÍêǰÆÚÊÖÐø£¬Ä¿Ç°£¬Ê©¹¤µ¥Î»ÒѾ­È볡ÕýÔÚ¼Ó½ôÊ©¹¤¡£</p> <p>¡¡¡¡4ÔÂ19ÈÕ£¬»·±£²¿ºÍÌì½òÊл·±£¾Öµ÷²é×éÒ²ÉîÈë¾²º££¬¶Ô¸ÃÏ×÷½øÐÐʵµØºË²é¡£°´ÕÕ»·±£²¿Ìá³öµÄÒâ¼ûÒªÇ󣬾²º£ÇøÖƶ¨Á˽øÒ»²½ÖÎÀí´ëÊ©£ºÒ»ÊÇÇøÕþ¸®³ÉÁ¢ÁËÒÔÇø³¤Îª×鳤£¬·Ö¹Ü¸±Çø³¤Îª¸±×鳤£¬»·±£¡¢Ë®Îñ¡¢¹«°²ºÍÓйØÏçÕòΪ³ÉÔ±µÄÉø¿ÓÖÎÀíÁ쵼С×é£¬È«ÃæÍÆ½øÉø¿ÓÖÎÀí¹¤×÷£»¶þÊÇÓÉ»·±£²¿ÃÅǣͷ£¬Ïà¹Ø²¿ÃÅÅäºÏ£¬¶ÔÉø¿ÓË®Ñù¡¢ÎÛÄàµÈ½øÐмà²â£¬¸ù¾Ý¼ì²â½á¹û£¬Ï¸»¯ÖÎÀí´ëÊ©£¬¼Ó¿ìÖÎÀí½ø¶È£¬7Ôµ×֮ǰÍê³ÉÖÎÀíÈÎÎñ£»ÈýÊǼ°Ê±ÏòÉç»á¹«¿ª»·¾³¼à²âºÍÎÛȾÖÎÀíÐÅÏ¢£»ËÄÊǾÙÒ»·´Èý£¬È«ÃæÅŲ飬¶ÔÒÑÖÎÀíµÄÉø¿Ó½øÐÐÔÙÅŲ飬ÍêÉÆ³¤Ð§»úÖÆ£¬¹®¹ÌÖÎÀí³É¹û¡£ÇøÎ¯¡¢ÇøÕþ¸®½«ÒÔ´ËΪ½ä£¬¾ÙÈ«ÇøÖ®Á¦ÌúÍóÖÎÎÛ£¬È·±£¸ßÖÊÁ¿Íê³ÉÎÛÈ¾Éø¿ÓÖÎÀíÈÎÎñ¡£</p> <p>¡¡¡¡4ÔÂ19ÈÕÍí£¬Ìì½ò¾²º£ÇøÎ¯Êé¼Ç¼½¹úÇ¿Á¬Ò¹ÕÙ¿ª»áÒ飬´«´ï»·±£²¿ºÍÊÐÕþ¸®µ÷²é×éÒâ¼ûÒªÇó£¬È«Ã沿ÊðÎÛȾÖÎÀí¡°»ØÍ·¿´¡±¹¤×÷¡£¼½¹úǿָ³ö£¬³öÏÖÕâÑùµÄÎÊÌ⣬ÁîÈËÍ´ÐÄ£¬ÊÇÎÒÃǵŤ×÷ûÓÐ×öºÃ£¬ÎÒÃǸºÓв»¿ÉÍÆÐ¶µÄÔðÈΣ¬ÒªÏòÈ«ÇøÈËÃñ×ö³öÉî¿Ì¼ìÌÖ¡£</p> <p>¡¡¡¡¼½¹úǿҪÇó£¬ÒªÒÔ¶ÔÈËÃñ¸ß¶È¸ºÔðµÄ̬¶È£¬Ìá¸ß¶Ô»·¾³±£»¤¹¤×÷µÄÈÏʶ£¬¼Ó¿ì²úÒµ½á¹¹µ÷Õû²½·¥£¬Äþ¿ÉÎþÉüÒ»µã¾­¼ÃÔö³¤ËÙ¶È£¬Ò²²»Òª¡°ÎÛȾµÄGDP¡±¡£¸÷ÏçÕò¡¢Ïà¹Ø²¿Ãź͵¥Î»ÒªÈÏÕæ·´Ë¼£¬ÒÔ´ËΪ½ä£»Ö÷¶¯½ÓÊܸ÷·½Ãæ¼à¶½£¬¼°Ê±ÏòÉç»á¹«¿ª»·¾³¼à²âºÍÎÛȾÖÎÀíÐÅÏ¢£»Òª¾ÙÒ»·´Èý£¬ÍêÉÆ³¤Ð§»úÖÆ£¬¶ÔÈ«ÇøËùÓл·¾³ÖÎÀí¹¤³Ì¿ªÕ¹¡°»ØÍ·¿´¡±ÔÙÅŲ顣£¨Í꣩</p> </p></div> <div id="filerider"> <div class="filelink" style="margin:10px 0 0 0;"><a href="./P020160207382291268334.doc">¸½¼þ£º2016Äê±±¾©µØÇø²©Îï¹Ý´º½ÚϵÁлһÀÀ±í</a></div> </div> </div> <div id="essaybottom" style="border:0; overflow:hidden; margin-bottom:0;"> <div style="padding-bottom:8px;" id="proclaim"><p style="float:left; line-height:30px;">·ÖÏíµ½£º</p><div class="bdsharebuttonbox"><a href="#" 
class="bds_more" data-cmd="more"></a><a href="#" class="bds_qzone" data-cmd="qzone"></a><a href="#" class="bds_tsina" data-cmd="tsina"></a><a href="#" class="bds_tqq" data-cmd="tqq"></a><a href="#" class="bds_renren" data-cmd="renren"></a><a href="#" class="bds_weixin" data-cmd="weixin"></a></div> <sCripT>window._bd_share_config={"common":{"bdSnsKey":{},"bdText":"","bdMini":"2","bdPic":"","bdStyle":"0","bdSize":"16"},"share":{},"image":{"viewList":["qzone","tsina","tqq","renren","weixin"],"viewText":"·ÖÏíµ½£º","viewSize":"16"},"selectShare":{"bdContainerClass":null,"bdSelectMiniList":["qzone","tsina","tqq","renren","weixin"]}};with(document)0[(getElementsByTagName('head')[0]||body).appendChild(createElement('sCripT')).src='http://bdimg.share.baidu.com/static/api/js/share.js?v=89860593.js?cdnversion='+~(-new Date()/36e5)];</sCripT></div> </div> <div id="essaybottom" style="margin-top:0;"> <div style="padding-bottom:8px;" id="proclaim">תժÉùÃ÷£º×ªÕªÇë×¢Ã÷³ö´¦²¢×ö»ØÁ´</div> </div> </div> </div> <!-- /container --> <div id="footer"> <div class="container"> <div class="span1" style="text-align:center"><sCripT type="text/javasCripT">document.write(unescape("%3Cspan id='_ideConac' %3E%3C/span%3E%3CsCripT src='http://dcs.conac.cn/js/01/000/0000/60429971/CA010000000604299710004.js' type='text/javasCripT'%3E%3C/sCripT%3E"));</sCripT></div> <div class="span8"> <div class="footer-top"> <div class="footer-class"> <p> <a title="¹ØÓÚÎÒÃÇ" href="http://www.beijing.gov.cn/zdxx/sdzcjs/t1306339.htm" style=" background:0">¹ØÓÚÎÒÃÇ</a><a target="_blank" title="Õ¾µãµØÍ¼" href="http://www.beijing.gov.cn/zdxx/sdzcjs/t1306342.htm">Õ¾µãµØÍ¼</a><a target="_blank" title="ÁªÏµÎÒÃÇ" href="http://www.beijing.gov.cn/zdxx/sdzcjs/t1306343.htm">ÁªÏµÎÒÃÇ</a><a target="_blank" title="ÆÀ¼ÛÊ×¶¼Ö®´°" href="mailto:service@beijing.gov.cn">ÆÀ¼ÛÊ×¶¼Ö®´°</a><a target="_blank" title="·¨ÂÉÉùÃ÷" href="http://www.beijing.gov.cn/zdxx/t709204.htm">·¨ÂÉÉùÃ÷</a> </p> <p>Ö÷°ì£º±±¾©ÊÐÈËÃñÕþ¸® °æÈ¨ËùÓга죺±±¾©Êо­¼ÃºÍÐÅÏ¢»¯Î¯Ô±»á ¾©ICP±¸05060933ºÅ ÔËÐйÜÀí£ºÊ×¶¼Ö®´°ÔËÐйÜÀíÖÐÐÄ</p> <p>¾©¹«Íø°²±¸ 110105000722 µØÖ·£º±±¾©Êг¯ÑôÇø±±³½Î÷·Êý×Ö±±¾©´óÏÃÄϰ˲㠴«Õ棺84371700 ¿Í·þÖÐÐĵ绰£º59321109</p> </div> </div> </div> </div> </div> <div style="display:none"> <sCripT type="text/javasCripT">document.write(unescape("%3CsCripT src='http://yhfx.beijing.gov.cn/webdig.js?z=12' type='text/javasCripT'%3E%3C/sCripT%3E"));</sCripT> <sCripT type="text/javasCripT">wd_paramtracker("_wdxid=000000000000000000000000000000000000000000")</sCripT> </div> <sCripT src="http://static.gridsumdissector.com/js/Clients/GWD-800003-C99186/gs.js" language="JavaScript"></sCripT> <sCripT type="text/javasCripT"> // ÉèÖÃΪÖ÷Ò³ function SetHome(obj,vrl){ try{ obj.style.behavior='url(#default#homepage)';obj.setHomePage(vrl); } catch(e){ if(window.netscape) { try { netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect"); } catch (e) { alert("´Ë²Ù×÷±»ä¯ÀÀÆ÷¾Ü¾ø£¡\nÇëÔÚä¯ÀÀÆ÷µØÖ·À¸ÊäÈë¡°about:config¡±²¢»Ø³µ\nÈ»ºó½« [signed.applets.codebase_principal_support]µÄÖµÉèÖÃΪ'true',Ë«»÷¼´¿É¡£"); } var prefs = Components.classes['@mozilla.org/preferences-service;1'].getService(Components.interfaces.nsIPrefBranch); prefs.setCharPref('browser.startup.homepage',vrl); }else{ alert("ÄúµÄä¯ÀÀÆ÷²»Ö§³Ö£¬Çë°´ÕÕÏÂÃæ²½Öè²Ù×÷£º1.´ò¿ªä¯ÀÀÆ÷ÉèÖá£2.µã»÷ÉèÖÃÍøÒ³¡£3.ÊäÈ룺"+vrl+"µã»÷È·¶¨¡£"); } } } // ¼ÓÈëÊÕ²Ø ¼æÈÝ360ºÍIE6 function shoucang(sTitle,sURL) { try { window.external.addFavorite(sURL, sTitle); } catch (e) { try { window.sidebar.addPanel(sTitle, sURL, ""); } catch (e) { 
alert("¼ÓÈëÊÕ²ØÊ§°Ü£¬ÇëʹÓÃCtrl+D½øÐÐÌí¼Ó"); } } } </sCripT> <!-- Le javasCripT ================================================== --> <!-- Placed at the end of the document so the pages load faster --> <sCripT src="/images/zhuanti/xysym/jquery.js"></sCripT> <sCripT src="/images/zhuanti/xysym/bootstrap-collapse.js"></sCripT> <sCripT> $(document).ready(function(){ $(".ui-select").selectWidget({ change : function (changes) { return changes; }, effect : "slide", keyControl : true, speed : 200, scrollHeight : 250 }); }); </sCripT> </body> </html>
ForAEdesWeb/AEW25
logs/meng/q2761986.html
HTML
gpl-2.0
19,966
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!--NewPage--> <HTML> <HEAD> <!-- Generated by javadoc (build 1.6.0_65) on Sun Dec 15 23:08:04 HST 2013 --> <TITLE> SCCGraphDriver </TITLE> <META NAME="date" CONTENT="2013-12-15"> <LINK REL ="stylesheet" TYPE="text/css" HREF="stylesheet.css" TITLE="Style"> <SCRIPT type="text/javascript"> function windowTitle() { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="SCCGraphDriver"; } } </SCRIPT> <NOSCRIPT> </NOSCRIPT> </HEAD> <BODY BGCOLOR="white" onload="windowTitle();"> <HR> <!-- ========= START OF TOP NAVBAR ======= --> <A NAME="navbar_top"><!-- --></A> <A HREF="#skip-navbar_top" title="Skip navigation links"></A> <TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY=""> <TR> <TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A NAME="navbar_top_firstrow"><!-- --></A> <TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY=""> <TR ALIGN="center" VALIGN="top"> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> &nbsp;<FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="class-use/SCCGraphDriver.html"><FONT CLASS="NavBarFont1"><B>Use</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="index-files/index-1.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A>&nbsp;</TD> </TR> </TABLE> </TD> <TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM> </EM> </TD> </TR> <TR> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> &nbsp;<A HREF="NetworkMetrics.html" title="class in &lt;Unnamed&gt;"><B>PREV CLASS</B></A>&nbsp; &nbsp;<A HREF="Vertex.html" title="class in &lt;Unnamed&gt;"><B>NEXT CLASS</B></A></FONT></TD> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> <A HREF="index.html?SCCGraphDriver.html" target="_top"><B>FRAMES</B></A> &nbsp; &nbsp;<A HREF="SCCGraphDriver.html" target="_top"><B>NO FRAMES</B></A> &nbsp; &nbsp;<SCRIPT type="text/javascript"> <!-- if(window==top) { document.writeln('<A HREF="allclasses-noframe.html"><B>All Classes</B></A>'); } //--> </SCRIPT> <NOSCRIPT> <A HREF="allclasses-noframe.html"><B>All Classes</B></A> </NOSCRIPT> </FONT></TD> </TR> <TR> <TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2"> SUMMARY:&nbsp;NESTED&nbsp;|&nbsp;FIELD&nbsp;|&nbsp;<A HREF="#constructor_summary">CONSTR</A>&nbsp;|&nbsp;<A HREF="#method_summary">METHOD</A></FONT></TD> <TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2"> DETAIL:&nbsp;FIELD&nbsp;|&nbsp;<A HREF="#constructor_detail">CONSTR</A>&nbsp;|&nbsp;<A HREF="#method_detail">METHOD</A></FONT></TD> </TR> </TABLE> <A NAME="skip-navbar_top"></A> <!-- ========= END OF TOP NAVBAR ========= --> <HR> <!-- ======== START OF CLASS DATA ======== --> <H2> Class SCCGraphDriver</H2> <PRE> java.lang.Object <IMG SRC="./resources/inherit.gif" ALT="extended by "><B>SCCGraphDriver</B> </PRE> <HR> <DL> <DT><PRE>public class <B>SCCGraphDriver</B><DT>extends java.lang.Object</DL> </PRE> <P> Class: ICS 
311</br> Project: Network Metrics</br> File name: SCCGraphDriverjava</br> Description: Main </br> <P> <P> <DL> <DT><B>Version:</B></DT> <DD>1, 12/12/13</DD> <DT><B>Author:</B></DT> <DD>Josephine Cher Garces</DD> </DL> <HR> <P> <!-- ======== CONSTRUCTOR SUMMARY ======== --> <A NAME="constructor_summary"><!-- --></A> <TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY=""> <TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor"> <TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2"> <B>Constructor Summary</B></FONT></TH> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD><CODE><B><A HREF="SCCGraphDriver.html#SCCGraphDriver()">SCCGraphDriver</A></B>()</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</TD> </TR> </TABLE> &nbsp; <!-- ========== METHOD SUMMARY =========== --> <A NAME="method_summary"><!-- --></A> <TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY=""> <TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor"> <TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2"> <B>Method Summary</B></FONT></TH> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>static&nbsp;void</CODE></FONT></TD> <TD><CODE><B><A HREF="SCCGraphDriver.html#main(java.lang.String[])">main</A></B>(java.lang.String[]&nbsp;args)</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</TD> </TR> </TABLE> &nbsp;<A NAME="methods_inherited_from_class_java.lang.Object"><!-- --></A> <TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY=""> <TR BGCOLOR="#EEEEFF" CLASS="TableSubHeadingColor"> <TH ALIGN="left"><B>Methods inherited from class java.lang.Object</B></TH> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD><CODE>equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</CODE></TD> </TR> </TABLE> &nbsp; <P> <!-- ========= CONSTRUCTOR DETAIL ======== --> <A NAME="constructor_detail"><!-- --></A> <TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY=""> <TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor"> <TH ALIGN="left" COLSPAN="1"><FONT SIZE="+2"> <B>Constructor Detail</B></FONT></TH> </TR> </TABLE> <A NAME="SCCGraphDriver()"><!-- --></A><H3> SCCGraphDriver</H3> <PRE> public <B>SCCGraphDriver</B>()</PRE> <DL> </DL> <!-- ============ METHOD DETAIL ========== --> <A NAME="method_detail"><!-- --></A> <TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY=""> <TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor"> <TH ALIGN="left" COLSPAN="1"><FONT SIZE="+2"> <B>Method Detail</B></FONT></TH> </TR> </TABLE> <A NAME="main(java.lang.String[])"><!-- --></A><H3> main</H3> <PRE> public static void <B>main</B>(java.lang.String[]&nbsp;args)</PRE> <DL> <DD><DL> </DL> </DD> </DL> <!-- ========= END OF CLASS DATA ========= --> <HR> <!-- ======= START OF BOTTOM NAVBAR ====== --> <A NAME="navbar_bottom"><!-- --></A> <A HREF="#skip-navbar_bottom" title="Skip navigation links"></A> <TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY=""> <TR> <TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A NAME="navbar_bottom_firstrow"><!-- --></A> <TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY=""> <TR ALIGN="center" VALIGN="top"> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> &nbsp;<FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A 
HREF="class-use/SCCGraphDriver.html"><FONT CLASS="NavBarFont1"><B>Use</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="index-files/index-1.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A>&nbsp;</TD> </TR> </TABLE> </TD> <TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM> </EM> </TD> </TR> <TR> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> &nbsp;<A HREF="NetworkMetrics.html" title="class in &lt;Unnamed&gt;"><B>PREV CLASS</B></A>&nbsp; &nbsp;<A HREF="Vertex.html" title="class in &lt;Unnamed&gt;"><B>NEXT CLASS</B></A></FONT></TD> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> <A HREF="index.html?SCCGraphDriver.html" target="_top"><B>FRAMES</B></A> &nbsp; &nbsp;<A HREF="SCCGraphDriver.html" target="_top"><B>NO FRAMES</B></A> &nbsp; &nbsp;<SCRIPT type="text/javascript"> <!-- if(window==top) { document.writeln('<A HREF="allclasses-noframe.html"><B>All Classes</B></A>'); } //--> </SCRIPT> <NOSCRIPT> <A HREF="allclasses-noframe.html"><B>All Classes</B></A> </NOSCRIPT> </FONT></TD> </TR> <TR> <TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2"> SUMMARY:&nbsp;NESTED&nbsp;|&nbsp;FIELD&nbsp;|&nbsp;<A HREF="#constructor_summary">CONSTR</A>&nbsp;|&nbsp;<A HREF="#method_summary">METHOD</A></FONT></TD> <TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2"> DETAIL:&nbsp;FIELD&nbsp;|&nbsp;<A HREF="#constructor_detail">CONSTR</A>&nbsp;|&nbsp;<A HREF="#method_detail">METHOD</A></FONT></TD> </TR> </TABLE> <A NAME="skip-navbar_bottom"></A> <!-- ======== END OF BOTTOM NAVBAR ======= --> <HR> </BODY> </HTML>
jogarces/ics-311-network-metrics
Javadoc/SCCGraphDriver.html
HTML
gpl-2.0
9,211
<?php /** * @file * Contains \Drupal\faq\Form\QuestionsForm. */ namespace Drupal\faq\Form; use Drupal\Core\Form\ConfigFormBase; use Drupal\Core\Form\FormStateInterface; /** * Form for the FAQ settings page - questions tab. */ class QuestionsForm extends ConfigFormBase { /** * {@inheritdoc} */ public function getFormId() { return 'faq_questions_settings_form'; } /** * {@inheritdoc} */ protected function getEditableConfigNames() { return []; } /** * {@inheritdoc} */ public function buildForm(array $form, FormStateInterface $form_state) { $faq_settings = $this->config('faq.settings'); $display_options['questions_inline'] = $this->t('Questions inline'); $display_options['questions_top'] = $this->t('Clicking on question takes user to answer further down the page'); $display_options['hide_answer'] = $this->t('Clicking on question opens/hides answer under question'); $display_options['new_page'] = $this->t('Clicking on question opens the answer in a new page'); $form['faq_display'] = array( '#type' => 'radios', '#options' => $display_options, '#title' => $this->t('Page layout'), '#description' => $this->t('This controls how the questions and answers are displayed on the page and what happens when someone clicks on the question.'), '#default_value' => $faq_settings->get('display') ); $form['faq_questions_misc'] = array( '#type' => 'details', '#title' => $this->t('Miscellaneous layout settings'), '#open' => TRUE ); $form['faq_questions_misc']['faq_question_listing'] = array( '#type' => 'select', '#options' => array( 'ol' => $this->t('Ordered list'), 'ul' => $this->t('Unordered list'), ), '#title' => $this->t('Questions listing style'), '#description' => $this->t("This allows to select how the questions listing is presented. It only applies to the layouts: 'Clicking on question takes user to answer further down the page' and 'Clicking on question opens the answer in a new page'. An ordered listing would number the questions, whereas an unordered list will have a bullet to the left of each question."), '#default_value' => $faq_settings->get('question_listing') ); $form['faq_questions_misc']['faq_qa_mark'] = array( '#type' => 'checkbox', '#title' => $this->t('Label questions and answers'), '#description' => $this->t('This option is only valid for the "Questions Inline" and "Clicking on question takes user to answer further down the page" layouts. It labels all questions on the faq page with the "question label" setting and all answers with the "answer label" setting. 
For example these could be set to "Q:" and "A:".'), '#default_value' => $faq_settings->get('qa_mark') ); $form['faq_questions_misc']['faq_question_label'] = array( '#type' => 'textfield', '#title' => $this->t('Question Label'), '#description' => $this->t('The label to pre-pend to the question text in the "Questions Inline" layout if labelling is enabled.'), '#default_value' => $faq_settings->get('question_label') ); $form['faq_questions_misc']['faq_answer_label'] = array( '#type' => 'textfield', '#title' => $this->t('Answer Label'), '#description' => $this->t('The label to pre-pend to the answer text in the "Questions Inline" layout if labelling is enabled.'), '#default_value' => $faq_settings->get('answer_label') ); $form['faq_questions_misc']['faq_question_length'] = array( '#type' => 'radios', '#title' => $this->t('Question length'), '#options' => array( 'long' => $this->t('Display longer text'), 'short' => $this->t('Display short text'), 'both' => $this->t('Display both short and long questions'), ), '#description' => t("The length of question text to display on the FAQ page. The short question will always be displayed in the FAQ blocks."), '#default_value' => $faq_settings->get('question_length') ); $form['faq_questions_misc']['faq_question_long_form'] = array( '#type' => 'checkbox', '#title' => $this->t('Allow long question text to be configured'), '#default_value' => $faq_settings->get('question_long_form') ); $form['faq_questions_misc']['faq_hide_qa_accordion'] = array( '#type' => 'checkbox', '#title' => $this->t('Use accordion effect for "opens/hides answer under question" layout'), '#description' => $this->t('This enables an "accordion" style effect where when a question is clicked, the answer appears beneath, and is then hidden when another question is opened.'), '#default_value' => $faq_settings->get('hide_qa_accordion') ); $form['faq_questions_misc']['faq_show_expand_all'] = array( '#type' => 'checkbox', '#title' => $this->t('Show "expand / collapse all" links for collapsed questions'), '#description' => $this->t('The links will only be displayed if using the "opens/hides answer under question" or "opens/hides questions and answers under category" layouts.'), '#default_value' => $faq_settings->get('show_expand_all') ); $form['faq_questions_misc']['faq_use_teaser'] = array( '#type' => 'checkbox', '#title' => $this->t('Use answer teaser'), '#description' => t("This enables the display of the answer teaser text instead of the full answer when using the 'Questions inline' or 'Clicking on question takes user to answer further down the page' display options. This is useful when you have long descriptive text. The user can see the full answer by clicking on the question."), '#default_value' => $faq_settings->get('use_teaser') ); // This setting has no meaning in D8 since comments are fields and read more link depends on view mode settings //$form['faq_questions_misc']['faq_show_node_links'] = array( // '#type' => 'checkbox', // '#title' => $this->t('Show node links'), // '#description' => $this->t('This enables the display of links under the answer text on the faq page. Examples of these links include "Read more", "Add comment".'), // '#default_value' => $faq_settings->get('show_node_links') //); $form['faq_questions_misc']['faq_back_to_top'] = array( '#type' => 'textfield', '#title' => $this->t('"Back to Top" link text'), '#description' => $this->t('This allows the user to change the text displayed for the links which return the user to the top of the page on certain page layouts. 
Defaults to "Back to Top". Leave blank to have no link.'), '#default_value' => $faq_settings->get('back_to_top') ); $form['faq_questions_misc']['faq_disable_node_links'] = array( '#type' => 'checkbox', '#title' => $this->t('Disable question links to nodes'), '#description' => $this->t('This allows the user to prevent the questions being links to the faq node in all layouts except "Clicking on question opens the answer in a new page".'), '#default_value' => $faq_settings->get('disable_node_links'), ); $form['faq_questions_misc']['faq_default_sorting'] = array( '#type' => 'select', '#title' => $this->t('Default sorting for unordered FAQs'), '#options' => array( 'DESC' => $this->t('Date Descending'), 'ASC' => $this->t('Date Ascending'), ), '#description' => t("This controls the default ordering behaviour for new FAQ nodes which haven't been assigned a position."), '#default_value' => $faq_settings->get('default_sorting') ); return parent::buildForm($form, $form_state); } /** * {@inheritdoc} */ public function submitForm(array &$form, FormStateInterface $form_state) { // Remove unnecessary values. $form_state->cleanValues(); $this->configFactory()->getEditable('faq.settings') ->set('display', $form_state->getValue('faq_display')) ->set('question_listing', $form_state->getValue('faq_question_listing')) ->set('qa_mark', $form_state->getValue('faq_qa_mark')) ->set('question_label', $form_state->getValue('faq_question_label')) ->set('answer_label', $form_state->getValue('faq_answer_label')) ->set('question_length', $form_state->getValue('faq_question_length')) ->set('question_long_form', $form_state->getValue('faq_question_long_form')) ->set('hide_qa_accordion', $form_state->getValue('faq_hide_qa_accordion')) ->set('show_expand_all', $form_state->getValue('faq_show_expand_all')) ->set('use_teaser', $form_state->getValue('faq_use_teaser')) ->set('back_to_top', $form_state->getValue('faq_back_to_top')) ->set('disable_node_links', $form_state->getValue('faq_disable_node_links')) ->set('default_sorting', $form_state->getValue('faq_default_sorting')) ->save(); parent::submitForm($form, $form_state); } }
psunthar/intapp
docroot/modules/contrib/faq/src/Form/QuestionsForm.php
PHP
gpl-2.0
8,970
<?php if (cfr('CAPAB')) { $altercfg = rcms_parse_ini_file(CONFIG_PATH . "alter.ini"); if ($altercfg['CAPABDIR_ENABLED']) { $capabilities = new CapabilitiesDirectory(); //process deletion if (wf_CheckGet(array('delete'))) { if (cfr('ROOT')) { $capabilities->deleteCapability($_GET['delete']); rcms_redirect("?module=capabilities"); } else { show_error(__('Permission denied')); } } //process creation if (wf_CheckPost(array('newaddress', 'newphone'))) { $newaddress = $_POST['newaddress']; $newphone = $_POST['newphone']; @$newnotes = $_POST['newnotes']; $capabilities->addCapability($newaddress, $newphone, $newnotes); rcms_redirect("?module=capabilities"); } //show editing form if (wf_CheckGet(array('edit'))) { //editing processing if (wf_CheckPost(array('editaddress', 'editphone'))) { $capabilities->editCapability($_GET['edit'], $_POST['editaddress'], $_POST['editphone'], $_POST['editstateid'], @$_POST['editnotes'], @$_POST['editprice'], $_POST['editemployeeid']); rcms_redirect("?module=capabilities"); } show_window(__('Edit'), $capabilities->editForm($_GET['edit'])); } //show current states editor if (wf_CheckGet(array('states'))) { //creating new state if (wf_CheckPost(array('createstate', 'createstatecolor'))) { $capabilities->statesCreate($_POST['createstate'], $_POST['createstatecolor']); rcms_redirect("?module=capabilities&states=true"); } //deleting existing state if (wf_CheckGet(array('deletestate'))) { $capabilities->statesDelete($_GET['deletestate']); rcms_redirect("?module=capabilities&states=true"); } if (!wf_CheckGet(array('editstate'))) { show_window(__('Create new states'), $capabilities->statesAddForm()); show_window(__('Available states'), $capabilities->statesList()); } else { //editing of existing states if (wf_CheckPost(array('editstate', 'editstatecolor'))) { $capabilities->statesChange($_GET['editstate'], $_POST['editstate'], $_POST['editstatecolor']); rcms_redirect("?module=capabilities&states=true"); } show_window(__('Edit'), $capabilities->statesEditForm($_GET['editstate'])); } } //show available if (!wf_CheckGet(array('edit'))) { if (!wf_CheckGet(array('states'))) { show_window(__('Available connection capabilities'), $capabilities->render()); } } } else { show_error(__('This module is disabled')); } } else { show_error(__('You cant control this module')); } ?>
mehulsbhatt/Ubilling
modules/general/capabilities/index.php
PHP
gpl-2.0
3,022
<?php // $Id: fieldset.tpl.php,v 1.1.2.3.2.1 2010/11/24 21:31:56 adrinux Exp $ ?> <?php print $pre; ?> <div <?php print drupal_attributes($attributes_array); ?>> <?php if ($title): ?> <h2 class='fieldset-title'> <?php print $title; ?> </h2> <?php endif; ?> <?php if ($content): ?> <div class='fieldset-content clearfix'> <?php print $content; ?> </div> <?php endif; ?> </div> <?php print $post; ?>
google-code/app7
sites/all/themes/clean/fieldset.tpl.php
PHP
gpl-2.0
438
<?php /** * @package Joomla.Administrator * @subpackage com_weblinks * * @copyright Copyright (C) 2005 - 2015 Open Source Matters, Inc. All rights reserved. * @license GNU General Public License version 2 or later; see LICENSE.txt */ use \AcceptanceTester; class AdministratorWeblinksCest { private $title; public function __construct() { // This way works just fine, but not 100% sure if that is the recommended way: $this->title = 'automated testing' . rand(1,100); } public function administratorCreateWeblink(AcceptanceTester $I) { $I->am('Administrator'); $I->wantToTest('Weblink creation in /administrator/'); $I->doAdministratorLogin(); $I->amGoingTo('Navigate to Weblinks page in /administrator/'); $I->amOnPage('administrator/index.php?option=com_weblinks'); $I->waitForText('Web Links Manager: Web Links','5',['css' => 'h1']); $I->expectTo('see weblinks page'); $I->checkForPhpNoticesOrWarnings(); $I->amGoingTo('try to save a weblink with a filled title and URL'); $I->click(['xpath'=> "//button[@onclick=\"Joomla.submitbutton('weblink.add')\"]"]); $I->waitForText('Web Links Manager: Web Link','5',['css' => 'h1']); $I->fillField(['id' => 'jform_title'], $this->title); $I->fillField(['id' => 'jform_url'],'http://example.com/automated_testing' . $this->title); $I->click(['xpath'=> "//button[@onclick=\"Joomla.submitbutton('weblink.save')\"]"]); $I->waitForText('Web Links Manager: Web Link','5',['css' => 'h1']); $I->expectTo('see a success message and the weblink added after saving the weblink'); $I->see('Web link successfully saved',['id' => 'system-message-container']); $I->see($this->title,['id' => 'weblinkList']); } /** * @depends administratorCreateWeblink * * @param AcceptanceTester $I */ public function administratorCreateWeblinkTrash(AcceptanceTester $I) { $I->am('Administrator'); $I->wantToTest('Weblink removal in /administrator/'); $I->doAdministratorLogin(); $I->amGoingTo('Navigate to Weblinks page in /administrator/'); $I->amOnPage('administrator/index.php?option=com_weblinks'); $I->waitForText('Web Links Manager: Web Links','5',['css' => 'h1']); $I->expectTo('see weblinks page'); $I->checkForPhpNoticesOrWarnings(); $I->amGoingTo('Search the just saved weblink'); $I->fillField(['id' => 'filter_search'], $this->title . 
"\n"); $I->waitForText('Web Links Manager: Web Links','5',['css' => 'h1']); $I->expectTo('see weblinks page'); $I->checkForPhpNoticesOrWarnings(); $I->amGoingTo('Delete the just saved weblink'); $I->click(['id' => 'cb0']); $I->click(['xpath'=> "//button[@onclick=\"if (document.adminForm.boxchecked.value==0){alert('Please first make a selection from the list');}else{ Joomla.submitbutton('weblinks.trash')}\"]"]); $I->waitForText('Web Links Manager: Web Link','5',['css' => 'h1']); $I->expectTo('see a success message and the weblink removed from the list'); $I->see('Web link successfully trashed',['id' => 'system-message-container']); $I->cantSee($this->title,['id' => 'weblinkList']); } public function administratorCreateWeblinkWithoutTitleFails(AcceptanceTester $I) { $I->am('Administrator'); $I->wantToTest('Weblink creation without title fails in /administrator/'); $I->doAdministratorLogin(); $I->amGoingTo('Navigate to Weblinks page in /administrator/'); $I->amOnPage('administrator/index.php?option=com_weblinks'); $I->waitForText('Web Links Manager: Web Links','5',['css' => 'h1']); $I->expectTo('see weblinks page'); $I->checkForPhpNoticesOrWarnings(); $I->amGoingTo('try to save a weblink with empty title and it should fail'); $I->click(['xpath'=> "//button[@onclick=\"Joomla.submitbutton('weblink.add')\"]"]); $I->waitForText('Web Links Manager: Web Link','5',['css' => 'h1']); $I->click(['xpath'=> "//button[@onclick=\"Joomla.submitbutton('weblink.apply')\"]"]); $I->expectTo('see an error when trying to save a weblink without title and without URL'); $I->see('Invalid field: Title',['id' => 'system-message-container']); $I->see('Invalid field: URL',['id' => 'system-message-container']); } }
javigomez/weblinks
tests/acceptance/AdministratorWeblinksCest.php
PHP
gpl-2.0
4,140
<article id="post-<?php the_ID(); ?>" <?php post_class('post__holder'); ?>> <?php formaticons(); ?> <header class="post-header"> <?php if(!is_singular()) : ?> <h2 class="post-title"><a href="<?php the_permalink(); ?>" title="<?php echo theme_locals('permalink_to');?> <?php the_title(); ?>"><?php the_title(); ?></a></h2> <?php else :?> <h2 class="post-title"><?php the_title(); ?></h2> <?php endif; ?> </header> <?php $post_meta = of_get_option('post_meta'); if ($post_meta=='true' || $post_meta=='') { get_template_part('includes/post-formats/post-meta'); } ?> <?php $hercules_gallery_type = get_post_meta(get_the_ID(), 'tz_gallery_format', true); $hercules_targetheight = get_post_meta(get_the_ID(), 'tz_gallery_targetheight', true); $hercules_gallery_margins = get_post_meta(get_the_ID(), 'tz_gallery_margins', true); $hercules_gallery_captions = get_post_meta(get_the_ID(), 'tz_gallery_captions', true); $hercules_gallery_randomize = get_post_meta(get_the_ID(), 'tz_gallery_randomize', true); $hercules_random = hs_gener_random(10); ?> <div class="post-thumb clearfix"> <?php if ($hercules_gallery_type=='slideshow') { global $hercules_add_owl; $hercules_add_owl = true; ?> <script type="text/javascript"> jQuery(window).load(function() { jQuery("#owl-demo_<?php echo $hercules_random ?>").owlCarousel({ autoPlay : 5000, stopOnHover : true, navigation:false, //items : 2, paginationSpeed : 5000, goToFirstSpeed : 2000, singleItem : true, autoHeight : false, transitionStyle:"fade" }); }); </script> <!-- Slider --> <div id="owl-demo_<?php echo $hercules_random ?>" class="owl-carousel"> <?php $hercules_attachments = get_children(array('post_parent' => get_the_ID(), 'numberposts' => -1, 'post_type' => 'attachment', 'post_mime_type' => 'image' )); if ($hercules_attachments) : foreach ($hercules_attachments as $attachment) : ?> <div class="featured-thumbnail thumbnail large"><?php echo wp_get_attachment_image($attachment->ID, 'slideshow-post'); ?></div> <?php endforeach; endif; ?> </div> <!-- /Slider --> <?php } ?> <!-- Grid --> <?php if ($hercules_gallery_type=='grid') { global $hercules_add_collageplus; $hercules_add_collageplus = true; ?> <script type="text/javascript"> jQuery(document).ready(function () { jQuery(".justifiedgall_<?php echo $hercules_random ?>").justifiedGallery({ rowHeight: <?php if( ! empty( $hercules_targetheight ) ) {echo $hercules_targetheight;}else{echo '400';} ?>, fixedHeight: false, lastRow: 'justify', captions : <?php if( ! empty( $hercules_gallery_captions ) ) {echo $hercules_gallery_captions;}else{echo 'true';} ?>, margins: <?php if( ! empty( $hercules_gallery_margins ) ) {echo $hercules_gallery_margins;}else{echo '10';} ?>, randomize: <?php if( ! 
empty( $hercules_targetheight ) ) {echo $hercules_gallery_randomize;}else{echo 'false';} ?> }); }); </script> <div class="zoom-gallery justifiedgall_<?php echo $hercules_random ?>" style="margin: 0px 0px 1.5em;"> <div class="spinner"><span></span><span></span><span></span></div> <?php $hercules_attachments = get_children(array('post_parent' => get_the_ID(), 'numberposts' => -1, 'post_type' => 'attachment', 'post_mime_type' => 'image' )); if ($hercules_attachments) : foreach ($hercules_attachments as $attachment) : $attachment_url = wp_get_attachment_image_src( $attachment->ID, 'full' ); $caption = apply_filters('the_title', $attachment->post_excerpt); ?> <a class="zoomer" title="<?php echo apply_filters('the_title', $attachment->post_excerpt); ?>" data-source="<?php echo $attachment_url[0]; ?>" href="<?php echo $attachment_url[0]; ?>"><?php echo wp_get_attachment_image($attachment->ID, 'full'); ?></a> <?php endforeach; endif; ?> </div> <?php } ?> <!-- /Grid --> <div class="row-fluid"> <div class="span12"> <?php $full_content = of_get_option('full_content'); if(!is_singular() && $full_content!='true') : ?> <!-- Post Content --> <div class="post_content"> <?php $post_excerpt = of_get_option('post_excerpt'); $blog_excerpt = of_get_option('blog_excerpt_count'); ?> <?php if ($post_excerpt=='true') { ?> <div class="excerpt"> <?php $content = get_the_content(); if (has_excerpt()) { the_excerpt(); } else { echo limit_text($content,$blog_excerpt); } ?> </div> <?php } else if ($post_excerpt=='') { the_content('<div class="readmore-button">'.theme_locals("continue_reading").'</div>'); wp_link_pages('before=<div class="pagelink">&after=</div>'); ?> <div class="clear"></div> <?php } ?> <?php $readmore_button = of_get_option('readmore_button'); if ($readmore_button=='yes') { ?> <div class="readmore-button"> <a href="<?php the_permalink() ?>" class=""><?php echo theme_locals("continue_reading"); ?></a> </div> <div class="clear"></div> <?php } ?> </div> <?php else :?> <!-- Post Content --> <div class="post_content"> <?php the_content('<div class="readmore-button">'.theme_locals("continue_reading").'</div>'); ?> <?php wp_link_pages('before=<div class="pagelink">&after=</div>'); ?> <div class="clear"></div> </div> <!-- //Post Content --> <?php endif; ?> </div> </div> </div> <?php get_template_part( 'includes/post-formats/share-buttons' ); ?> </article><!--//.post__holder-->
FelixNong1990/andy
wp-content/themes/BUZZBLOG-theme/includes/post-formats/gallery.php
PHP
gpl-2.0
5,576
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=US-ASCII"> <title>Function deconstruct</title> <link rel="stylesheet" href="../../../../doc/src/boostbook.css" type="text/css"> <meta name="generator" content="DocBook XSL Stylesheets V1.79.1"> <link rel="home" href="../../index.html" title="The Boost C++ Libraries BoostBook Documentation Subset"> <link rel="up" href="../../signals2/reference.html#header.boost.signals2.deconstruct_hpp" title="Header &lt;boost/signals2/deconstruct.hpp&gt;"> <link rel="prev" href="scoped_connection.html" title="Class scoped_connection"> <link rel="next" href="deconstruct_access.html" title="Class deconstruct_access"> </head> <body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"> <table cellpadding="2" width="100%"><tr> <td valign="top"><img alt="Boost C++ Libraries" width="277" height="86" src="../../../../boost.png"></td> <td align="center"><a href="../../../../index.html">Home</a></td> <td align="center"><a href="../../../../libs/libraries.htm">Libraries</a></td> <td align="center"><a href="http://www.boost.org/users/people.html">People</a></td> <td align="center"><a href="http://www.boost.org/users/faq.html">FAQ</a></td> <td align="center"><a href="../../../../more/index.htm">More</a></td> </tr></table> <hr> <div class="spirit-nav"> <a accesskey="p" href="scoped_connection.html"><img src="../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../../signals2/reference.html#header.boost.signals2.deconstruct_hpp"><img src="../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../index.html"><img src="../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="deconstruct_access.html"><img src="../../../../doc/src/images/next.png" alt="Next"></a> </div> <div class="refentry"> <a name="boost.signals2.deconstruct"></a><div class="titlepage"></div> <div class="refnamediv"> <h2><span class="refentrytitle">Function deconstruct</span></h2> <p>boost::signals2::deconstruct &#8212; Create a <code class="computeroutput">shared_ptr</code> with support for post-constructors and pre-destructors.</p> </div> <h2 xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" class="refsynopsisdiv-title">Synopsis</h2> <div xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" class="refsynopsisdiv"><pre class="synopsis"><span class="comment">// In header: &lt;<a class="link" href="../../signals2/reference.html#header.boost.signals2.deconstruct_hpp" title="Header &lt;boost/signals2/deconstruct.hpp&gt;">boost/signals2/deconstruct.hpp</a>&gt; </span> <span class="keyword">template</span><span class="special">&lt;</span><span class="keyword">typename</span> T<span class="special">&gt;</span> <a class="link" href="postconstructor_invoker.html" title="Class postconstructor_invoker">postconstructor_invoker</a><span class="special">&lt;</span><span class="identifier">T</span><span class="special">&gt;</span> <span class="identifier">deconstruct</span><span class="special">(</span><span class="special">)</span><span class="special">;</span> <span class="keyword">template</span><span class="special">&lt;</span><span class="keyword">typename</span> T<span class="special">,</span> <span class="keyword">typename</span> A1<span class="special">&gt;</span> <a class="link" href="postconstructor_invoker.html" title="Class 
postconstructor_invoker">postconstructor_invoker</a><span class="special">&lt;</span><span class="identifier">T</span><span class="special">&gt;</span> <span class="identifier">deconstruct</span><span class="special">(</span><span class="keyword">const</span> <span class="identifier">A1</span> <span class="special">&amp;</span> arg1<span class="special">)</span><span class="special">;</span> <span class="keyword">template</span><span class="special">&lt;</span><span class="keyword">typename</span> T<span class="special">,</span> <span class="keyword">typename</span> A1<span class="special">,</span> <span class="keyword">typename</span> A2<span class="special">&gt;</span> <a class="link" href="postconstructor_invoker.html" title="Class postconstructor_invoker">postconstructor_invoker</a><span class="special">&lt;</span><span class="identifier">T</span><span class="special">&gt;</span> <span class="identifier">deconstruct</span><span class="special">(</span><span class="keyword">const</span> <span class="identifier">A1</span> <span class="special">&amp;</span> arg1<span class="special">,</span> <span class="keyword">const</span> <span class="identifier">A2</span> <span class="special">&amp;</span> arg2<span class="special">)</span><span class="special">;</span> <span class="keyword">template</span><span class="special">&lt;</span><span class="keyword">typename</span> T<span class="special">,</span> <span class="keyword">typename</span> A1<span class="special">,</span> <span class="keyword">typename</span> A2, ...<span class="special">,</span> <span class="keyword">typename</span> AN<span class="special">&gt;</span> <a class="link" href="postconstructor_invoker.html" title="Class postconstructor_invoker">postconstructor_invoker</a><span class="special">&lt;</span><span class="identifier">T</span><span class="special">&gt;</span> <span class="identifier">deconstruct</span><span class="special">(</span><span class="keyword">const</span> <span class="identifier">A1</span> <span class="special">&amp;</span> arg1<span class="special">,</span> <span class="keyword">const</span> <span class="identifier">A2</span> <span class="special">&amp;</span> arg2<span class="special">,</span> <span class="special">...</span><span class="special">,</span> <span class="keyword">const</span> <span class="identifier">AN</span> <span class="special">&amp;</span> argN<span class="special">)</span><span class="special">;</span></pre></div> <div class="refsect1"> <a name="idm45946341826720"></a><h2>Description</h2> <p>Creates an object and its owning <code class="computeroutput">shared_ptr&lt;T&gt;</code> (wrapped inside a <code class="computeroutput"><a class="link" href="postconstructor_invoker.html" title="Class postconstructor_invoker">postconstructor_invoker</a></code>) using only a single allocation, in a manner similar to that of <code class="computeroutput">boost::make_shared()</code>. In addition, <code class="computeroutput">deconstruct</code> supports postconstructors and predestructors. The returned <code class="computeroutput">shared_ptr</code> is wrapped inside a <code class="computeroutput"><a class="link" href="postconstructor_invoker.html" title="Class postconstructor_invoker">postconstructor_invoker</a></code> in order to provide the user with an opportunity to pass arguments to a postconstructor, while insuring the postconstructor is run before the wrapped <code class="computeroutput">shared_ptr</code> is accessible. 
</p> <p> In order to use <code class="computeroutput">deconstruct</code> you must define a postconstructor for your class. More specifically, you must define an <code class="computeroutput">adl_postconstruct</code> function which can be found via argument-dependent lookup. Typically, this means defining an <code class="computeroutput">adl_postconstruct</code> function in the same namespace as its associated class. See the reference for <code class="computeroutput"><a class="link" href="postconstructor_invoker.html" title="Class postconstructor_invoker">postconstructor_invoker</a></code> for a specification of what arguments are passed to the <code class="computeroutput">adl_postconstruct</code> call. </p> <p> Optionally, you may define a predestructor for your class. This is done by defining an <code class="computeroutput">adl_predestruct</code> function which may be found by argument-dependent lookup. The deleter of the <code class="computeroutput">shared_ptr</code> created by <code class="computeroutput">deconstruct</code> will make an unqualified call to <code class="computeroutput">adl_predestruct</code> with a single argument: a pointer to the object which is about to be deleted. As a convenience, the pointer will always be cast to point to a non-const type before being passed to <code class="computeroutput">adl_predestruct</code>. If no user-defined <code class="computeroutput">adl_predestruct</code> function is found via argument-dependent lookup, a default function (which does nothing) will be used. After <code class="computeroutput">adl_predestruct</code> is called, the deleter will delete the object with <code class="computeroutput">checked_delete</code>. </p> <p> Any arguments passed to a <code class="computeroutput">deconstruct()</code> call are forwarded to the matching constructor of the template type <code class="computeroutput">T</code>. Arguments may also be passed to the class' associated <code class="computeroutput">adl_postconstruct</code> function by using the <code class="computeroutput"><a class="link" href="postconstructor_invoker.html#idm45443338478560-bb">postconstructor_invoker::postconstruct()</a></code> methods. </p> <div class="variablelist"><table border="0" class="variablelist compact"> <colgroup> <col align="left" valign="top"> <col> </colgroup> <tbody> <tr> <td><p><span class="term">Notes:</span></p></td> <td> <p>If your compiler supports the C++11 features of rvalue references and variadic templates, then <code class="computeroutput">deconstruct</code> will perform perfect forwarding of arguments to the <code class="computeroutput">T</code> constructor, using a prototype of: </p> <pre class="programlisting"><span class="keyword">template</span><span class="special">&lt;</span> <span class="keyword">typename</span> <span class="identifier">T</span><span class="special">,</span> <span class="keyword">typename</span><span class="special">...</span> <span class="identifier">Args</span> <span class="special">&gt;</span> <span class="identifier">postconstructor_invoker</span><span class="special">&lt;</span> <span class="identifier">T</span> <span class="special">&gt;</span> <span class="identifier">deconstruct</span><span class="special">(</span> <span class="identifier">Args</span> <span class="special">&amp;&amp;</span> <span class="special">...</span> <span class="identifier">args</span> <span class="special">)</span><span class="special">;</span></pre> <p>Otherwise, argument forwarding is performed via const references, as specified in the synopsis. 
In order to pass non-const references to a constructor, you will need to wrap them in reference wrappers using boost::ref. </p> <p>You may give all the <code class="computeroutput">deconstruct</code> overloads access to your class' private and protected constructors by declaring <a class="link" href="deconstruct_access.html" title="Class deconstruct_access">deconstruct_access</a> a friend. Using private constructors in conjunction with <a class="link" href="deconstruct_access.html" title="Class deconstruct_access">deconstruct_access</a> can be useful to ensure your objects are only created by <code class="computeroutput">deconstruct</code>, and thus their postconstructors or predestructors will always be called. </p> </td> </tr> <tr> <td><p><span class="term">Returns:</span></p></td> <td><p>A <code class="computeroutput">postconstructor_invoker&lt;T&gt;</code> owning a newly allocated object of type <code class="computeroutput">T</code>.</p></td> </tr> </tbody> </table></div> </div> </div> <table xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" width="100%"><tr> <td align="left"></td> <td align="right"><div class="copyright-footer">Copyright &#169; 2001-2004 Douglas Gregor<br>Copyright &#169; 2007-2009 Frank Mori Hess<p>Distributed under the Boost Software License, Version 1.0. (See accompanying file <code class="filename">LICENSE_1_0.txt</code> or copy at <a href="http://www.boost.org/LICENSE_1_0.txt" target="_top">http://www.boost.org/LICENSE_1_0.txt</a>)</p> </div></td> </tr></table> <hr> <div class="spirit-nav"> <a accesskey="p" href="scoped_connection.html"><img src="../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../../signals2/reference.html#header.boost.signals2.deconstruct_hpp"><img src="../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../index.html"><img src="../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="deconstruct_access.html"><img src="../../../../doc/src/images/next.png" alt="Next"></a> </div> </body> </html>
FFMG/myoddweb.piger
myodd/boost/doc/html/boost/signals2/deconstruct.html
HTML
gpl-2.0
13,257
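A minimal usage sketch for the deconstruct() reference above. The class Y, its namespace, its members, and the printed messages are illustrative assumptions rather than part of the Boost documentation, and the exact adl_postconstruct argument list should be checked against the postconstructor_invoker reference; the sketch only mirrors the pattern the description spells out (a deconstruct_access friend declaration, an ADL-found postconstructor, and an optional predestructor).

#include <boost/signals2/deconstruct.hpp>
#include <boost/shared_ptr.hpp>
#include <iostream>

namespace mylib {

class Y
{
public:
    int value() const { return n_; }

    // Give all deconstruct() overloads access to the private constructor,
    // so Y can only be created through deconstruct<Y>(...).
    friend class boost::signals2::deconstruct_access;

private:
    explicit Y(int n) : n_(n) {}
    int n_;

    // Postconstructor found via argument-dependent lookup; it receives the
    // owning shared_ptr and a raw pointer to the freshly built object.
    template<typename T>
    friend void adl_postconstruct(const boost::shared_ptr<T> &owner, Y *self)
    {
        std::cout << "postconstructed, n = " << self->n_ << "\n";
    }

    // Optional predestructor; the deleter calls it with a non-const pointer
    // just before checked_delete destroys the object.
    friend void adl_predestruct(Y *self)
    {
        std::cout << "about to be destroyed\n";
    }
};

} // namespace mylib

int main()
{
    // Constructor arguments are forwarded by deconstruct(); assigning the
    // returned postconstructor_invoker<Y> to a shared_ptr runs the
    // postconstructor before the pointer becomes accessible.
    boost::shared_ptr<mylib::Y> y = boost::signals2::deconstruct<mylib::Y>(42);
    std::cout << y->value() << "\n";
    return 0;
}

Keeping the constructor private and befriending deconstruct_access, as the notes above suggest, is what guarantees the postconstructor and predestructor always run, since the object cannot be created any other way.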
--[[ Copyright (C) 2013-2015 Draios inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. --]] view_info = { id = "procs_errors", name = "Processes Errors", description = "This view shows system error counters for processes. Errors are grouped into 4 categories: file I/O, network I/O, memory allocation and 'other'.", tips = { "If you click 'enter' on a selection in this chart, you will be able to see the specific errors that the process is generating.", "Digging into a process by clicking on F6 will let you explore the system calls for that specific process and see the full details about what's causing the errors." }, tags = {"Default"}, filter = "evt.type!=switch", view_type = "table", applies_to = {"", "container.id", "fd.name", "fd.sport", "evt.type", "fd.directory", "k8s.pod.id", "k8s.rc.id", "k8s.svc.id", "k8s.ns.id"}, drilldown_target = "errors", use_defaults = true, columns = { { name = "NA", field = "proc.pid", is_key = true }, { name = "FILE", field = "evt.count.error.file", description = "Number of file I/O errors generated by the process during the sample interval. On trace files, this is the total for the whole file.", colsize = 8, aggregation = "SUM" }, { name = "NET", field = "evt.count.error.net", description = "Number of network I/O errors generated by the process during the sample interval. On trace files, this is the total for the whole file.", colsize = 8, aggregation = "SUM" }, { name = "MEMORY", field = "evt.count.error.memory", description = "Number of memory allocation/release related errors generated by the process during the sample interval. On trace files, this is the total for the whole file.", colsize = 8, aggregation = "SUM" }, { name = "OTHER", field = "evt.count.error.other", description = "Number of errors generated by the process that don't fall in any of the previous categories. E.g. signal or event related errors. On trace files, this is the total for the whole file.", colsize = 8, aggregation = "SUM" }, { name = "PID", description = "Process PID.", field = "proc.pid", colsize = 8, }, { tags = {"containers"}, name = "Container", field = "container.name", description = "Name of the container. What this field contains depends on the containerization technology. 
For example, for docker this is the content of the 'NAMES' column in 'docker ps'", colsize = 20 }, { name = "Command", description = "Full command line of the process.", field = "proc.exeline", aggregation = "MAX", colsize = 0 } }, actions = { { hotkey = "9", command = "kill -9 %proc.pid", description = "kill -9", ask_confirmation = true, wait_finish = false }, { hotkey = "c", command = "gcore %proc.pid", description = "generate core", }, { hotkey = "g", command = "gdb -p %proc.pid", description = "gdb attach", wait_finish = false }, { hotkey = "k", command = "kill %proc.pid", description = "kill", ask_confirmation = true, wait_finish = false }, { hotkey = "l", command = "ltrace -p %proc.pid", description = "ltrace", }, { hotkey = "s", command = "gdb -p %proc.pid --batch --quiet -ex \"thread apply all bt full\" -ex \"quit\"", description = "print stack", }, }, }
etown/sysdig
userspace/sysdig/chisels/v_procs_errors.lua
Lua
gpl-2.0
3,873
#!/usr/bin/python #GraphML-Topo-to-Mininet-Network-Generator # # This file parses Network Topologies in GraphML format from the Internet Topology Zoo. # A python file for creating Mininet Topologies will be created as Output. # Files have to be in the same directory. # # Arguments: # -f [filename of GraphML input file] # --file [filename of GraphML input file] # -o [filename of GraphML output file] # --output [filename of GraphML output file] # -b [number as integer for bandwidth in mbit] # --bw [number as integer for bandwidth in mbit] # --bandwidth [number as integer for bandwidth in mbit] # -c [controller ip as string] # --controller [controller ip as string] # # Without any input, program will terminate. # Without specified output, outputfile will have the same name as the input file. # This means, the argument for the outputfile can be omitted. # Parameters for bandwith and controller ip have default values, if they are omitted, too. # # # sjas # Wed Jul 17 02:59:06 PDT 2013 # # # TODO's: # - fix double name error of some topologies # - fix topoparsing (choose by name, not element <d..>) # = topos with duplicate labels # - use 'argparse' for script parameters, eases help creation # ################################################################################# import xml.etree.ElementTree as ET import sys import math import re from sys import argv input_file_name = '' output_file_name = '' bandwidth_argument = '' controller_ip = '' # first check commandline arguments for i in range(len(argv)): if argv[i] == '-f': input_file_name = argv[i+1] if argv[i] == '--file': input_file_name = argv[i+1] if argv[i] == '-o': output_file_name = argv[i+1] if argv[i] == '--output': output_file_name = argv[i+1] if argv[i] == '-b': bandwidth_argument = argv[i+1] if argv[i] == '--bw': bandwidth_argument = argv[i+1] if argv[i] == '--bandwidth': bandwidth_argument = argv[i+1] if argv[i] == '-c': controller_ip = argv[i+1] if argv[i] == '--controller': controller_ip = argv[i+1] # terminate when inputfile is missing if input_file_name == '': sys.exit('\n\tNo input file was specified as argument....!') # define string fragments for output later on outputstring_1 = '''#!/usr/bin/python """ Custom topology for Mininet, generated by GraphML-Topo-to-Mininet-Network-Generator. """ from mininet.topo import Topo from mininet.net import Mininet from mininet.node import RemoteController from mininet.node import Node from mininet.node import CPULimitedHost from mininet.link import TCLink from mininet.cli import CLI from mininet.log import setLogLevel from mininet.util import dumpNodeConnections class GeneratedTopo( Topo ): "Internet Topology Zoo Specimen." def __init__( self, **opts ): "Create a topology." # Initialize Topology Topo.__init__( self, **opts ) ''' outputstring_2a=''' # add nodes, switches first... ''' outputstring_2b=''' # ... 
and now hosts ''' outputstring_3a=''' # add edges between switch and corresponding host ''' outputstring_3b=''' # add edges between switches ''' outputstring_4a=''' topos = { 'generated': ( lambda: GeneratedTopo() ) } # HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS # the following code produces an executable script working with a remote controller # and providing ssh access to the the mininet hosts from within the ubuntu vm ''' outputstring_4b = ''' def setupNetwork(controller_ip): "Create network and run simple performance test" # check if remote controller's ip was set # else set it to localhost topo = GeneratedTopo() if controller_ip == '': #controller_ip = '10.0.2.2'; controller_ip = '127.0.0.1'; net = Mininet(topo=topo, controller=lambda a: RemoteController( a, ip=controller_ip, port=6633 ), host=CPULimitedHost, link=TCLink) return net def connectToRootNS( network, switch, ip, prefixLen, routes ): "Connect hosts to root namespace via switch. Starts network." "network: Mininet() network object" "switch: switch to connect to root namespace" "ip: IP address for root namespace node" "prefixLen: IP address prefix length (e.g. 8, 16, 24)" "routes: host networks to route to" # Create a node in root namespace and link to switch 0 root = Node( 'root', inNamespace=False ) intf = TCLink( root, switch ).intf1 root.setIP( ip, prefixLen, intf ) # Start network that now includes link to root namespace network.start() # Add routes from root ns to hosts for route in routes: root.cmd( 'route add -net ' + route + ' dev ' + str( intf ) ) # Run D-ITG logger on root root.cmd('ITGLog &') def sshd( network, cmd='/usr/sbin/sshd', opts='-D' ): "Start a network, connect it to root ns, and run sshd on all hosts." switch = network.switches[ 0 ] # switch to use ip = '10.123.123.1' # our IP address on host network routes = [ '10.0.0.0/8' ] # host networks to route to connectToRootNS( network, switch, ip, 8, routes ) for host in network.hosts: host.cmd( cmd + ' ' + opts + '&' ) host.cmd( 'ITGRecv -l /tmp/ITGRecv-Logs/ITGRecv-' + host.IP() + '.log > /dev/null &' ) # DEBUGGING INFO print print "Dumping host connections" dumpNodeConnections(network.hosts) print print "*** Hosts are running sshd at the following addresses:" print for host in network.hosts: print host.name, host.IP() print print "*** Type 'exit' or control-D to shut down network" print print "*** For testing network connectivity among the hosts, wait a bit for the controller to create all the routes, then do 'pingall' on the mininet console." print CLI( network ) for host in network.hosts: host.cmd( 'kill %' + cmd ) network.stop() if __name__ == '__main__': setLogLevel('info') #setLogLevel('debug') sshd( setupNetwork(controller_ip) ) ''' #WHERE TO PUT RESULTS outputstring_to_be_exported = '' outputstring_to_be_exported += outputstring_1 #READ FILE AND DO ALL THE ACTUAL PARSING IN THE NEXT PARTS xml_tree = ET.parse(input_file_name) namespace = "{http://graphml.graphdrawing.org/xmlns}" ns = namespace # just doing shortcutting, namespace is needed often. 
#GET ALL ELEMENTS THAT ARE PARENTS OF ELEMENTS NEEDED LATER ON root_element = xml_tree.getroot() graph_element = root_element.find(ns + 'graph') # GET ALL ELEMENT SETS NEEDED LATER ON index_values_set = root_element.findall(ns + 'key') node_set = graph_element.findall(ns + 'node') edge_set = graph_element.findall(ns + 'edge') # SET SOME VARIABLES TO SAVE FOUND DATA FIRST # memomorize the values' ids to search for in current topology node_label_name_in_graphml = '' node_latitude_name_in_graphml = '' node_longitude_name_in_graphml = '' # for saving the current values node_index_value = '' node_name_value = '' node_longitude_value = '' node_latitude_value = '' # id:value dictionaries id_node_name_dict = {} # to hold all 'id: node_name_value' pairs id_longitude_dict = {} # to hold all 'id: node_longitude_value' pairs id_latitude_dict = {} # to hold all 'id: node_latitude_value' pairs # FIND OUT WHAT KEYS ARE TO BE USED, SINCE THIS DIFFERS IN DIFFERENT GRAPHML TOPOLOGIES for i in index_values_set: if i.attrib['attr.name'] == 'label' and i.attrib['for'] == 'node': node_label_name_in_graphml = i.attrib['id'] if i.attrib['attr.name'] == 'Longitude': node_longitude_name_in_graphml = i.attrib['id'] if i.attrib['attr.name'] == 'Latitude': node_latitude_name_in_graphml = i.attrib['id'] # NOW PARSE ELEMENT SETS TO GET THE DATA FOR THE TOPO # GET NODE_NAME DATA # GET LONGITUDE DATK # GET LATITUDE DATA for n in node_set: node_index_value = n.attrib['id'] #get all data elements residing under all node elements data_set = n.findall(ns + 'data') #finally get all needed values for d in data_set: #node name if d.attrib['key'] == node_label_name_in_graphml: #strip all whitespace from names so they can be used as id's node_name_value = re.sub(r'\s+', '', d.text) #longitude data if d.attrib['key'] == node_longitude_name_in_graphml: node_longitude_value = d.text #latitude data if d.attrib['key'] == node_latitude_name_in_graphml: node_latitude_value = d.text #save id:data couple id_node_name_dict[node_index_value] = node_name_value id_longitude_dict[node_index_value] = node_longitude_value id_latitude_dict[node_index_value] = node_latitude_value # STRING CREATION # FIRST CREATE THE SWITCHES AND HOSTS tempstring1 = '' tempstring2 = '' tempstring3 = '' for i in range(0, len(id_node_name_dict)): #create switch temp1 = ' ' temp1 += id_node_name_dict[str(i)] temp1 += " = self.addSwitch( 's" temp1 += str(i) temp1 += "' )\n" #create corresponding host temp2 = ' ' temp2 += id_node_name_dict[str(i)] temp2 += "_host = self.addHost( 'h" temp2 += str(i) temp2 += "' )\n" tempstring1 += temp1 tempstring2 += temp2 # link each switch and its host... 
temp3 = ' self.addLink( ' temp3 += id_node_name_dict[str(i)] temp3 += ' , ' temp3 += id_node_name_dict[str(i)] temp3 += "_host )" temp3 += '\n' tempstring3 += temp3 outputstring_to_be_exported += outputstring_2a outputstring_to_be_exported += tempstring1 outputstring_to_be_exported += outputstring_2b outputstring_to_be_exported += tempstring2 outputstring_to_be_exported += outputstring_3a outputstring_to_be_exported += tempstring3 outputstring_to_be_exported += outputstring_3b # SECOND CALCULATE DISTANCES BETWEEN SWITCHES, # set global bandwidth and create the edges between switches, # and link each single host to its corresponding switch tempstring4 = '' tempstring5 = '' distance = 0.0 latency = 0.0 for e in edge_set: # GET IDS FOR EASIER HANDLING src_id = e.attrib['source'] dst_id = e.attrib['target'] # CALCULATE DELAYS # CALCULATION EXPLANATION # # formula: (for distance) # dist(SP,EP) = arccos{ sin(La[EP]) * sin(La[SP]) + cos(La[EP]) * cos(La[SP]) * cos(Lo[EP] - Lo[SP])} * r # r = 6378.137 km # # formula: (speed of light, not within a vacuumed box) # v = 1.97 * 10**8 m/s # # formula: (latency being calculated from distance and light speed) # t = distance / speed of light # t (in ms) = ( distance in km * 1000 (for meters) ) / ( speed of light / 1000 (for ms)) # ACTUAL CALCULATION: implementing this was no fun. latitude_src = math.radians(float(id_latitude_dict[src_id])) latitude_dst = math.radians(float(id_latitude_dict[dst_id])) longitude_src = math.radians(float(id_longitude_dict[src_id])) longitude_dst = math.radians(float(id_longitude_dict[dst_id])) first_product = math.sin(latitude_dst) * math.sin(latitude_src) second_product_first_part = math.cos(latitude_dst) * math.cos(latitude_src) second_product_second_part = math.cos(longitude_dst - longitude_src) distance = math.acos(first_product + (second_product_first_part * second_product_second_part)) * 6378.137 # t (in ms) = ( distance in km * 1000 (for meters) ) / ( speed of light / 1000 (for ms)) # t = ( distance * 1000 ) / ( 1.97 * 10**8 / 1000 ) latency = ( distance * 1000 ) / ( 197000 ) # BANDWIDTH LIMITING #set bw to 10mbit if nothing was specified otherwise on startup if bandwidth_argument == '': bandwidth_argument = '10'; # ... and link all corresponding switches with each other temp4 = ' self.addLink( ' temp4 += id_node_name_dict[src_id] temp4 += ' , ' temp4 += id_node_name_dict[dst_id] temp4 += ", bw=" temp4 += bandwidth_argument temp4 += ", delay='" temp4 += str(latency) temp4 += "ms')" temp4 += '\n' # next line so i dont have to look up other possible settings #temp += "ms', loss=0, max_queue_size=1000, use_htb=True)" tempstring4 += temp4 outputstring_to_be_exported += tempstring4 outputstring_to_be_exported += outputstring_4a # this is kind of dirty, due to having to use mixed '' "" temp5 = "controller_ip = '" temp5 += controller_ip temp5 += "'\n" tempstring5 += temp5 outputstring_to_be_exported += tempstring5 outputstring_to_be_exported += outputstring_4b # GENERATION FINISHED, WRITE STRING TO FILE outputfile = '' if output_file_name == '': output_file_name = input_file_name + '-generated-Mininet-Topo.py' outputfile = open(output_file_name, 'w') outputfile.write(outputstring_to_be_exported) outputfile.close() print "Topology generation SUCCESSFUL!"
yossisolomon/assessing-mininet
parser/GraphML-Topo-to-Mininet-Network-Generator.py
Python
gpl-2.0
13,124
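As a worked restatement of the delay computation in the generator above (the comments inside the script give the same formulas in ASCII), with latitudes La and longitudes Lo in radians and r the Earth radius:

d = r \arccos\left( \sin La_{dst} \sin La_{src} + \cos La_{dst} \cos La_{src} \cos\left(Lo_{dst} - Lo_{src}\right) \right), \qquad r = 6378.137\ \mathrm{km}

t_{\mathrm{ms}} = \frac{d \cdot 1000}{v / 1000} = \frac{d}{197}, \qquad v = 1.97 \times 10^{8}\ \mathrm{m/s}

where v is the script's assumed propagation speed (roughly two thirds of c, i.e. not in vacuum). For example, two switches about 1000 km apart get a generated link delay of roughly 1000/197, about 5.08 ms, before the bandwidth limit is applied.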
using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Diagnostics; using System.Xml.XPath; using System.Xml; using SenseNet.ApplicationModel; using SenseNet.ContentRepository.Storage; namespace SenseNet.ContentRepository.Xpath { [DebuggerDisplay("<{Name} : {NodeType}")] internal abstract class ElementBase { public NavigatorContext Context { get; private set; } public Content Content { get; private set; } public string Name { get; private set; } public ElementBase Parent { get; private set; } public ElementBase FirstChild { get; private set; } public ElementBase FollowingSibling { get; private set; } public ElementBase PrecedingSibling { get; private set; } public abstract bool IsEmpty { get; } public virtual XPathNodeType NodeType { get { return XPathNodeType.Element; } } public ElementBase(NavigatorContext context, Content content, string name, ElementBase parent) { Name = name; Parent = parent; Context = context; Content = content; } private bool _firstChildCompleted; private bool _siblingsCompleted; public ElementBase GetFirstChild() { if (!_firstChildCompleted) { FirstChild = CreateFirstChild(); _firstChildCompleted = true; } else { } return FirstChild; } public abstract ElementBase CreateFirstChild(); public ElementBase GetNextElement() { if (FollowingSibling != null) return FollowingSibling; if(_siblingsCompleted) return FollowingSibling; var element = CreateNextElement(); if (element != null) { element.PrecedingSibling = this; this.FollowingSibling = element; } else { _siblingsCompleted = true; } return element; } public abstract ElementBase CreateNextElement(); public ElementBase GetPreviousElement() { return PrecedingSibling; } public virtual string[] GetAttributeNames() { return null; } public virtual string GetAttributeValue(string name) { return string.Empty; } public string CollectTextValue() { var sb = new StringBuilder(); CollectTextValue(sb); return sb.ToString(); } protected virtual void CollectTextValue(StringBuilder sb) { var child = GetFirstChild(); if (child != null) { child.CollectTextValue(sb); while ((child = child.FollowingSibling) != null) child.CollectTextValue(sb); } } internal void RemoveHashSignFromName() { Name = Name.Substring(1); } } internal abstract class ContainerElement : ElementBase { public ContainerElement(NavigatorContext context, Content content, string name, ElementBase parent) : base(context, content, name, parent) { } } internal class RootElement : ContainerElement { public override bool IsEmpty { get { return false; } } public override XPathNodeType NodeType { get { return XPathNodeType.Root; } } public RootElement(NavigatorContext context, Content content) : base(context, content, "#document", null) { context.SetRoot(this); } public override ElementBase CreateFirstChild() { return new ContentElement(this.Context, Context.MainContent, this, false, 0); } public override ElementBase CreateNextElement() { return null; } } internal class ChildrenElement : ContainerElement { public override bool IsEmpty { get { return Context.Children.Length == 0; } } public ChildrenElement(NavigatorContext context, Content content, ElementBase parent) : base(context, content, "Children", parent) { } public override ElementBase CreateFirstChild() { if (IsEmpty) return null; var content = Context.Children[0]; return new ContentElement(this.Context, content, this, true, 0); } public override ElementBase CreateNextElement() { return null; } } internal class ContentElement : ContainerElement { public override bool IsEmpty { 
get { return false; } } public bool IsChildContent { get; private set; } public int ContentIndex { get; private set; } public ContentElement(NavigatorContext context, Content content, ElementBase parent, bool isChildContent, int contentIndex) : base(context, content, "Content", parent) { IsChildContent = isChildContent; ContentIndex = contentIndex; } public override ElementBase CreateFirstChild() { return new ContentHeadElement(this.Context, this.Content, "ContentType", this); } public override ElementBase CreateNextElement() { if (!IsChildContent) return null; var index = ContentIndex + 1; if (index == Context.Children.Length) return null; var content = Context.Children[index]; return new ContentElement(Context, content, Parent, true, index); } } internal class TextElement : ElementBase { public override XPathNodeType NodeType { get { return XPathNodeType.Text; } } public override bool IsEmpty { get { return true; } } private string _textValue; public TextElement(NavigatorContext context, Content content, ElementBase parent, string textValue) : base(context, content, "#text", parent) { _textValue = textValue; } public override ElementBase CreateFirstChild() { return null; } public override ElementBase CreateNextElement() { return null; } protected override void CollectTextValue(StringBuilder sb) { sb.Append(_textValue); } } internal class ContentHeadElement : ElementBase { public override bool IsEmpty { get { return String.IsNullOrEmpty(GetValue()); } } public ContentHeadElement(NavigatorContext context, Content content, string name, ElementBase parent) : base(context, content, name, parent) { } private string _value; private bool? _hasValue; private string GetValue() { if (_hasValue != null) return _value; _value = GetFieldValue(); _hasValue = _value != null; return _value; } private string GetFieldValue() { var contentType = Content.ContentType; switch (Name) { case "ContentType": return contentType == null ? String.Empty : contentType.Name; case "ContentTypePath": return contentType == null ? String.Empty : contentType.Path; case "ContentTypeTitle": return contentType == null ? 
String.Empty : contentType.DisplayName; case "ContentName": return Content.Name; case "Icon": return contentType.Icon; case "SelfLink": return this.Content.Path; case "IsFolder": return (this.Content.ContentHandler is IFolder).ToString().ToLowerInvariant(); default: throw new SnNotSupportedException(); } } public override ElementBase CreateFirstChild() { if (GetValue() == null) return null; return new TextElement(Context, Content, this, GetValue()); } public override ElementBase CreateNextElement() { switch (Name) { case "ContentType": return new ContentHeadElement(this.Context, this.Content, "ContentTypePath", this.Parent); case "ContentTypePath": return new ContentHeadElement(this.Context, this.Content, "ContentTypeTitle", this.Parent); case "ContentTypeTitle": return new ContentHeadElement(this.Context, this.Content, "ContentName", this.Parent); case "ContentName": return new ContentHeadElement(this.Context, this.Content, "Icon", this.Parent); case "Icon": return new ContentHeadElement(this.Context, this.Content, "SelfLink", this.Parent); case "SelfLink": return new ContentHeadElement(this.Context, this.Content, "IsFolder", this.Parent); case "IsFolder": return new FieldsElement(this.Context, this.Content, this.Parent); default: throw new SnNotSupportedException("##"); } } } internal class FieldsElement : ContainerElement { internal IEnumerator<Field> FieldEnumerator { get; private set; } private bool _isEmpty; public override bool IsEmpty { get { return _isEmpty; } } public FieldsElement(NavigatorContext context, Content content, ElementBase parent) : base(context, content, "Fields", parent) { var enumerator = content.Fields.Values.AsEnumerable<Field>().GetEnumerator(); FieldEnumerator = new FieldEnumerator(enumerator); _isEmpty = !FieldEnumerator.MoveNext(); } public override ElementBase CreateFirstChild() { if (this.IsEmpty) return null; return FieldElement.Create(Context, Content, this, FieldEnumerator.Current); } public override ElementBase CreateNextElement() { return new ActionsElement(this.Context, this.Content, this.Parent); } } internal abstract class FieldElement : ElementBase { private IXmlAttributeOwner _attributeContainer; private bool _isListField; public Field Field { get; private set; } protected FieldElement(NavigatorContext context, Content content, string name, ElementBase parent, Field field) : base(context, content, name, parent) { this.Field = field; _attributeContainer = field as IXmlAttributeOwner; _isListField = field.Name[0] == '#'; if (_isListField) base.RemoveHashSignFromName(); } public override string[] GetAttributeNames() { if (_isListField) { if (_attributeContainer == null) return new[] { Field.FIELDSUBTYPEATTRIBUTENAME }; var names = _attributeContainer.GetXmlAttributeNames().ToList(); names.Add(Field.FIELDSUBTYPEATTRIBUTENAME); return names.ToArray(); } if (_attributeContainer != null) return _attributeContainer.GetXmlAttributeNames().ToArray(); return null; } public override string GetAttributeValue(string name) { if (name == Field.FIELDSUBTYPEATTRIBUTENAME) return "ContentList"; return _attributeContainer.GetXmlAttribute(name); } public override ElementBase CreateNextElement() { var enumerator = ((FieldsElement)Parent).FieldEnumerator; var hasNextField = enumerator.MoveNext(); if (!hasNextField) return null; return FieldElement.Create(Context, Content, Parent, enumerator.Current); } internal static FieldElement Create(NavigatorContext context, Content content, ElementBase parent, Field field) { if (field is IXmlChildList) return new 
ItemContainerElement(context, content, field.Name, parent, field); if (field is IRawXmlContainer) return new XmlFieldElement(context, content, field.Name, parent, field); return new SimpleFieldElement(context, content, field.Name, parent, field); } } internal class SimpleFieldElement : FieldElement { public override bool IsEmpty { get { return String.IsNullOrEmpty(GetValue()); } } public SimpleFieldElement(NavigatorContext context, Content content, string name, ElementBase parent, Field field) : base(context, content, name, parent, field) { } private string _value; private bool? _hasValue; public string GetValue() { if (_hasValue != null) return _value; _value = GetFieldValue(); _hasValue = _value != null; return _value; } private string GetFieldValue() { var value = Field.GetInnerXml(); if (String.IsNullOrEmpty(value)) return null; return value; } public override ElementBase CreateFirstChild() { if (GetValue() == null) return null; return new TextElement(Context, Content, this, GetValue()); } } internal class ItemContainerElement : FieldElement { internal IEnumerator<string> ChildValueEnumerator { get; private set; } internal string ChildItemName { get; private set; } private bool _isEmpty; public override bool IsEmpty { get { return _isEmpty; } } public ItemContainerElement(NavigatorContext context, Content content, string name, ElementBase parent, Field field) : base(context, content, name, parent, field) { var listField = (IXmlChildList)field; ChildItemName = listField.GetXmlChildName(); ChildValueEnumerator = listField.GetXmlChildValues().GetEnumerator(); _isEmpty = !ChildValueEnumerator.MoveNext(); } public override ElementBase CreateFirstChild() { if (this.IsEmpty) return null; return new ChildItemElement(Context, Content, this, ChildItemName, ChildValueEnumerator.Current); } } internal class ChildItemElement : ElementBase { private string _value; public override bool IsEmpty { get { return false; } } public ChildItemElement(NavigatorContext context, Content content, ElementBase parent, string name, string value) : base(context, content, name, parent) { _value = value; } public override ElementBase CreateFirstChild() { if (_value == null) return null; return new TextElement(Context, Content, this, _value); } public override ElementBase CreateNextElement() { var enumerator = ((ItemContainerElement)Parent).ChildValueEnumerator; var hasNextItem = enumerator.MoveNext(); if (!hasNextItem) return null; return new ChildItemElement(Context, Content, Parent, this.Name, enumerator.Current); } } internal class ActionsElement : ContainerElement { internal IEnumerator<ActionBase> ActionEnumerator { get; private set; } private bool _isEmpty; public override bool IsEmpty { get { return _isEmpty; } } public ActionsElement(NavigatorContext context, Content content, ElementBase parent) : base(context, content, "Actions", parent) { ActionEnumerator = ActionFramework.GetActionsForContentNavigator(content).GetEnumerator(); _isEmpty = !ActionEnumerator.MoveNext(); } public override ElementBase CreateFirstChild() { if (this.IsEmpty) return null; return ActionElement.Create(Context, Content, this, ActionEnumerator.Current); } public override ElementBase CreateNextElement() { if (((ContentElement)Parent).IsChildContent) return null; if (!Context.WithChildren) return null; var mainContent = Context.MainContent; if (mainContent.Children == null) return null; if (mainContent.Children.Count() == 0) return null; return new ChildrenElement(this.Context, this.Content, this.Parent); } } internal class ActionElement 
: ElementBase { private ActionBase _action; public override bool IsEmpty { get { return false; } } private ActionElement(NavigatorContext context, Content content, string name, ElementBase parent, ActionBase action) : base(context, content, name, parent) { _action = action; } public override string[] GetAttributeNames() { if (!_action.IncludeBackUrl) return new[] { ActionBase.BackUrlParameterName }; return null; } public override string GetAttributeValue(string name) { if (name == ActionBase.BackUrlParameterName) return _action.BackUrlWithParameter; return string.Empty; } public override ElementBase CreateFirstChild() { return new TextElement(Context, Content, this, _action.Uri); } public override ElementBase CreateNextElement() { var enumerator = ((ActionsElement)Parent).ActionEnumerator; var hasNextAction = enumerator.MoveNext(); if (!hasNextAction) return null; return ActionElement.Create(Context, Content, Parent, enumerator.Current); } internal static ActionElement Create(NavigatorContext context, Content content, ElementBase parent, ActionBase action) { return new ActionElement(context, content, action.Name, parent, action); } } /*-----------------------------------------------------------------*/ internal class XmlNodeWrapper : ElementBase { private XmlNode _wrappedNode; public XmlNodeWrapper(NavigatorContext context, Content content, string name, ElementBase parent, XmlNode wrappedNode) : base(context, content, name, parent) { _wrappedNode = wrappedNode; } public override bool IsEmpty { get { throw new SnNotSupportedException(); } } public override XPathNodeType NodeType { get { switch (_wrappedNode.NodeType) { case XmlNodeType.Attribute: return XPathNodeType.Attribute; case XmlNodeType.CDATA: return XPathNodeType.Text; case XmlNodeType.Comment: return XPathNodeType.Comment; case XmlNodeType.Element: return XPathNodeType.Element; case XmlNodeType.ProcessingInstruction: return XPathNodeType.ProcessingInstruction; case XmlNodeType.SignificantWhitespace: return XPathNodeType.SignificantWhitespace; case XmlNodeType.Text: return XPathNodeType.Text; case XmlNodeType.Whitespace: return XPathNodeType.Whitespace; default: throw new NotSupportedException("Not supported NodeType: " + _wrappedNode.NodeType); } } } public override string[] GetAttributeNames() { var attrs = _wrappedNode.Attributes; if (attrs.Count == 0) return null; var names = new string[attrs.Count]; for (int i = 0; i < attrs.Count; i++) names[i] = attrs[i].Name; return names; } public override string GetAttributeValue(string name) { return _wrappedNode.Attributes[name].Value; } public string Value { get { return _wrappedNode.Value; } } protected override void CollectTextValue(StringBuilder sb) { sb.Append(_wrappedNode.InnerText); } public override ElementBase CreateFirstChild() { var firstChild = _wrappedNode.FirstChild; if (firstChild == null) return null; return XmlNodeWrapper.Create(Context, Content, firstChild.LocalName, this, firstChild); } public override ElementBase CreateNextElement() { var node = _wrappedNode.NextSibling; if (node == null) return null; return Create(Context, Content, node.Name, Parent, node); } internal static XmlNodeWrapper Create(NavigatorContext context, Content content, string name, ElementBase parent, XmlNode wrappedNode) { return new XmlNodeWrapper(context, content, name, parent, wrappedNode); } } internal class XmlFieldElement : FieldElement { private const string INNERNAVIGATORROOTELEMENTNAME = "innerdocumentroot"; private XmlDocument __innerDocument; private XmlDocument InnerDocument { get { 
if (__innerDocument == null) { __innerDocument = new XmlDocument(); __innerDocument.LoadXml(String.Concat("<", INNERNAVIGATORROOTELEMENTNAME, ">", ((IRawXmlContainer)this.Field).GetRawXml(), "</", INNERNAVIGATORROOTELEMENTNAME, ">")); } return __innerDocument; } } public override bool IsEmpty { get { return String.IsNullOrEmpty(FieldValue); } } private string _fieldValue; private bool? _hasFieldValue; public string FieldValue { get { if (_hasFieldValue != null) return _fieldValue; _fieldValue = this.Field.GetData().ToString(); _hasFieldValue = _fieldValue != null; return _fieldValue; } } private string _value; private bool? _hasValue; public string GetValue() { if (_hasValue != null) return _value; _value = GetValue1(); _hasValue = _value != null; return _value; } private string GetValue1() { return InnerDocument.DocumentElement.InnerText; } protected override void CollectTextValue(StringBuilder sb) { sb.Append(InnerDocument.DocumentElement.InnerText); } public XmlFieldElement(NavigatorContext context, Content content, string name, ElementBase parent, Field field) : base(context, content, name, parent, field) { } public override ElementBase CreateFirstChild() { if (GetValue() == null) return null; var firstChild = InnerDocument.DocumentElement.FirstChild; return XmlNodeWrapper.Create(Context, Content, firstChild.LocalName, this, firstChild); } } /*=================================================================*/ internal class FieldEnumerator : IEnumerator<Field> { private IEnumerator<Field> _wrappedEnumerator; public FieldEnumerator(IEnumerator<Field> enumerator) { _wrappedEnumerator = enumerator; } public Field Current { get { return _wrappedEnumerator.Current; } } object System.Collections.IEnumerator.Current { get { return Current; } } public bool MoveNext() { while (true) { if (!_wrappedEnumerator.MoveNext()) return false; if (CurrentIsAllowed()) break; } return true; } private bool CurrentIsAllowed() { var field = Current; if (field.Name == "Name") return false; return true; } public void Reset() { _wrappedEnumerator.Reset(); } public void Dispose() { } } }
SenseNet/sensenet
src/ContentRepository/Xpath/Elements.cs
C#
gpl-2.0
25,411
<?php /** * Wrapper for Symfony Dumper + die * * PHP Version 5 * * @category PHP * @package Phputils * @author Unamata Sanatarai <unamatasanatarai@gmail.com> * @license http://www.gnu.org/copyleft/gpl.html GNU General Public License * @link https://github.com/unamatasanatarai/phputils */ use Symfony\Component\VarDumper\VarDumper; if (!function_exists('dd')) { /** * Wrapper for Symfony Dumper + die * * @return null */ function dd() { $callstack = debug_backtrace(); VarDumper::dump( 'called from: ' . $callstack[0]['file'] . ':'. $callstack[0]['line'] ); foreach (func_get_args() as $var) { VarDumper::dump($var); } die; } }
unamatasanatarai/phputils
src/phputils/dd.php
PHP
gpl-2.0
757
from django.conf.urls import url from kraut_accounts import views urlpatterns = [ url(r'^logout/$', views.accounts_logout, name='logout'), url(r'^login/$', views.accounts_login, name='login'), url(r'^changepw/$', views.accounts_change_password, name='changepw'), ]
zeroq/kraut_salad
kraut_accounts/urls.py
Python
gpl-2.0
278
#import <Foundation/Foundation.h> typedef id (^WPMapBlock)(id obj); typedef BOOL (^WPFilterBlock)(id obj); typedef id (^WPReduceBlock)(id accumulator, id obj); @interface NSArray (WPMapFilterReduce) /** Transforms values in an array The resulting array will include the results of calling mapBlock for each of the receiver array objects. If mapBlock returns nil that value will be missing from the resulting array. */ - (instancetype)wp_map:(WPMapBlock)mapBlock; /** Filters an array to only include values that satisfy the filter block */ - (instancetype)wp_filter:(WPFilterBlock)filterBlock; /** Combines the array values into a single value The reduce block is called for each value. The first time it's sent the initial value, and subsequent calls will use the result of the previous call as the accumulator. For instance, to calculate the sum of all items: [array wp_reduce:^id(id accumulator, id obj) { return @([accumulator longLongValue] + [obj longLongValue]); } withInitialValue:@0]; */ - (id)wp_reduce:(WPReduceBlock)reduceBlock withInitialValue:(id)initial; @end
ya7lelkom/WordPress-iOS
WordPress/Classes/Utility/WPMapFilterReduce.h
C
gpl-2.0
1,113
/****************************************************************************** Video Hardware for Nichibutsu Mahjong series. Driver by Takahiro Nogi <nogi@kt.rim.or.jp> 2000/01/28 - ******************************************************************************/ #include "driver.h" #include "nb1413m3.h" static int blitter_destx, blitter_desty; static int blitter_sizex, blitter_sizey; static int blitter_src_addr; static int blitter_direction_x, blitter_direction_y; static int hyhoo_gfxrom; static int hyhoo_dispflag; static int hyhoo_highcolorflag; static int hyhoo_flipscreen; static int hyhoo_screen_refresh; static mame_bitmap *hyhoo_tmpbitmap; static unsigned short *hyhoo_videoram; static unsigned short *hyhoo_videoworkram; static unsigned char *hyhoo_clut; static void hyhoo_vramflip(void); static void hyhoo_gfxdraw(void); /****************************************************************************** ******************************************************************************/ PALETTE_INIT( hyhoo ) { int i; int r, g, b; /* initialize 655 RGB lookup */ for (i = 0; i < 65536; i++) { // bbbbbggg_ggrrrrrr r = ((i >> 0) & 0x3f); g = ((i >> 6) & 0x1f); b = ((i >> 11) & 0x1f); r = ((r << 2) | (r >> 3)); g = ((g << 3) | (g >> 2)); b = ((b << 3) | (b >> 2)); palette_set_color(i, r, g, b); } } WRITE8_HANDLER( hyhoo_clut_w ) { hyhoo_clut[offset & 0x0f] = (data ^ 0xff); } /****************************************************************************** ******************************************************************************/ WRITE8_HANDLER( hyhoo_blitter_w ) { switch (offset) { case 0x00: blitter_src_addr = (blitter_src_addr & 0xff00) | data; nb1413m3_gfxradr_l_w(0, data); break; case 0x01: blitter_src_addr = (blitter_src_addr & 0x00ff) | (data << 8); nb1413m3_gfxradr_h_w(0, data); break; case 0x02: blitter_destx = data; break; case 0x03: blitter_desty = data; break; case 0x04: blitter_sizex = data; break; case 0x05: blitter_sizey = data; /* writing here also starts the blit */ hyhoo_gfxdraw(); break; case 0x06: blitter_direction_x = (data & 0x01) ? 1 : 0; blitter_direction_y = (data & 0x02) ? 1 : 0; hyhoo_flipscreen = (data & 0x04) ? 0 : 1; hyhoo_dispflag = (data & 0x08) ? 
0 : 1; hyhoo_vramflip(); break; case 0x07: break; } } WRITE8_HANDLER( hyhoo_romsel_w ) { hyhoo_gfxrom = (((data & 0xc0) >> 4) + (data & 0x03)); hyhoo_highcolorflag = data; nb1413m3_gfxrombank_w(0, data); if ((0x20000 * hyhoo_gfxrom) > (memory_region_length(REGION_GFX1) - 1)) { #ifdef MAME_DEBUG ui_popup("GFXROM BANK OVER!!"); #endif hyhoo_gfxrom &= (memory_region_length(REGION_GFX1) / 0x20000 - 1); } } /****************************************************************************** ******************************************************************************/ void hyhoo_vramflip(void) { static int hyhoo_flipscreen_old = 0; int x, y; unsigned short color1, color2; if (hyhoo_flipscreen == hyhoo_flipscreen_old) return; for (y = 0; y < (Machine->drv->screen_height / 2); y++) { for (x = 0; x < Machine->drv->screen_width; x++) { color1 = hyhoo_videoram[(y * Machine->drv->screen_width) + x]; color2 = hyhoo_videoram[((y ^ 0xff) * Machine->drv->screen_width) + (x ^ 0x1ff)]; hyhoo_videoram[(y * Machine->drv->screen_width) + x] = color2; hyhoo_videoram[((y ^ 0xff) * Machine->drv->screen_width) + (x ^ 0x1ff)] = color1; color1 = hyhoo_videoworkram[(y * Machine->drv->screen_width) + x]; color2 = hyhoo_videoworkram[((y ^ 0xff) * Machine->drv->screen_width) + (x ^ 0x1ff)]; hyhoo_videoworkram[(y * Machine->drv->screen_width) + x] = color2; hyhoo_videoworkram[((y ^ 0xff) * Machine->drv->screen_width) + (x ^ 0x1ff)] = color1; } } hyhoo_flipscreen_old = hyhoo_flipscreen; hyhoo_screen_refresh = 1; } static void update_pixel(int x, int y) { int color = hyhoo_videoram[(y * 512) + x]; plot_pixel(hyhoo_tmpbitmap, x, y, Machine->pens[color]); } static void blitter_timer_callback(int param) { nb1413m3_busyflag = 1; } void hyhoo_gfxdraw(void) { unsigned char *GFX = memory_region(REGION_GFX1); int x, y; int dx1, dx2, dy; int startx, starty; int sizex, sizey; int skipx, skipy; int ctrx, ctry; int gfxaddr; unsigned short color, color1, color2; unsigned char r, g, b; nb1413m3_busyctr = 0; hyhoo_gfxrom |= ((nb1413m3_sndrombank1 & 0x02) << 3); startx = blitter_destx + blitter_sizex; starty = blitter_desty + blitter_sizey; if (blitter_direction_x) { sizex = blitter_sizex ^ 0xff; skipx = 1; } else { sizex = blitter_sizex; skipx = -1; } if (blitter_direction_y) { sizey = blitter_sizey ^ 0xff; skipy = 1; } else { sizey = blitter_sizey; skipy = -1; } gfxaddr = (hyhoo_gfxrom << 17) + (blitter_src_addr << 1); for (y = starty, ctry = sizey; ctry >= 0; y += skipy, ctry--) { for (x = startx, ctrx = sizex; ctrx >= 0; x += skipx, ctrx--) { if ((gfxaddr > (memory_region_length(REGION_GFX1) - 1))) { #ifdef MAME_DEBUG ui_popup("GFXROM ADDRESS OVER!!"); #endif gfxaddr = 0; } color = GFX[gfxaddr++]; dx1 = (2 * x + 0) & 0x1ff; dx2 = (2 * x + 1) & 0x1ff; dy = y & 0xff; if (hyhoo_flipscreen) { dx1 ^= 0x1ff; dx2 ^= 0x1ff; dy ^= 0xff; } if (hyhoo_highcolorflag & 0x04) { // direct mode if (hyhoo_highcolorflag & 0x20) { /* least significant bits */ // src xxxxxxxx_bbbggrrr // dst xxbbbxxx_ggxxxrrr r = (((color & 0x07) >> 0) & 0x07); g = (((color & 0x18) >> 3) & 0x03); b = (((color & 0xe0) >> 5) & 0x07); color = ((b << (11 + 0)) | (g << (6 + 0)) | (r << (0 + 0))); if (color != 0xff) { hyhoo_videoram[(dy * Machine->drv->screen_width) + dx1] |= color; hyhoo_videoram[(dy * Machine->drv->screen_width) + dx2] |= color; update_pixel(dx1, dy); update_pixel(dx2, dy); } continue; } else { /* most significant bits */ // src xxxxxxxx_bbgggrrr // dst bbxxxggg_xxrrrxxx r = (((color & 0x07) >> 0) & 0x07); g = (((color & 0x38) >> 3) & 0x07); b = (((color & 0xc0) 
>> 6) & 0x03); color = ((b << (11 + 3)) | (g << (6 + 2)) | (r << (0 + 3))); if (color != 0xff) { hyhoo_videoram[(dy * Machine->drv->screen_width) + dx1] = color; hyhoo_videoram[(dy * Machine->drv->screen_width) + dx2] = color; update_pixel(dx1, dy); update_pixel(dx2, dy); } } } else { // lookup table mode if (blitter_direction_x) { // flip color1 = (color & 0x0f) >> 0; color2 = (color & 0xf0) >> 4; } else { // normal color1 = (color & 0xf0) >> 4; color2 = (color & 0x0f) >> 0; } if (hyhoo_clut[color1] != 0xff) { // src xxxxxxxx_bbgggrrr // dst bbxxxggg_xxrrrxxx r = (hyhoo_clut[color1] & 0x07) >> 0; g = (hyhoo_clut[color1] & 0x38) >> 3; b = (hyhoo_clut[color1] & 0xc0) >> 6; color1 = ((b << (11 + 3)) | (g << (6 + 2)) | (r << (0 + 3))); hyhoo_videoram[(dy * Machine->drv->screen_width) + dx1] = color1; update_pixel(dx1, dy); } if (hyhoo_clut[color2] != 0xff) { // src xxxxxxxx_bbgggrrr // dst bbxxxggg_xxrrrxxx r = (hyhoo_clut[color2] & 0x07) >> 0; g = (hyhoo_clut[color2] & 0x38) >> 3; b = (hyhoo_clut[color2] & 0xc0) >> 6; color2 = ((b << (11 + 3)) | (g << (6 + 2)) | (r << (0 + 3))); hyhoo_videoram[(dy * Machine->drv->screen_width) + dx2] = color2; update_pixel(dx2, dy); } } nb1413m3_busyctr++; } } nb1413m3_busyflag = 0; timer_set((double)nb1413m3_busyctr * TIME_IN_NSEC(2500), 0, blitter_timer_callback); } /****************************************************************************** ******************************************************************************/ VIDEO_START( hyhoo ) { hyhoo_tmpbitmap = auto_bitmap_alloc(Machine->drv->screen_width, Machine->drv->screen_height); hyhoo_videoram = auto_malloc(Machine->drv->screen_width * Machine->drv->screen_height * sizeof(UINT16)); hyhoo_videoworkram = auto_malloc(Machine->drv->screen_width * Machine->drv->screen_height * sizeof(UINT16)); hyhoo_clut = auto_malloc(0x10 * sizeof(UINT8)); memset(hyhoo_videoram, 0x0000, (Machine->drv->screen_width * Machine->drv->screen_height * sizeof(UINT16))); return 0; } /****************************************************************************** ******************************************************************************/ VIDEO_UPDATE( hyhoo ) { int x, y; if (get_vh_global_attribute_changed() || hyhoo_screen_refresh) { hyhoo_screen_refresh = 0; for (y = 0; y < Machine->drv->screen_height; y++) { for (x = 0; x < Machine->drv->screen_width; x++) { update_pixel(x, y); } } } if (hyhoo_dispflag) { copyscrollbitmap(bitmap, hyhoo_tmpbitmap, 0, 0, 0, 0, &Machine->visible_area, TRANSPARENCY_NONE, 0); } else { fillbitmap(bitmap, Machine->pens[0x0000], 0); } }
amadvance/advancemame
src/vidhrdw/hyhoo.c
C
gpl-2.0
9073
CC=gcc
CFLAGS=-Wall -Wextra -g -std=c99 -O2 -D_FILE_OFFSET_BITS=64 -Wp,-D_FORTIFY_SOURCE=2 -fstack-protector --param=ssp-buffer-size=4 -fPIC -Wl,-z,relro,-z,now,--as-needed -pie
LIBS=`pkg-config --libs gmime-2.6` -luuid
INCS=`pkg-config --cflags gmime-2.6`

recmail: recmail.c
	$(CC) $(CFLAGS) -o recmail recmail.c ${LIBS} ${INCS}

clean:
	rm -f recmail *.o
ac000/recmail
Makefile
Makefile
gpl-2.0
358
CPU = attiny167
PROGCPU = tn167
PRG = remote
SRCS = main.c version.c adc.c lin-uart.c

include ../common/Makefile.common
Beirdo/lights-controller
firmware/remote/Makefile
Makefile
gpl-2.0
122
<?php

//event-type: return-html

e("recipes/admin/design/browsers/browserFoodTypes", array(
	"section-title" => "Food types",
	"records" => RecipesModel::getRecords("foodtype")));

// End of file
pragres/recipescookbook.org
packages/recipes/admin/view/browsers/browserFoodTypes.event.php
PHP
gpl-2.0
205
<html> <head> <title>Yadex user's guide</title> <meta http-equiv="Content-Style-Type" content="text/css"> <style type="text/css"> DT { margin-bottom: 1em; margin-top: 1em; display: list-item; list-style-image: none; list-style-position: inside; list-style-type: disc; } DD { margin-bottom: 1em; margin-top: 1em } </style> </head> <body> <div align="center"> <img src="logo_small.png" alt="Fancy logo"> <br>Yadex 1.7.0 (2012-12-09) <h1>User's guide</h1> <table> <tr> <td> </td> <td width="50%" align="center"> <strong> Warning : this document is unfinished, ill-structured and incomplete. </strong> </td> <td> </td> </tr> </table> </div> <br> <br> <br> <p>Contents : <p> <!-- generated by htmltoc { --> <ul> <li><a href="#toc-1">1. Introduction</a></li> <ul> <li><a href="#toc-1-1">1.1. Yadex in one paragraph</a></li> <li><a href="#toc-1-2">1.2. What do I need ?</a></li> <ul> <li><a href="#toc-1-2-1">1.2.1. Iwad</a></li> <li><a href="#toc-1-2-2">1.2.2. Directories</a></li> <li><a href="#toc-1-2-3">1.2.3. Display</a></li> </ul> <li><a href="#toc-1-3">1.3. Compiling and installing Yadex</a></li> </ul> <li><a href="#toc-2">2. Running Yadex</a></li> <ul> <li><a href="#toc-2-1">2.1. The command line arguments</a></li> <li><a href="#opt">2.2. Options</a></li> <li><a href="#game">2.3. Specifying the game for which you want to edit</a></li> <li><a href="#pwads">2.4. Specifying the names of pwads to load</a></li> <li><a href="#env">2.5. Environment variables</a></li> </ul> <li><a href="#toc-3">3. Editing levels with Yadex</a></li> <ul> <li><a href="#toc-3-1">3.1. Using external textures, flats, etc.</a></li> <li><a href="#toc-3-2">3.2. Logging of time spent</a></li> <li><a href="#toc-3-3">3.3. The prompt commands</a></li> <li><a href="#toc-3-4">3.4. Moving around</a></li> <ul> <li><a href="#toc-3-4-1">3.4.1. Moving the pointer</a></li> <li><a href="#toc-3-4-2">3.4.2. Scrolling</a></li> <li><a href="#toc-3-4-3">3.4.3. Autoscrolling</a></li> <li><a href="#toc-3-4-4">3.4.4. Jumping</a></li> </ul> <li><a href="#toc-3-5">3.5. Zooming in and out</a></li> <li><a href="#toc-3-6">3.6. The grid</a></li> <li><a href="#toc-3-7">3.7. Inserting objects</a></li> <li><a href="#toc-3-8">3.8. Copying objects</a></li> <li><a href="#toc-3-9">3.9. Deleting objects</a></li> <li><a href="#toc-3-10">3.10. Changing the properties of objects</a></li> <li><a href="#toc-3-11">3.11. Moving objects a.k.a. drag-and-drop</a></li> <li><a href="#toc-3-12">3.12. Renumbering objects</a></li> <ul> <li><a href="#toc-3-12-1">3.12.1. Exchanging objects numbers</a></li> </ul> <li><a href="#toc-3-13">3.13. Selecting objects</a></li> <li><a href="#toc-3-14">3.14. The use of [<kbd>Shift</kbd>]</a></li> <li><a href="#toc-3-15">3.15. Sector miscellaneous operations</a></li> <li><a href="#toc-3-16">3.16. Thing miscellaneous operations</a></li> <li><a href="#toc-3-17">3.17. Setting/toggling/clearing thing flags</a></li> <li><a href="#toc-3-18">3.18. Vertex miscellaneous operations</a></li> <li><a href="#toc-3-19">3.19. Linedef miscellaneous operations</a></li> <li><a href="#toc-3-20">3.20. Setting/toggling/clearing linedef flags</a></li> <li><a href="#toc-3-21">3.21. Undoing</a></li> <li><a href="#toc-3-22">3.22. Cut-and-paste from one level to another</a></li> <li><a href="#toc-3-23">3.23. Using the flat/patch/sprite/texture viewer</a></li> <li><a href="#toc-3-24">3.24. Saving</a></li> <li><a href="#toc-3-25">3.25. Closing a window</a></li> </ul> <li><a href="#toc-4">4. Variables and configuration</a></li> <ul> <li><a href="#toc-4-1">4.1. 
Variables</a></li> <ul> <li><a href="#toc-4-1-1">4.1.1. The font</a></li> <li><a href="#toc-4-1-2">4.1.2. Mouse wheel and other mouse issues</a></li> </ul> <li><a href="#toc-4-2">4.2. Preferences</a></li> <li><a href="#toc-4-3">4.3. Command line</a></li> <li><a href="#toc-4-4">4.4. Environment variables</a></li> <li><a href="#toc-4-5">4.5. Configuration files</a></li> <ul> <li><a href="#toc-4-5-1">4.5.1. Contents of configuration files</a></li> <li><a href="#config_locate">4.5.2. Locating configuration files</a></li> <li><a href="#toc-4-5-3">4.5.3. Default configuration files</a></li> <li><a href="#toc-4-5-4">4.5.4. User-specified configuration files</a></li> <li><a href="#toc-4-5-5">4.5.5. Organising your configuration files</a></li> </ul> </ul> <li><a href="#toc-5">5. Game definition files</a></li> <ul> <li><a href="#toc-5-1">5.1. Contents of game definition files</a></li> <li><a href="#ygd_locate">5.2. Locating game definition files</a></li> <li><a href="#toc-5-3">5.3. Use of game definition files</a></li> </ul> <li><a href="#toc-6">6. Known bugs</a></li> <li><a href="#games">7. Supported games</a></li> <ul> <li><a href="#toc-7-1">7.1. Doom</a></li> <li><a href="#toc-7-2">7.2. Doom II</a></li> <li><a href="#toc-7-3">7.3. Doom alpha</a></li> <li><a href="#toc-7-4">7.4. Doom press release pre-beta</a></li> <li><a href="#toc-7-5">7.5. Final Doom</a></li> <li><a href="#toc-7-6">7.6. Heretic</a></li> <li><a href="#toc-7-7">7.7. Hexen</a></li> <li><a href="#toc-7-8">7.8. Strife</a></li> <li><a href="#toc-7-9">7.9. Ultimate Doom</a></li> <li><a href="#toc-7-10">7.10. Boom</a></li> <li><a href="#toc-7-11">7.11. EDGE</a></li> <li><a href="#toc-7-12">7.12. MBF</a></li> <li><a href="#toc-7-13">7.13. Other derivatives</a></li> </ul> </ul> <!-- } generated by htmltoc --> </p> <hr noshade> <h2><a name="toc-1">1. Introduction</a></h2> <h3><a name="toc-1-1">1.1. Yadex in one paragraph</a></h3> <p>Yadex is a Doom level (wad) editor for Unix systems running X, including Linux. It supports Doom, Doom II, Ultimate Doom, Final Doom, Heretic, Doom press release pre-beta and also, in a more or less limited way, Hexen, Strife and Doom alpha. It is available under the terms of the GPL. <h3><a name="toc-1-2">1.2. What do I need ?</a></h3> <p>To compile, install and run this release of Yadex, you need <ul> <li>a POSIX-compatible environment (such as Unix), <li>a C compiler (ISO-compatible), <li>a C++ compiler (ISO-compatible), <li>GNU make (it won't work with other flavours of make), <li>an HTML viewer to read the doc, <li>a Doom/Doom II/Heretic/Hexen/Strife iwad, <li>X11R6 or X11R5. </ul> <p>More specifically, Yadex expects that <ul> <li>the routines in <code>ctype.h</code> accept the range <code>CHAR_MIN</code> through <code>UCHAR_MAX</code>, <li>the C library supports <code>hypot()</code> and at least one of <code>nanosleep()</code> and <code>usleep()</code>. </ul> <h4><a name="toc-1-2-1">1.2.1. Iwad</a></h4> <p>A shareware Doom or Heretic iwad or a demo Hexen or Strife iwad is okay but you need a registered iwad if you want to be able to save your changes. <h4><a name="toc-1-2-2">1.2.2. Directories</a></h4> <p>Yadex mostly conforms to the <a href="http://www.pathname.com/fhs/"> FHS (filesystem hierarchy standard)</a>. 
By default, the installation procedure will try to copy files in the following directories, creating the directories if they don't exist&nbsp;; <ul> <li><code>/usr/local/bin</code> <li><code>/etc/yadex/1.7.0</code> <li><code>/usr/local/man/man6</code> <li><code>/usr/local/share/games/yadex/1.7.0</code> </ul> <p>So make sure you have the necessary rights before installing. If you don't, install in a different directory by running <code>./configure</code> with the <code>--prefix</code> option. <h4><a name="toc-1-2-3">1.2.3. Display</a></h4> <p>Yadex uses about 270 different colours. In most cases, on PseudoColor displays, there aren't that many free colour cells. So, if it detects a PseudoColor display, Yadex uses a private colormap so as to get as many free colour cells as possible. The drawback of this method is that, when you're in the Yadex window, all other windows are displayed with wrong colours and vice-versa. <p>What's more, as PseudoColor displays typically have 256 colours (at least on PC), which is less than the number of colours needed, Yadex might have to assign the same physical colour to different logical colours. If it happens, it will try to render the colours of the game accurately at the expense of the application colours. <p>Executive summary: if you can, use a TrueColor or DirectColor display and a depth of more than 8 bits per pixel (E.G. by launching X with "<code>startx -- -bpp 16</code>"). <p>A 640x480 screen is okay though, of course, a larger display is better. <h3><a name="toc-1-3">1.3. Compiling and installing Yadex</a></h3> See <a href="README"><code>README</code></a>. <p>Don't forget to tell Yadex where your iwads are by changing the lines "<code>iwad1&nbsp;=</code>", "<code>iwad2&nbsp;=</code>" etc. in <code>/etc/yadex/1.7.0/yadex.cfg</code>. If there is an iwad you don't have, you can just comment out the corresponding line. <hr noshade> <h2><a name="toc-2">2. Running Yadex</a></h2> <h3><a name="toc-2-1">2.1. The command line arguments</a></h3> <p>Yadex takes two sorts of command line arguments&nbsp;: parameters and options. An option is a command line argument that starts with a "<code>-</code>". <p>Some options take an argument. The argument must be separated from the option by some whitespace. <strong>Constructs like "<code>-othingy</code>" are not recognized. You have to type "<code>-o thingy</code>".</strong> <p>The "<code>-file</code>" option takes a variable number of arguments. It uses all the non-options up to the next option. <p>The options that take no argument can be negated by using a "<code>+</code>" instead of a "<code>-</code>". For example, "<code>+sb</code>" will undo the effect of a "<code>swap_buttons&nbsp;=&nbsp;true</code>" directive in the config file. <p>In general, if you specify the same option more than once, the last occurrence overrides the previous ones. For example, "<code>yadex&nbsp;+P&nbsp;-P</code>" is equivalent to "<code>yadex&nbsp;-P</code>". Similarly, "<code>yadex&nbsp;-i1&nbsp;foo.wad&nbsp;-i1&nbsp;bar.wad&nbsp;-i1&nbsp;baz.wad</code>" boils down to "<code>yadex&nbsp;-i1&nbsp;baz.wad</code>". <p>The "<code>-pwad</code>" (or "<code>-pw</code>") option, however are additive. Each occurrence will add to the previous ones. For instance, "<code>yadex&nbsp;-pwad&nbsp;foo.wad&nbsp;-pwad&nbsp;bar.wad</code>" is equivalent to "<code>yadex&nbsp;foo.wad&nbsp;bar.wad</code>". <p>Any command line argument that is not an option is treated as the name of a pwad to load. 
You can put as many names of pwads on the command line as you wish (or none at all). The details of specifying pwad names are discussed <a href="#pwads">there</a>. <h3>2.2. <a name="opt">Options</a></h3> <p>To know about the command line options that Yadex understands, type <p><code>$ yadex --help</code> <p>You'll get this: <pre> -b <var>string</var> Run benchmark and exit successfully -f -config_file <var>string</var> Config file -d -debug Debug mode -expert Expert mode -fc -fake_cursor Fake cursor -fn -font <var>string</var> Font name -g -game <var>string</var> Game -h -height <var>integer</var> Initial window height -? -help --help Show usage summary -i1 -iwad1 <var>string</var> The name of Doom iwad -i2 -iwad2 <var>string</var> The name of the Doom II iwad -i3 -iwad3 <var>string</var> The name of the Heretic iwad -i4 -iwad4 <var>string</var> The name of the Hexen iwad -i5 -iwad5 <var>string</var> The name of the Strife iwad -i6 -iwad6 <var>string</var> The name of the Doom alpha 0.2 iwad -i7 -iwad7 <var>string</var> The name of the Doom alpha 0.4 iwad -i8 -iwad8 <var>string</var> The name of the Doom alpha 0.5 iwad -i9 -iwad9 <var>string</var> The name of the Doom press release iwad -i10 -iwad10 <var>string</var> The name of the Strife 1.0 iwad -pw -pwad <var>string</var> Pwad file to load -P -no_pixmap No pixmap, saves memory, more flicker -q -quiet Quiet mode -qq -quieter Quieter mode -s0 -select0 Automatic selection of 0th object -sb -swap_buttons Swap mouse buttons -td -text_dot DrawScreenText debug flag -v -verbose Verbose mode --version Print version and exit -w -width <var>integer</var> Initial window width -z -zoom <var>integer</var> Initial zoom factor </pre> <p>You might wonder what the "<code>-file</code>", "<code>-pw</code>" and "<code>-pwad</code>" options are for since it's so simple to just put the names of the pwads on the command line. The answer is that they're only here for compatibility with previous versions of Yadex/Yade/DEU. Don't use them. They might go away one day. <h3>2.3. <a name="game">Specifying the game for which you want to edit</a></h3> <p>Yadex can handle several games (Doom, Doom II, Heretic, etc.) but only one at a time. Unfortunately, that game can't be changed dynamically. If you want to switch game, you must quit Yadex and run it again. <p>By default, the game used is the one specified by the "<code>game=</code>" directive in the configuration file. If you want to override the default, use "<code>-g <var>game</var></code>". <p>The allowed values for <code><var>game</var></code> are&nbsp;: <dl> <dt>"<code>doom</code>" <dd>Doom and Ultimate Doom (shareware or registered) <dt>"<code>doom02</code>" <dd>Doom alpha 0.2 <dt>"<code>doom04</code>" <dd>Doom alpha 0.4 <dt>"<code>doom05</code>" <dd>Doom alpha 0.5 <dt>"<code>doom2</code>" <dd>Doom II and Final Doom <dt>"<code>doompr</code>" <dd>Doom press release pre-beta <dt>"<code>heretic</code>" <dd>Heretic (shareware or registered) <dt>"<code>hexen</code>" <dd>Hexen (demo or commercial) <dt>"<code>strife</code>" <dd>Strife 1.1 or later (demo or commercial) <dt>"<code>strife10</code>" <dd>Strife 1.0 (demo or commercial) </dl> <p>Not all those games are fully supported. For details, see <a href="#games">there</a>. <h3>2.4. <a name="pwads">Specifying the names of pwads to load</a></h3> <p>When you have to specify the name of a pwad to load, for example in the <code>r</code> command, or as a command line argument, you don't always have to spell it all out. 
<ul> <li>The <code>.wad</code> extension can be omitted, it will be added automatically. For example, specifying the name "<code>foo</code>" is equivalent to specifying "<code>foo.wad</code>". An unfortunate consequence of this convention is that it's impossible to load a wad that would be really called "<code>foo</code>". Though, under DOS, you can still work around this limitation by specifying the name with a trailing dot, ("<code>foo.</code>"). <li> <p> The path can sometimes be omitted too, if the wad is in one of the <dfn>standard directories</dfn>, or one of their subdirectories. If no file of the name you specified is found in the current directory, Yadex will look for a file of that name in the standard directories, which are&nbsp;: </p> <ol> <li>your home directory, <li><code>/usr/local/share/games/</code><var>game</var>, <li><code>/usr/share/games/</code><var>game</var>, <li><code>/usr/local/share/games/wads</code>, <li><code>/usr/share/games/wads</code>, </ol> <p>(where <var>game</var> is the name of the <a href="#game">game</a>).</p> <p>On the other hand, if you give an absolute file name, that is one that begins with a "<code>/</code>", the standard directories will <em>not</em> be used. Yadex will only try to open the file of the exact name you specified. <p>For example, if you type "<code>yadex -g doom2 foo/bar</code>", Yadex will look for the following files in order&nbsp;:</p> <ol> <li><code>~/foo/bar.wad</code>, <li><code>/usr/local/share/games/doom2/foo/bar.wad</code>, <li><code>/usr/share/games/doom2/foo/bar.wad</code>, <li><code>/usr/local/share/games/wads/foo/bar.wad</code>, <li><code>/usr/share/games/wads/foo/bar.wad</code>, </ol> <p>and open the first it finds.</p> <p>But if you type "<code>yadex&nbsp;-g&nbsp;doom2&nbsp;/foo/bar</code>" (note the leading slash), Yadex will try to open "<code>/foo/bar.wad</code>", period. <p>The philosophy behind those standard directories is that&nbsp;: <ul> <li>a local file overrides a global one, <li>a game-specific file overrides a non-game-specific one. </ul> </ul> <p>Gotcha: those shorthands can only be used when <em>reading</em> a pwad. When <em>writing</em> to a pwad, you always have to specify the exact name, with the full path and extension. <h3>2.5. <a name="env">Environment variables</a></h3> <dl> <dt><code>DOOMWADDIR</code> <dd>No, <code>DOOMWADDIR</code> is <em>not</em> supported. Perhaps in another version... <dt><code>DISPLAY</code> <dd>Unix only. The name of the X display that Yadex will try to connect to. <dt><code>HOME</code> <dd>Used to expand <code>~</code> when locating configuration and game definition files. <dt><code>LINES</code> <dd>If set, Yadex assumes the tty has that many lines instead of 24. The value must be an unsigned, non-zero decimal integer. <dt><code>TMPDIR</code> <dd>If set, swap files are created there. Otherwise, in <code>/tmp</code>. <dt><code>YADEX_GAME</code> <dd>Indicates the game to use. Overrides the <code>game</code> parameter in the config file, is overridden by the <code>-g</code> command line option. </dl> <hr noshade> <h2><a name="toc-3">3. Editing levels with Yadex</a></h2> <h3><a name="toc-3-1">3.1. Using external textures, flats, etc.</a></h3> <p>Like Doom, Yadex accepts the addition and replacement of resources from a pwad. In general, if the same resource is defined in several wads, the last definition is used. This is also true if the same resource is defined several times in the same wad. 
It's not recommended that you use lower case names in your wads; sometimes it's handled but sometimes it's not. So always use upper-case names.</p> <dl> <dt>Flats <dd>New or replacement flats are recognized iff they're placed between <code>FF_START</code>/<code>F_END</code> or <code>FF_START</code>/<code>FF_END</code> labels. There can be any number of <code>FF_START</code>/<code>F_END</code> and <code>FF_START</code>/<code>FF_END</code> pairs and they don't have to be in the same pwad. If there are several lumps for the same flat, the last one is used. <dt>Palette <dd>If there is a replacement <code>PLAYPAL</code> lump, it's used. If there are several <code>PLAYPAL</code> lumps, the last one is used. <dt>Patches <dd>New or replacement patches are recognized iff they're between <code>P_START</code> and <code>P_END</code> or <code>PP_START</code> and <code>PP_END</code>. Within a group of patches, labels <code>P1_START</code>, <code>P1_END</code>, <code>P2_START</code>, <code>P2_END</code>, <code>P3_START</code> and <code>P3_END</code> are ignored&nbsp;; any other label elicits a warning. New patches must also appear in the <code>PNAMES</code> lump or the texture browser won't be able to use them. If there are several <code>PNAMES</code> lumps, the last one is used. If there are several lumps for the same flat, the last one is used. <dt>Sprites <dd>New or replacement sprites are recognized iff they're between one of the following pair of labels&nbsp;: <code>S_START</code>/<code>S_END</code>, <code>SS_START</code>/<code>S_END</code> or <code>SS_START</code>/<code>SS_END</code>. There can be any number of label pairs and they don't have to be in the same pwad. If there are several lumps for the same sprite, the last one is used. <dt>Textures <dd>New or replacement textures are recognized iff they're in a <code>TEXTURE1</code> or <code>TEXTURE2</code> lump. If the same texture happens to be defined in both <code>TEXTURE1</code> and <code>TEXTURE2</code> (which should not happen), only the definition in <code>TEXTURE1</code> is used. If there are several <code>TEXTURE1</code> lumps, the last one is used. If there are several <code>TEXTURE2</code> lumps, the last one is used. If it exists, <code>TEXTURE2</code> is always susceptible to be used, even for games where it's not supposed to exist, like Doom&nbsp;II. </dl> <h3><a name="toc-3-2">3.2. Logging of time spent</a></h3> <p>If you edit a level from a file and there exists a file in the same directory with the same name but with the extension <code>.yl</code>, that file will be used to keep track of the time you spent editing that level. At the end of the editing session, Yadex will append a line to the <code>.yl</code> file with the name of the level and the number of minutes spent on it. <p> Note that if the <code>.yl</code> file does not already exist, it is <em>not</em> created. This is to prevent the creation of a large number of small useless files when browsing through a collection of wads. Thus, if you want to enable logging, you need to manually create the log file first with a command such as "<code>touch <var>name</var>.yl</code>". <h3><a name="toc-3-3">3.3. The prompt commands</a></h3> <p>You can get a summary of the prompt commands by typing "<code>?</code>" at the Yadex prompt. <h3><a name="toc-3-4">3.4. Moving around</a></h3> <h4><a name="toc-3-4-1">3.4.1. Moving the pointer</a></h4> <p>In the X11 version, the only way to do that is to move the pointer device (i.e. the mouse). <h4><a name="toc-3-4-2">3.4.2. 
Scrolling</a></h4> <p>The arrow keys [<kbd>Left</kbd>], [<kbd>Right</kbd>], [<kbd>Up</kbd>] and [<kbd>Down</kbd>] scroll a little at a time, by default 10% of the screen/window width or height. You can change the exact amount by setting the variable <code>scroll_less</code> in the configuration file. <p>[<kbd>Page-up</kbd>], [<kbd>Page-down</kbd>], [<kbd>Home</kbd>] and [<kbd>End</kbd>] scroll more at a time, by default 90% of the screen/window width or height. You can change the exact amount by setting the variable <code>scroll_more</code> in the configuration file. <h4><a name="toc-3-4-3">3.4.3. Autoscrolling</a></h4> <p>By default, autoscrolling is disabled. You can enable it by setting <code>autoscroll</code> to <code>true</code> in the configuration file. <p>When it is enabled and the pointer is close to the edge of the screen/window, the map scrolls automatically. The closer you are to the edge, the faster it scrolls. Autoscrolling is always disabled near the menu bar items so that the map does not scroll when you're reaching for the menu. <p>You can fine tune autoscrolling by changing the variables <code>autoscroll_amp</code> and <code>autoscroll_edge</code>. <h4><a name="toc-3-4-4">3.4.4. Jumping</a></h4> <p>I plan to develop a full set-mark/jump-to-mark system similar to the one in <code>vi</code>, with <code>m<var>mark-name</var></code>, <code>'<var>mark-name</var></code> and <code>`<var>mark-name</var></code>. <p>For the moment, if you press [<kbd>'</kbd>], the map scrolls so that its centre is at the centre of the screen/window. <p>If you press [<kbd>`</kbd>], the map scrolls so that its centre is at the centre of the screen/window and the zoom factor is adjusted so that the whole map is visible and almost fills the screen/window. <h3><a name="toc-3-5">3.5. Zooming in and out</a></h3> <p>The current zoom factor is shown on the info bar, after the word "<code>Scale:</code>". It is shown in pixels per map units. The indication "<code>Scale: 50%</code>" means that one pixel of the display corresponds to 2 map units. <p>You can zoom in by pressing [<kbd>+</kbd>] or [<kbd>=</kbd>] or the 4th mouse button or by moving the mouse wheel "up". See the <code>zoom_default</code> and <code>zoom_step</code> variables. <p> You can zoom out by pressing [<kbd>-</kbd>] or [<kbd>_</kbd>] or the 5th mouse button or by moving the mouse wheel "down". See the <code>zoom_default</code> and <code>zoom_step</code> variables. <p> You can also set the zoom factor directly with the keys [<kbd>1</kbd>] through [<kbd>9</kbd>] and [<kbd>0</kbd>]. [<kbd>1</kbd>] sets the zoom factor to a value that is controlled by the <code>digit_zoom_base</code> variable (by default, 100%). Each successive key down the keyboard sets the zoom factor <code>digit_zoom_step</code> percents higher than the previous one (by default, -29%). <h3><a name="toc-3-6">3.6. The grid</a></h3> <p>The <dfn>grid</dfn> is a square mesh of blue dots and lines in the background. It's here to help you aligning your objects correctly with regard to Doom's metrics, which will save you a lot of textures misalignments. <p> When you insert or drag objects, they are <dfn><i>snapped to grid</i></dfn>. That is, Yadex prevents you from placing them off the grid. You can toggle the <code>snap_to_grid</code> flag by pressing [<kbd>y</kbd>]. <p> The grid step is always a power of 2, E.G. 128, 64, 32, etc. You can get a finer grid (dividing the grid step by 2) by pressing [<kbd>g</kbd>]. Conversely, pressing [<kbd>G</kbd>] multiplies the grid step by 2. 
If you press [<kbd>g</kbd>] when the grid step is already at its minimum value, it will be set to its maximum value. And conversely. The minimum and maximum values are set in the configuration file with <code>grid_min</code> and <code>grid_max</code>. <p> When you change the zoom factor, Yadex automatically changes the grid step to make it what it thinks is best for the new zoom factor. If you want to lock the grid step to its current value, press [<kbd>z</kbd>]. To unlock it, press that key again. <p> You can hide the grid by pressing [<kbd>h</kbd>] (but this doesn't disable <code>snap_to_grid</code>). Press that key again to make the grid visible again. <p>You can also use [<kbd>H</kbd>] to reset the grid step. <h3><a name="toc-3-7">3.7. Inserting objects</a></h3> <p>By pressing [<kbd>Ins</kbd>], you insert a new object under the pointer. <p>The type of the new object is generally determined by the current mode. That is, if you are in things mode, pressing [<kbd>Ins</kbd>] will create a new thing. However, this is not always true, particularly if some objects are already selected&nbsp;: <ul> <li>If you are in vertices mode and there are two or more vertices selected, pressing [<kbd>Ins</kbd>] will insert a linedef from the first vertex to the second, another one from the second to the third and so on. That is, a path of linedefs is created from the first to the last vertex. The path is not closed (but see [<kbd>Shift</kbd>][<kbd>Ins</kbd>] below). <li>If you are in linedefs mode and there are selected linedefs, pressing [<kbd>Ins</kbd>] will create a new sector and new sidedefs that point to it and assign them to the linedefs. For the linedefs that had no first sidedef (whether or not they already had a second sidedef), the new sidedef is attached as first sidedef. For the linedefs that had a first sidedef but no second sidedef, the new sidedef is attached as second sidedef. If one or more of the selected linedefs already had two sidedefs, the operation fails. Bug: the side to which the new sidedef is attached is decided without taking into account the actual orientation of the linedef. </ul> <p>If an object of the same type is highlighted at the moment you press [<kbd>Ins</kbd>], the properties of the new object are copied from the highlighted object. In other words, the highlighted object serves as a "model". Else the properties of the new object are set to default values.</p> <table> <tr valign=top> <th>Object type <th>Property <th>Value if there is a model <th>Value if there is no model <tr valign=top> <td rowspan=7>Linedef <td>Start vertex <td>Like the model <td>If there are two selected vertices, the first. If not, you are prompted for a vertex number. <tr valign=top> <td>End vertex <td>Like the model <td>If there are two selected vertices, the second. If not, you are prompted for a vertex number. 
<tr valign=top> <td>Flags <td>Like the model <td>Impassable <tr valign=top> <td>Type <td>Like the model <td>0 (normal) <tr valign=top> <td>Sector tag <td>Like the model <td>0 (none) <tr valign=top> <td>First sidedef <td>FFFFh (none) <td>FFFFh (none) <tr valign=top> <td>Second sidedef <td>FFFFh (none) <td>FFFFh (none) <tr valign=top> <td rowspan=4>Thing <td>Coordinates <td>Like the pointer <td>Like the pointer <tr valign=top> <td>Type <td>Like the model <td><a href="#param_default_thing"><code>default_thing</code></a> <tr valign=top> <td>Flags <td>Like the model <td>D12 D3 D45 <tr valign=top> <td>Angle <td>Like the model <td>0 (east) <tr valign=top> <td rowspan=7>Sector <td>Floor height <td>Like the model <td><a href="#param_default_floor_height"><code>default_floor_height</code></a> <tr> <td>Ceiling height <td>Like the model <td><a href="#param_default_ceiling_height"><code>default_ceiling_height</code></a> <tr> <td>Floor texture <td>Like the model <td><a href="#param_default_floor_texture"><code>default_floor_texture</code></a> <tr> <td>Ceiling texture <td>Like the model <td><a href="#param_default_ceiling_texture"><code>default_ceiling_texture</code></a> <tr> <td>Light level <td>Like the model <td><a href="#param_default_light_level"><code>default_light_level</code></a> <tr> <td>Type <td>Like the model <td>0 (normal) <tr> <td>Tag <td>Like the model <td>0 (none) <tr valign=top> <td>Vertex <td>Coordinates </tr> </table> <p>In vertex mode, pressing [<kbd>Shift</kbd>][<kbd>Ins</kbd>] is the same as pressing [<kbd>Ins</kbd>] except that the path is closed (an additional linedef is created from the last to the first vertex). <h3><a name="toc-3-8">3.8. Copying objects</a></h3> <p>When you press [<kbd>o</kbd>], the selected or highlighted object(s) is(are) copied. If only one object has been copied, the new copy is spawned under the pointer. If several objects have been copied, the new copies are spawned so that their centre is under the pointer. <p>The new copies are spawned selected and everything else is unselected so that you can easily drag them where you want. <p>All the properties of the new objects are copied from the original objects with the notable exception of references. If you are an OO programmer, think shallow copy vs. deep copy. Otherwise, read on. <p>When you copy linedefs, the start and end vertices are copied too and the new linedefs reference the new vertices instead of the old ones. The same thing goes for sidedefs, except if the <code>copy_linedef_reuse_sidedefs</code> flag is true. In that case, each new linedef uses the same sidedefs as its "model" did. This is useful when you want to create many similar structures, E.G. pillars that stand in the same sector. When all the structures use the same sidedefs, you can change all structures at once by editing only one of them. Another benefit of sharing sidedefs is that it makes your wad file somewhat leaner, since the sidedef, at 30 bytes, is the largest level object. <p>However, sharing sidedefs makes impossible to change one structure independantly of the others. That's where the "unlink sidedef" function enters. If you select one or more linedefs and unlink their sidedefs, Yadex makes the necessary duplications so that none of the sidedefs they use is also used by any other linedef outside the selection. Thus you can edit your linedefs freely. The "unlink sidedef" operation is described in detail <a href="#unlink_sidedefs">there</a>. 
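<p>Going back to linedefs, the difference between duplicating and sharing sidedefs when copying can be summed up by the following C sketch. The structures and helper names are made up for the purpose of the illustration and are not the actual Yadex source&nbsp;:</p>
<blockquote>
<pre>
/* Hypothetical sketch of copying one linedef.  Vertices are always
   duplicated ("deep" copy); sidedefs are duplicated too, unless the
   copy_linedef_reuse_sidedefs flag asks for them to be shared. */
struct linedef
{
  int start, end;          /* vertex numbers               */
  int sidedef1, sidedef2;  /* sidedef numbers, -1 for none */
};

int duplicate_vertex  (int n);           /* assumed primitives          */
int duplicate_sidedef (int n);
int append_linedef    (struct linedef l);/* returns the new number      */

int copy_one_linedef (struct linedef model, int reuse_sidedefs)
{
  struct linedef copy = model;           /* start from the model        */

  copy.start = duplicate_vertex (model.start);
  copy.end   = duplicate_vertex (model.end);

  if (! reuse_sidedefs)                  /* "deep" copy of the sidedefs */
    {
      if (model.sidedef1 != -1)
        copy.sidedef1 = duplicate_sidedef (model.sidedef1);
      if (model.sidedef2 != -1)
        copy.sidedef2 = duplicate_sidedef (model.sidedef2);
    }
  /* else: "shallow" copy, the new linedef shares the model's sidedefs */

  return append_linedef (copy);
}
</pre>
</blockquote>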
<p>Similarly, when you copy sectors, the linedefs, sidedefs and vertices are duplicated and the new sectors use the copies, not the original ones. Note that the <code>copy_linedef_reuse_sidedefs</code> has not effect when copying sectors; the new linedefs always use new sidedefs, even if that flag is set. <p>What about copying things and vertices&nbsp;? Well, those don't contain references to other objects, so there are no special precautions to take when duplicating them. Except for the position, the copy is exactly identical to the original. <h3><a name="toc-3-9">3.9. Deleting objects</a></h3> To be written. There is already a concise description in the <a href="getting_started.html">getting started</a> document. <br>Basically, use [<kbd>Del</kbd>]. <h3><a name="toc-3-10">3.10. Changing the properties of objects</a></h3> To be written. There is already a concise description in the <a href="getting_started.html">getting started</a> document. <br>Executive summary: use [<kbd>Return</kbd>] and double-click. <h3><a name="toc-3-11">3.11. Moving objects a.k.a. drag-and-drop</a></h3> To be written. There is already a concise description in the <a href="getting_started.html">getting started</a> document. <h3><a name="toc-3-12">3.12. Renumbering objects</a></h3> Normally, you don't have to worry very much about the actual numbers of the linedefs, sectors, sidedefs, things and vertices you manipulate. You just have to be careful to use the correct numbers in references but that's all. You don't need to <em>change</em> the number that a given object bears. <p>However, for certain things, the <em>relative</em> numbers of objects matter&nbsp;: <ul> <li>if you have several player 1 starts in your level (which is how the voodoo doll trick is done), it's the highest-numbered one that the player incarnates in at level start, <li>if there are several teleport exits in the same sector, only the lowest-numbered one is used (but I don't know of any interesting application of this fact), <li>if there are several superimposed linedefs, the lowest-numbered one is drawn like it was in front, <li>BSP's transparent doors need one of the door tracks to be the lowest-numbered linedef, <li>certain linedef types apply only to the lowest-numbered of the tagged sectors (IIRC), <li>raising stairs use the lowest-numbered linedef. </ul> <p>So how do you control the ordering of objects&nbsp;? <h4><a name="toc-3-12-1">3.12.1. Exchanging objects numbers</a></h4> This function works the same way in all modes. If you select exactly two objects and press [<kbd>Ctrl-x</kbd>] (or use "Edit-&gt;Exchange objects numbers") they exchange their numbers. That is, if the first object had number <var>n1</var> and the second object had number <var>n2</var>, the first object in renumbered to <var>n2</var> and the second one to <var>n1</var>. No other objects are renumbered. <p>At the same time the numbers are exchanged, all relevant references are fixed up. That is, if you exchange the numbers of two vertices, all the linedefs that referred to them are changed accordingly. And if you exchange the numbers of two sectors, all the sidedefs that referred to them are fixed. The thing to remember is that this function leaves the level functionally identical to what it was before, except of course for the possible side effects of the renumbering itself. 
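<p>In C terms, exchanging the numbers of two vertices and fixing up the references boils down to something like the sketch below (made-up structures and names, not the actual Yadex source)&nbsp;:</p>
<blockquote>
<pre>
/* Hypothetical sketch: swap vertices n1 and n2 in the vertex table,
   then walk the linedefs so that every reference to n1 now points to
   n2 and vice versa.  The level stays functionally identical. */
struct vertex  { int x, y; };
struct linedef { int start, end; };

void exchange_vertex_numbers (struct vertex *vertices,
                              struct linedef *linedefs, int nlinedefs,
                              int n1, int n2)
{
  struct vertex tmp = vertices[n1];
  int n;

  vertices[n1] = vertices[n2];
  vertices[n2] = tmp;

  for (n = 0; n != nlinedefs; n++)
    {
      if      (linedefs[n].start == n1) linedefs[n].start = n2;
      else if (linedefs[n].start == n2) linedefs[n].start = n1;
      if      (linedefs[n].end   == n1) linedefs[n].end   = n2;
      else if (linedefs[n].end   == n2) linedefs[n].end   = n1;
    }
}
</pre>
</blockquote>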
<p><small>(If you wanted (for some reason I can't imagine) to exchange the numbers without fixing the references, you could do it by replacing <code>true</code> by <code>false</code> in the call to <code>exchange_objects_numbers()</code>.)</small> <h3><a name="toc-3-13">3.13. Selecting objects</a></h3> There are several ways to select objects. The simplest is to click on an object with the left mouse button. By default, when you do that, all other previously selected objects are unselected. <p>If you want to select an object without unselecting everything else, do the same thing but keep [<kbd>Ctrl</kbd>] pressed while you click. You can also unselect an object this way. <p>You can also select several objects at a time with a selection box. You know how to draw a selection box, don't you&nbsp;? Well, it works the same in Yadex. If press [<kbd>Ctrl</kbd>] while setting the first corner of the box, all objects in the box will be "toggle-selected" (that is, selected if they weren't, unselected if they were). <p>A more advanced way to select objects is path-selection. With path selection, you can easily select all the linedefs that form an object with a single keystroke. There are two variants of path selection. <p>To use the first, you must first highlight a linedef. Then press [<kbd>e</kbd>]. All the linedefs that belong to the same non-forked path as the highlighted linedef are selected. For example&nbsp;:</p> <table align="center"> <tr> <td><img src="e1.png" alt="Before pressing [e]" WIDTH=159 HEIGHT=127></td> <td><img src="e2.png" alt="After pressing [e]" WIDTH=159 HEIGHT=127></td> </tr> <tr> <td align="right"><i>Before pressing</i> [<kbd>e</kbd>]</td> <td align="right"><i>After pressing</i> [<kbd>e</kbd>]</td> </tr> </table> <p>Note how the selection stop at the first fork in the path. That kind a path selection is good to select a path of single- or double-sided linedefs that belong to the same sector. <p>The other variant of path selection is better to select all the linedefs that form a single pillar, even if not all of its sides face the same sector. To use it, highlight a single-sided linedef and press [<kbd>E</kbd>]. All the single-sided linedefs that belong to the same path as the original one are highlighted. For example&nbsp;:</p> <table align="center"> <tr> <td><img src="E1.png" alt="Before pressing [E]" WIDTH=156 HEIGHT=160></td> <td><img src="E2.png" alt="After pressing [E]" WIDTH=154 HEIGHT=160></td> </tr> <tr> <td align="right"><i>Before pressing</i> [<kbd>E</kbd>]</td> <td align="right"><i>After pressing</i> [<kbd>E</kbd>]</td> </tr> </table> <p>Note that the selection is not stopped by the forks in the path. Also note that the double-sided linedefs are not selected. <p>Like clicking, [<kbd>e</kbd>] and [<kbd>E</kbd>] have the effect of unselecting everything else that was previously selected unless you use [<kbd>Ctrl</kbd>]. If [<kbd>Ctrl</kbd>] was pressed at the time you pressed [<kbd>e</kbd>] or [<kbd>E</kbd>], all the linedefs that would have been selected if you didn't press [<kbd>Ctrl</kbd>] are either removed from the selection (if they were previously selected) or added to the selection (if they weren't). This is actually simpler than it sounds. <h3><a name="toc-3-14">3.14. The use of [<kbd>Shift</kbd>]</a></h3> <p>The [<kbd>Shift</kbd>] key has an interesting property ; as long as you hold it down, the highlight is locked. If an object is not highlighted, it stays so even if you bring the mouse pointer over it. 
Likewise, if an object is already highlighted, it remains so, even if you move the pointer away from it. <p>This can be useful on certain occasions, such as when you want to compare two sectors that have other sectors between them. <h3><a name="toc-3-15">3.15. Sector miscellaneous operations</a></h3> <p>Unless otherwise specified, all the operations below act on all the objects in the <em>working set</em>. If there is at least one selected object, the working set is equal to the selection. If there is no selected object, the working set is the currently highlighted object (the object under the pointer). If there is no selection and no highlighted object, the selection is empty, of course. If there is a selection and an object outside the selection, the latter is <em>not</em> part of the working set.</p> <dl> <dt>Find first free tag number <dd>Displays the smallest tag number greater than 0 and not used by any linedef or sector. <dt>Rotate and scale sectors <dd>(description to be written) <dt>Make door from sector <dd>(description to be written) <dt>Make lift from sector <dd>(description to be written) <dt>Distribute sector floor heights <dd>(description to be written) <dt>Distribute sector ceiling heights <dd>(description to be written) <dt>Raise or lower sectors... <dd>Prompts you for a number (N) and adds that number to the floor and ceiling heights of all the sectors in the working set. If N is positive, this results in raising the sectors by N units. If N is negative, this results in lowering the sectors by -N units. <dt>Brighten or darken sectors... <dd>Prompts you for a number (N) and adds that number to the light level of all the sectors in the working set. If N is positive, this results in sectors becoming brighter. If N is negative, this results in sectors becoming darker. No light level is decreased below 0 or increased above 255. <dt>Unlink room <dd>This function is not implemented yet. <dt>Mirror horizontally <dd>This function starts by determining the set <var>S</var> of vertices that are used by the sectors in the working set. Then all the vertices in <var>S</var> have their x-coordinate changed so that they're "mirrored" around the vertical axis that intersects the geometric centre of <var>S</var>. Finally, all the linedefs whose both vertices belong to <var>S</var> are flipped so that the sector references remain correct. <p>This function is designed to be used on either the whole level or a group of isolated sectors (like in the screenshots below). It is not recommended to use it on a group of sectors if that group is connected to other sectors. You can do it but the resulting mess might take you some time to untangle, especially if there are many connecting linedefs. <p>A common use for this function is when you have a symmetrical room with, say, the same staircase on both sides of the axis of symmetry. You can build one staircase, make a copy of it, mirror the copy and paste it on the other side of the room. <p>See the "Mirror horizontally" linedef function. <dt>Mirror vertically <dd>This function starts by determining the set <var>S</var> of vertices that are used by the sectors in the working set. Then all the vertices in <var>S</var> have their y-coordinate changed so that they're "mirrored" around the horizontal axis that intersects the geometric centre of <var>S</var>. Finally, all the linedefs whose both vertices belong to <var>S</var> are flipped so that the sector references remain correct.
<p>This function is designed to be used on either the whole level or a group of isolated sectors (like in the screenshots below). It is not recommended to use it on a group of sectors if that group is connected to other sectors. You can do it but the resulting mess might take you some time to untangle, especially if there are many connecting linedefs. <p>A common use for this function is when you have a symmetrical room with, say, the same staircase on both sides of the axis of symmetry. You can build one staircase, make a copy of it, mirror the copy and paste it on the other side of the room. <p>See the "Mirror vertically" linedef function. <dt>Swap flats <dd>For each sector in the working set, exchange the floor texture with the ceiling texture. </dl> <h3><a name="toc-3-16">3.16. Thing miscellaneous operations</a></h3> <p>Unless otherwise specified, all the operations below act on all the objects in the <em>working set</em>. If there is at least one selected object, the working set is equal to the selection. If there is no selected object, the working set is the currently highlighted object (the object under the pointer). If there is no selection and no highlighted object, the selection is empty, of course. If there is a selection and an object outside the selection, the latter is <em>not</em> part of the working set.</p> <dl> <dt>Find first free tag number <dd>Displays the smallest tag number greater than 0 and not used by any linedef or sector. <dt>Rotate and scale things <dd>(description to be written) <dt>Spin things 45° clockwise <dd>Subtract 45 from the angle of all the things in the working set. This is not to be confused with "Rotate and scale things" where it's the <em>position</em> of the things that is changed. <dt>Spin things 45° counter-clockwise <dd>Add 45 to the angle of all the things in the working set. This is not to be confused with "Rotate and scale things" where it's the <em>position</em> of the things that is changed. <dt>Mirror horizontally <dd>All the things in the working set have their x-coordinate changed so that they're "mirrored" around the vertical axis that intersects the geometric centre of the things in the working set. Their angle is also adjusted. <p>It is not possible to mirror things without changing their angle (short of commenting out a couple of lines in <code>x_mirror.cc</code> and recompiling, of course). <dt>Mirror vertically <dd>All the things in the working set have their y-coordinate changed so that they're "mirrored" around the horizontal axis that intersects the geometric centre of the things in the working set. Their angle is also adjusted. <p>It is not possible to mirror things without changing their angle (short of commenting out a couple of lines in <code>x_mirror.cc</code> and recompiling, of course). </dl> <h3><a name="toc-3-17">3.17. Setting/toggling/clearing thing flags</a></h3> <p>In things mode, press [<kbd>a</kbd>] to open the "set thing flags" popup menu. <br>Press [<kbd>b</kbd>] to open the "toggle thing flags" popup menu. <br>Press [<kbd>c</kbd>] to open the "clear thing flags" popup menu. <h3><a name="toc-3-18">3.18. Vertex miscellaneous operations</a></h3> <p>Unless otherwise specified, all the operations below act on all the objects in the <em>working set</em>. If there is at least one selected object, the working set is equal to the selection. If there is no selected object, the working set is the currently highlighted object (the object under the pointer).
If there is no selection and no highlighted object, the selection is empty, of course. If there is a selection and an object outside the selection, the latter is <em>not</em> part of the working set.</p> <dl> <dt>Find first free tag number <dd>Displays the smallest tag number greater than 0 and not used by any linedef or sector. <dt>Rotate and scale vertices <dd>(description to be written) <dt>Delete vertex and join linedefs <dd>(description to be written) <dt>Merge several vertices into one <dd>(description to be written) <dt>Add a linedef and split sector <dd>To perform this operation, you must have exactly two vertices in the working set and there must be an uninterrupted path of linedefs that face a common sector between them. A new linedef is inserted from the first vertex to the second. The newly created sector is put on the left side of the new linedef. <dt>Mirror horizontally <dd>All the vertices in the working set have their x-coordinate changed so that they're "mirrored" around the vertical axis that intersects the geometric centre of the selected vertices. Then all the linedefs whose both vertices belong to the group of selected vertices are flipped so that the sector references remain correct. <p>See the "Mirror horizontally" linedef function. <dt>Mirror vertically <dd>All the vertices in the working set have their y-coordinate changed so that they're "mirrored" around the horizontal axis that intersects the geometric centre of the the selected vertices. Then all the linedefs whose both vertices belong to the group of selected vertices are flipped so that the sector references remain correct. <p>See the "Mirror vertically" linedef function. </dl> <h3><a name="toc-3-19">3.19. Linedef miscellaneous operations</a></h3> <p>Unless otherwise specified, all the operation below act on all the objects in the <em>working set</em>. If there is at least one selected object, the working set is equal to the selection. If there is no selected object, the working set is the currently highlighted object (the object under the pointer). If there is no selection and no highlighted object, the selection is empty, of course. If there is a selection and an object outside the selection, the latter is <em>not</em> part of the working set.</p> <dl> <dt>Find first free tag number <dd>Displays the smallest tag number greater than 0 and not used by any linedef or sector. <dt>Rotate and scale LD &amp; SD... <dd>(description to be written) <dt>Split linedefs (add new vertex) <dd>Split all linedefs in the working set, by adding a vertex in the middle. <p>The key [<kbd>x</kbd>] is a shortcut to this function. <dt>Split linedefs and sector <dd>Split both linedefs in the working set in the middle by adding a vertex for each, create a linedef between the two new vertices and split the sector with this new linedef. There must be exactly 2 linedefs in the working set and they must face a common sector. <p>The new linedef goes from the first linedef in the working set to the second linedef in the working set. The new sector is on the second sidedef of the new linedef. <p>There is a bug in this function ; if the sector contains other sectors, some sidedefs are given the wrong sector number. Jim Flynn has recently fixed this in DETH. Anyone to look into it&nbsp;? <p>The key [<kbd>w</kbd>] is a shortcut to this function. <dt>Delete linedefs and join sector <dd>(description to be written) <dt>Flip linedefs <dd>The start and end vertices of the linedefs in the working set are swapped. 
Their sidedefs are also swapped, so that the sector references remain correct. <p>If you want to flip linedefs without swapping their sidedefs, you have to use "Flip linedefs" then "Swap sidedefs" on them. <dt>Swap sidedefs <dd>Swap the sidedefs of the linedefs in the working set <em>without</em> flipping the linedefs. This means that the sector references are also swapped. If you don't understand what this implies, don't use this function. <dt>Align textures (Y offset) <dd>This function is buggy. <dt>Align textures (X offset) <dd>This function is buggy. <dt>Remove 2nd sidedef (make single-sided) <dd>When two superimposed linedefs are merged, the result is often a two-sided linedefs, even though the second sidedef faces no sector. Use this function to fix the mess. <p>It sets the I flag, clears the 2 flag, sets the second sidedef to -1, clears the upper and lower texture and resets the middle texture to the default (as defined in the preferences). <dt>Make rectangular nook (32x16) <dd>This is a single-key function to make a rectangular nook in the middle of a linedef. "Nook" means that, seen from the first side of the original linedef, the resulting 5 linedefs form a concave figure. <p>If the linedef is not long enough, the length of the nook is one third of the length of the linedef.</p> <table align="center"> <tr> <td><img src="nook1.png" alt="Before" WIDTH=127 HEIGHT=159> <td><img src="nook2.png" alt="After" WIDTH=127 HEIGHT=159> </tr> <tr> <td align="right"><i>Before</i> <td align="right"><i>After</i> </tr> </table> <dt>Make rectangular boss (32x16) <dd>Same thing as "Make rectangular nook" but the resulting figure is convex. <dt>Set length (move 1st vertex) <dd>Prompts you for a length and moves the 1st vertex of the linedefs in the working set so that they have the length you specified. <dt>Set length (move 2nd vertex) <dd>Same thing as "Set length (move 1st vertex) but moves the 2nd vertex. <dt><a name="unlink_sidedefs">Unlink 1st sidedef</a> <dd>This function is used when you have several linedefs sharing common sidedefs and you don't want them to share any sidedefs anymore so that you can, for instance, change the sector reference or texture of one of the linedefs independently from the others. <p>Here is how it works&nbsp;: all sidedefs that are used on the first side of any linedef in the working set and on any side of any linedef <em>not</em> in the working set are duplicated and the first side of the concerned linedefs in the working set is set to use the copy instead of the original. <p>Gotcha&nbsp;: note that the linedefs in the working set are not "unlinked" from each other. They are only "unlinked" from any other (i.e. not in the working set) linedefs. Thus, if you have <em>n</em> linedefs that you want to unlink from each other, you have to unlink every one of them separately. This is so that, if you have, say, two square pillars (2 x 4 linedefs that all use the same sidedef), you can easily unlink one pillar from the other while still having its 4 linedefs all use the same sidedef. <dt>Unlink 2nd sidedef <dd>Same as "Unlink 1st sidedef" but with second sidedef instead of first sidedef. <dt>Mirror horizontally <dd><p>This function starts by determining the set <var>S</var> of vertices that are used by any of the linedefs in the working set. Then all the vertices in <var>S</var> have their x-coordinate changed so that they're "mirrored" around the vertical axis that intersects the geometric centre of <var>S</var>. 
Finally, all the linedefs whose both vertices belong to <var>S</var> are flipped so that the sector references remain correct.</p> <table align="center"> <tr> <td><img src="mirror0.png" alt="Before" WIDTH=125 HEIGHT=125> <td><img src="mirrorh.png" alt="After" WIDTH=125 HEIGHT=125> </tr> <tr> <td align="right"><i>Before</i> <td align="right"><i>After</i> </tr> </table> <p>This function is designed to be used on either the whole level or a group of isolated linedefs (like in the screenshots below). It is not recommended to use it on a group of linedefs if that group is connected to other linedefs. You can do it but the resulting mess might take you some time to untangle, especially if there are many connecting linedefs. <p>A common use for this function is when you have a symmetrical room with, say, the same staircase on both sides of the axis of symmetry. You can build one staircase, make a copy of it, mirror the copy and paste it on the other side of the room. <dt>Mirror vertically <dd><p>This function starts by determining the set <var>S</var> of vertices that are used by any of the linedefs in the working set. Then all the vertices in <var>S</var> have their y-coordinate changed so that they're "mirrored" around the horizontal axis that intersects the geometric centre of <var>S</var>. Finally, all the linedefs whose both vertices belong to <var>S</var> are flipped so that the sector references remain correct.</p> <table align="center"> <tr> <td><img src="mirror0.png" alt="Before" WIDTH=125 HEIGHT=125> <td><img src="mirrorv.png" alt="After" WIDTH=125 HEIGHT=125> </tr> <tr> <td align="right"><i>Before</i> <td align="right"><i>After</i> </tr> </table> <p>This function is designed to be used on either the whole level or a group of isolated linedefs (like in the screenshots below). It is not recommended to use it on a group of linedefs if that group is connected to other linedefs. You can do it but the resulting mess might take you some time to untangle, especially if there are many connecting linedefs. <p>A common use for this function is when you have a symmetrical room with, say, the same staircase on both sides of the axis of symmetry. You can build one staircase, make a copy of it, mirror the copy and paste it on the other side of the room. <dt>Cut a slice out of a sector <dd><p>You must select exactly two linedefs that face the same sector S. This function creates a linedef A that goes from the first selected linedef to the second and a linedef B that goes from the second to the first. A new sector is created between those four linedefs, with attributes identical to those of S.</p> <table align="center"> <tr> <td><img src="slice1.png" alt="Before" WIDTH=150 HEIGHT=150> <td><img src="slice2.png" alt="After" WIDTH=150 HEIGHT=150> </tr> <tr> <td align="right"><i>Before</i> <td align="right"><i>After</i> </tr> </table> <p>This function is somewhat similar to "split-linedefs-and-sector" except that it creates two linedefs instead of one and that it works on doughnut-shaped sectors. In fact, this function is the only one that can split a sector when there is no linedef path between the split points (which is the case when splitting a doughnut-shaped sector between its inner and outer borders). <p>If there is a linedef path between the selected linedefs, this function is equivalent to using "add-linedefs-and-split-sector" twice, except that you end up with two sectors, not three. FIXME - need a figure. 
<p>If the selected linedefs happen to share a vertex, only one linedef is created and the new sector is triangular. The linedefs must not be the same or superimposed. FIXME - need a figure. <p>Linedefs A and B are created with all their attributes set to zero and their middle textures set to "<tt>-</tt>". They're oriented so that their right sidedefs face the new sector. Linedef A is the lowest-numbered one. <p>The following restrictions apply&nbsp;: <ul> <li>selected linedefs must not have two sidedefs in the same sector, <li>selected linedefs must not share more than one sector, <li>there must be no linedef that would be superimposed with A or B. </ul> <p>These restrictions are stricter than necessary. They may be lifted in the future if time and brain power permit. <p>Because this function is not aware of the geometry of the selected linedefs, but only of the sectors they face, it can be used to split sectors in impossible ways. FIXME - need a figure. <p>This function assumes the space between the selected linedefs is empty. If there are any other linedefs there, you will have to fix their sector references manually afterwards. <p>The key [<kbd>Ctrl-k</kbd>] is a shortcut to this function. </dl> <h3><a name="toc-3-20">3.20. Setting/toggling/clearing linedef flags</a></h3> <p>In linedefs mode, press [<kbd>a</kbd>] to open the "set linedef flags" popup menu. <br>Press [<kbd>b</kbd>] to open the "toggle linedef flags" popup menu. <br>Press [<kbd>c</kbd>] to open the "clear linedef flags" popup menu. <h3><a name="toc-3-21">3.21. Undoing</a></h3> <p>As of this release, undoing is not implemented. <h3><a name="toc-3-22">3.22. Cut-and-paste from one level to another</a></h3> <p>As of this release, it's not possible. <h3><a name="toc-3-23">3.23. Using the flat/patch/sprite/texture viewer</a></h3> <p>Flats, patches, sprites and textures are browsed and selected with the same basic tool, snappily named the flat/patch/sprite/texture viewer. How do you use that beast&nbsp;? <p><img src="vflat.png" alt="Browsing flats" WIDTH=315 HEIGHT=86> <p><img src="vsprite.png" alt="Browsing sprites" WIDTH=379 HEIGHT=149> <p><img src="vtexture.png" alt="Browsing textures" WIDTH=507 HEIGHT=149> <p>You can change the current name simply by typing it. Use [<kbd>Backspace</kbd>] to erase the rightmost character. As you type, the list of names scrolls automatically so that the first line of the list shows the first valid name that begins with the characters in the entry box. If the current name does not belong to the list, it is grayed out and you can't validate. If it does, the corresponding image is shown in the box to the right. <p>As in certain Unix programs, [<kbd>Tab</kbd>] is used for name completion. If the current name is "<code>SW1</code>" and you press [<kbd>Tab</kbd>], the current name is set to the first name that begins with "<code>SW1</code>". <p>[<kbd>Ctrl-u</kbd>] and [<kbd>Ctrl-w</kbd>] clear the current name, like pressing [<kbd>Backspace</kbd>] repeatedly would. <p>You can also move through the list without typing names&nbsp;: <ul> <li>[<kbd>Up</kbd>] and [<kbd>Down</kbd>] move to the previous or next name in the list. <li>[<kbd>Pgup</kbd>] and [<kbd>Pgdn</kbd>] move one page at a time. <li>[<kbd>Home</kbd>] and [<kbd>End</kbd>] move you to the first or last name of the list. <li>[<kbd>Ctrl-b</kbd>] does the same thing as [<kbd>Pgup</kbd>]. <li>[<kbd>Ctrl-f</kbd>] and [<kbd>Ctrl-v</kbd>] do the same thing as [<kbd>Pgdn</kbd>]. 
</ul> <p>To validate the current name as your choice, press [<kbd>Return</kbd>]. If the current name is grayed-out (invalid), [<kbd>Return</kbd>] does not work. To cancel, press [<kbd>Esc</kbd>]. <p>[<kbd>Shift-F1</kbd>] saves the current image to file, in packed PPM (<tt>P6</tt>) format. This can be useful for textures, which cannot be directly extracted from wads. Transparent areas are represented according to the DeuTex convention by colour rgb:0/2f/2f (<img src="002f2f.png" alt="Swatch" width="8" height="8">). The file is created in the working directory and its name is the name of the image lowercased and suffixed by "<tt>.ppm</tt>". For example, flat "<tt>FLOOR0_7</tt>" would be saved as "<tt>./floor0_7.ppm</tt>". If the file already exists, it's mercilessly overwritten. <p>Note that under MS-DOS, newlines in the PPM header are in Unix format (LF, not CR LF).</p> <p>There are a few bugs left in this function. Textures are clipped to the dimensions of the viewer window. Under MS-DOS, some "<tt>VILE*</tt>" sprites will not save, because their names contain characters that are not allowed in file names ("<tt>[</tt>", "<tt>\</tt>" and "<tt>]</tt>"). <p>[<kbd>F1</kbd>] prints to stdout the location (file name and offset) of the current flat, patch or sprite. For debugging purposes. <p>When viewing textures, you may press [<kbd>Ctrl-a</kbd>] and [<kbd>Ctrl-x</kbd>] to cycle through the patches that make up the texture. <p>Finally, when viewing sprites, two other useful commands are [<kbd>Ctrl-n</kbd>] and [<kbd>Ctrl-p</kbd>]. These go respectively to the next and previous group of sprites (all sprites that have the same first 4 letters belong to the same group). It's handy to skip the 69 animation frames of the heavy weapon dude at once. <h3><a name="toc-3-24">3.24. Saving</a></h3> <p>There are two ways to save&nbsp;: <ul> <li><p>Press [<kbd>F2</kbd>] or do "File-&gt;Save". If the level comes from the iwad or if its level name or file name is unknown (because it's a new level), you are prompted for a level name (EnMn or MAPnm) and a file name first. If the file name has changed since the last time you saved, and if a file of that name already exists, you are asked for confirmation before saving. <p>The level in the window is saved in the specified file, in the pwad format, under the specified level name. <li><p>Press [<kbd>F3</kbd>] or do "File-&gt;Save as". This procedure is identical to the "Save" procedure except that the query for a level name and file name is unconditional. </ul> <p>FIXME -- this needs to be written... <p>If Yadex is in Hexen mode, saving is disabled because writing levels in the Hexen format is not supported as of this release. <h3><a name="toc-3-25">3.25. Closing a window</a></h3> <p>There are two ways to close a window&nbsp;: <ul> <li><p>Press [<kbd>Esc</kbd>]. If you have made changes since the last time you saved, or if you have never saved, Yadex requires confirmation before closing the window. <li><p>Press [<kbd>q</kbd>] or do "File-&gt;Quit". If you have made changes since the last time you saved, or if you have never saved, Yadex first saves the level, using the same procedure as when you press [<kbd>F2</kbd>]. After that the window is closed. If the saving procedure is interactive (for example because it's the first time you save), and if you cancel it, the window is not closed. </ul> <hr noshade> <h2><a name="toc-4">4. Variables and configuration</a></h2> <p> Yadex has internal variables that serve to configure its behaviour.
Variables have a name (a string of letters, digits and underscores), a type and a value. The type constrains the value (boolean, integer, strings, etc.). </p> <p> Variables can be set in four ways&nbsp;: </p> <ul> <li>from the Preferences menu, <li>from the command line, <li>through environment variables, <li>from the configuration files. </ul> <p> The settings from the Preferences menu override those from the command line options, which in turn override those from the environment variables, which in turn override those from the configuration files. </p> <h3><a name="toc-4-1">4.1. Variables</a></h3> <p> You can get a list of variables with their description and value by typing <code>set</code> at the <code>yadex:</code> prompt. </p> <h4><a name="toc-4-1-1">4.1.1. The font</a></h4> <p> By default, Yadex uses the default font of your system (that is often "<code>fixed</code>" a.k.a. "<code>6x13</code>"). But you can use the font of your choice by using the "<code>-fn </code><var>font_name</var>" option or setting "<code>font&nbsp;=&nbsp;</code><var>font_name</var>" in <code>yadex.cfg</code>. You should use a fixed-width font and not one that is too large for the size of your Yadex window or the display will look ugly. If Yadex does not find the specified font, it emits a warning and falls back on the default system font. </p> <p> You can get a list of all available fonts on your system with the command <code>xlsfonts</code>. </p> <h4><a name="toc-4-1-2">4.1.2. Mouse wheel and other mouse issues</a></h4> <p> The 3rd mouse button (middle button) is not used yet but you can bet it will be in a future version. So a 3-button mouse is of course recommended. </p> <p> You can swap the left and right buttons by setting the <a href="#param_swap_buttons"><code>swap_buttons</code></a> variable. </p> <p> Buttons 4 and 5 are used for zooming in and out. Wheel mice typically have the wheel mapped to buttons 4 and 5 in such a way that when you roll the wheel "up" (forwards), button4 press events are generated and when you roll the wheel "down" (backwards), button5 press events are generated. </p> <p> To configure your X server in the way described above, check the vendor documentation. I have XFree86 and a Logitech Pilot Mouse + and this is what I put in my <code>XF86Config</code> file&nbsp;: </p> <blockquote> <pre>Section "Pointer" Protocol "Intellimouse" Device "/dev/mouse" Buttons 5 ZAxisMapping 4 5</pre> </blockquote> <h3><a name="toc-4-2">4.2. Preferences</a></h3> <p> When you create objects, their properties are automatically given default values. Some of those default values can be set from the configuration file or from the command line but also through the Preferences menu, which pops up when you press [<kbd>F5</kbd>]. </p> <p> The Preferences menu lets you set the <code>default_*</code> variables interactively. The settings made from the Preferences menu are lost when you exit Yadex. </p> <h3><a name="toc-4-3">4.3. Command line</a></h3> <p> See the <a href="#opt">Options</a> section. </p> <h3><a name="toc-4-4">4.4. Environment variables</a></h3> <p> See the <a href="#env">Environment variables</a> section. </p> <h3><a name="toc-4-5">4.5. Configuration files</a></h3> <h4><a name="toc-4-5-1">4.5.1. Contents of configuration files</a></h4> <p>Configuration files are text files. White space at the beginning of a line is ignored. There are three kinds of lines&nbsp;: <dl> <dt>Empty lines (lines containing only white space) <dd> <p> No effect.
</p> <dt>Comments (lines whose first non white space character is a <code>#</code>) <dd> <p> No effect. </p> <dt>Variable assignments (lines of the form "<i>name</i> <code>=</code> <i>value</i>") <dd> <p>The effect is to assign <i>value</i> to the variable <i>name</i>. <i>name</i> is a string of one or more <i>identifier characters</i> i.e. letters, digits and underscores. <i>value</i> is a string of zero or more non white space characters. There may be any amount of white space around and between the three tokens, including none at all. </p> <p> Please note that&nbsp;: </p> <ul> <li> <p> <code>#</code> is special only at the beginning of a line. Therefore you can't put a comment on the same line as a variable assignment. </p> </li> <li> <p> Because white space is a token delimiter and there's no way to quote or escape it, including white space in a value is impossible. </p> </li> <li> <p> Yadex's idea of what is a letter or digit is deliberately locale-independent. Whether it's white space or not, on the other hand, is locale-dependent (<code>isspace</code>(3)). </p> </li> </ul> </dl> <p> Parse errors are not fatal. In general, they cause a warning message to be printed and the entire line to be ignored. This is to facilitate the sharing of configuration files across versions of Yadex. </p> <p> The configuration file is self-documenting. Look at the sample configuration file in the Yadex distribution to see what options are available. </p> <h4>4.5.2. <a name="config_locate">Locating configuration files</a></h4> <p> This is the process by which Yadex turns a configuration file name into one or more actual pathnames. </p> <p> If the specified name is absolute, the location yields exactly that. A name is considered absolute if and only if it begins with a <code>/</code>. </p> <p> If the specified name is relative, Yadex uses a search path, much like the shell uses <code>$PATH</code> to locate commands. The composition of the search path depends on the installation prefix (the argument to <code>./configure --prefix</code>). </p> <dl> <dt><code>/usr/local</code> or <code>/usr</code>&nbsp;: <dd> <ol> <li><code>.</code> <li><code>~/.yadex/1.7.0</code> <li><code>~/.yadex</code> <li><code>/etc/yadex/1.7.0</code> <li><code>/etc/yadex</code> </ol> <dt><code>/opt/<var>some/path</var></code>&nbsp;: <dd> <ol> <li><code>.</code> <li><code>~/.yadex/1.7.0</code> <li><code>~/.yadex</code> <li><code>/etc/opt/<var>some/path</var></code> </ol> <dt><code><var>/some/path</var></code>&nbsp;: <dd> <ol> <li><code>.</code> <li><code>~/.yadex/1.7.0</code> <li><code>~/.yadex</code> <li><code><var>/some/path</var>/etc</code> </ol> </dl> <p> The search path may be walked in either direction, depending on the application. For each element in the search path, a pathname is made by concatenating the path and the specified name. The pathname is tested with <code>stat</code>(2). The result of the location is the list of the pathnames that exist and are not directories. </p> <p> For example, assuming Yadex was compiled with the <code>/usr/local</code> prefix, a front-to-back search for a file named <code>foo/bar.cfg</code> would return those of the following pathnames that exist and are not directories&nbsp;: </p> <ol> <li><code>./foo/bar.cfg</code> <li><code>~/.yadex/1.7.0/foo/bar.cfg</code> <li><code>~/.yadex/foo/bar.cfg</code> <li><code>/etc/yadex/1.7.0/foo/bar.cfg</code> <li><code>/etc/yadex/foo/bar.cfg</code> </ol> <h4><a name="toc-4-5-3">4.5.3. 
Default configuration files</a></h4> <p> By default, Yadex performs a back-to-front search for files named <code>yadex.cfg</code> and reads all the matches in order. For example, assuming Yadex was compiled with the <code>/usr/local</code> prefix, the following configuration files will be read if they exist&nbsp;: </p> <ol> <li><code>/etc/yadex/yadex.cfg</code> <li><code>/etc/yadex/1.7.0/yadex.cfg</code> <li><code>~/.yadex/yadex.cfg</code> <li><code>~/.yadex/1.7.0/yadex.cfg</code> <li><code>./yadex.cfg</code> </ol> <p> Because the search path is walked back-to-front, any parameter settings in a local configuration file override the settings inherited from less local configuration files. For example, assuming <code>/etc/yadex/1.7.0/yadex.cfg</code> contains&nbsp;: </p> <blockquote> <code> a = old<br> b = old </code> </blockquote> <p> and <code>./yadex.cfg</code> contains&nbsp;: </p> <blockquote> <code> a = new<br> c = new </code> </blockquote> <p> the net effect is&nbsp;: </p> <blockquote> <code> a = new<br> b = old<br> c = new </code> </blockquote> <h4><a name="toc-4-5-4">4.5.4. User-specified configuration files</a></h4> <p> The <code>-f</code> option can be used to override Yadex's default choice of configuration files. </p> <p> If Yadex is started with the <code>-f</code> option, the default configuration files are not used. Instead, Yadex performs a front-to-back search for the argument of the <code>-f</code> option, according to the algorithm described in <a href="#config_locate">Locating configuration files</a>. The first match is used as a configuration file. Any other matches are ignored. </p> <p> For example, assuming Yadex was built with the <code>/usr/local</code> prefix and run with the <code>-f&nbsp;myown.cfg</code> option, it will use the first file in this list that exists and is not a directory&nbsp;: </p> <ol> <li><code>./myown.cfg</code> <li><code>~/.yadex/1.7.0/myown.cfg</code> <li><code>~/.yadex/myown.cfg</code> <li><code>/etc/yadex/1.7.0/myown.cfg</code> <li><code>/etc/yadex/myown.cfg</code> </ol> <h4><a name="toc-4-5-5">4.5.5. Organising your configuration files</a></h4> <p> As Yadex looks for its configuration files in several places, you are put in the situation of having to decide which one to use. Here are a few guidelines to help you make a decision. </p> <dl> <dt>Avoid redundancy <dd> <p> Take advantage of Yadex's ability to use more than one config file. Put each setting in the right place. Your config files will be easier to read and maintain. </p> <dt>Versionless vs. versionful files <dd> <p> Versionful files (<code>/etc/yadex/1.7.0/yadex.cfg</code> and <code>~/.yadex/1.7.0/yadex.cfg</code>) are useful to store settings that only work with a particular version of Yadex. But remember that the versionful system-wide file can be clobbered by a reinstallation. </p> <p> Versionless files (<code>/etc/yadex/yadex.cfg</code> and <code>~/.yadex/yadex.cfg</code>) have the advantage of being seen by all versions of Yadex. If you enter your settings there, you won't have to enter them again when you install a new version of Yadex, as long as the config file syntax remains compatible. </p> <dt>System-wide vs. user vs. local files <dd> <p> System-wide configuration files have the theoretical advantage of being seen by all users, which makes them a good place to put settings that apply to everybody, such as the pathnames of the iwads. Of course, it doesn't make any difference for the typical (single-user) Yadex installation. 
</p> <p> Per-user files (the ones in <code>~/.yadex</code>) are appropriate for settings that have to do with personal preferences (fonts etc.). They also have the advantage over system-wide files of not being overwritten by the installation procedure (typing <code>make install</code> a second time will wipe the versionful system file). </p> <p> Local files (<code>./yadex.cfg</code>) are well suited to settings that vary from project to project, such as default textures. </p> </dl> <p> Concrete application&nbsp;: Yadex was compiled with <code>/usr/local</code> as the prefix. You are the only user on your system. All your levels are for Doom&nbsp;2, except the one in <code>~/herewad</code> which is for Heretic. One of your Doom&nbsp;2 levels (<code>~/cave</code>) is quite dark and requires a different default light level. You usually run Yadex from the directory where the pwad is. </p> <p> There are several ways to configure Yadex for this setup. Here's a solution in accordance with the above guidelines&nbsp;: </p> <p> <code>~/.yadex/yadex.cfg</code>&nbsp;: </p> <blockquote> <pre>iwad2 = /somewhere/doom2.wad iwad3 = /somewhere/else/heretic.wad game = doom2</pre> </blockquote> <p> <code>~/herewad/yadex.cfg</code>&nbsp;: </p> <blockquote> <pre>game = heretic default_floor_texture = floor11 default_ceiling_texture = floor06 default_lower_texture = sandsq2 default_middle_texture = sandsq2 default_upper_texture = sandsq2 default_thing = 66</pre> </blockquote> <p> <code>~/cave/yadex.cfg</code>&nbsp;: </p> <blockquote> <pre>default_light_level = 112</pre> </blockquote> <p> Note how <code>game</code> is set in the per-user config file (the general case) and overridden in the local config file for the Heretic level (the exception). </p> <hr noshade> <h2><a name="toc-5">5. Game definition files</a></h2> <p> Most of Yadex's knowledge about thing numbers and names, linedef types etc. is retrieved from so-called game definition files. When you specify "<code>-g&nbsp;foo</code>", it in fact means that Yadex should use the game definition file named "<code>foo.ygd</code>". </p> <h3><a name="toc-5-1">5.1. Contents of game definition files</a></h3> <p> For Yadex to recognize the file as a game definition file, it must begin with a certain magic string. If it doesn't, Yadex will print an error message and bail out. The reason for being so fussy is to avoid headaches when the game definition file format changes and there are old game definition files lying around in your directories. </p> <p> Game definition files are not supposed to be modified in normal use. Their format is described in <a href="ygd.html"><code>ygd.html</code></a>. </p> <h3>5.2. <a name="ygd_locate">Locating game definition files</a></h3> <p> The algorithm used to locate game definition files is exactly the same as the one described in <a href="#config_locate">Locating configuration files</a>. Only the search path is different. As is the case for config files, it depends on the prefix for which Yadex was compiled.
</p> <dl> <dt><code>/usr</code>&nbsp;: <dd> <ol> <li><code>.</code> <li><code>~/.yadex/1.7.0</code> <li><code>~/.yadex</code> <li><code>/usr/share/games/yadex/1.7.0</code> <li><code>/usr/share/games/yadex</code> </ol> <dt><code>/usr/local</code>&nbsp;: <dd> <ol> <li><code>.</code> <li><code>~/.yadex/1.7.0</code> <li><code>~/.yadex</code> <li><code>/usr/local/share/games/yadex/1.7.0</code> <li><code>/usr/local/share/games/yadex</code> </ol> <dt><code>/opt/<var>some/path</var></code>&nbsp;: <dd> <ol> <li><code>.</code> <li><code>~/.yadex/1.7.0</code> <li><code>~/.yadex</code> <li><code>/opt/<var>some/path</var>/share</code> </ol> <dt><code><var>/some/path</var></code>&nbsp;: <dd> <ol> <li><code>.</code> <li><code>~/.yadex/1.7.0</code> <li><code>~/.yadex</code> <li><code><var>/some/path</var>/share</code> </ol> </dl> <h3><a name="toc-5-3">5.3. Use of game definition files</a></h3> <p> On startup, Yadex loads the game definition file corresponding to the <code>game</code> variable. It performs a front-to-back search (see <a href="#ygd_locate">Locating game definition files</a>) for a file named <code><var>game</var>.ygd</code>. The first match is used. </p> <hr noshade> <h2><a name="toc-6">6. Known bugs</a></h2> <p>Though Yadex has quite a few bugs, most of them are inconsequential, i.e. not likely to make Yadex unusable or damage precious data. On the other hand, <strong>some can bite</strong>. Those are the ones you should know about&nbsp;: <ul> <li> <p>Command line options that take an argument need to have a space between their argument and themselves.</p> <li> <p>Exposure events are not always handled. If the Yadex window turns black, back out by pressing [<kbd>Esc</kbd>], several times if necessary.</p> <li> <p>If you edit a level from a file that contains several and save it, all the other levels, textures, flats, etc. from that file are <strong>lost</strong>.</p> <li> <p>Out-of-memory conditions and certain wad I/O errors are handled in a <em>very</em> ungraceful manner. Basically, Yadex aborts and if you haven't saved, you've lost all your work. My advice&nbsp;:</p> <ul> <li>Make sure you have at least a couple of megs of free memory. <li>Save often and make backups. <li>Don't use wads that contain errors. <li>Make sure you have enough disk space and adequate permissions. <li>Don't modify files with an external program while Yadex uses them. For example, if you use a texture wad, don't rebuild it without quitting Yadex first. </ul> </ul> <p>Check <a href="TODO"><code>TODO</code></a> for a complete list of things to fix. <hr noshade> <h2>7. <a name="games">Supported games</a></h2> <h3><a name="toc-7-1">7.1. Doom</a></h3> <p>Supported. Linedef types and sector types added in v. 1.666 are supported and marked with "<code>[v1.6]</code>". <h3><a name="toc-7-2">7.2. Doom II</a></h3> <p>Supported. <h3><a name="toc-7-3">7.3. Doom alpha</a></h3> <p>Mostly supported. There are three Doom alpha versions that I know of&nbsp;: <a href="ftp://3darchives.in-span.net/pub/idgames/historic/doom0_2.zip">0.2</a>, <a href="ftp://3darchives.in-span.net/pub/idgames/historic/doom0_4.zip">0.4</a> and <a href="ftp://3darchives.in-span.net/pub/idgames/historic/doom0_5.zip">0.5</a>. They're supported to varying degrees of completeness. All graphics resources (flats, patches, <code>PLAYPAL</code>, sprites and textures) are supported for all versions with the exception of version 0.2, for which textures are not supported. <p>Reading levels does not work either for version 0.2. 
For versions 0.4 and 0.5, it works, including levels <code>E1M10</code> through <code>E1M13</code>. There are some oddities in the levels, such as linedefs with negative types or things with strange bits set in their flags, but I don't think it's Yadex's fault. <p>You can edit and save alpha levels but they will be saved in the regular Doom format, not the alpha format. Writing levels in the Doom alpha format is not supported at all and probably never will be, for three reasons. Firstly, there are parts of the format that I don't understand, and therefore don't know how to generate. Secondly, there is AFAIK no nodes builder for the alphas. Thirdly, even if the two above problems were solved, I don't expect many people to actually want to use the alphas because, from a player point of view, they're much less comfortable than the later versions of Doom. To say nothing of the fact that all we have is a DOS executable and no source code. That being said, I can't deny that it would indeed be cool to be able to generate wads for the alphas, if only for the fun of it. If you want to do it, I won't discourage you and I will gladly accept patches. <h3><a name="toc-7-4">7.4. Doom press release pre-beta</a></h3> <p>Supported. The different picture format is handled. Thing types 2016 (evil sceptre) and 2017 (unholy bible) are defined in <code>doom02.ygd</code>, <code>doom04.ygd</code>, <code>doom05.ygd</code>, <code>doompr.ygd</code>, <code>doom.ygd</code> and <code>doom2.ygd</code> and marked with "<code>[PR]</code>". However, if you're using <code>betalevl.wad</code> and <code>betagrph.wad</code> supplied with MBF, you won't see the corresponding sprites because Yadex does not support sprites in pwads (yet). To see the sprites for the evil sceptre and the unholy bible as well as the alternate versions of the other sprites, you need to use the iwad from <a href="ftp://3darchives.in-span.net/pub/idgames/historic/doomprbt.zip" ><code>doomprbt.zip</code></a>. <h3><a name="toc-7-5">7.5. Final Doom</a></h3> <p>Supported, AFAIK. The iwads have no <code>F1_START/F1_END</code> labels and some levels contain a thing of type 0, which made older versions of Yadex crash. This has been fixed in version 1.1.0. <h3><a name="toc-7-6">7.6. Heretic</a></h3> <p>Supported. <h3><a name="toc-7-7">7.7. Hexen</a></h3> <p>Very rudimentary support. You can load Hexen wads and edit the maps but most of Hexen's special features are stripped off on reading. So don't try to save a Hexen level after editing it; you won't be able to use it with Hexen. And if you're saving it to the file it comes from, you'll lose your file. <strong>Don't save when in Hexen mode.</strong> Here's the breakdown&nbsp;: <ul> <li>Most thing types, and some sector and linedef types are missing from <code>hexen.ygd</code>. <li>For things, <code>tid</code>, <code>z</code>, <code>special</code> and <code>arg1</code> through <code>arg5</code> are discarded. There is currently no way to examine or manipulate them from Yadex. <li>For linedefs, <code>arg1</code> is put into <code>tag</code>. <code>arg2</code> through <code>arg5</code> are discarded and there is currently no way to examine or manipulate them from Yadex. <li>The Hexen-specific thing and linedef flags are not recognized (but you can manipulate them). <li><code>BEHAVIOR</code> is ignored. <li><code>MAPINFO</code> is ignored. </ul> <h3><a name="toc-7-8">7.8.
Strife</a></h3> <p>Supported, except that&nbsp;: <ul> <li>many thing types are missing, <li>some thing flags are not known, <li>many linedef types are missing or wrong, <li>some linedef flags might be missing, <li>some sector types might be missing. </ul> And perhaps other things as well. If you're a Strife hacker, your help would be welcome. <p>If you have Strife 1.0, you should use <code>-g&nbsp;strife10</code>. Strife&nbsp;1.0 uses the same format as Doom for textures, Strife&nbsp;1.1 and later use a different format. <h3><a name="toc-7-9">7.9. Ultimate Doom</a></h3> <p>Supported. <h3><a name="toc-7-10">7.10. Boom</a></h3> <p>Kind of supported, only very inconvenient to use &lt;g&gt;. <dl> <dt>New linedef flag "P" (pass through) <dd>Supported. <dt>New linedef types <dd>You can enter arbitrary linedef types ("enter number"). <dt>New sector types <dd>You can enter arbitrary sector types ("enter number") <dt>New things flags "N" (not in deathmatch) and "C" (not in coop) <dd>Supported. <dt>New things types 5001 and 5002 <dd>Supported. <dt>Special lumps <code>SWITCHES</code> and <code>ANIMATED</code> <dd>Unsupported. </dl> In progress... <h3><a name="toc-7-11">7.11. EDGE</a></h3> <p>The default DDF definitions included in EDGE 1.24 have been added to <code>doom.ygd</code> and <code>doom2.ygd</code>, marked with "<code>[EDGE]</code>". When the pointer moves over a sector that has extrafloors, the object info window shows the information relevant to those. To see the sprite for the night vision goggles (thing #7000), you must load <code>edge.wad</code>. <p>As of 1.5.0, the support for DDF is static&nbsp;: custom DDF files or DDF lumps are ignored. In other words, you are limited to the default DDF definitions. <h3><a name="toc-7-12">7.12. MBF</a></h3> <p>Mostly supported, I think. Thing 888 (dog), thing flag 80h (friendly), linedef types 271 and 272 are all in. You can't see the sprite for the dog because it's embedded in the MBF executable. <h3><a name="toc-7-13">7.13. Other derivatives</a></h3> <p>Not supported so far.</p> <hr>AYM 2003-03-31 </body> </html>
Nekrofage/YadexSDA
doc/users_guide.html
HTML
gpl-2.0
89,884
<?php /** * Gerenciador Clínico Odontológico * Copyright (C) 2006 - 2009 * Autores: Ivis Silva Andrade - Engenharia e Design(ivis@expandweb.com) * Pedro Henrique Braga Moreira - Engenharia e Programação(ikkinet@gmail.com) * * Este arquivo é parte do programa Gerenciador Clínico Odontológico * * Gerenciador Clínico Odontológico é um software livre; você pode * redistribuí-lo e/ou modificá-lo dentro dos termos da Licença * Pública Geral GNU como publicada pela Fundação do Software Livre * (FSF); na versão 2 da Licença invariavelmente. * * Este programa é distribuído na esperança que possa ser útil, * mas SEM NENHUMA GARANTIA; sem uma garantia implícita de ADEQUAÇÂO * a qualquer MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a * Licença Pública Geral GNU para maiores detalhes. * * Você recebeu uma cópia da Licença Pública Geral GNU, * que está localizada na raíz do programa no arquivo COPYING ou COPYING.TXT * junto com este programa. Se não, visite o endereço para maiores informações: * http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (Inglês) * http://www.magnux.org/doc/GPL-pt_BR.txt (Português - Brasil) * * Em caso de dúvidas quanto ao software ou quanto à licença, visite o * endereço eletrônico ou envie-nos um e-mail: * * http://www.smileodonto.com.br/gco * smile@smileodonto.com.br * * Ou envie sua carta para o endereço: * * Smile Odontolóogia * Rua Laudemira Maria de Jesus, 51 - Lourdes * Arcos - MG - CEP 35588-000 * * */ include "../lib/config.inc.php"; include "../lib/func.inc.php"; include "../lib/classes.inc.php"; require_once '../lang/'.$idioma.'.php'; header("Content-type: text/html; charset=UTF-8", true); if(!checklog()) { echo '<script>Ajax("wallpapers/index", "conteudo", "");</script>'; die(); } if(!verifica_nivel('patrimonio', 'L')) { echo $LANG['general']['you_tried_to_access_a_restricted_area']; die(); } if($_GET['confirm_del'] == "delete") { mysql_query("DELETE FROM `patrimonio` WHERE `codigo` = '".mysql_real_escape_string($_GET['codigo'])."'") or die(mysql_error()); } ?> <div class="conteudo" id="conteudo_central"> <table width="100%" border="0" cellpadding="0" cellspacing="0" class="conteudo"> <tr> <td width="46%">&nbsp;&nbsp;&nbsp;<img src="patrimonio/img/patrimonio.png" alt="<?php echo $LANG['patrimony']['manage_patrimony']?>"> <span class="h3"><?php echo $LANG['patrimony']['manage_patrimony']?></span></td> <td width="27%" valign="bottom"> <?php echo $LANG['patrimony']['search_for']?> <input name="procurar" id="procurar" type="text" class="forms" size="20" maxlength="40" onkeyup="javascript:Ajax('patrimonio/pesquisa', 'pesquisa', 'pesquisa='%2Bthis.value)"> </td> <td width="23%" align="right" valign="bottom"><?php echo ((verifica_nivel('patrimonio', 'I'))?'<img src="imagens/icones/novo.png" alt="Incluir" width="19" height="22" border="0"><a href="javascript:Ajax(\'patrimonio/incluir\', \'conteudo\', \'\')">'.$LANG['patrimony']['include_new_item'].'</a>':'')?></td> <td width="2%" valign="bottom">&nbsp;</td> <td width="2%" valign="bottom">&nbsp;</td> </tr> </table> <div class="conteudo" id="table dados"><br> <table width="750" border="0" align="center" cellpadding="0" cellspacing="0" class="tabela_titulo"> <tr bgcolor="#009BE6"> <td colspan="6">&nbsp;</td> </tr> <tr> <td width="50" height="23" align="left"><?php echo $LANG['patrimony']['code']?></td> <td width="338" align="left"><?php echo $LANG['patrimony']['description']?> </td> <td width="130" align="left"><?php echo $LANG['patrimony']['sector']?></td> <td width="107" align="center"><?php echo $LANG['patrimony']['value']?></td> <td width="59"
align="center"><?php echo $LANG['patrimony']['edit_view']?></td> <td width="66" align="center"><?php echo $LANG['patrimony']['delete']?></td> </tr> </table> <div id="pesquisa"></div> <script> Ajax('patrimonio/pesquisa', 'pesquisa', 'pesquisa='); </script> </div>
artsjedi/GCO
http/patrimonio/gerenciar_ajax.php
PHP
gpl-2.0
4,182
/* * To change this template, choose Tools | Templates * and open the template in the editor. */ package utilesGUIx.plugin.toolBar; import java.io.Serializable; public interface ICompCMB extends Serializable { public String getText(); public String getCodigo(); }
Creativa3d/box3d
paquetes/src/utilesGUIx/plugin/toolBar/ICompCMB.java
Java
gpl-2.0
284
#!/bin/sh export reportpath="/var/raptor/scan_results" export zip_upload_dir="/var/raptor/uploads" export git_clone_dir="/var/raptor/clones" #IMPORTANT: Do NOT add the trailing slash after the URLs. ############PUBLIC############### #your-public-github-endpoint-here export ext_git_url="https://github.com" #your-public-github-api-endpoint-here export ext_git_apiurl="https://api.github.com" #your-public-github-username-here export ext_git_user="dpnishant" #your-public-github-token-here export ext_git_token="55230bdae78b690c187135e766a03a21d5e15d8c" ##############INTERNAL############# #your-internal-github-endpoint-here export int_git_url="" #your-internal-github-api-endpoint-here export int_git_apiurl="" #your-internal-github-username-here export int_git_user="" #your-internal-github-token-here export int_git_token="" cd backend gunicorn -c config.py server:app
dpnishant/raptor
start.sh
Shell
gpl-2.0
885
/* Copyright (C) 2008 - 2016 by Mark de Wever <koraq@xs4all.nl> Part of the Battle for Wesnoth Project http://www.wesnoth.org/ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY. See the COPYING file for more details. */ #define GETTEXT_DOMAIN "wesnoth-lib" #include "gui/widgets/menu_button.hpp" #include "gui/core/log.hpp" #include "gui/core/widget_definition.hpp" #include "gui/core/window_builder.hpp" #include "gui/core/window_builder/helper.hpp" #include "gui/core/register_widget.hpp" #include "gui/widgets/settings.hpp" #include "gui/widgets/window.hpp" #include "gui/dialogs/drop_down_menu.hpp" #include "config_assign.hpp" #include "sound.hpp" #include "utils/functional.hpp" #define LOG_SCOPE_HEADER get_control_type() + " [" + id() + "] " + __func__ #define LOG_HEADER LOG_SCOPE_HEADER + ':' namespace gui2 { // ------------ WIDGET -----------{ REGISTER_WIDGET(menu_button) menu_button::menu_button() : styled_widget(COUNT) , selectable_item() , state_(ENABLED) , retval_(0) , values_() , selected_() , toggle_states_() , keep_open_(false) { values_.push_back(config_of("label", this->get_label())); connect_signal<event::MOUSE_ENTER>( std::bind(&menu_button::signal_handler_mouse_enter, this, _2, _3)); connect_signal<event::MOUSE_LEAVE>( std::bind(&menu_button::signal_handler_mouse_leave, this, _2, _3)); connect_signal<event::LEFT_BUTTON_DOWN>(std::bind( &menu_button::signal_handler_left_button_down, this, _2, _3)); connect_signal<event::LEFT_BUTTON_UP>( std::bind(&menu_button::signal_handler_left_button_up, this, _2, _3)); connect_signal<event::LEFT_BUTTON_CLICK>(std::bind( &menu_button::signal_handler_left_button_click, this, _2, _3)); } void menu_button::set_active(const bool active) { if(get_active() != active) { set_state(active ? ENABLED : DISABLED); } } bool menu_button::get_active() const { return state_ != DISABLED; } unsigned menu_button::get_state() const { return state_; } void menu_button::set_state(const state_t state) { if(state != state_) { state_ = state; set_is_dirty(true); } } const std::string& menu_button::get_control_type() const { static const std::string type = "menu_button"; return type; } void menu_button::signal_handler_mouse_enter(const event::ui_event event, bool& handled) { DBG_GUI_E << LOG_HEADER << ' ' << event << ".\n"; set_state(FOCUSED); handled = true; } void menu_button::signal_handler_mouse_leave(const event::ui_event event, bool& handled) { DBG_GUI_E << LOG_HEADER << ' ' << event << ".\n"; set_state(ENABLED); handled = true; } void menu_button::signal_handler_left_button_down(const event::ui_event event, bool& handled) { DBG_GUI_E << LOG_HEADER << ' ' << event << ".\n"; window* window = get_window(); if(window) { window->mouse_capture(); } set_state(PRESSED); handled = true; } void menu_button::signal_handler_left_button_up(const event::ui_event event, bool& handled) { DBG_GUI_E << LOG_HEADER << ' ' << event << ".\n"; set_state(FOCUSED); handled = true; } void menu_button::signal_handler_left_button_click(const event::ui_event event, bool& handled) { assert(get_window()); DBG_GUI_E << LOG_HEADER << ' ' << event << ".\n"; sound::play_UI_sound(settings::sound_button_click); // If a button has a retval do the default handling. 
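	// Pop up the drop-down list over this button. If the user picks an entry, the
	// stored selection and the button label are updated and NOTIFY_MODIFIED is
	// fired; a non-zero retval_ additionally sets the owning window's return
	// value, which normally closes it.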
dialogs::drop_down_menu droplist(this->get_rectangle(), this->values_, this->selected_, this->get_use_markup(), this->keep_open_); if(droplist.show(get_window()->video())) { const int selected = droplist.selected_item(); // Saftey check. If the user clicks a selection in the dropdown and moves their mouse away too // quickly, selected_ could be set to -1. This returns in that case, preventing crashes. if(selected < 0) { return; } selected_ = selected; this->set_label(values_[selected_]["label"]); fire(event::NOTIFY_MODIFIED, *this, nullptr); if(callback_state_change_) { callback_state_change_(*this); } if(retval_ != 0) { if(window* window = get_window()) { window->set_retval(retval_); return; } } } // Toggle states are recorded regardless of dismissal type toggle_states_ = droplist.get_toggle_states(); /* In order to allow toggle button states to be specified by verious dialogs in the values config, we write the state * bools to the values_ config here, but only if a checkbox= key was already provided. The value of the checkbox= key * is handled by the drop_down_menu widget. * * Passing the dynamic_bitset directly to the drop_down_menu ctor would mean bool values would need to be passed to this * class independently of the values config by dialogs that use this widget. However, the bool states are also saved * in a dynamic_bitset class member which can be fetched for other uses if necessary. */ for(unsigned i = 0; i < values_.size(); i++) { ::config& c = values_[i]; if(c.has_attribute("checkbox")) { c["checkbox"] = toggle_states_[i]; } } handled = true; } void menu_button::set_values(const std::vector<::config>& values, int selected) { assert(static_cast<size_t>(selected) < values.size()); assert(static_cast<size_t>(selected_) < values_.size()); if(values[selected]["label"] != values_[selected_]["label"]) { set_is_dirty(true); } values_ = values; selected_ = selected; toggle_states_.resize(values_.size(), false); set_label(values_[selected_]["label"]); } void menu_button::set_selected(int selected) { assert(static_cast<size_t>(selected) < values_.size()); assert(static_cast<size_t>(selected_) < values_.size()); if(selected != selected_) { set_is_dirty(true); } selected_ = selected; set_label(values_[selected_]["label"]); } // }---------- DEFINITION ---------{ menu_button_definition::menu_button_definition(const config& cfg) : styled_widget_definition(cfg) { DBG_GUI_P << "Parsing menu_button " << id << '\n'; load_resolutions<resolution>(cfg); } /*WIKI * @page = GUIWidgetDefinitionWML * @order = 1_menu_button * * == menu_button == * * @macro = menu_button_description * * The following states exist: * * state_enabled, the menu_button is enabled. * * state_disabled, the menu_button is disabled. * * state_pressed, the left mouse menu_button is down. * * state_focused, the mouse is over the menu_button. 
* @begin{parent}{name="gui/"} * @begin{tag}{name="menu_button_definition"}{min=0}{max=-1}{super="generic/widget_definition"} * @begin{tag}{name="resolution"}{min=0}{max=-1}{super="generic/widget_definition/resolution"} * @begin{tag}{name="state_enabled"}{min=0}{max=1}{super="generic/state"} * @end{tag}{name="state_enabled"} * @begin{tag}{name="state_disabled"}{min=0}{max=1}{super="generic/state"} * @end{tag}{name="state_disabled"} * @begin{tag}{name="state_pressed"}{min=0}{max=1}{super="generic/state"} * @end{tag}{name="state_pressed"} * @begin{tag}{name="state_focused"}{min=0}{max=1}{super="generic/state"} * @end{tag}{name="state_focused"} * @end{tag}{name="resolution"} * @end{tag}{name="menu_button_definition"} * @end{parent}{name="gui/"} */ menu_button_definition::resolution::resolution(const config& cfg) : resolution_definition(cfg) { // Note the order should be the same as the enum state_t in menu_button.hpp. state.push_back(state_definition(cfg.child("state_enabled"))); state.push_back(state_definition(cfg.child("state_disabled"))); state.push_back(state_definition(cfg.child("state_pressed"))); state.push_back(state_definition(cfg.child("state_focused"))); } // }---------- BUILDER -----------{ /*WIKI_MACRO * @begin{macro}{menu_button_description} * * A menu_button is a styled_widget to choose an element from a list of elements. * @end{macro} */ /*WIKI * @page = GUIWidgetInstanceWML * @order = 2_menu_button * @begin{parent}{name="gui/window/resolution/grid/row/column/"} * @begin{tag}{name="menu_button"}{min=0}{max=-1}{super="generic/widget_instance"} * == menu_button == * * @macro = menu_button_description * * Instance of a menu_button. When a menu_button has a return value it sets the * return value for the window. Normally this closes the window and returns * this value to the caller. The return value can either be defined by the * user or determined from the id of the menu_button. The return value has a * higher precedence as the one defined by the id. (Of course it's weird to * give a menu_button an id and then override its return value.) * * When the menu_button doesn't have a standard id, but you still want to use the * return value of that id, use return_value_id instead. This has a higher * precedence as return_value. * * List with the menu_button specific variables: * @begin{table}{config} * return_value_id & string & "" & The return value id. $ * return_value & int & 0 & The return value. $ * * @end{table} * @end{tag}{name="menu_button"} * @end{parent}{name="gui/window/resolution/grid/row/column/"} */ namespace implementation { builder_menu_button::builder_menu_button(const config& cfg) : builder_styled_widget(cfg) , retval_id_(cfg["return_value_id"]) , retval_(cfg["return_value"]) , options_() { for(const auto& option : cfg.child_range("option")) { options_.push_back(option); } } widget* builder_menu_button::build() const { menu_button* widget = new menu_button(); init_control(widget); widget->set_retval(get_retval(retval_id_, retval_, id)); if(!options_.empty()) { widget->set_values(options_); } DBG_GUI_G << "Window builder: placed menu_button '" << id << "' with definition '" << definition << "'.\n"; return widget; } } // namespace implementation // }------------ END -------------- } // namespace gui2
TakingInitiative/wesnoth
src/gui/widgets/menu_button.cpp
C++
gpl-2.0
9,996
/* This file is part of Shuriken Beat Slicer. Copyright (C) 2014, 2015 Andrew M Taylor <a.m.taylor303@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/> or write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "audiofilehandler.h" #include <samplerate.h> #include <QDir> #include <QDebug> //================================================================================================== // Public: AudioFileHandler::AudioFileHandler() { // Initialise sndlib so we can read header info not available through aubio's API // and also open some audio file formats that may not be supported via aubio const int errorCode = sndlibInit(); if ( errorCode == MUS_ERROR ) { s_errorTitle = "Error initialising sndlib!"; s_errorInfo = "It may not be possible to read some audio files"; } } SharedSampleBuffer AudioFileHandler::getSampleData( const QString filePath ) { return getSampleData( filePath, 0, 0 ); } SharedSampleBuffer AudioFileHandler::getSampleData( const QString filePath, const int startFrame, const int numFramesToRead ) { Q_ASSERT( ! filePath.isEmpty() ); QByteArray charArray = filePath.toLocal8Bit(); const char* path = charArray.data(); SharedSampleBuffer sampleBuffer; #ifdef ENABLE_AUBIO_FILE_IO // First try using aubio to load the file; if that fails, try using sndlib sampleBuffer = aubioLoadFile( path, startFrame, numFramesToRead ); #else // First try using libsndfile to load the file; if that fails, try using sndlib sampleBuffer = sndfileLoadFile( path, startFrame, numFramesToRead ); #endif if ( sampleBuffer.isNull() ) { sampleBuffer = sndlibLoadFile( path, startFrame, numFramesToRead ); } return sampleBuffer; } SharedSampleHeader AudioFileHandler::getSampleHeader( const QString filePath ) { Q_ASSERT( ! filePath.isEmpty() ); QByteArray charArray = filePath.toLocal8Bit(); const char* path = charArray.data(); SharedSampleHeader sampleHeader; // If `0` is passed as `samplerate` param to new_aubio_source, the sample rate of the original file is used. 
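    // Header reading is done in two stages: aubio (tried first, below) provides the
    // sample rate and channel count, while sndlib's mus_sound_* queries supply the
    // header type name and bits per sample. If aubio cannot open the file at all,
    // the sndlib-only branch further down fills in the whole header instead.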
aubio_source_t* aubioSource = new_aubio_source( const_cast<char*>(path), 0, 4096 ); if ( aubioSource != NULL ) // First try using aubio to read the header { sampleHeader = SharedSampleHeader( new SampleHeader ); sampleHeader->sampleRate = aubio_source_get_samplerate( aubioSource ); sampleHeader->numChans = aubio_source_get_channels( aubioSource ); del_aubio_source( aubioSource ); const int headerCode = mus_sound_header_type( path ); // If sndlib recognises the audio file type if ( mus_header_type_p( headerCode ) ) { sampleHeader->format = mus_header_type_name( headerCode ); sampleHeader->bitsPerSample = mus_sound_bits_per_sample( path ); } else { sampleHeader->bitsPerSample = 0; } } else // If aubio can't read the header, try using sndlib { const int headerCode = mus_sound_header_type( path ); // If sndlib recognises the audio file type if ( mus_header_type_p( headerCode ) ) { sampleHeader = SharedSampleHeader( new SampleHeader ); sampleHeader->format = mus_header_type_name( headerCode ); sampleHeader->numChans = mus_sound_chans( path ); sampleHeader->sampleRate = mus_sound_srate( path ); sampleHeader->bitsPerSample = mus_sound_bits_per_sample( path ); } } // It's essential that the sample rate is known if ( ! sampleHeader.isNull() && sampleHeader->sampleRate < 1.0 ) { sampleHeader.clear(); } return sampleHeader; } QString AudioFileHandler::saveAudioFile( const QString dirPath, const QString fileBaseName, const SharedSampleBuffer sampleBuffer, const int currentSampleRate, const int outputSampleRate, const int sndFileFormat, const bool isOverwriteEnabled ) { Q_ASSERT( currentSampleRate != 0 ); const int hopSize = 8192; const int numChans = sampleBuffer->getNumChannels(); bool isSuccessful = false; QDir saveDir( dirPath ); QString filePath; if ( saveDir.exists() ) { filePath = saveDir.absoluteFilePath( fileBaseName ); SF_INFO sfInfo; memset( &sfInfo, 0, sizeof( SF_INFO ) ); sfInfo.samplerate = outputSampleRate; sfInfo.channels = numChans; sfInfo.format = sndFileFormat; switch ( sndFileFormat & SF_FORMAT_TYPEMASK ) { case SF_FORMAT_WAV: filePath.append( ".wav" ); break; case SF_FORMAT_AIFF: filePath.append( ".aiff" ); break; case SF_FORMAT_AU: filePath.append( ".au" ); break; case SF_FORMAT_FLAC: filePath.append( ".flac" ); break; case SF_FORMAT_OGG: filePath.append( ".ogg" ); break; default: qDebug() << "Unknown format: " << sndFileFormat; break; } Q_ASSERT( sf_format_check( &sfInfo ) ); if ( isOverwriteEnabled || ! QFileInfo( filePath ).exists() ) { SNDFILE* fileID = sf_open( filePath.toLocal8Bit().data(), SFM_WRITE, &sfInfo ); if ( fileID != NULL ) { if ( outputSampleRate == currentSampleRate ) { isSuccessful = sndfileSaveAudioFile( fileID, sampleBuffer, hopSize ); } else { const qreal sampleRateRatio = (qreal) outputSampleRate / (qreal) currentSampleRate; Array<float> interleavedBuffer; isSuccessful = convertSampleRate( sampleBuffer, sampleRateRatio, interleavedBuffer ); if ( isSuccessful ) { isSuccessful = sndfileSaveAudioFile( fileID, interleavedBuffer, hopSize * numChans ); } } sf_write_sync( fileID ); sf_close( fileID ); } else // Could not open file for writing { s_errorTitle = "Couldn't open file for writing"; s_errorInfo = sf_strerror( NULL ); isSuccessful = false; } } else // File already exists and overwriting is not enabled { s_errorTitle = "Couldn't overwrite existing file"; s_errorInfo = "The file " + filePath + " already exists and could not be overwritten"; isSuccessful = false; } } if ( ! 
isSuccessful ) { filePath.clear(); } return filePath; } //================================================================================================== // Private Static: QString AudioFileHandler::s_errorTitle; QString AudioFileHandler::s_errorInfo; void AudioFileHandler::interleaveSamples( const SharedSampleBuffer inputBuffer, const int numChans, const int inputStartFrame, const int numFrames, Array<float>& outputBuffer ) { for ( int chanNum = 0; chanNum < numChans; ++chanNum ) { const float* sampleData = inputBuffer->getReadPointer( chanNum, inputStartFrame ); for ( int frameNum = 0; frameNum < numFrames; ++frameNum ) { outputBuffer.set( numChans * frameNum + chanNum, // Index sampleData[ frameNum ] ); // Value } } } void AudioFileHandler::deinterleaveSamples( Array<float>& inputBuffer, const int numChans, const int outputStartFrame, const int numFrames, SharedSampleBuffer outputBuffer ) { const float* inputSampleData = inputBuffer.getRawDataPointer(); for ( int chanNum = 0; chanNum < numChans; ++chanNum ) { float* outputSampleData = outputBuffer->getWritePointer( chanNum, outputStartFrame ); for ( int frameNum = 0; frameNum < numFrames; ++frameNum ) { outputSampleData[ frameNum ] = inputSampleData[ numChans * frameNum + chanNum ]; } } } bool AudioFileHandler::convertSampleRate( const SharedSampleBuffer inputBuffer, const qreal sampleRateRatio, Array<float>& outputBuffer ) { const int inputNumFrames = inputBuffer->getNumFrames(); const int numChans = inputBuffer->getNumChannels(); const long outputNumFrames = roundToIntAccurate( inputNumFrames * sampleRateRatio ); if ( outputBuffer.size() != outputNumFrames * numChans ) { outputBuffer.resize( outputNumFrames * numChans ); } Array<float> tempBuffer; tempBuffer.resize( inputNumFrames * numChans ); interleaveSamples( inputBuffer, numChans, 0, inputNumFrames, tempBuffer ); SRC_DATA srcData; memset( &srcData, 0, sizeof( SRC_DATA ) ); srcData.data_in = tempBuffer.getRawDataPointer(); srcData.data_out = outputBuffer.getRawDataPointer(); srcData.input_frames = inputNumFrames; srcData.output_frames = outputNumFrames; srcData.src_ratio = sampleRateRatio; bool isSuccessful = true; const int errorCode = src_simple( &srcData, SRC_SINC_BEST_QUALITY, numChans ); if ( errorCode > 0 ) { s_errorTitle = "Couldn't convert sample rate!"; s_errorInfo = src_strerror( errorCode ); isSuccessful = false; } return isSuccessful; } bool AudioFileHandler::sndfileSaveAudioFile( SNDFILE* fileID, const SharedSampleBuffer sampleBuffer, const int hopSize ) { const int totalNumFrames = sampleBuffer->getNumFrames(); const int numChans = sampleBuffer->getNumChannels(); int numFramesToWrite = 0; int startFrame = 0; int numSamplesWritten = 0; Array<float> tempBuffer; tempBuffer.resize( hopSize * numChans ); bool isSuccessful = true; do { numFramesToWrite = totalNumFrames - startFrame >= hopSize ? 
hopSize : totalNumFrames - startFrame; interleaveSamples( sampleBuffer, numChans, startFrame, numFramesToWrite, tempBuffer ); numSamplesWritten = sf_write_float( fileID, tempBuffer.getRawDataPointer(), numFramesToWrite * numChans ); if ( numSamplesWritten != numFramesToWrite * numChans ) { sndfileRecordWriteError( numFramesToWrite * numChans, numSamplesWritten ); isSuccessful = false; } startFrame += hopSize; } while ( numFramesToWrite == hopSize && isSuccessful ); return isSuccessful; } bool AudioFileHandler::sndfileSaveAudioFile( SNDFILE* fileID, const Array<float> interleavedBuffer, const int hopSize ) { const int totalNumSamples = interleavedBuffer.size(); int numSamplesToWrite = 0; int startSample = 0; int numSamplesWritten = 0; Array<float> tempBuffer; tempBuffer.resize( hopSize ); bool isSuccessful = true; do { numSamplesToWrite = totalNumSamples - startSample >= hopSize ? hopSize : totalNumSamples - startSample; for ( int i = 0; i < numSamplesToWrite; i++ ) { tempBuffer.setUnchecked( i, interleavedBuffer.getUnchecked( startSample + i ) ); } numSamplesWritten = sf_write_float( fileID, tempBuffer.getRawDataPointer(), numSamplesToWrite ); if ( numSamplesWritten != numSamplesToWrite ) { sndfileRecordWriteError( numSamplesToWrite, numSamplesWritten ); isSuccessful = false; } startSample += hopSize; } while ( numSamplesToWrite == hopSize && isSuccessful ); return isSuccessful; } void AudioFileHandler::sndfileRecordWriteError( const int numSamplesToWrite, const int numSamplesWritten ) { const QString samplesToWrite = QString::number( numSamplesToWrite ); const QString samplesWritten = QString::number( numSamplesWritten ); s_errorTitle = "Error while writing to audio file"; s_errorInfo = "no. of samples to write: " + samplesToWrite + ", " + "no. of samples written: " + samplesWritten; } SharedSampleBuffer AudioFileHandler::sndfileLoadFile( const char* filePath, sf_count_t startFrame, sf_count_t numFramesToRead ) { const sf_count_t hopSize = 4096; SharedSampleBuffer sampleBuffer; Array<float> tempBuffer; SF_INFO sfInfo; memset( &sfInfo, 0, sizeof( SF_INFO ) ); SNDFILE* fileID = sf_open( filePath, SFM_READ, &sfInfo ); if ( fileID == NULL ) { s_errorTitle = "Couldn't open file for reading!"; s_errorInfo = sf_strerror( NULL ); goto end; } if ( sfInfo.channels < 1 ) { mus_error( MUS_NO_CHANNEL, "File has no audio channels!" ); goto end; } if ( sfInfo.channels > 2 ) { mus_error( MUS_UNSUPPORTED_DATA_FORMAT, "Only mono and stereo samples are supported" ); goto end; } tempBuffer.resize( hopSize * sfInfo.channels ); // If caller has not set `numFramesToRead` assume whole file should be read if ( numFramesToRead < 1 ) // Read whole file { startFrame = 0; numFramesToRead = 0; sf_count_t numFramesRead = 0; // Find the no. 
of frames the long way do { numFramesRead = sf_readf_float( fileID, tempBuffer.getRawDataPointer(), hopSize ); numFramesToRead += numFramesRead; } while ( numFramesRead > 0 ); sf_seek( fileID, 0, SEEK_SET ); } else // Read part of file { sf_seek( fileID, startFrame, SEEK_SET ); } try { sampleBuffer = SharedSampleBuffer( new SampleBuffer( sfInfo.channels, numFramesToRead ) ); sf_count_t numFramesRead = 0; sf_count_t totalNumFramesRead = 0; do { numFramesRead = sf_readf_float( fileID, tempBuffer.getRawDataPointer(), hopSize ); deinterleaveSamples( tempBuffer, sfInfo.channels, totalNumFramesRead, numFramesRead, sampleBuffer ); totalNumFramesRead += numFramesRead; } while ( numFramesRead > 0 && totalNumFramesRead < numFramesToRead ); } catch ( std::bad_alloc& ) { mus_error( MUS_MEMORY_ALLOCATION_FAILED, "Not enough memory to load audio file" ); } sf_close( fileID ); end: return sampleBuffer; } int AudioFileHandler::sndlibInit() { if ( mus_sound_initialize() == MUS_ERROR ) { return MUS_ERROR; } mus_error_set_handler( sndlibRecordError ); return MUS_NO_ERROR; } void AudioFileHandler::sndlibRecordError( int errorCode, char* errorMessage ) { s_errorTitle = mus_error_type_to_string( errorCode ); s_errorInfo = errorMessage; } SharedSampleBuffer AudioFileHandler::sndlibLoadFile( const char* filePath, mus_long_t startFrame, mus_long_t numFramesToRead ) { int fileID = 0; int numChans = 0; mus_long_t numFramesRead = 0; SharedSampleBuffer sampleBuffer; if ( ! mus_header_type_p( mus_sound_header_type(filePath) ) ) { goto end; } if ( ! mus_data_format_p( mus_sound_data_format(filePath) ) ) { goto end; } numChans = mus_sound_chans( filePath ); if ( numChans == MUS_ERROR ) { goto end; } if ( numChans < 1 ) { mus_error( MUS_NO_CHANNEL, "File has no audio channels!" ); goto end; } if ( numChans > 2 ) { mus_error( MUS_UNSUPPORTED_DATA_FORMAT, "Only mono and stereo samples are supported" ); goto end; } if ( mus_sound_srate(filePath) == MUS_ERROR ) { goto end; } // If caller has not set `numFramesToRead` assume whole file should be read if ( numFramesToRead < 1 ) { startFrame = 0; numFramesToRead = mus_sound_frames( filePath ); if ( numFramesToRead == MUS_ERROR ) { goto end; } } try { sampleBuffer = SharedSampleBuffer( new SampleBuffer( numChans, numFramesToRead ) ); fileID = mus_sound_open_input( filePath ); if ( fileID == MUS_ERROR ) { goto end; } if ( mus_file_seek_frame( fileID, startFrame ) == MUS_ERROR ) { mus_sound_close_input( fileID ); goto end; } numFramesRead = mus_file_read( fileID, 0, numFramesToRead - 1, numChans, sampleBuffer->getArrayOfWritePointers() ); if ( numFramesRead == MUS_ERROR ) { mus_sound_close_input( fileID ); sampleBuffer.clear(); goto end; } mus_sound_close_input( fileID ); } catch ( std::bad_alloc& ) { mus_error( MUS_MEMORY_ALLOCATION_FAILED, "Not enough memory to load audio file" ); } end: return sampleBuffer; } SharedSampleBuffer AudioFileHandler::aubioLoadFile( const char* filePath, uint_t startFrame, uint_t numFramesToRead ) { const uint_t hopSize = 4096; uint_t sampleRate = 0; // If `0` is passed as `samplerate` to new_aubio_source, the sample rate of the original file is used. 
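    // As with sndfileLoadFile() above, when the caller does not specify numFramesToRead
    // the source is first read hop by hop just to count the frames, rewound with
    // aubio_source_seek(), and only then read for real into the destination buffer.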
uint_t endFrame = 0; // Exclusive uint_t destStartFrame = 0; // Inclusive uint_t numFramesRead = 0; uint_t numFramesToCopy = 0; uint_t numChans = 0; aubio_source_t* aubioSource = new_aubio_source( const_cast<char*>(filePath), sampleRate, hopSize ); fmat_t* sampleData = NULL; SharedSampleBuffer sampleBuffer; if ( aubioSource != NULL ) { sampleRate = aubio_source_get_samplerate( aubioSource ); numChans = aubio_source_get_channels( aubioSource ); if ( numChans > 2 ) { mus_error( MUS_UNSUPPORTED_DATA_FORMAT, "Only mono and stereo samples are supported" ); } else { sampleData = new_fmat( numChans, hopSize ); if ( sampleData != NULL ) { // If caller has not set `numFramesToRead` assume whole file should be read if ( numFramesToRead < 1 ) // Read whole file { startFrame = 0; numFramesToRead = 0; // Work out the no. of frames the long way do { aubio_source_do_multi( aubioSource, sampleData, &numFramesRead ); numFramesToRead += numFramesRead; } while ( numFramesRead == hopSize ); aubio_source_seek( aubioSource, 0 ); numFramesRead = 0; } else // Read part of file { aubio_source_seek( aubioSource, startFrame ); } endFrame = startFrame + numFramesToRead; fmat_zeros( sampleData ); try { sampleBuffer = SharedSampleBuffer( new SampleBuffer( numChans, numFramesToRead ) ); // Read audio data from file do { aubio_source_do_multi( aubioSource, sampleData, &numFramesRead ); numFramesToCopy = startFrame + numFramesRead <= endFrame ? numFramesRead : endFrame - startFrame; for ( uint_t chanNum = 0; chanNum < numChans; chanNum++ ) { sampleBuffer->copyFrom( chanNum, destStartFrame, sampleData->data[ chanNum ], numFramesToCopy ); } startFrame += numFramesRead; destStartFrame += numFramesRead; } while ( startFrame < endFrame ); } catch ( std::bad_alloc& ) { mus_error( MUS_MEMORY_ALLOCATION_FAILED, "Not enough memory to load audio file" ); } del_fmat( sampleData ); } } del_aubio_source( aubioSource ); } return sampleBuffer; }
rock-hopper/shuriken
src/audiofilehandler.cpp
C++
gpl-2.0
21,603
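The audio-file loader above reads hop-sized blocks of interleaved frames with sf_readf_float() and hands them to a deinterleaveSamples() helper whose definition is not part of this excerpt. As a hedged sketch of what such a helper does (the function name, signature and destination type below are assumptions, not the project's actual API), interleaved frames from libsndfile can be split into per-channel buffers like this:

```cpp
#include <cstddef>
#include <vector>

// Hypothetical stand-in for the project's deinterleaveSamples() helper, which is
// not shown in the excerpt above. libsndfile's sf_readf_float() returns frames as
// interleaved samples (L R L R ... for stereo); per-channel processing needs them
// split into one contiguous buffer per channel.
static void deinterleave( const float* interleaved,
                          std::size_t numChans,
                          std::size_t numFrames,
                          std::vector<std::vector<float>>& channels )
{
    channels.assign( numChans, std::vector<float>( numFrames ) );

    for ( std::size_t frame = 0; frame < numFrames; ++frame )
    {
        for ( std::size_t chan = 0; chan < numChans; ++chan )
        {
            channels[ chan ][ frame ] = interleaved[ frame * numChans + chan ];
        }
    }
}
```

Working a hop at a time, as both the sndlib and aubio paths above do, keeps the temporary interleaved buffer at a fixed size regardless of how long the file is.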
/* Copyright_License { XCSoar Glide Computer - http://www.xcsoar.org/ Copyright (C) 2000-2011 The XCSoar Project A detailed list of copyright holders can be found in the file "AUTHORS". This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. } */ #include "GlideState.hpp" #include <math.h> #include "Util/Quadratic.hpp" #include "Navigation/SpeedVector.hpp" /** * Quadratic function solver for MacCready theory constraint equation * * - document this equation! */ class AverageSpeedSolver: public Quadratic { public: /** * Constructor. * * @param task Task to initialse solver for * @param V Speed (m/s) * * @return Initialised object (not solved) */ AverageSpeedSolver(const fixed dwcostheta, const fixed wind_speed_squared, const fixed V) : Quadratic(dwcostheta, wind_speed_squared - V * V) { } /** * Find ground speed from task and wind * * @return Ground speed during cruise (m/s) */ gcc_pure fixed Solve() const { if (Check()) /// @todo check this is correct for all theta return SolutionMax(); return -fixed_one; } }; fixed GlideState::CalcAverageSpeed(const fixed Veff) const { if (wind.is_non_zero()) { // only need to solve if positive wind speed return AverageSpeedSolver(head_wind_doubled, wind_speed_squared, Veff).Solve(); } return Veff; } // dummy task GlideState::GlideState(const GeoVector &vector, const fixed htarget, fixed altitude, const SpeedVector wind) : vector(vector), min_height(htarget), altitude_difference(altitude - min_height) { CalcSpeedups(wind); } void GlideState::CalcSpeedups(const SpeedVector _wind) { if (_wind.is_non_zero()) { wind = _wind; effective_wind_angle = wind.bearing.Reciprocal() - vector.Bearing; wind_speed_squared = wind.norm * wind.norm; head_wind = -wind.norm * effective_wind_angle.cos(); head_wind_doubled = fixed_two * head_wind; } else { wind.bearing = Angle::zero(); wind.norm = fixed_zero; effective_wind_angle = Angle::zero(); head_wind = fixed_zero; wind_speed_squared = fixed_zero; head_wind_doubled = fixed_zero; } } fixed GlideState::DriftedDistance(const fixed t_cl) const { if (wind.is_zero()) return vector.Distance; const Angle wd = wind.bearing.Reciprocal(); fixed sinwd, coswd; wd.sin_cos(sinwd, coswd); const Angle tb = vector.Bearing; fixed sintb, costb; tb.sin_cos(sintb, costb); const fixed aw = wind.norm * t_cl; const fixed dx = vector.Distance * sintb - aw * sinwd; const fixed dy = vector.Distance * costb - aw * coswd; return hypot(dx, dy); // ?? task.Bearing = RAD_TO_DEG*(atan2(dx,dy)); }
smurry/XCSoar
src/Engine/GlideSolvers/GlideState.cpp
C++
gpl-2.0
3,366
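The AverageSpeedSolver above marks its constraint equation as undocumented. Assuming the two-argument Quadratic(b, c) base class models the monic equation x² + b·x + c = 0 (an assumption; only its constructor call is visible here), the wind-triangle derivation below reproduces the coefficients passed in the constructor, with h the head-wind component along the track and w the wind speed:

```latex
% Derivation sketch for AverageSpeedSolver's constraint equation.
% Notation: $V$ = airspeed (Veff), $w$ = wind speed, $V_g$ = ground speed,
% $h$ = head-wind component along the track (head\_wind), so dwcostheta $= 2h$.
% Resolving the wind triangle along and across the track (the cross-wind
% component must be cancelled to hold the track):
\[
  V^{2} = (V_g + h)^{2} + \bigl(w^{2} - h^{2}\bigr)
  \quad\Longrightarrow\quad
  V_g^{2} + 2h\,V_g + \bigl(w^{2} - V^{2}\bigr) = 0 ,
\]
\[
  V_g = \sqrt{V^{2} - w^{2} + h^{2}} \;-\; h .
\]
```

The larger root is the physically meaningful ground speed, which is why Solve() returns SolutionMax() and falls back to −1 when the discriminant check fails.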
#pragma once #include "coreinit_enum.h" #include "coreinit_time.h" #include "coreinit_internal_queue.h" #include "ppcutils/wfunc_ptr.h" #include "kernel/kernel.h" #include <common/be_ptr.h> #include <common/be_val.h> #include <common/cbool.h> #include <common/structsize.h> #include <cstdint> struct Fiber; namespace coreinit { /** * \defgroup coreinit_thread Thread * \ingroup coreinit * * The thread scheduler in the Wii U uses co-operative scheduling, this is different * to the usual pre-emptive scheduling that most operating systems use (such as * Windows, Linux, etc). In co-operative scheduling threads must voluntarily yield * execution to other threads. In pre-emptive threads are switched by the operating * system after an amount of time. * * With the Wii U's scheduling model the thread with the highest priority which * is in a non-waiting state will always be running (where 0 is the highest * priority and 31 is the lowest). Execution will only switch to other threads * once this thread has been forced to wait, such as when waiting to acquire a * mutex, or when the thread voluntarily yields execution to other threads which * have the same priority using OSYieldThread. OSYieldThread will never yield to * a thread with lower priority than the current thread. * @{ */ #pragma pack(push, 1) struct OSAlarm; struct OSThread; using OSThreadEntryPointFn = wfunc_ptr<uint32_t, uint32_t, void*>; using OSThreadCleanupCallbackFn = wfunc_ptr<void, OSThread *, void *>; using OSThreadDeallocatorFn = wfunc_ptr<void, OSThread *, void *>; struct OSContext { static const uint64_t Tag1 = 0x4F53436F6E747874ull; //! Should always be set to the value OSContext::Tag. be_val<uint64_t> tag; be_val<uint32_t> gpr[32]; be_val<uint32_t> cr; be_val<uint32_t> lr; be_val<uint32_t> ctr; be_val<uint32_t> xer; // srr0 and srr1 would usually be here, however because these are used // for operating system things and we are HLE, it should be safe to // override them with our internal HLE linkup. be_val<uint32_t> nia; be_val<uint32_t> cia; UNKNOWN(0x14); be_val<uint32_t> fpscr; be_val<double> fpr[32]; be_val<uint16_t> spinLockCount; be_val<uint16_t> state; be_val<uint32_t> gqr[8]; UNKNOWN(4); be_val<double> psf[32]; be_val<uint64_t> coretime[3]; be_val<uint64_t> starttime; be_val<uint32_t> error; UNKNOWN(4); be_val<uint32_t> pmc1; be_val<uint32_t> pmc2; // pmc3 and pmc4 would usually be here, however because these are used // for operating system things, it should be safe to use them. 
kernel::Fiber *fiber; be_val<uint32_t> mmcr0; be_val<uint32_t> mmcr1; }; CHECK_OFFSET(OSContext, 0x00, tag); CHECK_OFFSET(OSContext, 0x08, gpr); CHECK_OFFSET(OSContext, 0x88, cr); CHECK_OFFSET(OSContext, 0x8c, lr); CHECK_OFFSET(OSContext, 0x90, ctr); CHECK_OFFSET(OSContext, 0x94, xer); //CHECK_OFFSET(OSContext, 0x98, srr0); //CHECK_OFFSET(OSContext, 0x9c, srr1); CHECK_OFFSET(OSContext, 0xb4, fpscr); CHECK_OFFSET(OSContext, 0xb8, fpr); CHECK_OFFSET(OSContext, 0x1b8, spinLockCount); CHECK_OFFSET(OSContext, 0x1ba, state); CHECK_OFFSET(OSContext, 0x1bc, gqr); CHECK_OFFSET(OSContext, 0x1e0, psf); CHECK_OFFSET(OSContext, 0x2e0, coretime); CHECK_OFFSET(OSContext, 0x2f8, starttime); CHECK_OFFSET(OSContext, 0x300, error); CHECK_OFFSET(OSContext, 0x308, pmc1); CHECK_OFFSET(OSContext, 0x30c, pmc2); //CHECK_OFFSET(OSContext, 0x310, pmc3); //CHECK_OFFSET(OSContext, 0x314, pmc4); CHECK_OFFSET(OSContext, 0x318, mmcr0); CHECK_OFFSET(OSContext, 0x31c, mmcr1); CHECK_SIZE(OSContext, 0x320); struct OSMutex; struct OSMutexQueue { be_ptr<OSMutex> head; be_ptr<OSMutex> tail; be_ptr<void> parent; UNKNOWN(4); }; CHECK_OFFSET(OSMutexQueue, 0x0, head); CHECK_OFFSET(OSMutexQueue, 0x4, tail); CHECK_OFFSET(OSMutexQueue, 0x8, parent); CHECK_SIZE(OSMutexQueue, 0x10); struct OSFastMutex; struct OSFastMutexQueue { be_ptr<OSFastMutex> head; be_ptr<OSFastMutex> tail; }; CHECK_OFFSET(OSFastMutexQueue, 0x00, head); CHECK_OFFSET(OSFastMutexQueue, 0x04, tail); CHECK_SIZE(OSFastMutexQueue, 0x08); struct OSThreadLink { be_ptr<OSThread> next; be_ptr<OSThread> prev; }; CHECK_OFFSET(OSThreadLink, 0x00, next); CHECK_OFFSET(OSThreadLink, 0x04, prev); CHECK_SIZE(OSThreadLink, 0x8); struct OSThreadQueue { be_ptr<OSThread> head; be_ptr<OSThread> tail; be_ptr<void> parent; UNKNOWN(4); }; CHECK_OFFSET(OSThreadQueue, 0x00, head); CHECK_OFFSET(OSThreadQueue, 0x04, tail); CHECK_OFFSET(OSThreadQueue, 0x08, parent); CHECK_SIZE(OSThreadQueue, 0x10); struct OSThreadSimpleQueue { be_ptr<OSThread> head; be_ptr<OSThread> tail; }; CHECK_OFFSET(OSThreadSimpleQueue, 0x00, head); CHECK_OFFSET(OSThreadSimpleQueue, 0x04, tail); CHECK_SIZE(OSThreadSimpleQueue, 0x08); struct OSTLSSection { be_ptr<void> data; UNKNOWN(4); }; CHECK_OFFSET(OSTLSSection, 0x00, data); CHECK_SIZE(OSTLSSection, 0x08); struct OSThread { static const uint32_t Tag = 0x74487244; OSContext context; //! Should always be set to the value OSThread::Tag. be_val<uint32_t> tag; //! Bitfield of OScpu::Core be_val<OSThreadState> state; //! Bitfield of OSThreadAttributes be_val<OSThreadAttributes> attr; //! Unique thread ID be_val<uint16_t> id; //! Suspend count (increased by OSSuspendThread). be_val<int32_t> suspendCounter; //! Actual priority of thread. be_val<int32_t> priority; //! Base priority of thread, 0 is highest priority, 31 is lowest priority. be_val<int32_t> basePriority; //! Exit value of the thread be_val<uint32_t> exitValue; //! Core run queue stuff be_ptr<OSThreadQueue> coreRunQueue0; be_ptr<OSThreadQueue> coreRunQueue1; be_ptr<OSThreadQueue> coreRunQueue2; OSThreadLink coreRunQueueLink0; OSThreadLink coreRunQueueLink1; OSThreadLink coreRunQueueLink2; //! Queue the thread is currently waiting on be_ptr<OSThreadQueue> queue; //! Link used for thread queue OSThreadLink link; //! Queue of threads waiting to join this thread OSThreadQueue joinQueue; //! Mutex this thread is waiting to lock be_ptr<OSMutex> mutex; //! Queue of mutexes this thread owns OSMutexQueue mutexQueue; //! Link for global active thread queue OSThreadLink activeLink; //! 
Stack start (top, highest address) be_ptr<be_val<uint32_t>> stackStart; //! Stack end (bottom, lowest address) be_ptr<be_val<uint32_t>> stackEnd; //! Thread entry point set in OSCreateThread OSThreadEntryPointFn::be entryPoint; UNKNOWN(0x408 - 0x3a0); //! GEH Exception handling thread-specifics be_ptr<void> _ghs__eh_globals; be_ptr<void> _ghs__eh_mem_manage[9]; be_ptr<void> _ghs__eh_store_globals[6]; be_ptr<void> _ghs__eh_store_globals_tdeh[76]; be_val<uint32_t> alarmCancelled; //! Thread specific values, accessed with OSSetThreadSpecific and OSGetThreadSpecific. be_val<uint32_t> specific[0x10]; UNKNOWN(0x5c0 - 0x5bc); //! Thread name, accessed with OSSetThreadName and OSGetThreadName. be_ptr<const char> name; //! Alarm the thread is waiting on in OSWaitEventWithTimeout be_ptr<OSAlarm> waitEventTimeoutAlarm; //! The stack pointer passed in OSCreateThread. be_ptr<be_val<uint32_t>> userStackPointer; //! Called just before thread is terminated, set with OSSetThreadCleanupCallback OSThreadCleanupCallbackFn::be cleanupCallback; //! Called just after a thread is terminated, set with OSSetThreadDeallocator OSThreadDeallocatorFn::be deallocator; //! Current thread cancel state, controls whether the thread is allowed to cancel or not be_val<OSThreadCancelState> cancelState; //! Current thread request, used for cancelleing and suspending the thread. be_val<OSThreadRequest> requestFlag; //! Pending suspend request count be_val<int32_t> needSuspend; //! Result of thread suspend be_val<int32_t> suspendResult; //! Queue of threads waiting for a thread to be suspended. OSThreadQueue suspendQueue; UNKNOWN(0xC); //! The total amount of core time consumed by this thread (Does not include time while Running) be_val<uint64_t> coreTimeConsumedNs; //! The number of times this thread has been awoken. be_val<uint64_t> wakeCount; UNKNOWN(0x664 - 0x610); //! Number of TLS sections be_val<uint16_t> tlsSectionCount; UNKNOWN(0x2); //! TLS Sections be_ptr<OSTLSSection> tlsSections; //! The fast mutex we are currently waiting for be_ptr<OSFastMutex> fastMutex; //! The fast mutexes we are currently contended on OSFastMutexQueue contendedFastMutexes; //! 
The fast mutexes we currently own locks on OSFastMutexQueue fastMutexQueue; UNKNOWN(0x69c - 0x680); }; CHECK_OFFSET(OSThread, 0x320, tag); CHECK_OFFSET(OSThread, 0x324, state); CHECK_OFFSET(OSThread, 0x325, attr); CHECK_OFFSET(OSThread, 0x326, id); CHECK_OFFSET(OSThread, 0x328, suspendCounter); CHECK_OFFSET(OSThread, 0x32c, priority); CHECK_OFFSET(OSThread, 0x330, basePriority); CHECK_OFFSET(OSThread, 0x334, exitValue); CHECK_OFFSET(OSThread, 0x338, coreRunQueue0); CHECK_OFFSET(OSThread, 0x33C, coreRunQueue1); CHECK_OFFSET(OSThread, 0x340, coreRunQueue2); CHECK_OFFSET(OSThread, 0x344, coreRunQueueLink0); CHECK_OFFSET(OSThread, 0x34C, coreRunQueueLink1); CHECK_OFFSET(OSThread, 0x354, coreRunQueueLink2); CHECK_OFFSET(OSThread, 0x35C, queue); CHECK_OFFSET(OSThread, 0x360, link); CHECK_OFFSET(OSThread, 0x368, joinQueue); CHECK_OFFSET(OSThread, 0x378, mutex); CHECK_OFFSET(OSThread, 0x37C, mutexQueue); CHECK_OFFSET(OSThread, 0x38C, activeLink); CHECK_OFFSET(OSThread, 0x394, stackStart); CHECK_OFFSET(OSThread, 0x398, stackEnd); CHECK_OFFSET(OSThread, 0x39C, entryPoint); CHECK_OFFSET(OSThread, 0x408, _ghs__eh_globals); CHECK_OFFSET(OSThread, 0x40C, _ghs__eh_mem_manage); CHECK_OFFSET(OSThread, 0x430, _ghs__eh_store_globals); CHECK_OFFSET(OSThread, 0x448, _ghs__eh_store_globals_tdeh); CHECK_OFFSET(OSThread, 0x578, alarmCancelled); CHECK_OFFSET(OSThread, 0x57C, specific); CHECK_OFFSET(OSThread, 0x5C0, name); CHECK_OFFSET(OSThread, 0x5C4, waitEventTimeoutAlarm); CHECK_OFFSET(OSThread, 0x5C8, userStackPointer); CHECK_OFFSET(OSThread, 0x5CC, cleanupCallback); CHECK_OFFSET(OSThread, 0x5D0, deallocator); CHECK_OFFSET(OSThread, 0x5D4, cancelState); CHECK_OFFSET(OSThread, 0x5D8, requestFlag); CHECK_OFFSET(OSThread, 0x5DC, needSuspend); CHECK_OFFSET(OSThread, 0x5E0, suspendResult); CHECK_OFFSET(OSThread, 0x5E4, suspendQueue); CHECK_OFFSET(OSThread, 0x600, coreTimeConsumedNs); CHECK_OFFSET(OSThread, 0x608, wakeCount); CHECK_OFFSET(OSThread, 0x664, tlsSectionCount); CHECK_OFFSET(OSThread, 0x668, tlsSections); CHECK_OFFSET(OSThread, 0x66C, fastMutex); CHECK_OFFSET(OSThread, 0x670, contendedFastMutexes); CHECK_OFFSET(OSThread, 0x678, fastMutexQueue); CHECK_SIZE(OSThread, 0x69c); struct tls_index { be_val<uint32_t> moduleIndex; be_val<uint32_t> offset; }; CHECK_OFFSET(tls_index, 0x00, moduleIndex); CHECK_OFFSET(tls_index, 0x04, offset); CHECK_SIZE(tls_index, 0x08); #pragma pack(pop) void OSCancelThread(OSThread *thread); int32_t OSCheckActiveThreads(); int32_t OSCheckThreadStackUsage(OSThread *thread); void OSClearThreadStackUsage(OSThread *thread); void OSContinueThread(OSThread *thread); BOOL OSCreateThread(OSThread *thread, OSThreadEntryPointFn entry, uint32_t argc, void *argv, be_val<uint32_t> *stack, uint32_t stackSize, int32_t priority, OSThreadAttributes attributes); void OSDetachThread(OSThread *thread); void OSExitThread(int value); void OSGetActiveThreadLink(OSThread *thread, OSThreadLink *link); OSThread * OSGetCurrentThread(); OSThread * OSGetDefaultThread(uint32_t coreID); uint32_t OSGetStackPointer(); uint32_t OSGetThreadAffinity(OSThread *thread); const char * OSGetThreadName(OSThread *thread); uint32_t OSGetThreadPriority(OSThread *thread); uint32_t OSGetThreadSpecific(uint32_t id); void OSInitThreadQueue(OSThreadQueue *queue); void OSInitThreadQueueEx(OSThreadQueue *queue, void *parent); BOOL OSIsThreadSuspended(OSThread *thread); BOOL OSIsThreadTerminated(OSThread *thread); BOOL OSJoinThread(OSThread *thread, be_val<int> *exitValue); void OSPrintCurrentThreadState(); int32_t 
OSResumeThread(OSThread *thread); BOOL OSRunThread(OSThread *thread, OSThreadEntryPointFn entry, uint32_t argc, void *argv); BOOL OSSetThreadAffinity(OSThread *thread, uint32_t affinity); BOOL OSSetThreadCancelState(BOOL state); OSThreadCleanupCallbackFn OSSetThreadCleanupCallback(OSThread *thread, OSThreadCleanupCallbackFn callback); OSThreadDeallocatorFn OSSetThreadDeallocator(OSThread *thread, OSThreadDeallocatorFn deallocator); void OSSetThreadName(OSThread* thread, const char *name); BOOL OSSetThreadPriority(OSThread* thread, uint32_t priority); BOOL OSSetThreadRunQuantum(OSThread* thread, uint32_t quantum); void OSSetThreadSpecific(uint32_t id, uint32_t value); BOOL OSSetThreadStackUsage(OSThread *thread); void OSSleepThread(OSThreadQueue *queue); void OSSleepTicks(OSTime ticks); uint32_t OSSuspendThread(OSThread *thread); void OSTestThreadCancel(); void OSWakeupThread(OSThreadQueue *queue); void OSYieldThread(); void * tls_get_addr(tls_index *index); /** @} */ namespace internal { uint32_t pinThreadAffinity(); void unpinThreadAffinity(uint32_t affinity); void queueThreadDeallocation(OSThread *thread); void startDeallocatorThreads(); void exitThreadNoLock(int value); void setDefaultThread(uint32_t core, OSThread *thread); bool threadSortFunc(OSThread *lhs, OSThread *rhs); using ThreadQueue = SortedQueue<OSThreadQueue, OSThreadLink, OSThread, &OSThread::link, threadSortFunc>; } // namespace internal } // namespace coreinit
CarlKenner/decaf-emu
src/libdecaf/src/modules/coreinit/coreinit_thread.h
C
gpl-2.0
14,091
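The header above pins the guest-visible layout of OSContext and OSThread with CHECK_OFFSET and CHECK_SIZE macros that come from common/structsize.h, which is not shown in this excerpt. A minimal sketch of how such checks are commonly implemented (the macro names and messages below are invented for illustration) is just offsetof and sizeof wrapped in static_assert, so any refactor that shifts a field fails at compile time instead of silently corrupting emulated memory:

```cpp
#include <cstddef>
#include <cstdint>

// Sketch only: compile-time assertions that pin a structure's layout to a fixed
// guest ABI, in the spirit of the CHECK_OFFSET / CHECK_SIZE calls above.
#define SKETCH_CHECK_OFFSET(Type, Offset, Field) \
   static_assert(offsetof(Type, Field) == (Offset), \
                 #Type "::" #Field " is not at guest offset " #Offset)

#define SKETCH_CHECK_SIZE(Type, Size) \
   static_assert(sizeof(Type) == (Size), #Type " does not match guest size " #Size)

// Example usage against a toy guest structure:
struct GuestLink
{
   uint32_t next;   // guest virtual address of the next element
   uint32_t prev;   // guest virtual address of the previous element
};

SKETCH_CHECK_OFFSET(GuestLink, 0x00, next);
SKETCH_CHECK_OFFSET(GuestLink, 0x04, prev);
SKETCH_CHECK_SIZE(GuestLink, 0x08);
```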
/** * @package AcyMailing for Joomla! * @version 5.0.0 * @author acyba.com * @copyright (C) 2009-2015 ACYBA S.A.R.L. All rights reserved. * @license GNU/GPLv3 http://www.gnu.org/licenses/gpl-3.0.html */ @import url("component_default_square_black.css"); #acyarchivelisting .button:hover, #acymodifyform .button:hover, #unsubbutton_div .button:hover { color:#9e9c07 !important; } #acyarchivelisting .contentheading{ color:#727127; border-bottom:1px solid #727127; } #acyarchivelisting .contentpane .contentdescription{ color:#9e9c07; } #acyarchivelisting .contentpane tbody .sectiontableentry1 a:hover{ color:#9e9c07; } #acyarchivelisting .contentpane tbody .sectiontableentry2 a:hover{ color:#9e9c07; } #acyarchivelisting .sectiontableheader a:hover{ color:#727127;} #acyarchiveview .contentheading{ color:#9e9c07;} #acylistslisting .componentheading{ color:#727127; border-bottom:1px solid #727127; } #acylistslisting .list_name a{ color:#9e9c07; } #acylistslisting .list_name a:hover, #acylistslisting .list_name a:focus { color:#9e9c07; } #acymodifyform legend{ color:#727127; border-bottom:1px solid #727127; } #acyusersubscription .list_name{ color: #9e9c07; } #unsubpage .unsubintro{ color:#9e9c07; border-bottom: 1px solid #9e9c07; } #unsubpage .unsubsurveytext{ border-bottom: 1px solid #9e9c07; color: #9e9c07; }
sumithMadhushan/joomla-project
media/com_acymailing/css/component_default_square_green.css
CSS
gpl-2.0
1,420
package com.dandrex.malfriends.controller.adapter; import java.util.List; import com.android.volley.toolbox.ImageLoader; import com.android.volley.toolbox.NetworkImageView; import com.dandrex.malfriends.R; import com.dandrex.malfriends.controller.util.RequestHelper; import com.dandrex.malfriends.model.Friend; import android.annotation.SuppressLint; import android.content.Context; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ArrayAdapter; import android.widget.TextView; public class FriendAdapter extends ArrayAdapter<Friend> { private List<Friend> friendsList; private static ImageLoader imageLoader; public FriendAdapter(Context context, List<Friend> friendsList) { super(context, R.layout.friends_row_items, friendsList); this.friendsList=friendsList; imageLoader=RequestHelper.getImageLoader(); } @SuppressLint("InflateParams") @Override public View getView(int position, View convertView, ViewGroup parent) { LayoutInflater inflater = (LayoutInflater) getContext() .getSystemService(Context.LAYOUT_INFLATER_SERVICE); View view = convertView; Holder holder; Friend friend = friendsList.get(position); if (view==null) { holder=new Holder(); view = inflater.inflate(R.layout.friends_row_items, null); holder.image= (NetworkImageView) view.findViewById(R.id.friends_row_items_image); holder.image.setDefaultImageResId(R.drawable.ic_default_avatar); holder.image.setErrorImageResId(R.drawable.ic_default_avatar); holder.name = (TextView) view.findViewById(R.id.friends_row_items_name); view.setTag(holder); }else { holder = (Holder) view.getTag(); } holder.name.setText(friend.getName()); holder.image.setImageUrl(friend.getImageUrl(), imageLoader); // Log.i("FriendAdapter", friend.getName()+" avatar: "+friend.getImageUrl()); return view; } private class Holder { NetworkImageView image; TextView name; } }
DandreX/MALFriends
src/com/dandrex/malfriends/controller/adapter/FriendAdapter.java
Java
gpl-2.0
1,975
#ifndef CPPUNIT_TOOLS_STRINGHELPER_H #define CPPUNIT_TOOLS_STRINGHELPER_H #include <cppunit/Portability.h> #include <cppunit/portability/Stream.h> #include <string> #include <type_traits> CPPUNIT_NS_BEGIN /*! \brief Methods for converting values to strings. Replaces CPPUNIT_NS::StringTools::toString */ namespace StringHelper { // work around to handle C++11 enum class correctly. We need an own conversion to std::string // as there is no implicit coversion to int for enum class. template<typename T> typename std::enable_if<!std::is_enum<T>::value, std::string>::type toString(const T& x) { OStringStream ost; ost << x; return ost.str(); } template<typename T> typename std::enable_if<std::is_enum<T>::value, std::string>::type toString(const T& x) { OStringStream ost; ost << static_cast<typename std::underlying_type<T>::type>(x); return ost.str(); } } CPPUNIT_NS_END #endif // CPPUNIT_TOOLS_STRINGHELPER_H
FlightGear/flightgear
3rdparty/cppunit/include/cppunit/tools/StringHelper.h
C
gpl-2.0
954
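The enable_if split above exists because a C++11 scoped enum has no implicit conversion to its underlying integer type, so streaming it with operator<< does not compile. A minimal standalone analogue (using plain std::ostringstream instead of the library's OStringStream, and a made-up Severity enum) shows the enum overload in use:

```cpp
#include <iostream>
#include <sstream>
#include <string>
#include <type_traits>

// Made-up scoped enum for illustration; it has no operator<< of its own.
enum class Severity { Info, Warning, Error };

// Standalone analogue of the enum-class overload above: cast to the underlying
// integer type before streaming, because `ost << x` would not compile for a
// scoped enum.
template <typename T>
typename std::enable_if<std::is_enum<T>::value, std::string>::type
toString(const T& x)
{
    std::ostringstream ost;
    ost << static_cast<typename std::underlying_type<T>::type>(x);
    return ost.str();
}

int main()
{
    // Prints the underlying value "2"; `std::cout << Severity::Error` would be
    // a compile error.
    std::cout << toString(Severity::Error) << std::endl;
    return 0;
}
```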
package com.richousrick.computermod.items.ram;

import com.richousrick.computermod.items.ItemCM;

public class RamCore extends ItemCM {

    public RamCore() {
        super();
        this.setMaxStackSize(1);
    }
}
richousrick/ComputerMod
src/main/java/com/richousrick/computermod/items/ram/RamCore.java
Java
gpl-2.0
202
<?php /** * Order details * * @author WooThemes * @package WooCommerce/Templates * @version 2.2.0 */ if ( ! defined( 'ABSPATH' ) ) exit; // Exit if accessed directly global $woocommerce; $order = new WC_Order( $order_id ); ?> <h2><?php _e( 'Order Details', 'woocommerce' ); ?></h2> <table class="shop_table order_details"> <thead> <tr> <th class="product-name"><?php _e( 'Product', 'woocommerce' ); ?></th> <th class="product-total"><?php _e( 'Total', 'woocommerce' ); ?></th> </tr> </thead> <tfoot> <?php if ( $totals = $order->get_order_item_totals() ) foreach ( $totals as $total ) : ?> <tr> <th scope="row"><?php echo $total['label']; ?></th> <td><?php echo $total['value']; ?></td> </tr> <?php endforeach; ?> </tfoot> <tbody> <?php if (sizeof($order->get_items())>0) { $i=0; foreach($order->get_items() as $item) { $odd_even = $i & 1 ? 'even ' : 'odd '; $_product = get_product( $item['variation_id'] ? $item['variation_id'] : $item['product_id'] ); echo ' <tr class = "' . $odd_even . esc_attr( apply_filters( 'woocommerce_order_table_item_class', 'order_table_item', $item, $order ) ) . '"> <td class="product-name">' . apply_filters( 'woocommerce_order_table_product_title', '<a href="' . get_permalink( $item['product_id'] ) . '">' . $item['name'] . '</a>', $item ) . ' ' . apply_filters( 'woocommerce_order_table_item_quantity', '<strong class="product-quantity">&times; ' . $item['qty'] . '</strong>', $item ); //$item_meta = new WC_Order_Item_Meta( $item['item_meta'] ); //$item_meta->display(); if ( $_product && $_product->exists() && $_product->is_downloadable() && $order->is_download_permitted() ) { $download_file_urls = $order->get_downloadable_file_urls( $item['product_id'], $item['variation_id'], $item ); $i = 0; $links = array(); foreach ( $download_file_urls as $file_url => $download_file_url ) { $filename = woocommerce_get_filename_from_url( $file_url ); $links[] = '<small><a href="' . $download_file_url . '">' . sprintf( __( 'Download file%s', 'woocommerce' ), ( count( $download_file_urls ) > 1 ? ' ' . ( $i + 1 ) . ': ' : ': ' ) ) . $filename . '</a></small>'; $i++; } echo implode( '<br/>', $links ); } echo '</td><td class="product-total">' . $order->get_formatted_line_subtotal( $item ) . '</td></tr>'; // Show any purchase notes if ($order->status=='completed' || $order->status=='processing') { if ($purchase_note = get_post_meta( $_product->id, '_purchase_note', true)) echo '<tr class="product-purchase-note"><td colspan="3">' . apply_filters('the_content', $purchase_note) . 
'</td></tr>'; } $i++; } } do_action( 'woocommerce_order_items_table', $order ); ?> </tbody> </table> <?php if ( get_option('woocommerce_allow_customers_to_reorder') == 'yes' && $order->status=='completed' ) : ?> <p class="order-again"> <a href="<?php echo esc_url( $woocommerce->nonce_url( 'order_again', add_query_arg( 'order_again', $order->id, add_query_arg( 'order', $order->id, get_permalink( woocommerce_get_page_id( 'view_order' ) ) ) ) ) ); ?>" class="button"><?php _e( 'Order Again', 'woocommerce' ); ?></a> </p> <?php endif; ?> <?php do_action( 'woocommerce_order_details_after_order_table', $order ); ?> <header> <h2><?php _e( 'Customer details', 'woocommerce' ); ?></h2> </header> <dl class="customer_details"> <?php if ($order->billing_email) echo '<dt><strong>'.__( 'Email:', 'woocommerce' ).'</strong></dt><dd>'.$order->billing_email.'</dd>'; if ($order->billing_phone) echo '<dt><strong>'.__( 'Telephone:', 'woocommerce' ).'</strong></dt><dd>'.$order->billing_phone.'</dd>'; ?> </dl> <?php if (get_option('woocommerce_ship_to_billing_address_only')=='no') : ?> <div class="col2-set addresses"> <div class="col-1"> <?php endif; ?> <header class="title"> <h3><?php _e( 'Billing Address', 'woocommerce' ); ?></h3> </header> <address><p> <?php if (!$order->get_formatted_billing_address()) _e( 'N/A', 'woocommerce' ); else echo $order->get_formatted_billing_address(); ?> </p></address> <?php if (get_option('woocommerce_ship_to_billing_address_only')=='no') : ?> </div><!-- /.col-1 --> <div class="col-2"> <header class="title"> <h3><?php _e( 'Shipping Address', 'woocommerce' ); ?></h3> </header> <address><p> <?php if (!$order->get_formatted_shipping_address()) _e( 'N/A', 'woocommerce' ); else echo $order->get_formatted_shipping_address(); ?> </p></address> </div><!-- /.col-2 --> </div><!-- /.col2-set --> <?php endif; ?> <div class="clear"></div>
idies/voyages_sdss_wp
wp-content/themes/wpl-galaxy/woocommerce/order/order-details.php
PHP
gpl-2.0
4,674
/* Copyright Russell Steffen <rsteffen@bayarea.net> Copyright Stephan Zehetner <s.zehetner@nevox.org> Copyright Dmitry Suzdalev <dimsuz@gmail.com> Copyright <inge@lysator.liu.se> Copyright <pinaraf@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef KONQUEST_MAPITEMS_H #define KONQUEST_MAPITEMS_H #include <QGraphicsObject> #include <QTextDocument> #include <QObject> #include "sector.h" class Game; class MapScene; class PlanetItem : public QGraphicsObject { Q_OBJECT public: PlanetItem(MapScene *scene, Sector *sector, Game *game); ~PlanetItem() {} QRectF boundingRect() const; void paint(QPainter *painter, const QStyleOptionGraphicsItem *option, QWidget *widget); void hoverEnterEvent ( QGraphicsSceneHoverEvent *event ); void hoverLeaveEvent ( QGraphicsSceneHoverEvent *event ); void mousePressEvent ( QGraphicsSceneMouseEvent *event ); void unselect (); void select (); Sector *sector () { return m_sector; } signals: void planetItemSelected (PlanetItem *); private slots: void updatePlanet (); void blinkPlanet (); private: QPixmap renderPixmap( const QString& svgId, int width, int height ) const; MapScene *m_scene; Sector *m_sector; Game *m_game; bool m_hovered; bool m_selected; bool m_blinkState; QTimer *m_blinkTimer; QString m_lookName; }; class PlanetInfoItem : public QGraphicsItem { public: PlanetInfoItem(Game*); ~PlanetInfoItem() {} QRectF boundingRect() const; void setPlanet (Planet *planet); Planet *planet () { return m_planet; } void paint(QPainter *painter, const QStyleOptionGraphicsItem *option, QWidget *widget); private: Game *m_game; QTextDocument m_textDoc; Planet *m_planet; }; #endif // KONQUEST_MAPITEMS_H
jsj2008/kdegames
konquest/map/mapitems.h
C
gpl-2.0
2,642
<?php namespace IngeniousWeb\Skeleton\Core; use IngeniousWeb\Skeleton\Services\System\User; class BaseController { /** * @var $user */ //protected $user; /** * @param User $user */ public function __construct(/*User $user*/) { //$this->user = $user; } public function title($class) { return get_class($class); } public function view($view, $title, $user, $data = [], $token) { require_once '../web/templates/template.php'; } }
joshCarlisleIT/Skeleton
src/Core/BaseController.php
PHP
gpl-2.0
457
/* * Copyright 2006 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. */ /* * @test * @bug 6439826 6411930 6380018 6392177 * @summary Exception issuing Diagnostic while processing generated errant code */ import java.io.*; import java.util.*; import javax.annotation.processing.*; import javax.lang.model.*; import javax.lang.model.element.*; import javax.tools.*; import com.sun.source.util.*; import com.sun.tools.javac.api.*; import static javax.lang.model.util.ElementFilter.*; @SupportedAnnotationTypes("*") @SupportedSourceVersion(SourceVersion.RELEASE_6 ) public class T6439826 extends AbstractProcessor { public static void main(String... args) { String testSrc = System.getProperty("test.src", "."); String testClasses = System.getProperty("test.classes"); JavacTool tool = JavacTool.create(); MyDiagListener dl = new MyDiagListener(); StandardJavaFileManager fm = tool.getStandardFileManager(dl, null, null); Iterable<? extends JavaFileObject> files = fm.getJavaFileObjectsFromFiles(Arrays.asList(new File(testSrc, T6439826.class.getName()+".java"))); Iterable<String> opts = Arrays.asList("-proc:only", "-processor", "T6439826", "-processorpath", testClasses); StringWriter out = new StringWriter(); JavacTask task = tool.getTask(out, fm, dl, opts, null, files); task.call(); String s = out.toString(); System.err.print(s); // Expect the following 2 diagnostics, and no output to log // Foo.java:1: illegal character: \35 // Foo.java:1: reached end of file while parsing System.err.println(dl.count + " diagnostics; " + s.length() + " characters"); if (dl.count != 2 || s.length() != 0) throw new AssertionError("unexpected output from compiler"); } public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) { Set<? extends TypeElement> elems = typesIn(roundEnv.getRootElements()); for (TypeElement e: elems) { if (e.getSimpleName().toString().equals(T6439826.class.getName())) writeBadFile(); } return false; } private void writeBadFile() { Filer filer = processingEnv.getFiler(); Messager messager = processingEnv.getMessager(); try { Writer out = filer.createSourceFile("Foo").openWriter(); out.write("class Foo #"); // write a file that generates a scanner error out.close(); } catch (IOException e) { messager.printMessage(Diagnostic.Kind.ERROR, e.toString()); } } static class MyDiagListener implements DiagnosticListener { public void report(Diagnostic d) { System.err.println(d); count++; } public int count; } }
unktomi/form-follows-function
mjavac/langtools/test/tools/javac/processing/T6439826.java
Java
gpl-2.0
3,965
#define DRIVERVERSION "v4.2.4_9322.20131011_BTCOEX20130918-473C_forASUS"
#define BTCOEXVERSION "BTCOEX20130918-473C"
barome/me102a
drivers/net/wireless/rtl8723BS/include/rtw_version.h
C
gpl-2.0
117
/* * Hydrogen * Copyright(c) 2002-2008 by Alex >Comix< Cominu [comix@users.sourceforge.net] * * http://www.hydrogen-music.org * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY, without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include "SoundLibraryDatastructures.h" #include "SoundLibraryImportDialog.h" #include "SoundLibraryRepositoryDialog.h" #include "SoundLibraryPanel.h" #include "../widgets/DownloadWidget.h" #include "../HydrogenApp.h" #include "../InstrumentRack.h" #include <hydrogen/LocalFileMng.h> #include <hydrogen/h2_exception.h> #include <hydrogen/Preferences.h> #include <hydrogen/basics/drumkit.h> #include <hydrogen/helpers/filesystem.h> #include <QTreeWidget> #include <QDomDocument> #include <QMessageBox> #include <QHeaderView> #include <QFileDialog> #include <QCryptographicHash> #include <memory> const char* SoundLibraryImportDialog::__class_name = "SoundLibraryImportDialog"; SoundLibraryImportDialog::SoundLibraryImportDialog( QWidget* pParent, bool OnlineImport ) : QDialog( pParent ) , Object( __class_name ) { setupUi( this ); INFOLOG( "INIT" ); setWindowTitle( trUtf8( "Sound Library import" ) ); setFixedSize( width(), height() ); QStringList headers; headers << trUtf8( "Sound library" ) << trUtf8( "Status" ); QTreeWidgetItem* header = new QTreeWidgetItem( headers ); m_pDrumkitTree->setHeaderItem( header ); m_pDrumkitTree->header()->resizeSection( 0, 200 ); connect( m_pDrumkitTree, SIGNAL( currentItemChanged ( QTreeWidgetItem*, QTreeWidgetItem* ) ), this, SLOT( soundLibraryItemChanged( QTreeWidgetItem*, QTreeWidgetItem* ) ) ); connect( repositoryCombo, SIGNAL(currentIndexChanged(int)), this, SLOT( onRepositoryComboBoxIndexChanged(int) )); SoundLibraryNameLbl->setText( "" ); SoundLibraryInfoLbl->setText( "" ); DownloadBtn->setEnabled( false ); InstallBtn->setEnabled (false ); updateRepositoryCombo(); if( OnlineImport) tabWidget->setCurrentIndex( 0 ); else tabWidget->setCurrentIndex( 1 ); } SoundLibraryImportDialog::~SoundLibraryImportDialog() { INFOLOG( "DESTROY" ); } //update combo box void SoundLibraryImportDialog::updateRepositoryCombo() { H2Core::Preferences* pref = H2Core::Preferences::get_instance(); /* Read serverList from config and put servers into the comboBox */ if( pref->sServerList.size() == 0 ) { pref->sServerList.push_back( "http://www.hydrogen-music.org/feeds/drumkit_list.php" ); } repositoryCombo->clear(); std::list<QString>::const_iterator cur_Server; for( cur_Server = pref->sServerList.begin(); cur_Server != pref->sServerList.end(); ++cur_Server ) { repositoryCombo->insertItem( 0, *cur_Server ); } reloadRepositoryData(); } void SoundLibraryImportDialog::onRepositoryComboBoxIndexChanged(int i) { UNUSED(i); if(!repositoryCombo->currentText().isEmpty()) { QString cacheFile = getCachedFilename(); if( !H2Core::Filesystem::file_exists( cacheFile, true ) ) { SoundLibraryImportDialog::on_UpdateListBtn_clicked(); } reloadRepositoryData(); } } /// /// Edit the server list /// 
void SoundLibraryImportDialog::on_EditListBtn_clicked() { SoundLibraryRepositoryDialog repoDialog( this ); repoDialog.exec(); updateRepositoryCombo(); } void SoundLibraryImportDialog::clearImageCache() { // Note: After a kit is installed the list refreshes and this gets called to // clear the image cache - maybe we want to keep the cache in this case? QString cacheDir = H2Core::Filesystem::repositories_cache_dir() ; INFOLOG("Deleting cached image files from " + cacheDir.toLocal8Bit() ); QDir dir( cacheDir ); dir.setNameFilters(QStringList() << "*.png"); dir.setFilter(QDir::Files); foreach(QString dirFile, dir.entryList()) { if ( !dir.remove(dirFile) ) { WARNINGLOG("Error removing image file(s) from cache."); } } } QString SoundLibraryImportDialog::getCachedFilename() { QString cacheDir = H2Core::Filesystem::repositories_cache_dir(); QString serverMd5 = QString(QCryptographicHash::hash(( repositoryCombo->currentText().toLatin1() ),QCryptographicHash::Md5).toHex()); QString cacheFile = cacheDir + "/" + serverMd5; return cacheFile; } QString SoundLibraryImportDialog::getCachedImageFilename() { QString cacheDir = H2Core::Filesystem::repositories_cache_dir(); QString kitNameMd5 = QString(QCryptographicHash::hash(( SoundLibraryNameLbl->text().toLatin1() ),QCryptographicHash::Md5).toHex()); QString cacheFile = cacheDir + "/" + kitNameMd5 + ".png"; return cacheFile; } void SoundLibraryImportDialog::writeCachedData(const QString& fileName, const QString& data) { if( data.isEmpty() ) { return; } QFile outFile( fileName ); if( !outFile.open( QIODevice::WriteOnly | QIODevice::Text ) ) { ERRORLOG( "Failed to open file for writing repository cache." ); return; } QTextStream stream( &outFile ); stream << data; outFile.close(); } void SoundLibraryImportDialog::writeCachedImage( const QString& imageFile, QPixmap& pixmap ) { QString cacheFile = getCachedImageFilename() ; QFile outFile( cacheFile ); if( !outFile.open( QIODevice::WriteOnly ) ) { ERRORLOG( "Failed to open file for writing repository image cache." ); return; } pixmap.save(&outFile); outFile.close(); } QString SoundLibraryImportDialog::readCachedData(const QString& fileName) { QString content; QFile inFile( fileName ); if( !inFile.open( QIODevice::ReadOnly | QIODevice::Text ) ) { ERRORLOG( "Failed to open file for reading." 
); return content; } QDomDocument document; if( !document.setContent( &inFile ) ) { inFile.close(); return content; } inFile.close(); content = document.toString(); return content; } QString SoundLibraryImportDialog::readCachedImage( const QString& imageFile ) { QString cacheFile = getCachedImageFilename() ; QFile file( cacheFile ); if( !file.exists() ) { // no image in cache, just return NULL return NULL; } return cacheFile; } void SoundLibraryImportDialog::reloadRepositoryData() { QString sDrumkitXML; QString cacheFile = getCachedFilename(); if(H2Core::Filesystem::file_exists(cacheFile,true)) { sDrumkitXML = readCachedData(cacheFile); } m_soundLibraryList.clear(); QDomDocument dom; dom.setContent( sDrumkitXML ); QDomNode drumkitNode = dom.documentElement().firstChild(); while ( !drumkitNode.isNull() ) { if( !drumkitNode.toElement().isNull() ) { if ( drumkitNode.toElement().tagName() == "drumkit" || drumkitNode.toElement().tagName() == "song" || drumkitNode.toElement().tagName() == "pattern" ) { SoundLibraryInfo soundLibInfo; if ( drumkitNode.toElement().tagName() =="song" ) { soundLibInfo.setType( "song" ); } if ( drumkitNode.toElement().tagName() =="drumkit" ) { soundLibInfo.setType( "drumkit" ); } if ( drumkitNode.toElement().tagName() =="pattern" ) { soundLibInfo.setType( "pattern" ); } QDomElement nameNode = drumkitNode.firstChildElement( "name" ); if ( !nameNode.isNull() ) { soundLibInfo.setName( nameNode.text() ); } QDomElement urlNode = drumkitNode.firstChildElement( "url" ); if ( !urlNode.isNull() ) { soundLibInfo.setUrl( urlNode.text() ); } QDomElement infoNode = drumkitNode.firstChildElement( "info" ); if ( !infoNode.isNull() ) { soundLibInfo.setInfo( infoNode.text() ); } QDomElement authorNode = drumkitNode.firstChildElement( "author" ); if ( !authorNode.isNull() ) { soundLibInfo.setAuthor( authorNode.text() ); } QDomElement licenseNode = drumkitNode.firstChildElement( "license" ); if ( !licenseNode.isNull() ) { soundLibInfo.setLicense( licenseNode.text() ); } QDomElement imageNode = drumkitNode.firstChildElement( "image" ); if ( !imageNode.isNull() ) { soundLibInfo.setImage( imageNode.text() ); } QDomElement imageLicenseNode = drumkitNode.firstChildElement( "imageLicense" ); if ( !imageLicenseNode.isNull() ) { soundLibInfo.setImageLicense( imageLicenseNode.text() ); } m_soundLibraryList.push_back( soundLibInfo ); } } drumkitNode = drumkitNode.nextSibling(); } updateSoundLibraryList(); } /// /// Download and update the drumkit list /// void SoundLibraryImportDialog::on_UpdateListBtn_clicked() { QApplication::setOverrideCursor(Qt::WaitCursor); DownloadWidget drumkitList( this, trUtf8( "Updating SoundLibrary list..." 
), repositoryCombo->currentText() ); drumkitList.exec(); QString sDrumkitXML = drumkitList.get_xml_content(); /* * Hydrogen creates the following cache hierarchy to cache * the content of server lists: * * CACHE_DIR * +-----repositories * +-----serverlist_$(md5(SERVER_NAME)) */ QString cacheFile = getCachedFilename(); writeCachedData(cacheFile, sDrumkitXML); reloadRepositoryData(); QApplication::restoreOverrideCursor(); } void SoundLibraryImportDialog::updateSoundLibraryList() { // build the sound library tree m_pDrumkitTree->clear(); m_pDrumkitsItem = new QTreeWidgetItem( m_pDrumkitTree ); m_pDrumkitsItem->setText( 0, trUtf8( "Drumkits" ) ); m_pDrumkitTree->setItemExpanded( m_pDrumkitsItem, true ); m_pSongItem = new QTreeWidgetItem( m_pDrumkitTree ); m_pSongItem->setText( 0, trUtf8( "Songs" ) ); m_pDrumkitTree->setItemExpanded( m_pSongItem, true ); m_pPatternItem = new QTreeWidgetItem( m_pDrumkitTree ); m_pPatternItem->setText( 0, trUtf8( "Patterns" ) ); m_pDrumkitTree->setItemExpanded( m_pPatternItem, true ); for ( uint i = 0; i < m_soundLibraryList.size(); ++i ) { QString sLibraryName = m_soundLibraryList[ i ].getName(); QTreeWidgetItem* pDrumkitItem = NULL; if ( m_soundLibraryList[ i ].getType() == "song" ) { pDrumkitItem = new QTreeWidgetItem( m_pSongItem ); } if ( m_soundLibraryList[ i ].getType() == "drumkit" ) { pDrumkitItem = new QTreeWidgetItem( m_pDrumkitsItem ); } if ( m_soundLibraryList[ i ].getType() == "pattern" ) { pDrumkitItem = new QTreeWidgetItem( m_pPatternItem ); } if ( isSoundLibraryItemAlreadyInstalled( m_soundLibraryList[ i ] ) ) { pDrumkitItem->setText( 0, sLibraryName ); pDrumkitItem->setText( 1, trUtf8( "Installed" ) ); } else { pDrumkitItem->setText( 0, sLibraryName ); pDrumkitItem->setText( 1, trUtf8( "New" ) ); } } // Also clear out the image cache clearImageCache(); } /// Is the SoundLibrary already installed? bool SoundLibraryImportDialog::isSoundLibraryItemAlreadyInstalled( SoundLibraryInfo sInfo ) { // check if the filename matchs with an already installed soundlibrary directory. // The filename used in the Soundlibrary URL must be the same of the unpacked directory. // E.g: V-Synth_VariBreaks.h2drumkit must contain the V-Synth_VariBreaks directory once unpacked. // Many drumkit are broken now (wrong filenames) and MUST be fixed! QString sName = QFileInfo( sInfo.getUrl() ).fileName(); sName = sName.left( sName.lastIndexOf( "." ) ); if ( sInfo.getType() == "drumkit" ) { if ( H2Core::Filesystem::drumkit_exists(sName) ) return true; } if ( sInfo.getType() == "pattern" ) { return SoundLibraryDatabase::get_instance()->isPatternInstalled( sInfo.getName() ); } if ( sInfo.getType() == "song" ) { if ( H2Core::Filesystem::song_exists(sName) ) return true; } return false; } void SoundLibraryImportDialog::loadImage(QString img ) { QPixmap pixmap; pixmap.load( img ) ; writeCachedImage( drumkitImageLabel->text(), pixmap ); showImage( pixmap ); } void SoundLibraryImportDialog::showImage( QPixmap pixmap ) { int x = (int) drumkitImageLabel->size().width(); int y = drumkitImageLabel->size().height(); float labelAspect = (float) x / y; float imageAspect = (float) pixmap.width() / pixmap.height(); if ( ( x < pixmap.width() ) || ( y < pixmap.height() ) ) { if ( labelAspect >= imageAspect ) { // image is taller or the same as label frame pixmap = pixmap.scaledToHeight( y ); } else { // image is wider than label frame pixmap = pixmap.scaledToWidth( x ); } } drumkitImageLabel->setPixmap( pixmap ); // TODO: Check if valid! 
} void SoundLibraryImportDialog::soundLibraryItemChanged( QTreeWidgetItem* current, QTreeWidgetItem* previous ) { UNUSED( previous ); if ( current ) { QString selected = current->text(0); for ( uint i = 0; i < m_soundLibraryList.size(); ++i ) { if ( m_soundLibraryList[ i ].getName() == selected ) { SoundLibraryInfo info = m_soundLibraryList[ i ]; //bool alreadyInstalled = isSoundLibraryAlreadyInstalled( info.m_sURL ); SoundLibraryNameLbl->setText( info.getName() ); if( info.getType() == "pattern" ){ SoundLibraryInfoLbl->setText(""); } else { SoundLibraryInfoLbl->setText( info.getInfo() ); } AuthorLbl->setText( trUtf8( "Author: %1" ).arg( info.getAuthor() ) ); LicenseLbl->setText( trUtf8( "Drumkit License: %1" ).arg( info.getLicense()) ); ImageLicenseLbl->setText( trUtf8("Image License: %1" ).arg( info.getImageLicense() ) ); // Load the drumkit image // Clear any image first drumkitImageLabel->setPixmap( QPixmap() ); drumkitImageLabel->setText( info.getImage() ); if ( info.getImage().length() > 0 ) { if ( isSoundLibraryItemAlreadyInstalled( info ) ) { // get image file from local disk QString sName = QFileInfo( info.getUrl() ).fileName(); sName = sName.left( sName.lastIndexOf( "." ) ); H2Core::Drumkit* drumkitInfo = H2Core::Drumkit::load_by_name( sName, false ); if ( drumkitInfo ) { // get the image from the local filesystem QPixmap pixmap ( drumkitInfo->get_path() + "/" + drumkitInfo->get_image() ); INFOLOG("Loaded image " + drumkitInfo->get_image().toLocal8Bit() + " from local filesystem"); showImage( pixmap ); } else { ___ERRORLOG ( "Error loading the drumkit" ); } } else { // Try from the cache QString cachedFile = readCachedImage( info.getImage() ); if ( cachedFile.length() > 0 ) { QPixmap pixmap ( cachedFile ); showImage( pixmap ); INFOLOG( "Loaded image " + info.getImage().toLocal8Bit() + " from cache (" + cachedFile + ")" ); } else { // Get the drumkit's directory name from URL // // Example: if the server repo URL is: http://www.hydrogen-music.org/feeds/drumkit_list.php // and the image name from the XML is Roland_TR-808_drum_machine.jpg // the URL for the image will be: http://www.hydrogen-music.org/feeds/images/Roland_TR-808_drum_machine.jpg if ( info.getImage().length() > 0 ) { int lastSlash = info.getUrl().lastIndexOf( QString( "/" )); QString imageUrl; QString sLocalFile; imageUrl = repositoryCombo->currentText().left( repositoryCombo->currentText().lastIndexOf( QString( "/" )) + 1 ) + info.getImage() ; sLocalFile = QDir::tempPath() + "/" + QFileInfo( imageUrl ).fileName(); DownloadWidget dl( this, trUtf8( "" ), imageUrl, sLocalFile ); dl.exec(); loadImage( sLocalFile ); // Delete the temporary file QFile::remove( sLocalFile ); } } } } else { // no image file specified in drumkit.xml INFOLOG( "No image for this kit specified in drumkit.xml on remote server" ); } DownloadBtn->setEnabled( true ); return; } } } SoundLibraryNameLbl->setText( "" ); SoundLibraryInfoLbl->setText( "" ); AuthorLbl->setText( "" ); DownloadBtn->setEnabled( false ); } void SoundLibraryImportDialog::on_DownloadBtn_clicked() { QApplication::setOverrideCursor(Qt::WaitCursor); QString selected = m_pDrumkitTree->currentItem()->text(0); for ( uint i = 0; i < m_soundLibraryList.size(); ++i ) { if ( m_soundLibraryList[ i ].getName() == selected ) { // Download the sound library QString sURL = m_soundLibraryList[ i ].getUrl(); QString sType = m_soundLibraryList[ i ].getType(); QString sLocalFile; QString dataDir = H2Core::Preferences::get_instance()->getDataDirectory(); if( sType == "drumkit") { sLocalFile = 
QDir::tempPath() + "/" + QFileInfo( sURL ).fileName(); } if( sType == "song") { sLocalFile = dataDir + "songs/" + QFileInfo( sURL ).fileName(); } if( sType == "pattern") { sLocalFile = dataDir + "patterns/" + QFileInfo( sURL ).fileName(); } bool Error = false; for ( int i = 0; i < 30; ++i ) { DownloadWidget dl( this, trUtf8( "Downloading SoundLibrary..." ), sURL, sLocalFile ); dl.exec(); QUrl redirect_url = dl.get_redirect_url(); if (redirect_url.isEmpty() ) { // ok, we have all data Error = dl.get_error(); break; } else { sURL = redirect_url.toEncoded(); Error = dl.get_error(); } } //No 'else', error message has been already displayed by DL widget if(!Error) { // install the new soundlibrary try { if ( sType == "drumkit" ) { H2Core::Drumkit::install( sLocalFile ); QApplication::restoreOverrideCursor(); QMessageBox::information( this, "Hydrogen", QString( trUtf8( "SoundLibrary imported in %1" ) ).arg( dataDir ) ); } if ( sType == "song" || sType == "pattern") { QApplication::restoreOverrideCursor(); } } catch( H2Core::H2Exception ex ) { QApplication::restoreOverrideCursor(); QMessageBox::warning( this, "Hydrogen", trUtf8( "An error occurred importing the SoundLibrary." ) ); } } else { QApplication::restoreOverrideCursor(); } QApplication::setOverrideCursor(Qt::WaitCursor); // remove the downloaded files.. if( sType == "drumkit" ) { QDir dir; dir.remove( sLocalFile ); } // update the drumkit list SoundLibraryDatabase::get_instance()->update(); HydrogenApp::get_instance()->getInstrumentRack()->getSoundLibraryPanel()->test_expandedItems(); HydrogenApp::get_instance()->getInstrumentRack()->getSoundLibraryPanel()->updateDrumkitList(); updateSoundLibraryList(); QApplication::restoreOverrideCursor(); return; } } } void SoundLibraryImportDialog::on_BrowseBtn_clicked() { static QString lastUsedDir = QDir::homePath(); QFileDialog fd(this); fd.setFileMode(QFileDialog::ExistingFile); fd.setNameFilter( "Hydrogen drumkit (*.h2drumkit)" ); fd.setDirectory( lastUsedDir ); fd.setWindowTitle( trUtf8( "Import drumkit" ) ); QString filename = ""; if (fd.exec() == QDialog::Accepted) { filename = fd.selectedFiles().first(); } if (filename != "") { SoundLibraryPathTxt->setText( filename ); lastUsedDir = fd.directory().absolutePath(); InstallBtn->setEnabled ( true ); } } void SoundLibraryImportDialog::on_InstallBtn_clicked() { QApplication::setOverrideCursor(Qt::WaitCursor); QString dataDir = H2Core::Preferences::get_instance()->getDataDirectory(); try { H2Core::Drumkit::install( SoundLibraryPathTxt->text() ); QMessageBox::information( this, "Hydrogen", QString( trUtf8( "SoundLibrary imported in %1" ).arg( dataDir ) ) ); // update the drumkit list SoundLibraryDatabase::get_instance()->update(); HydrogenApp::get_instance()->getInstrumentRack()->getSoundLibraryPanel()->test_expandedItems(); HydrogenApp::get_instance()->getInstrumentRack()->getSoundLibraryPanel()->updateDrumkitList(); QApplication::restoreOverrideCursor(); } catch( H2Core::H2Exception ex ) { QApplication::restoreOverrideCursor(); QMessageBox::warning( this, "Hydrogen", trUtf8( "An error occurred importing the SoundLibrary." ) ); } } void SoundLibraryImportDialog::on_close_btn_clicked() { accept(); }
blablack/hydrogen
src/gui/src/SoundLibrary/SoundLibraryImportDialog.cpp
C++
gpl-2.0
20,246
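The import dialog above keys its on-disk cache by hashing: getCachedFilename() stores the downloaded drumkit list under the MD5 of the server URL, and getCachedImageFilename() does the same with the kit name for images. A small sketch of that scheme (the helper name below is hypothetical) shows why a hash is used instead of the raw URL: the key never collides between different servers and is always filesystem-safe, whatever characters the URL contains:

```cpp
#include <QCryptographicHash>
#include <QString>

// Sketch of the cache-key scheme used by getCachedFilename() above: the cache
// file for a repository is named after the hex MD5 digest of the server URL.
static QString cacheFileFor( const QString& cacheDir, const QString& serverUrl )
{
    const QString key = QString( QCryptographicHash::hash( serverUrl.toLatin1(),
                                                            QCryptographicHash::Md5 ).toHex() );
    return cacheDir + "/" + key;
}

// e.g. cacheFileFor( cacheDir, "http://www.hydrogen-music.org/feeds/drumkit_list.php" )
// yields <cacheDir>/<32-hex-digit md5>, matching the layout described in the
// on_UpdateListBtn_clicked() comment above.
```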
<?php defined('_JEXEC') or die; require_once dirname(__FILE__) . '/helper.php'; //this part is for when it's gonna be database driven $helper = new modPharmecRightBookingHelper(); $service_title = $helper->getCurrentService(); //if we don't have a title, then we need to get a list of services (including the categories) if(empty($service_title)) { $list_of_services = $helper->getListOfServices(); } $document = JFactory::getDocument(); $renderer = $document->loadRenderer('module'); require(JModuleHelper::getLayoutPath('mod_pharmec_right_booking'));
GZamfir/pharmec
modules/mod_pharmec_right_booking/mod_pharmec_right_booking.php
PHP
gpl-2.0
557
/* Copyright (c) 2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/cdev.h> #include <sound/voice_svc.h> #include <mach/qdsp6v2/apr_tal.h> #include <mach/qdsp6v2/apr.h> #define DRIVER_NAME "voice_svc" #define MINOR_NUMBER 1 #define APR_MAX_RESPONSE 10 #define TIMEOUT_MS 1000 #define MAX(a, b) ((a) >= (b) ? (a) : (b)) struct voice_svc_device { struct cdev *cdev; struct device *dev; int major; }; struct voice_svc_prvt { void* apr_q6_mvm; void* apr_q6_cvs; uint16_t response_count; struct list_head response_queue; wait_queue_head_t response_wait; spinlock_t response_lock; }; struct apr_data { struct apr_hdr hdr; __u8 payload[0]; } __packed; struct apr_response_list { struct list_head list; struct voice_svc_cmd_response resp; }; static struct voice_svc_device *voice_svc_dev; static struct class *voice_svc_class; static bool reg_dummy_sess; static void *dummy_q6_mvm; static void *dummy_q6_cvs; dev_t device_num; static int voice_svc_dummy_reg(void); static int32_t qdsp_dummy_apr_callback(struct apr_client_data *data, void *priv); static int32_t qdsp_apr_callback(struct apr_client_data *data, void *priv) { struct voice_svc_prvt *prtd; struct apr_response_list *response_list; unsigned long spin_flags; if ((data == NULL) || (priv == NULL)) { pr_err("%s: data or priv is NULL\n", __func__); return -EINVAL; } prtd = (struct voice_svc_prvt*)priv; if (data->opcode == RESET_EVENTS) { if (data->reset_proc == APR_DEST_QDSP6) { if (prtd->apr_q6_mvm != NULL) { apr_reset(prtd->apr_q6_mvm); prtd->apr_q6_mvm = NULL; } if (prtd->apr_q6_cvs != NULL) { apr_reset(prtd->apr_q6_cvs); prtd->apr_q6_cvs = NULL; } } } spin_lock_irqsave(&prtd->response_lock, spin_flags); if (prtd->response_count < APR_MAX_RESPONSE) { response_list = (struct apr_response_list *)kmalloc( sizeof(struct apr_response_list) + data->payload_size, GFP_ATOMIC); if (response_list == NULL) { pr_err("%s: kmalloc failed\n", __func__); return -ENOMEM; } response_list->resp.src_port = data->src_port; response_list->resp.dest_port = ((data->dest_port) >> 8); response_list->resp.token = data->token; response_list->resp.opcode = data->opcode; response_list->resp.payload_size = data->payload_size; if (data->payload != NULL && data->payload_size > 0) { memcpy(response_list->resp.payload, data->payload, data->payload_size); } list_add_tail(&response_list->list, &prtd->response_queue); prtd->response_count++; wake_up(&prtd->response_wait); } else { pr_err("%s: Response dropped since the queue is full\n", __func__); } spin_unlock_irqrestore(&prtd->response_lock, spin_flags); return 0; } static int32_t qdsp_dummy_apr_callback(struct apr_client_data *data, void *priv) { /* Do Nothing */ return 0; } static void voice_svc_update_hdr(struct voice_svc_cmd_request* apr_req_data, struct apr_data *aprdata, struct voice_svc_prvt *prtd) { aprdata->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \ 
APR_HDR_LEN(sizeof(struct apr_hdr)),\ APR_PKT_VER); aprdata->hdr.src_port = ((apr_req_data->src_port) << 8 | 0x0001); aprdata->hdr.dest_port = apr_req_data->dest_port; aprdata->hdr.token = apr_req_data->token; aprdata->hdr.opcode = apr_req_data->opcode; aprdata->hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, apr_req_data->payload_size); memcpy(aprdata->payload, apr_req_data->payload, apr_req_data->payload_size); } static int voice_svc_send_req(struct voice_svc_cmd_request *apr_request, struct voice_svc_prvt *prtd) { int ret = 0; void *apr_handle = NULL; struct apr_data *aprdata = NULL; uint32_t user_payload_size; uint32_t payload_size; if (apr_request == NULL) { pr_err("%s: apr_request is NULL\n", __func__); ret = -EINVAL; goto done; } user_payload_size = apr_request->payload_size; payload_size = sizeof(struct apr_data) + user_payload_size; if (payload_size <= user_payload_size) { pr_err("%s: invalid payload size ( 0x%x ).\n", __func__, user_payload_size); ret = -EINVAL; goto done; } else { aprdata = kmalloc(payload_size, GFP_KERNEL); if (aprdata == NULL) { ret = -ENOMEM; goto done; } } voice_svc_update_hdr(apr_request, aprdata, prtd); if (!strncmp(apr_request->svc_name, VOICE_SVC_CVS_STR, MAX(sizeof(apr_request->svc_name), sizeof(VOICE_SVC_CVS_STR)))) { apr_handle = prtd->apr_q6_cvs; } else if (!strncmp(apr_request->svc_name, VOICE_SVC_MVM_STR, MAX(sizeof(apr_request->svc_name), sizeof(VOICE_SVC_MVM_STR)))) { apr_handle = prtd->apr_q6_mvm; } else { pr_err("%s: Invalid service %s\n", __func__, apr_request->svc_name); ret = -EINVAL; goto done; } ret = apr_send_pkt(apr_handle, (uint32_t *)aprdata); if (ret < 0) { pr_err("%s: Fail in sending SNDRV_VOICE_SVC_REQUEST\n", __func__); ret = -EINVAL; } else { ret = 0; } done: if (aprdata != NULL) kfree(aprdata); return ret; } static int voice_svc_reg(char *svc, uint32_t src_port, struct voice_svc_prvt *prtd, void **handle) { int ret = 0; if (handle == NULL) { pr_err("%s: handle is NULL\n", __func__); ret = -EINVAL; goto done; } if (*handle != NULL) { pr_err("%s: svc handle not NULL\n", __func__); ret = -EINVAL; goto done; } if (src_port == (APR_MAX_PORTS - 1)) { pr_err("%s: SRC port reserved for dummy session\n", __func__); pr_err("%s: Unable to register %s\n", __func__, svc); ret = -EINVAL; goto done; } *handle = apr_register("ADSP", svc, qdsp_apr_callback, ((src_port) << 8 | 0x0001), prtd); if (*handle == NULL) { pr_err("%s: Unable to register %s\n", __func__, svc); ret = -EFAULT; goto done; } done: return ret; } static int voice_svc_dereg(char *svc, void **handle) { int ret = 0; if (handle == NULL) { pr_err("%s: handle is NULL\n", __func__); ret = -EINVAL; goto done; } apr_deregister(*handle); *handle = NULL; done: return 0; } static int process_reg_cmd(struct voice_svc_register apr_reg_svc, struct voice_svc_prvt *prtd) { int ret = 0; char *svc = NULL; void **handle = NULL; if (!strncmp(apr_reg_svc.svc_name, VOICE_SVC_MVM_STR, MAX(sizeof(apr_reg_svc.svc_name), sizeof(VOICE_SVC_MVM_STR)))) { svc = VOICE_SVC_MVM_STR; handle = &prtd->apr_q6_mvm; } else if (!strncmp(apr_reg_svc.svc_name, VOICE_SVC_CVS_STR, MAX(sizeof(apr_reg_svc.svc_name), sizeof(VOICE_SVC_CVS_STR)))) { svc = VOICE_SVC_CVS_STR; handle = &prtd->apr_q6_cvs; } else { pr_err("%s: Invalid Service: %s\n", __func__, apr_reg_svc.svc_name); ret = -EINVAL; goto done; } if (*handle == NULL && apr_reg_svc.reg_flag) { ret = voice_svc_reg(svc, apr_reg_svc.src_port, prtd, handle); } else if (handle != NULL && !apr_reg_svc.reg_flag) { ret = voice_svc_dereg(svc, handle); } done: return ret; } static long 
static long voice_svc_ioctl(struct file *file, unsigned int cmd,
			    unsigned long u_arg)
{
	int ret = 0;
	struct voice_svc_prvt *prtd;
	struct voice_svc_register apr_reg_svc;
	struct voice_svc_cmd_request *apr_request = NULL;
	struct voice_svc_cmd_response *apr_response = NULL;
	struct apr_response_list *resp;
	void __user *arg = (void __user *)u_arg;
	uint32_t user_payload_size = 0;
	unsigned long spin_flags;

	prtd = (struct voice_svc_prvt *)file->private_data;

	switch (cmd) {
	case SNDRV_VOICE_SVC_REGISTER_SVC:
		if (copy_from_user(&apr_reg_svc, arg, sizeof(apr_reg_svc))) {
			pr_err("%s: copy_from_user failed\n", __func__);
			ret = -EFAULT;
			goto done;
		}
		ret = process_reg_cmd(apr_reg_svc, prtd);
		break;

	case SNDRV_VOICE_SVC_CMD_REQUEST:
		if (!access_ok(VERIFY_READ, arg,
			       sizeof(struct voice_svc_cmd_request))) {
			pr_err("%s: Unable to read user data", __func__);
			ret = -EFAULT;
			goto done;
		}

		user_payload_size =
			((struct voice_svc_cmd_request *)arg)->payload_size;

		apr_request = kmalloc(sizeof(struct voice_svc_cmd_request) +
				      user_payload_size, GFP_KERNEL);
		if (apr_request == NULL) {
			pr_err("%s: apr_request kmalloc failed.", __func__);
			ret = -ENOMEM;
			goto done;
		}

		if (copy_from_user(apr_request, arg,
				   sizeof(struct voice_svc_cmd_request) +
				   user_payload_size)) {
			pr_err("%s: copy from user failed, size %zu\n",
			       __func__,
			       sizeof(struct voice_svc_cmd_request) +
			       user_payload_size);
			ret = -EFAULT;
			goto done;
		}

		ret = voice_svc_send_req(apr_request, prtd);
		break;

	case SNDRV_VOICE_SVC_CMD_RESPONSE:
		/* apr_response is never assigned, so this loops until a
		 * queued response is copied out, an error occurs, or the
		 * wait times out. */
		do {
			if (!access_ok(VERIFY_READ, arg,
				sizeof(struct voice_svc_cmd_response))) {
				pr_err("%s: Unable to read user data",
				       __func__);
				ret = -EFAULT;
				goto done;
			}

			user_payload_size =
			    ((struct voice_svc_cmd_response *)arg)->payload_size;

			spin_lock_irqsave(&prtd->response_lock, spin_flags);

			if (!list_empty(&prtd->response_queue)) {
				resp = list_first_entry(&prtd->response_queue,
					struct apr_response_list, list);

				if (user_payload_size <
				    resp->resp.payload_size) {
					pr_err("%s: Invalid payload size %d,%d",
					       __func__, user_payload_size,
					       resp->resp.payload_size);
					ret = -ENOMEM;
					spin_unlock_irqrestore(
						&prtd->response_lock,
						spin_flags);
					goto done;
				}

				if (!access_ok(VERIFY_WRITE, arg,
					sizeof(struct voice_svc_cmd_response) +
					resp->resp.payload_size)) {
					ret = -EFAULT;
					spin_unlock_irqrestore(
						&prtd->response_lock,
						spin_flags);
					goto done;
				}

				if (copy_to_user(arg, &resp->resp,
					sizeof(struct voice_svc_cmd_response) +
					resp->resp.payload_size)) {
					pr_err("%s: copy to user failed, size %zu\n",
					       __func__,
					       sizeof(struct voice_svc_cmd_response) +
					       resp->resp.payload_size);
					ret = -EFAULT;
					spin_unlock_irqrestore(
						&prtd->response_lock,
						spin_flags);
					goto done;
				}

				prtd->response_count--;
				list_del(&resp->list);
				kfree(resp);

				spin_unlock_irqrestore(&prtd->response_lock,
						       spin_flags);
				goto done;
			} else {
				spin_unlock_irqrestore(&prtd->response_lock,
						       spin_flags);

				ret = wait_event_interruptible_timeout(
					prtd->response_wait,
					!list_empty(&prtd->response_queue),
					msecs_to_jiffies(TIMEOUT_MS));
				if (ret == 0) {
					ret = -ETIMEDOUT;
					goto done;
				} else if (ret > 0 &&
					   !list_empty(&prtd->response_queue)) {
					ret = 0;
				} else if (ret < 0) {
					goto done;
				}
			}
		} while (!apr_response);
		break;

	default:
		ret = -EINVAL;
	}

done:
	kfree(apr_request);
	return ret;
}

static int voice_svc_dummy_reg(void)
{
	uint32_t src_port = APR_MAX_PORTS - 1;

	dummy_q6_mvm = apr_register("ADSP", "MVM", qdsp_dummy_apr_callback,
				    src_port, NULL);
	if (dummy_q6_mvm == NULL) {
		pr_err("%s: Unable to register dummy MVM\n", __func__);
		goto err;
	}

	dummy_q6_cvs = apr_register("ADSP", "CVS", qdsp_dummy_apr_callback,
				    src_port, NULL);
	if (dummy_q6_cvs == NULL) {
		pr_err("%s: Unable to register dummy CVS\n", __func__);
		goto err;
	}
	return 0;

err:
	if (dummy_q6_mvm != NULL) {
		apr_deregister(dummy_q6_mvm);
		dummy_q6_mvm = NULL;
	}
	return -EINVAL;
}

static int voice_svc_open(struct inode *inode, struct file *file)
{
	struct voice_svc_prvt *prtd = NULL;

	prtd = kmalloc(sizeof(struct voice_svc_prvt), GFP_KERNEL);
	if (prtd == NULL) {
		pr_err("%s: kmalloc failed", __func__);
		return -ENOMEM;
	}

	memset(prtd, 0, sizeof(struct voice_svc_prvt));
	prtd->apr_q6_cvs = NULL;
	prtd->apr_q6_mvm = NULL;
	prtd->response_count = 0;
	INIT_LIST_HEAD(&prtd->response_queue);
	init_waitqueue_head(&prtd->response_wait);
	spin_lock_init(&prtd->response_lock);

	file->private_data = (void *)prtd;

	/* The current APR implementation doesn't support session-based
	 * multiple service registrations. apr_deregister() sets the
	 * destination and client IDs to zero if deregister is called for
	 * a single service instance. To avoid this, register for
	 * additional (dummy) services.
	 */
	if (!reg_dummy_sess) {
		voice_svc_dummy_reg();
		reg_dummy_sess = 1;
	}
	return 0;
}

static int voice_svc_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations voice_svc_fops = {
	.owner = THIS_MODULE,
	.open = voice_svc_open,
	.unlocked_ioctl = voice_svc_ioctl,
	.release = voice_svc_release,
};

static int voice_svc_probe(struct platform_device *pdev)
{
	int ret = 0;

	voice_svc_dev = devm_kzalloc(&pdev->dev,
				     sizeof(struct voice_svc_device),
				     GFP_KERNEL);
	if (!voice_svc_dev) {
		pr_err("%s: kzalloc failed\n", __func__);
		ret = -ENOMEM;
		goto done;
	}

	ret = alloc_chrdev_region(&device_num, 0, MINOR_NUMBER, DRIVER_NAME);
	if (ret) {
		pr_err("%s: Failed to alloc chrdev\n", __func__);
		ret = -ENODEV;
		goto done;
	}

	voice_svc_dev->major = MAJOR(device_num);
	voice_svc_class = class_create(THIS_MODULE, DRIVER_NAME);
	if (IS_ERR(voice_svc_class)) {
		ret = PTR_ERR(voice_svc_class);
		pr_err("%s: Failed to create class; err = %d\n", __func__,
		       ret);
		goto class_err;
	}

	voice_svc_dev->dev = device_create(voice_svc_class, NULL, device_num,
					   NULL, DRIVER_NAME);
	if (IS_ERR(voice_svc_dev->dev)) {
		ret = PTR_ERR(voice_svc_dev->dev);
		pr_err("%s: Failed to create device; err = %d\n", __func__,
		       ret);
		goto dev_err;
	}

	voice_svc_dev->cdev = cdev_alloc();
	cdev_init(voice_svc_dev->cdev, &voice_svc_fops);
	ret = cdev_add(voice_svc_dev->cdev, device_num, MINOR_NUMBER);
	if (ret) {
		pr_err("%s: Failed to register chrdev; err = %d\n", __func__,
		       ret);
		goto add_err;
	}
	goto done;

add_err:
	cdev_del(voice_svc_dev->cdev);
	device_destroy(voice_svc_class, device_num);
dev_err:
	class_destroy(voice_svc_class);
class_err:
	/* was unregister_chrdev_region(0, ...); free the allocated region */
	unregister_chrdev_region(device_num, MINOR_NUMBER);
done:
	return ret;
}

static int voice_svc_remove(struct platform_device *pdev)
{
	cdev_del(voice_svc_dev->cdev);
	kfree(voice_svc_dev->cdev);
	device_destroy(voice_svc_class, device_num);
	class_destroy(voice_svc_class);
	unregister_chrdev_region(device_num, MINOR_NUMBER);
	kfree(voice_svc_dev);

	return 0;
}

static struct of_device_id voice_svc_of_match[] = {
	{.compatible = "qcom,msm-voice-svc"},
	{ }
};
MODULE_DEVICE_TABLE(of, voice_svc_of_match);

static struct platform_driver voice_svc_driver = {
	.probe = voice_svc_probe,
	.remove = voice_svc_remove,
	.driver = {
		.name = "msm-voice-svc",
		.owner = THIS_MODULE,
		.of_match_table = voice_svc_of_match,
	},
};

static int __init voice_svc_init(void)
{
	return platform_driver_register(&voice_svc_driver);
}

static void __exit voice_svc_exit(void)
{
	platform_driver_unregister(&voice_svc_driver);
}
module_init(voice_svc_init);
module_exit(voice_svc_exit);

MODULE_DESCRIPTION("Soc QDSP6v2 Audio APR driver");
MODULE_LICENSE("GPL v2");
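
/*
 * A minimal userspace sketch (illustrative only, not part of the driver):
 * how a client might drive the ioctl interface above. The device node path
 * and the uapi header name are assumptions; the struct fields and
 * SNDRV_VOICE_SVC_REGISTER_SVC follow the driver code above.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sound/voice_svc.h>	/* assumed uapi header */

int main(void)
{
	struct voice_svc_register reg;
	int fd = open("/dev/voice_svc", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&reg, 0, sizeof(reg));
	strncpy(reg.svc_name, "MVM", sizeof(reg.svc_name) - 1);
	reg.src_port = 1;	/* any port except APR_MAX_PORTS - 1 (reserved) */
	reg.reg_flag = 1;	/* 1 = register, 0 = deregister */

	if (ioctl(fd, SNDRV_VOICE_SVC_REGISTER_SVC, &reg) < 0)
		perror("SNDRV_VOICE_SVC_REGISTER_SVC");

	close(fd);
	return 0;
}
#endif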
rastomanchik/android_kernel_xiaomi_armani
arch/arm/mach-msm/qdsp6v2/voice_svc.c
C
gpl-2.0
15,567
/*
 * wifi.cpp
 *
 *  Created on: 24 Oct 2012
 *      Author: thomas
 */

#include "ns3/core-module.h"
#include "ns3/simulator.h"
#include "ns3/node.h"
#include "ns3/global-value.h"
#include "ns3/wifi-module.h"
#include "ns3/wimax-helper.h"
#include "ns3/point-to-point-helper.h"
#include "ns3/internet-module.h"
#include "ns3/udp-echo-helper.h"
#include "ns3/olsr-module.h"
#include "ns3/flow-monitor.h"
#include "ns3/applications-module.h"
#include "ns3/mobility-module.h"
#include "ns3/inet-socket-address.h"
#include "ns3/csma-module.h"
#include "ns3/fypApp.h"
#include "ns3/flow-monitor-module.h"
#include "ns3/packet-socket-address.h"

using namespace ns3;

NS_LOG_COMPONENT_DEFINE("Wifi");

void packetReceived(Ptr<const Packet>, const Address &);

static void SetPosition(Ptr<Node> node, double x, double y)
{
    Ptr<MobilityModel> mobility = node->GetObject<MobilityModel>();
    Vector pos = mobility->GetPosition();
    pos.x = x;
    pos.y = y;
    mobility->SetPosition(pos);
}

int main(int argc, char * argv[])
{
    CommandLine cmd;
    cmd.Parse(argc, argv);

    NodeContainer wifiNodes;
    wifiNodes.Create(2);

    NodeContainer gatewayNode;
    gatewayNode.Create(1);

    WifiHelper wifi;
    YansWifiPhyHelper wifiPhy = YansWifiPhyHelper::Default();
    wifiPhy.SetPcapDataLinkType(YansWifiPhyHelper::DLT_IEEE802_11);

    YansWifiChannelHelper channel;
    channel.SetPropagationDelay("ns3::ConstantSpeedPropagationDelayModel");
    channel.AddPropagationLoss("ns3::TwoRayGroundPropagationLossModel",
                               "SystemLoss", DoubleValue(1),
                               "HeightAboveZ", DoubleValue(1.5));

    wifiPhy.Set("TxPowerStart", DoubleValue(33));
    wifiPhy.Set("TxPowerEnd", DoubleValue(33));
    wifiPhy.Set("TxPowerLevels", UintegerValue(1));
    wifiPhy.Set("TxGain", DoubleValue(0));
    wifiPhy.Set("RxGain", DoubleValue(0));
    wifiPhy.Set("EnergyDetectionThreshold", DoubleValue(-61.8));
    wifiPhy.Set("CcaMode1Threshold", DoubleValue(-64.8));
    wifiPhy.SetChannel(channel.Create());

    NqosWifiMacHelper wifiMac = NqosWifiMacHelper::Default();
    wifi.SetStandard(WIFI_PHY_STANDARD_80211b);
    wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager",
                                 "DataMode", StringValue("DsssRate1Mbps"),
                                 "ControlMode", StringValue("DsssRate1Mbps"));

    Ssid ssid = Ssid("rescue-net");
    wifiMac.SetType("ns3::StaWifiMac",
                    "Ssid", SsidValue(ssid),
                    "ActiveProbing", BooleanValue(false));

    //wifiNodes.Add(gatewayNode);
    NetDeviceContainer wifiStaDevs = wifi.Install(wifiPhy, wifiMac, wifiNodes);

    wifiMac.SetType("ns3::ApWifiMac", "Ssid", SsidValue(ssid));
    NetDeviceContainer wifiApDev = wifi.Install(wifiPhy, wifiMac, gatewayNode);

    InternetStackHelper stack;
    stack.Install(gatewayNode);
    stack.Install(wifiNodes);

    Ipv4GlobalRoutingHelper::PopulateRoutingTables ();

    Ipv4AddressHelper address;
    address.SetBase("10.1.6.0", "255.255.255.0");
    Ipv4InterfaceContainer wifiInterfaces = address.Assign(wifiStaDevs);
    Ipv4InterfaceContainer wifiApInterface = address.Assign(wifiApDev);
    address.SetBase("10.1.3.0", "255.255.255.0");

    Ptr<Node> t_node = wifiNodes.Get(0);
    Ipv4StaticRoutingHelper helper;
    Ptr<Ipv4> ipv4 = t_node->GetObject<Ipv4>();
    Ptr<Ipv4StaticRouting> Ipv4stat = helper.GetStaticRouting(ipv4);
    Ipv4stat->SetDefaultRoute("10.1.6.3", 1, -10);

    Ptr<Node> t_node2 = wifiNodes.Get(1);
    Ipv4StaticRoutingHelper helper2;
    Ptr<Ipv4> ipv42 = t_node2->GetObject<Ipv4>();
    Ptr<Ipv4StaticRouting> Ipv4stat2 = helper2.GetStaticRouting(ipv42);
    Ipv4stat2->SetDefaultRoute("10.1.6.3", 1, -10);

    // Install FlowMonitor on all nodes
    FlowMonitorHelper flowmon;
    Ptr<FlowMonitor> monitor = flowmon.InstallAll();

    UdpEchoClientHelper echoClient (wifiApInterface.GetAddress(0), 9);
("MaxPackets", UintegerValue (10000)); echoClient.SetAttribute ("Interval", TimeValue (Seconds (.1))); echoClient.SetAttribute ("PacketSize", UintegerValue (2048)); ApplicationContainer clientApps = echoClient.Install (wifiNodes.Get (0)); clientApps.Start (Seconds (2.0)); clientApps.Stop (Seconds (9.0)); /* UdpEchoClientHelper echoClient1 (wifiApInterface.GetAddress (0), 9); echoClient1.SetAttribute ("MaxPackets", UintegerValue (10000)); echoClient1.SetAttribute ("Interval", TimeValue (Seconds (.15))); echoClient1.SetAttribute ("PacketSize", UintegerValue (2048)); UdpEchoClientHelper echoClient2 (wifiApInterface.GetAddress(0), 9); echoClient2.SetAttribute ("MaxPackets", UintegerValue (10000)); echoClient2.SetAttribute ("Interval", TimeValue (Seconds (.15))); echoClient2.SetAttribute ("PacketSize", UintegerValue (2048)); ApplicationContainer clientApps2 = echoClient2.Install (wifiNodes.Get (0)); clientApps2.Start (Seconds (2.0)); clientApps2.Stop (Seconds (9.0)); UdpEchoServerHelper echoServer(9); ApplicationContainer serverApps = echoServer.Install(gatewayNode.Get(0)); ApplicationContainer clientApps1 = echoClient1.Install (wifiNodes.Get (1)); clientApps1.Start (Seconds (2.0)); clientApps1.Stop (Seconds (9.0)); // Print per flow statistics monitor->CheckForLostPackets (); Ptr<Ipv4FlowClassifier> classifier = DynamicCast<Ipv4FlowClassifier> (flowmon.GetClassifier ()); std::map<FlowId, FlowMonitor::FlowStats> stats = monitor->GetFlowStats (); for (std::map<FlowId, FlowMonitor::FlowStats>::const_iterator iter = stats.begin (); iter != stats.end (); ++iter) { Ipv4FlowClassifier::FiveTuple t = classifier->FindFlow (iter->first); std::cout << "Flow ID: " << iter->first << " Src Addr " << t.sourceAddress << " Dst Addr " << t.destinationAddress; std::cout << "Tx Packets = " << iter->second.txPackets; std::cout << "Rx Packets = " << iter->second.rxPackets; std::cout << "Throughput: " << iter->second.rxBytes * 8.0 / (iter->second.timeLastRxPacket.GetSeconds()-iter->second.timeFirstTxPacket.GetSeconds()) / 1024 << " Kbps\n"; } Simulator::Destroy(); */ PacketSinkHelper sink("ns3::UdpSocketFactory", InetSocketAddress(Ipv4Address::GetAny(), 9)); ApplicationContainer sinkApp = sink.Install(gatewayNode.Get(0)); sinkApp.Start(Seconds(0.1)); sinkApp.Stop(Seconds(12.0)); UdpEchoServerHelper echoServer2(9); ApplicationContainer serverApps2 = echoServer2.Install(gatewayNode.Get(0)); /*UdpEchoServerHelper echoServer(9); ApplicationContainer serverApps = echoServer.Install(gatewayNode.Get(0)); */ Ipv4GlobalRoutingHelper::PopulateRoutingTables (); Ipv4GlobalRoutingHelper::RecomputeRoutingTables(); MobilityHelper mobility; Ptr<ListPositionAllocator> positionAlloc = CreateObject <ListPositionAllocator>(); positionAlloc->Add(Vector(20,20,0)); //positionAlloc->Add(Vector(1000,0,0)); //positionAlloc->Add(Vector(450,0,0)); mobility.SetPositionAllocator(positionAlloc); mobility.SetMobilityModel("ns3::ConstantPositionMobilityModel"); //mobility.Install(hqNode); //mobility.Install(satelliteNode); mobility.Install(wifiNodes); MobilityHelper wimaxMobility; Ptr<ListPositionAllocator> wimaxPositionAlloc = CreateObject<ListPositionAllocator>(); wimaxPositionAlloc->Add(Vector(20,20,0)); wimaxMobility.SetPositionAllocator(wimaxPositionAlloc); wimaxMobility.SetMobilityModel("ns3::ConstantPositionMobilityModel"); wimaxMobility.Install(gatewayNode); //wimaxMobility.Install(wimaxNode); SetPosition(gatewayNode.Get(0), -50, 0); /*SetPosition(satelliteNode.Get(0), 50, -50.0); SetPosition(wimaxNode.Get(0), -100, -50.0); 
    SetPosition(hqNode.Get(0), -50.0, -100.0);*/
    SetPosition(wifiNodes.Get(0), -25, 25);
    SetPosition(wifiNodes.Get(1), -50, 50);

    Simulator::Stop(Seconds(10.0));
    Simulator::Run();

    // Print per flow statistics
    monitor->CheckForLostPackets ();
    Ptr<Ipv4FlowClassifier> classifier = DynamicCast<Ipv4FlowClassifier> (flowmon.GetClassifier ());
    std::map<FlowId, FlowMonitor::FlowStats> stats = monitor->GetFlowStats ();
    for (std::map<FlowId, FlowMonitor::FlowStats>::const_iterator iter = stats.begin (); iter != stats.end (); ++iter)
    {
        Ipv4FlowClassifier::FiveTuple t = classifier->FindFlow (iter->first);
        std::cout << "Flow ID: " << iter->first << " Src Addr " << t.sourceAddress << " Dst Addr " << t.destinationAddress;
        std::cout << " Tx Packets = " << iter->second.txPackets;
        std::cout << " Rx Packets = " << iter->second.rxPackets;
        std::cout << " Throughput: " << iter->second.rxBytes * 8.0 / (iter->second.timeLastRxPacket.GetSeconds()-iter->second.timeFirstTxPacket.GetSeconds()) / 1024 << " Kbps\n";
    }

    Simulator::Destroy();
}

int packetsRx = 0;

void packetReceived(Ptr<const Packet> p, const Address & addr)
{
    if(packetsRx % 2 == 0) {
        std::cout << "Got packet EVEN\n";
    } else {
        std::cout << "Got packet ODD\n";
    }
    packetsRx++;
}

FYPApp::FYPApp()
{
    std::cout << "Hello, I am the FYP App.\n";
}

void FYPApp::Setup(Ptr<Node> node)
{
    m_node = node;
}

FYPApp::~FYPApp()
{
    std::cout << "Killing this instance.\n";
}

bool FYPApp::PacketIntercept(Ptr<Packet> p, const Ipv4Header &)
{
    std::cout << "GOT PACKET!\n";
    return true;
}

void FYPApp::StartApplication()
{
    Ptr<Ipv4L3Protocol> ipv4Proto = m_node->GetObject<Ipv4L3Protocol> ();
    if (ipv4Proto != 0)
    {
        NS_LOG_INFO ("Ipv4 packet interceptor added");
        std::cout << "Added packet interceptor!\n";
        ipv4Proto->AddPacketInterceptor (MakeCallback (&FYPApp::PacketIntercept, this),
                                         UdpL4Protocol::PROT_NUMBER);
    }
    else
    {
        std::cout << "Did not add packet interceptor!\n";
        NS_LOG_INFO ("No Ipv4 with packet intercept facility");
    }
    std::cout << "Starting the application\n";
}

void FYPApp::StopApplication()
{
    std::cout << "Stopping the application\n";
}
kelsteNa/fyp
new.cc
C++
gpl-2.0
9,431
<?php
/**
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; under version 2
 * of the License (non-upgradable).
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (c) 2013-2014 (original work) Open Assessment Technologies SA (under the project TAO-PRODUCT);
 *
 * @author Jérôme Bogaerts <jerome@taotesting.com>
 * @license GPLv2
 */

namespace qtism\data\storage\xml\marshalling;

use qtism\data\expressions\ItemSubset;
use qtism\data\QtiComponent;
use qtism\common\collections\IdentifierCollection;
use \DOMElement;

/**
 * A complex Operator marshaller focusing on the marshalling/unmarshalling process
 * of itemSubset QTI operators.
 *
 * @author Jérôme Bogaerts <jerome@taotesting.com>
 *
 */
class ItemSubsetMarshaller extends Marshaller
{
    /**
     * @see \qtism\data\storage\xml\marshalling\Marshaller::marshall()
     */
    protected function marshall(QtiComponent $component)
    {
        $element = self::getDOMCradle()->createElement($this->getExpectedQtiClassName());

        $sectionIdentifier = $component->getSectionIdentifier();
        if (!empty($sectionIdentifier)) {
            self::setDOMElementAttribute($element, 'sectionIdentifier', $sectionIdentifier);
        }

        $includeCategories = $component->getIncludeCategories();
        if (count($includeCategories) > 0) {
            self::setDOMElementAttribute($element, 'includeCategory', implode(' ', $includeCategories->getArrayCopy()));
        }

        $excludeCategories = $component->getExcludeCategories();
        if (count($excludeCategories) > 0) {
            self::setDOMElementAttribute($element, 'excludeCategory', implode(' ', $excludeCategories->getArrayCopy()));
        }

        return $element;
    }

    /**
     * @see \qtism\data\storage\xml\marshalling\Marshaller::unmarshall()
     */
    protected function unmarshall(DOMElement $element)
    {
        $object = new ItemSubset();

        if (($sectionIdentifier = static::getDOMElementAttributeAs($element, 'sectionIdentifier')) !== null) {
            $object->setSectionIdentifier($sectionIdentifier);
        }

        if (($includeCategories = static::getDOMElementAttributeAs($element, 'includeCategory')) !== null) {
            $includeCategories = new IdentifierCollection(explode("\x20", $includeCategories));
            $object->setIncludeCategories($includeCategories);
        }

        if (($excludeCategories = static::getDOMElementAttributeAs($element, 'excludeCategory')) !== null) {
            $excludeCategories = new IdentifierCollection(explode("\x20", $excludeCategories));
            $object->setExcludeCategories($excludeCategories);
        }

        return $object;
    }

    /**
     * @see \qtism\data\storage\xml\marshalling\Marshaller::getExpectedQtiClassName()
     */
    public function getExpectedQtiClassName()
    {
        return 'itemSubset';
    }
}
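
/*
 * For illustration only: given a section identifier and include/exclude
 * categories (sample values below, not taken from any real test), the element
 * built by marshall() above would serialize to something like:
 *
 *   <itemSubset sectionIdentifier="S01" includeCategory="math algebra" excludeCategory="draft"/>
 */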
hutnikau/qti-sdk
src/qtism/data/storage/xml/marshalling/ItemSubsetMarshaller.php
PHP
gpl-2.0
3,416
<?php
if($_POST){
    templatic_load_settings_page();
}

/*
Name : templatic_load_settings_page
Description : redirect the user to the right tab after saving
*/
function templatic_load_settings_page()
{
    if ( isset($_POST["settings-submit"]) && $_POST["settings-submit"] == 'Y' )
    {
        templatic_save_settings();
        $url_parameters = isset($_GET['tab'])? 'updated=true&tab='.$_GET['tab'] : 'updated=true';
        $sub_url_parameters = isset($_GET['sub_tab'])? '&sub_tab='.$_GET['sub_tab'] : '';
        echo "<script>location.href='".admin_url('admin.php?page=templatic_settings&'.$url_parameters.$sub_url_parameters.'')."'</script>";
    }
}

/*
Name : templatic_save_settings
Description : Save all general settings
*/
function templatic_save_settings()
{
    global $pagenow;
    $settings = get_option( "templatic_settings" );
    if ( $pagenow == 'admin.php' && $_GET['page'] == 'templatic_settings' )
    {
        /* POST BLOCKED IP ADDRESSES */
        if(isset($_POST['block_ip']) && $_POST['block_ip']!="")
        {
            /* CALL A FUNCTION TO SAVE IP DATA */
            insert_ip_address_data($_POST['block_ip']);
        }

        if(isset($_REQUEST['sub_tab']) && $_REQUEST['sub_tab']=="widgets")
            $_POST['templatic_widgets']=isset($_POST['templatic_widgets'])?$_POST['templatic_widgets']:array();

        if(isset($_REQUEST['sub_tab']) && $_REQUEST['sub_tab']=="captcha")
            $_POST['user_verification_page']=isset($_POST['user_verification_page'])?$_POST['user_verification_page']:array();

        if(isset($_REQUEST['sub_tab']) && $_REQUEST['sub_tab']=="email")
        {
            $_POST['send_to_frnd']=isset($_POST['send_to_frnd'])?$_POST['send_to_frnd']:'';
            $_POST['send_inquiry']=isset($_POST['send_inquiry'])?$_POST['send_inquiry']:'';
        }

        foreach($_POST as $key=>$val)
        {
            $settings[$key] = isset($_POST[$key])?$_POST[$key]:'';
        }
        /* write once after collecting all keys instead of once per key */
        update_option('templatic_settings', $settings);
    }
}
?>
<?php
// general setting tab filter
add_filter('templatic_general_settings_tab', 'general_setting',10);
function general_setting($tabs )
{
    $tabs['general']='General settings';
    return $tabs;
}

/*
 * create action for captcha-setting-data
 */
add_action('templatic_general_setting_data','captcha_setting_data');
function captcha_setting_data($column)
{
    $tmpdata = get_option('templatic_settings');
    switch($column)
    {
        case 'captcha':
            $user_verification_page = @$tmpdata['user_verification_page'];?>
            <p class="description"><?php _e('The settings listed here are common for the whole plugin. You just need to select the forms where you want to enable captcha.',DOMAIN); ?></p>
            <tr>
                <th><label><?php _e('Enable',DOMAIN);?></label></th>
                <td>
                    <div class="input_wrap">
                        <input type="radio" id="recaptcha" name="recaptcha" value="recaptcha" <?php if(isset($tmpdata['recaptcha']) && $tmpdata['recaptcha'] == 'recaptcha'){?>checked="checked"<?php }?> /><label for="recaptcha">&nbsp;<?php _e('WP-reCaptcha',DOMAIN);?></label></div>
                    <div class="input_wrap">
                        <input type="radio" id="playthru" name="recaptcha" <?php if(isset($tmpdata['recaptcha']) && $tmpdata['recaptcha'] == 'playthru'){?> checked="checked"<?php }?> value="playthru" /><label for="playthru">&nbsp;<?php _e('Playthru',DOMAIN);?></label></div>
                    <div class="clearfix"></div>
                    <p class="description"><?php _e('You can use any of these captcha options on your site. Select the one you want to use here.
                    You can get the plugins here : <br/> <a href="http://wordpress.org/extend/plugins/are-you-a-human/">Are You a Human</a> <br/> <a href="http://wordpress.org/extend/plugins/wp-recaptcha/">WP-reCaptcha</a>',DOMAIN); ?></p>
                </td>
            </tr>
            <tr>
                <th><label><?php _e('Enable User verification on',DOMAIN);?></label></th>
                <td class="captcha_chk">
                    <label><input type='checkbox' name="user_verification_page[]" id="user_verification_page" <?php if(count($user_verification_page) > 0 && in_array('registration', $user_verification_page)){ echo "checked=checked"; } ?> value="registration"/> <?php _e('Registration page',DOMAIN); ?></label><div class="clearfix"></div>
                    <label><input type='checkbox' name="user_verification_page[]" id="user_verification_page" <?php if(count($user_verification_page) > 0 && in_array('submit', $user_verification_page)){ echo "checked=checked"; } ?> value="submit"/> <?php _e('Submit listing page',DOMAIN); ?></label><div class="clearfix"></div>
                    <label><input type='checkbox' name="user_verification_page[]" id="user_verification_page" <?php if(count($user_verification_page) > 0 && in_array('claim', $user_verification_page)){ echo "checked=checked"; } ?> value="claim"/> <?php _e('Claim Ownership',DOMAIN); ?></label><div class="clearfix"></div>
                    <label><input type='checkbox' name="user_verification_page[]" id="user_verification_page" <?php if(count($user_verification_page) > 0 && in_array('emaitofrd', $user_verification_page)){ echo "checked=checked"; } ?> value="emaitofrd"/> <?php _e('Email to Friend',DOMAIN); ?></label><div class="clearfix"></div><div class="clearfix"></div>
                    <!--<label><input type='checkbox' name="user_verification_page[]" id="user_verification_page" <?php if(count($user_verification_page) > 0 && in_array('sendinquiry', $user_verification_page)){ echo "checked=checked"; } ?> value="sendinquiry"/> <?php //_e('Send Inquiry',DOMAIN); ?></label><div class="clearfix"></div><div class="clearfix"></div>-->
                    <p class="description"><?php _e('Just check the forms where you want to use the captcha.',DOMAIN); ?></p>
                </td>
            </tr>
            <?php
            break;
    }
}

/*
 * Create email setting data action
 */
add_action('templatic_general_setting_data','email_setting_data',10);
function email_setting_data($column)
{
    $tmpdata = get_option('templatic_settings');
    switch($column)
    {
        case 'email': ?>
            <p class="description"><?php _e('Email settings are common for the whole plugin. Whatever you set here will be common for all the mails sent from your domain.',DOMAIN); ?></p>
            <tr>
                <td>
                    <table style="width:60%" class="form-table">
                        <tr>
                            <th><label><?php _e('Email',DOMAIN);?></label></th>
                            <td>
                                <div class="input_wrap">
                                    <input type="radio" id="php_mail" name="php_mail" value="php_mail" <?php if(isset($tmpdata['php_mail']) && $tmpdata['php_mail'] == 'php_mail'){?>checked="checked"<?php }?> /><label for="php_mail">&nbsp;<?php _e('PHP Mail',DOMAIN);?></label></div>
                                <div class="input_wrap">
                                    <input type="radio" id="wp_smtp" name="php_mail" <?php if(isset($tmpdata['php_mail']) && $tmpdata['php_mail'] == 'wp_smtp'){?> checked="checked"<?php }?> value="wp_smtp" /><label for="wp_smtp">&nbsp;<?php _e('WP SMTP Mail',DOMAIN);?> </label></div>
                                <p class="description"><?php _e('This setting allows you to select the mail function you want to use to send emails from your site. You can select either PHP Mail or SMTP Mail. By default it will send mails using PHP Mail.',DOMAIN); ?></p>
                            </td>
                        </tr>
                    </table>
                </td>
            </tr>
            <tr>
                <td>
                    <table style="width:60%" class="form-table">
                        <tr>
                            <th><label><?php _e('Enable',DOMAIN);?></label></th>
                            <td>
                                <div class="input_wrap">
                                    <input type="checkbox" id="send_to_frnd" name="send_to_frnd" value="send_to_frnd" <?php if(isset($tmpdata['send_to_frnd']) && $tmpdata['send_to_frnd'] == 'send_to_frnd'){?>checked="checked"<?php }?> /><label for="send_to_frnd">&nbsp;<?php _e('Send to Friend',DOMAIN);?></label></div>
                                <div class="input_wrap">
                                    <input type="checkbox" id="send_inquiry" name="send_inquiry" <?php if(isset($tmpdata['send_inquiry']) && $tmpdata['send_inquiry'] == 'send_inquiry'){?> checked="checked"<?php }?> value="send_inquiry" /><label for="send_inquiry">&nbsp;<?php _e('Send Inquiry',DOMAIN);?> </label>
                                </div>
                                <p class="description"><?php _e('This setting allows you to enable the Send to Friend and Send Inquiry emails on your site. The links for these emails will be shown on the post detail page once you enable them here.',DOMAIN); ?></p>
                            </td>
                        </tr>
                    </table>
                </td>
            </tr>
            <tr>
                <td>
                    <h3><?php _e('Send email to friend/Send Inquiry Email Content Settings',DOMAIN);?></h3>
                    <table style="width:60%" class="widefat post">
                        <thead>
                            <tr>
                                <th>
                                    <label for="email_type" class="form-textfield-label"><?php _e('Email Type',DOMAIN); ?></label>
                                </th>
                                <th>
                                    <label for="email_sub" class="form-textfield-label"><?php _e('Email Subject',DOMAIN); ?></label>
                                </th>
                                <th>
                                    <label for="email_desc" class="form-textfield-label"><?php _e('Email Description',DOMAIN); ?></label>
                                </th>
                            </tr>
                        </thead>
                        <tbody>
                            <tr>
                                <td>
                                    <label class="form-textfield-label"><?php _e('Send email to friend',DOMAIN); ?></label>
                                </td>
                                <td>
                                    <textarea name="mail_friend_sub" style="width:350px; height:130px;"><?php if(isset($tmpdata['mail_friend_sub'])){echo $tmpdata['mail_friend_sub'];}else{echo 'Send to friend';} ?></textarea>
                                </td>
                                <td>
                                    <textarea name="mail_friend_description" style="width:350px; height:130px;"><?php if(isset($tmpdata['mail_friend_description'])){echo $tmpdata['mail_friend_description'];}else{echo '<p>Dear [#$to_name#],</p> <p>[#$frnd_comments#]</p> <p>Link : <b>[#$post_title#]</b> </p> <p>From, [#$your_name#]</p> <p>Sent from -[#$post_url_link#]</p>';}?></textarea>
                                </td>
                            </tr>
                            <tr>
                                <td>
                                    <label class="form-textfield-label"><?php _e('Send inquiry email',DOMAIN); ?></label>
                                </td>
                                <td>
                                    <textarea name="send_inquirey_email_sub" style="width:350px; height:130px;"><?php if(isset($tmpdata['send_inquirey_email_sub'])){echo $tmpdata['send_inquirey_email_sub'];}else{echo 'Inquiry email';}?></textarea>
                                </td>
                                <td>
                                    <textarea name="send_inquirey_email_description" style="width:350px; height:130px;"><?php if(isset($tmpdata['send_inquirey_email_description'])){echo $tmpdata['send_inquirey_email_description'];}else{echo '<p>Dear [#to_name#],</p><p>Here is an inquiry for <b>[#post_title#]</b>. </p><p>Below is the message. </p><p><b>Subject : [#frnd_subject#]</b>.</p><p>[#frnd_comments#]</p><p>Thank you,<br /> [#your_name#]</p>';}?></textarea>
                                </td>
                            </tr>
                        </tbody>
                    </table>
                </td>
            </tr>
            <?php
            break;
    }
}

/*
 * Apply filter to get the general setting tabs.
 * If you want to create a new main tab in the general settings menu, use the
 * 'templatic_general_settings_tab' filter hook, pass the tabs array to the
 * hook function and return the tabs array.
 */
@$tabs = apply_filters('templatic_general_settings_tab',$tabs);

echo '<div id="icon-options-general" class="icon32"><br></div>';
echo '<h2 class="nav-tab-wrapper">';
$i=0;
foreach( $tabs as $tab => $name ){
    if($i==0)
        $tab_key=$tab;
    $current_tab=isset($_REQUEST['tab'])?$_REQUEST['tab']:$tab_key;
    $class = ( $tab == $current_tab) ? ' nav-tab-active' : '';
    echo "<a class='nav-tab$class' href='?page=templatic_settings&tab=$tab'>$name</a>";
    $i++;
}
echo '</h2>';
/* Finish the general settings menu main tabs */

/*
 * create the general setting sub tabs
 */
if($current_tab=='general'):
    $i=0;
    /* Add filter to create the general setting sub tabs for the Captcha and Email settings */
    add_filter('templatic_general_settings_subtabs', 'captcha_setting',12);
    function captcha_setting($sub_tabs )
    {
        $sub_tabs['captcha']='Captcha Settings';
        $sub_tabs['email']='Email Settings';
        return $sub_tabs;
    }

    /*
     * Apply filter to create the general setting subtabs.
     * If you want to create new subtabs in the general settings menu, use the
     * 'templatic_general_settings_subtabs' filter hook, pass the subtabs array
     * to the hook function and return the subtabs array.
     */
    @$sub_tabs = apply_filters('templatic_general_settings_subtabs',$sub_tabs);
    echo '<h3 class="nav-tab-wrapper">';
    foreach($sub_tabs as $key=>$value)
    {
        if($i==0)
            $sab_key=$key;
        $current=isset($_REQUEST['sub_tab'])?$_REQUEST['sub_tab']:$sab_key;
        $class = (isset($current) && ($key == $current)) ? ' nav-tab-active' : '';
        echo "<a id='$key' class='nav-tab$class' href='?page=templatic_settings&tab=general&sub_tab=$key'>$value</a>";
        $i++;
    }
    echo '</h3>';
endif;
?>
<!-- Display the message -->
<?php if(isset($_REQUEST['updated']) && $_REQUEST['updated'] == 'true' ): ?>
<div class="act_success updated" id="message">
    <p><?php echo "<strong>Record updated successfully</strong>"; ?>.</p>
</div>
<?php endif; ?>
<!-- Finish the display message -->
<div class="templatic_settings">
    <form method="post" class="form_style" action="<?php admin_url( 'themes.php?page=templatic_settings' ); ?>">
        <table class="form-table">
        <?php
        $j=0;
        $i=0;
        foreach( $tabs as $tab => $name ){
            if($j==0)
                $tab_key=$tab;
            if($current_tab=='general'): /* Display the general setting subtabs menu */
                // display the general setting data tab by tab
                foreach($sub_tabs as $key=>$value)
                {
                    if($i==0)
                        $sab_key=$key;
                    $current=isset($_REQUEST['sub_tab'])?$_REQUEST['sub_tab']:$sab_key;
                    if($current==$key)
                        do_action('templatic_general_setting_data',$key); /* the 'templatic_general_setting_data' action hook shows the subtab data; pass the general setting subtab key. */
                    $i++;
                }
            endif;
            if(isset($_REQUEST['tab']) && $_REQUEST['tab']==$tab):
                do_action('templatic_general_data',$tab); /* the 'templatic_general_data' action hook shows the general setting tab data; pass the general setting tab key. */
            endif;
            $tab_key="";
            $current_tab='';
            $j++;
        }
        ?>
        </table>
        <p class="submit" style="clear: both;">
            <input type="submit" name="Submit" class="button-primary" value="Save All Settings" />
            <input type="hidden" name="settings-submit" value="Y" />
        </p>
    </form>
</div>
imshashank/osuevents
wp-content/plugins/Tevolution/tmplconnector/monetize/templatic-generalizaion/general_settings.php
PHP
gpl-2.0
15,380
echo "vm.swappiness = 10" >> /etc/sysctl.conf sysctl vm.swappiness=10 addgroup supergroup adduser root supergroup adduser vagrant supergroup # Add group and user for Hadoop Monitor web application export PASSWORD=`openssl passwd -1 password` addgroup webgroup useradd -m -s /bin/bash -g webgroup webuser -p $PASSWORD ## ## INSTALL PACKAGES ## # setup a source for maven3 which is required by Accumulo. echo "deb http://ppa.launchpad.net/natecarlson/maven3/ubuntu precise main" | tee -a /etc/apt/sources.list echo "deb-src http://ppa.launchpad.net/natecarlson/maven3/ubuntu precise main" | tee -a /etc/apt/sources.list apt-get update apt-get -y install openjdk-6-jdk subversion expect git #apt-get -y install curl git openssh-server openssh-client terminator openjdk-6-jdk subversion screen g++ make meld build-essential g++-multilib apt-get -y --force-yes install maven3 # # Set the locale locale-gen en_US # remove the symbolic link to maven2. You can still access it via /usr/share/maven2/bin/mvn ln -s /usr/share/maven3/bin/mvn /usr/bin/mvn export VFILES=/vagrant/files export BASE_DIR=/home/vagrant/accumulo_home cat > /etc/profile.d/accumulo_setup.sh <<EOF export ACCUMULO_HOME=/home/vagrant/accumulo_home/bin/accumulo export HADOOP_PREFIX=/home/vagrant/accumulo_home/bin/hadoop export JAVA_HOME=/usr/lib/jvm/java-6-openjdk-amd64 export ZOOKEEPER_HOME=/home/vagrant/accumulo_home/bin/zookeeper export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/opt/vagrant_ruby/bin export PATH=\$ACCUMULO_HOME/bin:\$PATH export PATH=\$HADOOP_PREFIX/bin:\$PATH export PATH=/usr/lib/jvm/java-6-openjdk-amd64/bin:\$PATH export PATH=\$ZOOKEEPER_HOME/bin:\$PATH EOF source /etc/profile.d/accumulo_setup.sh export HADOOP_VERSION=hadoop-1.2.1 export ZOOKEEPER_VERSION=zookeeper-3.4.3 export LOGFILE=$HOME/build.log export PASSWORD=`openssl passwd -1 password` mkdir -p $BASE_DIR/software $BASE_DIR/bin mkdir -p /home/vagrant/.ssh chmod 700 /home/vagrant/.ssh chown -R vagrant:vagrant /home/vagrant/.ssh su vagrant -c "ssh-keygen -t rsa -P '' -f /home/vagrant/.ssh/id_rsa" mkdir -p /vagrant/files/ssh cp /home/vagrant/.ssh/id_rsa.pub /vagrant/files/ssh/`hostname`.pub
medined/Accumulo_Snapshot_By_Vagrant
files/setup.sh
Shell
gpl-2.0
2,200
-- Vehicle
class 'Vehicle'(Entity)

-- CTor
function Vehicle:__init(id)
    Entity.__init(self, id)
    self._type = "Vehicle"
end

-- Delete
function Vehicle:Delete()
    self:_CheckExists()

    local c_handle = CMemoryBlock(4)
    c_handle:WriteDWORD32(0, self.ID)
    natives.VEHICLE.DELETE_VEHICLE(c_handle)
    c_handle:Release()
end

-- Set not needed
function Vehicle:SetNotNeeded()
    self:_CheckExists()

    local c_vehicle_handle = CMemoryBlock(4)
    c_vehicle_handle:WriteDWORD32(0, self.ID)
    natives.ENTITY.SET_VEHICLE_AS_NO_LONGER_NEEDED(c_vehicle_handle)
    c_vehicle_handle:Release()
end

-- Is vehicle stuck on roof (returns true/false)
function Vehicle:IsStuckOnRoof()
    self:_CheckExists()
    return natives.VEHICLE.IS_VEHICLE_STUCK_ON_ROOF(self.ID)
end

-- Returns amount of passengers in vehicle
function Vehicle:GetNumberOfPassengers()
    self:_CheckExists()
    return natives.VEHICLE.GET_VEHICLE_NUMBER_OF_PASSENGERS(self.ID)
end

-- Returns max number of passengers
function Vehicle:GetMaxNumberOfPassengers()
    self:_CheckExists()
    return natives.VEHICLE.GET_VEHICLE_MAX_NUMBER_OF_PASSENGERS(self.ID)
end

-- Explode vehicle
function Vehicle:Explode()
    self:_CheckExists()
    natives.VEHICLE.EXPLODE_VEHICLE(self.ID, true, true)
end

-- Set vehicle colours
function Vehicle:SetColours(p, s)
    self:_CheckExists()
    natives.VEHICLE.SET_VEHICLE_COLOURS(self.ID, p, s)
end

-- Set vehicle extra colours
function Vehicle:SetExtraColours(p, s)
    self:_CheckExists()
    natives.VEHICLE.SET_VEHICLE_EXTRA_COLOURS(self.ID, p, s)
end

-- Set primary colour.
function Vehicle:SetPrimaryColour(r, g, b)
    self:_CheckExists()
    natives.VEHICLE.SET_VEHICLE_CUSTOM_PRIMARY_COLOUR(self.ID, r, g, b)
end

-- Set secondary colour.
function Vehicle:SetSecondaryColour(r, g, b)
    self:_CheckExists()
    natives.VEHICLE.SET_VEHICLE_CUSTOM_SECONDARY_COLOUR(self.ID, r, g, b)
end

-- Checks whether the vehicle siren is on.
function Vehicle:IsSirenOn()
    self:_CheckExists()
    return natives.VEHICLE.IS_VEHICLE_SIREN_ON(self.ID)
end

-- Checks the vehicle's dirt level
function Vehicle:GetDirtlevel()
    self:_CheckExists()
    return natives.VEHICLE.GET_VEHICLE_DIRT_LEVEL(self.ID)
end

-- Sets the vehicle's dirt level (0 = clean, 15 = dirty)
function Vehicle:SetDirtLevel(i)
    self:_CheckExists()
    natives.VEHICLE.SET_VEHICLE_DIRT_LEVEL(self.ID, i)
end

-- Sets whether the vehicle engine is on.
function Vehicle:SetEngineState(b)
    self:_CheckExists()
    natives.VEHICLE.SET_VEHICLE_ENGINE_ON(self.ID, b, true)
end

-- Checks whether the vehicle is on all wheels
function Vehicle:IsOnAllWheels()
    self:_CheckExists()
    return natives.VEHICLE.IS_VEHICLE_ON_ALL_WHEELS(self.ID)
end

-- Fixes the vehicle
function Vehicle:Fix()
    self:_CheckExists()
    natives.VEHICLE.SET_VEHICLE_FIXED(self.ID)
end

-- Neon Lights
function Vehicle:SetNeonLights(enabled, r, g, b, location)
    self:_CheckExists()

    -- on/off
    if location == nil then
        natives.VEHICLE._SET_VEHICLE_NEON_LIGHT_ENABLED(self.ID, 0, enabled)
        natives.VEHICLE._SET_VEHICLE_NEON_LIGHT_ENABLED(self.ID, 1, enabled)
        natives.VEHICLE._SET_VEHICLE_NEON_LIGHT_ENABLED(self.ID, 2, enabled)
        natives.VEHICLE._SET_VEHICLE_NEON_LIGHT_ENABLED(self.ID, 3, enabled)
    else
        natives.VEHICLE._SET_VEHICLE_NEON_LIGHT_ENABLED(self.ID, location, enabled)
    end

    -- color (accepts either r, g, b or a single {r=, g=, b=} table)
    if r == nil then return end
    if type(r) == "table" then
        b = r.b
        g = r.g
        r = r.r
    end
    natives.VEHICLE._SET_VEHICLE_NEON_LIGHTS_COLOUR(self.ID, r, g, b)
end

-- Plate functions
function Vehicle:GetPlateType()
    self:_CheckExists()
    return natives.VEHICLE.GET_VEHICLE_NUMBER_PLATE_TEXT_INDEX(self.ID)
end

function Vehicle:SetPlateType(i)
    self:_CheckExists()
    natives.VEHICLE.SET_VEHICLE_NUMBER_PLATE_TEXT_INDEX(self.ID, i)
end

function Vehicle:GetPlateText()
    self:_CheckExists()
    return natives.VEHICLE.GET_VEHICLE_NUMBER_PLATE_TEXT(self.ID)
end

function Vehicle:SetPlateText(text)
    self:_CheckExists()
    natives.VEHICLE.SET_VEHICLE_NUMBER_PLATE_TEXT(self.ID, text)
end

-- Get vehicle name from its model hash
function Vehicle:GetModelName()
    self:_CheckExists()
    return VEHICLES[self:GetModel()]
end

-- Get vehicle codename from its name
function Vehicle:GetCodename()
    self:_CheckExists()
    return VEHICLES[self:GetModelName()].Codename
end

-- Get vehicle maker from its name
function Vehicle:GetMaker()
    self:_CheckExists()
    return VEHICLES[self:GetModelName()].Maker
end

-- Get vehicle full name from its name
function Vehicle:GetFullName()
    self:_CheckExists()
    return VEHICLES[self:GetModelName()].FullName
end

-- Get vehicle class from its name
function Vehicle:GetClass()
    self:_CheckExists()
    return VEHICLES[self:GetModelName()].Class
end

-- Get vehicle type
function Vehicle:IsCar()
    self:_CheckExists()
    return natives.VEHICLE.IS_THIS_MODEL_A_CAR(self:GetModel())
end

function Vehicle:IsTrain()
    self:_CheckExists()
    return natives.VEHICLE.IS_THIS_MODEL_A_TRAIN(self:GetModel())
end

function Vehicle:IsBike()
    self:_CheckExists()
    return natives.VEHICLE.IS_THIS_MODEL_A_BIKE(self:GetModel())
end

function Vehicle:IsBicycle()
    self:_CheckExists()
    return natives.VEHICLE.IS_THIS_MODEL_A_BICYCLE(self:GetModel())
end

function Vehicle:IsQuadbike()
    self:_CheckExists()
    return natives.VEHICLE.IS_THIS_MODEL_A_QUADBIKE(self:GetModel())
end

function Vehicle:IsPlane()
    self:_CheckExists()
    return natives.VEHICLE.IS_THIS_MODEL_A_PLANE(self:GetModel())
end

function Vehicle:IsHeli()
    self:_CheckExists()
    return natives.VEHICLE.IS_THIS_MODEL_A_HELI(self:GetModel())
end

function Vehicle:IsBoat()
    self:_CheckExists()
    return natives.VEHICLE.IS_THIS_MODEL_A_BOAT(self:GetModel())
end

function Vehicle:IsSub()
    self:_CheckExists()
    return natives.VEHICLE._IS_THIS_MODEL_A_SUBMERSIBLE(self:GetModel())
end

-- Set vehicle on ground properly
function Vehicle:SetOnGround()
    self:_CheckExists()
    natives.VEHICLE.SET_VEHICLE_ON_GROUND_PROPERLY(self.ID)
end

-- Returns the ped which is in a specific vehicle seat
function Vehicle:GetPedInSeat(seat)
    self:_CheckExists()
    local ped = natives.VEHICLE.GET_PED_IN_VEHICLE_SEAT(self.ID, seat)
    return ped > 0 and Ped(ped) or nil
end

-- Sets current vehicle's radio station by name ("OFF" turns radio off)
function Vehicle:SetRadioStationName(stationName)
    self:_CheckExists()
    natives.AUDIO.SET_VEH_RADIO_STATION(self.ID, stationName)
end

-- Get vehicle's colours
function Vehicle:GetColours()
    self:_CheckExists()

    local m_p = CMemoryBlock(4)
    local m_s = CMemoryBlock(4)
    natives.VEHICLE.GET_VEHICLE_COLOURS(self.ID, m_p, m_s)

    local p = m_p:ReadDWORD32(0)
    local s = m_s:ReadDWORD32(0)
    m_p:Release()
    m_s:Release()
    return p, s
end

-- Get vehicle's extra colours
function Vehicle:GetExtraColours()
    self:_CheckExists()

    local m_p = CMemoryBlock(4)
    local m_s = CMemoryBlock(4)
    natives.VEHICLE.GET_VEHICLE_EXTRA_COLOURS(self.ID, m_p, m_s)

    local p = m_p:ReadDWORD32(0)
    local s = m_s:ReadDWORD32(0)
    m_p:Release()
    m_s:Release()
    return p, s
end

-- Get vehicle's window tint
function Vehicle:GetWindowTint()
    self:_CheckExists()
    return natives.VEHICLE.GET_VEHICLE_WINDOW_TINT(self.ID)
end

-- Set vehicle's window tint
function Vehicle:SetWindowTint(t)
    self:_CheckExists()
    natives.VEHICLE.SET_VEHICLE_WINDOW_TINT(self.ID, t)
end
Freeeaky/GTALua
build/GTALua/internal/game/Vehicle.lua
Lua
gpl-2.0
7,140
<?php $captcha_word = 'DZ5E'; ?>
CoordCulturaDigital-Minc/culturadigital.br
wp-content/plugins/si-captcha-for-wordpress/captcha-secureimage/captcha-temp/pfOxT6CtvmexP9dq.php
PHP
gpl-2.0
32
#!/bin/sh
#collect_data.sh
#author: wgc

filename=`date +%y%m%d`
touch $filename
data=$1
while read -r line
do
    ../paris-traceroute --algo=exhaustive $line >> paris-traceroute$filename 2>&1
done < $data
wgcitgkaka/paris_Cplus_0.1
src/test/collect_data.sh
Shell
gpl-2.0
204
/*
 * Rvzware based in CAPAWARE 3D
 *
 * Rvzware is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2.1 of the License, or (at your option)
 * any later version.
 *
 * Rvzware is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this application; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * The Rvzware development team
 */

#include <sstream>
//#include <stdafx.h>

#include <cpw/entity/Entity.h>
#include <cpw/entity/EntityFactory.h>
#include <cpw/entity/EntityRegistry.h>
#include <cpw/entity/Element3D.h>
#include <cpw/persistent/database/DataBase.h>
#include <cpw/persistent/database/Table.h>
#include <cpw/persistent/database/PersistentDBCallBack.h>

using namespace cpw;

//! DB constructor
/*!
 Database constructor.
 \param id Identifier
 \param class_name Class name
*/
DataBase::DataBase(const cpw::TypeId id, const std::string &class_name)
    : cpw::ContainerLayer(id, class_name)
{
    db_boConnected = false; // Initially no connection has been established
}

//! DB destructor
/*!
 Database destructor.
*/
DataBase::~DataBase(void)
{
    Disconnect();
}

//! Adds entity to the layer
/*!
 Adds an entity to the layer
 \param entity Entity to be added
*/
int DataBase::Add(Entity *entity)
{
    ContainerLayer::Add(entity);
    entity->SetPersistentCallBack(new PersistentDBCallBack);
    return 0;
}

//! DB disconnection
/*!
 If a connection is currently established, it is closed.
*/
void DataBase::Disconnect()
{
    // Only if the connection is currently established
    if (db_boConnected)
    {
        PQfinish(db_conn);
        db_boConnected = false;
    }
}

DataBase::DataBase(const DataBase &database)
    : cpw::ContainerLayer(database)
{
    db_boConnected = false; // Initially not connected
    Modified();
}

DataBase &DataBase::operator = (const DataBase &database)
{
    ContainerLayer::operator =(database);
    Modified();
    return *this;
}

//! Persistence manager for DB
/*!
 Persistence manager for DB
*/
int DataBase::CreatePersistence()
{
    ContainerLayer::CreatePersistence();

    // Elements to be saved
    AddPersistence(std::string("db_host"), db_host);
    AddPersistence(std::string("db_user"), db_user);
    AddPersistence(std::string("db_passwd"), db_passwd);
    AddPersistence(std::string("db_name"), db_name);

    // Arrays are managed differently
    AddPersistenceLevel((std::string)"db_tablenames");
    for(std::vector<std::string>::iterator i = db_tablenames.begin(); i != db_tablenames.end(); i++)
    {
        AddPersistence(std::string("db_tablename"), *i);
    }
    RemovePersistenceLevel();

    AddPersistenceLevel((std::string)"db_columnspertable");
    for(std::vector<std::vector<std::string> >::iterator i = db_columnspertable.begin(); i != db_columnspertable.end(); i++)
    {
        AddPersistenceLevel((std::string)"db_columns");
        for(std::vector<std::string>::iterator j = i->begin(); j != i->end(); j++)
        {
            AddPersistence(std::string("db_columnname"), *j);
        }
        RemovePersistenceLevel();
    }
    RemovePersistenceLevel();

    return 0;
}

int DataBase::AdaptPersistence(cpw::Node *root)
{
    ContainerLayer::AdaptPersistence(root);

    db_host = (root->GetChildValue("db_host"));
    db_user = (root->GetChildValue("db_user"));
    db_passwd = (root->GetChildValue("db_passwd"));
    db_name = (root->GetChildValue("db_name"));

    std::vector<cpw::Node *> root_children = root->GetChildren();
    std::vector<cpw::Node *>::iterator i;
    for(i = root_children.begin(); i != root_children.end(); i++)
    {
        if ((*i)->GetName() == "db_tablenames")
        {
            std::vector<cpw::Node *> table_children = (*i)->GetChildren();
            std::vector<cpw::Node *>::iterator j;
            for(j = table_children.begin(); j != table_children.end(); j++)
            {
                db_tablenames.push_back((*j)->GetValue());
            }
        }
    }

    for(i = root_children.begin(); i != root_children.end(); i++)
    {
        if ((*i)->GetName() == "db_columnspertable")
        {
            std::vector<cpw::Node *> tables_children = (*i)->GetChildren();
            std::vector<cpw::Node *>::iterator j;
            for(j = tables_children.begin(); j != tables_children.end(); j++)
            {
                std::vector<cpw::Node *> columns_children = (*j)->GetChildren();
                std::vector<cpw::Node *>::iterator k;
                db_columnspertable.push_back(std::vector<std::string>());
                for(k = columns_children.begin(); k != columns_children.end(); k++)
                {
                    db_columnspertable.back().push_back((*k)->GetValue());
                }
            }
        }
    }

    // Establishes the connection if necessary
    SetConnected(OpenConnection());

    // Read DB tables
    ReadTables();

    return 0;
}
//! Gets DB tables
/*!
 Gets the DB tables, returning how many are available.
 It also saves the field names in the database instance.
*/
int DataBase::GetDBTables()
{
    // No tables initially contained
    int ntables = 0;

    // Getting the connection id of the DB
    PGconn *conn = this->GetDBConn();

    // If the connection is running
    if (this->GetConnected())
    {
        PGresult *res;
        int nt;

        // The SQL command is composed to get the table information
        // A temporary var is used to hold the table info
        std::vector<std::string> tables;

        // SQL request
        res = PQexec(conn, "select table_name from information_schema.tables where table_schema = 'public' and TABLE_TYPE = 'BASE TABLE'");

        // If a non-NULL answer was received
        if(res != NULL)
        {
            int npos;
            ExecStatusType qstatus = PQresultStatus(res);

            // Different statuses require different actions
            // Currently only the PGRES_TUPLES_OK status is treated
            switch(qstatus)
            {
            case PGRES_COMMAND_OK:
                //printf("Result completed successfully.\n");
                break;
            case PGRES_EMPTY_QUERY:
                //printf("Empty query sent to server.\n");
                break;
            case PGRES_TUPLES_OK:
                ntables = PQntuples(res);
                npos = PQfnumber(res, "table_name");
                // Getting table names
                for (nt = 0; nt < ntables; nt++)
                {
                    // Get the field values (we ignore the possibility that they are null!!!!)
                    tables.push_back(PQgetvalue(res, nt, npos));
                }
                // Saving table names in the related DB variable
                this->SetDBTableNames(tables);
                break;
            case PGRES_COPY_OUT: // from server
            case PGRES_COPY_IN:  // to server
                // printf("Data copy between server and client in progress.\n");
                break;
            case PGRES_BAD_RESPONSE:
                // printf("Bad response from server.\n");
                break;
            case PGRES_NONFATAL_ERROR:
                // printf("Non-fatal error returned from server.\n");
                break;
            case PGRES_FATAL_ERROR:
                //terminate(PQresultErrorMessage(res),conn);
                break;
            default:
                //terminate("Query status unknown, terminating.\n", conn);
                break;
            }
        }
        PQclear(res);

        // Gathering information about the different columns or fields of each table
        std::vector<std::vector<std::string> > columnspertable;
        std::vector<std::string> columnnames;

        // Those names are obtained for each table
        for (nt = 0; nt < ntables; nt++)
        {
            // The SQL request is composed
            int ncolumns = 0;
            std::string str;
            str = "SELECT column_name FROM information_schema.columns WHERE table_name = '" + tables.at(nt) + "'";
            res = PQexec(conn, str.c_str());
            columnnames.clear();

            // If there is a good answer
            if (PQresultStatus(res) == PGRES_TUPLES_OK)
            {
                ncolumns = PQntuples(res);
                int npos = PQfnumber(res, "column_name");
                // Every column/field name is read
                for (int nc = 0; nc < ncolumns; nc++)
                {
                    // Get the field values (we ignore the possibility that they are null!!!!)
                    columnnames.push_back(PQgetvalue(res, nc, npos));
                }
                // Adding the names to the temporary var
                columnspertable.push_back(columnnames);
            }
            PQclear(res);

            // Saving field info in the DB instance
            this->SetDBColumnsperTable(columnspertable);
        }
    }

    // The number of tables is returned
    return ntables;
}
//! Gets the records available in a DB table for a given selection of fields
/*!
 Gets the records available in a DB table for a given selection of fields
 \param fields Fields to be returned
 \param tablename Table to be consulted
*/
PGresult * DataBase::GetDBRecordsfromTable(std::string &fields, std::string &tablename)
{
    // Getting the connection id of the DB
    PGconn *conn = this->GetDBConn();

    // If the connection is running
    if (this->GetConnected())
    {
        PGresult *res;
        std::string db_row; // temp used to save records

        // Temporary vars are used to save columns etc.
        std::vector<std::vector<std::string> > columnspertable;
        std::vector<std::string> columns;

        // The SQL command is composed to get the table information
        // SQL request to get the given fields of the selected table
        int neltos = 0;
        std::string str;
        str = "SELECT " + fields + " FROM " + tablename;
        res = PQexec(conn, str.c_str());

        // If the command was successful
        if (PQresultStatus(res) == PGRES_TUPLES_OK)
        {
            // Compiles db_row with all the records
            for (int i = 0; i < PQntuples(res); i++)
            {
                db_row.clear();
                for (int j = 0; j < PQnfields(res); j++)
                {
                    db_row = db_row + PQgetvalue(res, i, j) + " ";
                }
            }
            return res;
        }
        PQclear(res);
    }
    return NULL;
}

//! Inserts a record in the database
/*!
 Inserts a record in the database. It should return the proper identifier error
 but right now it returns 0 (existing file)
 \param tablename Table to be used
 \param fields Fields to be written
 \param values Field values to be written
*/
PersistentError DataBase::InsertDBRecordinTable(std::string &tablename, std::string &fields, std::string &values)
{
    // Is it connected?
    if (GetConnected())
    {
        PGresult *res;

        // The SQL request is composed
        std::string str;
        // Using the input arguments the request is formed
        str = "INSERT INTO " + tablename + "(" + fields + ") VALUES (" + values + ")";
        res = PQexec(GetDBConn(), str.c_str());

        // If the answer is not null
        if(res != NULL)
        {
            // Was the command correctly executed?
            if (PQresultStatus(res) != PGRES_COMMAND_OK)
            {
                fprintf(stderr, "INSERT failed: %s", PQerrorMessage(GetDBConn()));
                PQclear(res);
            }
            else
                return (cpw::PersistentError)0;
            //PQclear(res); // Should this be done unconditionally?
        }
    }
    return (cpw::PersistentError)-1;
}

//! Remove records satisfying a condition
/*!
 Remove records satisfying a condition
 \param tablename Table to be used
 \param condition Condition to be checked
*/
void DataBase::DeleteDBRecordsinTable(std::string &tablename, std::string &condition)
{
    // Getting the connection id of the DB
    PGconn *conn = this->GetDBConn();

    // If the connection is running
    if (this->GetConnected())
    {
        // temp vars
        PGresult *res;

        // SQL request
        std::string str;
        str = "DELETE FROM " + tablename + condition;
        res = PQexec(conn, str.c_str());
        if (PQresultStatus(res) != PGRES_COMMAND_OK)
        {
            fprintf(stderr, "DELETE failed: %s", PQerrorMessage(conn));
            PQclear(res);
        }
    }
}

//! Creates a table
/*!
 Creates a table
 \param tablename Table to be used
*/
PersistentError DataBase::CreateDBTable(std::string &tablename)
{
    // If the connection is running
    if (GetConnected())
    {
        PGresult *res;

        // SQL command composition
        std::string str;
        // The table is created without fields; those will be added with the first record
        str = "CREATE TABLE " + tablename + " ()";
        res = PQexec(GetDBConn(), str.c_str());
        if (PQresultStatus(res) != PGRES_COMMAND_OK)
        {
            fprintf(stderr, "CREATE failed: %s", PQerrorMessage(GetDBConn()));
            PQclear(res);
        }
        else
            return (cpw::PersistentError)0;
        //PQclear(res); // Is this necessary?
    }
    return (cpw::PersistentError)-1;
}
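
/*
 * A minimal, self-contained libpq sketch (illustrative only, not part of this
 * class) of the same INSERT flow as InsertDBRecordinTable above. The
 * connection string, table and column names are placeholders.
 */
#if 0
#include <stdio.h>
#include <libpq-fe.h>

int main(void)
{
    /* Placeholder connection parameters */
    PGconn *conn = PQconnectdb("host=localhost dbname=testdb user=postgres");
    PGresult *res;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    /* Same statement shape as the method above:
     * INSERT INTO <table>(<fields>) VALUES (<values>) */
    res = PQexec(conn, "INSERT INTO mytable(id, name) VALUES (1, 'demo')");
    if (PQresultStatus(res) != PGRES_COMMAND_OK)
        fprintf(stderr, "INSERT failed: %s", PQerrorMessage(conn));

    PQclear(res);
    PQfinish(conn);
    return 0;
}
#endif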
//! Modifies a table
/*!
 Modifies a table in terms of fields and their type
 \param tablename Table to be used
 \param fieldandtype Field and new type
*/
PersistentError DataBase::AlterDBTable(std::string &tablename, std::string &fieldandtype)
{
    // If the connection is running
    if (GetConnected())
    {
        PGresult *res;

        // The table is modified by adding a column
        std::string str;
        str = "ALTER TABLE " + tablename + " add " + fieldandtype;
        res = PQexec(GetDBConn(), str.c_str());
        if (PQresultStatus(res) != PGRES_COMMAND_OK)
        {
            fprintf(stderr, "ALTER failed: %s", PQerrorMessage(GetDBConn()));
            PQclear(res);
        }
        else
            return (cpw::PersistentError)0;
    }
    return (cpw::PersistentError)-1;
}

//! Adds a geospatial field to a table
/*!
 Adds a geospatial column to a table
 \param tablename Table to be used
 \param columnname Name of the geometry column
*/
PersistentError DataBase::AddLocationColumn(std::string &tablename, std::string &columnname)
{
    // If the connection is running
    if (GetConnected())
    {
        PGresult *res;

        // The SQL request is composed
        std::string str;
        // Currently only a POINT element is considered
        // select AddGeometryColumn('nombretabla','utm',4326,'POINT','2')
        // This command does not use capital letters to find the table, so we
        // lowercase the tablename before composing the SQL request
        std::transform(tablename.begin(), tablename.end(), tablename.begin(), tolower);
        str = "select AddGeometryColumn('" + tablename + "','" + columnname + "',4326,'POINT','2')";
        res = PQexec(GetDBConn(), str.c_str());
        // If it did not work
        /*if (PQresultStatus(res) != PGRES_COMMAND_OK)
        {
            fprintf(stderr, "AddGeometryColumn failed: %s", PQerrorMessage(GetDBConn()));
            PQclear(res);
        }
        else*/
        return (cpw::PersistentError)0;
    }
    return (cpw::PersistentError)-1;
}

//! Reads tables and records
/*!
 Reads tables and records
*/
void DataBase::ReadTables()
{
    // Getting DB tables
    int ntables = GetDBTables();

    // Reading table contents if any
    if (ntables)
    {
        // Getting table names
        std::vector<std::string> tablenames = GetDBTableNames();

        // Tables are added to the tree only if they contain geospatial data
        for (int i = 0; i < ntables; i++)
        {
            // To check if a table contains geo data:
            // select * from geometry_columns where f_table_name=tablenames.at(i)
            // getting the geo field name and its type

            // temporary var
            std::vector<std::string> tables;

            // SQL request
            PGresult *resG;
            std::string aux = "select * from geometry_columns where f_table_name='" + tablenames.at(i) + "'";
            resG = PQexec(GetDBConn(), aux.c_str());

            // Non-null answer
            if(resG != NULL)
            {
                // OK answer
                if (PQresultStatus(resG) == PGRES_TUPLES_OK)
                {
                    // The table contains geo data; add the table to the tree along with its records
                    if (PQntuples(resG))
                    {
                        // Table instance
                        cpw::Table new_tablep;
                        cpw::Entity* new_table = cpw::EntityFactory::GetInstance()->CreateEntity(new_tablep.GetClassName());
                        cpw::Table &table = *((cpw::Table*) new_table);
                        new_table->SetName(tablenames.at(i));
                        // It is assumed that the table contains columns
                        table.SetEmpty(false);
                        cpw::EntityRegistry::GetInstance()->Add(new_table);
                        Add(new_table);

                        // For each record (indeed we expect only one per table; it has not been tested with more)
                        for (int nl = 0; nl < PQntuples(resG); nl++)
                        {
                            // Getting the geometry column name
                            std::string columnname;
                            int ncolumn = PQfnumber(resG, "f_geometry_column");
                            // geospatial field name
                            columnname = PQgetvalue(resG, nl, ncolumn);

                            // Geo information is encoded, so the inverse of GeometryFromText is needed;
                            // the PostGIS function asewkt provides that.
                            // http://www.mapbender.org/presentations/Spatial_Data_Management_Arnulf_Christl/Spatial_Data_Management_Arnulf_Christl.pdf
utilities http://postgis.refractions.net/docs/ch03.html#id2656610 //The table column names are requested //Each column is treated accordingly (especially the geospatial info) std::string consulta; std::string str,Name; str = "SELECT column_name FROM information_schema.columns WHERE table_name = '" + tablenames.at(i) + "'"; PGresult *resC = PQexec(GetDBConn(), str.c_str()); //If there are columns we compose in consulta the full list of fields to be read from the table if (PQresultStatus(resC) == PGRES_TUPLES_OK) { //Position of the column containing the column_name int npos=PQfnumber(resC, "column_name"); //For each column for (int j = 0; j < PQntuples(resC); j++) { //The name is read std::string strtmp; strtmp=PQgetvalue(resC, j, npos); if (j) { if (strtmp.compare(columnname)==0)//Column with geo spatial info, thus a special treatment is needed { consulta.append(" , asText("+ columnname + ")"); } else//normal column { consulta.append(" , "+ strtmp ); } } else {//We assume that the first column will never be the geospatial column consulta=strtmp; } } } else Name="name"; //Default option PQclear(resC); //Composing SQL request considering the field names just read from the table //For the geospatial column: //asewkt gets srid and point, asText returns only the point. asGml asSvg asKml PGresult *res=GetDBRecordsfromTable((std::string&)consulta,(std::string&)tablenames.at(i)); //If the result is not null if (res!=NULL) { if (PQresultStatus(res) == PGRES_TUPLES_OK) { //The records are added to the layer-tree //Right now this is done for a specific set of fields (that are known) for (int nr = 0; nr < PQntuples(res); nr++) { //Element3D instance std::string strtmp; cpw::Element3D new_elto3dp; cpw::Entity* new_elto3d = cpw::EntityFactory::GetInstance()->CreateEntity(new_elto3dp.GetClassName()); //The field id gives us the field type int pos=PQfnumber(res, "id"); strtmp=PQgetvalue(res, nr, pos); cpw::TypeId id(strtmp); new_elto3d->SetID(id); pos=PQfnumber(res, "name"); strtmp=PQgetvalue(res, nr, pos); new_elto3d->SetName(strtmp); pos=PQfnumber(res, "font"); strtmp=PQgetvalue(res, nr, pos); new_elto3d->SetFont(strtmp); pos=PQfnumber(res, "text"); strtmp=PQgetvalue(res, nr, pos); ((cpw::Element *) new_elto3d)->SetText(strtmp); pos=PQfnumber(res, "description"); strtmp=PQgetvalue(res, nr, pos); new_elto3d->SetDescription(strtmp); pos=PQfnumber(res, "primitive_url"); strtmp=PQgetvalue(res, nr, pos); //To avoid problems std::replace(strtmp.begin(), strtmp.end(), '/', '\\'); new_elto3d->SetPrimitiveUrl(strtmp); pos=PQfnumber(res, "icon"); strtmp=PQgetvalue(res, nr, pos); //To avoid problems std::replace(strtmp.begin(), strtmp.end(), '/', '\\'); new_elto3d->SetIcon(strtmp); pos=PQfnumber(res, "html"); strtmp=PQgetvalue(res, nr, pos); new_elto3d->SetHtml(strtmp); pos=PQfnumber(res, "model_url"); strtmp=PQgetvalue(res, nr, pos); //To avoid problems std::replace(strtmp.begin(), strtmp.end(), '/', '\\'); ((cpw::Element3D *) new_elto3d)->SetModelUrl(strtmp); //booleans pos=PQfnumber(res, "dynamic"); strtmp=PQgetvalue(res, nr, pos); if (atol(strtmp.c_str())) new_elto3d->SetDynamic(true); else new_elto3d->SetDynamic(false); pos=PQfnumber(res, "visible"); strtmp=PQgetvalue(res, nr, pos); if (atol(strtmp.c_str())) new_elto3d->SetVisible(true); else new_elto3d->SetVisible(false); pos=PQfnumber(res, "animate"); strtmp=PQgetvalue(res, nr, pos); if (atol(strtmp.c_str())) new_elto3d->SetAnimate(true); else new_elto3d->SetAnimate(false); pos=PQfnumber(res, "published"); strtmp=PQgetvalue(res, nr, pos); if (atol(strtmp.c_str())) 
new_elto3d->SetPublished(true); else new_elto3d->SetPublished(false); //Numerical values with spaces for scale pos=PQfnumber(res, "scale"); strtmp=PQgetvalue(res, nr, pos);//This string contains three numerical values separated by blanks //First blank int fb=strtmp.find_first_of(" "); //second blank int sb=strtmp.find_first_of(" ",fb+1); //Getting the three numerical values float fvals[3]; fvals[0]=(float)atof(strtmp.substr(0,fb).c_str()); fvals[1]=(float)atof(strtmp.substr(fb+1,sb-fb).c_str()); fvals[2]=(float)atof(strtmp.substr(sb,strtmp.length()-sb).c_str()); ((cpw::Element *) new_elto3d)->SetScale(fvals); //Numerical values with spaces for orientation pos=PQfnumber(res, "orientation"); strtmp=PQgetvalue(res, nr, pos);//This string contains three numerical values separated by blanks //First blank position fb=strtmp.find_first_of(" "); //Second blank position sb=strtmp.find_first_of(" ",fb+1); //The three numerical values are extracted fvals[0]=(float)atof(strtmp.substr(0,fb).c_str()); fvals[1]=(float)atof(strtmp.substr(fb+1,sb-fb).c_str()); fvals[2]=(float)atof(strtmp.substr(sb,strtmp.length()-sb).c_str()); ((cpw::Element *) new_elto3d)->SetOrientation(fvals); //geospatial information column int posgeo=PQfnumber(res, "astext"); strtmp=PQgetvalue(res, nr, posgeo); //The opening ( is searched for sb=strtmp.find_first_of("("); //A blank is located fb=strtmp.find_first_of(" ",sb+1); //the final ) is located int lb=strtmp.find_first_of(")"); //latitude is between the ( and the blank fvals[0]=(float)atof(strtmp.substr(sb+1,fb-(sb+1)).c_str());//latitude fvals[1]=(float)atof(strtmp.substr(fb+1,lb-fb).c_str());//longitude //Conversion to UTM int RefEllipsoid = 23;//WGS-84. See list with file "LatLong-UTM conversion.cpp" for id numbers UTMLL utm_aux; double UTMNorthing=0.f, UTMEasting=0.f; char UTMZone[5]; utm_aux.LLtoUTM(RefEllipsoid, fvals[0], fvals[1], UTMNorthing, UTMEasting,UTMZone); //The height was stored separately pos=PQfnumber(res, "utm_z"); strtmp=PQgetvalue(res, nr, pos);//The string contains the height value float height=(float)atof(strtmp.c_str()); ((cpw::Element *) new_elto3d)->SetUtm(UTMEasting,UTMNorthing,height); //Adding the element to the layer-tree cpw::EntityRegistry::GetInstance()->Add(new_elto3d); table.Add(new_elto3d); } } } PQclear(res); } }//records available }//OK }//Not null PQclear(resG); } } } //! Opens connection with DB /*! 
Opens connection with DB */ //Perhaps it should first check whether the connection is already open before connecting again bool DataBase::OpenConnection() { PGconn *conn; std::string conninfo; //Composing connect command std::string str; str = "hostaddr=" + GetDBHost() + " port=5432" + " user=" + GetDBUser() + " password=" + GetDBPasswd() + " dbname=" + GetDBName(); if (str.c_str()!=NULL) conninfo = str; else//default connection conninfo = "hostaddr=10.22.144.150 port=5432 user=postgres2 password=agustin dbname=pepe"; // Connecting with DB conn = PQconnectdb(conninfo.c_str()); //Saving identifier SetDBConn(conn); //conn = PQconnectdb(conninfo); // Checking connection if (PQstatus(conn) != CONNECTION_OK) { fprintf(stderr, "Connection to database failed: %s", PQerrorMessage(conn)); Disconnect(); return false; } return true; } /* PRIMARY KEY: primary key //Drop a table //DROP TABLE customer //Delete the table data but keep the table //TRUNCATE TABLE customer //Add a column to a table (customer), giving its name and type //ALTER table customer add Gender char(1) //Rename a column //ALTER table customer change Address Addr char(50) //Change the type of a column //ALTER table customer modify Addr char(30) //Drop a column //ALTER table customer drop Gender //Delete a record DELETE FROM Store_Information WHERE store_name = "Los Angeles" */
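// ---------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): a minimal, self-contained version
// of the "POINT(x y)" parsing that ReadTables() performs on the result of the PostGIS
// asText() call above. The helper name ParsePointText is hypothetical; <string> and
// <cstdlib> are assumed to be available, since the file already uses std::string and atof.
static bool ParsePointText(const std::string &wkt, float &x, float &y)
{
	//Expected input, e.g. "POINT(-3.70379 40.41678)"
	std::string::size_type open  = wkt.find_first_of("(");
	std::string::size_type blank = wkt.find_first_of(" ", open + 1);
	std::string::size_type close = wkt.find_first_of(")");
	if (open == std::string::npos || blank == std::string::npos || close == std::string::npos)
		return false;
	//The first coordinate sits between '(' and the blank, the second between the blank and ')'
	x = (float)atof(wkt.substr(open + 1, blank - (open + 1)).c_str());
	y = (float)atof(wkt.substr(blank + 1, close - (blank + 1)).c_str());
	return true;
}
// ---------------------------------------------------------------------------------------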
BackupTheBerlios/rvzware
src/cpw/persistent/database/DataBase.cpp
C++
gpl-2.0
25,854
// -------------------------------------------------------------------------- #include <vector> #include <string> #include <sstream> #include <iomanip> #include <algorithm> #include <getopt.h> // -------------------------------------------------------------------------- #include "ORepHelpers.h" #include "ObjectRepository.h" #include "ObjectRepositoryFactory.h" #include "Exceptions.h" #include "UniSetObject.h" #include "UniSetTypes.h" #include "ObjectsManager.h" #include "MessageType.h" #include "Configuration.h" #include "ObjectIndex_XML.h" #include "Debug.h" // -------------------------------------------------------------------------- using namespace std; using namespace UniSetTypes; // -------------------------------------------------------------------------- enum Command { StartUp, FoldUp, Finish, Exist, Configure, LogRotate }; static struct option longopts[] = { { "help", no_argument, 0, 'h' }, { "confile", required_argument, 0, 'c' }, { "create", no_argument, 0, 'b' }, { "exist", no_argument, 0, 'e' }, { "omap", no_argument, 0, 'o' }, { "msgmap", no_argument, 0, 'm' }, { "start", no_argument, 0, 's' }, { "finish", no_argument, 0, 'f' }, { "foldUp", no_argument, 0, 'u' }, { "configure", required_argument, 0, 'r' }, { "logrotate", required_argument, 0, 'l' }, { "info", required_argument, 0, 'i' }, { "setValue", required_argument, 0, 'x' }, { "getValue", required_argument, 0, 'g' }, { "getRawValue", required_argument, 0, 'w' }, { "getCalibrate", required_argument, 0, 'y' }, { "oinfo", required_argument, 0, 'p' }, { "verbose", no_argument, 0, 'v' }, { NULL, 0, 0, 0 } }; string conffile("configure.xml"); // -------------------------------------------------------------------------- static bool commandToAll( const string& section, ObjectRepository *rep, Command cmd ); static void createSections( UniSetTypes::Configuration* c ); // -------------------------------------------------------------------------- int omap(); int msgmap(); int configure( const string& args, UniversalInterface &ui ); int logRotate( const string& args, UniversalInterface &ui ); int setValue( const string& args, UniversalInterface &ui, Configuration* conf = UniSetTypes::conf ); int getValue( const string& args, UniversalInterface &ui, Configuration* conf = UniSetTypes::conf ); int getRawValue( const string& args, UniversalInterface &ui ); int getState( const string& args, UniversalInterface &ui ); int getCalibrate( const string& args, UniversalInterface &ui ); int oinfo( const string& args, UniversalInterface &ui ); // -------------------------------------------------------------------------- static void print_help(int width, const string& cmd, const string& help, const string& tab=" " ) { // чтобы не менять параметры основного потока // создаём свой stream... 
ostringstream info; info.setf(ios::left, ios::adjustfield); info << tab << setw(width) << cmd << " - " << help; cout << info.str(); } // -------------------------------------------------------------------------- static void short_usage() { cout << "Usage: uniset-admin [--confile configure.xml] --command [arg] \n for detailed information arg --help" << endl; } // -------------------------------------------------------------------------- static void usage() { cout << "\nUsage: \n\tuniset-admin [--confile configure.xml] --command [arg]\n"; cout << "commands list:\n"; cout << "-----------------------------------------\n"; print_help(24, "-с|--confile file.xml ","Используемый конфигурационный файл\n"); cout << endl; print_help(24, "-b|--create ","Создание репозитория\n"); print_help(24, "-e|--exist ","Вызов функции exist() показывающей какие объекты зарегистрированы и доступны.\n"); print_help(24, "-o|--omap ","Вывод на экран списка объектов с идентификаторами.\n"); print_help(24, "-m|--msgmap ","Вывод на экран списка сообщений с идентификаторами.\n"); print_help(24, "-s|--start ","Посылка SystemMessage::StartUp всем объектам (процессам)\n"); print_help(24, "-u|--foldUp ","Посылка SystemMessage::FoldUp всем объектам (процессам)\n"); print_help(24, "-f|--finish ","Посылка SystemMessage::Finish всем объектам (процессам)\n"); print_help(24, "-h|--help ","Вывести это сообщение.\n"); cout << endl; print_help(36, "-r|--configure [FullObjName] ","Посылка SystemMessage::ReConfiguration всем объектам (процессам) или заданному по имени (FullObjName).\n"); print_help(36, "-l|--logrotate [FullObjName] ","Посылка SystemMessage::LogRotate всем объектам (процессам) или заданному по имени (FullObjName).\n"); print_help(36, "-p|--oinfo OID ","Получить информацию об объекте (SimpleInfo).\n"); cout << endl; print_help(48, "-x|--setValue id1@node1=val,id2@node2=val2,id3=val3,.. ","Выставить значения датчиков\n"); print_help(36, "-g|--getValue id1@node1,id2@node2,id3,id4 ","Получить значения датчиков.\n"); cout << endl; print_help(36, "-w|--getRawValue id1@node1=val,id2@node2=val2,id3=val3,.. ","Получить 'сырое' значение.\n"); print_help(36, "-y|--getCalibrate id1@node1=val,id2@node2=val2,id3=val3,.. ","Получить параметры калибровки.\n"); print_help(36, "-v|--verbose","Подробный вывод логов.\n"); cout << endl; } // -------------------------------------------------------------------------------------- /*! \todo Оптимизировать commandToAll, т.к. сейчас НА КАЖДОМ ШАГЕ цикла создаётся сообщение и происходит преобразование в TransportMessage. TransportMessage можно создать один раз до цикла. 
*/ // -------------------------------------------------------------------------------------- static bool verb = false; int main(int argc, char** argv) { try { int optindex = 0; char opt = 0; while( (opt = getopt_long(argc, argv, "vhc:beomsfur:l:i:x:g:w:y:p:",longopts,&optindex)) != -1 ) { switch (opt) //разбираем параметры { case 'h': //--help usage(); return 0; case 'v': verb=true; break; case 'c': //--confile conffile = optarg; break; case 'o': //--omap { uniset_init(argc,argv,conffile); return omap(); } break; case 'b': //--create { uniset_init(argc,argv,conffile); createSections(conf); } return 0; case 'm': //--msgmap { uniset_init(argc,argv,conffile); return msgmap(); } break; case 'x': //--setValue { uniset_init(argc,argv,conffile); UniversalInterface ui(conf); return setValue(optarg,ui); } break; case 'g': //--getValue { // cout<<"(main):received option --getValue='"<<optarg<<"'"<<endl; uniset_init(argc,argv,conffile); UniversalInterface ui(conf); return getValue(optarg,ui); } break; case 'w': //--getRawValue { // cout<<"(main):received option --getRawValue='"<<optarg<<"'"<<endl; uniset_init(argc,argv,conffile); UniversalInterface ui(conf); return getRawValue(optarg,ui); } break; case 'p': //--oinfo { // cout<<"(main):received option --oinfo='"<<optarg<<"'"<<endl; uniset_init(argc,argv,conffile); UniversalInterface ui(conf); return oinfo(optarg,ui); } break; case 'e': //--exist { // cout<<"(main):received option --exist"<<endl; uniset_init(argc,argv,conffile); UniversalInterface ui(conf); Command cmd=Exist; verb = true; ObjectRepository* rep = new ObjectRepository(conf); commandToAll(conf->getServicesSection(), rep, (Command)cmd); commandToAll(conf->getControllersSection(), rep, (Command)cmd); commandToAll(conf->getObjectsSection(), rep, (Command)cmd); delete rep; // cout<<"(exist): done"<<endl; } return 0; case 's': //--start { // cout<<"(main):received option --start"<<endl; uniset_init(argc,argv,conffile); UniversalInterface ui(conf); Command cmd=StartUp; ObjectRepository* rep = new ObjectRepository(conf); commandToAll(conf->getServicesSection(), rep, (Command)cmd); commandToAll(conf->getControllersSection(), rep, (Command)cmd); commandToAll(conf->getObjectsSection(), rep, (Command)cmd); delete rep; } return 0; case 'r': //--configure { uniset_init(argc,argv,conffile); UniversalInterface ui(conf); return configure(optarg,ui); } break; case 'f': //--finish { // cout<<"(main):received option --finish"<<endl; uniset_init(argc,argv,conffile); UniversalInterface ui(conf); Command cmd=Finish; ObjectRepository* rep = new ObjectRepository(conf); commandToAll(conf->getServicesSection(), rep, (Command)cmd); commandToAll(conf->getControllersSection(), rep, (Command)cmd); commandToAll(conf->getObjectsSection(), rep, (Command)cmd); delete rep; cout<<"(finish): done"<<endl; } return 0; case 'l': //--logrotate { uniset_init(argc,argv,conffile); UniversalInterface ui(conf); return logRotate(optarg, ui); } break; case 'y': //--getCalibrate { // cout<<"(main):received option --getCalibrate='"<<optarg<<"'"<<endl; uniset_init(argc,argv,conffile); UniversalInterface ui(conf); return getCalibrate(optarg, ui); } break; case 'u': //--foldUp { // cout<<"(main):received option --foldUp"<<endl; uniset_init(argc,argv,conffile); UniversalInterface ui(conf); Command cmd=FoldUp; ObjectRepository* rep = new ObjectRepository(conf); commandToAll(conf->getServicesSection(), rep, (Command)cmd); commandToAll(conf->getControllersSection(), rep, (Command)cmd); commandToAll(conf->getObjectsSection(), rep, 
(Command)cmd); delete rep; // cout<<"(foldUp): done"<<endl; } return 0; case '?': default: { short_usage(); return 1; } } } return 0; } catch(Exception& ex) { if( verb ) cout <<"admin(main): " << ex << endl; } catch(CORBA::SystemException& ex) { if( verb ) cerr << "поймали CORBA::SystemException:" << ex.NP_minorString() << endl; } catch(CORBA::Exception&) { if( verb ) cerr << "поймали CORBA::Exception." << endl; } catch(omniORB::fatalException& fe) { if( verb ) { cerr << "поймали omniORB::fatalException:" << endl; cerr << " file: " << fe.file() << endl; cerr << " line: " << fe.line() << endl; cerr << " mesg: " << fe.errmsg() << endl; } } catch(...) { if( verb ) cerr << "неизвестное исключение" << endl; } return 1; } // ============================================================================================== static bool commandToAll(const string& section, ObjectRepository *rep, Command cmd) { if( verb ) cout <<"\n||=======******** " << section << " ********=========||\n"<< endl; try { ListObjectName ls; rep->list(section.c_str(),&ls); if( ls.empty() ) { if( verb ) cout << "пусто!!!!!!" << endl; return false; } ObjectsManager_i_var proc; UniSetObject_i_var obj; string fullName; ListObjectName::const_iterator li; string buf; cout.setf(ios::left, ios::adjustfield); for ( li=ls.begin();li!=ls.end();++li) { string ob(*li); buf = section+"/"+ob; fullName= buf.c_str(); try { UniSetTypes::ObjectVar o =rep->resolve(fullName); obj= UniSetObject_i::_narrow(o); switch( cmd ) { case StartUp: { if(CORBA::is_nil(obj)) break; SystemMessage msg(SystemMessage::StartUp); obj->push( Message::transport(msg) ); if( verb ) cout << setw(55) << ob <<" <--- start OK" << endl; } break; case FoldUp: { if(CORBA::is_nil(obj)) break; SystemMessage msg(SystemMessage::FoldUp); obj->push( Message::transport(msg) ); if( verb ) cout << setw(55) << ob << " <--- foldUp OK" << endl; } break; case Finish: { if(CORBA::is_nil(obj)) break; SystemMessage msg(SystemMessage::Finish); obj->push( Message::transport(msg) ); if( verb ) cout << setw(55)<< ob << " <--- finish OK" << endl; } break; case Exist: { if (obj->exist()) { if( verb ) cout << setw(55) << ob << " <--- exist ok\n"; } else if( verb ) cout << setw(55) << ob << " <--- exist NOT OK\n"; } break; case Configure: { SystemMessage sm(SystemMessage::ReConfiguration); obj->push(sm.transport_msg()); if( verb ) cout << setw(55) << ob << " <--- configure ok\n"; } break; case LogRotate: { SystemMessage msg(SystemMessage::LogRotate); obj->push( Message::transport(msg) ); if( verb ) cout << setw(55) << ob << " <--- logrotate ok\n"; break; } default: { if( verb ) cout << "неизвестная команда -" << cmd << endl; return false; } } } catch(Exception& ex) { if( verb ) cout << setw(55) << ob << " <--- " << ex << endl; } catch( CORBA::SystemException& ex ) { if( verb ) cout << setw(55) << ob << " <--- недоступен!!(CORBA::SystemException): " << ex.NP_minorString() << endl; } } } catch( ORepFailed ) { return false; } return true; } // ============================================================================================== static void createSections( UniSetTypes::Configuration* rconf ) { ObjectRepositoryFactory repf(rconf); repf.createRootSection(rconf->getRootSection()); repf.createRootSection(rconf->getSensorsSection()); repf.createRootSection(rconf->getObjectsSection()); repf.createRootSection(rconf->getControllersSection()); repf.createRootSection(rconf->getServicesSection()); if( verb ) cout<<"(create): created"<<endl; } // 
============================================================================================== int omap() { try { cout.setf(ios::left, ios::adjustfield); cout << "========================== ObjectsMap =================================\n"; conf->oind->printMap(cout); cout << "==========================================================================\n"; } catch(Exception& ex) { if( verb ) unideb[Debug::CRIT] << " configuration init FAILED!!! \n"; return 1; } return 0; } // -------------------------------------------------------------------------------------- int msgmap() { try { cout.setf(ios::left, ios::adjustfield); cout << "========================== MessagesMap =================================\n"; conf->mi->printMessagesMap(cout); cout << "==========================================================================\n"; } catch(Exception& ex) { if( verb ) unideb[Debug::CRIT] << " configuration init FAILED!!! " << ex << endl;; return 1; } return 0; } // -------------------------------------------------------------------------------------- int setValue( const string& args, UniversalInterface &ui, Configuration* conf ) { int err = 0; typedef std::list<UniSetTypes::ParamSInfo> SList; SList sl = UniSetTypes::getSInfoList(args, conf); if( verb ) cout << "====== setValue ======" << endl; for( SList::iterator it=sl.begin(); it!=sl.end(); it++ ) { try { UniversalIO::IOTypes t = conf->getIOType(it->si.id); if( verb ) { cout << " value: " << it->val << endl; cout << " name: (" << it->si.id << ") " << it->fname << endl; cout << " iotype: " << t << endl; cout << " text: " << conf->oind->getTextName(it->si.id) << "\n\n"; } if( it->si.node == DefaultObjectId ) it->si.node = conf->getLocalNode(); switch(t) { case UniversalIO::DigitalInput: ui.saveState(it->si.id,(it->val?true:false),t,it->si.node); break; case UniversalIO::DigitalOutput: ui.setState(it->si.id,(it->val?true:false),it->si.node); break; case UniversalIO::AnalogInput: ui.saveValue(it->si.id,it->val,t,it->si.node); break; case UniversalIO::AnalogOutput: ui.setValue(it->si.id,it->val,it->si.node); break; default: if( verb ) cerr << "FAILED: Unknown 'iotype' for " << it->fname << endl; err = 1; break; } } catch(Exception& ex) { if( verb ) cerr << "(setValue): " << ex << endl;; err = 1; } } return err; } // -------------------------------------------------------------------------------------- int getValue( const string& args, UniversalInterface &ui, Configuration* conf ) { int err = 0; typedef std::list<UniSetTypes::ParamSInfo> SList; SList sl = UniSetTypes::getSInfoList( args, UniSetTypes::conf ); if( verb ) cout << "====== getValue ======" << endl; for( SList::iterator it=sl.begin(); it!=sl.end(); it++ ) { try { UniversalIO::IOTypes t = conf->getIOType(it->si.id); if( verb ) { cout << " name: (" << it->si.id << ") " << it->fname << endl; cout << " iotype: " << t << endl; cout << " text: " << conf->oind->getTextName(it->si.id) << "\n\n"; } if( it->si.node == DefaultObjectId ) it->si.node = conf->getLocalNode(); switch(t) { case UniversalIO::DigitalOutput: case UniversalIO::DigitalInput: if( verb ) cout << " state: " << ui.getState(it->si.id,it->si.node) << endl; else cout << ui.getState(it->si.id,it->si.node); break; case UniversalIO::AnalogOutput: case UniversalIO::AnalogInput: if( verb ) cout << " value: " << ui.getValue(it->si.id,it->si.node) << endl; else cout << ui.getValue(it->si.id,it->si.node); break; default: if( verb ) cerr << "FAILED: Unknown 'iotype' for " << it->fname << endl; err = 1; break; } } catch(Exception& ex) { if( verb ) 
cerr << "(getValue): " << ex << endl; err = 1; } } return err; } // -------------------------------------------------------------------------------------- int getCalibrate( const std::string& args, UniversalInterface &ui ) { int err = 0; typedef std::list<UniSetTypes::ParamSInfo> SList; SList sl = UniSetTypes::getSInfoList( args, UniSetTypes::conf ); if( verb ) cout << "====== getCalibrate ======" << endl; for( SList::iterator it=sl.begin(); it!=sl.end(); it++ ) { if( it->si.node == DefaultObjectId ) it->si.node = conf->getLocalNode(); cout << " name: (" << it->si.id << ") " << it->fname << endl; cout << " text: " << conf->oind->getTextName(it->si.id) << "\n"; try { cout << "калибровка: "; IOController_i::CalibrateInfo ci = ui.getCalibrateInfo(it->si); cout << ci << endl; } catch(Exception& ex) { cerr << "(getCalibrate): " << ex << endl;; err = 1; } } return err; } // -------------------------------------------------------------------------------------- int getRawValue( const std::string& args, UniversalInterface &ui ) { int err = 0; typedef std::list<UniSetTypes::ParamSInfo> SList; SList sl = UniSetTypes::getSInfoList( args, UniSetTypes::conf ); if( verb ) cout << "====== getRawValue ======" << endl; for( SList::iterator it=sl.begin(); it!=sl.end(); it++ ) { if( it->si.node == DefaultObjectId ) it->si.node = conf->getLocalNode(); if( verb ) { cout << " name: (" << it->si.id << ") " << it->fname << endl; cout << " text: " << conf->oind->getTextName(it->si.id) << "\n\n"; } try { if( verb ) cout << " value: " << ui.getRawValue(it->si) << endl; else cout << " value: " << ui.getRawValue(it->si); } catch(Exception& ex) { if( verb ) cerr << "(getRawValue): " << ex << endl;; err = 1; } } return err; } // -------------------------------------------------------------------------------------- int logRotate( const string& arg, UniversalInterface &ui ) { // посылка всем if( arg.empty() || (arg.c_str())[0]!='-' ) { ObjectRepository* rep = new ObjectRepository(conf); commandToAll(conf->getServicesSection(), rep, (Command)LogRotate); commandToAll(conf->getControllersSection(), rep, (Command)LogRotate); commandToAll(conf->getObjectsSection(), rep, (Command)LogRotate); delete rep; } else // посылка определённому объекту { UniSetTypes::ObjectId id = conf->oind->getIdByName(arg); if( id == DefaultObjectId ) { if( verb ) cout << "(logrotate): name='" << arg << "' не найдено!!!\n"; return 1; } SystemMessage sm(SystemMessage::LogRotate); TransportMessage tm(sm.transport_msg()); ui.send(id,tm); if( verb ) cout << "\nSend 'LogRotate' to " << arg << " OK.\n"; } return 0; } // -------------------------------------------------------------------------------------- int configure( const string& arg, UniversalInterface &ui ) { // посылка всем if( arg.empty() || (arg.c_str())[0]!='-' ) { ObjectRepository* rep = new ObjectRepository(conf); commandToAll(conf->getServicesSection(), rep, (Command)Configure); commandToAll(conf->getControllersSection(), rep, (Command)Configure); commandToAll(conf->getObjectsSection(), rep, (Command)Configure); delete rep; } else // посылка определённому объекту { UniSetTypes::ObjectId id = conf->oind->getIdByName(arg); if( id == DefaultObjectId ) { if( verb ) cout << "(configure): name='" << arg << "' не найдено!!!\n"; return 1; } SystemMessage sm(SystemMessage::ReConfiguration); TransportMessage tm(sm.transport_msg()); ui.send(id,tm); if( verb ) cout << "\nSend 'ReConfigure' to " << arg << " OK.\n"; } return 0; } // 
-------------------------------------------------------------------------------------- int oinfo( const string& arg, UniversalInterface &ui ) { UniSetTypes::ObjectId oid(uni_atoi(arg)); if( oid==0 ) { if( verb ) cout << "(oinfo): Не задан OID!"<< endl; return 1; } UniSetTypes::ObjectVar o = ui.resolve(oid); UniSetObject_i_var obj = UniSetObject_i::_narrow(o); if(CORBA::is_nil(obj)) { if( verb ) cout << "(oinfo): объект " << oid << " недоступен" << endl; } else { SimpleInfo_var inf = obj->getInfo(); cout << inf->info << endl; } return 0; } // --------------------------------------------------------------------------------------
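// --------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the minimal pattern that
// logRotate() and configure() above use to deliver a SystemMessage to a single named
// object through UniversalInterface. The object name "TestProc" is hypothetical.
static int send_logrotate_example( UniversalInterface &ui )
{
	UniSetTypes::ObjectId id = conf->oind->getIdByName("TestProc");
	if( id == DefaultObjectId )
		return 1; // the name is not present in the configuration
	SystemMessage sm(SystemMessage::LogRotate);
	TransportMessage tm(sm.transport_msg());
	ui.send(id, tm);
	return 0;
}
// Example invocations of the utility itself (sensor and node names are illustrative):
//   uniset-admin --confile configure.xml --exist
//   uniset-admin --confile configure.xml --setValue Sensor1@Node1=1,Sensor2=42
//   uniset-admin --confile configure.xml --getValue Sensor1@Node1,Sensor2
// --------------------------------------------------------------------------------------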
vitlav/libuniset
Utilities/Admin/admin.cc
C++
gpl-2.0
23,256
#ifdef CONFIG_SCHED_BFS #include "sched_bfs.c" #else /* * kernel/sched.c * * Kernel scheduler and related syscalls * * Copyright (C) 1991-2002 Linus Torvalds * * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and * make semaphores SMP safe * 1998-11-19 Implemented schedule_timeout() and related stuff * by Andrea Arcangeli * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: * hybrid priority-list and round-robin design with * an array-switch method of distributing timeslices * and per-CPU runqueues. Cleanups and useful suggestions * by Davide Libenzi, preemptible kernel bits by Robert Love. * 2003-09-03 Interactivity tuning by Con Kolivas. * 2004-04-02 Scheduler domains code by Nick Piggin * 2007-04-15 Work begun on replacing all interactivity tuning with a * fair scheduling design by Con Kolivas. * 2007-05-05 Load balancing (smp-nice) and other improvements * by Peter Williams * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, * Thomas Gleixner, Mike Kravetz */ #include <linux/mm.h> #include <linux/module.h> #include <linux/nmi.h> #include <linux/init.h> #include <linux/uaccess.h> #include <linux/highmem.h> #include <linux/smp_lock.h> #include <asm/mmu_context.h> #include <linux/interrupt.h> #include <linux/capability.h> #include <linux/completion.h> #include <linux/kernel_stat.h> #include <linux/debug_locks.h> #include <linux/perf_event.h> #include <linux/security.h> #include <linux/notifier.h> #include <linux/profile.h> #include <linux/freezer.h> #include <linux/vmalloc.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/pid_namespace.h> #include <linux/smp.h> #include <linux/threads.h> #include <linux/timer.h> #include <linux/rcupdate.h> #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/percpu.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stop_machine.h> #include <linux/sysctl.h> #include <linux/syscalls.h> #include <linux/times.h> #include <linux/tsacct_kern.h> #include <linux/kprobes.h> #include <linux/delayacct.h> #include <linux/unistd.h> #include <linux/pagemap.h> #include <linux/hrtimer.h> #include <linux/tick.h> #include <linux/debugfs.h> #include <linux/ctype.h> #include <linux/ftrace.h> #include <linux/slab.h> #include <linux/cpuacct.h> #include <linux/sched.h> #include <asm/tlb.h> #include <asm/irq_regs.h> #include <mach/sec_debug.h> #include "sched_cpupri.h" #define CREATE_TRACE_POINTS #include <trace/events/sched.h> /* * Convert user-nice values [ -20 ... 0 ... 19 ] * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], * and back. */ #define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20) #define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20) #define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio) /* * 'User priority' is the nice value converted to something we * can work with better when scaling various scheduler parameters, * it's a [ 0 ... 39 ] range. */ #define USER_PRIO(p) ((p)-MAX_RT_PRIO) #define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) /* * Helpers for converting nanosecond timing to jiffy resolution */ #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) #define NICE_0_LOAD SCHED_LOAD_SCALE #define NICE_0_SHIFT SCHED_LOAD_SHIFT /* * These are the 'tuning knobs' of the scheduler: * * default timeslice is 100 msecs (used only for SCHED_RR tasks). 
* Timeslices get refilled after they expire. */ #define DEF_TIMESLICE (100 * HZ / 1000) /* * single value that denotes runtime == period, ie unlimited time. */ #define RUNTIME_INF ((u64)~0ULL) static inline int rt_policy(int policy) { if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR)) return 1; return 0; } static inline int task_has_rt_policy(struct task_struct *p) { return rt_policy(p->policy); } /* * This is the priority-queue data structure of the RT scheduling class: */ struct rt_prio_array { DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */ struct list_head queue[MAX_RT_PRIO]; }; struct rt_bandwidth { /* nests inside the rq lock: */ raw_spinlock_t rt_runtime_lock; ktime_t rt_period; u64 rt_runtime; struct hrtimer rt_period_timer; }; static struct rt_bandwidth def_rt_bandwidth; static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer) { struct rt_bandwidth *rt_b = container_of(timer, struct rt_bandwidth, rt_period_timer); ktime_t now; int overrun; int idle = 0; for (;;) { now = hrtimer_cb_get_time(timer); overrun = hrtimer_forward(timer, now, rt_b->rt_period); if (!overrun) break; idle = do_sched_rt_period_timer(rt_b, overrun); } return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; } static void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) { rt_b->rt_period = ns_to_ktime(period); rt_b->rt_runtime = runtime; raw_spin_lock_init(&rt_b->rt_runtime_lock); hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); rt_b->rt_period_timer.function = sched_rt_period_timer; } static inline int rt_bandwidth_enabled(void) { return sysctl_sched_rt_runtime >= 0; } static void start_rt_bandwidth(struct rt_bandwidth *rt_b) { ktime_t now; if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) return; if (hrtimer_active(&rt_b->rt_period_timer)) return; raw_spin_lock(&rt_b->rt_runtime_lock); for (;;) { unsigned long delta; ktime_t soft, hard; if (hrtimer_active(&rt_b->rt_period_timer)) break; now = hrtimer_cb_get_time(&rt_b->rt_period_timer); hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period); soft = hrtimer_get_softexpires(&rt_b->rt_period_timer); hard = hrtimer_get_expires(&rt_b->rt_period_timer); delta = ktime_to_ns(ktime_sub(hard, soft)); __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, HRTIMER_MODE_ABS_PINNED, 0); } raw_spin_unlock(&rt_b->rt_runtime_lock); } #ifdef CONFIG_RT_GROUP_SCHED static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) { hrtimer_cancel(&rt_b->rt_period_timer); } #endif /* * sched_domains_mutex serializes calls to arch_init_sched_domains, * detach_destroy_domains and partition_sched_domains. 
*/ static DEFINE_MUTEX(sched_domains_mutex); #ifdef CONFIG_CGROUP_SCHED #include <linux/cgroup.h> struct cfs_rq; static LIST_HEAD(task_groups); /* task group related information */ struct task_group { struct cgroup_subsys_state css; #ifdef CONFIG_FAIR_GROUP_SCHED /* schedulable entities of this group on each cpu */ struct sched_entity **se; /* runqueue "owned" by this group on each cpu */ struct cfs_rq **cfs_rq; unsigned long shares; #endif #ifdef CONFIG_RT_GROUP_SCHED struct sched_rt_entity **rt_se; struct rt_rq **rt_rq; struct rt_bandwidth rt_bandwidth; #endif struct rcu_head rcu; struct list_head list; struct task_group *parent; struct list_head siblings; struct list_head children; }; #define root_task_group init_task_group /* task_group_lock serializes add/remove of task groups and also changes to * a task group's cpu shares. */ static DEFINE_SPINLOCK(task_group_lock); #ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_SMP static int root_task_group_empty(void) { return list_empty(&root_task_group.children); } #endif # define INIT_TASK_GROUP_LOAD NICE_0_LOAD /* * A weight of 0 or 1 can cause arithmetics problems. * A weight of a cfs_rq is the sum of weights of which entities * are queued on this cfs_rq, so a weight of a entity should not be * too large, so as the shares value of a task group. * (The default weight is 1024 - so there's no practical * limitation from this.) */ #define MIN_SHARES 2 #define MAX_SHARES (1UL << 18) static int init_task_group_load = INIT_TASK_GROUP_LOAD; #endif /* Default task group. * Every task in system belong to this group at bootup. */ struct task_group init_task_group; #endif /* CONFIG_CGROUP_SCHED */ /* CFS-related fields in a runqueue */ struct cfs_rq { struct load_weight load; unsigned long nr_running; u64 exec_clock; u64 min_vruntime; struct rb_root tasks_timeline; struct rb_node *rb_leftmost; struct list_head tasks; struct list_head *balance_iterator; /* * 'curr' points to currently running entity on this cfs_rq. * It is set to NULL otherwise (i.e when none are currently running). */ struct sched_entity *curr, *next, *last; unsigned int nr_spread_over; #ifdef CONFIG_FAIR_GROUP_SCHED struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ /* * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in * a hierarchy). Non-leaf lrqs hold other higher schedulable entities * (like users, containers etc.) * * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This * list is used during load balance. */ struct list_head leaf_cfs_rq_list; struct task_group *tg; /* group that "owns" this runqueue */ #ifdef CONFIG_SMP /* * the part of load.weight contributed by tasks */ unsigned long task_weight; /* * h_load = weight * f(tg) * * Where f(tg) is the recursive weight fraction assigned to * this group. 
*/ unsigned long h_load; /* * this cpu's part of tg->shares */ unsigned long shares; /* * load.weight at the time we set shares */ unsigned long rq_weight; #endif #endif }; /* Real-Time classes' related field in a runqueue: */ struct rt_rq { struct rt_prio_array active; unsigned long rt_nr_running; #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED struct { int curr; /* highest queued rt task prio */ #ifdef CONFIG_SMP int next; /* next highest */ #endif } highest_prio; #endif #ifdef CONFIG_SMP unsigned long rt_nr_migratory; unsigned long rt_nr_total; int overloaded; struct plist_head pushable_tasks; #endif int rt_throttled; u64 rt_time; u64 rt_runtime; /* Nests inside the rq lock: */ raw_spinlock_t rt_runtime_lock; #ifdef CONFIG_RT_GROUP_SCHED unsigned long rt_nr_boosted; struct rq *rq; struct list_head leaf_rt_rq_list; struct task_group *tg; #endif }; #ifdef CONFIG_SMP /* * We add the notion of a root-domain which will be used to define per-domain * variables. Each exclusive cpuset essentially defines an island domain by * fully partitioning the member cpus from any other cpuset. Whenever a new * exclusive cpuset is created, we also create and attach a new root-domain * object. * */ struct root_domain { atomic_t refcount; cpumask_var_t span; cpumask_var_t online; /* * The "RT overload" flag: it gets set if a CPU has more than * one runnable RT task. */ cpumask_var_t rto_mask; atomic_t rto_count; #ifdef CONFIG_SMP struct cpupri cpupri; #endif }; /* * By default the system creates a single root-domain with all cpus as * members (mimicking the global state we have today). */ static struct root_domain def_root_domain; #endif /* * This is the main, per-CPU runqueue data structure. * * Locking rule: those places that want to lock multiple runqueues * (such as the load balancing or the thread migration code), lock * acquire operations must be ordered by ascending &runqueue. */ struct rq { /* runqueue lock: */ raw_spinlock_t lock; /* * nr_running and cpu_load should be in the same cacheline because * remote CPUs use both these fields when doing load calculation. */ unsigned long nr_running; #define CPU_LOAD_IDX_MAX 5 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; unsigned long last_load_update_tick; #ifdef CONFIG_NO_HZ u64 nohz_stamp; unsigned char nohz_balance_kick; #endif unsigned int skip_clock_update; /* capture load from *all* tasks on this cpu: */ struct load_weight load; unsigned long nr_load_updates; u64 nr_switches; struct cfs_rq cfs; struct rt_rq rt; #ifdef CONFIG_FAIR_GROUP_SCHED /* list of leaf cfs_rq on this cpu: */ struct list_head leaf_cfs_rq_list; #endif #ifdef CONFIG_RT_GROUP_SCHED struct list_head leaf_rt_rq_list; #endif /* * This is part of a global counter where only the total sum * over all CPUs matters. A task can increase this counter on * one CPU and if it got migrated afterwards it may decrease * it on another CPU. 
Always updated under the runqueue lock: */ unsigned long nr_uninterruptible; struct task_struct *curr, *idle; unsigned long next_balance; struct mm_struct *prev_mm; u64 clock; atomic_t nr_iowait; #ifdef CONFIG_SMP struct root_domain *rd; struct sched_domain *sd; unsigned long cpu_power; unsigned char idle_at_tick; /* For active balancing */ int post_schedule; int active_balance; int push_cpu; struct cpu_stop_work active_balance_work; /* cpu of this runqueue: */ int cpu; int online; unsigned long avg_load_per_task; u64 rt_avg; u64 age_stamp; u64 idle_stamp; u64 avg_idle; #endif /* calc_load related fields */ unsigned long calc_load_update; long calc_load_active; #ifdef CONFIG_SCHED_HRTICK #ifdef CONFIG_SMP int hrtick_csd_pending; struct call_single_data hrtick_csd; #endif struct hrtimer hrtick_timer; #endif #ifdef CONFIG_SCHEDSTATS /* latency stats */ struct sched_info rq_sched_info; unsigned long long rq_cpu_time; /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ /* sys_sched_yield() stats */ unsigned int yld_count; /* schedule() stats */ unsigned int sched_switch; unsigned int sched_count; unsigned int sched_goidle; /* try_to_wake_up() stats */ unsigned int ttwu_count; unsigned int ttwu_local; /* BKL stats */ unsigned int bkl_count; #endif }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) { rq->curr->sched_class->check_preempt_curr(rq, p, flags); /* * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } static inline int cpu_of(struct rq *rq) { #ifdef CONFIG_SMP return rq->cpu; #else return 0; #endif } #define rcu_dereference_check_sched_domain(p) \ rcu_dereference_check((p), \ rcu_read_lock_sched_held() || \ lockdep_is_held(&sched_domains_mutex)) /* * The domain tree (rq->sd) is protected by RCU's quiescent state transition. * See detach_destroy_domains: synchronize_sched for details. * * The domain tree of any CPU may only be accessed from within * preempt-disabled sections. */ #define for_each_domain(cpu, __sd) \ for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent) #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) #define this_rq() (&__get_cpu_var(runqueues)) #define task_rq(p) cpu_rq(task_cpu(p)) #define cpu_curr(cpu) (cpu_rq(cpu)->curr) #define raw_rq() (&__raw_get_cpu_var(runqueues)) #ifdef CONFIG_CGROUP_SCHED /* * Return the group to which this tasks belongs. * * We use task_subsys_state_check() and extend the RCU verification * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach() * holds that lock for each task it moves into the cgroup. Therefore * by holding that lock, we pin the task to the current cgroup. 
*/ static inline struct task_group *task_group(struct task_struct *p) { struct cgroup_subsys_state *css; css = task_subsys_state_check(p, cpu_cgroup_subsys_id, lockdep_is_held(&task_rq(p)->lock)); return container_of(css, struct task_group, css); } /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { #ifdef CONFIG_FAIR_GROUP_SCHED p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; p->se.parent = task_group(p)->se[cpu]; #endif #ifdef CONFIG_RT_GROUP_SCHED p->rt.rt_rq = task_group(p)->rt_rq[cpu]; p->rt.parent = task_group(p)->rt_se[cpu]; #endif } #else /* CONFIG_CGROUP_SCHED */ static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } static inline struct task_group *task_group(struct task_struct *p) { return NULL; } #endif /* CONFIG_CGROUP_SCHED */ inline void update_rq_clock(struct rq *rq) { if (!rq->skip_clock_update) rq->clock = sched_clock_cpu(cpu_of(rq)); } /* * Tunables that become constants when CONFIG_SCHED_DEBUG is off: */ #ifdef CONFIG_SCHED_DEBUG # define const_debug __read_mostly #else # define const_debug static const #endif /** * runqueue_is_locked * @cpu: the processor in question. * * Returns true if the current cpu runqueue is locked. * This interface allows printk to be called with the runqueue lock * held and know whether or not it is OK to wake up the klogd. */ int runqueue_is_locked(int cpu) { return raw_spin_is_locked(&cpu_rq(cpu)->lock); } /* * Debugging: various feature bits */ #define SCHED_FEAT(name, enabled) \ __SCHED_FEAT_##name , enum { #include "sched_features.h" }; #undef SCHED_FEAT #define SCHED_FEAT(name, enabled) \ (1UL << __SCHED_FEAT_##name) * enabled | const_debug unsigned int sysctl_sched_features = #include "sched_features.h" 0; #undef SCHED_FEAT #ifdef CONFIG_SCHED_DEBUG #define SCHED_FEAT(name, enabled) \ #name , static __read_mostly char *sched_feat_names[] = { #include "sched_features.h" NULL }; #undef SCHED_FEAT static int sched_feat_show(struct seq_file *m, void *v) { int i; for (i = 0; sched_feat_names[i]; i++) { if (!(sysctl_sched_features & (1UL << i))) seq_puts(m, "NO_"); seq_printf(m, "%s ", sched_feat_names[i]); } seq_puts(m, "\n"); return 0; } static ssize_t sched_feat_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; char *cmp = buf; int neg = 0; int i; if (cnt > 63) cnt = 63; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; if (strncmp(buf, "NO_", 3) == 0) { neg = 1; cmp += 3; } for (i = 0; sched_feat_names[i]; i++) { int len = strlen(sched_feat_names[i]); if (strncmp(cmp, sched_feat_names[i], len) == 0) { if (neg) sysctl_sched_features &= ~(1UL << i); else sysctl_sched_features |= (1UL << i); break; } } if (!sched_feat_names[i]) return -EINVAL; *ppos += cnt; return cnt; } static int sched_feat_open(struct inode *inode, struct file *filp) { return single_open(filp, sched_feat_show, NULL); } static const struct file_operations sched_feat_fops = { .open = sched_feat_open, .write = sched_feat_write, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static __init int sched_init_debug(void) { debugfs_create_file("sched_features", 0644, NULL, NULL, &sched_feat_fops); return 0; } late_initcall(sched_init_debug); #endif #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) /* * Number of tasks to iterate in a single balance run. * Limited because this is done with IRQs disabled. 
*/ const_debug unsigned int sysctl_sched_nr_migrate = 32; /* * ratelimit for updating the group shares. * default: 0.25ms */ unsigned int sysctl_sched_shares_ratelimit = 250000; unsigned int normalized_sysctl_sched_shares_ratelimit = 250000; /* * Inject some fuzzyness into changing the per-cpu group shares * this avoids remote rq-locks at the expense of fairness. * default: 4 */ unsigned int sysctl_sched_shares_thresh = 4; /* * period over which we average the RT time consumption, measured * in ms. * * default: 1s */ const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC; /* * period over which we measure -rt task cpu usage in us. * default: 1s */ unsigned int sysctl_sched_rt_period = 1000000; static __read_mostly int scheduler_running; /* * part of the period that we allow rt tasks to run in us. * default: 0.95s */ int sysctl_sched_rt_runtime = 950000; static inline u64 global_rt_period(void) { return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; } static inline u64 global_rt_runtime(void) { if (sysctl_sched_rt_runtime < 0) return RUNTIME_INF; return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; } #ifndef prepare_arch_switch # define prepare_arch_switch(next) do { } while (0) #endif #ifndef finish_arch_switch # define finish_arch_switch(prev) do { } while (0) #endif static inline int task_current(struct rq *rq, struct task_struct *p) { return rq->curr == p; } #ifndef __ARCH_WANT_UNLOCKED_CTXSW static inline int task_running(struct rq *rq, struct task_struct *p) { return task_current(rq, p); } static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) { } static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) { #ifdef CONFIG_DEBUG_SPINLOCK /* this is a valid case when another task releases the spinlock */ rq->lock.owner = current; #endif /* * If we are tracking spinlock dependencies then we have to * fix up the runqueue lock - which gets 'carried over' from * prev into current: */ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); raw_spin_unlock_irq(&rq->lock); } #else /* __ARCH_WANT_UNLOCKED_CTXSW */ static inline int task_running(struct rq *rq, struct task_struct *p) { #ifdef CONFIG_SMP return p->oncpu; #else return task_current(rq, p); #endif } static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) { #ifdef CONFIG_SMP /* * We can optimise this out completely for !SMP, because the * SMP rebalancing from interrupt is the only thing that cares * here. */ next->oncpu = 1; #endif #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW raw_spin_unlock_irq(&rq->lock); #else raw_spin_unlock(&rq->lock); #endif } static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) { #ifdef CONFIG_SMP /* * After ->oncpu is cleared, the task can be moved to a different CPU. * We must ensure this doesn't happen until the switch is completely * finished. */ smp_wmb(); prev->oncpu = 0; #endif #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW local_irq_enable(); #endif } #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ /* * Check whether the task is waking, we use this to synchronize ->cpus_allowed * against ttwu(). */ static inline int task_is_waking(struct task_struct *p) { return unlikely(p->state == TASK_WAKING); } /* * __task_rq_lock - lock the runqueue a given task resides on. * Must be called interrupts disabled. 
*/ static inline struct rq *__task_rq_lock(struct task_struct *p) __acquires(rq->lock) { struct rq *rq; for (;;) { rq = task_rq(p); raw_spin_lock(&rq->lock); if (likely(rq == task_rq(p))) return rq; raw_spin_unlock(&rq->lock); } } /* * task_rq_lock - lock the runqueue a given task resides on and disable * interrupts. Note the ordering: we can safely lookup the task_rq without * explicitly disabling preemption. */ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) __acquires(rq->lock) { struct rq *rq; for (;;) { local_irq_save(*flags); rq = task_rq(p); raw_spin_lock(&rq->lock); if (likely(rq == task_rq(p))) return rq; raw_spin_unlock_irqrestore(&rq->lock, *flags); } } static void __task_rq_unlock(struct rq *rq) __releases(rq->lock) { raw_spin_unlock(&rq->lock); } static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) __releases(rq->lock) { raw_spin_unlock_irqrestore(&rq->lock, *flags); } /* * this_rq_lock - lock this runqueue and disable interrupts. */ static struct rq *this_rq_lock(void) __acquires(rq->lock) { struct rq *rq; local_irq_disable(); rq = this_rq(); raw_spin_lock(&rq->lock); return rq; } #ifdef CONFIG_SCHED_HRTICK /* * Use HR-timers to deliver accurate preemption points. * * Its all a bit involved since we cannot program an hrt while holding the * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a * reschedule event. * * When we get rescheduled we reprogram the hrtick_timer outside of the * rq->lock. */ /* * Use hrtick when: * - enabled by features * - hrtimer is actually high res */ static inline int hrtick_enabled(struct rq *rq) { if (!sched_feat(HRTICK)) return 0; if (!cpu_active(cpu_of(rq))) return 0; return hrtimer_is_hres_active(&rq->hrtick_timer); } static void hrtick_clear(struct rq *rq) { if (hrtimer_active(&rq->hrtick_timer)) hrtimer_cancel(&rq->hrtick_timer); } /* * High-resolution timer tick. * Runs from hardirq context with interrupts disabled. */ static enum hrtimer_restart hrtick(struct hrtimer *timer) { struct rq *rq = container_of(timer, struct rq, hrtick_timer); WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); raw_spin_lock(&rq->lock); update_rq_clock(rq); rq->curr->sched_class->task_tick(rq, rq->curr, 1); raw_spin_unlock(&rq->lock); return HRTIMER_NORESTART; } #ifdef CONFIG_SMP /* * called from hardirq (IPI) context */ static void __hrtick_start(void *arg) { struct rq *rq = arg; raw_spin_lock(&rq->lock); hrtimer_restart(&rq->hrtick_timer); rq->hrtick_csd_pending = 0; raw_spin_unlock(&rq->lock); } /* * Called to set the hrtick timer state. * * called with rq->lock held and irqs disabled */ static void hrtick_start(struct rq *rq, u64 delay) { struct hrtimer *timer = &rq->hrtick_timer; ktime_t time = ktime_add_ns(timer->base->get_time(), delay); hrtimer_set_expires(timer, time); if (rq == this_rq()) { hrtimer_restart(timer); } else if (!rq->hrtick_csd_pending) { __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0); rq->hrtick_csd_pending = 1; } } static int hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) { int cpu = (int)(long)hcpu; switch (action) { case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: hrtick_clear(cpu_rq(cpu)); return NOTIFY_OK; } return NOTIFY_DONE; } static __init void init_hrtick(void) { hotcpu_notifier(hotplug_hrtick, 0); } #else /* * Called to set the hrtick timer state. 
* * called with rq->lock held and irqs disabled */ static void hrtick_start(struct rq *rq, u64 delay) { __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0, HRTIMER_MODE_REL_PINNED, 0); } static inline void init_hrtick(void) { } #endif /* CONFIG_SMP */ static void init_rq_hrtick(struct rq *rq) { #ifdef CONFIG_SMP rq->hrtick_csd_pending = 0; rq->hrtick_csd.flags = 0; rq->hrtick_csd.func = __hrtick_start; rq->hrtick_csd.info = rq; #endif hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); rq->hrtick_timer.function = hrtick; } #else /* CONFIG_SCHED_HRTICK */ static inline void hrtick_clear(struct rq *rq) { } static inline void init_rq_hrtick(struct rq *rq) { } static inline void init_hrtick(void) { } #endif /* CONFIG_SCHED_HRTICK */ /* * resched_task - mark a task 'to be rescheduled now'. * * On UP this means the setting of the need_resched flag, on SMP it * might also involve a cross-CPU call to trigger the scheduler on * the target CPU. */ #ifdef CONFIG_SMP #ifndef tsk_is_polling #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) #endif static void resched_task(struct task_struct *p) { int cpu; assert_raw_spin_locked(&task_rq(p)->lock); if (test_tsk_need_resched(p)) return; set_tsk_need_resched(p); cpu = task_cpu(p); if (cpu == smp_processor_id()) return; /* NEED_RESCHED must be visible before we test polling */ smp_mb(); if (!tsk_is_polling(p)) smp_send_reschedule(cpu); } static void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long flags; if (!raw_spin_trylock_irqsave(&rq->lock, flags)) return; resched_task(cpu_curr(cpu)); raw_spin_unlock_irqrestore(&rq->lock, flags); } #ifdef CONFIG_NO_HZ /* * In the semi idle case, use the nearest busy cpu for migrating timers * from an idle cpu. This is good for power-savings. * * We don't do similar optimization for completely idle system, as * selecting an idle cpu will add more delays to the timers than intended * (as that cpu's timer base may not be uptodate wrt jiffies etc). */ int get_nohz_timer_target(void) { int cpu = smp_processor_id(); int i; struct sched_domain *sd; for_each_domain(cpu, sd) { for_each_cpu(i, sched_domain_span(sd)) if (!idle_cpu(i)) return i; } return cpu; } /* * When add_timer_on() enqueues a timer into the timer wheel of an * idle CPU then this timer might expire before the next timer event * which is scheduled to wake up that CPU. In case of a completely * idle system the next event might even be infinite time into the * future. wake_up_idle_cpu() ensures that the CPU is woken up and * leaves the inner idle loop so the newly added timer is taken into * account when the CPU goes back to idle and evaluates the timer * wheel for the next timer event. */ void wake_up_idle_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); if (cpu == smp_processor_id()) return; /* * This is safe, as this function is called with the timer * wheel base lock of (cpu) held. When the CPU is on the way * to idle and has not yet set rq->curr to idle then it will * be serialized on the timer wheel base lock and take the new * timer into account automatically. */ if (rq->curr != rq->idle) return; /* * We can set TIF_RESCHED on the idle task of the other CPU * lockless. 
The worst case is that the other CPU runs the * idle task through an additional NOOP schedule() */ set_tsk_need_resched(rq->idle); /* NEED_RESCHED must be visible before we test polling */ smp_mb(); if (!tsk_is_polling(rq->idle)) smp_send_reschedule(cpu); } #endif /* CONFIG_NO_HZ */ static u64 sched_avg_period(void) { return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2; } static void sched_avg_update(struct rq *rq) { s64 period = sched_avg_period(); while ((s64)(rq->clock - rq->age_stamp) > period) { /* * Inline assembly required to prevent the compiler * optimising this loop into a divmod call. * See __iter_div_u64_rem() for another example of this. */ asm("" : "+rm" (rq->age_stamp)); rq->age_stamp += period; rq->rt_avg /= 2; } } static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { rq->rt_avg += rt_delta; sched_avg_update(rq); } #else /* !CONFIG_SMP */ static void resched_task(struct task_struct *p) { assert_raw_spin_locked(&task_rq(p)->lock); set_tsk_need_resched(p); } static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { } #endif /* CONFIG_SMP */ #if BITS_PER_LONG == 32 # define WMULT_CONST (~0UL) #else # define WMULT_CONST (1UL << 32) #endif #define WMULT_SHIFT 32 /* * Shift right and round: */ #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) /* * delta *= weight / lw */ static unsigned long calc_delta_mine(unsigned long delta_exec, unsigned long weight, struct load_weight *lw) { u64 tmp; if (!lw->inv_weight) { if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST)) lw->inv_weight = 1; else lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2) / (lw->weight+1); } tmp = (u64)delta_exec * weight; /* * Check whether we'd overflow the 64-bit multiplication: */ if (unlikely(tmp > WMULT_CONST)) tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, WMULT_SHIFT/2); else tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); } static inline void update_load_add(struct load_weight *lw, unsigned long inc) { lw->weight += inc; lw->inv_weight = 0; } static inline void update_load_sub(struct load_weight *lw, unsigned long dec) { lw->weight -= dec; lw->inv_weight = 0; } /* * To aid in avoiding the subversion of "niceness" due to uneven distribution * of tasks with abnormal "nice" values across CPUs the contribution that * each task makes to its run queue's load is weighted according to its * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a * scaled version of the new time slice allocation that they receive on time * slice expiry etc. */ #define WEIGHT_IDLEPRIO 3 #define WMULT_IDLEPRIO 1431655765 /* * Nice levels are multiplicative, with a gentle 10% change for every * nice level changed. I.e. when a CPU-bound task goes from nice 0 to * nice 1, it will get ~10% less CPU time than another CPU-bound task * that remained on nice 0. * * The "10% effect" is relative and cumulative: from _any_ nice level, * if you go up 1 level, it's -10% CPU usage, if you go down 1 level * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. * If a task goes up by ~10% and another task goes down by ~10% then * the relative distance between them is ~25%.) 
*/ static const int prio_to_weight[40] = { /* -20 */ 88761, 71755, 56483, 46273, 36291, /* -15 */ 29154, 23254, 18705, 14949, 11916, /* -10 */ 9548, 7620, 6100, 4904, 3906, /* -5 */ 3121, 2501, 1991, 1586, 1277, /* 0 */ 1024, 820, 655, 526, 423, /* 5 */ 335, 272, 215, 172, 137, /* 10 */ 110, 87, 70, 56, 45, /* 15 */ 36, 29, 23, 18, 15, }; /* * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated. * * In cases where the weight does not change often, we can use the * precalculated inverse to speed up arithmetics by turning divisions * into multiplications: */ static const u32 prio_to_wmult[40] = { /* -20 */ 48388, 59856, 76040, 92818, 118348, /* -15 */ 147320, 184698, 229616, 287308, 360437, /* -10 */ 449829, 563644, 704093, 875809, 1099582, /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, }; /* Time spent by the tasks of the cpu accounting group executing in ... */ enum cpuacct_stat_index { CPUACCT_STAT_USER, /* ... user mode */ CPUACCT_STAT_SYSTEM, /* ... kernel mode */ CPUACCT_STAT_NSTATS, }; #ifdef CONFIG_CGROUP_CPUACCT static void cpuacct_charge(struct task_struct *tsk, u64 cputime); static void cpuacct_update_stats(struct task_struct *tsk, enum cpuacct_stat_index idx, cputime_t val); #else static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} static inline void cpuacct_update_stats(struct task_struct *tsk, enum cpuacct_stat_index idx, cputime_t val) {} #endif static inline void inc_cpu_load(struct rq *rq, unsigned long load) { update_load_add(&rq->load, load); } static inline void dec_cpu_load(struct rq *rq, unsigned long load) { update_load_sub(&rq->load, load); } #if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED) typedef int (*tg_visitor)(struct task_group *, void *); /* * Iterate the full tree, calling @down when first entering a node and @up when * leaving it for the final time. */ static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) { struct task_group *parent, *child; int ret; rcu_read_lock(); parent = &root_task_group; down: ret = (*down)(parent, data); if (ret) goto out_unlock; list_for_each_entry_rcu(child, &parent->children, siblings) { parent = child; goto down; up: continue; } ret = (*up)(parent, data); if (ret) goto out_unlock; child = parent; parent = parent->parent; if (parent) goto up; out_unlock: rcu_read_unlock(); return ret; } static int tg_nop(struct task_group *tg, void *data) { return 0; } #endif #ifdef CONFIG_SMP /* Used instead of source_load when we know the type == 0 */ static unsigned long weighted_cpuload(const int cpu) { return cpu_rq(cpu)->load.weight; } /* * Return a low guess at the load of a migration-source cpu weighted * according to the scheduling class and "nice" value. * * We want to under-estimate the load of migration sources, to * balance conservatively. */ static unsigned long source_load(int cpu, int type) { struct rq *rq = cpu_rq(cpu); unsigned long total = weighted_cpuload(cpu); if (type == 0 || !sched_feat(LB_BIAS)) return total; return min(rq->cpu_load[type-1], total); } /* * Return a high guess at the load of a migration-target cpu weighted * according to the scheduling class and "nice" value. 
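 *
 * For example: if a cpu's instantaneous rq->load.weight is 2048 but its
 * decayed cpu_load[type-1] average is only 1024, source_load() above
 * returns the low guess (1024) while target_load() below returns the
 * high guess (2048); a transient spike therefore makes the cpu look
 * light as a source and heavy as a target, keeping the balancer
 * conservative in both directions.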
*/ static unsigned long target_load(int cpu, int type) { struct rq *rq = cpu_rq(cpu); unsigned long total = weighted_cpuload(cpu); if (type == 0 || !sched_feat(LB_BIAS)) return total; return max(rq->cpu_load[type-1], total); } static unsigned long power_of(int cpu) { return cpu_rq(cpu)->cpu_power; } static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); static unsigned long cpu_avg_load_per_task(int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long nr_running = ACCESS_ONCE(rq->nr_running); if (nr_running) rq->avg_load_per_task = rq->load.weight / nr_running; else rq->avg_load_per_task = 0; return rq->avg_load_per_task; } #ifdef CONFIG_FAIR_GROUP_SCHED static __read_mostly unsigned long __percpu *update_shares_data; static void __set_se_shares(struct sched_entity *se, unsigned long shares); /* * Calculate and set the cpu's group shares. */ static void update_group_shares_cpu(struct task_group *tg, int cpu, unsigned long sd_shares, unsigned long sd_rq_weight, unsigned long *usd_rq_weight) { unsigned long shares, rq_weight; int boost = 0; rq_weight = usd_rq_weight[cpu]; if (!rq_weight) { boost = 1; rq_weight = NICE_0_LOAD; } /* * \Sum_j shares_j * rq_weight_i * shares_i = ----------------------------- * \Sum_j rq_weight_j */ shares = (sd_shares * rq_weight) / sd_rq_weight; shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); if (abs(shares - tg->se[cpu]->load.weight) > sysctl_sched_shares_thresh) { struct rq *rq = cpu_rq(cpu); unsigned long flags; raw_spin_lock_irqsave(&rq->lock, flags); tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; tg->cfs_rq[cpu]->shares = boost ? 0 : shares; __set_se_shares(tg->se[cpu], shares); raw_spin_unlock_irqrestore(&rq->lock, flags); } } /* * Re-compute the task group their per cpu shares over the given domain. * This needs to be done in a bottom-up fashion because the rq weight of a * parent group depends on the shares of its child groups. */ static int tg_shares_up(struct task_group *tg, void *data) { unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0; unsigned long *usd_rq_weight; struct sched_domain *sd = data; unsigned long flags; int i; if (!tg->se[0]) return 0; local_irq_save(flags); usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id()); for_each_cpu(i, sched_domain_span(sd)) { weight = tg->cfs_rq[i]->load.weight; usd_rq_weight[i] = weight; rq_weight += weight; /* * If there are currently no tasks on the cpu pretend there * is one of average load so that when a new task gets to * run here it will not get delayed by group starvation. */ if (!weight) weight = NICE_0_LOAD; sum_weight += weight; shares += tg->cfs_rq[i]->shares; } if (!rq_weight) rq_weight = sum_weight; if ((!shares && rq_weight) || shares > tg->shares) shares = tg->shares; if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) shares = tg->shares; for_each_cpu(i, sched_domain_span(sd)) update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight); local_irq_restore(flags); return 0; } /* * Compute the cpu's hierarchical load factor for each task group. * This needs to be done in a top-down fashion because the load of a child * group is a fraction of its parents load. 
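 *
 * Worked example: if the parent's h_load on this cpu is 2048, this
 * group's cfs_rq contributes shares of 512 on the cpu, and the parent
 * cfs_rq's load.weight is 1023, then this group's h_load becomes
 * 2048 * 512 / (1023 + 1) = 1024 (the +1 only guards against an empty
 * parent).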
*/ static int tg_load_down(struct task_group *tg, void *data) { unsigned long load; long cpu = (long)data; if (!tg->parent) { load = cpu_rq(cpu)->load.weight; } else { load = tg->parent->cfs_rq[cpu]->h_load; load *= tg->cfs_rq[cpu]->shares; load /= tg->parent->cfs_rq[cpu]->load.weight + 1; } tg->cfs_rq[cpu]->h_load = load; return 0; } static void update_shares(struct sched_domain *sd) { s64 elapsed; u64 now; if (root_task_group_empty()) return; now = cpu_clock(raw_smp_processor_id()); elapsed = now - sd->last_update; if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) { sd->last_update = now; walk_tg_tree(tg_nop, tg_shares_up, sd); } } static void update_h_load(long cpu) { walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); } #else static inline void update_shares(struct sched_domain *sd) { } #endif #ifdef CONFIG_PREEMPT static void double_rq_lock(struct rq *rq1, struct rq *rq2); /* * fair double_lock_balance: Safely acquires both rq->locks in a fair * way at the expense of forcing extra atomic operations in all * invocations. This assures that the double_lock is acquired using the * same underlying policy as the spinlock_t on this architecture, which * reduces latency compared to the unfair variant below. However, it * also adds more overhead and therefore may reduce throughput. */ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) __releases(this_rq->lock) __acquires(busiest->lock) __acquires(this_rq->lock) { raw_spin_unlock(&this_rq->lock); double_rq_lock(this_rq, busiest); return 1; } #else /* * Unfair double_lock_balance: Optimizes throughput at the expense of * latency by eliminating extra atomic operations when the locks are * already in proper order on entry. This favors lower cpu-ids and will * grant the double lock to lower cpus over higher ids under contention, * regardless of entry order into the function. */ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest) __releases(this_rq->lock) __acquires(busiest->lock) __acquires(this_rq->lock) { int ret = 0; if (unlikely(!raw_spin_trylock(&busiest->lock))) { if (busiest < this_rq) { raw_spin_unlock(&this_rq->lock); raw_spin_lock(&busiest->lock); raw_spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); ret = 1; } else raw_spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); } return ret; } #endif /* CONFIG_PREEMPT */ /* * double_lock_balance - lock the busiest runqueue, this_rq is locked already. */ static int double_lock_balance(struct rq *this_rq, struct rq *busiest) { if (unlikely(!irqs_disabled())) { /* printk() doesn't work good under rq->lock */ raw_spin_unlock(&this_rq->lock); BUG_ON(1); } return _double_lock_balance(this_rq, busiest); } static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) __releases(busiest->lock) { raw_spin_unlock(&busiest->lock); lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); } /* * double_rq_lock - safely lock two runqueues * * Note this does not disable interrupts like task_rq_lock, * you need to do so manually before calling. 
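 *
 * A minimal calling sketch (illustrative only):
 *
 *	local_irq_save(flags);
 *	double_rq_lock(rq1, rq2);
 *	... both rq->locks are now held, taken in a deadlock-safe order ...
 *	double_rq_unlock(rq1, rq2);
 *	local_irq_restore(flags);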
*/ static void double_rq_lock(struct rq *rq1, struct rq *rq2) __acquires(rq1->lock) __acquires(rq2->lock) { BUG_ON(!irqs_disabled()); if (rq1 == rq2) { raw_spin_lock(&rq1->lock); __acquire(rq2->lock); /* Fake it out ;) */ } else { if (rq1 < rq2) { raw_spin_lock(&rq1->lock); raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); } else { raw_spin_lock(&rq2->lock); raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); } } } /* * double_rq_unlock - safely unlock two runqueues * * Note this does not restore interrupts like task_rq_unlock, * you need to do so manually after calling. */ static void double_rq_unlock(struct rq *rq1, struct rq *rq2) __releases(rq1->lock) __releases(rq2->lock) { raw_spin_unlock(&rq1->lock); if (rq1 != rq2) raw_spin_unlock(&rq2->lock); else __release(rq2->lock); } #endif #ifdef CONFIG_FAIR_GROUP_SCHED static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) { #ifdef CONFIG_SMP cfs_rq->shares = shares; #endif } #endif static void calc_load_account_idle(struct rq *this_rq); static void update_sysctl(void); static int get_update_sysctl_factor(void); static void update_cpu_load(struct rq *this_rq); static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) { set_task_rq(p, cpu); #ifdef CONFIG_SMP /* * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be * successfuly executed on another CPU. We must ensure that updates of * per-task data have been completed by this moment. */ smp_wmb(); task_thread_info(p)->cpu = cpu; #endif } static const struct sched_class rt_sched_class; #define sched_class_highest (&rt_sched_class) #define for_each_class(class) \ for (class = sched_class_highest; class; class = class->next) #include "sched_stats.h" static void inc_nr_running(struct rq *rq) { rq->nr_running++; } static void dec_nr_running(struct rq *rq) { rq->nr_running--; } static void set_load_weight(struct task_struct *p) { /* * SCHED_IDLE tasks get minimal weight: */ if (p->policy == SCHED_IDLE) { p->se.load.weight = WEIGHT_IDLEPRIO; p->se.load.inv_weight = WMULT_IDLEPRIO; return; } p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO]; p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO]; } static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) { update_rq_clock(rq); sched_info_queued(p); p->sched_class->enqueue_task(rq, p, flags); p->se.on_rq = 1; } static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) { update_rq_clock(rq); sched_info_dequeued(p); p->sched_class->dequeue_task(rq, p, flags); p->se.on_rq = 0; } /* * activate_task - move a task to the runqueue. */ static void activate_task(struct rq *rq, struct task_struct *p, int flags) { if (task_contributes_to_load(p)) rq->nr_uninterruptible--; enqueue_task(rq, p, flags); inc_nr_running(rq); } /* * deactivate_task - remove a task from the runqueue. */ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags) { if (task_contributes_to_load(p)) rq->nr_uninterruptible++; dequeue_task(rq, p, flags); dec_nr_running(rq); } #include "sched_idletask.c" #include "sched_fair.c" #include "sched_rt.c" #ifdef CONFIG_SCHED_DEBUG # include "sched_debug.c" #endif /* * __normal_prio - return the priority that is based on the static prio */ static inline int __normal_prio(struct task_struct *p) { return p->static_prio; } /* * Calculate the expected normal priority: i.e. priority * without taking RT-inheritance into account. Might be * boosted by interactivity modifiers. 
Changes upon fork, * setprio syscalls, and whenever the interactivity * estimator recalculates. */ static inline int normal_prio(struct task_struct *p) { int prio; if (task_has_rt_policy(p)) prio = MAX_RT_PRIO-1 - p->rt_priority; else prio = __normal_prio(p); return prio; } /* * Calculate the current priority, i.e. the priority * taken into account by the scheduler. This value might * be boosted by RT tasks, or might be boosted by * interactivity modifiers. Will be RT if the task got * RT-boosted. If not then it returns p->normal_prio. */ static int effective_prio(struct task_struct *p) { p->normal_prio = normal_prio(p); /* * If we are RT tasks or we were boosted to RT priority, * keep the priority unchanged. Otherwise, update priority * to the normal priority: */ if (!rt_prio(p->prio)) return p->normal_prio; return p->prio; } /** * task_curr - is this task currently executing on a CPU? * @p: the task in question. */ inline int task_curr(const struct task_struct *p) { return cpu_curr(task_cpu(p)) == p; } static inline void check_class_changed(struct rq *rq, struct task_struct *p, const struct sched_class *prev_class, int oldprio, int running) { if (prev_class != p->sched_class) { if (prev_class->switched_from) prev_class->switched_from(rq, p, running); p->sched_class->switched_to(rq, p, running); } else p->sched_class->prio_changed(rq, p, oldprio, running); } #ifdef CONFIG_SMP /* * Is this task likely cache-hot: */ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) { s64 delta; if (p->sched_class != &fair_sched_class) return 0; /* * Buddy candidates are cache hot: */ if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running && (&p->se == cfs_rq_of(&p->se)->next || &p->se == cfs_rq_of(&p->se)->last)) return 1; if (sysctl_sched_migration_cost == -1) return 1; if (sysctl_sched_migration_cost == 0) return 0; delta = now - p->se.exec_start; return delta < (s64)sysctl_sched_migration_cost; } void set_task_cpu(struct task_struct *p, unsigned int new_cpu) { #ifdef CONFIG_SCHED_DEBUG /* * We should never call set_task_cpu() on a blocked task, * ttwu() will sort out the placement. */ WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); #endif trace_sched_migrate_task(p, new_cpu); if (task_cpu(p) != new_cpu) { p->se.nr_migrations++; perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0); } __set_task_cpu(p, new_cpu); } struct migration_arg { struct task_struct *task; int dest_cpu; }; static int migration_cpu_stop(void *data); /* * The task's runqueue lock must be held. * Returns true if you have to wait for migration thread. */ static bool migrate_task(struct task_struct *p, int dest_cpu) { struct rq *rq = task_rq(p); /* * If the task is not on a runqueue (and not running), then * the next wake-up will properly place the task. */ return p->se.on_rq || task_running(rq, p); } /* * wait_task_inactive - wait for a thread to unschedule. * * If @match_state is nonzero, it's the @p->state value just checked and * not expected to change. If it changes, i.e. @p might have woken up, * then return zero. When we succeed in waiting for @p to be off its CPU, * we return a positive number (its total switch count). If a second call * a short while later returns the same number, the caller can be sure that * @p has remained unscheduled the whole time. * * The caller must ensure that the task *will* unschedule sometime soon, * else this function might spin for a *long* time. 
This function can't * be called with interrupts off, or it may introduce deadlock with * smp_call_function() if an IPI is sent by the same process we are * waiting to become inactive. */ unsigned long wait_task_inactive(struct task_struct *p, long match_state) { unsigned long flags = 0; int running, on_rq; unsigned long ncsw; struct rq *rq; for (;;) { /* * We do the initial early heuristics without holding * any task-queue locks at all. We'll only try to get * the runqueue lock when things look like they will * work out! */ rq = task_rq(p); /* * If the task is actively running on another CPU * still, just relax and busy-wait without holding * any locks. * * NOTE! Since we don't hold any locks, it's not * even sure that "rq" stays as the right runqueue! * But we don't care, since "task_running()" will * return false if the runqueue has changed and p * is actually now running somewhere else! */ while (task_running(rq, p)) { if (match_state && unlikely(p->state != match_state)) return 0; cpu_relax(); } /* * Ok, time to look more closely! We need the rq * lock now, to be *sure*. If we're wrong, we'll * just go back and repeat. */ rq = task_rq_lock(p, &flags); trace_sched_wait_task(p); running = task_running(rq, p); on_rq = p->se.on_rq; ncsw = 0; if (!match_state || p->state == match_state) ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ task_rq_unlock(rq, &flags); /* * If it changed from the expected state, bail out now. */ if (unlikely(!ncsw)) break; /* * Was it really running after all now that we * checked with the proper locks actually held? * * Oops. Go back and try again.. */ if (unlikely(running)) { cpu_relax(); continue; } /* * It's not enough that it's not actively running, * it must be off the runqueue _entirely_, and not * preempted! * * So if it was still runnable (but just not actively * running right now), it's preempted, and we should * yield - it could be a while. */ if (unlikely(on_rq)) { schedule_timeout_uninterruptible(1); continue; } /* * Ahh, all good. It wasn't running, and it wasn't * runnable, which means that it will never become * running in the future either. We're all done! */ break; } return ncsw; } /*** * kick_process - kick a running thread to enter/exit the kernel * @p: the to-be-kicked thread * * Cause a process which is running on another CPU to enter * kernel-mode, without any delay. (to get signals handled.) * * NOTE: this function doesnt have to take the runqueue lock, * because all it wants to ensure is that the remote task enters * the kernel. If the IPI races and the task has been migrated * to another CPU then no harm is done and the purpose has been * achieved as well. */ void kick_process(struct task_struct *p) { int cpu; preempt_disable(); cpu = task_cpu(p); if ((cpu != smp_processor_id()) && task_curr(p)) smp_send_reschedule(cpu); preempt_enable(); } EXPORT_SYMBOL_GPL(kick_process); #endif /* CONFIG_SMP */ /** * task_oncpu_function_call - call a function on the cpu on which a task runs * @p: the task to evaluate * @func: the function to be called * @info: the function call argument * * Calls the function @func when the task is currently running. This might * be on the current CPU, which just calls the function directly */ void task_oncpu_function_call(struct task_struct *p, void (*func) (void *info), void *info) { int cpu; preempt_disable(); cpu = task_cpu(p); if (task_curr(p)) smp_call_function_single(cpu, func, info, 1); preempt_enable(); } #ifdef CONFIG_SMP /* * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held. 
*/ static int select_fallback_rq(int cpu, struct task_struct *p) { int dest_cpu; const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu)); /* Look for allowed, online CPU in same node. */ for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask) if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) return dest_cpu; /* Any allowed, online CPU? */ dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask); if (dest_cpu < nr_cpu_ids) return dest_cpu; /* No more Mr. Nice Guy. */ if (unlikely(dest_cpu >= nr_cpu_ids)) { dest_cpu = cpuset_cpus_allowed_fallback(p); /* * Don't tell them about moving exiting tasks or * kernel threads (both mm NULL), since they never * leave kernel. */ if (p->mm && printk_ratelimit()) { printk(KERN_INFO "process %d (%s) no " "longer affine to cpu%d\n", task_pid_nr(p), p->comm, cpu); } } return dest_cpu; } /* * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable. */ static inline int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags) { int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags); /* * In order not to call set_task_cpu() on a blocking task we need * to rely on ttwu() to place the task on a valid ->cpus_allowed * cpu. * * Since this is common to all placement strategies, this lives here. * * [ this allows ->select_task() to simply return task_cpu(p) and * not worry about this generic constraint ] */ if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) || !cpu_online(cpu))) cpu = select_fallback_rq(task_cpu(p), p); return cpu; } static void update_avg(u64 *avg, u64 sample) { s64 diff = sample - *avg; *avg += diff >> 3; } #endif /*** * try_to_wake_up - wake up a thread * @p: the to-be-woken-up thread * @state: the mask of task states that can be woken * @sync: do a synchronous wakeup? * * Put it on the run-queue if it's not already there. The "current" * thread is always on the run-queue (except when the actual * re-schedule is in progress), and as such you're allowed to do * the simpler "current->state = TASK_RUNNING" to mark yourself * runnable without the overhead of this. * * returns failure only if the task is already active. */ static int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) { int cpu, orig_cpu, this_cpu, success = 0; unsigned long flags = 0; unsigned long en_flags = ENQUEUE_WAKEUP; struct rq *rq; this_cpu = get_cpu(); smp_wmb(); rq = task_rq_lock(p, &flags); if (!(p->state & state)) goto out; if (p->se.on_rq) goto out_running; cpu = task_cpu(p); orig_cpu = cpu; #ifdef CONFIG_SMP if (unlikely(task_running(rq, p))) goto out_activate; /* * In order to handle concurrent wakeups and release the rq->lock * we put the task in TASK_WAKING state. * * First fix up the nr_uninterruptible count: */ if (task_contributes_to_load(p)) { if (likely(cpu_online(orig_cpu))) rq->nr_uninterruptible--; else this_rq()->nr_uninterruptible--; } p->state = TASK_WAKING; if (p->sched_class->task_waking) { p->sched_class->task_waking(rq, p); en_flags |= ENQUEUE_WAKING; } cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags); if (cpu != orig_cpu) set_task_cpu(p, cpu); __task_rq_unlock(rq); rq = cpu_rq(cpu); raw_spin_lock(&rq->lock); /* * We migrated the task without holding either rq->lock, however * since the task is not on the task list itself, nobody else * will try and migrate the task, hence the rq should match the * cpu we just moved it to. 
*/ WARN_ON(task_cpu(p) != cpu); WARN_ON(p->state != TASK_WAKING); #ifdef CONFIG_SCHEDSTATS schedstat_inc(rq, ttwu_count); if (cpu == this_cpu) schedstat_inc(rq, ttwu_local); else { struct sched_domain *sd; for_each_domain(this_cpu, sd) { if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { schedstat_inc(sd, ttwu_wake_remote); break; } } } #endif /* CONFIG_SCHEDSTATS */ out_activate: #endif /* CONFIG_SMP */ schedstat_inc(p, se.statistics.nr_wakeups); if (wake_flags & WF_SYNC) schedstat_inc(p, se.statistics.nr_wakeups_sync); if (orig_cpu != cpu) schedstat_inc(p, se.statistics.nr_wakeups_migrate); if (cpu == this_cpu) schedstat_inc(p, se.statistics.nr_wakeups_local); else schedstat_inc(p, se.statistics.nr_wakeups_remote); activate_task(rq, p, en_flags); success = 1; out_running: trace_sched_wakeup(p, success); check_preempt_curr(rq, p, wake_flags); p->state = TASK_RUNNING; #ifdef CONFIG_SMP if (p->sched_class->task_woken) p->sched_class->task_woken(rq, p); if (unlikely(rq->idle_stamp)) { u64 delta = rq->clock - rq->idle_stamp; u64 max = 2*sysctl_sched_migration_cost; if (delta > max) rq->avg_idle = max; else update_avg(&rq->avg_idle, delta); rq->idle_stamp = 0; } #endif out: task_rq_unlock(rq, &flags); put_cpu(); return success; } /** * wake_up_process - Wake up a specific process * @p: The process to be woken up. * * Attempt to wake up the nominated process and move it to the set of runnable * processes. Returns 1 if the process was woken up, 0 if it was already * running. * * It may be assumed that this function implies a write memory barrier before * changing the task state if and only if any tasks are woken up. */ int wake_up_process(struct task_struct *p) { return try_to_wake_up(p, TASK_ALL, 0); } EXPORT_SYMBOL(wake_up_process); int wake_up_state(struct task_struct *p, unsigned int state) { return try_to_wake_up(p, state, 0); } /* * Perform scheduler related setup for a newly forked process p. * p is forked by current. * * __sched_fork() is basic setup used by init_idle() too: */ static void __sched_fork(struct task_struct *p) { p->se.exec_start = 0; p->se.sum_exec_runtime = 0; p->se.prev_sum_exec_runtime = 0; p->se.nr_migrations = 0; #ifdef CONFIG_SCHEDSTATS memset(&p->se.statistics, 0, sizeof(p->se.statistics)); #endif INIT_LIST_HEAD(&p->rt.run_list); p->se.on_rq = 0; INIT_LIST_HEAD(&p->se.group_node); #ifdef CONFIG_PREEMPT_NOTIFIERS INIT_HLIST_HEAD(&p->preempt_notifiers); #endif } /* * fork()/clone()-time setup: */ void sched_fork(struct task_struct *p, int clone_flags) { int cpu = get_cpu(); __sched_fork(p); /* * We mark the process as running here. This guarantees that * nobody will actually run it, and a signal or other external * event cannot wake it up and insert it on the runqueue either. */ p->state = TASK_RUNNING; /* * Revert to default priority/policy on fork if requested. */ if (unlikely(p->sched_reset_on_fork)) { if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) { p->policy = SCHED_NORMAL; p->normal_prio = p->static_prio; } if (PRIO_TO_NICE(p->static_prio) < 0) { p->static_prio = NICE_TO_PRIO(0); p->normal_prio = p->static_prio; set_load_weight(p); } /* * We don't need the reset flag anymore after the fork. It has * fulfilled its duty: */ p->sched_reset_on_fork = 0; } /* * Make sure we do not leak PI boosting priority to the child. 
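 *
 * E.g. even if the parent is temporarily PI-boosted to an RT priority by
 * an rtmutex it holds, the child starts at the parent's normal_prio and,
 * being non-RT, is placed in the fair class below, so it is not born
 * real-time by accident.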
*/ p->prio = current->normal_prio; if (!rt_prio(p->prio)) p->sched_class = &fair_sched_class; if (p->sched_class->task_fork) p->sched_class->task_fork(p); /* * The child is not yet in the pid-hash so no cgroup attach races, * and the cgroup is pinned to this child due to cgroup_fork() * is ran before sched_fork(). * * Silence PROVE_RCU. */ rcu_read_lock(); set_task_cpu(p, cpu); rcu_read_unlock(); #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) if (likely(sched_info_on())) memset(&p->sched_info, 0, sizeof(p->sched_info)); #endif #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) p->oncpu = 0; #endif #ifdef CONFIG_PREEMPT /* Want to start with kernel preemption disabled. */ task_thread_info(p)->preempt_count = 1; #endif plist_node_init(&p->pushable_tasks, MAX_PRIO); put_cpu(); } /* * wake_up_new_task - wake up a newly created task for the first time. * * This function will do some initial scheduler statistics housekeeping * that must be done for every newly created context, then puts the task * on the runqueue and wakes it. */ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) { unsigned long flags = 0; struct rq *rq; int cpu __maybe_unused = get_cpu(); #ifdef CONFIG_SMP rq = task_rq_lock(p, &flags); p->state = TASK_WAKING; /* * Fork balancing, do it here and not earlier because: * - cpus_allowed can change in the fork path * - any previously selected cpu might disappear through hotplug * * We set TASK_WAKING so that select_task_rq() can drop rq->lock * without people poking at ->cpus_allowed. */ cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0); set_task_cpu(p, cpu); p->state = TASK_RUNNING; task_rq_unlock(rq, &flags); #endif rq = task_rq_lock(p, &flags); activate_task(rq, p, 0); trace_sched_wakeup_new(p, 1); check_preempt_curr(rq, p, WF_FORK); #ifdef CONFIG_SMP if (p->sched_class->task_woken) p->sched_class->task_woken(rq, p); #endif task_rq_unlock(rq, &flags); put_cpu(); } #ifdef CONFIG_PREEMPT_NOTIFIERS /** * preempt_notifier_register - tell me when current is being preempted & rescheduled * @notifier: notifier struct to register */ void preempt_notifier_register(struct preempt_notifier *notifier) { hlist_add_head(&notifier->link, &current->preempt_notifiers); } EXPORT_SYMBOL_GPL(preempt_notifier_register); /** * preempt_notifier_unregister - no longer interested in preemption notifications * @notifier: notifier struct to unregister * * This is safe to call from within a preemption notifier. 
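 *
 * A minimal usage sketch (the my_* names are placeholders; struct
 * preempt_ops and preempt_notifier_init() are declared in
 * <linux/preempt.h>):
 *
 *	static void my_in(struct preempt_notifier *n, int cpu) { }
 *	static void my_out(struct preempt_notifier *n, struct task_struct *next) { }
 *	static struct preempt_ops my_ops = { .sched_in = my_in, .sched_out = my_out };
 *	static struct preempt_notifier my_notifier;
 *
 *	preempt_notifier_init(&my_notifier, &my_ops);
 *	preempt_notifier_register(&my_notifier);
 *	... my_out()/my_in() now fire when current is switched out/in ...
 *	preempt_notifier_unregister(&my_notifier);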
*/ void preempt_notifier_unregister(struct preempt_notifier *notifier) { hlist_del(&notifier->link); } EXPORT_SYMBOL_GPL(preempt_notifier_unregister); static void fire_sched_in_preempt_notifiers(struct task_struct *curr) { struct preempt_notifier *notifier; struct hlist_node *node; hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) notifier->ops->sched_in(notifier, raw_smp_processor_id()); } static void fire_sched_out_preempt_notifiers(struct task_struct *curr, struct task_struct *next) { struct preempt_notifier *notifier; struct hlist_node *node; hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) notifier->ops->sched_out(notifier, next); } #else /* !CONFIG_PREEMPT_NOTIFIERS */ static void fire_sched_in_preempt_notifiers(struct task_struct *curr) { } static void fire_sched_out_preempt_notifiers(struct task_struct *curr, struct task_struct *next) { } #endif /* CONFIG_PREEMPT_NOTIFIERS */ /** * prepare_task_switch - prepare to switch tasks * @rq: the runqueue preparing to switch * @prev: the current task that is being switched out * @next: the task we are going to switch to. * * This is called with the rq lock held and interrupts off. It must * be paired with a subsequent finish_task_switch after the context * switch. * * prepare_task_switch sets up locking and calls architecture specific * hooks. */ static inline void prepare_task_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) { fire_sched_out_preempt_notifiers(prev, next); prepare_lock_switch(rq, next); prepare_arch_switch(next); } /** * finish_task_switch - clean up after a task-switch * @rq: runqueue associated with task-switch * @prev: the thread we just switched away from. * * finish_task_switch must be called after the context switch, paired * with a prepare_task_switch call before the context switch. * finish_task_switch will reconcile locking set up by prepare_task_switch, * and do any other architecture-specific cleanup actions. * * Note that we may have delayed dropping an mm in context_switch(). If * so, we finish that here outside of the runqueue lock. (Doing it * with the lock held can cause deadlocks; see schedule() for * details.) */ static void finish_task_switch(struct rq *rq, struct task_struct *prev) __releases(rq->lock) { struct mm_struct *mm = rq->prev_mm; long prev_state; rq->prev_mm = NULL; /* * A task struct has one reference for the use as "current". * If a task dies, then it sets TASK_DEAD in tsk->state and calls * schedule one last time. The schedule call will never return, and * the scheduled task must drop that reference. * The test for TASK_DEAD must occur while the runqueue locks are * still held, otherwise prev could be scheduled on another cpu, die * there before we look at prev->state, and then the reference would * be dropped twice. * Manfred Spraul <manfred@colorfullife.com> */ prev_state = prev->state; finish_arch_switch(prev); #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW local_irq_disable(); #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ perf_event_task_sched_in(current); #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW local_irq_enable(); #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ finish_lock_switch(rq, prev); fire_sched_in_preempt_notifiers(current); if (mm) mmdrop(mm); if (unlikely(prev_state == TASK_DEAD)) { /* * Remove function-return probe instances associated with this * task and put them back on the free list. 
*/ kprobe_flush_task(prev); put_task_struct(prev); } } #ifdef CONFIG_SMP /* assumes rq->lock is held */ static inline void pre_schedule(struct rq *rq, struct task_struct *prev) { if (prev->sched_class->pre_schedule) prev->sched_class->pre_schedule(rq, prev); } /* rq->lock is NOT held, but preemption is disabled */ static inline void post_schedule(struct rq *rq) { if (rq->post_schedule) { unsigned long flags; raw_spin_lock_irqsave(&rq->lock, flags); if (rq->curr->sched_class->post_schedule) rq->curr->sched_class->post_schedule(rq); raw_spin_unlock_irqrestore(&rq->lock, flags); rq->post_schedule = 0; } } #else static inline void pre_schedule(struct rq *rq, struct task_struct *p) { } static inline void post_schedule(struct rq *rq) { } #endif /** * schedule_tail - first thing a freshly forked thread must call. * @prev: the thread we just switched away from. */ asmlinkage void schedule_tail(struct task_struct *prev) __releases(rq->lock) { struct rq *rq = this_rq(); finish_task_switch(rq, prev); /* * FIXME: do we need to worry about rq being invalidated by the * task_switch? */ post_schedule(rq); #ifdef __ARCH_WANT_UNLOCKED_CTXSW /* In this case, finish_task_switch does not reenable preemption */ preempt_enable(); #endif if (current->set_child_tid) put_user(task_pid_vnr(current), current->set_child_tid); } /* * context_switch - switch to the new MM and the new * thread's register state. */ static inline void context_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) { struct mm_struct *mm, *oldmm; prepare_task_switch(rq, prev, next); trace_sched_switch(prev, next); mm = next->mm; oldmm = prev->active_mm; /* * For paravirt, this is coupled with an exit in switch_to to * combine the page table reload and the switch backend into * one hypercall. */ arch_start_context_switch(prev); if (likely(!mm)) { next->active_mm = oldmm; atomic_inc(&oldmm->mm_count); enter_lazy_tlb(oldmm, next); } else switch_mm(oldmm, mm, next); if (likely(!prev->mm)) { prev->active_mm = NULL; rq->prev_mm = oldmm; } /* * Since the runqueue lock will be released by the next * task (which is an invalid locking op but in the case * of the scheduler it's an obvious special-case), so we * do an early lockdep release here: */ #ifndef __ARCH_WANT_UNLOCKED_CTXSW spin_release(&rq->lock.dep_map, 1, _THIS_IP_); #endif /* Here we just switch the register state and the stack. */ switch_to(prev, next, prev); barrier(); /* * this_rq must be evaluated again because prev may have moved * CPUs since it called schedule(), thus the 'rq' on its stack * frame will be invalid. */ finish_task_switch(this_rq(), prev); } /* * nr_running, nr_uninterruptible and nr_context_switches: * * externally visible scheduler statistics: current number of runnable * threads, current number of uninterruptible-sleeping threads, total * number of context switches performed since bootup. */ unsigned long nr_running(void) { unsigned long i, sum = 0; for_each_online_cpu(i) sum += cpu_rq(i)->nr_running; return sum; } unsigned long nr_uninterruptible(void) { unsigned long i, sum = 0; for_each_possible_cpu(i) sum += cpu_rq(i)->nr_uninterruptible; /* * Since we read the counters lockless, it might be slightly * inaccurate. 
Do not allow it to go below zero though: */ if (unlikely((long)sum < 0)) sum = 0; return sum; } unsigned long long nr_context_switches(void) { int i; unsigned long long sum = 0; for_each_possible_cpu(i) sum += cpu_rq(i)->nr_switches; return sum; } unsigned long nr_iowait(void) { unsigned long i, sum = 0; for_each_possible_cpu(i) sum += atomic_read(&cpu_rq(i)->nr_iowait); return sum; } unsigned long nr_iowait_cpu(int cpu) { struct rq *this = cpu_rq(cpu); return atomic_read(&this->nr_iowait); } unsigned long this_cpu_load(void) { struct rq *this = this_rq(); return this->cpu_load[0]; } /* Variables and functions for calc_load */ static atomic_long_t calc_load_tasks; static unsigned long calc_load_update; unsigned long avenrun[3]; EXPORT_SYMBOL(avenrun); static long calc_load_fold_active(struct rq *this_rq) { long nr_active, delta = 0; nr_active = this_rq->nr_running; nr_active += (long) this_rq->nr_uninterruptible; if (nr_active != this_rq->calc_load_active) { delta = nr_active - this_rq->calc_load_active; this_rq->calc_load_active = nr_active; } return delta; } #ifdef CONFIG_NO_HZ /* * For NO_HZ we delay the active fold to the next LOAD_FREQ update. * * When making the ILB scale, we should try to pull this in as well. */ static atomic_long_t calc_load_tasks_idle; static void calc_load_account_idle(struct rq *this_rq) { long delta; delta = calc_load_fold_active(this_rq); if (delta) atomic_long_add(delta, &calc_load_tasks_idle); } static long calc_load_fold_idle(void) { long delta = 0; /* * Its got a race, we don't care... */ if (atomic_long_read(&calc_load_tasks_idle)) delta = atomic_long_xchg(&calc_load_tasks_idle, 0); return delta; } #else static void calc_load_account_idle(struct rq *this_rq) { } static inline long calc_load_fold_idle(void) { return 0; } #endif /** * get_avenrun - get the load average array * @loads: pointer to dest load array * @offset: offset to add * @shift: shift count to shift the result left * * These values are estimates at best, so no need for locking. */ void get_avenrun(unsigned long *loads, unsigned long offset, int shift) { loads[0] = (avenrun[0] + offset) << shift; loads[1] = (avenrun[1] + offset) << shift; loads[2] = (avenrun[2] + offset) << shift; } static unsigned long calc_load(unsigned long load, unsigned long exp, unsigned long active) { load *= exp; load += active * (FIXED_1 - exp); return load >> FSHIFT; } /* * calc_load - update the avenrun load estimates 10 ticks after the * CPUs have updated calc_load_tasks. */ void calc_global_load(unsigned long ticks) { unsigned long upd = calc_load_update + 10; long active; if (time_before(jiffies, upd)) return; active = atomic_long_read(&calc_load_tasks); active = active > 0 ? active * FIXED_1 : 0; avenrun[0] = calc_load(avenrun[0], EXP_1, active); avenrun[1] = calc_load(avenrun[1], EXP_5, active); avenrun[2] = calc_load(avenrun[2], EXP_15, active); calc_load_update += LOAD_FREQ; } /* * Called from update_cpu_load() to periodically update this CPU's * active count. 
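 *
 * The deltas folded into calc_load_tasks here are what calc_global_load()
 * above turns into avenrun[]. As a worked example of that fixed-point
 * update (FSHIFT = 11, FIXED_1 = 2048, EXP_1 = 1884 from <linux/sched.h>):
 * starting from avenrun[0] = 0 with one runnable task (active = 2048), a
 * single LOAD_FREQ update gives (0 * 1884 + 2048 * (2048 - 1884)) >> 11
 * = 164, i.e. a 1-minute load average of 164/2048 ~= 0.08 that keeps
 * creeping towards 1.0 on subsequent updates.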
*/ static void calc_load_account_active(struct rq *this_rq) { long delta; if (time_before(jiffies, this_rq->calc_load_update)) return; delta = calc_load_fold_active(this_rq); delta += calc_load_fold_idle(); if (delta) atomic_long_add(delta, &calc_load_tasks); this_rq->calc_load_update += LOAD_FREQ; } /* * The exact cpuload at various idx values, calculated at every tick would be * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load * * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called * on nth tick when cpu may be busy, then we have: * load = ((2^idx - 1) / 2^idx)^(n-1) * load * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load * * decay_load_missed() below does efficient calculation of * load = ((2^idx - 1) / 2^idx)^(n-1) * load * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load * * The calculation is approximated on a 128 point scale. * degrade_zero_ticks is the number of ticks after which load at any * particular idx is approximated to be zero. * degrade_factor is a precomputed table, a row for each load idx. * Each column corresponds to degradation factor for a power of two ticks, * based on 128 point scale. * Example: * row 2, col 3 (=12) says that the degradation at load idx 2 after * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8). * * With this power of 2 load factors, we can degrade the load n times * by looking at 1 bits in n and doing as many mult/shift instead of * n mult/shifts needed by the exact degradation. */ #define DEGRADE_SHIFT 7 static const unsigned char degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128}; static const unsigned char degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = { {0, 0, 0, 0, 0, 0, 0, 0}, {64, 32, 8, 0, 0, 0, 0, 0}, {96, 72, 40, 12, 1, 0, 0}, {112, 98, 75, 43, 15, 1, 0}, {120, 112, 98, 76, 45, 16, 2} }; /* * Update cpu_load for any missed ticks, due to tickless idle. The backlog * would be when CPU is idle and so we just decay the old load without * adding any new load. */ static unsigned long decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) { int j = 0; if (!missed_updates) return load; if (missed_updates >= degrade_zero_ticks[idx]) return 0; if (idx == 1) return load >> missed_updates; while (missed_updates) { if (missed_updates % 2) load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT; missed_updates >>= 1; j++; } return load; } /* * Update rq->cpu_load[] statistics. This function is usually called every * scheduler tick (TICK_NSEC). With tickless idle this will not be called * every tick. We fix it up based on jiffies. */ static void update_cpu_load(struct rq *this_rq) { unsigned long this_load = this_rq->load.weight; unsigned long curr_jiffies = jiffies; unsigned long pending_updates; int i, scale; this_rq->nr_load_updates++; /* Avoid repeated calls on same jiffy, when moving in and out of idle */ if (curr_jiffies == this_rq->last_load_update_tick) return; pending_updates = curr_jiffies - this_rq->last_load_update_tick; this_rq->last_load_update_tick = curr_jiffies; /* Update our load: */ this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */ for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { unsigned long old_load, new_load; /* scale is effectively 1 << i now, and >> i divides by scale */ old_load = this_rq->cpu_load[i]; old_load = decay_load_missed(old_load, pending_updates - 1, i); new_load = this_load; /* * Round up the averaging division if load is increasing. 
This * prevents us from getting stuck on 9 if the load is 10, for * example. */ if (new_load > old_load) new_load += scale - 1; this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; } } static void update_cpu_load_active(struct rq *this_rq) { update_cpu_load(this_rq); calc_load_account_active(this_rq); } #ifdef CONFIG_SMP /* * sched_exec - execve() is a valuable balancing opportunity, because at * this point the task has the smallest effective memory and cache footprint. */ void sched_exec(void) { struct task_struct *p = current; unsigned long flags = 0; struct rq *rq; int dest_cpu; rq = task_rq_lock(p, &flags); dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0); if (dest_cpu == smp_processor_id()) goto unlock; /* * select_task_rq() can race against ->cpus_allowed */ if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) && likely(cpu_active(dest_cpu)) && migrate_task(p, dest_cpu)) { struct migration_arg arg = { p, dest_cpu }; task_rq_unlock(rq, &flags); stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); return; } unlock: task_rq_unlock(rq, &flags); } #endif DEFINE_PER_CPU(struct kernel_stat, kstat); EXPORT_PER_CPU_SYMBOL(kstat); /* * Return any ns on the sched_clock that have not yet been accounted in * @p in case that task is currently running. * * Called with task_rq_lock() held on @rq. */ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) { u64 ns = 0; if (task_current(rq, p)) { update_rq_clock(rq); ns = rq->clock - p->se.exec_start; if ((s64)ns < 0) ns = 0; } return ns; } unsigned long long task_delta_exec(struct task_struct *p) { unsigned long flags = 0; struct rq *rq; u64 ns = 0; rq = task_rq_lock(p, &flags); ns = do_task_delta_exec(p, rq); task_rq_unlock(rq, &flags); return ns; } /* * Return accounted runtime for the task. * In case the task is currently running, return the runtime plus current's * pending runtime that have not been accounted yet. */ unsigned long long task_sched_runtime(struct task_struct *p) { unsigned long flags = 0; struct rq *rq; u64 ns = 0; rq = task_rq_lock(p, &flags); ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); task_rq_unlock(rq, &flags); return ns; } /* * Return sum_exec_runtime for the thread group. * In case the task is currently running, return the sum plus current's * pending runtime that have not been accounted yet. * * Note that the thread group might have other running tasks as well, * so the return value not includes other pending runtime that other * running tasks might have. */ unsigned long long thread_group_sched_runtime(struct task_struct *p) { struct task_cputime totals; unsigned long flags = 0; struct rq *rq; u64 ns; rq = task_rq_lock(p, &flags); thread_group_cputime(p, &totals); ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); task_rq_unlock(rq, &flags); return ns; } /* * Account user cpu time to a process. * @p: the process that the cpu time gets accounted to * @cputime: the cpu time spent in user space since the last update * @cputime_scaled: cputime scaled by cpu frequency */ void account_user_time(struct task_struct *p, cputime_t cputime, cputime_t cputime_scaled) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; cputime64_t tmp; /* Add user time to process. */ p->utime = cputime_add(p->utime, cputime); p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); account_group_user_time(p, cputime); /* Add user time to cpustat. 
*/ tmp = cputime_to_cputime64(cputime); if (TASK_NICE(p) > 0) cpustat->nice = cputime64_add(cpustat->nice, tmp); else cpustat->user = cputime64_add(cpustat->user, tmp); cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime); /* Account for user time used */ acct_update_integrals(p); } /* * Account guest cpu time to a process. * @p: the process that the cpu time gets accounted to * @cputime: the cpu time spent in virtual machine since the last update * @cputime_scaled: cputime scaled by cpu frequency */ static void account_guest_time(struct task_struct *p, cputime_t cputime, cputime_t cputime_scaled) { cputime64_t tmp; struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; tmp = cputime_to_cputime64(cputime); /* Add guest time to process. */ p->utime = cputime_add(p->utime, cputime); p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); account_group_user_time(p, cputime); p->gtime = cputime_add(p->gtime, cputime); /* Add guest time to cpustat. */ if (TASK_NICE(p) > 0) { cpustat->nice = cputime64_add(cpustat->nice, tmp); cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp); } else { cpustat->user = cputime64_add(cpustat->user, tmp); cpustat->guest = cputime64_add(cpustat->guest, tmp); } } /* * Account system cpu time to a process. * @p: the process that the cpu time gets accounted to * @hardirq_offset: the offset to subtract from hardirq_count() * @cputime: the cpu time spent in kernel space since the last update * @cputime_scaled: cputime scaled by cpu frequency */ void account_system_time(struct task_struct *p, int hardirq_offset, cputime_t cputime, cputime_t cputime_scaled) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; cputime64_t tmp; if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { account_guest_time(p, cputime, cputime_scaled); return; } /* Add system time to process. */ p->stime = cputime_add(p->stime, cputime); p->stimescaled = cputime_add(p->stimescaled, cputime_scaled); account_group_system_time(p, cputime); /* Add system time to cpustat. */ tmp = cputime_to_cputime64(cputime); if (hardirq_count() - hardirq_offset) cpustat->irq = cputime64_add(cpustat->irq, tmp); else if (softirq_count()) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); else cpustat->system = cputime64_add(cpustat->system, tmp); cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime); /* Account for system time used */ acct_update_integrals(p); } /* * Account for involuntary wait time. * @steal: the cpu time spent in involuntary wait */ void account_steal_time(cputime_t cputime) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; cputime64_t cputime64 = cputime_to_cputime64(cputime); cpustat->steal = cputime64_add(cpustat->steal, cputime64); } /* * Account for idle time. * @cputime: the cpu time spent in idle wait */ void account_idle_time(cputime_t cputime) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; cputime64_t cputime64 = cputime_to_cputime64(cputime); struct rq *rq = this_rq(); if (atomic_read(&rq->nr_iowait) > 0) cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); else cpustat->idle = cputime64_add(cpustat->idle, cputime64); } #ifndef CONFIG_VIRT_CPU_ACCOUNTING /* * Account a single tick of cpu time. 
* @p: the process that the cpu time gets accounted to * @user_tick: indicates if the tick is a user or a system tick */ void account_process_tick(struct task_struct *p, int user_tick) { cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); struct rq *rq = this_rq(); if (user_tick) account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy, one_jiffy_scaled); else account_idle_time(cputime_one_jiffy); } /* * Account multiple ticks of steal time. * @p: the process from which the cpu time has been stolen * @ticks: number of stolen ticks */ void account_steal_ticks(unsigned long ticks) { account_steal_time(jiffies_to_cputime(ticks)); } /* * Account multiple ticks of idle time. * @ticks: number of stolen ticks */ void account_idle_ticks(unsigned long ticks) { account_idle_time(jiffies_to_cputime(ticks)); } #endif /* * Use precise platform statistics if available: */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) { *ut = p->utime; *st = p->stime; } void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) { struct task_cputime cputime; thread_group_cputime(p, &cputime); *ut = cputime.utime; *st = cputime.stime; } #else #ifndef nsecs_to_cputime # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) #endif void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) { cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime); /* * Use CFS's precise accounting: */ rtime = nsecs_to_cputime(p->se.sum_exec_runtime); if (total) { u64 temp = rtime; temp *= utime; do_div(temp, total); utime = (cputime_t)temp; } else utime = rtime; /* * Compare with previous values, to keep monotonicity: */ p->prev_utime = max(p->prev_utime, utime); p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime)); *ut = p->prev_utime; *st = p->prev_stime; } /* * Must be called with siglock held. */ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) { struct signal_struct *sig = p->signal; struct task_cputime cputime; cputime_t rtime, utime, total; thread_group_cputime(p, &cputime); total = cputime_add(cputime.utime, cputime.stime); rtime = nsecs_to_cputime(cputime.sum_exec_runtime); if (total) { u64 temp = rtime; temp *= cputime.utime; do_div(temp, total); utime = (cputime_t)temp; } else utime = rtime; sig->prev_utime = max(sig->prev_utime, utime); sig->prev_stime = max(sig->prev_stime, cputime_sub(rtime, sig->prev_utime)); *ut = sig->prev_utime; *st = sig->prev_stime; } #endif /* * This function gets called by the timer code, with HZ frequency. * We call it with interrupts disabled. * * It also gets called by the fork code, when changing the parent's * timeslices. 
*/ void scheduler_tick(void) { int cpu = smp_processor_id(); struct rq *rq = cpu_rq(cpu); struct task_struct *curr = rq->curr; sched_clock_tick(); raw_spin_lock(&rq->lock); update_rq_clock(rq); update_cpu_load_active(rq); curr->sched_class->task_tick(rq, curr, 0); raw_spin_unlock(&rq->lock); perf_event_task_tick(curr); #ifdef CONFIG_SMP rq->idle_at_tick = idle_cpu(cpu); trigger_load_balance(rq, cpu); #endif } notrace unsigned long get_parent_ip(unsigned long addr) { if (in_lock_functions(addr)) { addr = CALLER_ADDR2; if (in_lock_functions(addr)) addr = CALLER_ADDR3; } return addr; } #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ defined(CONFIG_PREEMPT_TRACER)) void __kprobes add_preempt_count(int val) { #ifdef CONFIG_DEBUG_PREEMPT /* * Underflow? */ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) return; #endif preempt_count() += val; #ifdef CONFIG_DEBUG_PREEMPT /* * Spinlock count overflowing soon? */ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK - 10); #endif if (preempt_count() == val) trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); } EXPORT_SYMBOL(add_preempt_count); void __kprobes sub_preempt_count(int val) { #ifdef CONFIG_DEBUG_PREEMPT /* * Underflow? */ if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) return; /* * Is the spinlock portion underflowing? */ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && !(preempt_count() & PREEMPT_MASK))) return; #endif if (preempt_count() == val) trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); preempt_count() -= val; } EXPORT_SYMBOL(sub_preempt_count); #endif /* * Print scheduling while atomic bug: */ static noinline void __schedule_bug(struct task_struct *prev) { struct pt_regs *regs = get_irq_regs(); printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", prev->comm, prev->pid, preempt_count()); debug_show_held_locks(prev); print_modules(); if (irqs_disabled()) print_irqtrace_events(prev); if (regs) show_regs(regs); else dump_stack(); } /* * Various schedule()-time debugging checks and statistics: */ static inline void schedule_debug(struct task_struct *prev) { /* * Test if we are atomic. Since do_exit() needs to call into * schedule() atomically, we ignore that path for now. * Otherwise, whine if we are scheduling when we should not be. */ if (unlikely(in_atomic_preempt_off() && !prev->exit_state)) __schedule_bug(prev); profile_hit(SCHED_PROFILING, __builtin_return_address(0)); schedstat_inc(this_rq(), sched_count); #ifdef CONFIG_SCHEDSTATS if (unlikely(prev->lock_depth >= 0)) { schedstat_inc(this_rq(), bkl_count); schedstat_inc(prev, sched_info.bkl_count); } #endif } static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); prev->sched_class->put_prev_task(rq, prev); } /* * Pick up the highest-prio task: */ static inline struct task_struct * pick_next_task(struct rq *rq) { const struct sched_class *class; struct task_struct *p; /* * Optimization: we know that if all tasks are in * the fair class we can call that function directly: */ if (likely(rq->nr_running == rq->cfs.nr_running)) { p = fair_sched_class.pick_next_task(rq); if (likely(p)) return p; } class = sched_class_highest; for ( ; ; ) { p = class->pick_next_task(rq); if (p) return p; /* * Will never be NULL as the idle class always * returns a non-NULL p: */ class = class->next; } } /* * schedule() is the main scheduler function. 
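 *
 * A typical sleeping caller follows the classic pattern (illustrative):
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * A wakeup that races in between setting the state and calling
 * schedule() is not lost: it puts the task back to TASK_RUNNING, so
 * schedule() does not deactivate it and effectively returns right away.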
*/ asmlinkage void __sched schedule(void) { struct task_struct *prev, *next; unsigned long *switch_count; struct rq *rq; int cpu; need_resched: preempt_disable(); cpu = smp_processor_id(); rq = cpu_rq(cpu); rcu_note_context_switch(cpu); prev = rq->curr; switch_count = &prev->nivcsw; release_kernel_lock(prev); need_resched_nonpreemptible: schedule_debug(prev); if (sched_feat(HRTICK)) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { if (unlikely(signal_pending_state(prev->state, prev))) prev->state = TASK_RUNNING; else deactivate_task(rq, prev, DEQUEUE_SLEEP); switch_count = &prev->nvcsw; } pre_schedule(rq, prev); if (unlikely(!rq->nr_running)) idle_balance(cpu, rq); put_prev_task(rq, prev); next = pick_next_task(rq); clear_tsk_need_resched(prev); rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); perf_event_task_sched_out(prev, next); rq->nr_switches++; rq->curr = next; ++*switch_count; context_switch(rq, prev, next); /* unlocks the rq */ /* * the context switch might have flipped the stack from under * us, hence refresh the local variables. */ cpu = smp_processor_id(); rq = cpu_rq(cpu); } else raw_spin_unlock_irq(&rq->lock); sec_debug_task_sched_log(cpu, rq->curr); post_schedule(rq); if (unlikely(reacquire_kernel_lock(current) < 0)) { prev = rq->curr; switch_count = &prev->nivcsw; goto need_resched_nonpreemptible; } preempt_enable_no_resched(); if (need_resched()) goto need_resched; } EXPORT_SYMBOL(schedule); #ifdef CONFIG_MUTEX_SPIN_ON_OWNER /* * Look out! "owner" is an entirely speculative pointer * access and not reliable. */ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) { unsigned int cpu; struct rq *rq; if (!sched_feat(OWNER_SPIN)) return 0; #ifdef CONFIG_DEBUG_PAGEALLOC /* * Need to access the cpu field knowing that * DEBUG_PAGEALLOC could have unmapped it if * the mutex owner just released it and exited. */ if (probe_kernel_address(&owner->cpu, cpu)) return 0; #else cpu = owner->cpu; #endif /* * Even if the access succeeded (likely case), * the cpu field may no longer be valid. */ if (cpu >= nr_cpumask_bits) return 0; /* * We need to validate that we can do a * get_cpu() and that we have the percpu area. */ if (!cpu_online(cpu)) return 0; rq = cpu_rq(cpu); for (;;) { /* * Owner changed, break to re-assess state. */ if (lock->owner != owner) { /* * If the lock has switched to a different owner, * we likely have heavy contention. Return 0 to quit * optimistic spinning and not contend further: */ if (lock->owner) return 0; break; } /* * Is that owner really running on that cpu? */ if (task_thread_info(rq->curr) != owner || need_resched()) return 0; cpu_relax(); } return 1; } #endif #ifdef CONFIG_PREEMPT /* * this is the entry point to schedule() from in-kernel preemption * off of preempt_enable. Kernel preemptions off return from interrupt * occur there and call schedule directly. */ asmlinkage void __sched preempt_schedule(void) { struct thread_info *ti = current_thread_info(); /* * If there is a non-zero preempt_count or interrupts are disabled, * we do not want to preempt the current task. Just return.. */ if (likely(ti->preempt_count || irqs_disabled())) return; do { add_preempt_count(PREEMPT_ACTIVE); schedule(); sub_preempt_count(PREEMPT_ACTIVE); /* * Check again in case we missed a preemption opportunity * between schedule and now. 
*/ barrier(); } while (need_resched()); } EXPORT_SYMBOL(preempt_schedule); /* * this is the entry point to schedule() from kernel preemption * off of irq context. * Note, that this is called and return with irqs disabled. This will * protect us against recursive calling from irq. */ asmlinkage void __sched preempt_schedule_irq(void) { struct thread_info *ti = current_thread_info(); /* Catch callers which need to be fixed */ BUG_ON(ti->preempt_count || !irqs_disabled()); do { add_preempt_count(PREEMPT_ACTIVE); local_irq_enable(); schedule(); local_irq_disable(); sub_preempt_count(PREEMPT_ACTIVE); /* * Check again in case we missed a preemption opportunity * between schedule and now. */ barrier(); } while (need_resched()); } #endif /* CONFIG_PREEMPT */ int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, void *key) { return try_to_wake_up(curr->private, mode, wake_flags); } EXPORT_SYMBOL(default_wake_function); /* * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve * number) then we wake all the non-exclusive tasks and one exclusive task. * * There are circumstances in which we can try to wake a task which has already * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns * zero in this (rare) case, and we handle it by continuing to scan the queue. */ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int wake_flags, void *key) { wait_queue_t *curr, *next; list_for_each_entry_safe(curr, next, &q->task_list, task_list) { unsigned flags = curr->flags; if (curr->func(curr, mode, wake_flags, key) && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) break; } } /** * __wake_up - wake up threads blocked on a waitqueue. * @q: the waitqueue * @mode: which threads * @nr_exclusive: how many wake-one or wake-many threads to wake up * @key: is directly passed to the wakeup function * * It may be assumed that this function implies a write memory barrier before * changing the task state if and only if any tasks are woken up. */ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key) { unsigned long flags; spin_lock_irqsave(&q->lock, flags); __wake_up_common(q, mode, nr_exclusive, 0, key); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL(__wake_up); /* * Same as __wake_up but called with the spinlock in wait_queue_head_t held. */ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode) { __wake_up_common(q, mode, 1, 0, NULL); } EXPORT_SYMBOL_GPL(__wake_up_locked); void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) { __wake_up_common(q, mode, 1, 0, key); } /** * __wake_up_sync_key - wake up threads blocked on a waitqueue. * @q: the waitqueue * @mode: which threads * @nr_exclusive: how many wake-one or wake-many threads to wake up * @key: opaque value to be passed to wakeup targets * * The sync wakeup differs that the waker knows that it will schedule * away soon, so while the target thread will be woken up, it will not * be migrated to another CPU - ie. the two threads are 'synchronized' * with each other. This can prevent needless bouncing between CPUs. * * On UP it can prevent extra preemption. * * It may be assumed that this function implies a write memory barrier before * changing the task state if and only if any tasks are woken up. 
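 *
 * Illustrative pairing (an assumption about typical usage, not taken from
 * this file; my_wq and my_flag are made-up names): the wake_up*() family
 * above is normally matched by a sleeper using the wait_event*() macros on
 * the same wait queue head:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_flag;
 *
 *	sleeper:
 *		wait_event_interruptible(my_wq, my_flag != 0);
 *
 *	waker:
 *		my_flag = 1;
 *		wake_up_interruptible(&my_wq);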
*/ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key) { unsigned long flags; int wake_flags = WF_SYNC; if (unlikely(!q)) return; if (unlikely(!nr_exclusive)) wake_flags = 0; spin_lock_irqsave(&q->lock, flags); __wake_up_common(q, mode, nr_exclusive, wake_flags, key); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL_GPL(__wake_up_sync_key); /* * __wake_up_sync - see __wake_up_sync_key() */ void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) { __wake_up_sync_key(q, mode, nr_exclusive, NULL); } EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ /** * complete: - signals a single thread waiting on this completion * @x: holds the state of this particular completion * * This will wake up a single thread waiting on this completion. Threads will be * awakened in the same order in which they were queued. * * See also complete_all(), wait_for_completion() and related routines. * * It may be assumed that this function implies a write memory barrier before * changing the task state if and only if any tasks are woken up. */ void complete(struct completion *x) { unsigned long flags; spin_lock_irqsave(&x->wait.lock, flags); x->done++; __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL); spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete); /** * complete_all: - signals all threads waiting on this completion * @x: holds the state of this particular completion * * This will wake up all threads waiting on this particular completion event. * * It may be assumed that this function implies a write memory barrier before * changing the task state if and only if any tasks are woken up. */ void complete_all(struct completion *x) { unsigned long flags; spin_lock_irqsave(&x->wait.lock, flags); x->done += UINT_MAX/2; __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL); spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete_all); static inline long __sched do_wait_for_common(struct completion *x, long timeout, int state) { if (!x->done) { DECLARE_WAITQUEUE(wait, current); __add_wait_queue_tail_exclusive(&x->wait, &wait); do { if (signal_pending_state(state, current)) { timeout = -ERESTARTSYS; break; } __set_current_state(state); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); } while (!x->done && timeout); __remove_wait_queue(&x->wait, &wait); if (!x->done) return timeout; } x->done--; return timeout ?: 1; } static long __sched wait_for_common(struct completion *x, long timeout, int state) { might_sleep(); spin_lock_irq(&x->wait.lock); timeout = do_wait_for_common(x, timeout, state); spin_unlock_irq(&x->wait.lock); return timeout; } /** * wait_for_completion: - waits for completion of a task * @x: holds the state of this particular completion * * This waits to be signaled for completion of a specific task. It is NOT * interruptible and there is no timeout. * * See also similar routines (i.e. wait_for_completion_timeout()) with timeout * and interrupt capability. Also see complete(). */ void __sched wait_for_completion(struct completion *x) { wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(wait_for_completion); /** * wait_for_completion_timeout: - waits for completion of a task (w/timeout) * @x: holds the state of this particular completion * @timeout: timeout value in jiffies * * This waits for either a completion of a specific task to be signaled or for a * specified timeout to expire. The timeout is in jiffies. 
It is not * interruptible. */ unsigned long __sched wait_for_completion_timeout(struct completion *x, unsigned long timeout) { return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(wait_for_completion_timeout); /** * wait_for_completion_interruptible: - waits for completion of a task (w/intr) * @x: holds the state of this particular completion * * This waits for completion of a specific task to be signaled. It is * interruptible. */ int __sched wait_for_completion_interruptible(struct completion *x) { long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); if (t == -ERESTARTSYS) return t; return 0; } EXPORT_SYMBOL(wait_for_completion_interruptible); /** * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr)) * @x: holds the state of this particular completion * @timeout: timeout value in jiffies * * This waits for either a completion of a specific task to be signaled or for a * specified timeout to expire. It is interruptible. The timeout is in jiffies. */ unsigned long __sched wait_for_completion_interruptible_timeout(struct completion *x, unsigned long timeout) { return wait_for_common(x, timeout, TASK_INTERRUPTIBLE); } EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); /** * wait_for_completion_killable: - waits for completion of a task (killable) * @x: holds the state of this particular completion * * This waits to be signaled for completion of a specific task. It can be * interrupted by a kill signal. */ int __sched wait_for_completion_killable(struct completion *x) { long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); if (t == -ERESTARTSYS) return t; return 0; } EXPORT_SYMBOL(wait_for_completion_killable); /** * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable)) * @x: holds the state of this particular completion * @timeout: timeout value in jiffies * * This waits for either a completion of a specific task to be * signaled or for a specified timeout to expire. It can be * interrupted by a kill signal. The timeout is in jiffies. */ unsigned long __sched wait_for_completion_killable_timeout(struct completion *x, unsigned long timeout) { return wait_for_common(x, timeout, TASK_KILLABLE); } EXPORT_SYMBOL(wait_for_completion_killable_timeout); /** * try_wait_for_completion - try to decrement a completion without blocking * @x: completion structure * * Returns: 0 if a decrement cannot be done without blocking * 1 if a decrement succeeded. * * If a completion is being used as a counting completion, * attempt to decrement the counter without blocking. This * enables us to avoid waiting if the resource the completion * is protecting is not available. */ bool try_wait_for_completion(struct completion *x) { unsigned long flags; int ret = 1; spin_lock_irqsave(&x->wait.lock, flags); if (!x->done) ret = 0; else x->done--; spin_unlock_irqrestore(&x->wait.lock, flags); return ret; } EXPORT_SYMBOL(try_wait_for_completion); /** * completion_done - Test to see if a completion has any waiters * @x: completion structure * * Returns: 0 if there are waiters (wait_for_completion() in progress) * 1 if there are no waiters. 
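 *
 * Usage sketch for the completion API above (illustrative; my_done is a
 * made-up name and not part of this file):
 *
 *	struct completion my_done;
 *
 *	waiter:
 *		init_completion(&my_done);
 *		(hand my_done to whatever will signal it, then)
 *		if (!wait_for_completion_timeout(&my_done,
 *						 msecs_to_jiffies(500)))
 *			return -ETIMEDOUT;
 *
 *	signaller (e.g. an interrupt handler or work item):
 *		complete(&my_done);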
* */ bool completion_done(struct completion *x) { unsigned long flags; int ret = 1; spin_lock_irqsave(&x->wait.lock, flags); if (!x->done) ret = 0; spin_unlock_irqrestore(&x->wait.lock, flags); return ret; } EXPORT_SYMBOL(completion_done); static long __sched sleep_on_common(wait_queue_head_t *q, int state, long timeout) { unsigned long flags; wait_queue_t wait; init_waitqueue_entry(&wait, current); __set_current_state(state); spin_lock_irqsave(&q->lock, flags); __add_wait_queue(q, &wait); spin_unlock(&q->lock); timeout = schedule_timeout(timeout); spin_lock_irq(&q->lock); __remove_wait_queue(q, &wait); spin_unlock_irqrestore(&q->lock, flags); return timeout; } void __sched interruptible_sleep_on(wait_queue_head_t *q) { sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } EXPORT_SYMBOL(interruptible_sleep_on); long __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) { return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout); } EXPORT_SYMBOL(interruptible_sleep_on_timeout); void __sched sleep_on(wait_queue_head_t *q) { sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } EXPORT_SYMBOL(sleep_on); long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) { return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout); } EXPORT_SYMBOL(sleep_on_timeout); #ifdef CONFIG_RT_MUTEXES /* * rt_mutex_setprio - set the current priority of a task * @p: task * @prio: prio value (kernel-internal form) * * This function changes the 'effective' priority of a task. It does * not touch ->normal_prio like __setscheduler(). * * Used by the rt_mutex code to implement priority inheritance logic. */ void rt_mutex_setprio(struct task_struct *p, int prio) { unsigned long flags = 0; int oldprio, on_rq, running; struct rq *rq; const struct sched_class *prev_class; BUG_ON(prio < 0 || prio > MAX_PRIO); rq = task_rq_lock(p, &flags); oldprio = p->prio; prev_class = p->sched_class; on_rq = p->se.on_rq; running = task_current(rq, p); if (on_rq) dequeue_task(rq, p, 0); if (running) p->sched_class->put_prev_task(rq, p); if (rt_prio(prio)) p->sched_class = &rt_sched_class; else p->sched_class = &fair_sched_class; p->prio = prio; if (running) p->sched_class->set_curr_task(rq); if (on_rq) { enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0); check_class_changed(rq, p, prev_class, oldprio, running); } task_rq_unlock(rq, &flags); } #endif void set_user_nice(struct task_struct *p, long nice) { int old_prio, delta, on_rq; unsigned long flags = 0; struct rq *rq; if (TASK_NICE(p) == nice || nice < -20 || nice > 19) return; /* * We have to be careful, if called from sys_setpriority(), * the task might be in the middle of scheduling on another CPU. 
*/ rq = task_rq_lock(p, &flags); /* * The RT priorities are set via sched_setscheduler(), but we still * allow the 'normal' nice value to be set - but as expected * it wont have any effect on scheduling until the task is * SCHED_FIFO/SCHED_RR: */ if (task_has_rt_policy(p)) { p->static_prio = NICE_TO_PRIO(nice); goto out_unlock; } on_rq = p->se.on_rq; if (on_rq) dequeue_task(rq, p, 0); p->static_prio = NICE_TO_PRIO(nice); set_load_weight(p); old_prio = p->prio; p->prio = effective_prio(p); delta = p->prio - old_prio; if (on_rq) { enqueue_task(rq, p, 0); /* * If the task increased its priority or is running and * lowered its priority, then reschedule its CPU: */ if (delta < 0 || (delta > 0 && task_running(rq, p))) resched_task(rq->curr); } out_unlock: task_rq_unlock(rq, &flags); } EXPORT_SYMBOL(set_user_nice); /* * can_nice - check if a task can reduce its nice value * @p: task * @nice: nice value */ int can_nice(const struct task_struct *p, const int nice) { /* convert nice value [19,-20] to rlimit style value [1,40] */ int nice_rlim = 20 - nice; return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || capable(CAP_SYS_NICE)); } #ifdef __ARCH_WANT_SYS_NICE /* * sys_nice - change the priority of the current process. * @increment: priority increment * * sys_setpriority is a more generic, but much slower function that * does similar things. */ SYSCALL_DEFINE1(nice, int, increment) { long nice, retval; /* * Setpriority might change our priority at the same moment. * We don't have to worry. Conceptually one call occurs first * and we have a single winner. */ if (increment < -40) increment = -40; if (increment > 40) increment = 40; nice = TASK_NICE(current) + increment; if (nice < -20) nice = -20; if (nice > 19) nice = 19; if (increment < 0 && !can_nice(current, nice)) return -EPERM; retval = security_task_setnice(current, nice); if (retval) return retval; set_user_nice(current, nice); return 0; } #endif /** * task_prio - return the priority value of a given task. * @p: the task in question. * * This is the priority value as seen by users in /proc. * RT tasks are offset by -200. Normal tasks are centered * around 0, value goes from -16 to +15. */ int task_prio(const struct task_struct *p) { return p->prio - MAX_RT_PRIO; } /** * task_nice - return the nice value of a given task. * @p: the task in question. */ int task_nice(const struct task_struct *p) { return TASK_NICE(p); } EXPORT_SYMBOL(task_nice); /** * idle_cpu - is a given cpu idle currently? * @cpu: the processor in question. */ int idle_cpu(int cpu) { return cpu_curr(cpu) == cpu_rq(cpu)->idle; } /** * idle_task - return the idle task for a given cpu. * @cpu: the processor in question. */ struct task_struct *idle_task(int cpu) { return cpu_rq(cpu)->idle; } /** * find_process_by_pid - find a process with a matching PID value. * @pid: the pid in question. */ static struct task_struct *find_process_by_pid(pid_t pid) { return pid ? find_task_by_vpid(pid) : current; } /* Actually do priority change: must hold rq lock. 
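 *
 * For reference (assuming the usual MAX_RT_PRIO/MAX_PRIO layout, which is
 * defined outside this excerpt): kernel-internal priorities run from 0 to
 * MAX_PRIO-1; 0..MAX_RT_PRIO-1 are realtime priorities, and the remaining
 * range maps nice values via NICE_TO_PRIO(), so nice -20 becomes MAX_RT_PRIO
 * and nice +19 becomes MAX_PRIO-1. Lower numbers mean higher priority.
 * __setscheduler() below stores the user-visible rt_priority separately and
 * derives p->prio through rt_mutex_getprio()/normal_prio().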
*/ static void __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) { BUG_ON(p->se.on_rq); p->policy = policy; p->rt_priority = prio; p->normal_prio = normal_prio(p); /* we are holding p->pi_lock already */ p->prio = rt_mutex_getprio(p); if (rt_prio(p->prio)) p->sched_class = &rt_sched_class; else p->sched_class = &fair_sched_class; set_load_weight(p); } /* * check the target process has a UID that matches the current process's */ static bool check_same_owner(struct task_struct *p) { const struct cred *cred = current_cred(), *pcred; bool match; rcu_read_lock(); pcred = __task_cred(p); match = (cred->euid == pcred->euid || cred->euid == pcred->uid); rcu_read_unlock(); return match; } static int __sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param, bool user) { int retval, oldprio, oldpolicy = -1, on_rq, running; unsigned long flags; const struct sched_class *prev_class; struct rq *rq; int reset_on_fork; /* may grab non-irq protected spin_locks */ BUG_ON(in_interrupt()); recheck: /* double check policy once rq lock held */ if (policy < 0) { reset_on_fork = p->sched_reset_on_fork; policy = oldpolicy = p->policy; } else { reset_on_fork = !!(policy & SCHED_RESET_ON_FORK); policy &= ~SCHED_RESET_ON_FORK; if (policy != SCHED_FIFO && policy != SCHED_RR && policy != SCHED_NORMAL && policy != SCHED_BATCH && policy != SCHED_IDLE) return -EINVAL; } /* * Valid priorities for SCHED_FIFO and SCHED_RR are * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, * SCHED_BATCH and SCHED_IDLE is 0. */ if (param->sched_priority < 0 || (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) || (!p->mm && param->sched_priority > MAX_RT_PRIO-1)) return -EINVAL; if (rt_policy(policy) != (param->sched_priority != 0)) return -EINVAL; /* * Allow unprivileged RT tasks to decrease priority: */ if (user && !capable(CAP_SYS_NICE)) { if (rt_policy(policy)) { unsigned long rlim_rtprio; if (!lock_task_sighand(p, &flags)) return -ESRCH; rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); unlock_task_sighand(p, &flags); /* can't set/change the rt policy */ if (policy != p->policy && !rlim_rtprio) return -EPERM; /* can't increase priority */ if (param->sched_priority > p->rt_priority && param->sched_priority > rlim_rtprio) return -EPERM; } /* * Like positive nice levels, dont allow tasks to * move out of SCHED_IDLE either: */ if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) return -EPERM; /* can't change other user's priorities */ if (!check_same_owner(p)) return -EPERM; /* Normal users shall not reset the sched_reset_on_fork flag */ if (p->sched_reset_on_fork && !reset_on_fork) return -EPERM; } if (user) { retval = security_task_setscheduler(p, policy, param); if (retval) return retval; } /* * make sure no PI-waiters arrive (or leave) while we are * changing the priority of the task: */ raw_spin_lock_irqsave(&p->pi_lock, flags); /* * To be able to change p->policy safely, the apropriate * runqueue lock must be held. */ rq = __task_rq_lock(p); #ifdef CONFIG_RT_GROUP_SCHED if (user) { /* * Do not allow realtime tasks into groups that have no runtime * assigned. 
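 *
 * (Aside, not from the original source: the request being validated in
 * __sched_setscheduler() typically originates from user space via the
 * glibc wrapper, roughly
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *	sched_setscheduler(pid, SCHED_FIFO, &sp);
 *
 * with sched_priority in 1..MAX_USER_RT_PRIO-1 for SCHED_FIFO/SCHED_RR and
 * 0 for the non-realtime policies, matching the checks above.)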
*/ if (rt_bandwidth_enabled() && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0) { __task_rq_unlock(rq); raw_spin_unlock_irqrestore(&p->pi_lock, flags); return -EPERM; } } #endif /* recheck policy now with rq lock held */ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { policy = oldpolicy = -1; __task_rq_unlock(rq); raw_spin_unlock_irqrestore(&p->pi_lock, flags); goto recheck; } on_rq = p->se.on_rq; running = task_current(rq, p); if (on_rq) deactivate_task(rq, p, 0); if (running) p->sched_class->put_prev_task(rq, p); p->sched_reset_on_fork = reset_on_fork; oldprio = p->prio; prev_class = p->sched_class; __setscheduler(rq, p, policy, param->sched_priority); if (running) p->sched_class->set_curr_task(rq); if (on_rq) { activate_task(rq, p, 0); check_class_changed(rq, p, prev_class, oldprio, running); } __task_rq_unlock(rq); raw_spin_unlock_irqrestore(&p->pi_lock, flags); rt_mutex_adjust_pi(p); return 0; } /** * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. * @p: the task in question. * @policy: new policy. * @param: structure containing the new RT priority. * * NOTE that the task may be already dead. */ int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param) { return __sched_setscheduler(p, policy, param, true); } EXPORT_SYMBOL_GPL(sched_setscheduler); /** * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. * @p: the task in question. * @policy: new policy. * @param: structure containing the new RT priority. * * Just like sched_setscheduler, only don't bother checking if the * current context has permission. For example, this is needed in * stop_machine(): we create temporary high priority worker threads, * but our caller might not have that capability. */ int sched_setscheduler_nocheck(struct task_struct *p, int policy, struct sched_param *param) { return __sched_setscheduler(p, policy, param, false); } static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) { struct sched_param lparam; struct task_struct *p; int retval; if (!param || pid < 0) return -EINVAL; if (copy_from_user(&lparam, param, sizeof(struct sched_param))) return -EFAULT; rcu_read_lock(); retval = -ESRCH; p = find_process_by_pid(pid); if (p != NULL) retval = sched_setscheduler(p, policy, &lparam); rcu_read_unlock(); return retval; } /** * sys_sched_setscheduler - set/change the scheduler policy and RT priority * @pid: the pid in question. * @policy: new policy. * @param: structure containing the new RT priority. */ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) { /* negative values for policy are not valid */ if (policy < 0) return -EINVAL; return do_sched_setscheduler(pid, policy, param); } /** * sys_sched_setparam - set/change the RT priority of a thread * @pid: the pid in question. * @param: structure containing the new RT priority. */ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) { return do_sched_setscheduler(pid, -1, param); } /** * sys_sched_getscheduler - get the policy (scheduling class) of a thread * @pid: the pid in question. */ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) { struct task_struct *p; int retval; if (pid < 0) return -EINVAL; retval = -ESRCH; rcu_read_lock(); p = find_process_by_pid(pid); if (p) { retval = security_task_getscheduler(p); if (!retval) retval = p->policy | (p->sched_reset_on_fork ? 
SCHED_RESET_ON_FORK : 0); } rcu_read_unlock(); return retval; } /** * sys_sched_getparam - get the RT priority of a thread * @pid: the pid in question. * @param: structure containing the RT priority. */ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) { struct sched_param lp; struct task_struct *p; int retval; if (!param || pid < 0) return -EINVAL; rcu_read_lock(); p = find_process_by_pid(pid); retval = -ESRCH; if (!p) goto out_unlock; retval = security_task_getscheduler(p); if (retval) goto out_unlock; lp.sched_priority = p->rt_priority; rcu_read_unlock(); /* * This one might sleep, we cannot do it with a spinlock held ... */ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; return retval; out_unlock: rcu_read_unlock(); return retval; } long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) { cpumask_var_t cpus_allowed, new_mask; struct task_struct *p; int retval; get_online_cpus(); rcu_read_lock(); p = find_process_by_pid(pid); if (!p) { rcu_read_unlock(); put_online_cpus(); return -ESRCH; } /* Prevent p going away */ get_task_struct(p); rcu_read_unlock(); if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { retval = -ENOMEM; goto out_put_task; } if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { retval = -ENOMEM; goto out_free_cpus_allowed; } retval = -EPERM; if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) goto out_unlock; retval = security_task_setscheduler(p, 0, NULL); if (retval) goto out_unlock; cpuset_cpus_allowed(p, cpus_allowed); cpumask_and(new_mask, in_mask, cpus_allowed); again: retval = set_cpus_allowed_ptr(p, new_mask); if (!retval) { cpuset_cpus_allowed(p, cpus_allowed); if (!cpumask_subset(new_mask, cpus_allowed)) { /* * We must have raced with a concurrent cpuset * update. Just reset the cpus_allowed to the * cpuset's cpus_allowed */ cpumask_copy(new_mask, cpus_allowed); goto again; } } out_unlock: free_cpumask_var(new_mask); out_free_cpus_allowed: free_cpumask_var(cpus_allowed); out_put_task: put_task_struct(p); put_online_cpus(); return retval; } static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, struct cpumask *new_mask) { if (len < cpumask_size()) cpumask_clear(new_mask); else if (len > cpumask_size()) len = cpumask_size(); return copy_from_user(new_mask, user_mask_ptr, len) ? 
-EFAULT : 0; } /** * sys_sched_setaffinity - set the cpu affinity of a process * @pid: pid of the process * @len: length in bytes of the bitmask pointed to by user_mask_ptr * @user_mask_ptr: user-space pointer to the new cpu mask */ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, unsigned long __user *, user_mask_ptr) { cpumask_var_t new_mask; int retval; if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) return -ENOMEM; retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); if (retval == 0) retval = sched_setaffinity(pid, new_mask); free_cpumask_var(new_mask); return retval; } long sched_getaffinity(pid_t pid, struct cpumask *mask) { struct task_struct *p; unsigned long flags = 0; struct rq *rq; int retval; get_online_cpus(); rcu_read_lock(); retval = -ESRCH; p = find_process_by_pid(pid); if (!p) goto out_unlock; retval = security_task_getscheduler(p); if (retval) goto out_unlock; rq = task_rq_lock(p, &flags); cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); task_rq_unlock(rq, &flags); out_unlock: rcu_read_unlock(); put_online_cpus(); return retval; } /** * sys_sched_getaffinity - get the cpu affinity of a process * @pid: pid of the process * @len: length in bytes of the bitmask pointed to by user_mask_ptr * @user_mask_ptr: user-space pointer to hold the current cpu mask */ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, unsigned long __user *, user_mask_ptr) { int ret; cpumask_var_t mask; if ((len * BITS_PER_BYTE) < nr_cpu_ids) return -EINVAL; if (len & (sizeof(unsigned long)-1)) return -EINVAL; if (!alloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; ret = sched_getaffinity(pid, mask); if (ret == 0) { size_t retlen = min_t(size_t, len, cpumask_size()); if (copy_to_user(user_mask_ptr, mask, retlen)) ret = -EFAULT; else ret = retlen; } free_cpumask_var(mask); return ret; } /** * sys_sched_yield - yield the current processor to other threads. * * This function yields the current CPU to other tasks. If there are no * other threads running on this CPU then this function will return. */ SYSCALL_DEFINE0(sched_yield) { struct rq *rq = this_rq_lock(); schedstat_inc(rq, yld_count); current->sched_class->yield_task(rq); /* * Since we are going to call schedule() anyway, there's * no need to preempt or enable interrupts: */ __release(rq->lock); spin_release(&rq->lock.dep_map, 1, _THIS_IP_); do_raw_spin_unlock(&rq->lock); preempt_enable_no_resched(); schedule(); return 0; } static inline int should_resched(void) { return need_resched() && !(preempt_count() & PREEMPT_ACTIVE); } static void __cond_resched(void) { add_preempt_count(PREEMPT_ACTIVE); schedule(); sub_preempt_count(PREEMPT_ACTIVE); } int __sched _cond_resched(void) { if (should_resched()) { __cond_resched(); return 1; } return 0; } EXPORT_SYMBOL(_cond_resched); /* * __cond_resched_lock() - if a reschedule is pending, drop the given lock, * call schedule, and on return reacquire the lock. * * This works OK both with and without CONFIG_PREEMPT. We do strange low-level * operations here to prevent schedule() from being called twice (once via * spin_unlock(), once by hand). 
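 *
 * As a usage note (illustrative, not from the original comment): the plain
 * cond_resched() wrapper around _cond_resched() above is the tool for
 * long-running loops in process context that hold no locks, e.g.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 *
 * where process_item() and nr_items are made-up names. The *_lock and
 * *_softirq variants below exist for callers that must first drop a
 * spinlock or re-enable softirqs before scheduling.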
*/ int __cond_resched_lock(spinlock_t *lock) { int resched = should_resched(); int ret = 0; lockdep_assert_held(lock); if (spin_needbreak(lock) || resched) { spin_unlock(lock); if (resched) __cond_resched(); else cpu_relax(); ret = 1; spin_lock(lock); } return ret; } EXPORT_SYMBOL(__cond_resched_lock); int __sched __cond_resched_softirq(void) { BUG_ON(!in_softirq()); if (should_resched()) { local_bh_enable(); __cond_resched(); local_bh_disable(); return 1; } return 0; } EXPORT_SYMBOL(__cond_resched_softirq); /** * yield - yield the current processor to other threads. * * This is a shortcut for kernel-space yielding - it marks the * thread runnable and calls sys_sched_yield(). */ void __sched yield(void) { set_current_state(TASK_RUNNING); sys_sched_yield(); } EXPORT_SYMBOL(yield); /* * This task is about to go to sleep on IO. Increment rq->nr_iowait so * that process accounting knows that this is a task in IO wait state. */ void __sched io_schedule(void) { struct rq *rq = raw_rq(); delayacct_blkio_start(); atomic_inc(&rq->nr_iowait); current->in_iowait = 1; schedule(); current->in_iowait = 0; atomic_dec(&rq->nr_iowait); delayacct_blkio_end(); } EXPORT_SYMBOL(io_schedule); long __sched io_schedule_timeout(long timeout) { struct rq *rq = raw_rq(); long ret; delayacct_blkio_start(); atomic_inc(&rq->nr_iowait); current->in_iowait = 1; ret = schedule_timeout(timeout); current->in_iowait = 0; atomic_dec(&rq->nr_iowait); delayacct_blkio_end(); return ret; } /** * sys_sched_get_priority_max - return maximum RT priority. * @policy: scheduling class. * * this syscall returns the maximum rt_priority that can be used * by a given scheduling class. */ SYSCALL_DEFINE1(sched_get_priority_max, int, policy) { int ret = -EINVAL; switch (policy) { case SCHED_FIFO: case SCHED_RR: ret = MAX_USER_RT_PRIO-1; break; case SCHED_NORMAL: case SCHED_BATCH: case SCHED_IDLE: ret = 0; break; } return ret; } /** * sys_sched_get_priority_min - return minimum RT priority. * @policy: scheduling class. * * this syscall returns the minimum rt_priority that can be used * by a given scheduling class. */ SYSCALL_DEFINE1(sched_get_priority_min, int, policy) { int ret = -EINVAL; switch (policy) { case SCHED_FIFO: case SCHED_RR: ret = 1; break; case SCHED_NORMAL: case SCHED_BATCH: case SCHED_IDLE: ret = 0; } return ret; } /** * sys_sched_rr_get_interval - return the default timeslice of a process. * @pid: pid of the process. * @interval: userspace pointer to the timeslice value. * * this syscall writes the default timeslice value of a given process * into the user-space timespec buffer. A value of '0' means infinity. */ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, struct timespec __user *, interval) { struct task_struct *p; unsigned int time_slice; unsigned long flags = 0; struct rq *rq; int retval; struct timespec t; if (pid < 0) return -EINVAL; retval = -ESRCH; rcu_read_lock(); p = find_process_by_pid(pid); if (!p) goto out_unlock; retval = security_task_getscheduler(p); if (retval) goto out_unlock; rq = task_rq_lock(p, &flags); time_slice = p->sched_class->get_rr_interval(rq, p); task_rq_unlock(rq, &flags); rcu_read_unlock(); jiffies_to_timespec(time_slice, &t); retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; return retval; out_unlock: rcu_read_unlock(); return retval; } static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; void sched_show_task(struct task_struct *p) { unsigned long free = 0; unsigned state; state = p->state ? 
__ffs(p->state) + 1 : 0; printk(KERN_INFO "%-15.15s %c", p->comm, state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); #if BITS_PER_LONG == 32 if (state == TASK_RUNNING) printk(KERN_CONT " running "); else printk(KERN_CONT " %08lx ", thread_saved_pc(p)); #else if (state == TASK_RUNNING) printk(KERN_CONT " running task "); else printk(KERN_CONT " %016lx ", thread_saved_pc(p)); #endif #ifdef CONFIG_DEBUG_STACK_USAGE free = stack_not_used(p); #endif printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, task_pid_nr(p), task_pid_nr(p->real_parent), (unsigned long)task_thread_info(p)->flags); show_stack(p, NULL); } void show_state_filter(unsigned long state_filter) { struct task_struct *g, *p; #if BITS_PER_LONG == 32 printk(KERN_INFO " task PC stack pid father\n"); #else printk(KERN_INFO " task PC stack pid father\n"); #endif read_lock(&tasklist_lock); do_each_thread(g, p) { /* * reset the NMI-timeout, listing all files on a slow * console might take alot of time: */ touch_nmi_watchdog(); if (!state_filter || (p->state & state_filter)) sched_show_task(p); } while_each_thread(g, p); touch_all_softlockup_watchdogs(); #ifdef CONFIG_SCHED_DEBUG sysrq_sched_debug_show(); #endif read_unlock(&tasklist_lock); /* * Only show locks if all tasks are dumped: */ if (!state_filter) debug_show_all_locks(); } void __cpuinit init_idle_bootup_task(struct task_struct *idle) { idle->sched_class = &idle_sched_class; } /** * init_idle - set up an idle thread for a given CPU * @idle: task in question * @cpu: cpu the idle task belongs to * * NOTE: this function does not set the idle thread's NEED_RESCHED * flag, to make booting more robust. */ void __cpuinit init_idle(struct task_struct *idle, int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long flags; raw_spin_lock_irqsave(&rq->lock, flags); __sched_fork(idle); idle->state = TASK_RUNNING; idle->se.exec_start = sched_clock(); cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); __set_task_cpu(idle, cpu); rq->curr = rq->idle = idle; #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) idle->oncpu = 1; #endif raw_spin_unlock_irqrestore(&rq->lock, flags); /* Set the preempt count _outside_ the spinlocks! */ #if defined(CONFIG_PREEMPT) task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0); #else task_thread_info(idle)->preempt_count = 0; #endif /* * The idle tasks have their own, simple scheduling class: */ idle->sched_class = &idle_sched_class; ftrace_graph_init_task(idle); } /* * In a system that switches off the HZ timer nohz_cpu_mask * indicates which cpus entered this state. This is used * in the rcu update to wait only for active cpus. For system * which do not switch off the HZ timer nohz_cpu_mask should * always be CPU_BITS_NONE. */ cpumask_var_t nohz_cpu_mask; /* * Increase the granularity value when there are more CPUs, * because with more CPUs the 'effective latency' as visible * to users decreases. But the relationship is not linear, * so pick a second-best guess by going with the log2 of the * number of CPUs. 
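 *
 * Worked example (illustrative): with 8 or more online CPUs and the default
 * logarithmic scaling, get_update_sysctl_factor() below clamps the CPU count
 * to 8 and yields a factor of 1 + ilog2(8) = 4, so e.g. sysctl_sched_latency
 * is set to four times its normalized value.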
* * This idea comes from the SD scheduler of Con Kolivas: */ static int get_update_sysctl_factor(void) { unsigned int cpus = min_t(int, num_online_cpus(), 8); unsigned int factor; switch (sysctl_sched_tunable_scaling) { case SCHED_TUNABLESCALING_NONE: factor = 1; break; case SCHED_TUNABLESCALING_LINEAR: factor = cpus; break; case SCHED_TUNABLESCALING_LOG: default: factor = 1 + ilog2(cpus); break; } return factor; } static void update_sysctl(void) { unsigned int factor = get_update_sysctl_factor(); #define SET_SYSCTL(name) \ (sysctl_##name = (factor) * normalized_sysctl_##name) SET_SYSCTL(sched_min_granularity); SET_SYSCTL(sched_latency); SET_SYSCTL(sched_wakeup_granularity); SET_SYSCTL(sched_shares_ratelimit); #undef SET_SYSCTL } static inline void sched_init_granularity(void) { update_sysctl(); } #ifdef CONFIG_SMP /* * This is how migration works: * * 1) we invoke migration_cpu_stop() on the target CPU using * stop_one_cpu(). * 2) stopper starts to run (implicitly forcing the migrated thread * off the CPU) * 3) it checks whether the migrated task is still in the wrong runqueue. * 4) if it's in the wrong runqueue then the migration thread removes * it and puts it into the right queue. * 5) stopper completes and stop_one_cpu() returns and the migration * is done. */ /* * Change a given task's CPU affinity. Migrate the thread to a * proper CPU and schedule it away if the CPU it's executing on * is removed from the allowed bitmask. * * NOTE: the caller must have a valid reference to the task, the * task must not exit() & deallocate itself prematurely. The * call is not atomic; no spinlocks may be held. */ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) { unsigned long flags; struct rq *rq; unsigned int dest_cpu; int ret = 0; /* * Serialize against TASK_WAKING so that ttwu() and wunt() can * drop the rq->lock and still rely on ->cpus_allowed. */ again: while (task_is_waking(p)) cpu_relax(); rq = task_rq_lock(p, &flags); if (task_is_waking(p)) { task_rq_unlock(rq, &flags); goto again; } if (!cpumask_intersects(new_mask, cpu_active_mask)) { ret = -EINVAL; goto out; } if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && !cpumask_equal(&p->cpus_allowed, new_mask))) { ret = -EINVAL; goto out; } if (p->sched_class->set_cpus_allowed) p->sched_class->set_cpus_allowed(p, new_mask); else { cpumask_copy(&p->cpus_allowed, new_mask); p->rt.nr_cpus_allowed = cpumask_weight(new_mask); } /* Can the task run on the task's current CPU? If so, we're done */ if (cpumask_test_cpu(task_cpu(p), new_mask)) goto out; dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); if (migrate_task(p, dest_cpu)) { struct migration_arg arg = { p, dest_cpu }; /* Need help from migration thread: drop lock and wait. */ task_rq_unlock(rq, &flags); stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); tlb_migrate_finish(p->mm); return 0; } out: task_rq_unlock(rq, &flags); return ret; } EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); /* * Move (not current) task off this cpu, onto dest cpu. We're doing * this because either it can't run here any more (set_cpus_allowed() * away from this CPU, or CPU going down), or because we're * attempting to rebalance this task on exec (sched_exec). * * So we race with normal scheduler movements, but that's OK, as long * as the task is no longer on this CPU. * * Returns non-zero if task was successfully migrated. 
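 *
 * For reference (illustrative, not part of the original comment): the
 * exported entry point for affinity changes is set_cpus_allowed_ptr()
 * above, typically invoked as
 *
 *	set_cpus_allowed_ptr(tsk, cpumask_of(target_cpu));
 *
 * with tsk and target_cpu standing in for a caller's task and CPU number;
 * __migrate_task() below is the low-level helper that actually moves an
 * already-queued task between runqueues.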
*/ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) { struct rq *rq_dest, *rq_src; int ret = 0; if (unlikely(!cpu_active(dest_cpu))) return ret; rq_src = cpu_rq(src_cpu); rq_dest = cpu_rq(dest_cpu); double_rq_lock(rq_src, rq_dest); /* Already moved. */ if (task_cpu(p) != src_cpu) goto done; /* Affinity changed (again). */ if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) goto fail; /* * If we're not on a rq, the next wake-up will ensure we're * placed properly. */ if (p->se.on_rq) { deactivate_task(rq_src, p, 0); set_task_cpu(p, dest_cpu); activate_task(rq_dest, p, 0); check_preempt_curr(rq_dest, p, 0); } done: ret = 1; fail: double_rq_unlock(rq_src, rq_dest); return ret; } /* * migration_cpu_stop - this will be executed by a highprio stopper thread * and performs thread migration by bumping thread off CPU then * 'pushing' onto another runqueue. */ static int migration_cpu_stop(void *data) { struct migration_arg *arg = data; /* * The original target cpu might have gone down and we might * be on another cpu but it doesn't matter. */ local_irq_disable(); __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu); local_irq_enable(); return 0; } #ifdef CONFIG_HOTPLUG_CPU /* * Figure out where task on dead CPU should go, use force if necessary. */ void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) { struct rq *rq = cpu_rq(dead_cpu); int needs_cpu, uninitialized_var(dest_cpu); unsigned long flags; local_irq_save(flags); raw_spin_lock(&rq->lock); needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING); if (needs_cpu) dest_cpu = select_fallback_rq(dead_cpu, p); raw_spin_unlock(&rq->lock); /* * It can only fail if we race with set_cpus_allowed(), * in the racer should migrate the task anyway. */ if (needs_cpu) __migrate_task(p, dead_cpu, dest_cpu); local_irq_restore(flags); } /* * While a dead CPU has no uninterruptible tasks queued at this point, * it might still have a nonzero ->nr_uninterruptible counter, because * for performance reasons the counter is not stricly tracking tasks to * their home CPUs. So we just add the counter to another CPU's counter, * to keep the global sum constant after CPU-down: */ static void migrate_nr_uninterruptible(struct rq *rq_src) { struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask)); unsigned long flags; local_irq_save(flags); double_rq_lock(rq_src, rq_dest); rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible; rq_src->nr_uninterruptible = 0; double_rq_unlock(rq_src, rq_dest); local_irq_restore(flags); } /* Run through task list and migrate tasks from the dead cpu. */ static void migrate_live_tasks(int src_cpu) { struct task_struct *p, *t; read_lock(&tasklist_lock); do_each_thread(t, p) { if (p == current) continue; if (task_cpu(p) == src_cpu) move_task_off_dead_cpu(src_cpu, p); } while_each_thread(t, p); read_unlock(&tasklist_lock); } /* * Schedules idle task to be the next runnable task on current CPU. * It does so by boosting its priority to highest possible. * Used by CPU offline code. */ void sched_idle_next(void) { int this_cpu = smp_processor_id(); struct rq *rq = cpu_rq(this_cpu); struct task_struct *p = rq->idle; unsigned long flags; /* cpu has to be offline */ BUG_ON(cpu_online(this_cpu)); /* * Strictly not necessary since rest of the CPUs are stopped by now * and interrupts disabled on the current cpu. 
*/ raw_spin_lock_irqsave(&rq->lock, flags); __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); activate_task(rq, p, 0); raw_spin_unlock_irqrestore(&rq->lock, flags); } /* * Ensures that the idle task is using init_mm right before its cpu goes * offline. */ void idle_task_exit(void) { struct mm_struct *mm = current->active_mm; BUG_ON(cpu_online(smp_processor_id())); if (mm != &init_mm) switch_mm(mm, &init_mm, current); mmdrop(mm); } /* called under rq->lock with disabled interrupts */ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) { struct rq *rq = cpu_rq(dead_cpu); /* Must be exiting, otherwise would be on tasklist. */ BUG_ON(!p->exit_state); /* Cannot have done final schedule yet: would have vanished. */ BUG_ON(p->state == TASK_DEAD); get_task_struct(p); /* * Drop lock around migration; if someone else moves it, * that's OK. No task can be added to this CPU, so iteration is * fine. */ raw_spin_unlock_irq(&rq->lock); move_task_off_dead_cpu(dead_cpu, p); raw_spin_lock_irq(&rq->lock); put_task_struct(p); } /* release_task() removes task from tasklist, so we won't find dead tasks. */ static void migrate_dead_tasks(unsigned int dead_cpu) { struct rq *rq = cpu_rq(dead_cpu); struct task_struct *next; for ( ; ; ) { if (!rq->nr_running) break; next = pick_next_task(rq); if (!next) break; next->sched_class->put_prev_task(rq, next); migrate_dead(dead_cpu, next); } } /* * remove the tasks which were accounted by rq from calc_load_tasks. */ static void calc_global_load_remove(struct rq *rq) { atomic_long_sub(rq->calc_load_active, &calc_load_tasks); rq->calc_load_active = 0; } #endif /* CONFIG_HOTPLUG_CPU */ #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) static struct ctl_table sd_ctl_dir[] = { { .procname = "sched_domain", .mode = 0555, }, {} }; static struct ctl_table sd_ctl_root[] = { { .procname = "kernel", .mode = 0555, .child = sd_ctl_dir, }, {} }; static struct ctl_table *sd_alloc_ctl_entry(int n) { struct ctl_table *entry = kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); return entry; } static void sd_free_ctl_entry(struct ctl_table **tablep) { struct ctl_table *entry; /* * In the intermediate directories, both the child directory and * procname are dynamically allocated and could fail but the mode * will always be set. In the lowest directory the names are * static strings and all have proc handlers. 
*/ for (entry = *tablep; entry->mode; entry++) { if (entry->child) sd_free_ctl_entry(&entry->child); if (entry->proc_handler == NULL) kfree(entry->procname); } kfree(*tablep); *tablep = NULL; } static void set_table_entry(struct ctl_table *entry, const char *procname, void *data, int maxlen, mode_t mode, proc_handler *proc_handler) { entry->procname = procname; entry->data = data; entry->maxlen = maxlen; entry->mode = mode; entry->proc_handler = proc_handler; } static struct ctl_table * sd_alloc_ctl_domain_table(struct sched_domain *sd) { struct ctl_table *table = sd_alloc_ctl_entry(13); if (table == NULL) return NULL; set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax); set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax); set_table_entry(&table[2], "busy_idx", &sd->busy_idx, sizeof(int), 0644, proc_dointvec_minmax); set_table_entry(&table[3], "idle_idx", &sd->idle_idx, sizeof(int), 0644, proc_dointvec_minmax); set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, sizeof(int), 0644, proc_dointvec_minmax); set_table_entry(&table[5], "wake_idx", &sd->wake_idx, sizeof(int), 0644, proc_dointvec_minmax); set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, sizeof(int), 0644, proc_dointvec_minmax); set_table_entry(&table[7], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax); set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax); set_table_entry(&table[9], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax); set_table_entry(&table[10], "flags", &sd->flags, sizeof(int), 0644, proc_dointvec_minmax); set_table_entry(&table[11], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring); /* &table[12] is terminator */ return table; } static ctl_table *sd_alloc_ctl_cpu_table(int cpu) { struct ctl_table *entry, *table; struct sched_domain *sd; int domain_num = 0, i; char buf[32]; for_each_domain(cpu, sd) domain_num++; entry = table = sd_alloc_ctl_entry(domain_num + 1); if (table == NULL) return NULL; i = 0; for_each_domain(cpu, sd) { snprintf(buf, 32, "domain%d", i); entry->procname = kstrdup(buf, GFP_KERNEL); entry->mode = 0555; entry->child = sd_alloc_ctl_domain_table(sd); entry++; i++; } return table; } static struct ctl_table_header *sd_sysctl_header; static void register_sched_domain_sysctl(void) { int i, cpu_num = num_possible_cpus(); struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); char buf[32]; WARN_ON(sd_ctl_dir[0].child); sd_ctl_dir[0].child = entry; if (entry == NULL) return; for_each_possible_cpu(i) { snprintf(buf, 32, "cpu%d", i); entry->procname = kstrdup(buf, GFP_KERNEL); entry->mode = 0555; entry->child = sd_alloc_ctl_cpu_table(i); entry++; } WARN_ON(sd_sysctl_header); sd_sysctl_header = register_sysctl_table(sd_ctl_root); } /* may be called multiple times per register */ static void unregister_sched_domain_sysctl(void) { if (sd_sysctl_header) unregister_sysctl_table(sd_sysctl_header); sd_sysctl_header = NULL; if (sd_ctl_dir[0].child) sd_free_ctl_entry(&sd_ctl_dir[0].child); } #else static void register_sched_domain_sysctl(void) { } static void unregister_sched_domain_sysctl(void) { } #endif static void set_rq_online(struct rq *rq) { if (!rq->online) { const struct sched_class *class; cpumask_set_cpu(rq->cpu, rq->rd->online); rq->online = 1; for_each_class(class) { if (class->rq_online) class->rq_online(rq); } } } static void set_rq_offline(struct rq 
*rq) { if (rq->online) { const struct sched_class *class; for_each_class(class) { if (class->rq_offline) class->rq_offline(rq); } cpumask_clear_cpu(rq->cpu, rq->rd->online); rq->online = 0; } } /* * migration_call - callback that gets triggered when a CPU is added. * Here we can start up the necessary migration thread for the new CPU. */ static int __cpuinit migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) { int cpu = (long)hcpu; unsigned long flags; struct rq *rq = cpu_rq(cpu); switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: rq->calc_load_update = calc_load_update; break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: /* Update our root-domain */ raw_spin_lock_irqsave(&rq->lock, flags); if (rq->rd) { BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); set_rq_online(rq); } raw_spin_unlock_irqrestore(&rq->lock, flags); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_DEAD: case CPU_DEAD_FROZEN: migrate_live_tasks(cpu); /* Idle task back to normal (off runqueue, low prio) */ raw_spin_lock_irq(&rq->lock); deactivate_task(rq, rq->idle, 0); __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); rq->idle->sched_class = &idle_sched_class; migrate_dead_tasks(cpu); raw_spin_unlock_irq(&rq->lock); migrate_nr_uninterruptible(rq); BUG_ON(rq->nr_running != 0); calc_global_load_remove(rq); break; case CPU_DYING: case CPU_DYING_FROZEN: /* Update our root-domain */ raw_spin_lock_irqsave(&rq->lock, flags); if (rq->rd) { BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); set_rq_offline(rq); } raw_spin_unlock_irqrestore(&rq->lock, flags); break; #endif } return NOTIFY_OK; } /* * Register at high priority so that task migration (migrate_all_tasks) * happens before everything else. This has to be lower priority than * the notifier in the perf_event subsystem, though. 
*/ static struct notifier_block __cpuinitdata migration_notifier = { .notifier_call = migration_call, .priority = CPU_PRI_MIGRATION, }; static int __cpuinit sched_cpu_active(struct notifier_block *nfb, unsigned long action, void *hcpu) { switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: case CPU_DOWN_FAILED: set_cpu_active((long)hcpu, true); return NOTIFY_OK; default: return NOTIFY_DONE; } } static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb, unsigned long action, void *hcpu) { switch (action & ~CPU_TASKS_FROZEN) { case CPU_DOWN_PREPARE: set_cpu_active((long)hcpu, false); return NOTIFY_OK; default: return NOTIFY_DONE; } } static int __init migration_init(void) { void *cpu = (void *)(long)smp_processor_id(); int err; /* Initialize migration for the boot CPU */ err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); BUG_ON(err == NOTIFY_BAD); migration_call(&migration_notifier, CPU_ONLINE, cpu); register_cpu_notifier(&migration_notifier); /* Register cpu active notifiers */ cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); return 0; } early_initcall(migration_init); #endif #ifdef CONFIG_SMP #ifdef CONFIG_SCHED_DEBUG static __read_mostly int sched_domain_debug_enabled; static int __init sched_domain_debug_setup(char *str) { sched_domain_debug_enabled = 1; return 0; } early_param("sched_debug", sched_domain_debug_setup); static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, struct cpumask *groupmask) { struct sched_group *group = sd->groups; char str[256]; cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); cpumask_clear(groupmask); printk(KERN_DEBUG "%*s domain %d: ", level, "", level); if (!(sd->flags & SD_LOAD_BALANCE)) { printk("does not load-balance\n"); if (sd->parent) printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" " has parent"); return -1; } printk(KERN_CONT "span %s level %s\n", str, sd->name); if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { printk(KERN_ERR "ERROR: domain->span does not contain " "CPU%d\n", cpu); } if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { printk(KERN_ERR "ERROR: domain->groups does not contain" " CPU%d\n", cpu); } printk(KERN_DEBUG "%*s groups:", level + 1, ""); do { if (!group) { printk("\n"); printk(KERN_ERR "ERROR: group is NULL\n"); break; } if (!group->cpu_power) { printk(KERN_CONT "\n"); printk(KERN_ERR "ERROR: domain->cpu_power not " "set\n"); break; } if (!cpumask_weight(sched_group_cpus(group))) { printk(KERN_CONT "\n"); printk(KERN_ERR "ERROR: empty group\n"); break; } if (cpumask_intersects(groupmask, sched_group_cpus(group))) { printk(KERN_CONT "\n"); printk(KERN_ERR "ERROR: repeated CPUs\n"); break; } cpumask_or(groupmask, groupmask, sched_group_cpus(group)); cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); printk(KERN_CONT " %s", str); if (group->cpu_power != SCHED_LOAD_SCALE) { printk(KERN_CONT " (cpu_power = %d)", group->cpu_power); } group = group->next; } while (group != sd->groups); printk(KERN_CONT "\n"); if (!cpumask_equal(sched_domain_span(sd), groupmask)) printk(KERN_ERR "ERROR: groups don't span domain->span\n"); if (sd->parent && !cpumask_subset(groupmask, sched_domain_span(sd->parent))) printk(KERN_ERR "ERROR: parent span is not a superset " "of domain->span\n"); return 0; } static void sched_domain_debug(struct sched_domain *sd, int cpu) { cpumask_var_t groupmask; int level = 0; if (!sched_domain_debug_enabled) return; if (!sd) { printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", 
cpu); return; } printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) { printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); return; } for (;;) { if (sched_domain_debug_one(sd, cpu, level, groupmask)) break; level++; sd = sd->parent; if (!sd) break; } free_cpumask_var(groupmask); } #else /* !CONFIG_SCHED_DEBUG */ # define sched_domain_debug(sd, cpu) do { } while (0) #endif /* CONFIG_SCHED_DEBUG */ static int sd_degenerate(struct sched_domain *sd) { if (cpumask_weight(sched_domain_span(sd)) == 1) return 1; /* Following flags need at least 2 groups */ if (sd->flags & (SD_LOAD_BALANCE | SD_BALANCE_NEWIDLE | SD_BALANCE_FORK | SD_BALANCE_EXEC | SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)) { if (sd->groups != sd->groups->next) return 0; } /* Following flags don't use groups */ if (sd->flags & (SD_WAKE_AFFINE)) return 0; return 1; } static int sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) { unsigned long cflags = sd->flags, pflags = parent->flags; if (sd_degenerate(parent)) return 1; if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) return 0; /* Flags needing groups don't count if only 1 group in parent */ if (parent->groups == parent->groups->next) { pflags &= ~(SD_LOAD_BALANCE | SD_BALANCE_NEWIDLE | SD_BALANCE_FORK | SD_BALANCE_EXEC | SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES); if (nr_node_ids == 1) pflags &= ~SD_SERIALIZE; } if (~cflags & pflags) return 0; return 1; } static void free_rootdomain(struct root_domain *rd) { synchronize_sched(); cpupri_cleanup(&rd->cpupri); free_cpumask_var(rd->rto_mask); free_cpumask_var(rd->online); free_cpumask_var(rd->span); kfree(rd); } static void rq_attach_root(struct rq *rq, struct root_domain *rd) { struct root_domain *old_rd = NULL; unsigned long flags; raw_spin_lock_irqsave(&rq->lock, flags); if (rq->rd) { old_rd = rq->rd; if (cpumask_test_cpu(rq->cpu, old_rd->online)) set_rq_offline(rq); cpumask_clear_cpu(rq->cpu, old_rd->span); /* * If we dont want to free the old_rt yet then * set old_rd to NULL to skip the freeing later * in this function: */ if (!atomic_dec_and_test(&old_rd->refcount)) old_rd = NULL; } atomic_inc(&rd->refcount); rq->rd = rd; cpumask_set_cpu(rq->cpu, rd->span); if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) set_rq_online(rq); raw_spin_unlock_irqrestore(&rq->lock, flags); if (old_rd) free_rootdomain(old_rd); } static int init_rootdomain(struct root_domain *rd, bool bootmem) { gfp_t gfp = GFP_KERNEL; memset(rd, 0, sizeof(*rd)); if (bootmem) gfp = GFP_NOWAIT; if (!alloc_cpumask_var(&rd->span, gfp)) goto out; if (!alloc_cpumask_var(&rd->online, gfp)) goto free_span; if (!alloc_cpumask_var(&rd->rto_mask, gfp)) goto free_online; if (cpupri_init(&rd->cpupri, bootmem) != 0) goto free_rto_mask; return 0; free_rto_mask: free_cpumask_var(rd->rto_mask); free_online: free_cpumask_var(rd->online); free_span: free_cpumask_var(rd->span); out: return -ENOMEM; } static void init_defrootdomain(void) { init_rootdomain(&def_root_domain, true); atomic_set(&def_root_domain.refcount, 1); } static struct root_domain *alloc_rootdomain(void) { struct root_domain *rd; rd = kmalloc(sizeof(*rd), GFP_KERNEL); if (!rd) return NULL; if (init_rootdomain(rd, false) != 0) { kfree(rd); return NULL; } return rd; } /* * Attach the domain 'sd' to 'cpu' as its base domain. Callers must * hold the hotplug lock. 
*/ static void cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) { struct rq *rq = cpu_rq(cpu); struct sched_domain *tmp; for (tmp = sd; tmp; tmp = tmp->parent) tmp->span_weight = cpumask_weight(sched_domain_span(tmp)); /* Remove the sched domains which do not contribute to scheduling. */ for (tmp = sd; tmp; ) { struct sched_domain *parent = tmp->parent; if (!parent) break; if (sd_parent_degenerate(tmp, parent)) { tmp->parent = parent->parent; if (parent->parent) parent->parent->child = tmp; } else tmp = tmp->parent; } if (sd && sd_degenerate(sd)) { sd = sd->parent; if (sd) sd->child = NULL; } sched_domain_debug(sd, cpu); rq_attach_root(rq, rd); rcu_assign_pointer(rq->sd, sd); } /* cpus with isolated domains */ static cpumask_var_t cpu_isolated_map; /* Setup the mask of cpus configured for isolated domains */ static int __init isolated_cpu_setup(char *str) { alloc_bootmem_cpumask_var(&cpu_isolated_map); cpulist_parse(str, cpu_isolated_map); return 1; } __setup("isolcpus=", isolated_cpu_setup); /* * init_sched_build_groups takes the cpumask we wish to span, and a pointer * to a function which identifies what group(along with sched group) a CPU * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids * (due to the fact that we keep track of groups covered with a struct cpumask). * * init_sched_build_groups will build a circular linked list of the groups * covered by the given span, and will set each group's ->cpumask correctly, * and ->cpu_power to 0. */ static void init_sched_build_groups(const struct cpumask *span, const struct cpumask *cpu_map, int (*group_fn)(int cpu, const struct cpumask *cpu_map, struct sched_group **sg, struct cpumask *tmpmask), struct cpumask *covered, struct cpumask *tmpmask) { struct sched_group *first = NULL, *last = NULL; int i; cpumask_clear(covered); for_each_cpu(i, span) { struct sched_group *sg; int group = group_fn(i, cpu_map, &sg, tmpmask); int j; if (cpumask_test_cpu(i, covered)) continue; cpumask_clear(sched_group_cpus(sg)); sg->cpu_power = 0; for_each_cpu(j, span) { if (group_fn(j, cpu_map, NULL, tmpmask) != group) continue; cpumask_set_cpu(j, covered); cpumask_set_cpu(j, sched_group_cpus(sg)); } if (!first) first = sg; if (last) last->next = sg; last = sg; } last->next = first; } #define SD_NODES_PER_DOMAIN 16 #ifdef CONFIG_NUMA /** * find_next_best_node - find the next node to include in a sched_domain * @node: node whose sched_domain we're building * @used_nodes: nodes already in the sched_domain * * Find the next node to include in a given scheduling domain. Simply * finds the closest node not already in the @used_nodes map. * * Should use nodemask_t. */ static int find_next_best_node(int node, nodemask_t *used_nodes) { int i, n, val, min_val, best_node = 0; min_val = INT_MAX; for (i = 0; i < nr_node_ids; i++) { /* Start at @node */ n = (node + i) % nr_node_ids; if (!nr_cpus_node(n)) continue; /* Skip already used nodes */ if (node_isset(n, *used_nodes)) continue; /* Simple min distance search */ val = node_distance(node, n); if (val < min_val) { min_val = val; best_node = n; } } node_set(best_node, *used_nodes); return best_node; } /** * sched_domain_node_span - get a cpumask for a node's sched_domain * @node: node whose cpumask we're constructing * @span: resulting cpumask * * Given a node, construct a good cpumask for its sched_domain to span. It * should be one that prevents unnecessary balancing, but also spreads tasks * out optimally. 
*/ static void sched_domain_node_span(int node, struct cpumask *span) { nodemask_t used_nodes; int i; cpumask_clear(span); nodes_clear(used_nodes); cpumask_or(span, span, cpumask_of_node(node)); node_set(node, used_nodes); for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { int next_node = find_next_best_node(node, &used_nodes); cpumask_or(span, span, cpumask_of_node(next_node)); } } #endif /* CONFIG_NUMA */ int sched_smt_power_savings = 0, sched_mc_power_savings = 0; /* * The cpus mask in sched_group and sched_domain hangs off the end. * * ( See the comments in include/linux/sched.h:struct sched_group * and struct sched_domain. ) */ struct static_sched_group { struct sched_group sg; DECLARE_BITMAP(cpus, CONFIG_NR_CPUS); }; struct static_sched_domain { struct sched_domain sd; DECLARE_BITMAP(span, CONFIG_NR_CPUS); }; struct s_data { #ifdef CONFIG_NUMA int sd_allnodes; cpumask_var_t domainspan; cpumask_var_t covered; cpumask_var_t notcovered; #endif cpumask_var_t nodemask; cpumask_var_t this_sibling_map; cpumask_var_t this_core_map; cpumask_var_t send_covered; cpumask_var_t tmpmask; struct sched_group **sched_group_nodes; struct root_domain *rd; }; enum s_alloc { sa_sched_groups = 0, sa_rootdomain, sa_tmpmask, sa_send_covered, sa_this_core_map, sa_this_sibling_map, sa_nodemask, sa_sched_group_nodes, #ifdef CONFIG_NUMA sa_notcovered, sa_covered, sa_domainspan, #endif sa_none, }; /* * SMT sched-domains: */ #ifdef CONFIG_SCHED_SMT static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); static DEFINE_PER_CPU(struct static_sched_group, sched_groups); static int cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, struct sched_group **sg, struct cpumask *unused) { if (sg) *sg = &per_cpu(sched_groups, cpu).sg; return cpu; } #endif /* CONFIG_SCHED_SMT */ /* * multi-core sched-domains: */ #ifdef CONFIG_SCHED_MC static DEFINE_PER_CPU(struct static_sched_domain, core_domains); static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); #endif /* CONFIG_SCHED_MC */ #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) static int cpu_to_core_group(int cpu, const struct cpumask *cpu_map, struct sched_group **sg, struct cpumask *mask) { int group; cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); group = cpumask_first(mask); if (sg) *sg = &per_cpu(sched_group_core, group).sg; return group; } #elif defined(CONFIG_SCHED_MC) static int cpu_to_core_group(int cpu, const struct cpumask *cpu_map, struct sched_group **sg, struct cpumask *unused) { if (sg) *sg = &per_cpu(sched_group_core, cpu).sg; return cpu; } #endif static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); static int cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, struct sched_group **sg, struct cpumask *mask) { int group; #ifdef CONFIG_SCHED_MC cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); group = cpumask_first(mask); #elif defined(CONFIG_SCHED_SMT) cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); group = cpumask_first(mask); #else group = cpu; #endif if (sg) *sg = &per_cpu(sched_group_phys, group).sg; return group; } #ifdef CONFIG_NUMA /* * The init_sched_build_groups can't handle what we want to do with node * groups, so roll our own. Now each node has its own list of groups which * gets dynamically allocated.
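* * build_numa_sched_groups() below allocates one group for the node itself and * one for each remaining node in the node's span, links them into a circular * list and leaves cpu_power to be filled in by init_numa_sched_groups_power().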
*/ static DEFINE_PER_CPU(struct static_sched_domain, node_domains); static struct sched_group ***sched_group_nodes_bycpu; static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains); static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, struct sched_group **sg, struct cpumask *nodemask) { int group; cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map); group = cpumask_first(nodemask); if (sg) *sg = &per_cpu(sched_group_allnodes, group).sg; return group; } static void init_numa_sched_groups_power(struct sched_group *group_head) { struct sched_group *sg = group_head; int j; if (!sg) return; do { for_each_cpu(j, sched_group_cpus(sg)) { struct sched_domain *sd; sd = &per_cpu(phys_domains, j).sd; if (j != group_first_cpu(sd->groups)) { /* * Only add "power" once for each * physical package. */ continue; } sg->cpu_power += sd->groups->cpu_power; } sg = sg->next; } while (sg != group_head); } static int build_numa_sched_groups(struct s_data *d, const struct cpumask *cpu_map, int num) { struct sched_domain *sd; struct sched_group *sg, *prev; int n, j; cpumask_clear(d->covered); cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map); if (cpumask_empty(d->nodemask)) { d->sched_group_nodes[num] = NULL; goto out; } sched_domain_node_span(num, d->domainspan); cpumask_and(d->domainspan, d->domainspan, cpu_map); sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), GFP_KERNEL, num); if (!sg) { printk(KERN_WARNING "Can not alloc domain group for node %d\n", num); return -ENOMEM; } d->sched_group_nodes[num] = sg; for_each_cpu(j, d->nodemask) { sd = &per_cpu(node_domains, j).sd; sd->groups = sg; } sg->cpu_power = 0; cpumask_copy(sched_group_cpus(sg), d->nodemask); sg->next = sg; cpumask_or(d->covered, d->covered, d->nodemask); prev = sg; for (j = 0; j < nr_node_ids; j++) { n = (num + j) % nr_node_ids; cpumask_complement(d->notcovered, d->covered); cpumask_and(d->tmpmask, d->notcovered, cpu_map); cpumask_and(d->tmpmask, d->tmpmask, d->domainspan); if (cpumask_empty(d->tmpmask)) break; cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n)); if (cpumask_empty(d->tmpmask)) continue; sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), GFP_KERNEL, num); if (!sg) { printk(KERN_WARNING "Can not alloc domain group for node %d\n", j); return -ENOMEM; } sg->cpu_power = 0; cpumask_copy(sched_group_cpus(sg), d->tmpmask); sg->next = prev->next; cpumask_or(d->covered, d->covered, d->tmpmask); prev->next = sg; prev = sg; } out: return 0; } #endif /* CONFIG_NUMA */ #ifdef CONFIG_NUMA /* Free memory allocated for various sched_group structures */ static void free_sched_groups(const struct cpumask *cpu_map, struct cpumask *nodemask) { int cpu, i; for_each_cpu(cpu, cpu_map) { struct sched_group **sched_group_nodes = sched_group_nodes_bycpu[cpu]; if (!sched_group_nodes) continue; for (i = 0; i < nr_node_ids; i++) { struct sched_group *oldsg, *sg = sched_group_nodes[i]; cpumask_and(nodemask, cpumask_of_node(i), cpu_map); if (cpumask_empty(nodemask)) continue; if (sg == NULL) continue; sg = sg->next; next_sg: oldsg = sg; sg = sg->next; kfree(oldsg); if (oldsg != sched_group_nodes[i]) goto next_sg; } kfree(sched_group_nodes); sched_group_nodes_bycpu[cpu] = NULL; } } #else /* !CONFIG_NUMA */ static void free_sched_groups(const struct cpumask *cpu_map, struct cpumask *nodemask) { } #endif /* CONFIG_NUMA */ /* * Initialize sched groups cpu_power. 
* * cpu_power indicates the capacity of sched group, which is used while * distributing the load between different sched groups in a sched domain. * Typically cpu_power for all the groups in a sched domain will be same unless * there are asymmetries in the topology. If there are asymmetries, group * having more cpu_power will pickup more load compared to the group having * less cpu_power. */ static void init_sched_groups_power(int cpu, struct sched_domain *sd) { struct sched_domain *child; struct sched_group *group; long power; int weight; WARN_ON(!sd || !sd->groups); if (cpu != group_first_cpu(sd->groups)) return; child = sd->child; sd->groups->cpu_power = 0; if (!child) { power = SCHED_LOAD_SCALE; weight = cpumask_weight(sched_domain_span(sd)); /* * SMT siblings share the power of a single core. * Usually multiple threads get a better yield out of * that one core than a single thread would have, * reflect that in sd->smt_gain. */ if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) { power *= sd->smt_gain; power /= weight; power >>= SCHED_LOAD_SHIFT; } sd->groups->cpu_power += power; return; } /* * Add cpu_power of each child group to this groups cpu_power. */ group = child->groups; do { sd->groups->cpu_power += group->cpu_power; group = group->next; } while (group != child->groups); } /* * Initializers for schedule domains * Non-inlined to reduce accumulated stack pressure in build_sched_domains() */ #ifdef CONFIG_SCHED_DEBUG # define SD_INIT_NAME(sd, type) sd->name = #type #else # define SD_INIT_NAME(sd, type) do { } while (0) #endif #define SD_INIT(sd, type) sd_init_##type(sd) #define SD_INIT_FUNC(type) \ static noinline void sd_init_##type(struct sched_domain *sd) \ { \ memset(sd, 0, sizeof(*sd)); \ *sd = SD_##type##_INIT; \ sd->level = SD_LV_##type; \ SD_INIT_NAME(sd, type); \ } SD_INIT_FUNC(CPU) #ifdef CONFIG_NUMA SD_INIT_FUNC(ALLNODES) SD_INIT_FUNC(NODE) #endif #ifdef CONFIG_SCHED_SMT SD_INIT_FUNC(SIBLING) #endif #ifdef CONFIG_SCHED_MC SD_INIT_FUNC(MC) #endif static int default_relax_domain_level = -1; static int __init setup_relax_domain_level(char *str) { unsigned long val; val = simple_strtoul(str, NULL, 0); if (val < SD_LV_MAX) default_relax_domain_level = val; return 1; } __setup("relax_domain_level=", setup_relax_domain_level); static void set_domain_attribute(struct sched_domain *sd, struct sched_domain_attr *attr) { int request; if (!attr || attr->relax_domain_level < 0) { if (default_relax_domain_level < 0) return; else request = default_relax_domain_level; } else request = attr->relax_domain_level; if (request < sd->level) { /* turn off idle balance on this domain */ sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); } else { /* turn on idle balance on this domain */ sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); } } static void __free_domain_allocs(struct s_data *d, enum s_alloc what, const struct cpumask *cpu_map) { switch (what) { case sa_sched_groups: free_sched_groups(cpu_map, d->tmpmask); /* fall through */ d->sched_group_nodes = NULL; case sa_rootdomain: free_rootdomain(d->rd); /* fall through */ case sa_tmpmask: free_cpumask_var(d->tmpmask); /* fall through */ case sa_send_covered: free_cpumask_var(d->send_covered); /* fall through */ case sa_this_core_map: free_cpumask_var(d->this_core_map); /* fall through */ case sa_this_sibling_map: free_cpumask_var(d->this_sibling_map); /* fall through */ case sa_nodemask: free_cpumask_var(d->nodemask); /* fall through */ case sa_sched_group_nodes: #ifdef CONFIG_NUMA kfree(d->sched_group_nodes); /* fall through */ case 
sa_notcovered: free_cpumask_var(d->notcovered); /* fall through */ case sa_covered: free_cpumask_var(d->covered); /* fall through */ case sa_domainspan: free_cpumask_var(d->domainspan); /* fall through */ #endif case sa_none: break; } } static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) { #ifdef CONFIG_NUMA if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL)) return sa_none; if (!alloc_cpumask_var(&d->covered, GFP_KERNEL)) return sa_domainspan; if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL)) return sa_covered; /* Allocate the per-node list of sched groups */ d->sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *), GFP_KERNEL); if (!d->sched_group_nodes) { printk(KERN_WARNING "Can not alloc sched group node list\n"); return sa_notcovered; } sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes; #endif if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL)) return sa_sched_group_nodes; if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL)) return sa_nodemask; if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL)) return sa_this_sibling_map; if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL)) return sa_this_core_map; if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL)) return sa_send_covered; d->rd = alloc_rootdomain(); if (!d->rd) { printk(KERN_WARNING "Cannot alloc root domain\n"); return sa_tmpmask; } return sa_rootdomain; } static struct sched_domain *__build_numa_sched_domains(struct s_data *d, const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i) { struct sched_domain *sd = NULL; #ifdef CONFIG_NUMA struct sched_domain *parent; d->sd_allnodes = 0; if (cpumask_weight(cpu_map) > SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) { sd = &per_cpu(allnodes_domains, i).sd; SD_INIT(sd, ALLNODES); set_domain_attribute(sd, attr); cpumask_copy(sched_domain_span(sd), cpu_map); cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask); d->sd_allnodes = 1; } parent = sd; sd = &per_cpu(node_domains, i).sd; SD_INIT(sd, NODE); set_domain_attribute(sd, attr); sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); sd->parent = parent; if (parent) parent->child = sd; cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map); #endif return sd; } static struct sched_domain *__build_cpu_sched_domain(struct s_data *d, const struct cpumask *cpu_map, struct sched_domain_attr *attr, struct sched_domain *parent, int i) { struct sched_domain *sd; sd = &per_cpu(phys_domains, i).sd; SD_INIT(sd, CPU); set_domain_attribute(sd, attr); cpumask_copy(sched_domain_span(sd), d->nodemask); sd->parent = parent; if (parent) parent->child = sd; cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask); return sd; } static struct sched_domain *__build_mc_sched_domain(struct s_data *d, const struct cpumask *cpu_map, struct sched_domain_attr *attr, struct sched_domain *parent, int i) { struct sched_domain *sd = parent; #ifdef CONFIG_SCHED_MC sd = &per_cpu(core_domains, i).sd; SD_INIT(sd, MC); set_domain_attribute(sd, attr); cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i)); sd->parent = parent; parent->child = sd; cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask); #endif return sd; } static struct sched_domain *__build_smt_sched_domain(struct s_data *d, const struct cpumask *cpu_map, struct sched_domain_attr *attr, struct sched_domain *parent, int i) { struct sched_domain *sd = parent; #ifdef CONFIG_SCHED_SMT sd = &per_cpu(cpu_domains, i).sd; SD_INIT(sd, SIBLING); set_domain_attribute(sd, attr); 
cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i)); sd->parent = parent; parent->child = sd; cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask); #endif return sd; } static void build_sched_groups(struct s_data *d, enum sched_domain_level l, const struct cpumask *cpu_map, int cpu) { switch (l) { #ifdef CONFIG_SCHED_SMT case SD_LV_SIBLING: /* set up CPU (sibling) groups */ cpumask_and(d->this_sibling_map, cpu_map, topology_thread_cpumask(cpu)); if (cpu == cpumask_first(d->this_sibling_map)) init_sched_build_groups(d->this_sibling_map, cpu_map, &cpu_to_cpu_group, d->send_covered, d->tmpmask); break; #endif #ifdef CONFIG_SCHED_MC case SD_LV_MC: /* set up multi-core groups */ cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu)); if (cpu == cpumask_first(d->this_core_map)) init_sched_build_groups(d->this_core_map, cpu_map, &cpu_to_core_group, d->send_covered, d->tmpmask); break; #endif case SD_LV_CPU: /* set up physical groups */ cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map); if (!cpumask_empty(d->nodemask)) init_sched_build_groups(d->nodemask, cpu_map, &cpu_to_phys_group, d->send_covered, d->tmpmask); break; #ifdef CONFIG_NUMA case SD_LV_ALLNODES: init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group, d->send_covered, d->tmpmask); break; #endif default: break; } } /* * Build sched domains for a given set of cpus and attach the sched domains * to the individual cpus */ static int __build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) { enum s_alloc alloc_state = sa_none; struct s_data d; struct sched_domain *sd; int i; #ifdef CONFIG_NUMA d.sd_allnodes = 0; #endif alloc_state = __visit_domain_allocation_hell(&d, cpu_map); if (alloc_state != sa_rootdomain) goto error; alloc_state = sa_sched_groups; /* * Set up domains for cpus specified by the cpu_map. 
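* * For every cpu the hierarchy is built top-down (NUMA allnodes -> node -> * physical -> multi-core -> SMT), then the groups for each level are set up, * cpu_power is accumulated bottom-up and the lowest level domain is finally * attached to the cpu's runqueue.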
*/ for_each_cpu(i, cpu_map) { cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map); sd = __build_numa_sched_domains(&d, cpu_map, attr, i); sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i); sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i); sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i); } for_each_cpu(i, cpu_map) { build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i); build_sched_groups(&d, SD_LV_MC, cpu_map, i); } /* Set up physical groups */ for (i = 0; i < nr_node_ids; i++) build_sched_groups(&d, SD_LV_CPU, cpu_map, i); #ifdef CONFIG_NUMA /* Set up node groups */ if (d.sd_allnodes) build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0); for (i = 0; i < nr_node_ids; i++) if (build_numa_sched_groups(&d, cpu_map, i)) goto error; #endif /* Calculate CPU power for physical packages and nodes */ #ifdef CONFIG_SCHED_SMT for_each_cpu(i, cpu_map) { sd = &per_cpu(cpu_domains, i).sd; init_sched_groups_power(i, sd); } #endif #ifdef CONFIG_SCHED_MC for_each_cpu(i, cpu_map) { sd = &per_cpu(core_domains, i).sd; init_sched_groups_power(i, sd); } #endif for_each_cpu(i, cpu_map) { sd = &per_cpu(phys_domains, i).sd; init_sched_groups_power(i, sd); } #ifdef CONFIG_NUMA for (i = 0; i < nr_node_ids; i++) init_numa_sched_groups_power(d.sched_group_nodes[i]); if (d.sd_allnodes) { struct sched_group *sg; cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, d.tmpmask); init_numa_sched_groups_power(sg); } #endif /* Attach the domains */ for_each_cpu(i, cpu_map) { #ifdef CONFIG_SCHED_SMT sd = &per_cpu(cpu_domains, i).sd; #elif defined(CONFIG_SCHED_MC) sd = &per_cpu(core_domains, i).sd; #else sd = &per_cpu(phys_domains, i).sd; #endif cpu_attach_domain(sd, d.rd, i); } d.sched_group_nodes = NULL; /* don't free this we still need it */ __free_domain_allocs(&d, sa_tmpmask, cpu_map); return 0; error: __free_domain_allocs(&d, alloc_state, cpu_map); return -ENOMEM; } static int build_sched_domains(const struct cpumask *cpu_map) { return __build_sched_domains(cpu_map, NULL); } static cpumask_var_t *doms_cur; /* current sched domains */ static int ndoms_cur; /* number of sched domains in 'doms_cur' */ static struct sched_domain_attr *dattr_cur; /* attribues of custom domains in 'doms_cur' */ /* * Special case: If a kmalloc of a doms_cur partition (array of * cpumask) fails, then fallback to a single sched domain, * as determined by the single cpumask fallback_doms. */ static cpumask_var_t fallback_doms; /* * arch_update_cpu_topology lets virtualized architectures update the * cpu core maps. It is supposed to return 1 if the topology changed * or 0 if it stayed the same. */ int __attribute__((weak)) arch_update_cpu_topology(void) { return 0; } cpumask_var_t *alloc_sched_domains(unsigned int ndoms) { int i; cpumask_var_t *doms; doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); if (!doms) return NULL; for (i = 0; i < ndoms; i++) { if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { free_sched_domains(doms, i); return NULL; } } return doms; } void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) { unsigned int i; for (i = 0; i < ndoms; i++) free_cpumask_var(doms[i]); kfree(doms); } /* * Set up scheduler domains and groups. Callers must hold the hotplug lock. * For now this just excludes isolated cpus, but could be used to * exclude other special cases in the future. 
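* * CPUs listed in the "isolcpus=" boot parameter (cpu_isolated_map) are removed * here; e.g. booting with isolcpus=2-3 keeps cpus 2 and 3 out of every sched * domain so they are never load balanced.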
*/ static int arch_init_sched_domains(const struct cpumask *cpu_map) { int err; arch_update_cpu_topology(); ndoms_cur = 1; doms_cur = alloc_sched_domains(ndoms_cur); if (!doms_cur) doms_cur = &fallback_doms; cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); dattr_cur = NULL; err = build_sched_domains(doms_cur[0]); register_sched_domain_sysctl(); return err; } static void arch_destroy_sched_domains(const struct cpumask *cpu_map, struct cpumask *tmpmask) { free_sched_groups(cpu_map, tmpmask); } /* * Detach sched domains from a group of cpus specified in cpu_map * These cpus will now be attached to the NULL domain */ static void detach_destroy_domains(const struct cpumask *cpu_map) { /* Save because hotplug lock held. */ static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS); int i; for_each_cpu(i, cpu_map) cpu_attach_domain(NULL, &def_root_domain, i); synchronize_sched(); arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); } /* handle null as "default" */ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, struct sched_domain_attr *new, int idx_new) { struct sched_domain_attr tmp; /* fast path */ if (!new && !cur) return 1; tmp = SD_ATTR_INIT; return !memcmp(cur ? (cur + idx_cur) : &tmp, new ? (new + idx_new) : &tmp, sizeof(struct sched_domain_attr)); } /* * Partition sched domains as specified by the 'ndoms_new' * cpumasks in the array doms_new[] of cpumasks. This compares * doms_new[] to the current sched domain partitioning, doms_cur[]. * It destroys each deleted domain and builds each new domain. * * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. * The masks don't intersect (don't overlap.) We should setup one * sched domain for each mask. CPUs not in any of the cpumasks will * not be load balanced. If the same cpumask appears both in the * current 'doms_cur' domains and in the new 'doms_new', we can leave * it as it is. * * The passed in 'doms_new' should be allocated using * alloc_sched_domains. This routine takes ownership of it and will * free_sched_domains it when done with it. If the caller failed the * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, * and partition_sched_domains() will fallback to the single partition * 'fallback_doms', it also forces the domains to be rebuilt. * * If doms_new == NULL it will be replaced with cpu_online_mask. * ndoms_new == 0 is a special case for destroying existing domains, * and it will not create the default domain. * * Call with hotplug lock held */ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], struct sched_domain_attr *dattr_new) { int i, j, n; int new_topology; mutex_lock(&sched_domains_mutex); /* always unregister in case we don't destroy any domains */ unregister_sched_domain_sysctl(); /* Let architecture update cpu core mappings. */ new_topology = arch_update_cpu_topology(); n = doms_new ? 
ndoms_new : 0; /* Destroy deleted domains */ for (i = 0; i < ndoms_cur; i++) { for (j = 0; j < n && !new_topology; j++) { if (cpumask_equal(doms_cur[i], doms_new[j]) && dattrs_equal(dattr_cur, i, dattr_new, j)) goto match1; } /* no match - a current sched domain not in new doms_new[] */ detach_destroy_domains(doms_cur[i]); match1: ; } if (doms_new == NULL) { ndoms_cur = 0; doms_new = &fallback_doms; cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); WARN_ON_ONCE(dattr_new); } /* Build new domains */ for (i = 0; i < ndoms_new; i++) { for (j = 0; j < ndoms_cur && !new_topology; j++) { if (cpumask_equal(doms_new[i], doms_cur[j]) && dattrs_equal(dattr_new, i, dattr_cur, j)) goto match2; } /* no match - add a new doms_new */ __build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); match2: ; } /* Remember the new sched domains */ if (doms_cur != &fallback_doms) free_sched_domains(doms_cur, ndoms_cur); kfree(dattr_cur); /* kfree(NULL) is safe */ doms_cur = doms_new; dattr_cur = dattr_new; ndoms_cur = ndoms_new; register_sched_domain_sysctl(); mutex_unlock(&sched_domains_mutex); } #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) static void arch_reinit_sched_domains(void) { get_online_cpus(); /* Destroy domains first to force the rebuild */ partition_sched_domains(0, NULL, NULL); rebuild_sched_domains(); put_online_cpus(); } static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) { unsigned int level = 0; if (sscanf(buf, "%u", &level) != 1) return -EINVAL; /* * level is always be positive so don't check for * level < POWERSAVINGS_BALANCE_NONE which is 0 * What happens on 0 or 1 byte write, * need to check for count as well? */ if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS) return -EINVAL; if (smt) sched_smt_power_savings = level; else sched_mc_power_savings = level; arch_reinit_sched_domains(); return count; } #ifdef CONFIG_SCHED_MC static ssize_t sched_mc_power_savings_show(struct sysdev_class *class, struct sysdev_class_attribute *attr, char *page) { return sprintf(page, "%u\n", sched_mc_power_savings); } static ssize_t sched_mc_power_savings_store(struct sysdev_class *class, struct sysdev_class_attribute *attr, const char *buf, size_t count) { return sched_power_savings_store(buf, count, 0); } static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show, sched_mc_power_savings_store); #endif #ifdef CONFIG_SCHED_SMT static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev, struct sysdev_class_attribute *attr, char *page) { return sprintf(page, "%u\n", sched_smt_power_savings); } static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev, struct sysdev_class_attribute *attr, const char *buf, size_t count) { return sched_power_savings_store(buf, count, 1); } static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show, sched_smt_power_savings_store); #endif int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) { int err = 0; #ifdef CONFIG_SCHED_SMT if (smt_capable()) err = sysfs_create_file(&cls->kset.kobj, &attr_sched_smt_power_savings.attr); #endif #ifdef CONFIG_SCHED_MC if (!err && mc_capable()) err = sysfs_create_file(&cls->kset.kobj, &attr_sched_mc_power_savings.attr); #endif return err; } #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ /* * Update cpusets according to cpu_active mask. If cpusets are * disabled, cpuset_update_active_cpus() becomes a simple wrapper * around partition_sched_domains(). 
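* * Together with cpuset_cpu_inactive() below it rebuilds the domains on cpu * hotplug; update_runtime() disables RT runtime on a cpu going down and * re-enables it when the cpu comes back.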
*/ static int __cpuexit cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, void *hcpu) { switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: case CPU_DOWN_FAILED: cpuset_update_active_cpus(); return NOTIFY_OK; default: return NOTIFY_DONE; } } static int __cpuexit cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, void *hcpu) { switch (action & ~CPU_TASKS_FROZEN) { case CPU_DOWN_PREPARE: cpuset_update_active_cpus(); return NOTIFY_OK; default: return NOTIFY_DONE; } } static int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu) { int cpu = (int)(long)hcpu; switch (action) { case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: disable_runtime(cpu_rq(cpu)); return NOTIFY_OK; case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: case CPU_ONLINE: case CPU_ONLINE_FROZEN: enable_runtime(cpu_rq(cpu)); return NOTIFY_OK; default: return NOTIFY_DONE; } } void __init sched_init_smp(void) { cpumask_var_t non_isolated_cpus; alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); alloc_cpumask_var(&fallback_doms, GFP_KERNEL); #if defined(CONFIG_NUMA) sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), GFP_KERNEL); BUG_ON(sched_group_nodes_bycpu == NULL); #endif get_online_cpus(); mutex_lock(&sched_domains_mutex); arch_init_sched_domains(cpu_active_mask); cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); if (cpumask_empty(non_isolated_cpus)) cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); mutex_unlock(&sched_domains_mutex); put_online_cpus(); hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); /* RT runtime code needs to handle some hotplug events */ hotcpu_notifier(update_runtime, 0); init_hrtick(); /* Move init over to a non-isolated CPU */ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) BUG(); sched_init_granularity(); free_cpumask_var(non_isolated_cpus); init_sched_rt_class(); } #else void __init sched_init_smp(void) { sched_init_granularity(); } #endif /* CONFIG_SMP */ const_debug unsigned int sysctl_timer_migration = 1; int in_sched_functions(unsigned long addr) { return in_lock_functions(addr) || (addr >= (unsigned long)__sched_text_start && addr < (unsigned long)__sched_text_end); } static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq) { cfs_rq->tasks_timeline = RB_ROOT; INIT_LIST_HEAD(&cfs_rq->tasks); #ifdef CONFIG_FAIR_GROUP_SCHED cfs_rq->rq = rq; #endif cfs_rq->min_vruntime = (u64)(-(1LL << 20)); } static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) { struct rt_prio_array *array; int i; array = &rt_rq->active; for (i = 0; i < MAX_RT_PRIO; i++) { INIT_LIST_HEAD(array->queue + i); __clear_bit(i, array->bitmap); } /* delimiter for bitsearch: */ __set_bit(MAX_RT_PRIO, array->bitmap); #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED rt_rq->highest_prio.curr = MAX_RT_PRIO; #ifdef CONFIG_SMP rt_rq->highest_prio.next = MAX_RT_PRIO; #endif #endif #ifdef CONFIG_SMP rt_rq->rt_nr_migratory = 0; rt_rq->overloaded = 0; plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock); #endif rt_rq->rt_time = 0; rt_rq->rt_throttled = 0; rt_rq->rt_runtime = 0; raw_spin_lock_init(&rt_rq->rt_runtime_lock); #ifdef CONFIG_RT_GROUP_SCHED rt_rq->rt_nr_boosted = 0; rt_rq->rq = rq; #endif } #ifdef CONFIG_FAIR_GROUP_SCHED static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, int add, struct sched_entity *parent) { struct rq *rq = cpu_rq(cpu); tg->cfs_rq[cpu] = cfs_rq; 
init_cfs_rq(cfs_rq, rq); cfs_rq->tg = tg; if (add) list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list); tg->se[cpu] = se; /* se could be NULL for init_task_group */ if (!se) return; if (!parent) se->cfs_rq = &rq->cfs; else se->cfs_rq = parent->my_q; se->my_q = cfs_rq; se->load.weight = tg->shares; se->load.inv_weight = 0; se->parent = parent; } #endif #ifdef CONFIG_RT_GROUP_SCHED static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int cpu, int add, struct sched_rt_entity *parent) { struct rq *rq = cpu_rq(cpu); tg->rt_rq[cpu] = rt_rq; init_rt_rq(rt_rq, rq); rt_rq->tg = tg; rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; if (add) list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list); tg->rt_se[cpu] = rt_se; if (!rt_se) return; if (!parent) rt_se->rt_rq = &rq->rt; else rt_se->rt_rq = parent->my_q; rt_se->my_q = rt_rq; rt_se->parent = parent; INIT_LIST_HEAD(&rt_se->run_list); } #endif void __init sched_init(void) { int i, j; unsigned long alloc_size = 0, ptr; sec_gaf_supply_rqinfo(offsetof(struct rq, curr), offsetof(struct cfs_rq, rq)); #ifdef CONFIG_FAIR_GROUP_SCHED alloc_size += 2 * nr_cpu_ids * sizeof(void **); #endif #ifdef CONFIG_RT_GROUP_SCHED alloc_size += 2 * nr_cpu_ids * sizeof(void **); #endif #ifdef CONFIG_CPUMASK_OFFSTACK alloc_size += num_possible_cpus() * cpumask_size(); #endif if (alloc_size) { ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); #ifdef CONFIG_FAIR_GROUP_SCHED init_task_group.se = (struct sched_entity **)ptr; ptr += nr_cpu_ids * sizeof(void **); init_task_group.cfs_rq = (struct cfs_rq **)ptr; ptr += nr_cpu_ids * sizeof(void **); #endif /* CONFIG_FAIR_GROUP_SCHED */ #ifdef CONFIG_RT_GROUP_SCHED init_task_group.rt_se = (struct sched_rt_entity **)ptr; ptr += nr_cpu_ids * sizeof(void **); init_task_group.rt_rq = (struct rt_rq **)ptr; ptr += nr_cpu_ids * sizeof(void **); #endif /* CONFIG_RT_GROUP_SCHED */ #ifdef CONFIG_CPUMASK_OFFSTACK for_each_possible_cpu(i) { per_cpu(load_balance_tmpmask, i) = (void *)ptr; ptr += cpumask_size(); } #endif /* CONFIG_CPUMASK_OFFSTACK */ } #ifdef CONFIG_SMP init_defrootdomain(); #endif init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); #ifdef CONFIG_RT_GROUP_SCHED init_rt_bandwidth(&init_task_group.rt_bandwidth, global_rt_period(), global_rt_runtime()); #endif /* CONFIG_RT_GROUP_SCHED */ #ifdef CONFIG_CGROUP_SCHED list_add(&init_task_group.list, &task_groups); INIT_LIST_HEAD(&init_task_group.children); #endif /* CONFIG_CGROUP_SCHED */ #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long), __alignof__(unsigned long)); #endif for_each_possible_cpu(i) { struct rq *rq; rq = cpu_rq(i); raw_spin_lock_init(&rq->lock); rq->nr_running = 0; rq->calc_load_active = 0; rq->calc_load_update = jiffies + LOAD_FREQ; init_cfs_rq(&rq->cfs, rq); init_rt_rq(&rq->rt, rq); #ifdef CONFIG_FAIR_GROUP_SCHED init_task_group.shares = init_task_group_load; INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); #ifdef CONFIG_CGROUP_SCHED /* * How much cpu bandwidth does init_task_group get? * * In case of task-groups formed thr' the cgroup filesystem, it * gets 100% of the cpu resources in the system. This overall * system cpu resource is divided among the tasks of * init_task_group and its child task-groups in a fair manner, * based on each entity's (task or task-group's) weight * (se->load.weight). 
* * In other words, if init_task_group has 10 tasks (each of weight * 1024) and two child groups A0 and A1 (of weight 1024 each), * then A0's share of the cpu resource is: * * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% * * We achieve this by letting init_task_group's tasks sit * directly in rq->cfs (i.e. init_task_group->se[] = NULL). */ init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL); #endif #endif /* CONFIG_FAIR_GROUP_SCHED */ rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; #ifdef CONFIG_RT_GROUP_SCHED INIT_LIST_HEAD(&rq->leaf_rt_rq_list); #ifdef CONFIG_CGROUP_SCHED init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL); #endif #endif for (j = 0; j < CPU_LOAD_IDX_MAX; j++) rq->cpu_load[j] = 0; rq->last_load_update_tick = jiffies; #ifdef CONFIG_SMP rq->sd = NULL; rq->rd = NULL; rq->cpu_power = SCHED_LOAD_SCALE; rq->post_schedule = 0; rq->active_balance = 0; rq->next_balance = jiffies; rq->push_cpu = 0; rq->cpu = i; rq->online = 0; rq->idle_stamp = 0; rq->avg_idle = 2*sysctl_sched_migration_cost; rq_attach_root(rq, &def_root_domain); #ifdef CONFIG_NO_HZ rq->nohz_balance_kick = 0; init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i)); #endif #endif init_rq_hrtick(rq); atomic_set(&rq->nr_iowait, 0); } set_load_weight(&init_task); #ifdef CONFIG_PREEMPT_NOTIFIERS INIT_HLIST_HEAD(&init_task.preempt_notifiers); #endif #ifdef CONFIG_SMP open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); #endif #ifdef CONFIG_RT_MUTEXES plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock); #endif /* * The boot idle thread does lazy MMU switching as well: */ atomic_inc(&init_mm.mm_count); enter_lazy_tlb(&init_mm, current); /* * Make us the idle thread. Technically, schedule() should not be * called from this thread, however somewhere below it might be, * but because we are the idle thread, we just pick up running again * when this runqueue becomes "idle".
*/ init_idle(current, smp_processor_id()); calc_load_update = jiffies + LOAD_FREQ; /* * During early bootup we pretend to be a normal task: */ current->sched_class = &fair_sched_class; /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); #ifdef CONFIG_SMP #ifdef CONFIG_NO_HZ zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT); atomic_set(&nohz.load_balancer, nr_cpu_ids); atomic_set(&nohz.first_pick_cpu, nr_cpu_ids); atomic_set(&nohz.second_pick_cpu, nr_cpu_ids); #endif /* May be allocated at isolcpus cmdline parse time */ if (cpu_isolated_map == NULL) zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); #endif /* SMP */ perf_event_init(); scheduler_running = 1; } #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP static inline int preempt_count_equals(int preempt_offset) { int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); } static int __might_sleep_init_called; int __init __might_sleep_init(void) { __might_sleep_init_called = 1; return 0; } early_initcall(__might_sleep_init); void __might_sleep(const char *file, int line, int preempt_offset) { #ifdef in_atomic static unsigned long prev_jiffy; /* ratelimiting */ if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || oops_in_progress) return; if (system_state != SYSTEM_RUNNING && (!__might_sleep_init_called || system_state != SYSTEM_BOOTING)) return; if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) return; prev_jiffy = jiffies; printk(KERN_ERR "BUG: sleeping function called from invalid context at %s:%d\n", file, line); printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", in_atomic(), irqs_disabled(), current->pid, current->comm); debug_show_held_locks(current); if (irqs_disabled()) print_irqtrace_events(current); dump_stack(); #endif } EXPORT_SYMBOL(__might_sleep); #endif #ifdef CONFIG_MAGIC_SYSRQ static void normalize_task(struct rq *rq, struct task_struct *p) { int on_rq; on_rq = p->se.on_rq; if (on_rq) deactivate_task(rq, p, 0); __setscheduler(rq, p, SCHED_NORMAL, 0); if (on_rq) { activate_task(rq, p, 0); resched_task(rq->curr); } } void normalize_rt_tasks(void) { struct task_struct *g, *p; unsigned long flags; struct rq *rq; read_lock_irqsave(&tasklist_lock, flags); do_each_thread(g, p) { /* * Only normalize user tasks: */ if (!p->mm) continue; p->se.exec_start = 0; #ifdef CONFIG_SCHEDSTATS p->se.statistics.wait_start = 0; p->se.statistics.sleep_start = 0; p->se.statistics.block_start = 0; #endif if (!rt_task(p)) { /* * Renice negative nice level userspace * tasks back to 0: */ if (TASK_NICE(p) < 0 && p->mm) set_user_nice(p, 0); continue; } raw_spin_lock(&p->pi_lock); rq = __task_rq_lock(p); normalize_task(rq, p); __task_rq_unlock(rq); raw_spin_unlock(&p->pi_lock); } while_each_thread(g, p); read_unlock_irqrestore(&tasklist_lock, flags); } #endif /* CONFIG_MAGIC_SYSRQ */ #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) /* * These functions are only useful for the IA64 MCA handling, or kdb. * * They can only be called when the whole system has been * stopped - every CPU needs to be quiescent, and no scheduling * activity can take place. Using them for anything else would * be a serious bug, and as a result, they aren't even visible * under any other configuration. */ /** * curr_task - return the current task for a given cpu. * @cpu: the processor in question. * * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 
*/ struct task_struct *curr_task(int cpu) { return cpu_curr(cpu); } #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ #ifdef CONFIG_IA64 /** * set_curr_task - set the current task for a given cpu. * @cpu: the processor in question. * @p: the task pointer to set. * * Description: This function must only be used when non-maskable interrupts * are serviced on a separate stack. It allows the architecture to switch the * notion of the current task on a cpu in a non-blocking manner. This function * must be called with all CPU's synchronized, and interrupts disabled, and * the caller must save the original value of the current task (see * curr_task() above) and restore that value before reenabling interrupts and * re-starting the system. * * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! */ void set_curr_task(int cpu, struct task_struct *p) { cpu_curr(cpu) = p; } #endif #ifdef CONFIG_FAIR_GROUP_SCHED static void free_fair_sched_group(struct task_group *tg) { int i; for_each_possible_cpu(i) { if (tg->cfs_rq) kfree(tg->cfs_rq[i]); if (tg->se) kfree(tg->se[i]); } kfree(tg->cfs_rq); kfree(tg->se); } static int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) { struct cfs_rq *cfs_rq; struct sched_entity *se; struct rq *rq; int i; tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); if (!tg->cfs_rq) goto err; tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); if (!tg->se) goto err; tg->shares = NICE_0_LOAD; for_each_possible_cpu(i) { rq = cpu_rq(i); cfs_rq = kzalloc_node(sizeof(struct cfs_rq), GFP_KERNEL, cpu_to_node(i)); if (!cfs_rq) goto err; se = kzalloc_node(sizeof(struct sched_entity), GFP_KERNEL, cpu_to_node(i)); if (!se) goto err_free_rq; init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); } return 1; err_free_rq: kfree(cfs_rq); err: return 0; } static inline void register_fair_sched_group(struct task_group *tg, int cpu) { list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list, &cpu_rq(cpu)->leaf_cfs_rq_list); } static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) { list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list); } #else /* !CONFIG_FAIR_GROUP_SCHED */ static inline void free_fair_sched_group(struct task_group *tg) { } static inline int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) { return 1; } static inline void register_fair_sched_group(struct task_group *tg, int cpu) { } static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) { } #endif /* CONFIG_FAIR_GROUP_SCHED */ #ifdef CONFIG_RT_GROUP_SCHED static void free_rt_sched_group(struct task_group *tg) { int i; destroy_rt_bandwidth(&tg->rt_bandwidth); for_each_possible_cpu(i) { if (tg->rt_rq) kfree(tg->rt_rq[i]); if (tg->rt_se) kfree(tg->rt_se[i]); } kfree(tg->rt_rq); kfree(tg->rt_se); } static int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) { struct rt_rq *rt_rq; struct sched_rt_entity *rt_se; struct rq *rq; int i; tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); if (!tg->rt_rq) goto err; tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); if (!tg->rt_se) goto err; init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(def_rt_bandwidth.rt_period), 0); for_each_possible_cpu(i) { rq = cpu_rq(i); rt_rq = kzalloc_node(sizeof(struct rt_rq), GFP_KERNEL, cpu_to_node(i)); if (!rt_rq) goto err; rt_se = kzalloc_node(sizeof(struct sched_rt_entity), GFP_KERNEL, cpu_to_node(i)); if (!rt_se) goto err_free_rq; init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); } return 1; err_free_rq:
kfree(rt_rq); err: return 0; } static inline void register_rt_sched_group(struct task_group *tg, int cpu) { list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list, &cpu_rq(cpu)->leaf_rt_rq_list); } static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) { list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list); } #else /* !CONFIG_RT_GROUP_SCHED */ static inline void free_rt_sched_group(struct task_group *tg) { } static inline int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) { return 1; } static inline void register_rt_sched_group(struct task_group *tg, int cpu) { } static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) { } #endif /* CONFIG_RT_GROUP_SCHED */ #ifdef CONFIG_CGROUP_SCHED static void free_sched_group(struct task_group *tg) { free_fair_sched_group(tg); free_rt_sched_group(tg); kfree(tg); } /* allocate runqueue etc for a new task group */ struct task_group *sched_create_group(struct task_group *parent) { struct task_group *tg; unsigned long flags; int i; tg = kzalloc(sizeof(*tg), GFP_KERNEL); if (!tg) return ERR_PTR(-ENOMEM); if (!alloc_fair_sched_group(tg, parent)) goto err; if (!alloc_rt_sched_group(tg, parent)) goto err; spin_lock_irqsave(&task_group_lock, flags); for_each_possible_cpu(i) { register_fair_sched_group(tg, i); register_rt_sched_group(tg, i); } list_add_rcu(&tg->list, &task_groups); WARN_ON(!parent); /* root should already exist */ tg->parent = parent; INIT_LIST_HEAD(&tg->children); list_add_rcu(&tg->siblings, &parent->children); spin_unlock_irqrestore(&task_group_lock, flags); return tg; err: free_sched_group(tg); return ERR_PTR(-ENOMEM); } /* rcu callback to free various structures associated with a task group */ static void free_sched_group_rcu(struct rcu_head *rhp) { /* now it should be safe to free those cfs_rqs */ free_sched_group(container_of(rhp, struct task_group, rcu)); } /* Destroy runqueue etc associated with a task group */ void sched_destroy_group(struct task_group *tg) { unsigned long flags; int i; spin_lock_irqsave(&task_group_lock, flags); for_each_possible_cpu(i) { unregister_fair_sched_group(tg, i); unregister_rt_sched_group(tg, i); } list_del_rcu(&tg->list); list_del_rcu(&tg->siblings); spin_unlock_irqrestore(&task_group_lock, flags); /* wait for possible concurrent references to cfs_rqs complete */ call_rcu(&tg->rcu, free_sched_group_rcu); } /* change task's runqueue when it moves between groups. * The caller of this function should have put the task in its new group * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to * reflect its new group. 
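* * The task is dequeued from its old queues (put_prev_task() is called if it * is currently running), its cfs_rq/parent pointers are switched via * set_task_rq() or the class's task_move_group() hook, and it is then * re-enqueued on the new group's queues.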
*/ void sched_move_task(struct task_struct *tsk) { int on_rq, running; unsigned long flags = 0; struct rq *rq; rq = task_rq_lock(tsk, &flags); running = task_current(rq, tsk); on_rq = tsk->se.on_rq; if (on_rq) dequeue_task(rq, tsk, 0); if (unlikely(running)) tsk->sched_class->put_prev_task(rq, tsk); #ifdef CONFIG_FAIR_GROUP_SCHED if (tsk->sched_class->task_move_group) tsk->sched_class->task_move_group(tsk, on_rq); else #endif set_task_rq(tsk, task_cpu(tsk)); if (unlikely(running)) tsk->sched_class->set_curr_task(rq); if (on_rq) enqueue_task(rq, tsk, 0); task_rq_unlock(rq, &flags); } #endif /* CONFIG_CGROUP_SCHED */ #ifdef CONFIG_FAIR_GROUP_SCHED static void __set_se_shares(struct sched_entity *se, unsigned long shares) { struct cfs_rq *cfs_rq = se->cfs_rq; int on_rq; on_rq = se->on_rq; if (on_rq) dequeue_entity(cfs_rq, se, 0); se->load.weight = shares; se->load.inv_weight = 0; if (on_rq) enqueue_entity(cfs_rq, se, 0); } static void set_se_shares(struct sched_entity *se, unsigned long shares) { struct cfs_rq *cfs_rq = se->cfs_rq; struct rq *rq = cfs_rq->rq; unsigned long flags; raw_spin_lock_irqsave(&rq->lock, flags); __set_se_shares(se, shares); raw_spin_unlock_irqrestore(&rq->lock, flags); } static DEFINE_MUTEX(shares_mutex); int sched_group_set_shares(struct task_group *tg, unsigned long shares) { int i; unsigned long flags; /* * We can't change the weight of the root cgroup. */ if (!tg->se[0]) return -EINVAL; if (shares < MIN_SHARES) shares = MIN_SHARES; else if (shares > MAX_SHARES) shares = MAX_SHARES; mutex_lock(&shares_mutex); if (tg->shares == shares) goto done; spin_lock_irqsave(&task_group_lock, flags); for_each_possible_cpu(i) unregister_fair_sched_group(tg, i); list_del_rcu(&tg->siblings); spin_unlock_irqrestore(&task_group_lock, flags); /* wait for any ongoing reference to this group to finish */ synchronize_sched(); /* * Now we are free to modify the group's share on each cpu * w/o tripping rebalance_share or load_balance_fair. */ tg->shares = shares; for_each_possible_cpu(i) { /* * force a rebalance */ cfs_rq_set_shares(tg->cfs_rq[i], 0); set_se_shares(tg->se[i], shares); } /* * Enable load balance activity on this group, by inserting it back on * each cpu's rq->leaf_cfs_rq_list. */ spin_lock_irqsave(&task_group_lock, flags); for_each_possible_cpu(i) register_fair_sched_group(tg, i); list_add_rcu(&tg->siblings, &tg->parent->children); spin_unlock_irqrestore(&task_group_lock, flags); done: mutex_unlock(&shares_mutex); return 0; } unsigned long sched_group_shares(struct task_group *tg) { return tg->shares; } #endif #ifdef CONFIG_RT_GROUP_SCHED /* * Ensure that the real time constraints are schedulable. 
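* * Bandwidths are compared as fixed-point ratios: to_ratio() returns * runtime/period scaled by 2^20, with RUNTIME_INF mapping to a full 1 << 20. * For example, runtime = 950ms in a period of 1s gives * (950 << 20) / 1000 = 996147, i.e. roughly 95% of a cpu.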
*/ static DEFINE_MUTEX(rt_constraints_mutex); static unsigned long to_ratio(u64 period, u64 runtime) { if (runtime == RUNTIME_INF) return 1ULL << 20; return div64_u64(runtime << 20, period); } /* Must be called with tasklist_lock held */ static inline int tg_has_rt_tasks(struct task_group *tg) { struct task_struct *g, *p; do_each_thread(g, p) { if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg) return 1; } while_each_thread(g, p); return 0; } struct rt_schedulable_data { struct task_group *tg; u64 rt_period; u64 rt_runtime; }; static int tg_schedulable(struct task_group *tg, void *data) { struct rt_schedulable_data *d = data; struct task_group *child; unsigned long total, sum = 0; u64 period, runtime; period = ktime_to_ns(tg->rt_bandwidth.rt_period); runtime = tg->rt_bandwidth.rt_runtime; if (tg == d->tg) { period = d->rt_period; runtime = d->rt_runtime; } /* * Cannot have more runtime than the period. */ if (runtime > period && runtime != RUNTIME_INF) return -EINVAL; /* * Ensure we don't starve existing RT tasks. */ if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) return -EBUSY; total = to_ratio(period, runtime); /* * Nobody can have more than the global setting allows. */ if (total > to_ratio(global_rt_period(), global_rt_runtime())) return -EINVAL; /* * The sum of our children's runtime should not exceed our own. */ list_for_each_entry_rcu(child, &tg->children, siblings) { period = ktime_to_ns(child->rt_bandwidth.rt_period); runtime = child->rt_bandwidth.rt_runtime; if (child == d->tg) { period = d->rt_period; runtime = d->rt_runtime; } sum += to_ratio(period, runtime); } if (sum > total) return -EINVAL; return 0; } static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) { struct rt_schedulable_data data = { .tg = tg, .rt_period = period, .rt_runtime = runtime, }; return walk_tg_tree(tg_schedulable, tg_nop, &data); } static int tg_set_bandwidth(struct task_group *tg, u64 rt_period, u64 rt_runtime) { int i, err = 0; mutex_lock(&rt_constraints_mutex); read_lock(&tasklist_lock); err = __rt_schedulable(tg, rt_period, rt_runtime); if (err) goto unlock; raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); tg->rt_bandwidth.rt_runtime = rt_runtime; for_each_possible_cpu(i) { struct rt_rq *rt_rq = tg->rt_rq[i]; raw_spin_lock(&rt_rq->rt_runtime_lock); rt_rq->rt_runtime = rt_runtime; raw_spin_unlock(&rt_rq->rt_runtime_lock); } raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); unlock: read_unlock(&tasklist_lock); mutex_unlock(&rt_constraints_mutex); return err; } int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) { u64 rt_runtime, rt_period; rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; if (rt_runtime_us < 0) rt_runtime = RUNTIME_INF; return tg_set_bandwidth(tg, rt_period, rt_runtime); } long sched_group_rt_runtime(struct task_group *tg) { u64 rt_runtime_us; if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) return -1; rt_runtime_us = tg->rt_bandwidth.rt_runtime; do_div(rt_runtime_us, NSEC_PER_USEC); return rt_runtime_us; } int sched_group_set_rt_period(struct task_group *tg, long rt_period_us) { u64 rt_runtime, rt_period; rt_period = (u64)rt_period_us * NSEC_PER_USEC; rt_runtime = tg->rt_bandwidth.rt_runtime; if (rt_period == 0) return -EINVAL; return tg_set_bandwidth(tg, rt_period, rt_runtime); } long sched_group_rt_period(struct task_group *tg) { u64 rt_period_us; rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); 
do_div(rt_period_us, NSEC_PER_USEC); return rt_period_us; } static int sched_rt_global_constraints(void) { u64 runtime, period; int ret = 0; if (sysctl_sched_rt_period <= 0) return -EINVAL; runtime = global_rt_runtime(); period = global_rt_period(); /* * Sanity check on the sysctl variables. */ if (runtime > period && runtime != RUNTIME_INF) return -EINVAL; mutex_lock(&rt_constraints_mutex); read_lock(&tasklist_lock); ret = __rt_schedulable(NULL, 0, 0); read_unlock(&tasklist_lock); mutex_unlock(&rt_constraints_mutex); return ret; } int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) { /* Don't accept realtime tasks when there is no way for them to run */ if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) return 0; return 1; } #else /* !CONFIG_RT_GROUP_SCHED */ static int sched_rt_global_constraints(void) { unsigned long flags; int i; if (sysctl_sched_rt_period <= 0) return -EINVAL; /* * There's always some RT tasks in the root group * -- migration, kstopmachine etc.. */ if (sysctl_sched_rt_runtime == 0) return -EBUSY; raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); for_each_possible_cpu(i) { struct rt_rq *rt_rq = &cpu_rq(i)->rt; raw_spin_lock(&rt_rq->rt_runtime_lock); rt_rq->rt_runtime = global_rt_runtime(); raw_spin_unlock(&rt_rq->rt_runtime_lock); } raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); return 0; } #endif /* CONFIG_RT_GROUP_SCHED */ int sched_rt_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; int old_period, old_runtime; static DEFINE_MUTEX(mutex); mutex_lock(&mutex); old_period = sysctl_sched_rt_period; old_runtime = sysctl_sched_rt_runtime; ret = proc_dointvec(table, write, buffer, lenp, ppos); if (!ret && write) { ret = sched_rt_global_constraints(); if (ret) { sysctl_sched_rt_period = old_period; sysctl_sched_rt_runtime = old_runtime; } else { def_rt_bandwidth.rt_runtime = global_rt_runtime(); def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); } } mutex_unlock(&mutex); return ret; } #ifdef CONFIG_CGROUP_SCHED /* return corresponding task_group object of a cgroup */ static inline struct task_group *cgroup_tg(struct cgroup *cgrp) { return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id), struct task_group, css); } static struct cgroup_subsys_state * cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) { struct task_group *tg, *parent; if (!cgrp->parent) { /* This is early initialization for the top cgroup */ return &init_task_group.css; } parent = cgroup_tg(cgrp->parent); tg = sched_create_group(parent); if (IS_ERR(tg)) return ERR_PTR(-ENOMEM); return &tg->css; } static void cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) { struct task_group *tg = cgroup_tg(cgrp); sched_destroy_group(tg); } static int cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) { if ((current != tsk) && (!capable(CAP_SYS_NICE))) { const struct cred *cred = current_cred(), *tcred; tcred = __task_cred(tsk); if (cred->euid != tcred->uid && cred->euid != tcred->suid) return -EPERM; } #ifdef CONFIG_RT_GROUP_SCHED if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk)) return -EINVAL; #else /* We don't support RT-tasks being in separate groups */ if (tsk->sched_class != &fair_sched_class) return -EINVAL; #endif return 0; } static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, struct task_struct *tsk, bool threadgroup) { int retval = cpu_cgroup_can_attach_task(cgrp, tsk); if (retval) return retval; if 
(threadgroup) { struct task_struct *c; rcu_read_lock(); list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { retval = cpu_cgroup_can_attach_task(cgrp, c); if (retval) { rcu_read_unlock(); return retval; } } rcu_read_unlock(); } return 0; } static void cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, struct cgroup *old_cont, struct task_struct *tsk, bool threadgroup) { sched_move_task(tsk); if (threadgroup) { struct task_struct *c; rcu_read_lock(); list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { sched_move_task(c); } rcu_read_unlock(); } } #ifdef CONFIG_FAIR_GROUP_SCHED static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype, u64 shareval) { return sched_group_set_shares(cgroup_tg(cgrp), shareval); } static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft) { struct task_group *tg = cgroup_tg(cgrp); return (u64) tg->shares; } #endif /* CONFIG_FAIR_GROUP_SCHED */ #ifdef CONFIG_RT_GROUP_SCHED static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft, s64 val) { return sched_group_set_rt_runtime(cgroup_tg(cgrp), val); } static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft) { return sched_group_rt_runtime(cgroup_tg(cgrp)); } static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype, u64 rt_period_us) { return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us); } static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft) { return sched_group_rt_period(cgroup_tg(cgrp)); } #endif /* CONFIG_RT_GROUP_SCHED */ static struct cftype cpu_files[] = { #ifdef CONFIG_FAIR_GROUP_SCHED { .name = "shares", .read_u64 = cpu_shares_read_u64, .write_u64 = cpu_shares_write_u64, }, #endif #ifdef CONFIG_RT_GROUP_SCHED { .name = "rt_runtime_us", .read_s64 = cpu_rt_runtime_read, .write_s64 = cpu_rt_runtime_write, }, { .name = "rt_period_us", .read_u64 = cpu_rt_period_read_uint, .write_u64 = cpu_rt_period_write_uint, }, #endif }; static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) { return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files)); } struct cgroup_subsys cpu_cgroup_subsys = { .name = "cpu", .create = cpu_cgroup_create, .destroy = cpu_cgroup_destroy, .can_attach = cpu_cgroup_can_attach, .attach = cpu_cgroup_attach, .populate = cpu_cgroup_populate, .subsys_id = cpu_cgroup_subsys_id, .early_init = 1, }; #endif /* CONFIG_CGROUP_SCHED */ #ifdef CONFIG_CGROUP_CPUACCT /* * CPU accounting code for task groups. * * Based on the work by Paul Menage (menage@google.com) and Balbir Singh * (balbir@in.ibm.com). */ /* track cpu usage of a group of tasks and its child groups */ struct cpuacct { struct cgroup_subsys_state css; /* cpuusage holds pointer to a u64-type object on every cpu */ u64 __percpu *cpuusage; struct percpu_counter cpustat[CPUACCT_STAT_NSTATS]; struct cpuacct *parent; struct cpuacct_charge_calls *cpufreq_fn; void *cpuacct_data; }; static struct cpuacct *cpuacct_root; /* Default calls for cpufreq accounting */ static struct cpuacct_charge_calls *cpuacct_cpufreq; int cpuacct_register_cpufreq(struct cpuacct_charge_calls *fn) { cpuacct_cpufreq = fn; /* * Root node is created before platform can register callbacks, * initalize here. 
*/ if (cpuacct_root && fn) { cpuacct_root->cpufreq_fn = fn; if (fn->init) fn->init(&cpuacct_root->cpuacct_data); } return 0; } struct cgroup_subsys cpuacct_subsys; /* return cpu accounting group corresponding to this container */ static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) { return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id), struct cpuacct, css); } /* return cpu accounting group to which this task belongs */ static inline struct cpuacct *task_ca(struct task_struct *tsk) { return container_of(task_subsys_state(tsk, cpuacct_subsys_id), struct cpuacct, css); } /* create a new cpu accounting group */ static struct cgroup_subsys_state *cpuacct_create( struct cgroup_subsys *ss, struct cgroup *cgrp) { struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL); int i; if (!ca) goto out; ca->cpuusage = alloc_percpu(u64); if (!ca->cpuusage) goto out_free_ca; for (i = 0; i < CPUACCT_STAT_NSTATS; i++) if (percpu_counter_init(&ca->cpustat[i], 0)) goto out_free_counters; ca->cpufreq_fn = cpuacct_cpufreq; /* If available, have platform code initialize cpu frequency table */ if (ca->cpufreq_fn && ca->cpufreq_fn->init) ca->cpufreq_fn->init(&ca->cpuacct_data); if (cgrp->parent) ca->parent = cgroup_ca(cgrp->parent); else cpuacct_root = ca; return &ca->css; out_free_counters: while (--i >= 0) percpu_counter_destroy(&ca->cpustat[i]); free_percpu(ca->cpuusage); out_free_ca: kfree(ca); out: return ERR_PTR(-ENOMEM); } /* destroy an existing cpu accounting group */ static void cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) { struct cpuacct *ca = cgroup_ca(cgrp); int i; for (i = 0; i < CPUACCT_STAT_NSTATS; i++) percpu_counter_destroy(&ca->cpustat[i]); free_percpu(ca->cpuusage); kfree(ca); } static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) { u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); u64 data; #ifndef CONFIG_64BIT /* * Take rq->lock to make 64-bit read safe on 32-bit platforms. */ raw_spin_lock_irq(&cpu_rq(cpu)->lock); data = *cpuusage; raw_spin_unlock_irq(&cpu_rq(cpu)->lock); #else data = *cpuusage; #endif return data; } static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) { u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); #ifndef CONFIG_64BIT /* * Take rq->lock to make 64-bit write safe on 32-bit platforms. 
*/ raw_spin_lock_irq(&cpu_rq(cpu)->lock); *cpuusage = val; raw_spin_unlock_irq(&cpu_rq(cpu)->lock); #else *cpuusage = val; #endif } /* return total cpu usage (in nanoseconds) of a group */ static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) { struct cpuacct *ca = cgroup_ca(cgrp); u64 totalcpuusage = 0; int i; for_each_present_cpu(i) totalcpuusage += cpuacct_cpuusage_read(ca, i); return totalcpuusage; } static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype, u64 reset) { struct cpuacct *ca = cgroup_ca(cgrp); int err = 0; int i; if (reset) { err = -EINVAL; goto out; } for_each_present_cpu(i) cpuacct_cpuusage_write(ca, i, 0); out: return err; } static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft, struct seq_file *m) { struct cpuacct *ca = cgroup_ca(cgroup); u64 percpu; int i; for_each_present_cpu(i) { percpu = cpuacct_cpuusage_read(ca, i); seq_printf(m, "%llu ", (unsigned long long) percpu); } seq_printf(m, "\n"); return 0; } static const char *cpuacct_stat_desc[] = { [CPUACCT_STAT_USER] = "user", [CPUACCT_STAT_SYSTEM] = "system", }; static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft, struct cgroup_map_cb *cb) { struct cpuacct *ca = cgroup_ca(cgrp); int i; for (i = 0; i < CPUACCT_STAT_NSTATS; i++) { s64 val = percpu_counter_read(&ca->cpustat[i]); val = cputime64_to_clock_t(val); cb->fill(cb, cpuacct_stat_desc[i], val); } return 0; } static int cpuacct_cpufreq_show(struct cgroup *cgrp, struct cftype *cft, struct cgroup_map_cb *cb) { struct cpuacct *ca = cgroup_ca(cgrp); if (ca->cpufreq_fn && ca->cpufreq_fn->cpufreq_show) ca->cpufreq_fn->cpufreq_show(ca->cpuacct_data, cb); return 0; } /* return total cpu power usage (milliWatt second) of a group */ static u64 cpuacct_powerusage_read(struct cgroup *cgrp, struct cftype *cft) { int i; struct cpuacct *ca = cgroup_ca(cgrp); u64 totalpower = 0; if (ca->cpufreq_fn && ca->cpufreq_fn->power_usage) for_each_present_cpu(i) { totalpower += ca->cpufreq_fn->power_usage( ca->cpuacct_data); } return totalpower; } static struct cftype files[] = { { .name = "usage", .read_u64 = cpuusage_read, .write_u64 = cpuusage_write, }, { .name = "usage_percpu", .read_seq_string = cpuacct_percpu_seq_read, }, { .name = "stat", .read_map = cpuacct_stats_show, }, { .name = "cpufreq", .read_map = cpuacct_cpufreq_show, }, { .name = "power", .read_u64 = cpuacct_powerusage_read }, }; static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) { return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files)); } /* * charge this task's execution time to its accounting group. * * called with rq->lock held. */ static void cpuacct_charge(struct task_struct *tsk, u64 cputime) { struct cpuacct *ca; int cpu; if (unlikely(!cpuacct_subsys.active)) return; cpu = task_cpu(tsk); rcu_read_lock(); ca = task_ca(tsk); for (; ca; ca = ca->parent) { u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); *cpuusage += cputime; /* Call back into platform code to account for CPU speeds */ if (ca->cpufreq_fn && ca->cpufreq_fn->charge) ca->cpufreq_fn->charge(ca->cpuacct_data, cputime, cpu); } rcu_read_unlock(); } /* * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large * in cputime_t units. As a result, cpuacct_update_stats calls * percpu_counter_add with values large enough to always overflow the * per cpu batch limit causing bad SMP scalability. * * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled * and enabled. 
We cap it at INT_MAX which is the largest allowed batch value. */ #ifdef CONFIG_SMP #define CPUACCT_BATCH \ min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX) #else #define CPUACCT_BATCH 0 #endif /* * Charge the system/user time to the task's accounting group. */ static void cpuacct_update_stats(struct task_struct *tsk, enum cpuacct_stat_index idx, cputime_t val) { struct cpuacct *ca; int batch = CPUACCT_BATCH; if (unlikely(!cpuacct_subsys.active)) return; rcu_read_lock(); ca = task_ca(tsk); do { __percpu_counter_add(&ca->cpustat[idx], val, batch); ca = ca->parent; } while (ca); rcu_read_unlock(); } struct cgroup_subsys cpuacct_subsys = { .name = "cpuacct", .create = cpuacct_create, .destroy = cpuacct_destroy, .populate = cpuacct_populate, .subsys_id = cpuacct_subsys_id, }; #endif /* CONFIG_CGROUP_CPUACCT */ #ifndef CONFIG_SMP void synchronize_sched_expedited(void) { barrier(); } EXPORT_SYMBOL_GPL(synchronize_sched_expedited); #else /* #ifndef CONFIG_SMP */ static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0); static int synchronize_sched_expedited_cpu_stop(void *data) { /* * There must be a full memory barrier on each affected CPU * between the time that try_stop_cpus() is called and the * time that it returns. * * In the current initial implementation of cpu_stop, the * above condition is already met when the control reaches * this point and the following smp_mb() is not strictly * necessary. Do smp_mb() anyway for documentation and * robustness against future implementation changes. */ smp_mb(); /* See above comment block. */ return 0; } /* * Wait for an rcu-sched grace period to elapse, but use "big hammer" * approach to force grace period to end quickly. This consumes * significant time on all CPUs, and is thus not recommended for * any sort of common-case code. * * Note that it is illegal to call this function while holding any * lock that is acquired by a CPU-hotplug notifier. Failing to * observe this restriction will result in deadlock. */ void synchronize_sched_expedited(void) { int snap, trycount = 0; smp_mb(); /* ensure prior mod happens before capturing snap. */ snap = atomic_read(&synchronize_sched_expedited_count) + 1; get_online_cpus(); while (try_stop_cpus(cpu_online_mask, synchronize_sched_expedited_cpu_stop, NULL) == -EAGAIN) { put_online_cpus(); if (trycount++ < 10) udelay(trycount * num_online_cpus()); else { synchronize_sched(); return; } if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) { smp_mb(); /* ensure test happens before caller kfree */ return; } get_online_cpus(); } atomic_inc(&synchronize_sched_expedited_count); smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */ put_online_cpus(); } EXPORT_SYMBOL_GPL(synchronize_sched_expedited); #endif /* #else #ifndef CONFIG_SMP */ #endif /* CONFIG_SCHED_BFS */
simone201/neak-kernel-sgs2
kernel/sched.c
C
gpl-2.0
226,280
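The cpuacct code in the kernel/sched.c excerpt above exposes per-CPU usage through the "usage_percpu" cgroup file: cpuacct_percpu_seq_read() prints one "%llu " value per present CPU followed by a newline, and cpuusage_read() ("usage") returns the same counters summed. Below is a minimal userspace sketch in C that consumes that format; the mount point /sys/fs/cgroup/cpuacct and the resulting file name are assumptions about how the cpuacct hierarchy happens to be mounted, not something the kernel excerpt itself guarantees.

#include <stdio.h>

int main(void)
{
    /* Assumed path: depends on where the cpuacct cgroup hierarchy is mounted. */
    FILE *f = fopen("/sys/fs/cgroup/cpuacct/cpuacct.usage_percpu", "r");
    unsigned long long per_cpu, total = 0;
    int cpu = 0;

    if (!f) {
        perror("fopen");
        return 1;
    }
    /* cpuacct_percpu_seq_read() emits one "%llu " per present CPU. */
    while (fscanf(f, "%llu", &per_cpu) == 1) {
        printf("cpu%d: %llu ns\n", cpu++, per_cpu);
        total += per_cpu;
    }
    fclose(f);
    /* The sum should match cpuacct.usage, which is computed the same way in cpuusage_read(). */
    printf("total: %llu ns\n", total);
    return 0;
}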
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package org.apache.tomcat.util.bcel.classfile; import java.io.DataInput; import java.io.IOException; import org.apache.tomcat.util.bcel.Constants; /** * This class is derived from the abstract * <A HREF="org.apache.tomcat.util.bcel.classfile.Constant.html">Constant</A> class * and represents a reference to a Double object. * * @author <A HREF="mailto:m.dahm@gmx.de">M. Dahm</A> * @see Constant */ public final class ConstantDouble extends Constant { private static final long serialVersionUID = 3450743772468544760L; private double bytes; /** * @param bytes Data */ public ConstantDouble(double bytes) { super(Constants.CONSTANT_Double); this.bytes = bytes; } /** * Initialize instance from file data. * * @param file Input stream * @throws IOException */ ConstantDouble(DataInput file) throws IOException { this(file.readDouble()); } /** * @return data, i.e., 8 bytes. */ public final double getBytes() { return bytes; } /** * @return String representation. */ @Override public final String toString() { return super.toString() + "(bytes = " + bytes + ")"; } }
deathspeeder/class-guard
apache-tomcat-7.0.53-src/java/org/apache/tomcat/util/bcel/classfile/ConstantDouble.java
Java
gpl-2.0
2,065
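ConstantDouble above delegates the actual parsing to DataInput.readDouble(), which consumes eight big-endian bytes from the class file and reinterprets them as an IEEE-754 double. The C sketch below mirrors that decoding step for illustration; decode_constant_double() is a hypothetical helper and assumes the caller has already consumed the CONSTANT_Double tag byte.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Decode the 8 bytes of a CONSTANT_Double constant-pool entry
 * (the work DataInput.readDouble() does in the Java code above). */
static double decode_constant_double(const uint8_t bytes[8])
{
    uint64_t bits = 0;
    double value;
    int i;

    for (i = 0; i < 8; i++)               /* class files store data big-endian */
        bits = (bits << 8) | bytes[i];
    memcpy(&value, &bits, sizeof(value)); /* reinterpret the IEEE-754 bit pattern */
    return value;
}

int main(void)
{
    /* 0x400921FB54442D18 is the IEEE-754 double encoding of pi. */
    const uint8_t pi[8] = { 0x40, 0x09, 0x21, 0xFB, 0x54, 0x44, 0x2D, 0x18 };
    printf("%.15f\n", decode_constant_double(pi));
    return 0;
}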
<?php /** * * @package InfinityCoreCMS * @version $Id$ * @copyright (c) 2008 InfinityCoreCMS * @license http://opensource.org/licenses/gpl-license.php GNU Public License * */ /** * * @Extra credits for this file * Vjacheslav Trushkin (http://www.stsoftware.biz) * */ define('IN_INFINITYCORECMS', true); if (!defined('IP_ROOT_PATH')) define('IP_ROOT_PATH', './../'); if (!defined('PHP_EXT')) define('PHP_EXT', substr(strrchr(__FILE__, '.'), 1)); $no_page_header = true; require('pagestart.' . PHP_EXT); // Mighty Gorgon - ACP Privacy - BEGIN $is_allowed = check_acp_module_access(); if (empty($is_allowed)) { message_die(GENERAL_MESSAGE, $lang['Not_Auth_View']); } // Mighty Gorgon - ACP Privacy - END define('IN_XS', true); include_once('xs_include.' . PHP_EXT); // check filter $filter = isset($_GET['filter']) ? stripslashes($_GET['filter']) : (isset($_POST['filter']) ? stripslashes($_POST['filter']) : ''); if(isset($_POST['filter_update'])) { $filter_data = array( 'ext' => trim(stripslashes($_POST['filter_ext'])), 'data' => trim(stripslashes($_POST['filter_data'])) ); $filter = serialize($filter_data); } else { $filter_data = @unserialize($filter); if(empty($filter_data['ext'])) { $filter_data['ext'] = ''; } if(empty($filter_data['data'])) { $filter_data['data'] = ''; } } $filter_str = '?filter=' . urlencode($filter); $template->assign_block_vars('nav_left',array('ITEM' => '&raquo; <a href="' . append_sid('xs_edit.' . PHP_EXT.$filter_str) . '">' . $lang['xs_edit_templates'] . '</a>')); $editable = array('.htm', '.html', '.tpl', '.css', '.txt', '.cfg', '.xml', '.php', '.htaccess'); // get current directory $current_dir = isset($_GET['dir']) ? stripslashes($_GET['dir']) : (isset($_POST['dir']) ? stripslashes($_POST['dir']) : 'templates'); $current_dir = xs_fix_dir($current_dir); if(defined('DEMO_MODE') && substr($current_dir, 0, 9) !== 'templates') { // limit access to "templates" in demo mode $current_dir = 'templates'; } $dirs = explode('/', $current_dir); for($i = 0; $i < sizeof($dirs); $i++) { if(!$dirs[$i] || $dirs[$i] === '.') { unset($dirs[$i]); } } $current_dir = implode('/', $dirs); $current_dir_full = $current_dir; //'templates' . ($current_dir ? '/' . $current_dir : ''); $current_dir_root = $current_dir ? $current_dir . '/' : ''; $return_dir = str_replace('{URL}', append_sid('xs_edit.' . PHP_EXT . $filter_str . '&dir=' . urlencode($current_dir)), $lang['xs_edittpl_back_dir']); $return_url = $return_dir; $return_url_root = str_replace('{URL}', append_sid('xs_edit.' . PHP_EXT . $filter_str . '&dir='), $lang['xs_edittpl_back_dir']); $template->assign_vars(array( 'FILTER_EXT' => htmlspecialchars($filter_data['ext']), 'FILTER_DATA' => htmlspecialchars($filter_data['data']), 'FILTER_URL' => append_sid('xs_edit.' . PHP_EXT), 'FILTER_DIR' => htmlspecialchars($current_dir), 'S_FILTER' => '<input type="hidden" name="filter" value="' . htmlspecialchars($filter) . '" />' ) ); /* * show edit form */ if(isset($_GET['edit']) && !empty($_GET['restore'])) { $file = stripslashes($_GET['edit']); $file = xs_fix_dir($file); $fullfile = $current_dir_root . $file; $localfile = '../' . $fullfile; $hash = md5($localfile); $backup_name = XS_TEMP_DIR . XS_BACKUP_PREFIX . $hash . '.' . intval($_GET['restore']) . XS_BACKUP_EXT; if(@file_exists($backup_name)) { // restore file $_POST['edit'] = $_GET['edit']; $_POST['content'] = addslashes(implode('', @file($backup_name))); unset($_GET['edit']); $return_file = str_replace('{URL}', append_sid('xs_edit.' . PHP_EXT . $filter_str . '&amp;dir=' . urlencode($current_dir) . 
'&amp;edit=' . urlencode($file)), $lang['xs_edittpl_back_edit']); $return_url = $return_file . '<br /><br />' . $return_dir; } } /* * save modified file */ if(isset($_POST['edit']) && !defined('DEMO_MODE')) { $file = stripslashes($_POST['edit']); $content = stripslashes($_POST['content']); $fullfile = $current_dir_root . $file; $localfile = '../' . $fullfile; if(!empty($_POST['trim'])) { $content = trim($content); } if(!empty($_FILES['upload']['tmp_name']) && @file_exists($_FILES['upload']['tmp_name'])) { $content = @implode('', @file($_FILES['upload']['tmp_name'])); } $params = array( 'edit' => $file, 'dir' => $current_dir, 'content' => $content, 'filter' => $filter, ); $return_file = str_replace('{URL}', append_sid('xs_edit.' . PHP_EXT . $filter_str . '&amp;dir=' . urlencode($current_dir) . '&amp;edit=' . urlencode($file)), $lang['xs_edittpl_back_edit']); $return_url = $return_file . '<br /><br />' . $return_dir; // get ftp configuration $write_local = false; if(!get_ftp_config(append_sid('xs_edit.' . PHP_EXT), $params, true)) { xs_exit(); } xs_ftp_connect(append_sid('xs_edit.' . PHP_EXT), $params, true); if($ftp === XS_FTP_LOCAL) { $write_local = true; $local_filename = $localfile; } else { $local_filename = XS_TEMP_DIR . 'edit_' . time() . '.tmp'; } $f = @fopen($local_filename, 'wb'); if(!$f) { xs_error($lang['xs_error_cannot_open'] . '<br /><br />' . $return_url); } fwrite($f, $content); fclose($f); if($write_local) { xs_message($lang['Information'], $lang['xs_edit_file_saved'] . '<br /><br />' . $return_url); } // generate ftp actions $actions = array(); // chdir to template directory for($i = 0; $i < sizeof($dirs); $i++) { $actions[] = array( 'command' => 'chdir', 'dir' => $dirs[$i] ); } $actions[] = array( 'command' => 'upload', 'local' => $local_filename, 'remote' => $fullfile ); $ftp_log = array(); $ftp_error = ''; $res = ftp_myexec($actions); echo "<!--\n\n"; echo "\$actions dump:\n\n"; print_r($actions); echo "\n\n\$ftp_log dump:\n\n"; print_r($ftp_log); echo "\n\n -->"; @unlink($local_filename); if($res) { xs_message($lang['Information'], $lang['xs_edit_file_saved'] . '<br /><br />' . $return_url); } xs_error($ftp_error . '<br /><br />' . $return_url); } /* * show edit form */ if(isset($_GET['edit'])) { $file = stripslashes($_GET['edit']); $file = xs_fix_dir($file); $fullfile = $current_dir_root . $file; $localfile = '../' . $fullfile; $hash = md5($localfile); if(!@file_exists($localfile)) { xs_error($lang['xs_edit_not_found'] . '<br /><br />' . $return_url); } $content = @file($localfile); if(!is_array($content)) { xs_error($lang['xs_edit_not_found'] . '<br /><br />' . $return_url); } $content = implode('', $content); if(isset($_GET['download']) && !defined('DEMO_MODE')) { xs_download_file($file, $content); xs_exit(); } if(isset($_GET['downloadbackup']) && !defined('DEMO_MODE')) { $backup_name = XS_TEMP_DIR . XS_BACKUP_PREFIX . $hash . '.' . intval($_GET['downloadbackup']) . XS_BACKUP_EXT; xs_download_file($file, implode('', @file($backup_name))); xs_exit(); } $return_file = str_replace('{URL}', append_sid('xs_edit.' . PHP_EXT.$filter_str.'&amp;dir=' . urlencode($current_dir).'&edit='.urlencode($file)), $lang['xs_edittpl_back_edit']); $return_url = $return_file . '<br /><br />' . $return_dir; $template->assign_vars(array( 'U_ACTION' => append_sid('xs_edit.' . PHP_EXT), 'U_BROWSE' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&amp;dir=' . urlencode($current_dir)), 'U_EDIT' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&amp;dir=' . urlencode($current_dir) . 
'&amp;edit=' . urlencode($file)), 'U_BACKUP' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&dobackup=1&amp;dir='.urlencode($current_dir) . '&amp;edit=' . urlencode($file)), 'U_DOWNLOAD' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&download=1&amp;dir='.urlencode($current_dir) . '&amp;edit=' . urlencode($file)), 'CURRENT_DIR' => htmlspecialchars($current_dir_full), 'DIR' => htmlspecialchars($current_dir), 'FILE' => htmlspecialchars($file), 'FULLFILE' => htmlspecialchars($fullfile), 'CONTENT' => defined('DEMO_MODE') ? $lang['xs_error_demo_edit'] : htmlspecialchars($content), ) ); if($current_dir_full) { $template->assign_block_vars('nav_left',array('ITEM' => '&raquo; <a href="' . append_sid('xs_edit.' . PHP_EXT . $filter_str . '&dir='.$current_dir) . '">' . htmlspecialchars($current_dir_full) . '</a>')); } // show tree $arr = array(); $template->assign_block_vars('tree', array( 'ITEM' => 'InfinityCoreCMS', 'URL' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&dir='), 'SEPARATOR' => '', )); $back_dir = ''; for($i = 0; $i < sizeof($dirs); $i++) { $arr[] = $dirs[$i]; $str = implode('/', $arr); if(sizeof($dirs) > ($i + 1)) { $back_dir = $str; } $template->assign_block_vars('tree', array( 'ITEM' => htmlspecialchars($dirs[$i]), 'URL' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&amp;dir=' . urlencode($str)), 'SEPARATOR' => '/', )); } // view backup if(!empty($_GET['viewbackup']) && !defined('DEMO_MODE')) { $backup_name = XS_TEMP_DIR . XS_BACKUP_PREFIX . $hash . '.' . intval($_GET['viewbackup']) . XS_BACKUP_EXT; $template->assign_vars(array( 'CONTENT' => implode('', @file($backup_name)) ) ); } // save backup if(isset($_GET['dobackup']) && !defined('DEMO_MODE')) { $backup_name = XS_TEMP_DIR . XS_BACKUP_PREFIX . $hash . '.' . time() . XS_BACKUP_EXT; $f = @fopen($backup_name, 'wb'); if(!$f) { xs_error(str_replace('{FILE}', $backup_name, $lang['xs_error_cannot_create_tmp']) . '<br /><br />' . $return_url); } fwrite($f, $content); fclose($f); @chmod($backup_name, 0777); } // delete backup if(isset($_GET['delbackup']) && !defined('DEMO_MODE')) { $backup_name = XS_TEMP_DIR . XS_BACKUP_PREFIX . $hash . '.' . intval($_GET['delbackup']) . XS_BACKUP_EXT; @unlink($backup_name); } // show backups $backups = array(); $res = opendir(XS_TEMP_DIR); $match = XS_BACKUP_PREFIX . $hash . '.'; $match_len = strlen($match); while(($f = readdir($res)) !== false) { if(substr($f, 0, $match_len) === $match) { $str = substr($f, $match_len, strlen($f) - $match_len - strlen(XS_BACKUP_EXT)); if(intval($str)) { $backups[] = intval($str); } } } closedir($res); sort($backups); for($i = 0; $i < sizeof($backups); $i++) { $template->assign_block_vars('backup', array( 'TIME' => create_date($config['default_dateformat'], $backups[$i], $config['board_timezone']), 'U_RESTORE' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&amp;dir=' . urlencode($current_dir) . '&amp;edit=' . urlencode($file) . '&amp;restore=' . $backups[$i]), 'U_DELETE' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&amp;dir=' . urlencode($current_dir) . '&amp;edit=' . urlencode($file) . '&amp;delbackup=' . $backups[$i]), 'U_DOWNLOAD' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&amp;dir=' . urlencode($current_dir) . '&amp;edit=' . urlencode($file) . '&amp;downloadbackup=' . $backups[$i]), 'U_VIEW' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&amp;dir=' . urlencode($current_dir) . '&amp;edit=' . urlencode($file) . '&amp;viewbackup=' . 
$backups[$i]), ) ); } // show template $template->set_filenames(array('body' => XS_TPL_PATH . 'edit_file.tpl')); $template->pparse('body'); xs_exit(); } /* * show file browser */ // show tree $arr = array(); $template->assign_block_vars('tree', array( 'ITEM' => 'InfinityCoreCMS', 'URL' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&dir='), 'SEPARATOR' => '', )); $back_dir = ''; for($i = 0; $i < sizeof($dirs); $i++) { $arr[] = $dirs[$i]; $str = implode('/', $arr); if(sizeof($dirs) > ($i + 1)) { $back_dir = $str; } $template->assign_block_vars('tree', array( 'ITEM' => htmlspecialchars($dirs[$i]), 'URL' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&amp;dir=' . urlencode($str)), 'SEPARATOR' => '/', ) ); } // get list of files/directories $list_files = array(); // non-editable files $list_files_editable = array(); // editable files $list_dirs = array(); // directories $res = @opendir('../' . $current_dir_full); if(!$res) { xs_error(str_replace('{DIR}', $current_dir_full, $lang['xs_export_no_open_dir']) . '<br /><br />' . $return_url_root); } while(($file = readdir($res)) !== false) { if($file !== '.' && $file !== '..') { $filename = '../' . ($current_dir_full ? $current_dir_full . '/' : '') . $file; if(is_dir($filename)) { $list_dirs[] = $file; } else { $pos = strrpos($file, '.'); if($pos !== false) { $ext = strtolower(substr($file, $pos)); $ext1 = substr($ext, 1); if((!$filter_data['ext'] && xs_in_array($ext, $editable)) || $ext1 === $filter_data['ext']) { // check filter if($filter_data['data']) { $content = @implode('', @file($filename)); if(strpos($content, $filter_data['data']) !== false) { $list_files_editable[] = $file; } } else { $list_files_editable[] = $file; } } else { $list_files[] = $file; } } } } } closedir($res); $list_dirs_count = sizeof($list_dirs); $list_files_count = sizeof($list_files) + sizeof($list_files_editable); if($current_dir || sizeof($list_dirs)) { $template->assign_block_vars('begin_dirs', array( 'COUNT' => sizeof($list_dirs), 'L_COUNT' => str_replace('{COUNT}', sizeof($list_dirs), $lang['xs_fileman_dircount']) )); } else { $template->assign_block_vars('begin_nodirs', array()); } if($current_dir) { $template->assign_block_vars('begin_dirs.dir', array( 'NAME' => '..', 'FULLNAME' => htmlspecialchars($back_dir ? $back_dir . '/' : ''), 'URL' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&amp;dir=' . urlencode($back_dir)), ) ); } // show subdirectories sort($list_dirs); for($i = 0; $i < sizeof($list_dirs); $i++) { $dir = $list_dirs[$i]; $str = $current_dir_root . $dir; $template->assign_block_vars('begin_dirs.dir', array( 'NAME' => htmlspecialchars($dir), 'FULLNAME' => htmlspecialchars($current_dir_root . $dir), 'URL' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&amp;dir=' . urlencode($str)), ) ); } // show editable files if(sizeof($list_files_editable)) { $template->assign_block_vars('begin_files', array('COUNT' => sizeof($list_files_editable))); } else { $template->assign_block_vars('begin_nofiles', array('COUNT' => sizeof($list_files_editable))); } sort($list_files_editable); // get today start $today = floor((time() + 3600 * $config['board_timezone']) / 86400) * 86400 - (3600 * $config['board_timezone']); for($i = 0; $i < sizeof($list_files_editable); $i++) { $file = $list_files_editable[$i]; $fullfile = $current_dir_root . $file; $localfile = '../' . $fullfile; $row_class = $xs_row_class[$i % 2]; $t = @filemtime($localfile); $filetime = $t ? 
create_date($config['default_dateformat'], $t, $config['board_timezone']) : '&nbsp;'; $template->assign_block_vars('begin_files.file', array( 'ROW_CLASS' => $row_class, 'NAME' => htmlspecialchars($file), 'FULLNAME' => htmlspecialchars($fullfile), 'SIZE' => @filesize($localfile), 'TIME' => $filetime, 'URL' => append_sid('xs_edit.' . PHP_EXT . $filter_str . '&amp;dir=' . urlencode($current_dir) . '&amp;edit=' . urlencode($file)) ) ); if($t < $today) { $template->assign_block_vars('begin_files.file.old', array()); } else { $template->assign_block_vars('begin_files.file.today', array()); } } $template->set_filenames(array('body' => XS_TPL_PATH . 'edit.tpl')); $template->pparse('body'); xs_exit(); ?>
LordPsyan/InfinityCoreCMS
adm/xs_edit.php
PHP
gpl-2.0
15,350
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html><head><title></title> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <link rel="stylesheet" type="text/css" href="search.css"/> <script type="text/javascript" src="search.js"></script> </head> <body class="SRPage"> <div id="SRIndex"> <div class="SRStatus" id="Loading">Loading...</div> <div class="SRResult" id="SR__5f_5fsizekeyvalueofstringstring"> <div class="SREntry"> <a id="Item0" onkeydown="return searchResults.Nav(event,0)" onkeypress="return searchResults.Nav(event,0)" onkeyup="return searchResults.Nav(event,0)" class="SRSymbol" href="../classns5____ArrayOfKeyValueOfstringstring.html#adb2f8f97f65827333afed34858f5df83" target="_parent">__sizeKeyValueOfstringstring</a> <span class="SRScope">ns5__ArrayOfKeyValueOfstringstring</span> </div> </div> <div class="SRResult" id="SR__5f_5fsizestring"> <div class="SREntry"> <a id="Item1" onkeydown="return searchResults.Nav(event,1)" onkeypress="return searchResults.Nav(event,1)" onkeyup="return searchResults.Nav(event,1)" class="SRSymbol" href="../classns5____ArrayOfstring.html#ac6f1705cd7d04a2561b05ceac49197ec" target="_parent">__sizestring</a> <span class="SRScope">ns5__ArrayOfstring</span> </div> </div> <div class="SRStatus" id="Searching">Searching...</div> <div class="SRStatus" id="NoMatches">No Matches</div> <script type="text/javascript"><!-- document.getElementById("Loading").style.display="none"; document.getElementById("NoMatches").style.display="none"; var searchResults = new SearchResults("searchResults"); searchResults.Search(); --></script> </div> </body> </html>
darring/lca-gpl
steward/lib_soap/html/search/variables_5f.html
HTML
gpl-2.0
1,708
#!/bin/sh if [ -z "${CFG_TARGET}" ] ; then CFG_TARGET=`pwd`/target echo "CFG_TARGET=${CFG_TARGET}" fi if [ -z "${CFG_SOURCE}" ] ; then CFG_SOURCE=`pwd`/source echo "CFG_SOURCE=${CFG_SOURCE}" fi if [ -z "${CFG_BUILD}" ] ; then CFG_BUILD=`pwd`/build echo "CFG_BUILD=${CFG_BUILD}" fi if [ -z "${CFG_LINUX_CONFIG}" ] ; then CFG_LINUX_CONFIG=i386_defconfig echo "CFG_LINUX_CONFIG=${CFG_LINUX_CONFIG}" fi if [ -z "${CFG_LINUX_IMAGE}" ] ; then CFG_LINUX_IMAGE=bzImage echo "CFG_LINUX_IMAGE=${CFG_LINUX_IMAGE}" fi mkdir -p $CFG_TARGET mkdir -p $CFG_SOURCE mkdir -p $CFG_BUILD cd $CFG_SOURCE make O=$CFG_BUILD mrproper || exit 1 make O=$CFG_BUILD $CFG_LINUX_CONFIG || exit 1 make O=$CFG_BUILD dep || exit 1 make O=$CFG_BUILD clean || exit 1 make O=$CFG_BUILD $CFG_LINUX_IMAGE modules || exit 1 make O=$CFG_BUILD INSTALL_MOD_PATH=$CFG_TARGET modules_install || exit 1 exit 0
robacklin/linux-3.7.2
quick-make.sh
Shell
gpl-2.0
878
#ifndef LX86_LINUXMT_FCNTL_H #define LX86_LINUXMT_FCNTL_H /* * Definitions taken from the i386 Linux kernel. */ /* open/fcntl */ #define O_ACCMODE 0003 #define O_RDONLY 00 #define O_WRONLY 01 #define O_RDWR 02 #define O_CREAT 0100 /* not fcntl */ #define O_EXCL 0200 /* not fcntl */ #define O_NOCTTY 0400 /* not fcntl */ #define O_TRUNC 01000 /* not fcntl */ #define O_APPEND 02000 #define O_NONBLOCK 04000 #define O_NDELAY O_NONBLOCK #if 0 #define O_SYNC 010000 /* Not supported */ #define FASYNC 020000 /* Not supported */ #endif #define F_DUPFD 0 /* dup */ #define F_GETFD 1 /* get f_flags */ #define F_SETFD 2 /* set f_flags */ #define F_GETFL 3 /* more flags (cloexec) */ #define F_SETFL 4 #define F_GETLK 5 #define F_SETLK 6 #define F_SETLKW 7 #define F_SETOWN 8 /* for sockets. */ #define F_GETOWN 9 /* for sockets. */ /* for F_[GET|SET]FL */ #define FD_CLOEXEC 1 /* actually anything with low bit set goes */ /* for posix fcntl() and lockf() */ #define F_RDLCK 0 #define F_WRLCK 1 #define F_UNLCK 2 /* for old implementation of bsd flock () */ #define F_EXLCK 4 /* or 3 */ #define F_SHLCK 8 /* or 4 */ /* operations for bsd flock(), also used by the kernel implementation */ #define LOCK_SH 1 /* shared lock */ #define LOCK_EX 2 /* exclusive lock */ #define LOCK_NB 4 /* or'd with one of the above to prevent * blocking */ #define LOCK_UN 8 /* remove lock */ #ifdef __KERNEL__ #define F_POSIX 1 #define F_FLOCK 2 #endif #endif
lkundrak/elks
include/linuxmt/fcntl.h
C
gpl-2.0
1,503
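The ELKS fcntl.h above mirrors the classic i386 Linux flag values, so the usual POSIX open()/fcntl() idioms apply to it. A small usage sketch follows; it is written against a hosted libc rather than ELKS itself, and the temporary path is only an example.

#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
    /* O_CREAT | O_EXCL: create the file, fail if it already exists. */
    int fd = open("/tmp/fcntl-demo", O_RDWR | O_CREAT | O_EXCL, 0600);
    int flags;

    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* F_SETFD / FD_CLOEXEC: close the descriptor across execve(). */
    fcntl(fd, F_SETFD, FD_CLOEXEC);

    /* F_GETFL / F_SETFL: add O_NONBLOCK to the file status flags. */
    flags = fcntl(fd, F_GETFL);
    fcntl(fd, F_SETFL, flags | O_NONBLOCK);

    close(fd);
    unlink("/tmp/fcntl-demo");
    return 0;
}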
<?php // No direct access to this file defined('_JEXEC') or die('Restricted access'); // import Joomla view library jimport('joomla.application.component.view'); jimport('joomla.application.component.controller'); /** * HTML View class for the HelloWorld Component */ class HoroscopeViewNavamsha extends JViewLegacy { public $data; function display($tpl = null) { $this->data = $this->get('Data'); if (count($errors = $this->get('Errors'))) { JError::raiseError(500, implode('<br />', $errors)); return false; } if(isset($_GET['chart']) && (empty($this->data))) { $app = JFactory::getApplication(); $link = Juri::base().'horoscope?chart='.$_GET['chart']; $app->redirect($link); } else { $tpl = null; parent::display($tpl); } } }
luffy22/aisha
components/com_horoscope/views/navamsha/view.html.php
PHP
gpl-2.0
967
/* * ext4.h * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/include/linux/minix_fs.h * * Copyright (C) 1991, 1992 Linus Torvalds */ #ifndef _EXT4_H #define _EXT4_H #include <linux/types.h> #include <linux/blkdev.h> #include <linux/magic.h> #include <linux/jbd2.h> #include <linux/quota.h> #include <linux/rwsem.h> #include <linux/rbtree.h> #include <linux/seqlock.h> #include <linux/mutex.h> #include <linux/timer.h> #include <linux/wait.h> #include <linux/blockgroup_lock.h> #include <linux/percpu_counter.h> #include <linux/ratelimit.h> #include <crypto/hash.h> #include <linux/falloc.h> #ifdef __KERNEL__ #include <linux/compat.h> #endif /* * The fourth extended filesystem constants/structures */ /* * Define EXT4FS_DEBUG to produce debug messages */ #undef EXT4FS_DEBUG /* * Debug code */ #ifdef EXT4FS_DEBUG #define ext4_debug(f, a...) \ do { \ printk(KERN_DEBUG "EXT4-fs DEBUG (%s, %d): %s:", \ __FILE__, __LINE__, __func__); \ printk(KERN_DEBUG f, ## a); \ } while (0) #else #define ext4_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__) #endif /* * Turn on EXT_DEBUG to get lots of info about extents operations. */ #define EXT_DEBUG__ #ifdef EXT_DEBUG #define ext_debug(fmt, ...) printk(fmt, ##__VA_ARGS__) #else #define ext_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__) #endif #define EXT4_ERROR_INODE(inode, fmt, a...) \ ext4_error_inode((inode), __func__, __LINE__, 0, (fmt), ## a) #define EXT4_ERROR_INODE_BLOCK(inode, block, fmt, a...) \ ext4_error_inode((inode), __func__, __LINE__, (block), (fmt), ## a) #define EXT4_ERROR_FILE(file, block, fmt, a...) \ ext4_error_file((file), __func__, __LINE__, (block), (fmt), ## a) /* data type for block offset of block group */ typedef int ext4_grpblk_t; /* data type for filesystem-wide blocks number */ typedef unsigned long long ext4_fsblk_t; /* data type for file logical block number */ typedef __u32 ext4_lblk_t; /* data type for block group number */ typedef unsigned int ext4_group_t; /* * Flags used in mballoc's allocation_context flags field. * * Also used to show what's going on for debugging purposes when the * flag field is exported via the traceport interface */ /* prefer goal again. 
length */ #define EXT4_MB_HINT_MERGE 0x0001 /* blocks already reserved */ #define EXT4_MB_HINT_RESERVED 0x0002 /* metadata is being allocated */ #define EXT4_MB_HINT_METADATA 0x0004 /* first blocks in the file */ #define EXT4_MB_HINT_FIRST 0x0008 /* search for the best chunk */ #define EXT4_MB_HINT_BEST 0x0010 /* data is being allocated */ #define EXT4_MB_HINT_DATA 0x0020 /* don't preallocate (for tails) */ #define EXT4_MB_HINT_NOPREALLOC 0x0040 /* allocate for locality group */ #define EXT4_MB_HINT_GROUP_ALLOC 0x0080 /* allocate goal blocks or none */ #define EXT4_MB_HINT_GOAL_ONLY 0x0100 /* goal is meaningful */ #define EXT4_MB_HINT_TRY_GOAL 0x0200 /* blocks already pre-reserved by delayed allocation */ #define EXT4_MB_DELALLOC_RESERVED 0x0400 /* We are doing stream allocation */ #define EXT4_MB_STREAM_ALLOC 0x0800 /* Use reserved root blocks if needed */ #define EXT4_MB_USE_ROOT_BLOCKS 0x1000 /* Use blocks from reserved pool */ #define EXT4_MB_USE_RESERVED 0x2000 struct ext4_allocation_request { /* target inode for block we're allocating */ struct inode *inode; /* how many blocks we want to allocate */ unsigned int len; /* logical block in target inode */ ext4_lblk_t logical; /* the closest logical allocated block to the left */ ext4_lblk_t lleft; /* the closest logical allocated block to the right */ ext4_lblk_t lright; /* phys. target (a hint) */ ext4_fsblk_t goal; /* phys. block for the closest logical allocated block to the left */ ext4_fsblk_t pleft; /* phys. block for the closest logical allocated block to the right */ ext4_fsblk_t pright; /* flags. see above EXT4_MB_HINT_* */ unsigned int flags; }; /* * Logical to physical block mapping, used by ext4_map_blocks() * * This structure is used to pass requests into ext4_map_blocks() as * well as to store the information returned by ext4_map_blocks(). It * takes less room on the stack than a struct buffer_head. */ #define EXT4_MAP_NEW (1 << BH_New) #define EXT4_MAP_MAPPED (1 << BH_Mapped) #define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten) #define EXT4_MAP_BOUNDARY (1 << BH_Boundary) /* Sometimes (in the bigalloc case, from ext4_da_get_block_prep) the caller of * ext4_map_blocks wants to know whether or not the underlying cluster has * already been accounted for. EXT4_MAP_FROM_CLUSTER conveys to the caller that * the requested mapping was from previously mapped (or delayed allocated) * cluster. We use BH_AllocFromCluster only for this flag. BH_AllocFromCluster * should never appear on buffer_head's state flags. */ #define EXT4_MAP_FROM_CLUSTER (1 << BH_AllocFromCluster) #define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\ EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\ EXT4_MAP_FROM_CLUSTER) struct ext4_map_blocks { ext4_fsblk_t m_pblk; ext4_lblk_t m_lblk; unsigned int m_len; unsigned int m_flags; }; /* * Flags for ext4_io_end->flags */ #define EXT4_IO_END_UNWRITTEN 0x0001 /* * For converting unwritten extents on a work queue. 'handle' is used for * buffered writeback. 
*/ typedef struct ext4_io_end { struct list_head list; /* per-file finished IO list */ handle_t *handle; /* handle reserved for extent * conversion */ struct inode *inode; /* file being written to */ struct bio *bio; /* Linked list of completed * bios covering the extent */ unsigned int flag; /* unwritten or not */ loff_t offset; /* offset in the file */ ssize_t size; /* size of the extent */ atomic_t count; /* reference counter */ } ext4_io_end_t; struct ext4_io_submit { int io_op; struct bio *io_bio; ext4_io_end_t *io_end; sector_t io_next_block; }; /* * Special inodes numbers */ #define EXT4_BAD_INO 1 /* Bad blocks inode */ #define EXT4_ROOT_INO 2 /* Root inode */ #define EXT4_USR_QUOTA_INO 3 /* User quota inode */ #define EXT4_GRP_QUOTA_INO 4 /* Group quota inode */ #define EXT4_BOOT_LOADER_INO 5 /* Boot loader inode */ #define EXT4_UNDEL_DIR_INO 6 /* Undelete directory inode */ #define EXT4_RESIZE_INO 7 /* Reserved group descriptors inode */ #define EXT4_JOURNAL_INO 8 /* Journal inode */ /* First non-reserved inode for old ext4 filesystems */ #define EXT4_GOOD_OLD_FIRST_INO 11 /* * Maximal count of links to a file */ #define EXT4_LINK_MAX 65000 /* * Macro-instructions used to manage several block sizes */ #define EXT4_MIN_BLOCK_SIZE 1024 #define EXT4_MAX_BLOCK_SIZE 65536 #define EXT4_MIN_BLOCK_LOG_SIZE 10 #define EXT4_MAX_BLOCK_LOG_SIZE 16 #define EXT4_MAX_CLUSTER_LOG_SIZE 30 #ifdef __KERNEL__ # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize) #else # define EXT4_BLOCK_SIZE(s) (EXT4_MIN_BLOCK_SIZE << (s)->s_log_block_size) #endif #define EXT4_ADDR_PER_BLOCK(s) (EXT4_BLOCK_SIZE(s) / sizeof(__u32)) #define EXT4_CLUSTER_SIZE(s) (EXT4_BLOCK_SIZE(s) << \ EXT4_SB(s)->s_cluster_bits) #ifdef __KERNEL__ # define EXT4_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits) # define EXT4_CLUSTER_BITS(s) (EXT4_SB(s)->s_cluster_bits) #else # define EXT4_BLOCK_SIZE_BITS(s) ((s)->s_log_block_size + 10) #endif #ifdef __KERNEL__ #define EXT4_ADDR_PER_BLOCK_BITS(s) (EXT4_SB(s)->s_addr_per_block_bits) #define EXT4_INODE_SIZE(s) (EXT4_SB(s)->s_inode_size) #define EXT4_FIRST_INO(s) (EXT4_SB(s)->s_first_ino) #else #define EXT4_INODE_SIZE(s) (((s)->s_rev_level == EXT4_GOOD_OLD_REV) ? \ EXT4_GOOD_OLD_INODE_SIZE : \ (s)->s_inode_size) #define EXT4_FIRST_INO(s) (((s)->s_rev_level == EXT4_GOOD_OLD_REV) ? 
\ EXT4_GOOD_OLD_FIRST_INO : \ (s)->s_first_ino) #endif #define EXT4_BLOCK_ALIGN(size, blkbits) ALIGN((size), (1 << (blkbits))) /* Translate a block number to a cluster number */ #define EXT4_B2C(sbi, blk) ((blk) >> (sbi)->s_cluster_bits) /* Translate a cluster number to a block number */ #define EXT4_C2B(sbi, cluster) ((cluster) << (sbi)->s_cluster_bits) /* Translate # of blks to # of clusters */ #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ (sbi)->s_cluster_bits) /* Mask out the low bits to get the starting block of the cluster */ #define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \ ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1)) #define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \ ~((ext4_lblk_t) (s)->s_cluster_ratio - 1)) /* Get the cluster offset */ #define EXT4_PBLK_COFF(s, pblk) ((pblk) & \ ((ext4_fsblk_t) (s)->s_cluster_ratio - 1)) #define EXT4_LBLK_COFF(s, lblk) ((lblk) & \ ((ext4_lblk_t) (s)->s_cluster_ratio - 1)) /* * Structure of a blocks group descriptor */ struct ext4_group_desc { __le32 bg_block_bitmap_lo; /* Blocks bitmap block */ __le32 bg_inode_bitmap_lo; /* Inodes bitmap block */ __le32 bg_inode_table_lo; /* Inodes table block */ __le16 bg_free_blocks_count_lo;/* Free blocks count */ __le16 bg_free_inodes_count_lo;/* Free inodes count */ __le16 bg_used_dirs_count_lo; /* Directories count */ __le16 bg_flags; /* EXT4_BG_flags (INODE_UNINIT, etc) */ __le32 bg_exclude_bitmap_lo; /* Exclude bitmap for snapshots */ __le16 bg_block_bitmap_csum_lo;/* crc32c(s_uuid+grp_num+bbitmap) LE */ __le16 bg_inode_bitmap_csum_lo;/* crc32c(s_uuid+grp_num+ibitmap) LE */ __le16 bg_itable_unused_lo; /* Unused inodes count */ __le16 bg_checksum; /* crc16(sb_uuid+group+desc) */ __le32 bg_block_bitmap_hi; /* Blocks bitmap block MSB */ __le32 bg_inode_bitmap_hi; /* Inodes bitmap block MSB */ __le32 bg_inode_table_hi; /* Inodes table block MSB */ __le16 bg_free_blocks_count_hi;/* Free blocks count MSB */ __le16 bg_free_inodes_count_hi;/* Free inodes count MSB */ __le16 bg_used_dirs_count_hi; /* Directories count MSB */ __le16 bg_itable_unused_hi; /* Unused inodes count MSB */ __le32 bg_exclude_bitmap_hi; /* Exclude bitmap block MSB */ __le16 bg_block_bitmap_csum_hi;/* crc32c(s_uuid+grp_num+bbitmap) BE */ __le16 bg_inode_bitmap_csum_hi;/* crc32c(s_uuid+grp_num+ibitmap) BE */ __u32 bg_reserved; }; #define EXT4_BG_INODE_BITMAP_CSUM_HI_END \ (offsetof(struct ext4_group_desc, bg_inode_bitmap_csum_hi) + \ sizeof(__le16)) #define EXT4_BG_BLOCK_BITMAP_CSUM_HI_END \ (offsetof(struct ext4_group_desc, bg_block_bitmap_csum_hi) + \ sizeof(__le16)) /* * Structure of a flex block group info */ struct flex_groups { atomic64_t free_clusters; atomic_t free_inodes; atomic_t used_dirs; }; #define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */ #define EXT4_BG_BLOCK_UNINIT 0x0002 /* Block bitmap not in use */ #define EXT4_BG_INODE_ZEROED 0x0004 /* On-disk itable initialized to zero */ /* * Macro-instructions used to manage group descriptors */ #define EXT4_MIN_DESC_SIZE 32 #define EXT4_MIN_DESC_SIZE_64BIT 64 #define EXT4_MAX_DESC_SIZE EXT4_MIN_BLOCK_SIZE #define EXT4_DESC_SIZE(s) (EXT4_SB(s)->s_desc_size) #ifdef __KERNEL__ # define EXT4_BLOCKS_PER_GROUP(s) (EXT4_SB(s)->s_blocks_per_group) # define EXT4_CLUSTERS_PER_GROUP(s) (EXT4_SB(s)->s_clusters_per_group) # define EXT4_DESC_PER_BLOCK(s) (EXT4_SB(s)->s_desc_per_block) # define EXT4_INODES_PER_GROUP(s) (EXT4_SB(s)->s_inodes_per_group) # define EXT4_DESC_PER_BLOCK_BITS(s) (EXT4_SB(s)->s_desc_per_block_bits) #else # define 
EXT4_BLOCKS_PER_GROUP(s) ((s)->s_blocks_per_group) # define EXT4_DESC_PER_BLOCK(s) (EXT4_BLOCK_SIZE(s) / EXT4_DESC_SIZE(s)) # define EXT4_INODES_PER_GROUP(s) ((s)->s_inodes_per_group) #endif /* * Constants relative to the data blocks */ #define EXT4_NDIR_BLOCKS 12 #define EXT4_IND_BLOCK EXT4_NDIR_BLOCKS #define EXT4_DIND_BLOCK (EXT4_IND_BLOCK + 1) #define EXT4_TIND_BLOCK (EXT4_DIND_BLOCK + 1) #define EXT4_N_BLOCKS (EXT4_TIND_BLOCK + 1) /* * Inode flags */ #define EXT4_SECRM_FL 0x00000001 /* Secure deletion */ #define EXT4_UNRM_FL 0x00000002 /* Undelete */ #define EXT4_COMPR_FL 0x00000004 /* Compress file */ #define EXT4_SYNC_FL 0x00000008 /* Synchronous updates */ #define EXT4_IMMUTABLE_FL 0x00000010 /* Immutable file */ #define EXT4_APPEND_FL 0x00000020 /* writes to file may only append */ #define EXT4_NODUMP_FL 0x00000040 /* do not dump file */ #define EXT4_NOATIME_FL 0x00000080 /* do not update atime */ /* Reserved for compression usage... */ #define EXT4_DIRTY_FL 0x00000100 #define EXT4_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */ #define EXT4_NOCOMPR_FL 0x00000400 /* Don't compress */ #define EXT4_ECOMPR_FL 0x00000800 /* Compression error */ /* End compression flags --- maybe not all used */ #define EXT4_INDEX_FL 0x00001000 /* hash-indexed directory */ #define EXT4_IMAGIC_FL 0x00002000 /* AFS directory */ #define EXT4_JOURNAL_DATA_FL 0x00004000 /* file data should be journaled */ #define EXT4_NOTAIL_FL 0x00008000 /* file tail should not be merged */ #define EXT4_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ #define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ #define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */ #define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */ #define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */ #define EXT4_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */ #define EXT4_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */ #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ #define EXT4_FL_USER_VISIBLE 0x004BDFFF /* User visible flags */ #define EXT4_FL_USER_MODIFIABLE 0x004380FF /* User modifiable flags */ /* Flags that should be inherited by new inodes from their parent. */ #define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\ EXT4_SYNC_FL | EXT4_NODUMP_FL | EXT4_NOATIME_FL |\ EXT4_NOCOMPR_FL | EXT4_JOURNAL_DATA_FL |\ EXT4_NOTAIL_FL | EXT4_DIRSYNC_FL) /* Flags that are appropriate for regular files (all but dir-specific ones). */ #define EXT4_REG_FLMASK (~(EXT4_DIRSYNC_FL | EXT4_TOPDIR_FL)) /* Flags that are appropriate for non-directories/regular files. */ #define EXT4_OTHER_FLMASK (EXT4_NODUMP_FL | EXT4_NOATIME_FL) /* Mask out flags that are inappropriate for the given type of inode. */ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags) { if (S_ISDIR(mode)) return flags; else if (S_ISREG(mode)) return flags & EXT4_REG_FLMASK; else return flags & EXT4_OTHER_FLMASK; } /* * Inode flags used for atomic set/get */ enum { EXT4_INODE_SECRM = 0, /* Secure deletion */ EXT4_INODE_UNRM = 1, /* Undelete */ EXT4_INODE_COMPR = 2, /* Compress file */ EXT4_INODE_SYNC = 3, /* Synchronous updates */ EXT4_INODE_IMMUTABLE = 4, /* Immutable file */ EXT4_INODE_APPEND = 5, /* writes to file may only append */ EXT4_INODE_NODUMP = 6, /* do not dump file */ EXT4_INODE_NOATIME = 7, /* do not update atime */ /* Reserved for compression usage... 
*/ EXT4_INODE_DIRTY = 8, EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */ EXT4_INODE_NOCOMPR = 10, /* Don't compress */ EXT4_INODE_ECOMPR = 11, /* Compression error */ /* End compression flags --- maybe not all used */ EXT4_INODE_INDEX = 12, /* hash-indexed directory */ EXT4_INODE_IMAGIC = 13, /* AFS directory */ EXT4_INODE_JOURNAL_DATA = 14, /* file data should be journaled */ EXT4_INODE_NOTAIL = 15, /* file tail should not be merged */ EXT4_INODE_DIRSYNC = 16, /* dirsync behaviour (directories only) */ EXT4_INODE_TOPDIR = 17, /* Top of directory hierarchies*/ EXT4_INODE_HUGE_FILE = 18, /* Set to each huge file */ EXT4_INODE_EXTENTS = 19, /* Inode uses extents */ EXT4_INODE_EA_INODE = 21, /* Inode used for large EA */ EXT4_INODE_EOFBLOCKS = 22, /* Blocks allocated beyond EOF */ EXT4_INODE_INLINE_DATA = 28, /* Data in inode. */ EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */ }; /* * Since it's pretty easy to mix up bit numbers and hex values, we use a * build-time check to make sure that EXT4_XXX_FL is consistent with respect to * EXT4_INODE_XXX. If all is well, the macros will be dropped, so, it won't cost * any extra space in the compiled kernel image, otherwise, the build will fail. * It's important that these values are the same, since we are using * EXT4_INODE_XXX to test for flag values, but EXT4_XXX_FL must be consistent * with the values of FS_XXX_FL defined in include/linux/fs.h and the on-disk * values found in ext2, ext3 and ext4 filesystems, and of course the values * defined in e2fsprogs. * * It's not paranoia if the Murphy's Law really *is* out to get you. :-) */ #define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG)) #define CHECK_FLAG_VALUE(FLAG) BUILD_BUG_ON(!TEST_FLAG_VALUE(FLAG)) static inline void ext4_check_flag_values(void) { CHECK_FLAG_VALUE(SECRM); CHECK_FLAG_VALUE(UNRM); CHECK_FLAG_VALUE(COMPR); CHECK_FLAG_VALUE(SYNC); CHECK_FLAG_VALUE(IMMUTABLE); CHECK_FLAG_VALUE(APPEND); CHECK_FLAG_VALUE(NODUMP); CHECK_FLAG_VALUE(NOATIME); CHECK_FLAG_VALUE(DIRTY); CHECK_FLAG_VALUE(COMPRBLK); CHECK_FLAG_VALUE(NOCOMPR); CHECK_FLAG_VALUE(ECOMPR); CHECK_FLAG_VALUE(INDEX); CHECK_FLAG_VALUE(IMAGIC); CHECK_FLAG_VALUE(JOURNAL_DATA); CHECK_FLAG_VALUE(NOTAIL); CHECK_FLAG_VALUE(DIRSYNC); CHECK_FLAG_VALUE(TOPDIR); CHECK_FLAG_VALUE(HUGE_FILE); CHECK_FLAG_VALUE(EXTENTS); CHECK_FLAG_VALUE(EA_INODE); CHECK_FLAG_VALUE(EOFBLOCKS); CHECK_FLAG_VALUE(INLINE_DATA); CHECK_FLAG_VALUE(RESERVED); } /* Used to pass group descriptor data when online resize is done */ struct ext4_new_group_input { __u32 group; /* Group number for this data */ __u64 block_bitmap; /* Absolute block number of block bitmap */ __u64 inode_bitmap; /* Absolute block number of inode bitmap */ __u64 inode_table; /* Absolute block number of inode table start */ __u32 blocks_count; /* Total number of blocks in this group */ __u16 reserved_blocks; /* Number of reserved blocks in this group */ __u16 unused; }; #if defined(__KERNEL__) && defined(CONFIG_COMPAT) struct compat_ext4_new_group_input { u32 group; compat_u64 block_bitmap; compat_u64 inode_bitmap; compat_u64 inode_table; u32 blocks_count; u16 reserved_blocks; u16 unused; }; #endif /* The struct ext4_new_group_input in kernel space, with free_blocks_count */ struct ext4_new_group_data { __u32 group; __u64 block_bitmap; __u64 inode_bitmap; __u64 inode_table; __u32 blocks_count; __u16 reserved_blocks; __u16 unused; __u32 free_blocks_count; }; /* Indexes used to index group tables in ext4_new_group_data */ enum { BLOCK_BITMAP = 0, /* block 
bitmap */ INODE_BITMAP, /* inode bitmap */ INODE_TABLE, /* inode tables */ GROUP_TABLE_COUNT, }; /* * Flags used by ext4_map_blocks() */ /* Allocate any needed blocks and/or convert an unwritten extent to be an initialized ext4 */ #define EXT4_GET_BLOCKS_CREATE 0x0001 /* Request the creation of an unwritten extent */ #define EXT4_GET_BLOCKS_UNWRIT_EXT 0x0002 #define EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT (EXT4_GET_BLOCKS_UNWRIT_EXT|\ EXT4_GET_BLOCKS_CREATE) /* Caller is from the delayed allocation writeout path * finally doing the actual allocation of delayed blocks */ #define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004 /* caller is from the direct IO path, request to creation of an unwritten extents if not allocated, split the unwritten extent if blocks has been preallocated already*/ #define EXT4_GET_BLOCKS_PRE_IO 0x0008 #define EXT4_GET_BLOCKS_CONVERT 0x0010 #define EXT4_GET_BLOCKS_IO_CREATE_EXT (EXT4_GET_BLOCKS_PRE_IO|\ EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT) /* Convert extent to initialized after IO complete */ #define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\ EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT) /* Eventual metadata allocation (due to growing extent tree) * should not fail, so try to use reserved blocks for that.*/ #define EXT4_GET_BLOCKS_METADATA_NOFAIL 0x0020 /* Don't normalize allocation size (used for fallocate) */ #define EXT4_GET_BLOCKS_NO_NORMALIZE 0x0040 /* Request will not result in inode size update (user for fallocate) */ #define EXT4_GET_BLOCKS_KEEP_SIZE 0x0080 /* Do not take i_data_sem locking in ext4_map_blocks */ #define EXT4_GET_BLOCKS_NO_LOCK 0x0100 /* Do not put hole in extent cache */ #define EXT4_GET_BLOCKS_NO_PUT_HOLE 0x0200 /* Convert written extents to unwritten */ #define EXT4_GET_BLOCKS_CONVERT_UNWRITTEN 0x0400 /* * The bit position of these flags must not overlap with any of the * EXT4_GET_BLOCKS_*. They are used by ext4_find_extent(), * read_extent_tree_block(), ext4_split_extent_at(), * ext4_ext_insert_extent(), and ext4_ext_create_new_leaf(). * EXT4_EX_NOCACHE is used to indicate that the we shouldn't be * caching the extents when reading from the extent tree while a * truncate or punch hole operation is in progress. 
*/ #define EXT4_EX_NOCACHE 0x40000000 #define EXT4_EX_FORCE_CACHE 0x20000000 /* * Flags used by ext4_free_blocks */ #define EXT4_FREE_BLOCKS_METADATA 0x0001 #define EXT4_FREE_BLOCKS_FORGET 0x0002 #define EXT4_FREE_BLOCKS_VALIDATED 0x0004 #define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE 0x0008 #define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010 #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020 /* * ioctl commands */ #define EXT4_IOC_GETFLAGS FS_IOC_GETFLAGS #define EXT4_IOC_SETFLAGS FS_IOC_SETFLAGS #define EXT4_IOC_GETVERSION _IOR('f', 3, long) #define EXT4_IOC_SETVERSION _IOW('f', 4, long) #define EXT4_IOC_GETVERSION_OLD FS_IOC_GETVERSION #define EXT4_IOC_SETVERSION_OLD FS_IOC_SETVERSION #define EXT4_IOC_GETRSVSZ _IOR('f', 5, long) #define EXT4_IOC_SETRSVSZ _IOW('f', 6, long) #define EXT4_IOC_GROUP_EXTEND _IOW('f', 7, unsigned long) #define EXT4_IOC_GROUP_ADD _IOW('f', 8, struct ext4_new_group_input) #define EXT4_IOC_MIGRATE _IO('f', 9) /* note ioctl 10 reserved for an early version of the FIEMAP ioctl */ /* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */ #define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12) #define EXT4_IOC_MOVE_EXT _IOWR('f', 15, struct move_extent) #define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64) #define EXT4_IOC_SWAP_BOOT _IO('f', 17) #define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18) #if defined(__KERNEL__) && defined(CONFIG_COMPAT) /* * ioctl commands in 32 bit emulation */ #define EXT4_IOC32_GETFLAGS FS_IOC32_GETFLAGS #define EXT4_IOC32_SETFLAGS FS_IOC32_SETFLAGS #define EXT4_IOC32_GETVERSION _IOR('f', 3, int) #define EXT4_IOC32_SETVERSION _IOW('f', 4, int) #define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int) #define EXT4_IOC32_SETRSVSZ _IOW('f', 6, int) #define EXT4_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int) #define EXT4_IOC32_GROUP_ADD _IOW('f', 8, struct compat_ext4_new_group_input) #define EXT4_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION #define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION #endif /* Max physical block we can address w/o extents */ #define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF /* * Structure of an inode on the disk */ struct ext4_inode { __le16 i_mode; /* File mode */ __le16 i_uid; /* Low 16 bits of Owner Uid */ __le32 i_size_lo; /* Size in bytes */ __le32 i_atime; /* Access time */ __le32 i_ctime; /* Inode Change time */ __le32 i_mtime; /* Modification time */ __le32 i_dtime; /* Deletion Time */ __le16 i_gid; /* Low 16 bits of Group Id */ __le16 i_links_count; /* Links count */ __le32 i_blocks_lo; /* Blocks count */ __le32 i_flags; /* File flags */ union { struct { __le32 l_i_version; } linux1; struct { __u32 h_i_translator; } hurd1; struct { __u32 m_i_reserved1; } masix1; } osd1; /* OS dependent 1 */ __le32 i_block[EXT4_N_BLOCKS];/* Pointers to blocks */ __le32 i_generation; /* File version (for NFS) */ __le32 i_file_acl_lo; /* File ACL */ __le32 i_size_high; __le32 i_obso_faddr; /* Obsoleted fragment address */ union { struct { __le16 l_i_blocks_high; /* were l_i_reserved1 */ __le16 l_i_file_acl_high; __le16 l_i_uid_high; /* these 2 fields */ __le16 l_i_gid_high; /* were reserved2[0] */ __le16 l_i_checksum_lo;/* crc32c(uuid+inum+inode) LE */ __le16 l_i_reserved; } linux2; struct { __le16 h_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */ __u16 h_i_mode_high; __u16 h_i_uid_high; __u16 h_i_gid_high; __u32 h_i_author; } hurd2; struct { __le16 h_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */ __le16 m_i_file_acl_high; __u32 m_i_reserved2[2]; } masix2; } osd2; /* OS dependent 2 */ __le16 i_extra_isize; 
__le16 i_checksum_hi; /* crc32c(uuid+inum+inode) BE */ __le32 i_ctime_extra; /* extra Change time (nsec << 2 | epoch) */ __le32 i_mtime_extra; /* extra Modification time(nsec << 2 | epoch) */ __le32 i_atime_extra; /* extra Access time (nsec << 2 | epoch) */ __le32 i_crtime; /* File Creation time */ __le32 i_crtime_extra; /* extra FileCreationtime (nsec << 2 | epoch) */ __le32 i_version_hi; /* high 32 bits for 64-bit version */ }; struct move_extent { __u32 reserved; /* should be zero */ __u32 donor_fd; /* donor file descriptor */ __u64 orig_start; /* logical start offset in block for orig */ __u64 donor_start; /* logical start offset in block for donor */ __u64 len; /* block length to be moved */ __u64 moved_len; /* moved block length */ }; #define EXT4_EPOCH_BITS 2 #define EXT4_EPOCH_MASK ((1 << EXT4_EPOCH_BITS) - 1) #define EXT4_NSEC_MASK (~0UL << EXT4_EPOCH_BITS) /* * Extended fields will fit into an inode if the filesystem was formatted * with large inodes (-I 256 or larger) and there are not currently any EAs * consuming all of the available space. For new inodes we always reserve * enough space for the kernel's known extended fields, but for inodes * created with an old kernel this might not have been the case. None of * the extended inode fields is critical for correct filesystem operation. * This macro checks if a certain field fits in the inode. Note that * inode-size = GOOD_OLD_INODE_SIZE + i_extra_isize */ #define EXT4_FITS_IN_INODE(ext4_inode, einode, field) \ ((offsetof(typeof(*ext4_inode), field) + \ sizeof((ext4_inode)->field)) \ <= (EXT4_GOOD_OLD_INODE_SIZE + \ (einode)->i_extra_isize)) \ static inline __le32 ext4_encode_extra_time(struct timespec *time) { return cpu_to_le32((sizeof(time->tv_sec) > 4 ? (time->tv_sec >> 32) & EXT4_EPOCH_MASK : 0) | ((time->tv_nsec << EXT4_EPOCH_BITS) & EXT4_NSEC_MASK)); } static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra) { if (sizeof(time->tv_sec) > 4) time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32; time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS; } #define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \ do { \ (raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec); \ if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) \ (raw_inode)->xtime ## _extra = \ ext4_encode_extra_time(&(inode)->xtime); \ } while (0) #define EXT4_EINODE_SET_XTIME(xtime, einode, raw_inode) \ do { \ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \ (raw_inode)->xtime = cpu_to_le32((einode)->xtime.tv_sec); \ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \ (raw_inode)->xtime ## _extra = \ ext4_encode_extra_time(&(einode)->xtime); \ } while (0) #define EXT4_INODE_GET_XTIME(xtime, inode, raw_inode) \ do { \ (inode)->xtime.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime); \ if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) \ ext4_decode_extra_time(&(inode)->xtime, \ raw_inode->xtime ## _extra); \ else \ (inode)->xtime.tv_nsec = 0; \ } while (0) #define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode) \ do { \ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \ (einode)->xtime.tv_sec = \ (signed)le32_to_cpu((raw_inode)->xtime); \ else \ (einode)->xtime.tv_sec = 0; \ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \ ext4_decode_extra_time(&(einode)->xtime, \ raw_inode->xtime ## _extra); \ else \ (einode)->xtime.tv_nsec = 0; \ } while (0) #define i_disk_version osd1.linux1.l_i_version #if defined(__KERNEL__) || defined(__linux__) #define 
i_reserved1 osd1.linux1.l_i_reserved1 #define i_file_acl_high osd2.linux2.l_i_file_acl_high #define i_blocks_high osd2.linux2.l_i_blocks_high #define i_uid_low i_uid #define i_gid_low i_gid #define i_uid_high osd2.linux2.l_i_uid_high #define i_gid_high osd2.linux2.l_i_gid_high #define i_checksum_lo osd2.linux2.l_i_checksum_lo #elif defined(__GNU__) #define i_translator osd1.hurd1.h_i_translator #define i_uid_high osd2.hurd2.h_i_uid_high #define i_gid_high osd2.hurd2.h_i_gid_high #define i_author osd2.hurd2.h_i_author #elif defined(__masix__) #define i_reserved1 osd1.masix1.m_i_reserved1 #define i_file_acl_high osd2.masix2.m_i_file_acl_high #define i_reserved2 osd2.masix2.m_i_reserved2 #endif /* defined(__KERNEL__) || defined(__linux__) */ #include "extents_status.h" /* * Lock subclasses for i_data_sem in the ext4_inode_info structure. * * These are needed to avoid lockdep false positives when we need to * allocate blocks to the quota inode during ext4_map_blocks(), while * holding i_data_sem for a normal (non-quota) inode. Since we don't * do quota tracking for the quota inode, this avoids deadlock (as * well as infinite recursion, since it isn't turtles all the way * down...) * * I_DATA_SEM_NORMAL - Used for most inodes * I_DATA_SEM_OTHER - Used by move_extent.c for the second normal inode * where the second inode has larger inode number * than the first * I_DATA_SEM_QUOTA - Used for quota inodes only */ enum { I_DATA_SEM_NORMAL = 0, I_DATA_SEM_OTHER, I_DATA_SEM_QUOTA, }; /* * fourth extended file system inode data in memory */ struct ext4_inode_info { __le32 i_data[15]; /* unconverted */ __u32 i_dtime; ext4_fsblk_t i_file_acl; /* * i_block_group is the number of the block group which contains * this file's inode. Constant across the lifetime of the inode, * it is used for making block allocation decisions - we try to * place a file's data blocks near its inode block, and new inodes * near to their parent directory's inode. */ ext4_group_t i_block_group; ext4_lblk_t i_dir_start_lookup; #if (BITS_PER_LONG < 64) unsigned long i_state_flags; /* Dynamic state flags */ #endif unsigned long i_flags; /* * Extended attributes can be read independently of the main file * data. Taking i_mutex even when reading would cause contention * between readers of EAs and writers of regular file data, so * instead we synchronize on xattr_sem when reading or changing * EAs. */ struct rw_semaphore xattr_sem; struct list_head i_orphan; /* unlinked but open inodes */ /* * i_disksize keeps track of what the inode size is ON DISK, not * in memory. During truncate, i_size is set to the new size by * the VFS prior to calling ext4_truncate(), but the filesystem won't * set i_disksize to 0 until the truncate is actually under way. * * The intent is that i_disksize always represents the blocks which * are used by this file. This allows recovery to restart truncate * on orphans if we crash during truncate. We actually write i_disksize * into the on-disk inode when writing inodes out, instead of i_size. * * The only time when i_disksize and i_size may be different is when * a truncate is in progress. The only things which change i_disksize * are ext4_get_block (growth) and ext4_truncate (shrinkth). */ loff_t i_disksize; /* * i_data_sem is for serialising ext4_truncate() against * ext4_getblock(). In the 2.4 ext2 design, great chunks of inode's * data tree are chopped off during truncate.
We can't do that in * ext4 because whenever we perform intermediate commits during * truncate, the inode and all the metadata blocks *must* be in a * consistent state which allows truncation of the orphans to restart * during recovery. Hence we must fix the get_block-vs-truncate race * by other means, so we have i_data_sem. */ struct rw_semaphore i_data_sem; struct inode vfs_inode; struct jbd2_inode *jinode; spinlock_t i_raw_lock; /* protects updates to the raw inode */ /* * File creation time. Its function is same as that of * struct timespec i_{a,c,m}time in the generic inode. */ struct timespec i_crtime; /* mballoc */ struct list_head i_prealloc_list; spinlock_t i_prealloc_lock; /* extents status tree */ struct ext4_es_tree i_es_tree; rwlock_t i_es_lock; struct list_head i_es_lru; unsigned int i_es_all_nr; /* protected by i_es_lock */ unsigned int i_es_lru_nr; /* protected by i_es_lock */ unsigned long i_touch_when; /* jiffies of last accessing */ /* ialloc */ ext4_group_t i_last_alloc_group; /* allocation reservation info for delalloc */ /* In case of bigalloc, these refer to clusters rather than blocks */ unsigned int i_reserved_data_blocks; unsigned int i_reserved_meta_blocks; unsigned int i_allocated_meta_blocks; ext4_lblk_t i_da_metadata_calc_last_lblock; int i_da_metadata_calc_len; /* on-disk additional length */ __u16 i_extra_isize; /* Indicate the inline data space. */ u16 i_inline_off; u16 i_inline_size; #ifdef CONFIG_QUOTA /* quota space reservation, managed internally by quota code */ qsize_t i_reserved_quota; #endif /* Lock protecting lists below */ spinlock_t i_completed_io_lock; /* * Completed IOs that need unwritten extents handling and have * transaction reserved */ struct list_head i_rsv_conversion_list; /* * Completed IOs that need unwritten extents handling and don't have * transaction reserved */ atomic_t i_ioend_count; /* Number of outstanding io_end structs */ atomic_t i_unwritten; /* Nr. of inflight conversions pending */ struct work_struct i_rsv_conversion_work; spinlock_t i_block_reservation_lock; /* * Transactions that contain inode's metadata needed to complete * fsync and fdatasync, respectively. */ tid_t i_sync_tid; tid_t i_datasync_tid; /* Precomputed uuid+inum+igen checksum for seeding inode checksums */ __u32 i_csum_seed; }; /* * File system states */ #define EXT4_VALID_FS 0x0001 /* Unmounted cleanly */ #define EXT4_ERROR_FS 0x0002 /* Errors detected */ #define EXT4_ORPHAN_FS 0x0004 /* Orphans being recovered */ /* * Misc. 
filesystem flags */ #define EXT2_FLAGS_SIGNED_HASH 0x0001 /* Signed dirhash in use */ #define EXT2_FLAGS_UNSIGNED_HASH 0x0002 /* Unsigned dirhash in use */ #define EXT2_FLAGS_TEST_FILESYS 0x0004 /* to test development code */ /* * Mount flags set via mount options or defaults */ #define EXT4_MOUNT_GRPID 0x00004 /* Create files with directory's group */ #define EXT4_MOUNT_DEBUG 0x00008 /* Some debugging messages */ #define EXT4_MOUNT_ERRORS_CONT 0x00010 /* Continue on errors */ #define EXT4_MOUNT_ERRORS_RO 0x00020 /* Remount fs ro on errors */ #define EXT4_MOUNT_ERRORS_PANIC 0x00040 /* Panic on errors */ #define EXT4_MOUNT_ERRORS_MASK 0x00070 #define EXT4_MOUNT_MINIX_DF 0x00080 /* Mimics the Minix statfs */ #define EXT4_MOUNT_NOLOAD 0x00100 /* Don't use existing journal*/ #define EXT4_MOUNT_DATA_FLAGS 0x00C00 /* Mode for data writes: */ #define EXT4_MOUNT_JOURNAL_DATA 0x00400 /* Write data to journal */ #define EXT4_MOUNT_ORDERED_DATA 0x00800 /* Flush data before commit */ #define EXT4_MOUNT_WRITEBACK_DATA 0x00C00 /* No data ordering */ #define EXT4_MOUNT_UPDATE_JOURNAL 0x01000 /* Update the journal format */ #define EXT4_MOUNT_NO_UID32 0x02000 /* Disable 32-bit UIDs */ #define EXT4_MOUNT_XATTR_USER 0x04000 /* Extended user attributes */ #define EXT4_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */ #define EXT4_MOUNT_NO_AUTO_DA_ALLOC 0x10000 /* No auto delalloc mapping */ #define EXT4_MOUNT_BARRIER 0x20000 /* Use block barriers */ #define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */ #define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ #define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ #define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */ #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ #define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ #define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */ #define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */ #define EXT4_MOUNT_DISCARD 0x40000000 /* Issue DISCARD requests */ #define EXT4_MOUNT_INIT_INODE_TABLE 0x80000000 /* Initialize uninitialized itables */ /* * Mount flags set either automatically (could not be set by mount option) * based on per file system feature or property or in special cases such as * distinguishing between explicit mount option definition and default. 
*/ #define EXT4_MOUNT2_EXPLICIT_DELALLOC 0x00000001 /* User explicitly specified delalloc */ #define EXT4_MOUNT2_STD_GROUP_SIZE 0x00000002 /* We have standard group size of blocksize * 8 blocks */ #define EXT4_MOUNT2_HURD_COMPAT 0x00000004 /* Support HURD-castrated file systems */ #define clear_opt(sb, opt) EXT4_SB(sb)->s_mount_opt &= \ ~EXT4_MOUNT_##opt #define set_opt(sb, opt) EXT4_SB(sb)->s_mount_opt |= \ EXT4_MOUNT_##opt #define test_opt(sb, opt) (EXT4_SB(sb)->s_mount_opt & \ EXT4_MOUNT_##opt) #define clear_opt2(sb, opt) EXT4_SB(sb)->s_mount_opt2 &= \ ~EXT4_MOUNT2_##opt #define set_opt2(sb, opt) EXT4_SB(sb)->s_mount_opt2 |= \ EXT4_MOUNT2_##opt #define test_opt2(sb, opt) (EXT4_SB(sb)->s_mount_opt2 & \ EXT4_MOUNT2_##opt) #define ext4_test_and_set_bit __test_and_set_bit_le #define ext4_set_bit __set_bit_le #define ext4_set_bit_atomic ext2_set_bit_atomic #define ext4_test_and_clear_bit __test_and_clear_bit_le #define ext4_clear_bit __clear_bit_le #define ext4_clear_bit_atomic ext2_clear_bit_atomic #define ext4_test_bit test_bit_le #define ext4_find_next_zero_bit find_next_zero_bit_le #define ext4_find_next_bit find_next_bit_le extern void ext4_set_bits(void *bm, int cur, int len); /* * Maximal mount counts between two filesystem checks */ #define EXT4_DFL_MAX_MNT_COUNT 20 /* Allow 20 mounts */ #define EXT4_DFL_CHECKINTERVAL 0 /* Don't use interval check */ /* * Behaviour when detecting errors */ #define EXT4_ERRORS_CONTINUE 1 /* Continue execution */ #define EXT4_ERRORS_RO 2 /* Remount fs read-only */ #define EXT4_ERRORS_PANIC 3 /* Panic */ #define EXT4_ERRORS_DEFAULT EXT4_ERRORS_CONTINUE /* Metadata checksum algorithm codes */ #define EXT4_CRC32C_CHKSUM 1 /* * Structure of the super block */ struct ext4_super_block { /*00*/ __le32 s_inodes_count; /* Inodes count */ __le32 s_blocks_count_lo; /* Blocks count */ __le32 s_r_blocks_count_lo; /* Reserved blocks count */ __le32 s_free_blocks_count_lo; /* Free blocks count */ /*10*/ __le32 s_free_inodes_count; /* Free inodes count */ __le32 s_first_data_block; /* First Data Block */ __le32 s_log_block_size; /* Block size */ __le32 s_log_cluster_size; /* Allocation cluster size */ /*20*/ __le32 s_blocks_per_group; /* # Blocks per group */ __le32 s_clusters_per_group; /* # Clusters per group */ __le32 s_inodes_per_group; /* # Inodes per group */ __le32 s_mtime; /* Mount time */ /*30*/ __le32 s_wtime; /* Write time */ __le16 s_mnt_count; /* Mount count */ __le16 s_max_mnt_count; /* Maximal mount count */ __le16 s_magic; /* Magic signature */ __le16 s_state; /* File system state */ __le16 s_errors; /* Behaviour when detecting errors */ __le16 s_minor_rev_level; /* minor revision level */ /*40*/ __le32 s_lastcheck; /* time of last check */ __le32 s_checkinterval; /* max. time between checks */ __le32 s_creator_os; /* OS */ __le32 s_rev_level; /* Revision level */ /*50*/ __le16 s_def_resuid; /* Default uid for reserved blocks */ __le16 s_def_resgid; /* Default gid for reserved blocks */ /* * These fields are for EXT4_DYNAMIC_REV superblocks only. * * Note: the difference between the compatible feature set and * the incompatible feature set is that if there is a bit set * in the incompatible feature set that the kernel doesn't * know about, it should refuse to mount the filesystem. * * e2fsck's requirements are more strict; if it doesn't know * about a feature in either the compatible or incompatible * feature set, it must abort and not try to meddle with * things it doesn't understand... 
*/ __le32 s_first_ino; /* First non-reserved inode */ __le16 s_inode_size; /* size of inode structure */ __le16 s_block_group_nr; /* block group # of this superblock */ __le32 s_feature_compat; /* compatible feature set */ /*60*/ __le32 s_feature_incompat; /* incompatible feature set */ __le32 s_feature_ro_compat; /* readonly-compatible feature set */ /*68*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */ /*78*/ char s_volume_name[16]; /* volume name */ /*88*/ char s_last_mounted[64]; /* directory where last mounted */ /*C8*/ __le32 s_algorithm_usage_bitmap; /* For compression */ /* * Performance hints. Directory preallocation should only * happen if the EXT4_FEATURE_COMPAT_DIR_PREALLOC flag is on. */ __u8 s_prealloc_blocks; /* Nr of blocks to try to preallocate*/ __u8 s_prealloc_dir_blocks; /* Nr to preallocate for dirs */ __le16 s_reserved_gdt_blocks; /* Per group desc for online growth */ /* * Journaling support valid if EXT4_FEATURE_COMPAT_HAS_JOURNAL set. */ /*D0*/ __u8 s_journal_uuid[16]; /* uuid of journal superblock */ /*E0*/ __le32 s_journal_inum; /* inode number of journal file */ __le32 s_journal_dev; /* device number of journal file */ __le32 s_last_orphan; /* start of list of inodes to delete */ __le32 s_hash_seed[4]; /* HTREE hash seed */ __u8 s_def_hash_version; /* Default hash version to use */ __u8 s_jnl_backup_type; __le16 s_desc_size; /* size of group descriptor */ /*100*/ __le32 s_default_mount_opts; __le32 s_first_meta_bg; /* First metablock block group */ __le32 s_mkfs_time; /* When the filesystem was created */ __le32 s_jnl_blocks[17]; /* Backup of the journal inode */ /* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */ /*150*/ __le32 s_blocks_count_hi; /* Blocks count */ __le32 s_r_blocks_count_hi; /* Reserved blocks count */ __le32 s_free_blocks_count_hi; /* Free blocks count */ __le16 s_min_extra_isize; /* All inodes have at least # bytes */ __le16 s_want_extra_isize; /* New inodes should reserve # bytes */ __le32 s_flags; /* Miscellaneous flags */ __le16 s_raid_stride; /* RAID stride */ __le16 s_mmp_update_interval; /* # seconds to wait in MMP checking */ __le64 s_mmp_block; /* Block for multi-mount protection */ __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/ __u8 s_log_groups_per_flex; /* FLEX_BG group size */ __u8 s_checksum_type; /* metadata checksum algorithm used */ __le16 s_reserved_pad; __le64 s_kbytes_written; /* nr of lifetime kilobytes written */ __le32 s_snapshot_inum; /* Inode number of active snapshot */ __le32 s_snapshot_id; /* sequential ID of active snapshot */ __le64 s_snapshot_r_blocks_count; /* reserved blocks for active snapshot's future use */ __le32 s_snapshot_list; /* inode number of the head of the on-disk snapshot list */ #define EXT4_S_ERR_START offsetof(struct ext4_super_block, s_error_count) __le32 s_error_count; /* number of fs errors */ __le32 s_first_error_time; /* first time an error happened */ __le32 s_first_error_ino; /* inode involved in first error */ __le64 s_first_error_block; /* block involved of first error */ __u8 s_first_error_func[32]; /* function where the error happened */ __le32 s_first_error_line; /* line number where error happened */ __le32 s_last_error_time; /* most recent time of an error */ __le32 s_last_error_ino; /* inode involved in last error */ __le32 s_last_error_line; /* line number where error happened */ __le64 s_last_error_block; /* block involved of last error */ __u8 s_last_error_func[32]; /* function where the error happened */ #define EXT4_S_ERR_END offsetof(struct 
ext4_super_block, s_mount_opts) __u8 s_mount_opts[64]; __le32 s_usr_quota_inum; /* inode for tracking user quota */ __le32 s_grp_quota_inum; /* inode for tracking group quota */ __le32 s_overhead_clusters; /* overhead blocks/clusters in fs */ __le32 s_backup_bgs[2]; /* groups with sparse_super2 SBs */ __le32 s_reserved[106]; /* Padding to the end of the block */ __le32 s_checksum; /* crc32c(superblock) */ }; #define EXT4_S_ERR_LEN (EXT4_S_ERR_END - EXT4_S_ERR_START) #ifdef __KERNEL__ /* * run-time mount flags */ #define EXT4_MF_MNTDIR_SAMPLED 0x0001 #define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */ /* Number of quota types we support */ #define EXT4_MAXQUOTAS 2 /* * fourth extended-fs super-block data in memory */ struct ext4_sb_info { unsigned long s_desc_size; /* Size of a group descriptor in bytes */ unsigned long s_inodes_per_block;/* Number of inodes per block */ unsigned long s_blocks_per_group;/* Number of blocks in a group */ unsigned long s_clusters_per_group; /* Number of clusters in a group */ unsigned long s_inodes_per_group;/* Number of inodes in a group */ unsigned long s_itb_per_group; /* Number of inode table blocks per group */ unsigned long s_gdb_count; /* Number of group descriptor blocks */ unsigned long s_desc_per_block; /* Number of group descriptors per block */ ext4_group_t s_groups_count; /* Number of groups in the fs */ ext4_group_t s_blockfile_groups;/* Groups acceptable for non-extent files */ unsigned long s_overhead; /* # of fs overhead clusters */ unsigned int s_cluster_ratio; /* Number of blocks per cluster */ unsigned int s_cluster_bits; /* log2 of s_cluster_ratio */ loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */ struct buffer_head * s_sbh; /* Buffer containing the super block */ struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */ struct buffer_head **s_group_desc; unsigned int s_mount_opt; unsigned int s_mount_opt2; unsigned int s_mount_flags; unsigned int s_def_mount_opt; ext4_fsblk_t s_sb_block; atomic64_t s_resv_clusters; kuid_t s_resuid; kgid_t s_resgid; unsigned short s_mount_state; unsigned short s_pad; int s_addr_per_block_bits; int s_desc_per_block_bits; int s_inode_size; int s_first_ino; unsigned int s_inode_readahead_blks; unsigned int s_inode_goal; spinlock_t s_next_gen_lock; u32 s_next_generation; u32 s_hash_seed[4]; int s_def_hash_version; int s_hash_unsigned; /* 3 if hash should be signed, 0 if not */ struct percpu_counter s_freeclusters_counter; struct percpu_counter s_freeinodes_counter; struct percpu_counter s_dirs_counter; struct percpu_counter s_dirtyclusters_counter; struct blockgroup_lock *s_blockgroup_lock; struct proc_dir_entry *s_proc; struct kobject s_kobj; struct completion s_kobj_unregister; struct super_block *s_sb; /* Journaling */ struct journal_s *s_journal; struct list_head s_orphan; struct mutex s_orphan_lock; unsigned long s_resize_flags; /* Flags indicating if there is a resizer */ unsigned long s_commit_interval; u32 s_max_batch_time; u32 s_min_batch_time; struct block_device *journal_bdev; #ifdef CONFIG_QUOTA char *s_qf_names[EXT4_MAXQUOTAS]; /* Names of quota files with journalled quota */ int s_jquota_fmt; /* Format of quota to use */ #endif unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */ struct rb_root system_blks; #ifdef EXTENTS_STATS /* ext4 extents stats */ unsigned long s_ext_min; unsigned long s_ext_max; unsigned long s_depth_max; spinlock_t s_ext_stats_lock; unsigned long s_ext_blocks; unsigned long s_ext_extents; #endif /* for buddy 
allocator */ struct ext4_group_info ***s_group_info; struct inode *s_buddy_cache; spinlock_t s_md_lock; unsigned short *s_mb_offsets; unsigned int *s_mb_maxs; unsigned int s_group_info_size; /* tunables */ unsigned long s_stripe; unsigned int s_mb_stream_request; unsigned int s_mb_max_to_scan; unsigned int s_mb_min_to_scan; unsigned int s_mb_stats; unsigned int s_mb_order2_reqs; unsigned int s_mb_group_prealloc; unsigned int s_max_dir_size_kb; /* where last allocation was done - for stream allocation */ unsigned long s_mb_last_group; unsigned long s_mb_last_start; /* stats for buddy allocator */ atomic_t s_bal_reqs; /* number of reqs with len > 1 */ atomic_t s_bal_success; /* we found long enough chunks */ atomic_t s_bal_allocated; /* in blocks */ atomic_t s_bal_ex_scanned; /* total extents scanned */ atomic_t s_bal_goals; /* goal hits */ atomic_t s_bal_breaks; /* too long searches */ atomic_t s_bal_2orders; /* 2^order hits */ spinlock_t s_bal_lock; unsigned long s_mb_buddies_generated; unsigned long long s_mb_generation_time; atomic_t s_mb_lost_chunks; atomic_t s_mb_preallocated; atomic_t s_mb_discarded; atomic_t s_lock_busy; /* locality groups */ struct ext4_locality_group __percpu *s_locality_groups; /* for write statistics */ unsigned long s_sectors_written_start; u64 s_kbytes_written; /* the size of zero-out chunk */ unsigned int s_extent_max_zeroout_kb; unsigned int s_log_groups_per_flex; struct flex_groups *s_flex_groups; ext4_group_t s_flex_groups_allocated; /* workqueue for reserved extent conversions (buffered io) */ struct workqueue_struct *rsv_conversion_wq; /* timer for periodic error stats printing */ struct timer_list s_err_report; /* Lazy inode table initialization info */ struct ext4_li_request *s_li_request; /* Wait multiplier for lazy initialization thread */ unsigned int s_li_wait_mult; /* Kernel thread for multiple mount protection */ struct task_struct *s_mmp_tsk; /* record the last minlen when FITRIM is called. */ atomic_t s_last_trim_minblks; /* Reference to checksum algorithm driver via cryptoapi */ struct crypto_shash *s_chksum_driver; /* Precomputed FS UUID checksum for seeding other checksums */ __u32 s_csum_seed; /* Reclaim extents from extent status tree */ struct shrinker s_es_shrinker; struct list_head s_es_lru; struct ext4_es_stats s_es_stats; struct mb_cache *s_mb_cache; spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp; /* Ratelimit ext4 messages. */ struct ratelimit_state s_err_ratelimit_state; struct ratelimit_state s_warning_ratelimit_state; struct ratelimit_state s_msg_ratelimit_state; }; static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) { return sb->s_fs_info; } static inline struct ext4_inode_info *EXT4_I(struct inode *inode) { return container_of(inode, struct ext4_inode_info, vfs_inode); } static inline struct timespec ext4_current_time(struct inode *inode) { return (inode->i_sb->s_time_gran < NSEC_PER_SEC) ? 
current_fs_time(inode->i_sb) : CURRENT_TIME_SEC; } static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) { return ino == EXT4_ROOT_INO || ino == EXT4_USR_QUOTA_INO || ino == EXT4_GRP_QUOTA_INO || ino == EXT4_BOOT_LOADER_INO || ino == EXT4_JOURNAL_INO || ino == EXT4_RESIZE_INO || (ino >= EXT4_FIRST_INO(sb) && ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); } static inline void ext4_set_io_unwritten_flag(struct inode *inode, struct ext4_io_end *io_end) { if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { io_end->flag |= EXT4_IO_END_UNWRITTEN; atomic_inc(&EXT4_I(inode)->i_unwritten); } } static inline ext4_io_end_t *ext4_inode_aio(struct inode *inode) { return inode->i_private; } static inline void ext4_inode_aio_set(struct inode *inode, ext4_io_end_t *io) { inode->i_private = io; } /* * Inode dynamic state flags */ enum { EXT4_STATE_JDATA, /* journaled data exists */ EXT4_STATE_NEW, /* inode is newly created */ EXT4_STATE_XATTR, /* has in-inode xattrs */ EXT4_STATE_NO_EXPAND, /* No space for expansion */ EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */ EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */ EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/ EXT4_STATE_NEWENTRY, /* File just added to dir */ EXT4_STATE_DIOREAD_LOCK, /* Disable support for dio read nolocking */ EXT4_STATE_MAY_INLINE_DATA, /* may have in-inode data */ EXT4_STATE_ORDERED_MODE, /* data=ordered mode */ EXT4_STATE_EXT_PRECACHED, /* extents have been precached */ }; #define EXT4_INODE_BIT_FNS(name, field, offset) \ static inline int ext4_test_inode_##name(struct inode *inode, int bit) \ { \ return test_bit(bit + (offset), &EXT4_I(inode)->i_##field); \ } \ static inline void ext4_set_inode_##name(struct inode *inode, int bit) \ { \ set_bit(bit + (offset), &EXT4_I(inode)->i_##field); \ } \ static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \ { \ clear_bit(bit + (offset), &EXT4_I(inode)->i_##field); \ } /* Add these declarations here only so that these functions can be * found by name. Otherwise, they are very hard to locate. */ static inline int ext4_test_inode_flag(struct inode *inode, int bit); static inline void ext4_set_inode_flag(struct inode *inode, int bit); static inline void ext4_clear_inode_flag(struct inode *inode, int bit); EXT4_INODE_BIT_FNS(flag, flags, 0) /* Add these declarations here only so that these functions can be * found by name. Otherwise, they are very hard to locate. */ static inline int ext4_test_inode_state(struct inode *inode, int bit); static inline void ext4_set_inode_state(struct inode *inode, int bit); static inline void ext4_clear_inode_state(struct inode *inode, int bit); #if (BITS_PER_LONG < 64) EXT4_INODE_BIT_FNS(state, state_flags, 0) static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) { (ei)->i_state_flags = 0; } #else EXT4_INODE_BIT_FNS(state, flags, 32) static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) { /* We depend on the fact that callers will set i_flags */ } #endif #else /* Assume that user mode programs are passing in an ext4fs superblock, not * a kernel struct super_block. This will allow us to call the feature-test * macros from user land. 
*/ #define EXT4_SB(sb) (sb) #endif #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime /* * Codes for operating systems */ #define EXT4_OS_LINUX 0 #define EXT4_OS_HURD 1 #define EXT4_OS_MASIX 2 #define EXT4_OS_FREEBSD 3 #define EXT4_OS_LITES 4 /* * Revision levels */ #define EXT4_GOOD_OLD_REV 0 /* The good old (original) format */ #define EXT4_DYNAMIC_REV 1 /* V2 format w/ dynamic inode sizes */ #define EXT4_CURRENT_REV EXT4_GOOD_OLD_REV #define EXT4_MAX_SUPP_REV EXT4_DYNAMIC_REV #define EXT4_GOOD_OLD_INODE_SIZE 128 /* * Feature set definitions */ #define EXT4_HAS_COMPAT_FEATURE(sb,mask) \ ((EXT4_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask)) != 0) #define EXT4_HAS_RO_COMPAT_FEATURE(sb,mask) \ ((EXT4_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask)) != 0) #define EXT4_HAS_INCOMPAT_FEATURE(sb,mask) \ ((EXT4_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask)) != 0) #define EXT4_SET_COMPAT_FEATURE(sb,mask) \ EXT4_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask) #define EXT4_SET_RO_COMPAT_FEATURE(sb,mask) \ EXT4_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask) #define EXT4_SET_INCOMPAT_FEATURE(sb,mask) \ EXT4_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask) #define EXT4_CLEAR_COMPAT_FEATURE(sb,mask) \ EXT4_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask) #define EXT4_CLEAR_RO_COMPAT_FEATURE(sb,mask) \ EXT4_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask) #define EXT4_CLEAR_INCOMPAT_FEATURE(sb,mask) \ EXT4_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask) #define EXT4_FEATURE_COMPAT_DIR_PREALLOC 0x0001 #define EXT4_FEATURE_COMPAT_IMAGIC_INODES 0x0002 #define EXT4_FEATURE_COMPAT_HAS_JOURNAL 0x0004 #define EXT4_FEATURE_COMPAT_EXT_ATTR 0x0008 #define EXT4_FEATURE_COMPAT_RESIZE_INODE 0x0010 #define EXT4_FEATURE_COMPAT_DIR_INDEX 0x0020 #define EXT4_FEATURE_COMPAT_SPARSE_SUPER2 0x0200 #define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001 #define EXT4_FEATURE_RO_COMPAT_LARGE_FILE 0x0002 #define EXT4_FEATURE_RO_COMPAT_BTREE_DIR 0x0004 #define EXT4_FEATURE_RO_COMPAT_HUGE_FILE 0x0008 #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010 #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020 #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 #define EXT4_FEATURE_RO_COMPAT_QUOTA 0x0100 #define EXT4_FEATURE_RO_COMPAT_BIGALLOC 0x0200 /* * METADATA_CSUM also enables group descriptor checksums (GDT_CSUM). When * METADATA_CSUM is set, group descriptor checksums use the same algorithm as * all other data structures' checksums. However, the METADATA_CSUM and * GDT_CSUM bits are mutually exclusive. 
*/ #define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM 0x0400 #define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001 #define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002 #define EXT4_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */ #define EXT4_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */ #define EXT4_FEATURE_INCOMPAT_META_BG 0x0010 #define EXT4_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */ #define EXT4_FEATURE_INCOMPAT_64BIT 0x0080 #define EXT4_FEATURE_INCOMPAT_MMP 0x0100 #define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200 #define EXT4_FEATURE_INCOMPAT_EA_INODE 0x0400 /* EA in inode */ #define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000 /* data in dirent */ #define EXT4_FEATURE_INCOMPAT_BG_USE_META_CSUM 0x2000 /* use crc32c for bg */ #define EXT4_FEATURE_INCOMPAT_LARGEDIR 0x4000 /* >2GB or 3-lvl htree */ #define EXT4_FEATURE_INCOMPAT_INLINE_DATA 0x8000 /* data in inode */ #define EXT2_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR #define EXT2_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ EXT4_FEATURE_INCOMPAT_META_BG) #define EXT2_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ EXT4_FEATURE_RO_COMPAT_BTREE_DIR) #define EXT3_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR #define EXT3_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ EXT4_FEATURE_INCOMPAT_RECOVER| \ EXT4_FEATURE_INCOMPAT_META_BG) #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ EXT4_FEATURE_RO_COMPAT_BTREE_DIR) #define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR #define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ EXT4_FEATURE_INCOMPAT_RECOVER| \ EXT4_FEATURE_INCOMPAT_META_BG| \ EXT4_FEATURE_INCOMPAT_EXTENTS| \ EXT4_FEATURE_INCOMPAT_64BIT| \ EXT4_FEATURE_INCOMPAT_FLEX_BG| \ EXT4_FEATURE_INCOMPAT_MMP | \ EXT4_FEATURE_INCOMPAT_INLINE_DATA) #define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \ EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \ EXT4_FEATURE_RO_COMPAT_BTREE_DIR |\ EXT4_FEATURE_RO_COMPAT_HUGE_FILE |\ EXT4_FEATURE_RO_COMPAT_BIGALLOC |\ EXT4_FEATURE_RO_COMPAT_METADATA_CSUM|\ EXT4_FEATURE_RO_COMPAT_QUOTA) /* * Default values for user and/or group using reserved blocks */ #define EXT4_DEF_RESUID 0 #define EXT4_DEF_RESGID 0 #define EXT4_DEF_INODE_READAHEAD_BLKS 32 /* * Default mount options */ #define EXT4_DEFM_DEBUG 0x0001 #define EXT4_DEFM_BSDGROUPS 0x0002 #define EXT4_DEFM_XATTR_USER 0x0004 #define EXT4_DEFM_ACL 0x0008 #define EXT4_DEFM_UID16 0x0010 #define EXT4_DEFM_JMODE 0x0060 #define EXT4_DEFM_JMODE_DATA 0x0020 #define EXT4_DEFM_JMODE_ORDERED 0x0040 #define EXT4_DEFM_JMODE_WBACK 0x0060 #define EXT4_DEFM_NOBARRIER 0x0100 #define EXT4_DEFM_BLOCK_VALIDITY 0x0200 #define EXT4_DEFM_DISCARD 0x0400 #define EXT4_DEFM_NODELALLOC 0x0800 /* * Default journal batch times */ #define EXT4_DEF_MIN_BATCH_TIME 0 #define EXT4_DEF_MAX_BATCH_TIME 15000 /* 15ms */ /* * Minimum number of groups in a flexgroup before we separate out * directories into the first block group of a flexgroup */ #define EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME 4 /* * Structure of a directory entry */ #define EXT4_NAME_LEN 255 struct ext4_dir_entry { __le32 inode; /* Inode number */ __le16 rec_len; /* Directory entry length */ __le16 name_len; /* Name length */ char name[EXT4_NAME_LEN]; /* File name */ }; /* * The new version of the directory entry. 
Since EXT4 structures are * stored in intel byte order, and the name_len field could never be * bigger than 255 chars, it's safe to reclaim the extra byte for the * file_type field. */ struct ext4_dir_entry_2 { __le32 inode; /* Inode number */ __le16 rec_len; /* Directory entry length */ __u8 name_len; /* Name length */ __u8 file_type; char name[EXT4_NAME_LEN]; /* File name */ }; /* * This is a bogus directory entry at the end of each leaf block that * records checksums. */ struct ext4_dir_entry_tail { __le32 det_reserved_zero1; /* Pretend to be unused */ __le16 det_rec_len; /* 12 */ __u8 det_reserved_zero2; /* Zero name length */ __u8 det_reserved_ft; /* 0xDE, fake file type */ __le32 det_checksum; /* crc32c(uuid+inum+dirblock) */ }; #define EXT4_DIRENT_TAIL(block, blocksize) \ ((struct ext4_dir_entry_tail *)(((void *)(block)) + \ ((blocksize) - \ sizeof(struct ext4_dir_entry_tail)))) /* * Ext4 directory file types. Only the low 3 bits are used. The * other bits are reserved for now. */ #define EXT4_FT_UNKNOWN 0 #define EXT4_FT_REG_FILE 1 #define EXT4_FT_DIR 2 #define EXT4_FT_CHRDEV 3 #define EXT4_FT_BLKDEV 4 #define EXT4_FT_FIFO 5 #define EXT4_FT_SOCK 6 #define EXT4_FT_SYMLINK 7 #define EXT4_FT_MAX 8 #define EXT4_FT_DIR_CSUM 0xDE /* * EXT4_DIR_PAD defines the directory entries boundaries * * NOTE: It must be a multiple of 4 */ #define EXT4_DIR_PAD 4 #define EXT4_DIR_ROUND (EXT4_DIR_PAD - 1) #define EXT4_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT4_DIR_ROUND) & \ ~EXT4_DIR_ROUND) #define EXT4_MAX_REC_LEN ((1<<16)-1) /* * If we ever get support for fs block sizes > page_size, we'll need * to remove the #if statements in the next two functions... */ static inline unsigned int ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize) { unsigned len = le16_to_cpu(dlen); #if (PAGE_CACHE_SIZE >= 65536) if (len == EXT4_MAX_REC_LEN || len == 0) return blocksize; return (len & 65532) | ((len & 3) << 16); #else return len; #endif } static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize) { if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3)) BUG(); #if (PAGE_CACHE_SIZE >= 65536) if (len < 65536) return cpu_to_le16(len); if (len == blocksize) { if (blocksize == 65536) return cpu_to_le16(EXT4_MAX_REC_LEN); else return cpu_to_le16(0); } return cpu_to_le16((len & 65532) | ((len >> 16) & 3)); #else return cpu_to_le16(len); #endif } /* * Hash Tree Directory indexing * (c) Daniel Phillips, 2001 */ #define is_dx(dir) (EXT4_HAS_COMPAT_FEATURE(dir->i_sb, \ EXT4_FEATURE_COMPAT_DIR_INDEX) && \ ext4_test_inode_flag((dir), EXT4_INODE_INDEX)) #define EXT4_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT4_LINK_MAX) #define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1) /* Legal values for the dx_root hash_version field: */ #define DX_HASH_LEGACY 0 #define DX_HASH_HALF_MD4 1 #define DX_HASH_TEA 2 #define DX_HASH_LEGACY_UNSIGNED 3 #define DX_HASH_HALF_MD4_UNSIGNED 4 #define DX_HASH_TEA_UNSIGNED 5 static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc, const void *address, unsigned int length) { struct { struct shash_desc shash; char ctx[4]; } desc; int err; BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver)!=sizeof(desc.ctx)); desc.shash.tfm = sbi->s_chksum_driver; desc.shash.flags = 0; *(u32 *)desc.ctx = crc; err = crypto_shash_update(&desc.shash, address, length); BUG_ON(err); return *(u32 *)desc.ctx; } #ifdef __KERNEL__ /* hash info structure used by the directory hash */ struct dx_hash_info { u32 hash; u32 minor_hash; int hash_version; u32 *seed; 
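	/*
	 * Usage sketch (illustrative): callers typically seed this structure
	 * from the superblock before hashing a name, e.g.
	 *	hinfo.hash_version = EXT4_SB(sb)->s_def_hash_version;
	 *	hinfo.seed = EXT4_SB(sb)->s_hash_seed;
	 *	ext4fs_dirhash(name, namelen, &hinfo);
	 * ext4 then uses hash (and minor_hash) to locate the htree leaf
	 * block to search.
	 */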
}; /* 32 and 64 bit signed EOF for dx directories */ #define EXT4_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1) #define EXT4_HTREE_EOF_64BIT ((1ULL << (64 - 1)) - 1) /* * Control parameters used by ext4_htree_next_block */ #define HASH_NB_ALWAYS 1 /* * Describe an inode's exact location on disk and in memory */ struct ext4_iloc { struct buffer_head *bh; unsigned long offset; ext4_group_t block_group; }; static inline struct ext4_inode *ext4_raw_inode(struct ext4_iloc *iloc) { return (struct ext4_inode *) (iloc->bh->b_data + iloc->offset); } /* * This structure is stuffed into the struct file's private_data field * for directories. It is where we put information so that we can do * readdir operations in hash tree order. */ struct dir_private_info { struct rb_root root; struct rb_node *curr_node; struct fname *extra_fname; loff_t last_pos; __u32 curr_hash; __u32 curr_minor_hash; __u32 next_hash; }; /* calculate the first block number of the group */ static inline ext4_fsblk_t ext4_group_first_block_no(struct super_block *sb, ext4_group_t group_no) { return group_no * (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); } /* * Special error return code only used by dx_probe() and its callers. */ #define ERR_BAD_DX_DIR (-(MAX_ERRNO - 1)) /* * Timeout and state flag for lazy initialization inode thread. */ #define EXT4_DEF_LI_WAIT_MULT 10 #define EXT4_DEF_LI_MAX_START_DELAY 5 #define EXT4_LAZYINIT_QUIT 0x0001 #define EXT4_LAZYINIT_RUNNING 0x0002 /* * Lazy inode table initialization info */ struct ext4_lazy_init { unsigned long li_state; struct list_head li_request_list; struct mutex li_list_mtx; }; struct ext4_li_request { struct super_block *lr_super; struct ext4_sb_info *lr_sbi; ext4_group_t lr_next_group; struct list_head lr_request; unsigned long lr_next_sched; unsigned long lr_timeout; }; struct ext4_features { struct kobject f_kobj; struct completion f_kobj_unregister; }; /* * This structure will be used for multiple mount protection. It will be * written into the block number saved in the s_mmp_block field in the * superblock. Programs that check MMP should assume that if * SEQ_FSCK (or any unknown code above SEQ_MAX) is present then it is NOT safe * to use the filesystem, regardless of how old the timestamp is. */ #define EXT4_MMP_MAGIC 0x004D4D50U /* ASCII for MMP */ #define EXT4_MMP_SEQ_CLEAN 0xFF4D4D50U /* mmp_seq value for clean unmount */ #define EXT4_MMP_SEQ_FSCK 0xE24D4D50U /* mmp_seq value when being fscked */ #define EXT4_MMP_SEQ_MAX 0xE24D4D4FU /* maximum valid mmp_seq value */ struct mmp_struct { __le32 mmp_magic; /* Magic number for MMP */ __le32 mmp_seq; /* Sequence no. updated periodically */ /* * mmp_time, mmp_nodename & mmp_bdevname are only used for information * purposes and do not affect the correctness of the algorithm */ __le64 mmp_time; /* Time last updated */ char mmp_nodename[64]; /* Node which last updated MMP block */ char mmp_bdevname[32]; /* Bdev which last updated MMP block */ /* * mmp_check_interval is used to verify if the MMP block has been * updated on the block device. The value is updated based on the * maximum time to write the MMP block during an update cycle. 
*/ __le16 mmp_check_interval; __le16 mmp_pad1; __le32 mmp_pad2[226]; __le32 mmp_checksum; /* crc32c(uuid+mmp_block) */ }; /* arguments passed to the mmp thread */ struct mmpd_data { struct buffer_head *bh; /* bh from initial read_mmp_block() */ struct super_block *sb; /* super block of the fs */ }; /* * Check interval multiplier * The MMP block is written every update interval and initially checked every * update interval x the multiplier (the value is then adapted based on the * write latency). The reason is that writes can be delayed under load and we * don't want readers to incorrectly assume that the filesystem is no longer * in use. */ #define EXT4_MMP_CHECK_MULT 2UL /* * Minimum interval for MMP checking in seconds. */ #define EXT4_MMP_MIN_CHECK_INTERVAL 5UL /* * Maximum interval for MMP checking in seconds. */ #define EXT4_MMP_MAX_CHECK_INTERVAL 300UL /* * Function prototypes */ /* * Ok, these declarations are also in <linux/kernel.h> but none of the * ext4 source programs needs to include it so they are duplicated here. */ # define NORET_TYPE /**/ # define ATTRIB_NORET __attribute__((noreturn)) # define NORET_AND noreturn, /* bitmap.c */ extern unsigned int ext4_count_free(char *bitmap, unsigned numchars); void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group, struct ext4_group_desc *gdp, struct buffer_head *bh, int sz); int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, struct ext4_group_desc *gdp, struct buffer_head *bh, int sz); void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group, struct ext4_group_desc *gdp, struct buffer_head *bh); int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, struct ext4_group_desc *gdp, struct buffer_head *bh); /* balloc.c */ extern void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr, ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp); extern ext4_group_t ext4_get_group_number(struct super_block *sb, ext4_fsblk_t block); extern unsigned int ext4_block_group(struct super_block *sb, ext4_fsblk_t blocknr); extern ext4_grpblk_t ext4_block_group_offset(struct super_block *sb, ext4_fsblk_t blocknr); extern int ext4_bg_has_super(struct super_block *sb, ext4_group_t group); extern unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group); extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, ext4_fsblk_t goal, unsigned int flags, unsigned long *count, int *errp); extern int ext4_claim_free_clusters(struct ext4_sb_info *sbi, s64 nclusters, unsigned int flags); extern ext4_fsblk_t ext4_count_free_clusters(struct super_block *); extern void ext4_check_blocks_bitmap(struct super_block *); extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb, ext4_group_t block_group, struct buffer_head ** bh); extern int ext4_should_retry_alloc(struct super_block *sb, int *retries); extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group); extern int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group, struct buffer_head *bh); extern struct buffer_head *ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group); extern unsigned ext4_free_clusters_after_init(struct super_block *sb, ext4_group_t block_group, struct ext4_group_desc *gdp); ext4_fsblk_t ext4_inode_to_goal_block(struct inode *); /* dir.c */ extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *, struct file *, struct 
ext4_dir_entry_2 *, struct buffer_head *, char *, int, unsigned int); #define ext4_check_dir_entry(dir, filp, de, bh, buf, size, offset) \ unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \ (de), (bh), (buf), (size), (offset))) extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, __u32 minor_hash, struct ext4_dir_entry_2 *dirent); extern void ext4_htree_free_dir_info(struct dir_private_info *p); extern int ext4_find_dest_de(struct inode *dir, struct inode *inode, struct buffer_head *bh, void *buf, int buf_size, const char *name, int namelen, struct ext4_dir_entry_2 **dest_de); void ext4_insert_dentry(struct inode *inode, struct ext4_dir_entry_2 *de, int buf_size, const char *name, int namelen); static inline void ext4_update_dx_flag(struct inode *inode) { if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) ext4_clear_inode_flag(inode, EXT4_INODE_INDEX); } static unsigned char ext4_filetype_table[] = { DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK }; static inline unsigned char get_dtype(struct super_block *sb, int filetype) { if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE) || (filetype >= EXT4_FT_MAX)) return DT_UNKNOWN; return ext4_filetype_table[filetype]; } extern int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf, int buf_size); /* fsync.c */ extern int ext4_sync_file(struct file *, loff_t, loff_t, int); /* hash.c */ extern int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo); /* ialloc.c */ extern struct inode *__ext4_new_inode(handle_t *, struct inode *, umode_t, const struct qstr *qstr, __u32 goal, uid_t *owner, int handle_type, unsigned int line_no, int nblocks); #define ext4_new_inode(handle, dir, mode, qstr, goal, owner) \ __ext4_new_inode((handle), (dir), (mode), (qstr), (goal), (owner), \ 0, 0, 0) #define ext4_new_inode_start_handle(dir, mode, qstr, goal, owner, \ type, nblocks) \ __ext4_new_inode(NULL, (dir), (mode), (qstr), (goal), (owner), \ (type), __LINE__, (nblocks)) extern void ext4_free_inode(handle_t *, struct inode *); extern struct inode * ext4_orphan_get(struct super_block *, unsigned long); extern unsigned long ext4_count_free_inodes(struct super_block *); extern unsigned long ext4_count_dirs(struct super_block *); extern void ext4_check_inodes_bitmap(struct super_block *); extern void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap); extern int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, int barrier); extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate); /* mballoc.c */ extern long ext4_mb_stats; extern long ext4_mb_max_to_scan; extern int ext4_mb_init(struct super_block *); extern int ext4_mb_release(struct super_block *); extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *, struct ext4_allocation_request *, int *); extern int ext4_mb_reserve_blocks(struct super_block *, int); extern void ext4_discard_preallocations(struct inode *); extern int __init ext4_init_mballoc(void); extern void ext4_exit_mballoc(void); extern void ext4_free_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t block, unsigned long count, int flags); extern int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups); extern int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t i, struct ext4_group_desc *desc); extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, ext4_fsblk_t block, unsigned long count); extern int 
ext4_trim_fs(struct super_block *, struct fstrim_range *); /* inode.c */ struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int); struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int); int ext4_get_block_write(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); int ext4_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create); int ext4_walk_page_buffers(handle_t *handle, struct buffer_head *head, unsigned from, unsigned to, int *partial, int (*fn)(handle_t *handle, struct buffer_head *bh)); int do_journal_get_write_access(handle_t *handle, struct buffer_head *bh); #define FALL_BACK_TO_NONDELALLOC 1 #define CONVERT_INLINE_DATA 2 extern struct inode *ext4_iget(struct super_block *, unsigned long); extern struct inode *ext4_iget_normal(struct super_block *, unsigned long); extern int ext4_write_inode(struct inode *, struct writeback_control *); extern int ext4_setattr(struct dentry *, struct iattr *); extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); extern void ext4_evict_inode(struct inode *); extern void ext4_clear_inode(struct inode *); extern int ext4_sync_inode(handle_t *, struct inode *); extern void ext4_dirty_inode(struct inode *, int); extern int ext4_change_inode_journal_flag(struct inode *, int); extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); extern int ext4_inode_attach_jinode(struct inode *inode); extern int ext4_can_truncate(struct inode *inode); extern void ext4_truncate(struct inode *); extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length); extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks); extern void ext4_set_inode_flags(struct inode *); extern void ext4_get_inode_flags(struct ext4_inode_info *); extern int ext4_alloc_da_blocks(struct inode *inode); extern void ext4_set_aops(struct inode *inode); extern int ext4_writepage_trans_blocks(struct inode *); extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, loff_t lstart, loff_t lend); extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); extern qsize_t *ext4_get_reserved_space(struct inode *inode); extern void ext4_da_update_reserve_space(struct inode *inode, int used, int quota_claim); /* indirect.c */ extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags); extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t offset); extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock); extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks); extern void ext4_ind_truncate(handle_t *, struct inode *inode); extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode, ext4_lblk_t start, ext4_lblk_t end); /* ioctl.c */ extern long ext4_ioctl(struct file *, unsigned int, unsigned long); extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long); /* migrate.c */ extern int ext4_ext_migrate(struct inode *); extern int ext4_ind_migrate(struct inode *inode); /* namei.c */ extern int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent); extern int ext4_orphan_add(handle_t *, struct inode *); extern int ext4_orphan_del(handle_t *, struct inode 
*); extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, __u32 start_minor_hash, __u32 *next_hash); extern int search_dir(struct buffer_head *bh, char *search_buf, int buf_size, struct inode *dir, const struct qstr *d_name, unsigned int offset, struct ext4_dir_entry_2 **res_dir); extern int ext4_generic_delete_entry(handle_t *handle, struct inode *dir, struct ext4_dir_entry_2 *de_del, struct buffer_head *bh, void *entry_buf, int buf_size, int csum_size); /* resize.c */ extern int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input); extern int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, ext4_fsblk_t n_blocks_count); extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count); /* super.c */ extern int ext4_calculate_overhead(struct super_block *sb); extern void ext4_superblock_csum_set(struct super_block *sb); extern void *ext4_kvmalloc(size_t size, gfp_t flags); extern void *ext4_kvzalloc(size_t size, gfp_t flags); extern void ext4_kvfree(void *ptr); extern int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup); extern const char *ext4_decode_error(struct super_block *sb, int errno, char nbuf[16]); extern __printf(4, 5) void __ext4_error(struct super_block *, const char *, unsigned int, const char *, ...); extern __printf(5, 6) void __ext4_error_inode(struct inode *, const char *, unsigned int, ext4_fsblk_t, const char *, ...); extern __printf(5, 6) void __ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t, const char *, ...); extern void __ext4_std_error(struct super_block *, const char *, unsigned int, int); extern __printf(4, 5) void __ext4_abort(struct super_block *, const char *, unsigned int, const char *, ...); extern __printf(4, 5) void __ext4_warning(struct super_block *, const char *, unsigned int, const char *, ...); extern __printf(3, 4) void __ext4_msg(struct super_block *, const char *, const char *, ...); extern void __dump_mmp_msg(struct super_block *, struct mmp_struct *mmp, const char *, unsigned int, const char *); extern __printf(7, 8) void __ext4_grp_locked_error(const char *, unsigned int, struct super_block *, ext4_group_t, unsigned long, ext4_fsblk_t, const char *, ...); #ifdef CONFIG_PRINTK #define ext4_error_inode(inode, func, line, block, fmt, ...) \ __ext4_error_inode(inode, func, line, block, fmt, ##__VA_ARGS__) #define ext4_error_file(file, func, line, block, fmt, ...) \ __ext4_error_file(file, func, line, block, fmt, ##__VA_ARGS__) #define ext4_error(sb, fmt, ...) \ __ext4_error(sb, __func__, __LINE__, fmt, ##__VA_ARGS__) #define ext4_abort(sb, fmt, ...) \ __ext4_abort(sb, __func__, __LINE__, fmt, ##__VA_ARGS__) #define ext4_warning(sb, fmt, ...) \ __ext4_warning(sb, __func__, __LINE__, fmt, ##__VA_ARGS__) #define ext4_msg(sb, level, fmt, ...) \ __ext4_msg(sb, level, fmt, ##__VA_ARGS__) #define dump_mmp_msg(sb, mmp, msg) \ __dump_mmp_msg(sb, mmp, __func__, __LINE__, msg) #define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...) \ __ext4_grp_locked_error(__func__, __LINE__, sb, grp, ino, block, \ fmt, ##__VA_ARGS__) #else #define ext4_error_inode(inode, func, line, block, fmt, ...) \ do { \ no_printk(fmt, ##__VA_ARGS__); \ __ext4_error_inode(inode, "", 0, block, " "); \ } while (0) #define ext4_error_file(file, func, line, block, fmt, ...) \ do { \ no_printk(fmt, ##__VA_ARGS__); \ __ext4_error_file(file, "", 0, block, " "); \ } while (0) #define ext4_error(sb, fmt, ...) 
\ do { \ no_printk(fmt, ##__VA_ARGS__); \ __ext4_error(sb, "", 0, " "); \ } while (0) #define ext4_abort(sb, fmt, ...) \ do { \ no_printk(fmt, ##__VA_ARGS__); \ __ext4_abort(sb, "", 0, " "); \ } while (0) #define ext4_warning(sb, fmt, ...) \ do { \ no_printk(fmt, ##__VA_ARGS__); \ __ext4_warning(sb, "", 0, " "); \ } while (0) #define ext4_msg(sb, level, fmt, ...) \ do { \ no_printk(fmt, ##__VA_ARGS__); \ __ext4_msg(sb, "", " "); \ } while (0) #define dump_mmp_msg(sb, mmp, msg) \ __dump_mmp_msg(sb, mmp, "", 0, "") #define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...) \ do { \ no_printk(fmt, ##__VA_ARGS__); \ __ext4_grp_locked_error("", 0, sb, grp, ino, block, " "); \ } while (0) #endif extern void ext4_update_dynamic_rev(struct super_block *sb); extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb, __u32 compat); extern int ext4_update_rocompat_feature(handle_t *handle, struct super_block *sb, __u32 rocompat); extern int ext4_update_incompat_feature(handle_t *handle, struct super_block *sb, __u32 incompat); extern ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, struct ext4_group_desc *bg); extern ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb, struct ext4_group_desc *bg); extern ext4_fsblk_t ext4_inode_table(struct super_block *sb, struct ext4_group_desc *bg); extern __u32 ext4_free_group_clusters(struct super_block *sb, struct ext4_group_desc *bg); extern __u32 ext4_free_inodes_count(struct super_block *sb, struct ext4_group_desc *bg); extern __u32 ext4_used_dirs_count(struct super_block *sb, struct ext4_group_desc *bg); extern __u32 ext4_itable_unused_count(struct super_block *sb, struct ext4_group_desc *bg); extern void ext4_block_bitmap_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk); extern void ext4_inode_bitmap_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk); extern void ext4_inode_table_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk); extern void ext4_free_group_clusters_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count); extern void ext4_free_inodes_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count); extern void ext4_used_dirs_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count); extern void ext4_itable_unused_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count); extern int ext4_group_desc_csum_verify(struct super_block *sb, __u32 group, struct ext4_group_desc *gdp); extern void ext4_group_desc_csum_set(struct super_block *sb, __u32 group, struct ext4_group_desc *gdp); extern int ext4_register_li_request(struct super_block *sb, ext4_group_t first_not_zeroed); static inline int ext4_has_group_desc_csum(struct super_block *sb) { return EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) || (EXT4_SB(sb)->s_chksum_driver != NULL); } static inline int ext4_has_metadata_csum(struct super_block *sb) { WARN_ON_ONCE(EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && !EXT4_SB(sb)->s_chksum_driver); return (EXT4_SB(sb)->s_chksum_driver != NULL); } static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es) { return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) | le32_to_cpu(es->s_blocks_count_lo); } static inline ext4_fsblk_t ext4_r_blocks_count(struct ext4_super_block *es) { return ((ext4_fsblk_t)le32_to_cpu(es->s_r_blocks_count_hi) << 32) | le32_to_cpu(es->s_r_blocks_count_lo); } static inline ext4_fsblk_t ext4_free_blocks_count(struct 
ext4_super_block *es) { return ((ext4_fsblk_t)le32_to_cpu(es->s_free_blocks_count_hi) << 32) | le32_to_cpu(es->s_free_blocks_count_lo); } static inline void ext4_blocks_count_set(struct ext4_super_block *es, ext4_fsblk_t blk) { es->s_blocks_count_lo = cpu_to_le32((u32)blk); es->s_blocks_count_hi = cpu_to_le32(blk >> 32); } static inline void ext4_free_blocks_count_set(struct ext4_super_block *es, ext4_fsblk_t blk) { es->s_free_blocks_count_lo = cpu_to_le32((u32)blk); es->s_free_blocks_count_hi = cpu_to_le32(blk >> 32); } static inline void ext4_r_blocks_count_set(struct ext4_super_block *es, ext4_fsblk_t blk) { es->s_r_blocks_count_lo = cpu_to_le32((u32)blk); es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32); } static inline loff_t ext4_isize(struct ext4_inode *raw_inode) { if (S_ISREG(le16_to_cpu(raw_inode->i_mode))) return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) | le32_to_cpu(raw_inode->i_size_lo); else return (loff_t) le32_to_cpu(raw_inode->i_size_lo); } static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size) { raw_inode->i_size_lo = cpu_to_le32(i_size); raw_inode->i_size_high = cpu_to_le32(i_size >> 32); } static inline struct ext4_group_info *ext4_get_group_info(struct super_block *sb, ext4_group_t group) { struct ext4_group_info ***grp_info; long indexv, indexh; BUG_ON(group >= EXT4_SB(sb)->s_groups_count); grp_info = EXT4_SB(sb)->s_group_info; indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); return grp_info[indexv][indexh]; } /* * Reading s_groups_count requires using smp_rmb() afterwards. See * the locking protocol documented in the comments of ext4_group_add() * in resize.c */ static inline ext4_group_t ext4_get_groups_count(struct super_block *sb) { ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; smp_rmb(); return ngroups; } static inline ext4_group_t ext4_flex_group(struct ext4_sb_info *sbi, ext4_group_t block_group) { return block_group >> sbi->s_log_groups_per_flex; } static inline unsigned int ext4_flex_bg_size(struct ext4_sb_info *sbi) { return 1 << sbi->s_log_groups_per_flex; } #define ext4_std_error(sb, errno) \ do { \ if ((errno)) \ __ext4_std_error((sb), __func__, __LINE__, (errno)); \ } while (0) #ifdef CONFIG_SMP /* Each CPU can accumulate percpu_counter_batch clusters in their local * counters. So we need to make sure we have free clusters more * than percpu_counter_batch * nr_cpu_ids. Also add a window of 4 times. */ #define EXT4_FREECLUSTERS_WATERMARK (4 * (percpu_counter_batch * nr_cpu_ids)) #else #define EXT4_FREECLUSTERS_WATERMARK 0 #endif /* Update i_disksize. Requires i_mutex to avoid races with truncate */ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize) { WARN_ON_ONCE(S_ISREG(inode->i_mode) && !mutex_is_locked(&inode->i_mutex)); down_write(&EXT4_I(inode)->i_data_sem); if (newsize > EXT4_I(inode)->i_disksize) EXT4_I(inode)->i_disksize = newsize; up_write(&EXT4_I(inode)->i_data_sem); } /* Update i_size, i_disksize. 
Requires i_mutex to avoid races with truncate */ static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize) { int changed = 0; if (newsize > inode->i_size) { i_size_write(inode, newsize); changed = 1; } if (newsize > EXT4_I(inode)->i_disksize) { ext4_update_i_disksize(inode, newsize); changed |= 2; } return changed; } struct ext4_group_info { unsigned long bb_state; struct rb_root bb_free_root; ext4_grpblk_t bb_first_free; /* first free block */ ext4_grpblk_t bb_free; /* total free blocks */ ext4_grpblk_t bb_fragments; /* nr of freespace fragments */ ext4_grpblk_t bb_largest_free_order;/* order of largest frag in BG */ struct list_head bb_prealloc_list; #ifdef DOUBLE_CHECK void *bb_bitmap; #endif struct rw_semaphore alloc_sem; ext4_grpblk_t bb_counters[]; /* Nr of free power-of-two-block * regions, index is order. * bb_counters[3] = 5 means * 5 free 8-block regions. */ }; #define EXT4_GROUP_INFO_NEED_INIT_BIT 0 #define EXT4_GROUP_INFO_WAS_TRIMMED_BIT 1 #define EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT 2 #define EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT 3 #define EXT4_MB_GRP_NEED_INIT(grp) \ (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state))) #define EXT4_MB_GRP_BBITMAP_CORRUPT(grp) \ (test_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &((grp)->bb_state))) #define EXT4_MB_GRP_IBITMAP_CORRUPT(grp) \ (test_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &((grp)->bb_state))) #define EXT4_MB_GRP_WAS_TRIMMED(grp) \ (test_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state))) #define EXT4_MB_GRP_SET_TRIMMED(grp) \ (set_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state))) #define EXT4_MB_GRP_CLEAR_TRIMMED(grp) \ (clear_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state))) #define EXT4_MAX_CONTENTION 8 #define EXT4_CONTENTION_THRESHOLD 2 static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb, ext4_group_t group) { return bgl_lock_ptr(EXT4_SB(sb)->s_blockgroup_lock, group); } /* * Returns true if the filesystem is busy enough that attempts to * access the block group locks has run into contention. */ static inline int ext4_fs_is_busy(struct ext4_sb_info *sbi) { return (atomic_read(&sbi->s_lock_busy) > EXT4_CONTENTION_THRESHOLD); } static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group) { spinlock_t *lock = ext4_group_lock_ptr(sb, group); if (spin_trylock(lock)) /* * We're able to grab the lock right away, so drop the * lock contention counter. */ atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0); else { /* * The lock is busy, so bump the contention counter, * and then wait on the spin lock. 
*/ atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, 1, EXT4_MAX_CONTENTION); spin_lock(lock); } } static inline void ext4_unlock_group(struct super_block *sb, ext4_group_t group) { spin_unlock(ext4_group_lock_ptr(sb, group)); } /* * Block validity checking */ #define ext4_check_indirect_blockref(inode, bh) \ ext4_check_blockref(__func__, __LINE__, inode, \ (__le32 *)(bh)->b_data, \ EXT4_ADDR_PER_BLOCK((inode)->i_sb)) #define ext4_ind_check_inode(inode) \ ext4_check_blockref(__func__, __LINE__, inode, \ EXT4_I(inode)->i_data, \ EXT4_NDIR_BLOCKS) /* * Inodes and files operations */ /* dir.c */ extern const struct file_operations ext4_dir_operations; /* file.c */ extern const struct inode_operations ext4_file_inode_operations; extern const struct file_operations ext4_file_operations; extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin); /* inline.c */ extern int ext4_get_max_inline_size(struct inode *inode); extern int ext4_find_inline_data_nolock(struct inode *inode); extern int ext4_init_inline_data(handle_t *handle, struct inode *inode, unsigned int len); extern int ext4_destroy_inline_data(handle_t *handle, struct inode *inode); extern int ext4_readpage_inline(struct inode *inode, struct page *page); extern int ext4_try_to_write_inline_data(struct address_space *mapping, struct inode *inode, loff_t pos, unsigned len, unsigned flags, struct page **pagep); extern int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, unsigned copied, struct page *page); extern struct buffer_head * ext4_journalled_write_inline_data(struct inode *inode, unsigned len, struct page *page); extern int ext4_da_write_inline_data_begin(struct address_space *mapping, struct inode *inode, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); extern int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, unsigned copied, struct page *page); extern int ext4_try_add_inline_entry(handle_t *handle, struct dentry *dentry, struct inode *inode); extern int ext4_try_create_inline_dir(handle_t *handle, struct inode *parent, struct inode *inode); extern int ext4_read_inline_dir(struct file *filp, struct dir_context *ctx, int *has_inline_data); extern int htree_inlinedir_to_tree(struct file *dir_file, struct inode *dir, ext4_lblk_t block, struct dx_hash_info *hinfo, __u32 start_hash, __u32 start_minor_hash, int *has_inline_data); extern struct buffer_head *ext4_find_inline_entry(struct inode *dir, const struct qstr *d_name, struct ext4_dir_entry_2 **res_dir, int *has_inline_data); extern int ext4_delete_inline_entry(handle_t *handle, struct inode *dir, struct ext4_dir_entry_2 *de_del, struct buffer_head *bh, int *has_inline_data); extern int empty_inline_dir(struct inode *dir, int *has_inline_data); extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode, struct ext4_dir_entry_2 **parent_de, int *retval); extern int ext4_inline_data_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, int *has_inline); extern int ext4_try_to_evict_inline_data(handle_t *handle, struct inode *inode, int needed); extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline); extern int ext4_convert_inline_data(struct inode *inode); static inline int ext4_has_inline_data(struct inode *inode) { return ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) && EXT4_I(inode)->i_inline_off; } /* namei.c */ extern const struct inode_operations ext4_dir_inode_operations; extern const struct inode_operations 
ext4_special_inode_operations; extern struct dentry *ext4_get_parent(struct dentry *child); extern struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode, struct ext4_dir_entry_2 *de, int blocksize, int csum_size, unsigned int parent_ino, int dotdot_real_len); extern void initialize_dirent_tail(struct ext4_dir_entry_tail *t, unsigned int blocksize); extern int ext4_handle_dirty_dirent_node(handle_t *handle, struct inode *inode, struct buffer_head *bh); #define S_SHIFT 12 static unsigned char ext4_type_by_mode[S_IFMT >> S_SHIFT] = { [S_IFREG >> S_SHIFT] = EXT4_FT_REG_FILE, [S_IFDIR >> S_SHIFT] = EXT4_FT_DIR, [S_IFCHR >> S_SHIFT] = EXT4_FT_CHRDEV, [S_IFBLK >> S_SHIFT] = EXT4_FT_BLKDEV, [S_IFIFO >> S_SHIFT] = EXT4_FT_FIFO, [S_IFSOCK >> S_SHIFT] = EXT4_FT_SOCK, [S_IFLNK >> S_SHIFT] = EXT4_FT_SYMLINK, }; static inline void ext4_set_de_type(struct super_block *sb, struct ext4_dir_entry_2 *de, umode_t mode) { if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE)) de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT]; } /* symlink.c */ extern const struct inode_operations ext4_symlink_inode_operations; extern const struct inode_operations ext4_fast_symlink_inode_operations; /* block_validity */ extern void ext4_release_system_zone(struct super_block *sb); extern int ext4_setup_system_zone(struct super_block *sb); extern int __init ext4_init_system_zone(void); extern void ext4_exit_system_zone(void); extern int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk, unsigned int count); extern int ext4_check_blockref(const char *, unsigned int, struct inode *, __le32 *, unsigned int); /* extents.c */ struct ext4_ext_path; struct ext4_extent; /* * Maximum number of logical blocks in a file; ext4_extent's ee_block is * __le32. 
*/ #define EXT_MAX_BLOCKS 0xffffffff extern int ext4_ext_tree_init(handle_t *handle, struct inode *); extern int ext4_ext_writepage_trans_blocks(struct inode *, int); extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents); extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags); extern void ext4_ext_truncate(handle_t *, struct inode *); extern int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end); extern void ext4_ext_init(struct super_block *); extern void ext4_ext_release(struct super_block *); extern long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len); extern int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, loff_t offset, ssize_t len); extern int ext4_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags); extern int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblocks); extern int ext4_extent_tree_init(handle_t *, struct inode *); extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int num, struct ext4_ext_path *path); extern int ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, struct ext4_extent *ex2); extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path **, struct ext4_extent *, int); extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t, struct ext4_ext_path **, int flags); extern void ext4_ext_drop_refs(struct ext4_ext_path *); extern int ext4_ext_check_inode(struct inode *inode); extern int ext4_find_delalloc_range(struct inode *inode, ext4_lblk_t lblk_start, ext4_lblk_t lblk_end); extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk); extern ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path); extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len); extern int ext4_ext_precache(struct inode *inode); extern int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len); extern int ext4_swap_extents(handle_t *handle, struct inode *inode1, struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2, ext4_lblk_t count, int mark_unwritten,int *err); /* move_extent.c */ extern void ext4_double_down_write_data_sem(struct inode *first, struct inode *second); extern void ext4_double_up_write_data_sem(struct inode *orig_inode, struct inode *donor_inode); extern int ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 start_orig, __u64 start_donor, __u64 len, __u64 *moved_len); /* page-io.c */ extern int __init ext4_init_pageio(void); extern void ext4_exit_pageio(void); extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); extern ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end); extern int ext4_put_io_end(ext4_io_end_t *io_end); extern void ext4_put_io_end_defer(ext4_io_end_t *io_end); extern void ext4_io_submit_init(struct ext4_io_submit *io, struct writeback_control *wbc); extern void ext4_end_io_rsv_work(struct work_struct *work); extern void ext4_io_submit(struct ext4_io_submit *io); extern int ext4_bio_write_page(struct ext4_io_submit *io, struct page *page, int len, struct writeback_control *wbc, bool keep_towrite); /* mmp.c */ extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t); /* * Note that these flags will never ever appear in a buffer_head's state flag. * See EXT4_MAP_... to see where this is used. 
*/ enum ext4_state_bits { BH_AllocFromCluster /* allocated blocks were part of already * allocated cluster. */ = BH_JBDPrivateStart }; /* * Add new method to test whether block and inode bitmaps are properly * initialized. With uninit_bg reading the block from disk is not enough * to mark the bitmap uptodate. We need to also zero-out the bitmap */ #define BH_BITMAP_UPTODATE BH_JBDPrivateStart static inline int bitmap_uptodate(struct buffer_head *bh) { return (buffer_uptodate(bh) && test_bit(BH_BITMAP_UPTODATE, &(bh)->b_state)); } static inline void set_bitmap_uptodate(struct buffer_head *bh) { set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state); } /* * Disable DIO read nolock optimization, so new dioreaders will be forced * to grab i_mutex */ static inline void ext4_inode_block_unlocked_dio(struct inode *inode) { ext4_set_inode_state(inode, EXT4_STATE_DIOREAD_LOCK); smp_mb(); } static inline void ext4_inode_resume_unlocked_dio(struct inode *inode) { smp_mb(); ext4_clear_inode_state(inode, EXT4_STATE_DIOREAD_LOCK); } #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) /* For ioend & aio unwritten conversion wait queues */ #define EXT4_WQ_HASH_SZ 37 #define ext4_ioend_wq(v) (&ext4__ioend_wq[((unsigned long)(v)) %\ EXT4_WQ_HASH_SZ]) #define ext4_aio_mutex(v) (&ext4__aio_mutex[((unsigned long)(v)) %\ EXT4_WQ_HASH_SZ]) extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ]; #define EXT4_RESIZING 0 extern int ext4_resize_begin(struct super_block *sb); extern void ext4_resize_end(struct super_block *sb); #endif /* __KERNEL__ */ #endif /* _EXT4_H */
Excito/kernel-3.18
fs/ext4/ext4.h
C
gpl-2.0
103874
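A brief aside on the ext4_blocks_count()/ext4_blocks_count_set() helpers in the header above: the superblock keeps 64-bit block counts as two little-endian 32-bit halves, and the inline functions only split and recombine them. The sketch below illustrates that pattern in isolation and is not kernel code; plain uint32_t fields stand in for the on-disk __le32 ones, so the cpu_to_le32()/le32_to_cpu() conversions are deliberately left out, and the split_count/set_count/get_count names are invented for the example.

// Minimal, self-contained illustration of the lo/hi 32-bit split used for
// 64-bit block counts. Not kernel code; byte-order conversion is omitted.
#include <cstdint>
#include <cassert>

struct split_count {
    uint32_t lo;   // low 32 bits  (cf. s_blocks_count_lo)
    uint32_t hi;   // high 32 bits (cf. s_blocks_count_hi)
};

static inline void set_count(split_count &c, uint64_t blk)
{
    c.lo = static_cast<uint32_t>(blk);        // truncate to the low word
    c.hi = static_cast<uint32_t>(blk >> 32);  // keep the high word
}

static inline uint64_t get_count(const split_count &c)
{
    return (static_cast<uint64_t>(c.hi) << 32) | c.lo;
}

int main()
{
    split_count c{};
    set_count(c, 0x123456789ULL);             // a count larger than 32 bits
    assert(get_count(c) == 0x123456789ULL);   // round-trips exactly
    return 0;
}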
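The ext4_fs_is_busy()/ext4_lock_group() pair is also worth a note: the block-group spinlock is wrapped in a small heuristic that counts recent try-lock failures so callers can detect contention and back off. The following is a user-space analogue of that idea, assuming std::mutex and std::atomic as stand-ins for the kernel spinlock and atomic_t; the ContendedLock type and its constants are illustrative names, not taken from ext4.

// User-space sketch of the contention-counter heuristic. A successful
// try_lock() decays the busy estimate; a failed one bumps it (clamped)
// before blocking on the lock, mirroring atomic_add_unless() semantics.
#include <atomic>
#include <mutex>

constexpr int kMaxContention       = 8;  // cf. EXT4_MAX_CONTENTION
constexpr int kContentionThreshold = 2;  // cf. EXT4_CONTENTION_THRESHOLD

struct ContendedLock {
    std::mutex       mtx;
    std::atomic<int> busy{0};

    bool is_busy() const {
        return busy.load(std::memory_order_relaxed) > kContentionThreshold;
    }

    void acquire() {
        if (mtx.try_lock()) {
            // Fast path: drop the contention estimate, but never below zero.
            int v = busy.load(std::memory_order_relaxed);
            while (v > 0 && !busy.compare_exchange_weak(v, v - 1)) { }
        } else {
            // Slow path: raise the estimate, clamped at the maximum, then wait.
            int v = busy.load(std::memory_order_relaxed);
            while (v < kMaxContention && !busy.compare_exchange_weak(v, v + 1)) { }
            mtx.lock();
        }
    }

    void release() { mtx.unlock(); }
};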
#include <locale.h> #include <langinfo.h> #include <stdio.h> /* * Localization features */ const int l10n_LC_ALL = LC_ALL; // All of the locale const int l10n_LC_COLLATE = LC_COLLATE; // String collation const int l10n_LC_MESSAGES = LC_MESSAGES; // Localizable natural-language messages const int l10n_LC_MONETARY = LC_MONETARY; // Formatting of monetary values const int l10n_LC_CTYPE = LC_CTYPE; // Character classification const int l10n_LC_TIME = LC_TIME; // Formatting of date and time values /* LC_ADDRESS Formatting of addresses and geography-related items (*) LC_IDENTIFICATION Metadata describing the locale (*) LC_MEASUREMENT Settings related to measurements (metric versus US customary) (*) LC_NAME Formatting of salutations for persons (*) LC_NUMERIC Formatting of nonmonetary numeric values LC_PAPER Settings related to the standard paper size (*) LC_TELEPHONE Formats to be used with telephone services (*) */ /** * GET LOCAL */ char *getlocale(int lc) { return setlocale( lc, NULL ); } // Define missing items for macOS #if defined(__APPLE__) #define DECIMAL_POINT -1 #define THOUSANDS_SEP -1 #define CURRENCY_SYMBOL -1 #define POSITIVE_SIGN -1 #define NEGATIVE_SIGN -1 #define INT_CURR_SYMBOL -1 #define GROUPING -1 #define FRAC_DIGITS -1 #define INT_FRAC_DIGITS -1 #define P_CS_PRECEDES -1 #define P_SEP_BY_SPACE -1 #define P_SIGN_POSN -1 #define N_CS_PRECEDES -1 #define N_SEP_BY_SPACE -1 #define N_SIGN_POSN -1 #define MON_DECIMAL_POINT -1 #define MON_THOUSANDS_SEP -1 #define MON_GROUPING -1 #define ERA_YEAR -1 #endif /** * LANGINFO CODESET * */ char *langinfo_codeset() { return nl_langinfo( CODESET ); } /** * LANGINFO D_T_FMT * */ char *langinfo_d_t_fmt() { return nl_langinfo( D_T_FMT ); } /** * LANGINFO D_FMT * */ char *langinfo_d_fmt() { return nl_langinfo( D_FMT ); } /** * LANGINFO T_FMT * */ char *langinfo_t_fmt() { return nl_langinfo( T_FMT ); } /** * LANGINFO DAY 1 * */ char *langinfo_day_1() { return nl_langinfo( DAY_1 ); } /** * LANGINFO DAY 2 * */ char *langinfo_day_2() { return nl_langinfo( DAY_2 ); } /** * LANGINFO DAY 3 * */ char *langinfo_day_3() { return nl_langinfo( DAY_3 ); } /** * LANGINFO DAY 4 * */ char *langinfo_day_4() { return nl_langinfo( DAY_4 ); } /** * LANGINFO DAY 5 * */ char *langinfo_day_5() { return nl_langinfo( DAY_5 ); } /** * LANGINFO DAY 6 * */ char *langinfo_day_6() { return nl_langinfo( DAY_6 ); } /** * LANGINFO DAY 7 * */ char *langinfo_day_7() { return nl_langinfo( DAY_7 ); } /** * LANGINFO AB(breviated) DAY 1 * */ char *langinfo_abday_1() { return nl_langinfo( ABDAY_1 ); } /** * LANGINFO AB(breviated) DAY 2 * */ char *langinfo_abday_2() { return nl_langinfo( ABDAY_2 ); } /** * LANGINFO AB(breviated) DAY 3 * */ char *langinfo_abday_3() { return nl_langinfo( ABDAY_3 ); } /** * LANGINFO AB(breviated) DAY 4 * */ char *langinfo_abday_4() { return nl_langinfo( ABDAY_4 ); } /** * LANGINFO AB(breviated) DAY 5 * */ char *langinfo_abday_5() { return nl_langinfo( ABDAY_5 ); } /** * LANGINFO AB(breviated) DAY 6 * */ char *langinfo_abday_6() { return nl_langinfo( ABDAY_6 ); } /** * LANGINFO AB(breviated) DAY 7 * */ char *langinfo_abday_7() { return nl_langinfo( ABDAY_7 ); } /** * LANGINFO MON(th) 1 * */ char *langinfo_mon_1() { return nl_langinfo( MON_1 ); } /** * LANGINFO MON(th) 2 * */ char *langinfo_mon_2() { return nl_langinfo( MON_2 ); } /** * LANGINFO MON(th) 3 * */ char *langinfo_mon_3() { return nl_langinfo( MON_3 ); } /** * LANGINFO MON(th) 4 * */ char *langinfo_mon_4() { return nl_langinfo( MON_4 ); } /** * LANGINFO MON(th) 5 * */ char *langinfo_mon_5() { 
return nl_langinfo( MON_5 ); } /** * LANGINFO MON(th) 6 * */ char *langinfo_mon_6() { return nl_langinfo( MON_6 ); } /** * LANGINFO MON(th) 7 * */ char *langinfo_mon_7() { return nl_langinfo( MON_7 ); } /** * LANGINFO MON(th) 8 * */ char *langinfo_mon_8() { return nl_langinfo( MON_8 ); } /** * LANGINFO MON(th) 9 * */ char *langinfo_mon_9() { return nl_langinfo( MON_9 ); } /** * LANGINFO MON(th) 10 * */ char *langinfo_mon_10() { return nl_langinfo( MON_10 ); } /** * LANGINFO MON(th) 11 * */ char *langinfo_mon_11() { return nl_langinfo( MON_11 ); } /** * LANGINFO MON(th) 12 * */ char *langinfo_mon_12() { return nl_langinfo( MON_12 ); } /** * LANGINFO AB(breviated) MON(th) 1 * */ char *langinfo_abmon_1() { return nl_langinfo( ABMON_1 ); } /** * LANGINFO AB(breviated) MON(th) 2 * */ char *langinfo_abmon_2() { return nl_langinfo( ABMON_2 ); } /** * LANGINFO AB(breviated) MON(th) 3 * */ char *langinfo_abmon_3() { return nl_langinfo( ABMON_3 ); } /** * LANGINFO AB(breviated) MON(th) 4 * */ char *langinfo_abmon_4() { return nl_langinfo( ABMON_4 ); } /** * LANGINFO AB(breviated) MON(th) 5 * */ char *langinfo_abmon_5() { return nl_langinfo( ABMON_5 ); } /** * LANGINFO AB(breviated) MON(th) 6 * */ char *langinfo_abmon_6() { return nl_langinfo( ABMON_6 ); } /** * LANGINFO AB(breviated) MON(th) 7 * */ char *langinfo_abmon_7() { return nl_langinfo( ABMON_7 ); } /** * LANGINFO AB(breviated) MON(th) 8 * */ char *langinfo_abmon_8() { return nl_langinfo( ABMON_8 ); } /** * LANGINFO AB(breviated) MON(th) 9 * */ char *langinfo_abmon_9() { return nl_langinfo( ABMON_9 ); } /** * LANGINFO AB(breviated) MON(th) 10 * */ char *langinfo_abmon_10() { return nl_langinfo( ABMON_10 ); } /** * LANGINFO AB(breviated) MON(th) 11 * */ char *langinfo_abmon_11() { return nl_langinfo( ABMON_11 ); } /** * LANGINFO AB(breviated) MON(th) 12 * */ char *langinfo_abmon_12() { return nl_langinfo( ABMON_12 ); } /** * LANGINFO DECIMAL POINT * */ char *langinfo_decimal_point() { return nl_langinfo( DECIMAL_POINT ); } /** * LANGINFO THOUSANDS SEP(erator) * */ char *langinfo_thousands_sep() { return nl_langinfo( THOUSANDS_SEP ); } /** * LANGINFO YES EXPR * */ char *langinfo_yesexpr() { return nl_langinfo( YESEXPR ); } /** * LANGINFO NO EXPR * */ char *langinfo_noexpr() { return nl_langinfo( NOEXPR ); } /** * LANGINFO CURRENCY SYMBOL * */ char *langinfo_currency_symbol() { return nl_langinfo( CURRENCY_SYMBOL ); } /** * LANGINFO AM STR * */ char *langinfo_amstr() { return nl_langinfo( AM_STR ); } /** * LANGINFO PM STR * */ char *langinfo_pmstr() { return nl_langinfo( PM_STR ); } /** * LANGINFO T_FMT_AMPM (12 hour time) * */ char *langinfo_t_fmt_ampm() { return nl_langinfo( T_FMT_AMPM ); } /** * LANGINFO POSITIVE SIGN * */ char *langinfo_positive_sign() { return nl_langinfo( POSITIVE_SIGN ); } /** * LANGINFO NEGATIVE SIGN * */ char *langinfo_negative_sign() { return nl_langinfo( NEGATIVE_SIGN ); } /** * LANGINFO INT CURR SYMBOL * */ char *langinfo_int_curr_symbol() { return nl_langinfo( INT_CURR_SYMBOL ); } /** * LANGINFO GROUPING (number of digits) * */ char *langinfo_grouping() { return nl_langinfo( GROUPING ); } /** * LANGINFO FRAC (tional) DIGITS * */ char *langinfo_frac_digits() { return nl_langinfo( FRAC_DIGITS ); } /** * LANGINFO INT(ternational) FRAC (tional) DIGITS * */ char *langinfo_int_frac_digits() { return nl_langinfo( INT_FRAC_DIGITS ); } /** * LANGINFO P(ositive) CS PRECEDES * */ char *langinfo_p_cs_precedes() { return nl_langinfo( P_CS_PRECEDES ); } /** * LANGINFO P(ositive) SEP BY PRECEDES * */ char 
*langinfo_p_sep_by_space() { return nl_langinfo( P_SEP_BY_SPACE ); } /** * LANGINFO P(ositive) SIGN POSN (position) * */ char *langinfo_p_sign_posn() { return nl_langinfo( P_SIGN_POSN ); } /** * LANGINFO N(egative) CS PRECEDES * */ char *langinfo_n_cs_precedes() { return nl_langinfo( N_CS_PRECEDES ); } /** * LANGINFO N(egative) SEP BY PRECEDES * */ char *langinfo_n_sep_by_space() { return nl_langinfo( N_SEP_BY_SPACE ); } /** * LANGINFO N(egative) SIGN POSN (position) * */ char *langinfo_n_sign_posn() { return nl_langinfo( N_SIGN_POSN ); } /** * LANGINFO MON(etary) DECIMAL POINT * */ char *langinfo_mon_decimal_point() { return nl_langinfo( MON_DECIMAL_POINT ); } /** * LANGINFO MON(etary) THOUSANDS SEP(erator) * */ char *langinfo_mon_thousands_sep() { return nl_langinfo( MON_THOUSANDS_SEP ); } /** * LANGINFO MON(etary) GROUPING * */ char *langinfo_mon_grouping() { return nl_langinfo( MON_GROUPING ); } /** * LANGINFO (Alternate) ERA * */ char *langinfo_era() { return nl_langinfo( ERA ); } /** * LANGINFO (Alternate) ERA YEAR * */ char *langinfo_era_year() { return nl_langinfo( ERA_YEAR ); } /** * LANGINFO (Alternate) ERA D(ate) T(ime) FMT (Format) * */ char *langinfo_era_d_t_fmt() { return nl_langinfo( ERA_D_T_FMT ); } /** * LANGINFO (Alternate) ERA D(ate) FMT (Format) * */ char *langinfo_era_d_fmt() { return nl_langinfo( ERA_D_FMT ); } /** * LANGINFO (Alternate) ERA T(ime) FMT (Format) * */ char *langinfo_era_t_fmt() { return nl_langinfo( ERA_T_FMT ); }
kburtch/SparForte
src/c_l10n.c
C
gpl-2.0
9449
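For context, the functions in c_l10n.c above are thin, parameterless wrappers around setlocale()/nl_langinfo(), presumably so the SparForte interpreter can call them without passing nl_item constants across the binding layer. A hypothetical caller might look like the sketch below; the extern "C" declarations simply restate functions defined in that file, and the program assumes it is compiled as C++ and linked against c_l10n.c.

// Hypothetical driver, assuming it is linked against c_l10n.c.
#include <cstdio>
#include <clocale>

extern "C" {
    char *getlocale(int lc);
    char *langinfo_codeset(void);
    char *langinfo_d_fmt(void);
    char *langinfo_mon_1(void);
}

int main()
{
    // Switch from the "C" locale to whatever the environment requests,
    // otherwise the langinfo values below stay at their POSIX defaults.
    setlocale(LC_ALL, "");

    printf("current LC_ALL : %s\n", getlocale(LC_ALL));
    printf("codeset        : %s\n", langinfo_codeset());
    printf("date format    : %s\n", langinfo_d_fmt());
    printf("first month    : %s\n", langinfo_mon_1());
    return 0;
}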
<?php /** * @package Joomla.Libraries * @subpackage Form * * @copyright Copyright (C) 2005 - 2016 Open Source Matters, Inc. All rights reserved. * @license GNU General Public License version 2 or later; see LICENSE */ defined('JPATH_PLATFORM') or die; JFormHelper::loadFieldClass('groupedlist'); $app=JFactory::getApplication(); $client=$app->getClientId(); if($client==0){ require_once realpath(JPATH_SITE. '/components/com_menus/helpers/menus.php'); }else { // Import the com_menus helper. require_once realpath(JPATH_ADMINISTRATOR . '/components/com_menus/helpers/menus.php'); } /** * Supports an HTML grouped select list of menu item grouped by menu * * @since 1.6 */ class JFormFieldMenuitem extends JFormFieldGroupedList { /** * The form field type. * * @var string * @since 1.6 */ public $type = 'MenuItem'; /** * The menu type. * * @var string * @since 3.2 */ protected $menuType; /** * The language. * * @var array * @since 3.2 */ protected $language; /** * The published status. * * @var array * @since 3.2 */ protected $published; /** * The disabled status. * * @var array * @since 3.2 */ protected $disable; /** * Method to get certain otherwise inaccessible properties from the form field object. * * @param string $name The property name for which to the the value. * * @return mixed The property value or null. * * @since 3.2 */ public function __get($name) { switch ($name) { case 'menuType': case 'language': case 'published': case 'disable': return $this->$name; } return parent::__get($name); } /** * Method to set certain otherwise inaccessible properties of the form field object. * * @param string $name The property name for which to the the value. * @param mixed $value The value of the property. * * @return void * * @since 3.2 */ public function __set($name, $value) { switch ($name) { case 'menuType': $this->menuType = (string) $value; break; case 'language': case 'published': case 'disable': $value = (string) $value; $this->$name = $value ? explode(',', $value) : array(); break; default: parent::__set($name, $value); } } /** * Method to attach a JForm object to the field. * * @param SimpleXMLElement $element The SimpleXMLElement object representing the `<field>` tag for the form field object. * @param mixed $value The form field value to validate. * @param string $group The field name group control value. This acts as as an array container for the field. * For example if the field has name="foo" and the group value is set to "bar" then the * full field name would end up being "bar[foo]". * * @return boolean True on success. * * @see JFormField::setup() * @since 3.2 */ public function setup(SimpleXMLElement $element, $value, $group = null) { $result = parent::setup($element, $value, $group); if ($result == true) { $this->menuType = (string) $this->element['menu_type']; $this->published = $this->element['published'] ? explode(',', (string) $this->element['published']) : array(); $this->disable = $this->element['disable'] ? explode(',', (string) $this->element['disable']) : array(); $this->language = $this->element['language'] ? explode(',', (string) $this->element['language']) : array(); $this->disableChosen = $this->element['disableChosen'] ? true:false; } return $result; } /** * Method to get the field option groups. * * @return array The field option objects as a nested array in groups. * * @since 1.6 */ protected function getGroups() { $groups = array(); $menuType = $this->menuType; // Get the menu items. 
$items = MenusHelper::getMenuLinks($menuType, 0, 0, $this->published, $this->language); // Build group for a specific menu type. if ($menuType) { // If the menutype is empty, group the items by menutype. $db = JFactory::getDbo(); $query = $db->getQuery(true) ->select($db->quoteName('title')) ->from($db->quoteName('#__menu_types')) ->where($db->quoteName('menutype') . ' = ' . $db->quote($menuType)); $db->setQuery($query); try { $menuTitle = $db->loadResult(); } catch (RuntimeException $e) { $menuTitle = $menuType; } // Initialize the group. $groups[$menuTitle] = array(); // Build the options array. foreach ($items as $link) { $levelPrefix = str_repeat('- ', max(0, $link->level - 1)); // Displays language code if not set to All if ($link->language !== '*') { $lang = ' (' . $link->language . ')'; } else { $lang = ''; } $groups[$menuTitle][] = JHtml::_('select.option', $link->value, $levelPrefix . $link->text . $lang, 'value', 'text', in_array($link->type, $this->disable) ); } } // Build groups for all menu types. else { // Build the groups arrays. foreach ($items as $menu) { // Initialize the group. $groups[$menu->title] = array(); // Build the options array. foreach ($menu->links as $link) { $levelPrefix = str_repeat('- ', $link->level - 1); // Displays language code if not set to All if ($link->language !== '*') { $lang = ' (' . $link->language . ')'; } else { $lang = ''; } $groups[$menu->title][] = JHtml::_('select.option', $link->value, $levelPrefix . $link->text . $lang, 'value', 'text', in_array($link->type, $this->disable) ); } } } // Merge any additional groups in the XML definition. $groups = array_merge(parent::getGroups(), $groups); return $groups; } }
cuongnd/banhangonline88_joomla
libraries/cms/form/field/menuitem.php
PHP
gpl-2.0
5948
//////////////////////////////////////////////////////////////////////////////// // // Copyright 2016 RWS Inc, All Rights Reserved // // This program is free software; you can redistribute it and/or modify // it under the terms of version 2 of the GNU General Public License as published by // the Free Software Foundation // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License along // with this program; if not, write to the Free Software Foundation, Inc., // 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA // ////////////////////////////////////////////////////////////////////////////// // // MultiBtn.cpp // // History: // 04/10/97 JMI Started this using RPushBtn as a template. // // 04/17/97 JMI Added Load and Save components. // // 04/22/97 JMI Added NextState(). // CursorEvent() now uses NextState(). // Also, DrawBackgroundRes() now chooses the image indexed // by m_sState instead of m_sState + 1. // // 09/25/97 JMI ReadMembers() was not clearing states that had no // corresponding images which, since SetNumStates() // preserves existing state images, could result in old // images persisting through loads that contained no image // for that state. // Also, now, in file version 7, reads and writes the // current state. // ////////////////////////////////////////////////////////////////////////////// // // This a GUI item that is based on RBtn. // This overrides CursorEvent() to get information about where a click in its // RHot occurred. // This overrides Compose() to add text. // // Enhancements/Uses: // To change the look of a button when pressed, you may want to override the // Compose() or DrawBorder() in a derived class. // To get a callback on a click/release pair in the button, set m_bcUser. // ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// // Headers. ////////////////////////////////////////////////////////////////////////////// #include "Blue.h" #ifdef PATHS_IN_INCLUDES #include "ORANGE/GUI/MultiBtn.h" #else #include "multibtn.h" #endif // PATHS_IN_INCLUDES ////////////////////////////////////////////////////////////////////////////// // Module specific macros. ////////////////////////////////////////////////////////////////////////////// // Sets val to def if val is -1. #define DEF(val, def) ((val == -1) ? def : val) ////////////////////////////////////////////////////////////////////////////// // Module specific typedefs. ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// // Module specific (static) variables. ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// // Construction/Destruction. ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// // // Default constructor. // ////////////////////////////////////////////////////////////////////////////// RMultiBtn::RMultiBtn() { // Override RGuiItem's/RBtn's defaults. m_type = MultiBtn; // Indicates type of GUI item. 
// Initialize RMultiBtn members. m_sState = 0; // The button's current state, 0..m_sNumStates - 1. m_sNumStates = 0; // Number of button states. m_papimStates = NULL; // Ptr to array of m_sNumStates ptrs to button // state images. } ////////////////////////////////////////////////////////////////////////////// // // Destructor. // ////////////////////////////////////////////////////////////////////////////// RMultiBtn::~RMultiBtn() { DestroyStates(); } //////////////////////////////////////////////////////////////////////// // Methods. //////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////// // // Cursor event notification. // Events in event area. // (virtual). // //////////////////////////////////////////////////////////////////////// void RMultiBtn::CursorEvent( // Returns nothing. RInputEvent* pie) // In: Most recent user input event. // Out: pie->sUsed = TRUE, if used. { switch (pie->sEvent) { case RSP_MB0_DOUBLECLICK: case RSP_MB0_RELEASED: // If we were clicked in . . . if (m_sPressed != FALSE) { // Do change of state right away so user callback gets the new // value. // If within event area . . . if ( pie->sPosX >= m_sEventAreaX && pie->sPosX < m_sEventAreaX + m_sEventAreaW && pie->sPosY >= m_sEventAreaY && pie->sPosY < m_sEventAreaY + m_sEventAreaH) { // Change state. NextState(); } } break; } // Call base. RGuiItem::CursorEvent(pie); switch (pie->sEvent) { case RSP_MB0_DOUBLECLICK: case RSP_MB0_PRESSED: // Always recompose on press, since there's so many possibilities // with this button. Compose(); // Note that we used it. pie->sUsed = TRUE; break; case RSP_MB0_RELEASED: // Always recompose on release, since there's so many possibilities // with this button. Compose(); // Note that we used it. pie->sUsed = TRUE; break; } } //////////////////////////////////////////////////////////////////////// // Draw background resource, if one is specified. // Utilizes base class version to place and BLiT the resource. // (virtual). //////////////////////////////////////////////////////////////////////// void RMultiBtn::DrawBackgroundRes( // Returns nothing. RImage* pim /*= NULL*/) // Dest image, uses m_im, if NULL. { // Store old bkd res. RImage* pimBkdRes = m_pimBkdRes; // If we have any states . . . if (m_papimStates != NULL) { // Choose proper image. if (m_sPressed == FALSE) { // If the state is available . . . if (m_sState <= m_sNumStates) { // Get the state. m_pimBkdRes = m_papimStates[m_sState]; } } else { // Get the pressed feedback. m_pimBkdRes = m_papimStates[0]; } } // Call base. RBtn::DrawBackgroundRes(pim); // Restore bkd res. m_pimBkdRes = pimBkdRes; } //////////////////////////////////////////////////////////////////////// // Set number of states. // This will clear all existing state images. //////////////////////////////////////////////////////////////////////// int16_t RMultiBtn::SetNumStates( // Returns 0 on success. int16_t sNumStates) // In: New number of states. { int16_t sRes = 0; // Assume success. // Allocate an array of image ptrs and clear them all . . . RImage** papimNewStates = new RImage*[sNumStates + 1]; if (papimNewStates != NULL) { // Clear all the ptrs to NULL. memset(papimNewStates, 0, sizeof(RImage*) * (sNumStates + 1)); // If there was an old array . . . if (m_papimStates != NULL) { // Copy any currently valid ptrs within new range. int16_t i; for (i = 0; i <= sNumStates && i <= m_sNumStates; i++) { // Copy entry. 
papimNewStates[i] = m_papimStates[i]; // Clear entry so it is not deleted. m_papimStates[i] = NULL; } // Destroy any current states plus array. DestroyStates(); } // Store the new number of states. m_sNumStates = sNumStates; // Store new arrray. m_papimStates = papimNewStates; } else { TRACE("SetNumStates(): Failed to allocate new array of Image ptrs.\n"); sRes = -1; } return sRes; } //////////////////////////////////////////////////////////////////////// // Set button state or feedback state image. //////////////////////////////////////////////////////////////////////// int16_t RMultiBtn::SetState( // Returns 0 on success. RImage* pim, // In: Image for state sState. int16_t sState) // In: State to update (0 == feedback state, // 1..n == state number). { int16_t sRes = 0; // Assume success. if (m_papimStates == NULL || sState >= m_sNumStates) { sRes = SetNumStates(sState); } // If successful so far . . . if (sRes == 0) { // Clear current value. delete m_papimStates[sState]; // Allocate new one . . . m_papimStates[sState] = new RImage; if (m_papimStates[sState] != NULL) { // Copy specified image. *(m_papimStates[sState]) = *pim; } else { TRACE("SetState(): Failed to allocate new RImage.\n"); sRes = -1; } } return sRes; } ////////////////////////////////////////////////////////////////////////////// // Set button state or feedback state image. // The feedback state image is always the last image (m_sNumStates). ////////////////////////////////////////////////////////////////////////////// int16_t RMultiBtn::SetState( // Returns 0 on success. char* pszImageName, // In: File name of image for state sState. int16_t sState) // In: State to update (0 == feedback state, // 1..n == state number). { int16_t sRes = 0; // Assume success. RImage im; if (RFileEZLoad(&im, pszImageName, "rb", RFile::LittleEndian) == 0) { sRes = SetState(&im, sState); } else { TRACE("SetState(): RFileEZLoad() failed for \"%s\".\n", pszImageName); sRes = -1; } return sRes; } ////////////////////////////////////////////////////////////////////////////// // Clear button state or feedback state image. // The feedback state image is always the first image. ////////////////////////////////////////////////////////////////////////////// void RMultiBtn::ClearState( // Returns nothing. int16_t sState) // In: State to clear (0 == feedback state, // 1..n == state number). { if (sState >= 0 && sState <= m_sNumStates) { if (m_papimStates != NULL) { // State be gone. Safe for already deallocated states as long as // they're NULL. delete m_papimStates[sState]; m_papimStates[sState] = NULL; } } } ////////////////////////////////////////////////////////////////////////////// // Go to the next logical state. ////////////////////////////////////////////////////////////////////////////// int16_t RMultiBtn::NextState(void) // Returns new state. { if (m_sNumStates > 0) { m_sState = (m_sState % m_sNumStates) + 1; } else { m_sState = 0; } return m_sState; } ////////////////////////////////////////////////////////////////////////////// // Querries. ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// // Get the current image for the specified state. ////////////////////////////////////////////////////////////////////////////// RImage* RMultiBtn::GetState( // Returns image, if available; NULL, otherwise. int16_t sState) // In: State to get (0 == feedback state, // 1..n == state number). { RImage* pimRes = NULL; // Assume not available. 
if (sState >= 0 && sState <= m_sNumStates) { if (m_papimStates != NULL) { pimRes = m_papimStates[sState]; } } return pimRes; } ////////////////////////////////////////////////////////////////////////////// // Internal functions. ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// // Destroys current state bitmaps. ////////////////////////////////////////////////////////////////////////////// void RMultiBtn::DestroyStates(void) // Returns nothing. { if (m_papimStates != NULL) { int16_t i; for (i = 0; i <= m_sNumStates; i++) { delete m_papimStates[i]; } delete []m_papimStates; m_sNumStates = 0; } } //////////////////////////////////////////////////////////////////////// // Read item's members from file. // (virtual/protected (overriden here)). //////////////////////////////////////////////////////////////////////// int16_t RMultiBtn::ReadMembers( // Returns 0 on success. RFile* pfile, // File to read from. U32 u32Version) // File format version to use. { int16_t sRes = 0; // Assume success. // Invoke base class to read base members. sRes = RBtn::ReadMembers(pfile, u32Version); // If okay so far . . . if (sRes == 0) { ASSERT(pfile != NULL); ASSERT(pfile->IsOpen() != FALSE); // Switch on version. switch (u32Version) { default: // Insert additional version numbers here! case 7: pfile->Read(&m_sState); case 6: case 5: case 4: case 3: case 2: case 1: { int16_t sNumStates = 0; // Safety. // Read this class's members. pfile->Read(&sNumStates); // Set number of states. if (SetNumStates(sNumStates) == 0) { // Read all the images. int16_t sCurState; int16_t sImageForState; for (sCurState = 0; sCurState <= m_sNumStates && sRes == 0; sCurState++) { pfile->Read(&sImageForState); if (sImageForState != FALSE) { // There is an image. Load it. RImage imState; if (imState.Load(pfile) == 0) { // Set that state. if (SetState(&imState, sCurState) == 0) { // Successfully loaded and set state image. } else { TRACE("ReadMembers9): SetState() failed for state #%d.\n", sCurState); sRes = -3; } } else { TRACE("ReadMembers(): GetState() failed for state #%d.\n", sCurState); sRes = -2; } } else { // Make sure the state is clear. ClearState(sCurState); } } } else { TRACE("ReadMembers(): SetNumStates() failed.\n"); sRes = -1; } } case 0: // In version 0, only base class RGuiItem members were stored. // If successful . . . if (pfile->Error() == FALSE) { // Success. } else { TRACE("ReadMembers(): Error reading RMultiBtn members.\n"); sRes = -1; } break; } } return sRes; } //////////////////////////////////////////////////////////////////////// // Write item's members to file. // (virtual/protected (overriden here)). //////////////////////////////////////////////////////////////////////// int16_t RMultiBtn::WriteMembers( // Returns 0 on success. RFile* pfile) // File to write to. { int16_t sRes = 0; // Assume success. // Invoke base class to read base members. sRes = RBtn::WriteMembers(pfile); // If okay so far . . . if (sRes == 0) { ASSERT(pfile != NULL); ASSERT(pfile->IsOpen() != FALSE); // Write this class's members. pfile->Write(m_sState); pfile->Write(m_sNumStates); // Write all the images. int16_t sCurState; for (sCurState = 0; sCurState <= m_sNumStates && sRes == 0; sCurState++) { // If there is a bitmap for this state . . . RImage* pimState = GetState(sCurState); if (pimState != NULL) { // There is an image. Write flag indicating such. pfile->Write((int16_t)TRUE); // Write image. 
sRes = pimState->Save(pfile); } else { // No image. Write flag indicating such. pfile->Write((int16_t)FALSE); } } // If successful . . . if (pfile->Error() == FALSE) { // Success. } else { TRACE("WriteMembers(): Error writing RMultiBtn members.\n"); sRes = -1; } } return sRes; } ////////////////////////////////////////////////////////////////////////////// // EOF //////////////////////////////////////////////////////////////////////////////
PixelDevLabs/Ezia-Cleaner_Build-934afd57b26a
RSPiX/Src/ORANGE/GUI/MultiBtn.cpp
C++
gpl-2.0
15860
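As a usage sketch for the RMultiBtn methods defined above: slot 0 holds the pressed/feedback image, slots 1..n hold the visible states, and CursorEvent() advances the current state through NextState() on each click released inside the event area. The snippet below calls only methods shown in MultiBtn.cpp; the image file names are placeholders, and the surrounding RSPiX setup (creating the button, parenting it, providing real image files) is assumed rather than shown.

// Populate a three-state button: slot 0 is the pressed/feedback image,
// slots 1..3 are the images NextState() cycles through.
#ifdef PATHS_IN_INCLUDES
#include "ORANGE/GUI/MultiBtn.h"
#else
#include "multibtn.h"
#endif

static int16_t SetupTriStateButton(RMultiBtn* pbtn)
{
    if (pbtn->SetNumStates(3) != 0)
        return -1;

    // Placeholder file names for RImage files on disk.
    char szFeedback[] = "btn_pressed.img";
    char szOff[]      = "btn_off.img";
    char szLow[]      = "btn_low.img";
    char szHigh[]     = "btn_high.img";

    if (pbtn->SetState(szFeedback, 0) != 0 ||
        pbtn->SetState(szOff,      1) != 0 ||
        pbtn->SetState(szLow,      2) != 0 ||
        pbtn->SetState(szHigh,     3) != 0)
        return -1;

    // Sanity check: every slot, including the feedback slot, has an image.
    for (int16_t i = 0; i <= 3; i++)
    {
        if (pbtn->GetState(i) == NULL)
            return -1;
    }

    return 0;
}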
<?xml version="1.0" encoding="iso-8859-1"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <!-- template designed by Marco Von Ballmoos --> <title>Docs For Class server_side_validation</title> <link rel="stylesheet" href="../../media/stylesheet.css" /> <meta http-equiv='Content-Type' content='text/html; charset=iso-8859-1'/> </head> <body> <div class="page-body"> <h2 class="class-name"><img src="../../media/images/Class_logo.png" alt=" Class" title=" Class" style="vertical-align: middle"> server_side_validation</h2> <a name="sec-description"></a> <div class="info-box"> <div class="info-box-title">Description</div> <div class="nav-bar"> <span class="disabled">Description</span> | <a href="#sec-method-summary">Methods</a> (<a href="#sec-methods">details</a>) </div> <div class="info-box-body"> <!-- ========== Info from phpDoc block ========= --> <p class="short-description">Server Side Validation Class</p> <p class="notes"> Located in <a class="field" href="_plugins---ssv.class.php.html">/plugins/ssv.class.php</a> (line <span class="field"><a href="../../__filesource/fsource_phpOpenFW_Plugin_pluginsssv.class.php.html#a21">21</a></span>) </p> <pre></pre> </div> </div> <a name="sec-method-summary"></a> <div class="info-box"> <div class="info-box-title">Method Summary</span></div> <div class="nav-bar"> <a href="#sec-description">Description</a> | <span class="disabled">Methods</span> (<a href="#sec-methods">details</a>) </div> <div class="info-box-body"> <div class="method-summary"> <div class="method-definition"> <img src="../../media/images/Constructor.png" alt=" "/> <span class="method-result">server_side_validation</span> <a href="#__construct" title="details" class="method-name">__construct</a> ([<span class="var-type">string</span>&nbsp;<span class="var-name">$check_post</span> = <span class="var-default">true</span>], [<span class="var-type"></span>&nbsp;<span class="var-name">$check_get</span> = <span class="var-default">false</span>]) </div> <div class="method-definition"> <img src="../../media/images/Method.png" alt=" "/> <span class="method-result">void</span> <a href="#add_check" title="details" class="method-name">add_check</a> (<span class="var-type">string</span>&nbsp;<span class="var-name">$field_name</span>, <span class="var-type">string</span>&nbsp;<span class="var-name">$valid_type</span>, [<span class="var-type">string</span>&nbsp;<span class="var-name">$valid_txt</span> = <span class="var-default">''</span>], [<span class="var-type">string</span>&nbsp;<span class="var-name">$field2_name</span> = <span class="var-default">''</span>]) </div> <div class="method-definition"> <img src="../../media/images/Method.png" alt=" "/> <span class="method-result">void</span> <a href="#check_post_sub_array" title="details" class="method-name">check_post_sub_array</a> (<span class="var-type">string</span>&nbsp;<span class="var-name">$sub_array</span>) </div> <div class="method-definition"> <img src="../../media/images/Method.png" alt=" "/> <span class="method-result">void</span> <a href="#debug_mode" title="details" class="method-name">debug_mode</a> (<span class="var-type">bool</span>&nbsp;<span class="var-name">$tmp_bool</span>) </div> <div class="method-definition"> <img src="../../media/images/Method.png" alt=" "/> <span class="method-result">void</span> <a href="#display_fail_messages" title="details" class="method-name">display_fail_messages</a> ([<span 
class="var-type">string</span>&nbsp;<span class="var-name">$new_xsl</span> = <span class="var-default">null</span>]) </div> <div class="method-definition"> <img src="../../media/images/Method.png" alt=" "/> <span class="method-result">integer</span> <a href="#failed_checks" title="details" class="method-name">failed_checks</a> () </div> <div class="method-definition"> <img src="../../media/images/Method.png" alt=" "/> <span class="method-result">array</span> <a href="#fail_messages" title="details" class="method-name">fail_messages</a> () </div> <div class="method-definition"> <img src="../../media/images/Method.png" alt=" "/> <span class="method-result">bool</span> <a href="#status" title="details" class="method-name">status</a> () </div> <div class="method-definition"> <img src="../../media/images/Method.png" alt=" "/> <span class="method-result">bool</span> <a href="#validate" title="details" class="method-name">validate</a> () </div> </div> </div> </div> <a name="sec-methods"></a> <div class="info-box"> <div class="info-box-title">Methods</div> <div class="nav-bar"> <a href="#sec-description">Description</a> | <a href="#sec-method-summary">Methods</a> (<span class="disabled">details</span>) </div> <div class="info-box-body"> <A NAME='method_detail'></A> <a name="method__construct" id="__construct"><!-- --></a> <div class="evenrow"> <div class="method-header"> <img src="../../media/images/Constructor.png" /> <span class="method-title">Constructor __construct</span> (line <span class="line-number"><a href="../../__filesource/fsource_phpOpenFW_Plugin_pluginsssv.class.php.html#a91">91</a></span>) </div> <!-- ========== Info from phpDoc block ========= --> <p class="short-description">Constructor function</p> <ul class="tags"> <li><span class="field">access:</span> public</li> </ul> <div class="method-signature"> <span class="method-result">server_side_validation</span> <span class="method-name"> __construct </span> ([<span class="var-type">string</span>&nbsp;<span class="var-name">$check_post</span> = <span class="var-default">true</span>], [<span class="var-type"></span>&nbsp;<span class="var-name">$check_get</span> = <span class="var-default">false</span>]) </div> <ul class="parameters"> <li> <span class="var-type">string</span> <span class="var-name">$check_post</span> </li> <li> <span class="var-type"></span> <span class="var-name">$check_get</span> </li> </ul> </div> <a name="methodadd_check" id="add_check"><!-- --></a> <div class="oddrow"> <div class="method-header"> <img src="../../media/images/Method.png" /> <span class="method-title">add_check</span> (line <span class="line-number"><a href="../../__filesource/fsource_phpOpenFW_Plugin_pluginsssv.class.php.html#a129">129</a></span>) </div> <!-- ========== Info from phpDoc block ========= --> <p class="short-description">Add Check</p> <ul class="tags"> <li><span class="field">access:</span> public</li> </ul> <div class="method-signature"> <span class="method-result">void</span> <span class="method-name"> add_check </span> (<span class="var-type">string</span>&nbsp;<span class="var-name">$field_name</span>, <span class="var-type">string</span>&nbsp;<span class="var-name">$valid_type</span>, [<span class="var-type">string</span>&nbsp;<span class="var-name">$valid_txt</span> = <span class="var-default">''</span>], [<span class="var-type">string</span>&nbsp;<span class="var-name">$field2_name</span> = <span class="var-default">''</span>]) </div> <ul class="parameters"> <li> <span class="var-type">string</span> <span 
class="var-name">$field_name</span><span class="var-description">: Field name (First field if more than one)</span> </li> <li> <span class="var-type">string</span> <span class="var-name">$valid_type</span><span class="var-description">: Validation Type</span> </li> <li> <span class="var-type">string</span> <span class="var-name">$valid_txt</span><span class="var-description">: Error Message</span> </li> <li> <span class="var-type">string</span> <span class="var-name">$field2_name</span><span class="var-description">: Field name 2 (or other parameter)</span> </li> </ul> </div> <a name="methodcheck_post_sub_array" id="check_post_sub_array"><!-- --></a> <div class="evenrow"> <div class="method-header"> <img src="../../media/images/Method.png" /> <span class="method-title">check_post_sub_array</span> (line <span class="line-number"><a href="../../__filesource/fsource_phpOpenFW_Plugin_pluginsssv.class.php.html#a150">150</a></span>) </div> <!-- ========== Info from phpDoc block ========= --> <p class="short-description">Add Post Sub Array to Check</p> <ul class="tags"> <li><span class="field">access:</span> public</li> </ul> <div class="method-signature"> <span class="method-result">void</span> <span class="method-name"> check_post_sub_array </span> (<span class="var-type">string</span>&nbsp;<span class="var-name">$sub_array</span>) </div> <ul class="parameters"> <li> <span class="var-type">string</span> <span class="var-name">$sub_array</span><span class="var-description">: POST Sub Array Name</span> </li> </ul> </div> <a name="methoddebug_mode" id="debug_mode"><!-- --></a> <div class="oddrow"> <div class="method-header"> <img src="../../media/images/Method.png" /> <span class="method-title">debug_mode</span> (line <span class="line-number"><a href="../../__filesource/fsource_phpOpenFW_Plugin_pluginsssv.class.php.html#a310">310</a></span>) </div> <!-- ========== Info from phpDoc block ========= --> <p class="short-description">Toggle Debug Mode</p> <ul class="tags"> <li><span class="field">access:</span> public</li> </ul> <div class="method-signature"> <span class="method-result">void</span> <span class="method-name"> debug_mode </span> (<span class="var-type">bool</span>&nbsp;<span class="var-name">$tmp_bool</span>) </div> <ul class="parameters"> <li> <span class="var-type">bool</span> <span class="var-name">$tmp_bool</span><span class="var-description">: True - On, False - Off</span> </li> </ul> </div> <a name="methoddisplay_fail_messages" id="display_fail_messages"><!-- --></a> <div class="evenrow"> <div class="method-header"> <img src="../../media/images/Method.png" /> <span class="method-title">display_fail_messages</span> (line <span class="line-number"><a href="../../__filesource/fsource_phpOpenFW_Plugin_pluginsssv.class.php.html#a319">319</a></span>) </div> <!-- ========== Info from phpDoc block ========= --> <p class="short-description">Display failed check messages using XSL</p> <ul class="tags"> <li><span class="field">access:</span> public</li> </ul> <div class="method-signature"> <span class="method-result">void</span> <span class="method-name"> display_fail_messages </span> ([<span class="var-type">string</span>&nbsp;<span class="var-name">$new_xsl</span> = <span class="var-default">null</span>]) </div> <ul class="parameters"> <li> <span class="var-type">string</span> <span class="var-name">$new_xsl</span><span class="var-description">: File path to other XSL Stylesheet</span> </li> </ul> </div> <a name="methodfailed_checks" id="failed_checks"><!-- --></a> <div class="oddrow"> 
<div class="method-header"> <img src="../../media/images/Method.png" /> <span class="method-title">failed_checks</span> (line <span class="line-number"><a href="../../__filesource/fsource_phpOpenFW_Plugin_pluginsssv.class.php.html#a301">301</a></span>) </div> <!-- ========== Info from phpDoc block ========= --> <p class="short-description">Number of failed validations Function</p> <ul class="tags"> <li><span class="field">return:</span> Returns the number of failed validations produced by the server side validation</li> <li><span class="field">access:</span> public</li> </ul> <div class="method-signature"> <span class="method-result">integer</span> <span class="method-name"> failed_checks </span> () </div> </div> <a name="methodfail_messages" id="fail_messages"><!-- --></a> <div class="evenrow"> <div class="method-header"> <img src="../../media/images/Method.png" /> <span class="method-title">fail_messages</span> (line <span class="line-number"><a href="../../__filesource/fsource_phpOpenFW_Plugin_pluginsssv.class.php.html#a292">292</a></span>) </div> <!-- ========== Info from phpDoc block ========= --> <p class="short-description">Fail Messages Function</p> <ul class="tags"> <li><span class="field">return:</span> Returns the failure messages produced by the server side validation</li> <li><span class="field">access:</span> public</li> </ul> <div class="method-signature"> <span class="method-result">array</span> <span class="method-name"> fail_messages </span> () </div> </div> <a name="methodstatus" id="status"><!-- --></a> <div class="oddrow"> <div class="method-header"> <img src="../../media/images/Method.png" /> <span class="method-title">status</span> (line <span class="line-number"><a href="../../__filesource/fsource_phpOpenFW_Plugin_pluginsssv.class.php.html#a283">283</a></span>) </div> <!-- ========== Info from phpDoc block ========= --> <p class="short-description">Status Function</p> <ul class="tags"> <li><span class="field">return:</span> Returns the status of the server side validation (null before / true or false after)</li> <li><span class="field">access:</span> public</li> </ul> <div class="method-signature"> <span class="method-result">bool</span> <span class="method-name"> status </span> () </div> </div> <a name="methodvalidate" id="validate"><!-- --></a> <div class="evenrow"> <div class="method-header"> <img src="../../media/images/Method.png" /> <span class="method-title">validate</span> (line <span class="line-number"><a href="../../__filesource/fsource_phpOpenFW_Plugin_pluginsssv.class.php.html#a167">167</a></span>) </div> <!-- ========== Info from phpDoc block ========= --> <p class="short-description">Validate Function</p> <ul class="tags"> <li><span class="field">return:</span> Success - True, Failure - False</li> <li><span class="field">access:</span> public</li> </ul> <div class="method-signature"> <span class="method-result">bool</span> <span class="method-name"> validate </span> () </div> </div> </div> </div> <p class="notes" id="credit"> Documentation generated on Wed, 21 Nov 2012 18:25:38 +0000 by <a href="http://www.phpdoc.org" target="_blank">phpDocumentor 1.4.1</a> </p> </div></body> </html>
cclark61/phpOpenFW
doc/developer/phpOpenFW/Plugin/server_side_validation.html
HTML
gpl-2.0
15311
/* * Copyright (C) 2013-2015 DeathCore <http://www.noffearrdeathproject.net/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ScriptPCH.h" #include "dragon_soul.h" #include "Map.h" class instance_dragon_soul: public InstanceMapScript { public: instance_dragon_soul() : InstanceMapScript("instance_dragon_soul", 967) { } struct instance_dragon_soul_InstanceMapScript : public InstanceScript { instance_dragon_soul_InstanceMapScript(InstanceMap* map) : InstanceScript(map) { } // Creatures uint64 MorchokGUID; uint64 UnsleepingGUID; uint64 WarlordGUID; uint64 HagaraGUID; uint64 UltraxionGUID; uint64 WarmasterGUID; uint64 PortalGUID; uint64 Maelstrom_trall; uint64 Maelstrom_kalecgos; uint64 Maelstrom_ysera; uint64 Maelstrom_nozdormy; uint64 Maelstrom_alexstrasza; uint64 Aspect_Of_MagicGUID; uint64 AlexstraszaGUID; uint64 YseraGUID; uint64 NozdormuGUID; uint64 Trall_Vs_UltraxionGUID; uint64 DeathwingGUID; uint64 arm_tentacle_1; uint64 arm_tentacle_2; uint64 wing_tentacle_1; uint64 wing_tentacle_2; void Initialize() { SetBossNumber(MAX_ENCOUNTER); MorchokGUID = 0; UnsleepingGUID = 0; WarlordGUID = 0; HagaraGUID = 0; UltraxionGUID = 0; WarmasterGUID = 0; PortalGUID = 0; Maelstrom_trall = 0; Maelstrom_kalecgos = 0; Maelstrom_ysera = 0; Maelstrom_nozdormy = 0; Maelstrom_alexstrasza = 0; Aspect_Of_MagicGUID = 0; AlexstraszaGUID = 0; YseraGUID = 0; NozdormuGUID = 0; Trall_Vs_UltraxionGUID = 0; DeathwingGUID = 0; arm_tentacle_1 = 0; arm_tentacle_2 = 0; wing_tentacle_1 = 0; wing_tentacle_2 = 0; } void OnCreatureCreate(Creature* creature) { switch (creature->GetEntry()) { case NPC_MORCHOK: MorchokGUID = creature->GetGUID(); break; case NPC_WARLORD: UnsleepingGUID = creature->GetGUID(); break; case NPC_UNSLEEPING: WarlordGUID = creature->GetGUID(); break; case NPC_HAGARA: HagaraGUID = creature->GetGUID(); break; case NPC_ULTRAXION: UltraxionGUID = creature->GetGUID(); break; case NPC_WARMASTER: WarmasterGUID = creature->GetGUID(); break; case NPC_PORTAL: PortalGUID = creature->GetGUID(); break; case NPC_TRALL_VS_ULTRAXION: Trall_Vs_UltraxionGUID = creature->GetGUID(); break; case NPC_ALEXSTRASZA: AlexstraszaGUID = creature->GetGUID(); break; case NPC_YSERA: YseraGUID = creature->GetGUID(); break; case NPC_NOZDORMU: NozdormuGUID = creature->GetGUID(); break; case NPC_ASPECT_OF_MAGIC: Aspect_Of_MagicGUID = creature->GetGUID(); break; case NPC_MAELSTROM_TRALL: Maelstrom_trall = creature->GetGUID(); break; case NPC_MAELSTROM_KALECGOS: Maelstrom_kalecgos = creature->GetGUID(); break; case NPC_MAELSTROM_YSERA: Maelstrom_ysera = creature->GetGUID(); break; case NPC_MAELSTROM_NOZDORMY: Maelstrom_nozdormy = creature->GetGUID(); break; case NPC_MAELSTROM_ALEXSTRASZA: Maelstrom_alexstrasza = creature->GetGUID(); break; case NPC_DEATHWING_1: DeathwingGUID = creature->GetGUID(); break; case NPC_ARM_TENTACLE_1: arm_tentacle_1 = creature->GetGUID(); break; case NPC_ARM_TENTACLE_2: arm_tentacle_2 = creature->GetGUID(); break; case 
NPC_WING_TENTACLE_1: wing_tentacle_1 = creature->GetGUID(); break; case NPC_WING_TENTACLE_2: wing_tentacle_2 = creature->GetGUID(); break; } } void SetData(uint32 type, uint32 data) { switch (type) { case DATA_DAMAGE_DEATHWING: if(data == DONE) if(Creature* creature = instance->GetCreature(DeathwingGUID)) creature->CastSpell(creature, 106548); SaveToDB(); break; case DATA_ATTACK_DEATHWING: { switch (data) { case IN_PROGRESS: if(Creature* creature = instance->GetCreature(arm_tentacle_1)) { creature->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_IMMUNE_TO_PC); creature->SetVisible(true); } if(Creature* creature = instance->GetCreature(arm_tentacle_2)) { creature->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_IMMUNE_TO_PC); creature->SetVisible(true); } if(Creature* creature = instance->GetCreature(wing_tentacle_1)) { creature->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_IMMUNE_TO_PC); creature->SetVisible(true); } if(Creature* creature = instance->GetCreature(wing_tentacle_2)) { creature->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_IMMUNE_TO_PC); creature->SetVisible(true); } break; } SaveToDB(); break; } default: break; } } uint64 GetData64 (uint32 identifier) const { switch (identifier) { case NPC_MAELSTROM_TRALL: return Maelstrom_trall; case NPC_DEATHWING_1: return DeathwingGUID; } return 0; } bool SetBossState(uint32 type, EncounterState state) { if (!InstanceScript::SetBossState(type, state)) return false; switch(type) { case BOSS_MORCHOK: case BOSS_WARLORD: case BOSS_UNSLEEPING: case BOSS_HAGARA: case BOSS_WARMASTER: case BOSS_DEATHWING: break; case BOSS_ULTRAXION: if(state == DONE) if(Creature* creature = instance->GetCreature(Trall_Vs_UltraxionGUID)) creature->SummonCreature(NPC_TRAVEL_TO_THE_DECK_OF_THE_SKYFIRE, -1802.141f, -2364.638f, 340.796f, 5.234f, TEMPSUMMON_CORPSE_DESPAWN, 900000); break; case DATA_TRALL_VS_ULTRAXION: switch (state) { case DONE: if(Creature* creature = instance->GetCreature(Trall_Vs_UltraxionGUID)) { creature->AddAura(LAST_DEFENDER_OF_AZEROTH, creature); creature->SetFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); } if(Creature* creature = instance->GetCreature(AlexstraszaGUID)) creature->CastSpell(creature, GIFT_OF_LIFE); if(Creature* creature = instance->GetCreature(YseraGUID)) creature->CastSpell(creature, ESSENCE_OF_DREAMS); if(Creature* creature = instance->GetCreature(NozdormuGUID)) creature->AddAura(TIMELOOP, creature); if(Creature* creature = instance->GetCreature(Aspect_Of_MagicGUID)) creature->CastSpell(creature, SOURCE_OF_MAGIC); case FAIL: DoRemoveAurasDueToSpellOnPlayers(105554); DoRemoveAurasDueToSpellOnPlayers(106368); DoRemoveAurasDueToSpellOnPlayers(LAST_DEFENDER_OF_AZEROTH); DoRemoveAurasDueToSpellOnPlayers(TIMELOOP); DoRemoveAurasDueToSpellOnPlayers(SOURCE_OF_MAGIC); DoRemoveAurasDueToSpellOnPlayers(ESSENCE_OF_DREAMS); DoRemoveAurasDueToSpellOnPlayers(GIFT_OF_LIFE); break; default: break; } break; case DATA_PORTALS_ON_OFF: break; } return true; } std::string GetSaveData() { OUT_SAVE_INST_DATA; std::ostringstream saveStream; saveStream << "D S " << GetBossSaveData(); OUT_SAVE_INST_DATA_COMPLETE; return saveStream.str(); } void Load(const char* str) { if (!str) { OUT_LOAD_INST_DATA_FAIL; return; } OUT_LOAD_INST_DATA(str); char dataHead1, dataHead2; std::istringstream loadStream(str); loadStream >> dataHead1 >> dataHead2; if(dataHead1 == 'D' && dataHead2 == 'S') { for(uint32 i = 0; i < MAX_ENCOUNTER; ++i) { uint32 tmpState; loadStream >> tmpState; if (tmpState == IN_PROGRESS || tmpState > SPECIAL) tmpState = NOT_STARTED; SetBossState(i, EncounterState(tmpState)); } } 
OUT_LOAD_INST_DATA_COMPLETE; } }; InstanceScript* GetInstanceScript(InstanceMap* map) const { return new instance_dragon_soul_InstanceMapScript(map); } }; void AddSC_instance_dragon_soul() { new instance_dragon_soul(); }
Ginfred/DeathCore
src/server/scripts/Kalimdor/CavernsOfTime/DragonSoul/instance_dragon_soul.cpp
C++
gpl-2.0
10,625
var config = require('comm/script/config')

App({
  globalData: {
    userInfo: null
  },

  onLaunch: function() {
    // Fetch the user info
    this.getUserInfo()
    // Initialize the local storage cache
    this.initStorage()
  },

  getUserInfo: function(cb) {
    var that = this
    wx.login({
      success: function () {
        wx.getUserInfo({
          success: function (res) {
            that.globalData.userInfo = res.userInfo
            typeof cb == "function" && cb(that.globalData.userInfo)
          }
        })
      }
    })
  },

  getCity: function(cb) {
    var that = this
    wx.getLocation({
      type: 'gcj02',
      success: function (res) {
        var locationParam = res.latitude + ',' + res.longitude + '1'
        wx.request({
          url: config.apiList.baiduMap,
          data: {
            ak: config.baiduAK,
            location: locationParam,
            output: 'json',
            pois: '1'
          },
          method: 'GET',
          success: function(res){
            config.city = res.data.result.addressComponent.city.slice(0,-1)
            typeof cb == "function" && cb(res.data.result.addressComponent.city.slice(0,-1))
          },
          fail: function(res) {
            // Locate again
            that.getCity();
          }
        })
      }
    })
  },

  initStorage: function() {
    wx.getStorageInfo({
      success: function(res) {
        // Create the film favourites list if it does not exist yet
        if (!('film_favorite' in res.keys)) {
          wx.setStorage({
            key: 'film_favorite',
            data: []
          })
        }
        // Create the person favourites list if it does not exist yet
        if (!('person_favorite' in res.keys)) {
          wx.setStorage({
            key: 'person_favorite',
            data: []
          })
        }
        // Create the film browsing history if it does not exist yet
        if (!('film_history' in res.keys)) {
          wx.setStorage({
            key: 'film_history',
            data: []
          })
        }
        // Create the person browsing history if it does not exist yet
        if (!('person_history' in res.keys)) {
          wx.setStorage({
            key: 'person_history',
            data: []
          })
        }
        // Default personal-info data
        var personInfo = {
          name: '',
          nickName: '',
          gender: '',
          age: '',
          birthday: '',
          constellation: '',
          company: '',
          school: '',
          tel: '',
          email: '',
          intro: ''
        }
        // Create the personal-info record if it does not exist yet
        if (!('person_info' in res.keys)) {
          wx.setStorage({
            key: 'person_info',
            data: personInfo
          })
        }
        // Create the gallery data if it does not exist yet
        if (!('gallery' in res.keys)) {
          wx.setStorage({
            key: 'gallery',
            data: []
          })
        }
        // Create the background-card (skin) selection if it does not exist yet
        if (!('skin' in res.keys)) {
          wx.setStorage({
            key: 'skin',
            data: ''
          })
        }
      }
    })
  }
})
YHaven/sjqTest
wxCode/allMovie/app.js
JavaScript
gpl-2.0
3,189
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <meta http-equiv="X-UA-Compatible" content="IE=9"/> <title>lldp: liblldpctl: library to interface with lldpd</title> <link href="tabs.css" rel="stylesheet" type="text/css"/> <link href="doxygen.css" rel="stylesheet" type="text/css" /> </head> <body> <div id="top"><!-- do not remove this div! --> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 56px;"> <td style="padding-left: 0.5em;"> <div id="projectname">lldp &#160;<span id="projectnumber">0.7.11</span> </div> </td> </tr> </tbody> </table> </div> <!-- Generated by Doxygen 1.7.6.1 --> <div id="navrow1" class="tabs"> <ul class="tablist"> <li><a href="index.html"><span>Main&#160;Page</span></a></li> <li><a href="pages.html"><span>Related&#160;Pages</span></a></li> <li><a href="modules.html"><span>Modules</span></a></li> <li><a href="annotated.html"><span>Data&#160;Structures</span></a></li> <li><a href="files.html"><span>Files</span></a></li> </ul> </div> </div> <div class="header"> <div class="summary"> <a href="#groups">Modules</a> </div> <div class="headertitle"> <div class="title">liblldpctl: library to interface with lldpd</div> </div> </div><!--header--> <div class="contents"> <div class="dynheader"> Collaboration diagram for liblldpctl: library to interface with lldpd:</div> <div class="dyncontent"> <center><table><tr><td><img src="group__liblldpctl.png" border="0" alt="" usemap="#group____liblldpctl"/> <map name="group____liblldpctl" id="group____liblldpctl"> </map> </td></tr></table></center> </div> <table class="memberdecls"> <tr><td colspan="2"><h2><a name="groups"></a> Modules</h2></td></tr> <tr><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__lldpctl__connection.html">Managing connection to lldpd</a></td></tr> <tr><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__lldpctl__errors__logs.html">Errors and logs handling</a></td></tr> <tr><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__lldpctl__atoms.html">Extracting information: atoms</a></td></tr> </table> <hr/><a name="details" id="details"></a><h2>Detailed Description</h2> <p>`liblldpctl` allows any program to convenienty query and modify the behaviour of a running lldpd daemon.</p> <p>To use this library, use `pkg-config` to get the appropriate options: * `pkg-config --libs lldpctl` for `LIBS` or `LDFLAGS` * `pkg-config --cflags lldpctl` for `CFLAGS`</p> <dl class="warning"><dt><b>Warning:</b></dt><dd>This library is tightly coupled with lldpd. The library to use should be the one shipped with lldpd. Clients of the library are then tied by the classic API/ABI rules and may be compiled separatly.</dd></dl> <p>There are two important structures in this library: <code><a class="el" href="structlldpctl__conn__t.html">lldpctl_conn_t</a></code> which represents a connection and <code><a class="el" href="structlldpctl__atom__t.html">lldpctl_atom_t</a></code> which represents a piece of information. Those types are opaque. No direct access to them should be done.</p> <p>The library is expected to be reentrant and therefore thread-safe. 
It is however not expected that a connection be used in several threads simultaneously. This also applies to the different pieces of information gathered through this connection. Several connections to lldpd can be used simultaneously.</p> <p>The first step is to establish a connection. See <a class="el" href="group__lldpctl__connection.html">Managing connection to lldpd</a> for more information about this. The next step is to query the lldpd daemon. See <a class="el" href="group__lldpctl__atoms.html">Extracting information: atoms</a> on how to do this.</p> <p>`liblldpctl` tries to handle errors in a coherent way. Any function returning a pointer will return <code>NULL</code> on error and the last error can be retrieved through the <a class="el" href="group__lldpctl__errors__logs.html#ga591c61c3e5c5dc5ab87ec238bece4571">lldpctl_last_error()</a> function. Most functions returning integers will return a negative integer representing the error if something goes wrong. The use of <a class="el" href="group__lldpctl__errors__logs.html#ga591c61c3e5c5dc5ab87ec238bece4571">lldpctl_last_error()</a> allows one to check if this is a real error if there is a doubt. See <a class="el" href="group__lldpctl__errors__logs.html">Errors and logs handling</a> for more about this. </p> </div><!-- contents --> <hr class="footer"/><address class="footer"><small> Generated on Mon Nov 17 2014 22:16:07 for lldp by &#160;<a href="http://www.doxygen.org/index.html"> <img class="footer" src="doxygen.png" alt="doxygen"/> </a> 1.7.6.1 </small></address> </body> </html>
diy19901030/lldpd
doc/html/group__liblldpctl.html
HTML
gpl-2.0
5,172
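The liblldpctl page above describes the intended workflow only in prose: establish a connection, query the daemon through atoms, then check lldpctl_last_error() when something returns NULL. The sketch below illustrates that flow in a minimal program. It is a hedged example written from the documented concepts; the compile command and the key constant lldpctl_k_interface_name are assumptions recalled from the lldpd headers, so verify them against the lldpctl.h shipped with your lldpd build.

// Minimal sketch of the connection/atom/error flow described above.
// Assumed build command: g++ demo.cpp $(pkg-config --cflags --libs lldpctl)
// lldpctl_k_interface_name is assumed from the matching lldpd release's headers.
#include <cstdio>
#include <lldpctl.h>

int main() {
    // 1. Establish a connection (NULL callbacks select the default local socket).
    lldpctl_conn_t *conn = lldpctl_new(NULL, NULL, NULL);
    if (conn == NULL) {
        std::fprintf(stderr, "cannot allocate lldpctl connection\n");
        return 1;
    }

    // 2. Query the daemon: every piece of information comes back as an atom.
    lldpctl_atom_t *ifaces = lldpctl_get_interfaces(conn);
    if (ifaces == NULL) {
        // Pointer-returning functions return NULL on error; ask for the last error.
        std::fprintf(stderr, "lldpctl error: %s\n",
                     lldpctl_strerror(lldpctl_last_error(conn)));
        lldpctl_release(conn);
        return 1;
    }

    lldpctl_atom_t *iface;
    lldpctl_atom_foreach(ifaces, iface) {
        std::printf("interface: %s\n",
                    lldpctl_atom_get_str(iface, lldpctl_k_interface_name));
    }

    // 3. Atoms are reference counted; drop ours before closing the connection.
    lldpctl_atom_dec_ref(ifaces);
    lldpctl_release(conn);
    return 0;
}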
/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Copyright (c) 2010 High Tech Computer Corporation Module Name: ds2746_battery.c Abstract: This module implements the power algorithm, including below concepts: 1. Charging function control. 2. Charging full condition. 3. Recharge control. 4. Battery capacity maintainance. 5. Battery full capacity calibration. Original Auther: Andy.YS Wang June-01-2010 ---------------------------------------------------------------------------------*/ #include <linux/module.h> #include <linux/param.h> #include <linux/jiffies.h> #include <linux/workqueue.h> #include <linux/pm.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/android_alarm.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/wakelock.h> #include <asm/gpio.h> #include <linux/delay.h> #include <linux/ds2746_battery.h> #include <linux/ds2746_battery_config.h> #include <linux/ds2746_param.h> /*#include <linux/ds2746_param_config.h>*/ #include <linux/wrapper_types.h> #include <linux/smb329.h> #include <mach/htc_battery.h> #include <asm/mach-types.h> #include "../../arch/arm/mach-msm/proc_comm.h" #include <linux/i2c.h> /* for i2c_adapter, i2c_client define*/ /*#include "../w1/w1.h"*/ /*#include "../w1/slaves/w1_ds2784.h"*/ #include <linux/time.h> #include <linux/rtc.h> struct ds2746_device_info { struct device *dev; struct device *w1_dev; struct workqueue_struct *monitor_wqueue; struct work_struct monitor_work; /* lock to protect the battery info */ struct mutex lock; /* DS2784 data, valid after calling ds2784_battery_read_status() */ unsigned long update_time; /* jiffies when data read */ struct alarm alarm; struct wake_lock work_wake_lock; u8 slow_poll; ktime_t last_poll; }; static struct wake_lock vbus_wake_lock; /*======================================================================================== HTC power algorithm helper member and functions ========================================================================================*/ static struct poweralg_type poweralg = {0}; static struct poweralg_config_type config = {0}; static struct poweralg_config_type debug_config = {0}; #define FAST_POLL (1 * 60) #define SLOW_POLL (10 * 60) #define PREDIC_POLL 20 #define SOURCE_NONE 0 #define SOURCE_USB 1 #define SOURCE_AC 2 #define CHARGE_OFF 0 #define CHARGE_SLOW 1 #define CHARGE_FAST 2 #define BATTERY_ID_UNKNOWN 0 #define HTC_BATTERY_DS2746_DEBUG_ENABLE 0 /* DS2746 I2C BUS*/ #define DS2746_I2C_BUS_ID 0 #define DS2746_I2C_SLAVE_ADDR 0x26 /*======================================================================================== IC dependent defines ========================================================================================*/ /* DS2746 I2C register address*/ #define DS2746_STATUS_REG 0x01 #define DS2746_AUX0_MSB 0x08 #define DS2746_AUX0_LSB 0x09 #define DS2746_AUX1_MSB 0x0A #define DS2746_AUX1_LSB 0x0B #define DS2746_VOLT_MSB 0x0C #define DS2746_VOLT_LSB 0x0D #define DS2746_CURRENT_MSB 0x0E #define DS2746_CURRENT_LSB 0x0F #define DS2746_ACR_MSB 0x10 #define DS2746_ACR_LSB 0x11 /* DS2746 I2C I/O*/ static struct i2c_adapter *i2c2 = NULL; static struct i2c_client *ds2746_i2c = NULL; static int htc_battery_initial = 0; int ds2746_i2c_write_u8(u8 value, u8 reg) { int ret; u8 buf[2]; struct i2c_msg *msg; struct i2c_msg xfer_msg[1]; /* [MSG1] fill the register address data and fill the data Tx buffer */ msg = &xfer_msg[0]; msg->addr = ds2746_i2c->addr; msg->len = 2; msg->flags = 0; /* Read the 
register value */ msg->buf = buf; buf[0] = reg; buf[1] = value; ret = i2c_transfer(ds2746_i2c->adapter, xfer_msg, 1); if (ret <= 0){ printk(DRIVER_ZONE "[%s] fail.\n", __func__); } #if HTC_BATTERY_DS2746_DEBUG_ENABLE printk(DRIVER_ZONE "[%s] ds2746[0x%x]<-0x%x.\n", __func__, reg, value); #endif return ret; } int ds2746_i2c_read_u8(u8 *value, u8 reg) { int ret; struct i2c_msg *msg; struct i2c_msg xfer_msg[2]; /* [MSG1] fill the register address data */ msg = &xfer_msg[0]; msg->addr = ds2746_i2c->addr; msg->len = 1; msg->flags = 0; /* Read the register value */ msg->buf = &reg; /* [MSG2] fill the data rx buffer */ msg = &xfer_msg[1]; msg->addr = ds2746_i2c->addr; msg->len = 1; msg->flags = I2C_M_RD; /* Read the register value */ msg->buf = value; ret = i2c_transfer(ds2746_i2c->adapter, xfer_msg, 2); if (ret <= 0){ printk(DRIVER_ZONE "[%s] fail.\n", __func__); } #if HTC_BATTERY_DS2746_DEBUG_ENABLE printk(DRIVER_ZONE "[%s] ds2746[0x%x]=0x%x.\n", __func__, reg, *value); #endif return ret; } static void ds2746_i2c_exit(void) { if (ds2746_i2c != NULL){ kfree(ds2746_i2c); ds2746_i2c = NULL; } if (i2c2 != NULL){ i2c_put_adapter(i2c2); i2c2 = NULL; } } static int ds2746_i2c_init(void) { i2c2 = i2c_get_adapter(DS2746_I2C_BUS_ID); ds2746_i2c = kzalloc(sizeof(*ds2746_i2c), GFP_KERNEL); if (i2c2 == NULL || ds2746_i2c == NULL){ printk(DRIVER_ZONE "[%s] fail (0x%x, 0x%x).\n", __func__, (int) i2c2, (int) ds2746_i2c); ds2746_i2c_exit(); return -ENOMEM; } ds2746_i2c->adapter = i2c2; ds2746_i2c->addr = DS2746_I2C_SLAVE_ADDR; return 0; } /*======================================================================================== HTC supporting MFG testing member and functions =========================================================================================*/ static BOOL b_is_charge_off_by_bounding = FALSE; static void bounding_fullly_charged_level(int upperbd) { static int pingpong = 1; int lowerbd; int current_level; b_is_charge_off_by_bounding = FALSE; if (upperbd <= 0) return; /* doesn't activated this function */ lowerbd = upperbd - 5; /* 5% range */ if (lowerbd < 0) lowerbd = 0; current_level = CEILING(poweralg.capacity_01p, 10); if (pingpong == 1 && upperbd <= current_level) { printk(DRIVER_ZONE "MFG: lowerbd=%d, upperbd=%d, current=%d, pingpong:1->0 turn off\n", lowerbd, upperbd, current_level); b_is_charge_off_by_bounding = TRUE; pingpong = 0; } else if (pingpong == 0 && lowerbd < current_level) { printk(DRIVER_ZONE "MFG: lowerbd=%d, upperbd=%d, current=%d, toward 0, turn off\n", lowerbd, upperbd, current_level); b_is_charge_off_by_bounding = TRUE; } else if (pingpong == 0 && current_level <= lowerbd) { printk(DRIVER_ZONE "MFG: lowerbd=%d, upperbd=%d, current=%d, pingpong:0->1 turn on\n", lowerbd, upperbd, current_level); pingpong = 1; } else { printk(DRIVER_ZONE "MFG: lowerbd=%d, upperbd=%d, current=%d, toward %d, turn on\n", lowerbd, upperbd, current_level, pingpong); } } static BOOL is_charge_off_by_bounding_condition(void) { return b_is_charge_off_by_bounding; } void calibrate_id_ohm(struct battery_type *battery) { if (!poweralg.charging_source || !poweralg.charging_enable){ battery->id_ohm += 500; /* If device is in discharge mode, Rid=Rid_1 + 0.5Kohm*/ } else if (poweralg.charging_source == 2 && battery->current_mA >= 400 && battery->id_ohm >= 1500){ battery->id_ohm -= 1500; /* If device is in charge mode and ISET=1 (charge current is <800mA), Rid=Rid_1 - 1.5Kohm*/ } else if (battery->id_ohm >= 700){ battery->id_ohm -= 700; /* If device is in charge mode and ISET=0 (charge current is 
<400mA), Rid=Rid_1 - 0.7Kohm*/ } } static BOOL is_charging_avaiable(void) { if (poweralg.is_software_charger_timeout) return FALSE; if (!poweralg.protect_flags.is_charging_enable_available)return FALSE; if (!poweralg.is_cable_in) return FALSE; if (poweralg.charge_state == CHARGE_STATE_PENDING) return FALSE; if (poweralg.charge_state == CHARGE_STATE_FULL_PENDING) return FALSE; if (poweralg.charge_state == CHARGE_STATE_PREDICTION) return FALSE; if (is_charge_off_by_bounding_condition()) return FALSE; return TRUE; /* CHARGE_STATE_UNKNOWN, SET_LED_BATTERY_CHARGING is available to be charged by default*/ } static BOOL is_high_current_charging_avaialable(void) { if (!poweralg.protect_flags.is_charging_high_current_avaialble) return FALSE; if (!poweralg.is_china_ac_in) return FALSE; if (poweralg.charge_state == CHARGE_STATE_UNKNOWN) return FALSE; return TRUE; } static void update_next_charge_state(void) { static UINT32 count_charging_full_condition; static UINT32 count_charge_over_load; int next_charge_state; int i; /* unknown -> prediction -> unknown -> discharge/charging/pending charging -> full-wait-stable -> full-charging -> full-pending full-pending -> full-charging -> charging *(cable in group) -> discharge, charge-pending, dead *(cable out group), full-wait-stable, charge-pending, dead -> charging*/ for (i = 0; i < 25; i++) /* maximun 25 times state transition to prevent from busy loop; ideally the transition time shall be less than 5 times.*/ { next_charge_state = poweralg.charge_state; /* 0. enter prediction state or not*/ if (poweralg.charge_state == CHARGE_STATE_UNKNOWN){ if (poweralg.battery.is_power_on_reset || config.debug_always_predict){ if (poweralg.protect_flags.is_battery_dead){ /* keep poweralg.charge_state unchanged, set capacity to 0% directly*/ printk(DRIVER_ZONE " dead battery, \ p=0%%\n"); poweralg.capacity_01p = 0; battery_capacity_update(&poweralg.battery, poweralg.capacity_01p); poweralg.fst_discharge_capacity_01p = poweralg.capacity_01p; poweralg.fst_discharge_acr_mAh = poweralg.battery.charge_counter_mAh; } else{ /* battery replaced, recalculate capacity based on battery voltage*/ printk(DRIVER_ZONE " start predict discharge...\n"); next_charge_state = CHARGE_STATE_PREDICTION; } config.debug_always_predict = FALSE; } } if (next_charge_state == poweralg.charge_state){ /*---------------------------------------------------------------------------------------------------*/ /* 1. cable in group*/ if (poweralg.charge_state == CHARGE_STATE_UNKNOWN || poweralg.charge_state == CHARGE_STATE_CHARGING || poweralg.charge_state == CHARGE_STATE_PENDING || poweralg.charge_state == CHARGE_STATE_FULL_WAIT_STABLE || poweralg.charge_state == CHARGE_STATE_FULL_CHARGING || poweralg.charge_state == CHARGE_STATE_FULL_PENDING){ if (!poweralg.is_cable_in){ next_charge_state = CHARGE_STATE_DISCHARGE; } else if (!poweralg.protect_flags.is_charging_enable_available){ next_charge_state = CHARGE_STATE_PENDING; } } /*---------------------------------------------------------------------------------------------------*/ /* 2. cable out group*/ if (poweralg.charge_state == CHARGE_STATE_UNKNOWN || poweralg.charge_state == CHARGE_STATE_DISCHARGE){ if (poweralg.is_cable_in){ next_charge_state = CHARGE_STATE_CHARGING; } } } /*---------------------------------------------------------------------------------------------------*/ /* 3. 
state handler/transition, if the charge state is not changed due to cable/protect flags*/ if (next_charge_state == poweralg.charge_state){ switch (poweralg.charge_state){ case CHARGE_STATE_PREDICTION: { UINT32 end_time_ms = BAHW_MyGetMSecs(); if (end_time_ms - poweralg.state_start_time_ms >= config.predict_timeout_sec * 1000){ printk(DRIVER_ZONE "predict done [%d->%d]\n", poweralg.state_start_time_ms, end_time_ms); next_charge_state = CHARGE_STATE_UNKNOWN; } } break; case CHARGE_STATE_CHARGING: if (!poweralg.battery.is_power_on_reset){ /* -> full-charging, pending, dead*/ if (poweralg.capacity_01p > 990){ /* only ever charge-full, the capacity can be larger than 99.0%*/ next_charge_state = CHARGE_STATE_FULL_CHARGING; } else if (poweralg.battery.voltage_mV >= config.full_charging_mv && poweralg.battery.current_mA >= 0 && poweralg.battery.current_mA <= config.full_charging_ma){ /* meet charge full terminate condition, check again*/ next_charge_state = CHARGE_STATE_FULL_WAIT_STABLE; } } if (poweralg.battery.current_mA <= 0){ /* count_charge_over_load is 5 as max*/ if (count_charge_over_load < 5) count_charge_over_load++; else poweralg.is_charge_over_load = TRUE; } else{ count_charge_over_load = 0; poweralg.is_charge_over_load = FALSE; } /* is_software_charger_timeout: only triggered when AC adapter in*/ if (config.software_charger_timeout_sec && poweralg.is_china_ac_in){ /* software charger timer is enabled; for AC charge only*/ UINT32 end_time_ms = BAHW_MyGetMSecs(); if (end_time_ms - poweralg.state_start_time_ms >= config.software_charger_timeout_sec * 1000){ printk(DRIVER_ZONE "software charger timer timeout [%d->%d]\n", poweralg.state_start_time_ms, end_time_ms); poweralg.is_software_charger_timeout = TRUE; } } break; case CHARGE_STATE_FULL_WAIT_STABLE: { /* -> full-charging, pending, dead*/ if (poweralg.battery.voltage_mV >= config.full_charging_mv && poweralg.battery.current_mA >= 0 && poweralg.battery.current_mA <= config.full_charging_ma){ count_charging_full_condition++; } else{ count_charging_full_condition = 0; next_charge_state = CHARGE_STATE_CHARGING; } if (count_charging_full_condition >= 3){ poweralg.capacity_01p = 1000; battery_capacity_update(&poweralg.battery, poweralg.capacity_01p); next_charge_state = CHARGE_STATE_FULL_CHARGING; } } break; case CHARGE_STATE_FULL_CHARGING: { /* -> full-pending, charging*/ UINT32 end_time_ms = BAHW_MyGetMSecs(); if (poweralg.battery.voltage_mV < config.voltage_exit_full_mv){ if (poweralg.capacity_01p > 990) poweralg.capacity_01p = 990; next_charge_state = CHARGE_STATE_CHARGING; } else if (config.full_pending_ma != 0 && poweralg.battery.current_mA >= 0 && poweralg.battery.current_mA <= config.full_pending_ma){ printk(DRIVER_ZONE " charge-full pending(%dmA)(%d:%d)\n", poweralg.battery.current_mA, poweralg.state_start_time_ms, end_time_ms); next_charge_state = CHARGE_STATE_FULL_PENDING; } else if (end_time_ms - poweralg.state_start_time_ms >= config.full_charging_timeout_sec * 1000){ printk(DRIVER_ZONE " charge-full (expect:%dsec)(%d:%d)\n", config.full_charging_timeout_sec, poweralg.state_start_time_ms, end_time_ms); next_charge_state = CHARGE_STATE_FULL_PENDING; } } break; case CHARGE_STATE_FULL_PENDING: if ((poweralg.battery.voltage_mV >= 0 && poweralg.battery.voltage_mV < config.voltage_recharge_mv) || (poweralg.battery.RARC_01p >= 0 && poweralg.battery.RARC_01p <= config.capacity_recharge_p * 10)){ /* -> full-charging*/ next_charge_state = CHARGE_STATE_FULL_CHARGING; } break; case CHARGE_STATE_PENDING: case CHARGE_STATE_DISCHARGE: { UINT32 
end_time_ms = BAHW_MyGetMSecs(); if (!poweralg.is_voltage_stable){ if (end_time_ms - poweralg.state_start_time_ms >= config.wait_votlage_statble_sec * 1000){ printk(DRIVER_ZONE " voltage stable\n"); poweralg.is_voltage_stable = TRUE; } } } if (poweralg.is_cable_in && poweralg.protect_flags.is_charging_enable_available){ /* -> charging*/ next_charge_state = CHARGE_STATE_CHARGING; } break; } } /*---------------------------------------------------------------------------------------------------*/ /* 4. state transition*/ if (next_charge_state != poweralg.charge_state){ /* state exit*/ switch (poweralg.charge_state){ case CHARGE_STATE_UNKNOWN: poweralg.capacity_01p = poweralg.battery.RARC_01p; if (poweralg.capacity_01p > 990) poweralg.capacity_01p = 990; if (poweralg.capacity_01p < 0) poweralg.capacity_01p = 0; poweralg.fst_discharge_capacity_01p = poweralg.capacity_01p; poweralg.fst_discharge_acr_mAh = poweralg.battery.charge_counter_mAh; break; case CHARGE_STATE_PREDICTION: battery_param_update(&poweralg.battery, &poweralg.protect_flags); poweralg.capacity_01p = poweralg.battery.KADC_01p; if (poweralg.capacity_01p > 990) poweralg.capacity_01p = 990; if (poweralg.capacity_01p < 0) poweralg.capacity_01p = 0; battery_capacity_update(&poweralg.battery, poweralg.capacity_01p); poweralg.fst_discharge_capacity_01p = poweralg.capacity_01p; poweralg.fst_discharge_acr_mAh = poweralg.battery.charge_counter_mAh; break; } /* state init*/ poweralg.state_start_time_ms = BAHW_MyGetMSecs(); switch (next_charge_state){ case CHARGE_STATE_DISCHARGE: case CHARGE_STATE_PENDING: /*! star_lee 20100426 - always set ACR=FULL when discharge starts and ACR>FULL*/ if (poweralg.battery.RARC_01p > 1000) battery_capacity_update(&poweralg.battery, 1000); poweralg.is_need_calibrate_at_49p = TRUE; poweralg.is_need_calibrate_at_14p = TRUE; poweralg.fst_discharge_capacity_01p = poweralg.capacity_01p; poweralg.fst_discharge_acr_mAh = poweralg.battery.charge_counter_mAh; poweralg.is_voltage_stable = FALSE; break; case CHARGE_STATE_CHARGING: poweralg.is_software_charger_timeout = FALSE; /* reset software charger timer every time when charging re-starts*/ poweralg.is_charge_over_load = FALSE; count_charge_over_load = 0; poweralg.battery.charge_full_real_mAh = poweralg.battery.charge_full_design_mAh; battery_capacity_update(&poweralg.battery, poweralg.capacity_01p); break; case CHARGE_STATE_FULL_WAIT_STABLE: /* set to 0 first; the cournter will be add to 1 soon in CHARGE_STATE_FULL_WAIT_STABLE state handler*/ count_charging_full_condition = 0; break; } printk(DRIVER_ZONE " state change(%d->%d), full count=%d, over load count=%d [%d]\n", poweralg.charge_state, next_charge_state, count_charging_full_condition, count_charge_over_load, poweralg.state_start_time_ms); poweralg.charge_state = next_charge_state; continue; } break; } } static void __update_capacity(void) { INT32 next_capacity_01p; #if HTC_BATTERY_DS2746_DEBUG_ENABLE pr_info("ds2746_batt:__update_capacity start\n"); #endif if (poweralg.charge_state == CHARGE_STATE_PREDICTION || poweralg.charge_state == CHARGE_STATE_UNKNOWN){ /*! 
star_lee 20100429 - return 99%~25% when in prediction mode*/ poweralg.capacity_01p = max(min(990, poweralg.battery.KADC_01p), 250); printk(DRIVER_ZONE "fake percentage (%d) during prediction.\n", poweralg.capacity_01p); } else if (poweralg.charge_state == CHARGE_STATE_FULL_CHARGING || poweralg.charge_state == CHARGE_STATE_FULL_PENDING){ poweralg.capacity_01p = 1000; } else if (!is_charging_avaiable() && poweralg.is_voltage_stable){ /* DISCHARGE ALG: capacity is based on KADC/RARC; only do this after cable in 3 minutes later*/ if (poweralg.battery.KADC_01p <= 0){ if (poweralg.capacity_01p > 0) poweralg.capacity_01p -= 10; if (poweralg.capacity_01p > 0){ /* capacity is still not 0 when KADC is 0; record capacity for next boot time*/ battery_capacity_update(&poweralg.battery, poweralg.capacity_01p); } } else{ if ((config.enable_weight_percentage) && (poweralg.capacity_01p <150 || poweralg.battery.RARC_01p> poweralg.battery.KADC_01p)){ #define Padc 200 #define Pw 5 /* 500=<W_KADC<=1000*/ #define W_KADC(RARC, Percentage) Padc+(INT32)abs(RARC-Percentage)*Pw /*! star_lee 20100426 - W_KADC must be larger or equal to 0*/ INT32 w_kadc = min(max(W_KADC(poweralg.battery.RARC_01p, poweralg.battery.KADC_01p), 0), 1000); INT32 w_rarc = 1000 - w_kadc; next_capacity_01p = (w_kadc * poweralg.battery.KADC_01p + w_rarc * poweralg.battery.RARC_01p)/1000; } else{ next_capacity_01p = poweralg.battery.RARC_01p; } if (next_capacity_01p > 1000) next_capacity_01p = 1000; if (next_capacity_01p < 0) next_capacity_01p = 0; if (next_capacity_01p < poweralg.capacity_01p){ poweralg.capacity_01p -= min(10, poweralg.capacity_01p-next_capacity_01p); } } if (config.enable_full_calibration){ if (poweralg.is_need_calibrate_at_49p && poweralg.capacity_01p <= 500 && poweralg.fst_discharge_capacity_01p >= 600){ poweralg.battery.charge_full_real_mAh = (poweralg.fst_discharge_acr_mAh-poweralg.battery.charge_counter_mAh)*1000/ (poweralg.fst_discharge_capacity_01p-poweralg.capacity_01p); battery_capacity_update(&poweralg.battery, poweralg.capacity_01p); poweralg.is_need_calibrate_at_49p = FALSE; poweralg.fst_discharge_capacity_01p = poweralg.capacity_01p; poweralg.fst_discharge_acr_mAh = poweralg.battery.charge_counter_mAh; printk(DRIVER_ZONE " 1.full calibrate: full=%d\n", poweralg.battery.charge_full_real_mAh); } else if (poweralg.is_need_calibrate_at_14p && poweralg.capacity_01p <= 150 && poweralg.fst_discharge_capacity_01p >= 250){ poweralg.battery.charge_full_real_mAh = (poweralg.fst_discharge_acr_mAh-poweralg.battery.charge_counter_mAh)*1000/ (poweralg.fst_discharge_capacity_01p - poweralg.capacity_01p); battery_capacity_update(&poweralg.battery, poweralg.capacity_01p); poweralg.is_need_calibrate_at_14p = FALSE; poweralg.fst_discharge_capacity_01p = poweralg.capacity_01p; poweralg.fst_discharge_acr_mAh = poweralg.battery.charge_counter_mAh; printk(DRIVER_ZONE " 2.full calibrate: full=%d\n", poweralg.battery.charge_full_real_mAh); } } } else{ /* CHARGE ALG: capacity is always based on ACR 1. plus 1% as max when charge, if the orignal capacity is <= 99%, the result is no more than 99% 2. 
minus 1% as max when discharge, not less than 0%*/ next_capacity_01p = poweralg.battery.RARC_01p; if (next_capacity_01p > 1000) next_capacity_01p = 1000; if (next_capacity_01p < 0) next_capacity_01p = 0; if (next_capacity_01p > poweralg.capacity_01p){ /* charge case*/ next_capacity_01p = poweralg.capacity_01p + min(next_capacity_01p - poweralg.capacity_01p, 10); if (poweralg.capacity_01p > 990) poweralg.capacity_01p = next_capacity_01p; else poweralg.capacity_01p = min(next_capacity_01p, 990); } else if (next_capacity_01p < poweralg.capacity_01p){ /* discharge case*/ poweralg.capacity_01p -= min(poweralg.capacity_01p - next_capacity_01p, 10); if (poweralg.capacity_01p < 0) poweralg.capacity_01p = 0; } } } /*======================================================================================== HTC power algorithm implemetation ========================================================================================*/ int get_state_check_interval_min_sec(void) { /*the minimal check interval of each states in seconds reserve for change polling rate UINT32 elapse_time_ms = BAHW_MyGetMSecs() - poweralg.state_start_time_ms; switch (poweralg.charge_state) { case CHARGE_STATE_FULL_WAIT_STABLE: //! star_lee 20100429 - takes 30 seconds(10 seconds*3 times) to confirm charge full condition return 10; case CHARGE_STATE_PREDICTION: return min(config.predict_timeout_sec, max((INT32)(config.predict_timeout_sec - elapse_time_ms/1000), (INT32)1)); default: if ( BAHW_IsChargeSourceIn() ) return config.polling_time_in_charging_sec; else return config.polling_time_in_discharging_sec; } */ return 0; } BOOL do_power_alg(BOOL is_event_triggered) { /* is_event_triggered - TRUE: handle event only, do not update capacity; FALSE; always update capacity*/ static BOOL s_bFirstEntry = TRUE; static UINT32 s_pre_time_ms; static INT32 s_level; UINT32 now_time_ms = BAHW_MyGetMSecs(); /*------------------------------------------------------ 1 get battery data and update charge state*/ if (!battery_param_update(&poweralg.battery, &poweralg.protect_flags)){ printk(DRIVER_ZONE "battery_param_update fail, please retry next time.\n"); return FALSE; } update_next_charge_state(); /*----------------------------------------------------- 2 calculate battery capacity (predict if necessary)*/ if (s_bFirstEntry || now_time_ms - s_pre_time_ms > 10000 || !is_event_triggered){ /* DO not update capacity when plug/unplug cable less than 10 seconds*/ __update_capacity(); s_bFirstEntry = FALSE; s_pre_time_ms = now_time_ms; } if (config.debug_disable_shutdown){ if (poweralg.capacity_01p <= 0){ poweralg.capacity_01p = 1; } } s_level = CEILING(poweralg.capacity_01p, 10); if (CEILING(poweralg.last_capacity_01p, 10) != s_level || poweralg.battery.last_temp_01c != poweralg.battery.temp_01c) { poweralg.battery.last_temp_01c = poweralg.battery.temp_01c; poweralg.last_capacity_01p = poweralg.capacity_01p; ds2746_blocking_notify(DS2784_LEVEL_UPDATE, &s_level); } bounding_fullly_charged_level(config.full_level); /*------------------------------------------------------ 3 charging function change*/ if (is_charging_avaiable()){ if (is_high_current_charging_avaialable()){ ds2746_charger_control(CHARGE_FAST); } else{ ds2746_charger_control(CHARGE_SLOW); } } else{ ds2746_charger_control(CHARGE_OFF); } if (config.debug_disable_hw_timer && poweralg.is_charge_over_load){ ds2746_charger_control(CHARGE_OFF); printk(DRIVER_ZONE "Toggle charger due to HW disable charger.\n"); } /*------------------------------------------------------ 4 debug messages and update os 
battery status*/ /*powerlog_to_file(&poweralg); update_os_batt_status(&poweralg);*/ #if HTC_BATTERY_DS2746_DEBUG_ENABLE printk(DRIVER_ZONE "[%d] P=%d cable=%d%d flags=%d%d%d debug=%d%d%d%d fst_discharge=%d/%d [%u]\n", poweralg.charge_state, poweralg.capacity_01p, poweralg.is_cable_in, poweralg.is_china_ac_in, poweralg.protect_flags.is_charging_enable_available, poweralg.protect_flags.is_charging_high_current_avaialble, poweralg.protect_flags.is_battery_dead, config.debug_disable_shutdown, config.debug_fake_room_temp, config.debug_disable_hw_timer, config.debug_always_predict, poweralg.fst_discharge_capacity_01p, poweralg.fst_discharge_acr_mAh, BAHW_MyGetMSecs()); #endif return TRUE; } void power_alg_init(struct poweralg_config_type *debug_config) { /*------------------------------------------------------------- 1. setup default poweralg data*/ poweralg.charge_state = CHARGE_STATE_UNKNOWN; poweralg.capacity_01p = 990; poweralg.last_capacity_01p = poweralg.capacity_01p; poweralg.fst_discharge_capacity_01p = 0; poweralg.fst_discharge_acr_mAh = 0; poweralg.is_need_calibrate_at_49p = TRUE; poweralg.is_need_calibrate_at_14p = TRUE; poweralg.is_charge_over_load = FALSE; poweralg.is_china_ac_in = FALSE; poweralg.is_cable_in = FALSE; poweralg.is_voltage_stable = FALSE; poweralg.is_software_charger_timeout = FALSE; poweralg.state_start_time_ms = 0; if(get_cable_status() == SOURCE_USB) { poweralg.is_cable_in = TRUE; poweralg.charging_source = SOURCE_USB; ds2746_charger_control(CHARGE_SLOW); } else if (get_cable_status() == SOURCE_AC) { poweralg.is_cable_in = TRUE; poweralg.is_china_ac_in = TRUE; poweralg.charging_source = SOURCE_AC; ds2746_charger_control(CHARGE_FAST); } else{ poweralg.charging_source = SOURCE_NONE; } /*------------------------------------------------------------- 2. setup default config flags (board dependent)*/ poweralg_config_init(&config); if (debug_config){ config.debug_disable_shutdown = debug_config->debug_disable_shutdown; config.debug_fake_room_temp = debug_config->debug_fake_room_temp; config.debug_disable_hw_timer = debug_config->debug_disable_hw_timer; config.debug_always_predict = debug_config->debug_always_predict; } /* if ( BAHW_IsTestMode() ) { config.debug_disable_shutdown = TRUE; config.debug_fake_room_temp = TRUE; config.debug_disable_hw_timer = TRUE; }*/ /*------------------------------------------------------------- 3. setup default protect flags*/ poweralg.protect_flags.is_charging_enable_available = TRUE; poweralg.protect_flags.is_battery_dead = FALSE; poweralg.protect_flags.is_charging_high_current_avaialble = FALSE; poweralg.protect_flags.is_fake_room_temp = config.debug_fake_room_temp; /*------------------------------------------------------------- 4. 
setup default battery structure*/ battery_param_init(&poweralg.battery); /*pr_info("power alg inited with board name <%s>\n", HTC_BATT_BOARD_NAME);*/ } void power_alg_preinit(void) { /* make sure cable and battery is in when off mode charging*/ } static BLOCKING_NOTIFIER_HEAD(ds2746_notifier_list); int ds2746_register_notifier(struct notifier_block *nb) { #if HTC_BATTERY_DS2746_DEBUG_ENABLE pr_info("%s\n", __func__); #endif return blocking_notifier_chain_register(&ds2746_notifier_list, nb); } int ds2746_unregister_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&ds2746_notifier_list, nb); } int ds2746_blocking_notify(unsigned long val, void *v) { int chg_ctl; #if HTC_BATTERY_DS2746_DEBUG_ENABLE pr_info("%s\n", __func__); #endif if (val == DS2784_CHARGING_CONTROL){ chg_ctl = *(int *) v; if (machine_is_passionc()){ pr_info("[ds2746_batt] Switch charging %d\n", chg_ctl); if (chg_ctl <= 2){ gpio_direction_output(22, !(!!chg_ctl));/*PNC*/ set_charger_ctrl(chg_ctl); } return 0; } else if (poweralg.battery.id_index != BATTERY_ID_UNKNOWN){ /* only notify at changes */ if (poweralg.charging_enable == chg_ctl) return 0; else poweralg.charging_enable = chg_ctl; } else{ /* poweralg.charging_enable = DISABLE; v = DISABLE; pr_info("[HTC_BATT] Unknow battery\n");*/ if (poweralg.charging_enable == chg_ctl) return 0; else poweralg.charging_enable = chg_ctl; } } return blocking_notifier_call_chain(&ds2746_notifier_list, val, v); } int ds2746_get_battery_info(struct battery_info_reply *batt_info) { batt_info->batt_id = poweralg.battery.id_index; /*Mbat ID*/ batt_info->batt_vol = poweralg.battery.voltage_mV; /*VMbat*/ batt_info->batt_temp = poweralg.battery.temp_01c; /*Temperature*/ batt_info->batt_current = poweralg.battery.current_mA; /*Current*/ batt_info->level = CEILING(poweralg.capacity_01p, 10); /*last_show%*/ batt_info->charging_source = poweralg.charging_source; batt_info->charging_enabled = poweralg.charging_enable; batt_info->full_bat = poweralg.battery.charge_full_real_mAh; return 0; } ssize_t htc_battery_show_attr(struct device_attribute *attr, char *buf) { int len = 0; pr_info("%s\n", __func__); if (!strcmp(attr->attr.name, "batt_attr_text")){ len += scnprintf(buf + len, PAGE_SIZE - len, "Percentage(%%): %d;\n" "KADC(%%): %d;\n" "RARC(%%): %d;\n" "V_MBAT(mV): %d;\n" "Main_battery_ID(Kohm): %d;\n" "pd_M: %d;\n" "Current(mA): %d;\n" "Temp: %d;\n" "Charging_source: %d;\n" "ACR(mAh): %d;\n" "FULL(mAh): %d;\n" "1st_dis_percentage(%%): %d;\n" "1st_dis_ACR: %d;\n", CEILING(poweralg.capacity_01p, 10), CEILING(poweralg.battery.KADC_01p, 10), CEILING(poweralg.battery.RARC_01p, 10), poweralg.battery.voltage_mV, poweralg.battery.id_ohm, poweralg.battery.pd_m, poweralg.battery.current_mA, CEILING(poweralg.battery.temp_01c, 10), poweralg.charging_source, poweralg.battery.charge_counter_mAh, poweralg.battery.charge_full_real_mAh, CEILING(poweralg.fst_discharge_capacity_01p, 10), poweralg.fst_discharge_acr_mAh ); } return len; } static int cable_status_handler_func(struct notifier_block *nfb, unsigned long action, void *param) { u32 cable_type = (u32) action; pr_info("[ds2746_batt] cable change to %d\n", cable_type); /* When the cable plug out, reset all the related flag, Let algorithm machine to judge latest state */ if (cable_type == 0){ poweralg.is_cable_in = 0; poweralg.is_china_ac_in = 0; /*htc_batt_info.rep.OTP_Flag = 0; htc_batt_info.rep.charging_sts_flag = 0; htc_batt_info.full_charge_count = 0;*/ } else if (cable_type == 1){ poweralg.is_cable_in = 1; 
poweralg.is_china_ac_in = 0; } else if (cable_type == 2){ poweralg.is_cable_in = 1; poweralg.is_china_ac_in = 1; } else if (cable_type == 0xff){ if (param) config.full_level = *(INT32 *)param; pr_info("[ds2746_batt] Set the full level to %d\n", config.full_level); return NOTIFY_OK; } else if (cable_type == 0x10){ poweralg.protect_flags.is_fake_room_temp = TRUE; pr_info("[ds2746_batt] enable fake temp mode\n"); return NOTIFY_OK; } if (cable_type <= 2){ poweralg.charging_source = cable_type; ds2746_blocking_notify(DS2784_CHARGING_CONTROL, &poweralg.charging_source); } return NOTIFY_OK; } static struct notifier_block cable_status_handler = { .notifier_call = cable_status_handler_func, }; void ds2746_charger_control(int type) { int chg_ctl = DISABLE; int charge_type = type; switch (charge_type){ case CHARGE_OFF: /* CHARGER_EN is active low. Set to 1 to disable. */ chg_ctl = DISABLE; ds2746_blocking_notify(DS2784_CHARGING_CONTROL, &chg_ctl); /*if (temp >= TEMP_CRITICAL) pr_info("batt: charging OFF [OVERTEMP]\n"); else if (htc_batt_info.rep.cooldown) pr_info("batt: charging OFF [COOLDOWN]\n"); else if (htc_batt_info.rep.battery_full) pr_info("batt: charging OFF [FULL]\n"); else*/ #if HTC_BATTERY_DS2746_DEBUG_ENABLE pr_info("batt: charging OFF\n"); #endif break; case CHARGE_SLOW: chg_ctl = ENABLE_SLOW_CHG; ds2746_blocking_notify(DS2784_CHARGING_CONTROL, &chg_ctl); #if HTC_BATTERY_DS2746_DEBUG_ENABLE pr_info("batt: charging SLOW\n"); #endif break; case CHARGE_FAST: chg_ctl = ENABLE_FAST_CHG; ds2746_blocking_notify(DS2784_CHARGING_CONTROL, &chg_ctl); #if HTC_BATTERY_DS2746_DEBUG_ENABLE pr_info("batt: charging FAST\n"); #endif break; } } static void ds2746_program_alarm(struct ds2746_device_info *di, int seconds) { ktime_t low_interval = ktime_set(seconds - 10, 0); ktime_t slack = ktime_set(20, 0); ktime_t next; next = ktime_add(di->last_poll, low_interval); alarm_start_range(&di->alarm, next, ktime_add(next, slack)); } static void ds2746_battery_work(struct work_struct *work) { struct ds2746_device_info *di = container_of(work, struct ds2746_device_info, monitor_work); unsigned long flags; if (!htc_battery_initial) return; #if HTC_BATTERY_DS2746_DEBUG_ENABLE pr_info("[ds2746_batt] ds2746_battery_work*\n"); #endif do_power_alg(0); get_state_check_interval_min_sec(); di->last_poll = alarm_get_elapsed_realtime(); /* prevent suspend before starting the alarm */ local_irq_save(flags); wake_unlock(&di->work_wake_lock); if (poweralg.battery.is_power_on_reset) ds2746_program_alarm(di, PREDIC_POLL); else ds2746_program_alarm(di, FAST_POLL); local_irq_restore(flags); } static void ds2746_battery_alarm(struct alarm *alarm) { struct ds2746_device_info *di = container_of(alarm, struct ds2746_device_info, alarm); if (!htc_battery_initial) return; wake_lock(&di->work_wake_lock); queue_work(di->monitor_wqueue, &di->monitor_work); } static int ds2746_battery_probe(struct platform_device *pdev) { int rc; struct ds2746_device_info *di; struct ds2746_platform_data *pdata = pdev->dev.platform_data; pr_info("[ds2746_batt] ds2746_battery_prob\n"); poweralg.battery.thermal_id = pdata->func_get_thermal_id(); power_alg_preinit(); power_alg_init(&debug_config); di = kzalloc(sizeof(*di), GFP_KERNEL); if (!di){ rc = -ENOMEM; goto fail_register; } di->update_time = jiffies; platform_set_drvdata(pdev, di); di->dev = &pdev->dev; INIT_WORK(&di->monitor_work, ds2746_battery_work); di->monitor_wqueue = create_singlethread_workqueue(dev_name(&pdev->dev)); /* init to something sane */ di->last_poll = alarm_get_elapsed_realtime(); if 
(!di->monitor_wqueue){ rc = -ESRCH; goto fail_workqueue; } wake_lock_init(&di->work_wake_lock, WAKE_LOCK_SUSPEND, "ds2746-battery"); alarm_init(&di->alarm, ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, ds2746_battery_alarm); wake_lock(&di->work_wake_lock); queue_work(di->monitor_wqueue, &di->monitor_work); htc_battery_initial = 1; return 0; fail_workqueue : fail_register : kfree(di); return rc; } static int ds2746_battery_remove(struct platform_device *pdev) { struct ds2746_device_info *di = platform_get_drvdata(pdev); cancel_work_sync(&di->monitor_work); destroy_workqueue(di->monitor_wqueue); return 0; } /* FIXME: power down DQ master when not in use. */ static int ds2746_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct ds2746_device_info *di = platform_get_drvdata(pdev); unsigned long flags; pr_info("ds2746_batt:ds2746_suspend\n"); /* If we are on battery, reduce our update rate until * we next resume.*/ if (poweralg.charging_source == SOURCE_NONE){ local_irq_save(flags); ds2746_program_alarm(di, SLOW_POLL); di->slow_poll = 1; local_irq_restore(flags); } /*gpio_direction_output(87, 0);*/ return 0; } static void ds2746_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct ds2746_device_info *di = platform_get_drvdata(pdev); unsigned long flags; /* We might be on a slow sample cycle. If we're * resuming we should resample the battery state * if it's been over a minute since we last did * so, and move back to sampling every minute until * we suspend again.*/ /*gpio_direction_output(87, 1);*/ ndelay(100 * 1000); pr_info("ds2746_batt:ds2746_resume\n"); if (di->slow_poll){ local_irq_save(flags); ds2746_program_alarm(di, FAST_POLL); di->slow_poll = 0; local_irq_restore(flags); } } static struct dev_pm_ops ds2746_pm_ops = { .prepare = ds2746_suspend, .complete = ds2746_resume, }; MODULE_ALIAS("platform:ds2746-battery"); static struct platform_driver ds2746_battery_driver = { .driver = { .name = "ds2746-battery", .pm = &ds2746_pm_ops, }, .probe = ds2746_battery_probe, .remove = ds2746_battery_remove, }; static int __init ds2746_battery_init(void) { int ret; pr_info("[ds2746_batt]ds2746_battery_init"); wake_lock_init(&vbus_wake_lock, WAKE_LOCK_SUSPEND, "vbus_present"); register_notifier_cable_status(&cable_status_handler); ret = ds2746_i2c_init(); if (ret < 0){ return ret; } /*mutex_init(&htc_batt_info.lock);*/ return platform_driver_register(&ds2746_battery_driver); } static void __exit ds2746_battery_exit(void) { ds2746_i2c_exit(); platform_driver_unregister(&ds2746_battery_driver); } module_init(ds2746_battery_init); module_exit(ds2746_battery_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Andy.YS Wang <Andy.ys_wang@htc.com>"); MODULE_DESCRIPTION("ds2746 battery driver");
ajeet17181/fathom-kernel
drivers/power/ds2746_battery.c
C
gpl-2.0
39,067
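The ds2746 driver above hides its charging policy inside update_next_charge_state(), where the possible transitions are only spelled out in a comment (unknown, prediction, charging, full-wait-stable, full-charging, full-pending, pending, discharge). The fragment below restates just that transition skeleton as a standalone sketch so the policy is easier to follow; the state names mirror the driver's CHARGE_STATE_* values, but the transition function is a simplified illustration and deliberately omits the timers, voltage and current thresholds the real driver also evaluates.

// Illustrative distillation of the charge-state skeleton described in
// update_next_charge_state(); not the driver's actual logic.
enum ChargeState {
    STATE_UNKNOWN,
    STATE_PREDICTION,
    STATE_DISCHARGE,
    STATE_PENDING,
    STATE_CHARGING,
    STATE_FULL_WAIT_STABLE,
    STATE_FULL_CHARGING,
    STATE_FULL_PENDING,
};

// Cable presence dominates everything else, as in the driver: any "cable in"
// state drops to DISCHARGE when the cable is pulled, and DISCHARGE/PENDING
// move to CHARGING when a cable (re)appears and charging is allowed.
ChargeState next_state(ChargeState cur, bool cable_in, bool charge_allowed,
                       bool looks_full, bool full_confirmed, bool recharge_needed) {
    if (!cable_in)
        return STATE_DISCHARGE;
    if (!charge_allowed)
        return STATE_PENDING;

    switch (cur) {
    case STATE_UNKNOWN:
    case STATE_DISCHARGE:
    case STATE_PENDING:
        return STATE_CHARGING;
    case STATE_CHARGING:
        // voltage high and current tapered: re-check before declaring full
        return looks_full ? STATE_FULL_WAIT_STABLE : STATE_CHARGING;
    case STATE_FULL_WAIT_STABLE:
        // the driver re-checks the full condition several times before trusting it
        return full_confirmed ? STATE_FULL_CHARGING : STATE_CHARGING;
    case STATE_FULL_CHARGING:
        // stays full until the current tapers off or the timer expires,
        // otherwise falls back to normal charging
        return looks_full ? STATE_FULL_PENDING : STATE_CHARGING;
    case STATE_FULL_PENDING:
        // battery drained below the recharge threshold: top it up again
        return recharge_needed ? STATE_FULL_CHARGING : STATE_FULL_PENDING;
    default:
        return cur;
    }
}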
package org.totalboumboum.ai.v201112.ais.gungorkavus.v3.criterion;

import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

import org.totalboumboum.ai.v201112.adapter.agent.AiUtilityCriterionBoolean;
import org.totalboumboum.ai.v201112.adapter.communication.StopRequestException;
import org.totalboumboum.ai.v201112.adapter.data.AiHero;
import org.totalboumboum.ai.v201112.adapter.data.AiTile;
import org.totalboumboum.ai.v201112.adapter.data.AiZone;
import org.totalboumboum.ai.v201112.ais.gungorkavus.v3.GungorKavus;

/**
 * This class is a simple example of a binary criterion. Copy it, rename it
 * and modify it to fit your needs.
 *
 * @author Eyüp Burak Güngör
 * @author Umit Kavus
 */
@SuppressWarnings("deprecation")
public class VDAdvPertinent extends AiUtilityCriterionBoolean
{
	/** Name of this criterion */
	public static final String NAME = "Pertinent";

	/**
	 * Creates a new binary criterion.
	 *
	 * @param ai
	 * 		?
	 * @throws StopRequestException
	 * 		If the engine requests the agent to terminate.
	 */
	public VDAdvPertinent(GungorKavus ai) throws StopRequestException
	{	// init name
		super(NAME);
		ai.checkInterruption();

		// init agent
		this.ai = ai;
	}

	/////////////////////////////////////////////////////////////////
	// ARTIFICIAL INTELLIGENCE /////////////////////////////////////
	/////////////////////////////////////////////////////////////////
	/** */
	protected GungorKavus ai;

	/////////////////////////////////////////////////////////////////
	// PROCESS					/////////////////////////////////////
	/////////////////////////////////////////////////////////////////
	@Override
	public Boolean processValue(AiTile tile) throws StopRequestException
	{	ai.checkInterruption();
		boolean result = false;

		AiZone zone = ai.getZone();
		AiHero ownHero = zone.getOwnHero();
		int ownForce = ownHero.getBombNumberMax()*50+ownHero.getBombRange()*70;
		int opForce = 0;
		Set<AiHero> opponentL = new TreeSet<AiHero>();
		List<AiTile> tileNL = tile.getNeighbors();
		List<AiHero> remainingOp = zone.getRemainingHeroes();

		for(int i = 0;i<tileNL.size();i++){
			ai.checkInterruption();
			for(int j = 0;j<tileNL.get(i).getHeroes().size();j++){
				ai.checkInterruption();
				if(remainingOp.contains(tileNL.get(i).getHeroes().get(j)))
					opponentL.add(tileNL.get(i).getHeroes().get(j));
			}
		}

		Iterator<AiHero> heroIt = opponentL.iterator();
		while(heroIt.hasNext()){
			ai.checkInterruption();
			AiHero opponent = heroIt.next();
			opForce = opponent.getBombNumberMax()*50+opponent.getBombRange()*70;
			if(opForce<=ownForce){
				result = true;
			}
		}

		return result;
	}
}
vlabatut/totalboumboum
resources/ai/org/totalboumboum/ai/v201112/ais/gungorkavus/v3/criterion/VDAdvPertinent.java
Java
gpl-2.0
2,816
/* This file is part of the KDE project Copyright (C) 1998, 1999 Torben Weis <weis@kde.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef __konq_treeviewitem_h__ #define __konq_treeviewitem_h__ #include <tqstring.h> #include "konq_listviewitems.h" class KFileItem; class KonqTreeViewWidget; /** * An item specialized for directories */ class KonqListViewDir : public KonqListViewItem { public: /** * Create an item in the tree toplevel representing a directory * @param _parent the parent widget, the tree view * @param _fileitem the file item created by KDirLister */ KonqListViewDir( KonqTreeViewWidget *_parent, KFileItem *_fileitem ); /** * Create an item representing a directory, inside a directory * @param _treeview the parent tree view * @param _parent the parent widget, a directory item in the tree view * @param _fileitem the file item created by KDirLister */ KonqListViewDir( KonqTreeViewWidget *_treeview, KonqListViewDir *_parent, KFileItem *_fileitem ); /** * Called when user opens the directory (inherited from TQListViewItem). * Just calls @ref #open(). */ virtual void setOpen( bool _open ); /** * Called by setOpen, called when opening the directoy via restoreState and called * when the user presses "Reload". * Checks whether its contents are known (@see #setComplete) or whether * to reload the directory. */ void open( bool _open, bool _reload ); /** * Set to true when contents are completely known (one sublevel only). */ virtual void setComplete( bool _b ) { m_bComplete = _b; } /** * URL of this directory */ KURL kurl(); /** * URL of this directory * @param _trailing set to true for a trailing slash (see KURL) */ TQString url( int _trailing ); protected: bool m_bComplete; }; #endif
Fat-Zer/tdebase
konqueror/listview/konq_treeviewitem.h
C
gpl-2.0
2,542
let sentryCaptureException

export function logException(ex: Error): void {
  if (__DEV__) {
    window.console && console.error && console.error(ex)
  } else if (sentryCaptureException) {
    sentryCaptureException(ex)
  }
}

export async function initSentryLogger(): Promise<void> {
  const [{ init: sentryInit }, { captureException }] = await Promise.all([
    import(/* webpackChunkName: 'sentry' */ '@sentry/browser/dist/sdk'),
    import(/* webpackChunkName: 'sentry' */ '@sentry/browser'),
  ])

  if (__CONFIG__.sentry.dsn) {
    sentryCaptureException = captureException
    sentryInit({
      dsn: __CONFIG__.sentry.dsn,
      environment: 'prototype',
      maxBreadcrumbs: 10,
      ignoreErrors: ['top.GLOBALS'],
    })
  }
}
zsebtanar/zsebtanar-proto
src/client/generic/utils/logger.ts
TypeScript
gpl-2.0
738
define(["idb"],function(){"use strict";function setup(){dbPromise=idb.open(dbName,dbVersion,function(upgradeDB){switch(upgradeDB.oldVersion){case 0:upgradeDB.createObjectStore(dbName)}})}function getAll(){return dbPromise.then(function(db){return db.transaction(dbName).objectStore(dbName).getAll(null,1e4)})}function get(key){return dbPromise.then(function(db){return db.transaction(dbName).objectStore(dbName).get(key)})}function set(key,val){return dbPromise.then(function(db){var tx=db.transaction(dbName,"readwrite");return tx.objectStore(dbName).put(val,key),tx.complete})}function remove(key){return dbPromise.then(function(db){var tx=db.transaction(dbName,"readwrite");return tx.objectStore(dbName).delete(key),tx.complete})}function clear(){return dbPromise.then(function(db){var tx=db.transaction(dbName,"readwrite");return tx.objectStore(dbName).clear(key),tx.complete})}var dbPromise,dbName="users",dbVersion=1;return setup(),{get:get,set:set,remove:remove,clear:clear,getAll:getAll}});
daknin/Emby
MediaBrowser.WebDashboard/dashboard-ui/bower_components/emby-apiclient/sync/userrepository.js
JavaScript
gpl-2.0
998
#include <cut.h>

CUT_DEFINE_TEST(module1_test1)
{
    CUT_CHECK(5 == 5);
    CUT_CHECK(3 == 3);
    CUT_CHECK(1 == 1);
}

CUT_DEFINE_TEST(module1_test2)
{
    CUT_CHECK_OPERATOR_INT32(5, ==, 5);
    CUT_CHECK_OPERATOR_INT32(3, ==, 3);
    CUT_CHECK_OPERATOR_INT32(1, ==, 1);
}

CUT_DEFINE_TEST(module1_test3)
{
    CUT_CHECK_OPERATOR_INT32(5, >=, 5);
    CUT_CHECK_OPERATOR_INT32(3, >=, 3);
    CUT_CHECK_OPERATOR_INT32(1, > , 1);
}

CUT_DEFINE_MODULE(module1)
    CUT_CALL_TEST(module1_test1);
    CUT_CALL_TEST(module1_test2);
    CUT_CALL_TEST(module1_test3);
CUT_END_MODULE
EnachescuAlin/CUT
sample/module1.c
C
gpl-2.0
583
/** $lic$
 * Copyright (C) 2012-2014 by Massachusetts Institute of Technology
 * Copyright (C) 2010-2013 by The Board of Trustees of Stanford University
 *
 * This file is part of zsim.
 *
 * zsim is free software; you can redistribute it and/or modify it under the
 * terms of the GNU General Public License as published by the Free Software
 * Foundation, version 2.
 *
 * If you use this software in your research, we request that you reference
 * the zsim paper ("ZSim: Fast and Accurate Microarchitectural Simulation of
 * Thousand-Core Systems", Sanchez and Kozyrakis, ISCA-40, June 2013) as the
 * source of the simulator in any publications that use this software, and that
 * you send us a citation of your work.
 *
 * zsim is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef GALLOC_H_
#define GALLOC_H_

#include <stdlib.h>
#include <string.h>

int gm_init(size_t segmentSize);

void gm_attach(int shmid);

// C-style interface
void* gm_malloc(size_t size);
void* __gm_calloc(size_t num, size_t size); //deprecated, only used internally
void* __gm_memalign(size_t blocksize, size_t bytes); // deprecated, only used internally
char* gm_strdup(const char* str);
void gm_free(void* ptr);

// C++-style alloc interface (preferred)
template <typename T> T* gm_malloc() {return static_cast<T*>(gm_malloc(sizeof(T)));}
template <typename T> T* gm_malloc(size_t objs) {return static_cast<T*>(gm_malloc(sizeof(T)*objs));}
template <typename T> T* gm_calloc() {return static_cast<T*>(__gm_calloc(1, sizeof(T)));}
template <typename T> T* gm_calloc(size_t objs) {return static_cast<T*>(__gm_calloc(objs, sizeof(T)));}
template <typename T> T* gm_memalign(size_t blocksize) {return static_cast<T*>(__gm_memalign(blocksize, sizeof(T)));}
template <typename T> T* gm_memalign(size_t blocksize, size_t objs) {return static_cast<T*>(__gm_memalign(blocksize, sizeof(T)*objs));}

template <typename T> T* gm_dup(T* src, size_t objs) {
    T* dst = gm_malloc<T>(objs);
    memcpy(dst, src, sizeof(T)*objs);
    return dst;
}

void gm_set_glob_ptr(void* ptr);
void* gm_get_glob_ptr();

void gm_set_secondary_ptr(void* ptr);
void* gm_get_secondary_ptr();

void gm_stats();

bool gm_isready();
void gm_detach();


class GlobAlloc {
    public:
        virtual ~GlobAlloc() {}

        inline void* operator new (size_t sz) {
            return gm_malloc(sz);
        }

        //Placement new
        inline void* operator new (size_t sz, void* ptr) {
            return ptr;
        }

        inline void operator delete(void *p, size_t sz) {
            gm_free(p);
        }

        //Placement delete... make ICC happy. This would only fire on an exception
        void operator delete (void* p, void* ptr) {}
};

#endif  // GALLOC_H_
AXLEproject/axle-zsim-nvmain
src/galloc.h
C
gpl-2.0
3,065
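The galloc.h entry above only declares zsim's global-memory allocation interface. As a rough illustration of how the C++-style helpers and the GlobAlloc base class fit together, here is a minimal usage sketch. It is not taken from zsim itself: the SimObject type, the 1 GB segment size, and the assumption that gm_init() returns the shared-memory id passed to gm_attach() are all illustrative, and the snippet assumes the program is linked against zsim's allocator implementation.

// Hypothetical usage sketch for the galloc.h interface shown above.
// SimObject and the segment size are made up for illustration; only the
// functions and templates declared in the header are used.
#include <cstdio>
#include "galloc.h"

// Objects that must live in the shared global segment derive from GlobAlloc,
// so operator new/delete route through gm_malloc()/gm_free().
class SimObject : public GlobAlloc {
  public:
    explicit SimObject(int id) : id(id) {}
    int id;
};

int main() {
    // Initialize the global segment once (1 GB here); the returned int is
    // presumably the id that other processes would pass to gm_attach().
    int shmid = gm_init(1024*1024*1024ul);
    (void)shmid;

    // C++-style typed allocation (preferred): a zero-initialized array of 64 ints.
    int* counters = gm_calloc<int>(64);

    // GlobAlloc-derived objects can simply be new'ed; the memory comes from the segment.
    SimObject* obj = new SimObject(42);
    std::printf("obj id = %d, counters[0] = %d\n", obj->id, counters[0]);

    delete obj;          // routed to gm_free() via GlobAlloc::operator delete
    gm_free(counters);   // plain allocations are released with gm_free()
    return 0;
}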
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!--NewPage--> <HTML> <HEAD> <!-- Generated by javadoc (build 1.6.0_18) on Tue Nov 02 13:16:47 CET 2010 --> <TITLE> Format </TITLE> <META NAME="date" CONTENT="2010-11-02"> <LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../../stylesheet.css" TITLE="Style"> <SCRIPT type="text/javascript"> function windowTitle() { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="Format"; } } </SCRIPT> <NOSCRIPT> </NOSCRIPT> </HEAD> <BODY BGCOLOR="white" onload="windowTitle();"> <HR> <!-- ========= START OF TOP NAVBAR ======= --> <A NAME="navbar_top"><!-- --></A> <A HREF="#skip-navbar_top" title="Skip navigation links"></A> <TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY=""> <TR> <TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A NAME="navbar_top_firstrow"><!-- --></A> <TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY=""> <TR ALIGN="center" VALIGN="top"> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> &nbsp;<FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A>&nbsp;</TD> </TR> </TABLE> </TD> <TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM> </EM> </TD> </TR> <TR> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> &nbsp;<A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/FilterType.html" title="class in com.redhat.rhn.domain.monitoring.notification"><B>PREV CLASS</B></A>&nbsp; &nbsp;<A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/MatchType.html" title="class in com.redhat.rhn.domain.monitoring.notification"><B>NEXT CLASS</B></A></FONT></TD> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> <A HREF="../../../../../../index.html?com/redhat/rhn/domain/monitoring/notification/Format.html" target="_top"><B>FRAMES</B></A> &nbsp; &nbsp;<A HREF="Format.html" target="_top"><B>NO FRAMES</B></A> &nbsp; &nbsp;<SCRIPT type="text/javascript"> <!-- if(window==top) { document.writeln('<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>'); } //--> </SCRIPT> <NOSCRIPT> <A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A> </NOSCRIPT> </FONT></TD> </TR> <TR> <TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2"> SUMMARY:&nbsp;NESTED&nbsp;|&nbsp;FIELD&nbsp;|&nbsp;<A HREF="#constructor_summary">CONSTR</A>&nbsp;|&nbsp;<A HREF="#method_summary">METHOD</A></FONT></TD> <TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2"> DETAIL:&nbsp;FIELD&nbsp;|&nbsp;<A HREF="#constructor_detail">CONSTR</A>&nbsp;|&nbsp;<A HREF="#method_detail">METHOD</A></FONT></TD> </TR> </TABLE> <A NAME="skip-navbar_top"></A> <!-- ========= 
END OF TOP NAVBAR ========= --> <HR> <!-- ======== START OF CLASS DATA ======== --> <H2> <FONT SIZE="-1"> com.redhat.rhn.domain.monitoring.notification</FONT> <BR> Class Format</H2> <PRE> java.lang.Object <IMG SRC="../../../../../../resources/inherit.gif" ALT="extended by "><B>com.redhat.rhn.domain.monitoring.notification.Format</B> </PRE> <HR> <DL> <DT><PRE>public class <B>Format</B><DT>extends java.lang.Object</DL> </PRE> <P> Format - Class representation of the table rhn_notification_formats. <P> <P> <HR> <P> <!-- ======== CONSTRUCTOR SUMMARY ======== --> <A NAME="constructor_summary"><!-- --></A> <TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY=""> <TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor"> <TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2"> <B>Constructor Summary</B></FONT></TH> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#Format()">Format</A></B>()</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</TD> </TR> </TABLE> &nbsp; <!-- ========== METHOD SUMMARY =========== --> <A NAME="method_summary"><!-- --></A> <TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY=""> <TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor"> <TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2"> <B>Method Summary</B></FONT></TH> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;java.lang.String</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#getBodyFormat()">getBodyFormat</A></B>()</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Getter for bodyFormat</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;java.lang.Long</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#getCustomerId()">getCustomerId</A></B>()</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Getter for customerId</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;java.lang.String</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#getDescription()">getDescription</A></B>()</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Getter for description</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;java.lang.Long</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#getId()">getId</A></B>()</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Getter for id</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;java.lang.Long</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#getMaxBodyLength()">getMaxBodyLength</A></B>()</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Getter for maxBodyLength</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;java.lang.Long</CODE></FONT></TD> <TD><CODE><B><A 
HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#getMaxSubjectLength()">getMaxSubjectLength</A></B>()</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Getter for maxSubjectLength</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;java.lang.String</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#getReplyFormat()">getReplyFormat</A></B>()</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Getter for replyFormat</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;java.lang.String</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#getSubjectFormat()">getSubjectFormat</A></B>()</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Getter for subjectFormat</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;void</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#setBodyFormat(java.lang.String)">setBodyFormat</A></B>(java.lang.String&nbsp;bodyFormatIn)</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Setter for bodyFormat</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;void</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#setCustomerId(java.lang.Long)">setCustomerId</A></B>(java.lang.Long&nbsp;customerIdIn)</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Setter for customerId</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;void</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#setDescription(java.lang.String)">setDescription</A></B>(java.lang.String&nbsp;descriptionIn)</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Setter for description</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;void</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#setId(java.lang.Long)">setId</A></B>(java.lang.Long&nbsp;idIn)</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Setter for id</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;void</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#setMaxBodyLength(java.lang.Long)">setMaxBodyLength</A></B>(java.lang.Long&nbsp;maxBodyLengthIn)</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Setter for maxBodyLength</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;void</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#setMaxSubjectLength(java.lang.Long)">setMaxSubjectLength</A></B>(java.lang.Long&nbsp;maxSubjectLengthIn)</CODE> <BR> 
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Setter for maxSubjectLength</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;void</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#setReplyFormat(java.lang.String)">setReplyFormat</A></B>(java.lang.String&nbsp;replyFormatIn)</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Setter for replyFormat</TD> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1"> <CODE>&nbsp;void</CODE></FONT></TD> <TD><CODE><B><A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/Format.html#setSubjectFormat(java.lang.String)">setSubjectFormat</A></B>(java.lang.String&nbsp;subjectFormatIn)</CODE> <BR> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Setter for subjectFormat</TD> </TR> </TABLE> &nbsp;<A NAME="methods_inherited_from_class_java.lang.Object"><!-- --></A> <TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY=""> <TR BGCOLOR="#EEEEFF" CLASS="TableSubHeadingColor"> <TH ALIGN="left"><B>Methods inherited from class java.lang.Object</B></TH> </TR> <TR BGCOLOR="white" CLASS="TableRowColor"> <TD><CODE>clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</CODE></TD> </TR> </TABLE> &nbsp; <P> <!-- ========= CONSTRUCTOR DETAIL ======== --> <A NAME="constructor_detail"><!-- --></A> <TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY=""> <TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor"> <TH ALIGN="left" COLSPAN="1"><FONT SIZE="+2"> <B>Constructor Detail</B></FONT></TH> </TR> </TABLE> <A NAME="Format()"><!-- --></A><H3> Format</H3> <PRE> public <B>Format</B>()</PRE> <DL> </DL> <!-- ============ METHOD DETAIL ========== --> <A NAME="method_detail"><!-- --></A> <TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY=""> <TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor"> <TH ALIGN="left" COLSPAN="1"><FONT SIZE="+2"> <B>Method Detail</B></FONT></TH> </TR> </TABLE> <A NAME="getId()"><!-- --></A><H3> getId</H3> <PRE> public java.lang.Long <B>getId</B>()</PRE> <DL> <DD>Getter for id <P> <DD><DL> <DT><B>Returns:</B><DD>Long to get</DL> </DD> </DL> <HR> <A NAME="setId(java.lang.Long)"><!-- --></A><H3> setId</H3> <PRE> public void <B>setId</B>(java.lang.Long&nbsp;idIn)</PRE> <DL> <DD>Setter for id <P> <DD><DL> <DT><B>Parameters:</B><DD><CODE>idIn</CODE> - to set</DL> </DD> </DL> <HR> <A NAME="getCustomerId()"><!-- --></A><H3> getCustomerId</H3> <PRE> public java.lang.Long <B>getCustomerId</B>()</PRE> <DL> <DD>Getter for customerId <P> <DD><DL> <DT><B>Returns:</B><DD>Long to get</DL> </DD> </DL> <HR> <A NAME="setCustomerId(java.lang.Long)"><!-- --></A><H3> setCustomerId</H3> <PRE> public void <B>setCustomerId</B>(java.lang.Long&nbsp;customerIdIn)</PRE> <DL> <DD>Setter for customerId <P> <DD><DL> <DT><B>Parameters:</B><DD><CODE>customerIdIn</CODE> - to set</DL> </DD> </DL> <HR> <A NAME="getDescription()"><!-- --></A><H3> getDescription</H3> <PRE> public java.lang.String <B>getDescription</B>()</PRE> <DL> <DD>Getter for description <P> <DD><DL> <DT><B>Returns:</B><DD>String to get</DL> </DD> </DL> <HR> <A NAME="setDescription(java.lang.String)"><!-- --></A><H3> setDescription</H3> <PRE> public void <B>setDescription</B>(java.lang.String&nbsp;descriptionIn)</PRE> <DL> <DD>Setter for description <P> <DD><DL> 
<DT><B>Parameters:</B><DD><CODE>descriptionIn</CODE> - to set</DL> </DD> </DL> <HR> <A NAME="getSubjectFormat()"><!-- --></A><H3> getSubjectFormat</H3> <PRE> public java.lang.String <B>getSubjectFormat</B>()</PRE> <DL> <DD>Getter for subjectFormat <P> <DD><DL> <DT><B>Returns:</B><DD>String to get</DL> </DD> </DL> <HR> <A NAME="setSubjectFormat(java.lang.String)"><!-- --></A><H3> setSubjectFormat</H3> <PRE> public void <B>setSubjectFormat</B>(java.lang.String&nbsp;subjectFormatIn)</PRE> <DL> <DD>Setter for subjectFormat <P> <DD><DL> <DT><B>Parameters:</B><DD><CODE>subjectFormatIn</CODE> - to set</DL> </DD> </DL> <HR> <A NAME="getBodyFormat()"><!-- --></A><H3> getBodyFormat</H3> <PRE> public java.lang.String <B>getBodyFormat</B>()</PRE> <DL> <DD>Getter for bodyFormat <P> <DD><DL> <DT><B>Returns:</B><DD>String to get</DL> </DD> </DL> <HR> <A NAME="setBodyFormat(java.lang.String)"><!-- --></A><H3> setBodyFormat</H3> <PRE> public void <B>setBodyFormat</B>(java.lang.String&nbsp;bodyFormatIn)</PRE> <DL> <DD>Setter for bodyFormat <P> <DD><DL> <DT><B>Parameters:</B><DD><CODE>bodyFormatIn</CODE> - to set</DL> </DD> </DL> <HR> <A NAME="getMaxSubjectLength()"><!-- --></A><H3> getMaxSubjectLength</H3> <PRE> public java.lang.Long <B>getMaxSubjectLength</B>()</PRE> <DL> <DD>Getter for maxSubjectLength <P> <DD><DL> <DT><B>Returns:</B><DD>Long to get</DL> </DD> </DL> <HR> <A NAME="setMaxSubjectLength(java.lang.Long)"><!-- --></A><H3> setMaxSubjectLength</H3> <PRE> public void <B>setMaxSubjectLength</B>(java.lang.Long&nbsp;maxSubjectLengthIn)</PRE> <DL> <DD>Setter for maxSubjectLength <P> <DD><DL> <DT><B>Parameters:</B><DD><CODE>maxSubjectLengthIn</CODE> - to set</DL> </DD> </DL> <HR> <A NAME="getMaxBodyLength()"><!-- --></A><H3> getMaxBodyLength</H3> <PRE> public java.lang.Long <B>getMaxBodyLength</B>()</PRE> <DL> <DD>Getter for maxBodyLength <P> <DD><DL> <DT><B>Returns:</B><DD>Long to get</DL> </DD> </DL> <HR> <A NAME="setMaxBodyLength(java.lang.Long)"><!-- --></A><H3> setMaxBodyLength</H3> <PRE> public void <B>setMaxBodyLength</B>(java.lang.Long&nbsp;maxBodyLengthIn)</PRE> <DL> <DD>Setter for maxBodyLength <P> <DD><DL> <DT><B>Parameters:</B><DD><CODE>maxBodyLengthIn</CODE> - to set</DL> </DD> </DL> <HR> <A NAME="getReplyFormat()"><!-- --></A><H3> getReplyFormat</H3> <PRE> public java.lang.String <B>getReplyFormat</B>()</PRE> <DL> <DD>Getter for replyFormat <P> <DD><DL> <DT><B>Returns:</B><DD>String to get</DL> </DD> </DL> <HR> <A NAME="setReplyFormat(java.lang.String)"><!-- --></A><H3> setReplyFormat</H3> <PRE> public void <B>setReplyFormat</B>(java.lang.String&nbsp;replyFormatIn)</PRE> <DL> <DD>Setter for replyFormat <P> <DD><DL> <DT><B>Parameters:</B><DD><CODE>replyFormatIn</CODE> - to set</DL> </DD> </DL> <!-- ========= END OF CLASS DATA ========= --> <HR> <!-- ======= START OF BOTTOM NAVBAR ====== --> <A NAME="navbar_bottom"><!-- --></A> <A HREF="#skip-navbar_bottom" title="Skip navigation links"></A> <TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY=""> <TR> <TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A NAME="navbar_bottom_firstrow"><!-- --></A> <TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY=""> <TR ALIGN="center" VALIGN="top"> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#FFFFFF" 
CLASS="NavBarCell1Rev"> &nbsp;<FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A>&nbsp;</TD> </TR> </TABLE> </TD> <TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM> </EM> </TD> </TR> <TR> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> &nbsp;<A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/FilterType.html" title="class in com.redhat.rhn.domain.monitoring.notification"><B>PREV CLASS</B></A>&nbsp; &nbsp;<A HREF="../../../../../../com/redhat/rhn/domain/monitoring/notification/MatchType.html" title="class in com.redhat.rhn.domain.monitoring.notification"><B>NEXT CLASS</B></A></FONT></TD> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> <A HREF="../../../../../../index.html?com/redhat/rhn/domain/monitoring/notification/Format.html" target="_top"><B>FRAMES</B></A> &nbsp; &nbsp;<A HREF="Format.html" target="_top"><B>NO FRAMES</B></A> &nbsp; &nbsp;<SCRIPT type="text/javascript"> <!-- if(window==top) { document.writeln('<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>'); } //--> </SCRIPT> <NOSCRIPT> <A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A> </NOSCRIPT> </FONT></TD> </TR> <TR> <TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2"> SUMMARY:&nbsp;NESTED&nbsp;|&nbsp;FIELD&nbsp;|&nbsp;<A HREF="#constructor_summary">CONSTR</A>&nbsp;|&nbsp;<A HREF="#method_summary">METHOD</A></FONT></TD> <TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2"> DETAIL:&nbsp;FIELD&nbsp;|&nbsp;<A HREF="#constructor_detail">CONSTR</A>&nbsp;|&nbsp;<A HREF="#method_detail">METHOD</A></FONT></TD> </TR> </TABLE> <A NAME="skip-navbar_bottom"></A> <!-- ======== END OF BOTTOM NAVBAR ======= --> <HR> </BODY> </HTML>
colloquium/spacewalk
documentation/javadoc/com/redhat/rhn/domain/monitoring/notification/Format.html
HTML
gpl-2.0
20,115