repo_name
string
path
string
copies
string
size
string
content
string
license
string
dinhchitrung/Windows-universal-samples
d2dcustomeffects/cpp/computeshader/dfteffect.cpp
24
7439
//// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF //// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO //// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A //// PARTICULAR PURPOSE. //// //// Copyright (c) Microsoft Corporation. All rights reserved #include "pch.h" #include <initguid.h> #include "DftEffect.h" // Helper macro for defining XML strings. #define XML(X) TEXT(#X) // This effect demonstrates how to implement a basic Discrete // Fourier Transform (DFT) custom Direct2D effect using DirectCompute. DftEffect::DftEffect() : m_cRef(0) { } // Register the effect, its properties, and its creation method with D2D. HRESULT DftEffect::Register(_In_ ID2D1Factory1* pFactory) { PCWSTR pszXml = XML( <?xml version='1.0'?> <Effect> <!-- System Properties --> <Property name='DisplayName' type='string' value='Sample Discrete Fourier Transform (DFT) Compute Effect'/> <Property name='Author' type='string' value='Microsoft Corporation'/> <Property name='Category' type='string' value='Transform'/> <Property name='Description' type='string' value='A sample compute shader effect that computes the Discrete Fourier Transform of a given input.'/> <Inputs> <Input name='Source'/> </Inputs> </Effect> ); return pFactory->RegisterEffectFromString(CLSID_CustomDftEffect, pszXml, nullptr, 0, Create); } // A static method to create and return an instance of the effect. HRESULT __stdcall DftEffect::Create(_Outptr_ IUnknown** ppEffectImpl) { // Use the nothrow syntax so that we can return an HRESULT in the event of an allocation failure. // No try/catch is necessary here since there is no code / no possible exceptions in the constructor. *ppEffectImpl = static_cast<ID2D1EffectImpl *>(new (std::nothrow) DftEffect()); if (*ppEffectImpl == nullptr) { return E_OUTOFMEMORY; } else { (*ppEffectImpl)->AddRef(); return S_OK; } } // This effect method is called first, after the effect is instantiated with Create(). 
// Builds the effect's internal transform graph: a horizontal DFT pass
// feeding a vertical DFT pass. Fails early (without modifying the graph)
// if the device cannot run compute shaders.
IFACEMETHODIMP DftEffect::Initialize(
    _In_ ID2D1EffectContext* pEffectContext,
    _In_ ID2D1TransformGraph* pTransformGraph
    )
{
    // Check for compute shader support - DX Feature Level 10_0 is not guaranteed to support compute shaders.
    // Immediately return if compute is not supported.
    D2D1_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS hardwareOptions;
    HRESULT hr = pEffectContext->CheckFeatureSupport(
        D2D1_FEATURE_D3D10_X_HARDWARE_OPTIONS,
        &hardwareOptions,
        sizeof(hardwareOptions)
        );

    // As stated above, not all DX Feature Level 10_0 parts support compute shaders. In this app's case,
    // it checks for compute shader support at device creation in DirectXBase.cpp. If support is missing,
    // it uses a software fallback (WARP). All effects that use compute shaders should perform this
    // check at instantiation.
    if (SUCCEEDED(hr))
    {
        if (!hardwareOptions.computeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x)
        {
            return D2DERR_INSUFFICIENT_DEVICE_CAPABILITIES;
        }
    }
    else
    {
        return hr;
    }

    // Create new Transform nodes. The vertical transform must run after all of the horizontal code
    // has run, so it is performed in its own transform. Smart pointers are used here to avoid memory leaks
    // in the event of a failure.
    Microsoft::WRL::ComPtr<DftTransform> dftHorizontalTransform;
    Microsoft::WRL::ComPtr<DftTransform> dftVerticalTransform;

    if (SUCCEEDED(hr))
    {
        try
        {
            // nothrow covers allocation failure (null result); the try/catch
            // covers exceptions thrown by the DftTransform constructor itself.
            dftHorizontalTransform = new (std::nothrow) DftTransform(pEffectContext, TransformType::Horizontal);
            dftVerticalTransform = new (std::nothrow) DftTransform(pEffectContext, TransformType::Vertical);

            if (dftHorizontalTransform == nullptr || dftVerticalTransform == nullptr)
            {
                return E_OUTOFMEMORY;
            }
        }
        catch (Platform::Exception^ e)
        {
            // Return HRESULT if transform throws exception during creation.
            return e->HResult;
        }
    }

    // The remaining steps form an hr-chain: each runs only if all previous
    // steps succeeded, so the graph is never left partially wired on error.
    if (SUCCEEDED(hr))
    {
        // Add horizontal transform node to the graph. Still need to connect its inputs/outputs (done below).
        hr = pTransformGraph->AddNode(dftHorizontalTransform.Get());
    }

    if (SUCCEEDED(hr))
    {
        // Connect the input of this transform node to the parent effect's input.
        hr = pTransformGraph->ConnectToEffectInput(0, dftHorizontalTransform.Get(), 0);
    }

    if (SUCCEEDED(hr))
    {
        // Add vertical transform node to the graph. Still need to connect its inputs/outputs (done below).
        // Logically, this transform relies on the horizontal transform's output, and must be located
        // after it in the effect graph.
        hr = pTransformGraph->AddNode(dftVerticalTransform.Get());
    }

    if (SUCCEEDED(hr))
    {
        // Connects the input of the DFTV transform node to the output of the DFTH transform node.
        hr = pTransformGraph->ConnectNode(dftHorizontalTransform.Get(), dftVerticalTransform.Get(), 0);
    }

    if (SUCCEEDED(hr))
    {
        // Designates the output of the DFTV transform node as the output of the effect.
        hr = pTransformGraph->SetOutputNode(dftVerticalTransform.Get());
    }

    return hr;
}

// This method provides the effect implementation with an interface for
// specifying its transform graph and transform graph changes. This will be
// called when the effect is first initialized, and if the number of inputs
// to the effect changes (variable-input effects).
IFACEMETHODIMP DftEffect::SetGraph(_In_ ID2D1TransformGraph* pTransformGraph)
{
    // Because this effect only has one input, it doesn't need to do anything here.
    return E_NOTIMPL;
}

// During this method, the effect must finalize the topology of its internal
// transform graph. As in this example effect, the transform graph may already
// have been configured by the effect's Initialize method.
//
// This effect method is called before rendering the effect but after
// property changes occur.
IFACEMETHODIMP DftEffect::PrepareForRender(D2D1_CHANGE_TYPE changeType)
{
    // Graph topology was fixed in Initialize; nothing to update per-render.
    return S_OK;
}

IFACEMETHODIMP_(ULONG) DftEffect::AddRef()
{
    // D2D ensures that effects are only referenced from one thread at a time.
    // To improve performance, we simply increment/decrement our reference count
    // rather than use atomic InterlockedIncrement()/InterlockedDecrement() functions.
    m_cRef++;
    return m_cRef;
}

IFACEMETHODIMP_(ULONG) DftEffect::Release()
{
    // Non-atomic for the same single-thread reason as AddRef().
    m_cRef--;

    if (m_cRef == 0)
    {
        delete this;
        return 0;
    }
    else
    {
        return m_cRef;
    }
}

// Standard COM QueryInterface: supports ID2D1EffectImpl and IUnknown only.
// Returns E_NOINTERFACE (with *ppOutput null) for anything else.
IFACEMETHODIMP DftEffect::QueryInterface(
    REFIID riid,
    _Outptr_ void** ppOutput
    )
{
    *ppOutput = nullptr;
    HRESULT hr = S_OK;

    if (riid == __uuidof(ID2D1EffectImpl))
    {
        *ppOutput = static_cast<ID2D1EffectImpl*>(this);
    }
    else if (riid == __uuidof(IUnknown))
    {
        *ppOutput = this;
    }
    else
    {
        hr = E_NOINTERFACE;
    }

    if (*ppOutput != nullptr)
    {
        // Hand out a reference along with the interface pointer.
        AddRef();
    }

    return hr;
}
mit
lunastorm/wissbi
3rd_party/libcxx/test/utilities/memory/unique.ptr/unique.ptr.single/unique.ptr.single.ctor/move_convert07.fail.cpp
29
1168
//===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // <memory> // unique_ptr // Test unique_ptr converting move ctor #include <memory> #include <cassert> // test converting move ctor. Should only require a MoveConstructible deleter, or if // deleter is a reference, not even that. // Explicit version struct A { static int count; A() {++count;} A(const A&) {++count;} virtual ~A() {--count;} }; int A::count = 0; struct B : public A { static int count; B() {++count;} B(const B&) {++count;} virtual ~B() {--count;} }; int B::count = 0; int main() { { const std::unique_ptr<B> s(new B); A* p = s.get(); std::unique_ptr<A> s2(s); assert(s2.get() == p); assert(s.get() == 0); assert(A::count == 1); assert(B::count == 1); } assert(A::count == 0); assert(B::count == 0); }
mit
Sridhar-MS/coreclr
src/pal/tests/palsuite/filemapping_memmgt/CreateFileMappingA/test8/createfilemapping.c
31
2594
// // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // /*============================================================= ** ** Source: createfilemapping.c (test 8) ** ** Purpose: Positive test the CreateFileMapping API. ** Test the un-verifiable parameter combinations. ** ** **============================================================*/ #include <palsuite.h> const int MAPPINGSIZE = 2048; HANDLE SWAP_HANDLE = ((VOID *)(-1)); int __cdecl main(int argc, char *argv[]) { HANDLE hFileMap; /* Initialize the PAL environment. */ if(0 != PAL_Initialize(argc, argv)) { return FAIL; } /* Create a READONLY, "swap", un-named file mapping. * This test is unverifiable since there is no hook back to the file map * because it is un-named. As well, since it resides in "swap", and is * initialized to zero, there is nothing to read. */ hFileMap = CreateFileMapping( SWAP_HANDLE, NULL, /*not inherited*/ PAGE_READONLY, /*read only*/ 0, /*high-order size*/ MAPPINGSIZE, /*low-order size*/ NULL); /*un-named object*/ if(NULL == hFileMap) { Fail("ERROR:%u: Failed to create File Mapping.\n", GetLastError()); } /* Create a COPYWRITE, "swap", un-named file mapping. * This test is unverifiable, here is a quote from MSDN: * * Copy on write access. If you create the map with PAGE_WRITECOPY and * the view with FILE_MAP_COPY, you will receive a view to file. If you * write to it, the pages are automatically swappable and the modifications * you make will not go to the original data file. * */ hFileMap = CreateFileMapping( SWAP_HANDLE, NULL, /*not inherited*/ PAGE_WRITECOPY, /*read only*/ 0, /*high-order size*/ MAPPINGSIZE, /*low-order size*/ NULL); /*unnamed object*/ if(NULL == hFileMap) { Fail("ERROR:%u: Failed to create File Mapping.\n", GetLastError()); } /* Terminate the PAL. */ PAL_Terminate(); return PASS; }
mit
miikama/cpp-math
eigen/test/smallvectors.cpp
291
2125
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_NO_STATIC_ASSERT #include "main.h" template<typename Scalar> void smallVectors() { typedef Matrix<Scalar, 1, 2> V2; typedef Matrix<Scalar, 3, 1> V3; typedef Matrix<Scalar, 1, 4> V4; typedef Matrix<Scalar, Dynamic, 1> VX; Scalar x1 = internal::random<Scalar>(), x2 = internal::random<Scalar>(), x3 = internal::random<Scalar>(), x4 = internal::random<Scalar>(); V2 v2(x1, x2); V3 v3(x1, x2, x3); V4 v4(x1, x2, x3, x4); VERIFY_IS_APPROX(x1, v2.x()); VERIFY_IS_APPROX(x1, v3.x()); VERIFY_IS_APPROX(x1, v4.x()); VERIFY_IS_APPROX(x2, v2.y()); VERIFY_IS_APPROX(x2, v3.y()); VERIFY_IS_APPROX(x2, v4.y()); VERIFY_IS_APPROX(x3, v3.z()); VERIFY_IS_APPROX(x3, v4.z()); VERIFY_IS_APPROX(x4, v4.w()); if (!NumTraits<Scalar>::IsInteger) { VERIFY_RAISES_ASSERT(V3(2, 1)) VERIFY_RAISES_ASSERT(V3(3, 2)) VERIFY_RAISES_ASSERT(V3(Scalar(3), 1)) VERIFY_RAISES_ASSERT(V3(3, Scalar(1))) VERIFY_RAISES_ASSERT(V3(Scalar(3), Scalar(1))) VERIFY_RAISES_ASSERT(V3(Scalar(123), Scalar(123))) VERIFY_RAISES_ASSERT(V4(1, 3)) VERIFY_RAISES_ASSERT(V4(2, 4)) VERIFY_RAISES_ASSERT(V4(1, Scalar(4))) VERIFY_RAISES_ASSERT(V4(Scalar(1), 4)) VERIFY_RAISES_ASSERT(V4(Scalar(1), Scalar(4))) VERIFY_RAISES_ASSERT(V4(Scalar(123), Scalar(123))) VERIFY_RAISES_ASSERT(VX(3, 2)) VERIFY_RAISES_ASSERT(VX(Scalar(3), 1)) VERIFY_RAISES_ASSERT(VX(3, Scalar(1))) VERIFY_RAISES_ASSERT(VX(Scalar(3), Scalar(1))) VERIFY_RAISES_ASSERT(VX(Scalar(123), Scalar(123))) } } void test_smallvectors() { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST(smallVectors<int>() ); CALL_SUBTEST(smallVectors<float>() ); CALL_SUBTEST(smallVectors<double>() ); } }
mit
peters/openalpr-windows
tesseract-ocr/dependencies/libtiff/libtiff/src/tif_tile.c
36
7879
/* $Id: tif_tile.c,v 1.12.2.1 2010-06-08 18:50:43 bfriesen Exp $ */

/*
 * Copyright (c) 1991-1997 Sam Leffler
 * Copyright (c) 1991-1997 Silicon Graphics, Inc.
 *
 * Permission to use, copy, modify, distribute, and sell this software and
 * its documentation for any purpose is hereby granted without fee, provided
 * that (i) the above copyright notices and this permission notice appear in
 * all copies of the software and related documentation, and (ii) the names of
 * Sam Leffler and Silicon Graphics may not be used in any advertising or
 * publicity relating to the software without the specific, prior written
 * permission of Sam Leffler and Silicon Graphics.
 *
 * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR
 * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND,
 * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
 * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF
 * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

/*
 * TIFF Library.
 *
 * Tiled Image Support Routines.
 */
#include "tiffiop.h"

/*
 * Overflow-checked addition: returns summand1 + summand2 as uint32,
 * or 0 (after reporting via TIFFErrorExt) if the sum wrapped.
 * 'where' names the calling routine for the error message.
 */
static uint32
summarize(TIFF* tif, size_t summand1, size_t summand2, const char* where)
{
	/*
	 * XXX: We are using casting to uint32 here, because sizeof(size_t)
	 * may be larger than sizeof(uint32) on 64-bit architectures.
	 */
	uint32	bytes = summand1 + summand2;

	/* If subtracting one summand back does not recover the other,
	 * the 32-bit addition overflowed. */
	if (bytes - summand1 != summand2) {
		TIFFErrorExt(tif->tif_clientdata, tif->tif_name,
			     "Integer overflow in %s", where);
		bytes = 0;
	}

	return (bytes);
}

/*
 * Overflow-checked multiplication: returns nmemb * elem_size as uint32,
 * or 0 (after reporting) if the product wrapped. Division-based check
 * mirrors the addition check above.
 */
static uint32
multiply(TIFF* tif, size_t nmemb, size_t elem_size, const char* where)
{
	uint32	bytes = nmemb * elem_size;

	if (elem_size && bytes / elem_size != nmemb) {
		TIFFErrorExt(tif->tif_clientdata, tif->tif_name,
			     "Integer overflow in %s", where);
		bytes = 0;
	}

	return (bytes);
}

/*
 * Compute which tile an (x,y,z,s) value is in.
 * Tile dimensions of (uint32)-1 mean "whole image" and are clamped to the
 * image dimensions; separate-plane images add a per-sample plane offset.
 * Returns tile 1 if any effective tile dimension is zero.
 */
ttile_t
TIFFComputeTile(TIFF* tif, uint32 x, uint32 y, uint32 z, tsample_t s)
{
	TIFFDirectory *td = &tif->tif_dir;
	uint32 dx = td->td_tilewidth;
	uint32 dy = td->td_tilelength;
	uint32 dz = td->td_tiledepth;
	ttile_t tile = 1;

	if (td->td_imagedepth == 1)
		z = 0;
	if (dx == (uint32) -1)
		dx = td->td_imagewidth;
	if (dy == (uint32) -1)
		dy = td->td_imagelength;
	if (dz == (uint32) -1)
		dz = td->td_imagedepth;
	if (dx != 0 && dy != 0 && dz != 0) {
		/* Tiles across, down and deep, rounding partial tiles up. */
		uint32 xpt = TIFFhowmany(td->td_imagewidth, dx);
		uint32 ypt = TIFFhowmany(td->td_imagelength, dy);
		uint32 zpt = TIFFhowmany(td->td_imagedepth, dz);

		if (td->td_planarconfig == PLANARCONFIG_SEPARATE)
			tile = (xpt*ypt*zpt)*s +
			     (xpt*ypt)*(z/dz) +
			     xpt*(y/dy) +
			     x/dx;
		else
			tile = (xpt*ypt)*(z/dz) + xpt*(y/dy) + x/dx;
	}
	return (tile);
}

/*
 * Check an (x,y,z,s) coordinate
 * against the image bounds.
*/ int TIFFCheckTile(TIFF* tif, uint32 x, uint32 y, uint32 z, tsample_t s) { TIFFDirectory *td = &tif->tif_dir; if (x >= td->td_imagewidth) { TIFFErrorExt(tif->tif_clientdata, tif->tif_name, "%lu: Col out of range, max %lu", (unsigned long) x, (unsigned long) (td->td_imagewidth - 1)); return (0); } if (y >= td->td_imagelength) { TIFFErrorExt(tif->tif_clientdata, tif->tif_name, "%lu: Row out of range, max %lu", (unsigned long) y, (unsigned long) (td->td_imagelength - 1)); return (0); } if (z >= td->td_imagedepth) { TIFFErrorExt(tif->tif_clientdata, tif->tif_name, "%lu: Depth out of range, max %lu", (unsigned long) z, (unsigned long) (td->td_imagedepth - 1)); return (0); } if (td->td_planarconfig == PLANARCONFIG_SEPARATE && s >= td->td_samplesperpixel) { TIFFErrorExt(tif->tif_clientdata, tif->tif_name, "%lu: Sample out of range, max %lu", (unsigned long) s, (unsigned long) (td->td_samplesperpixel - 1)); return (0); } return (1); } /* * Compute how many tiles are in an image. */ ttile_t TIFFNumberOfTiles(TIFF* tif) { TIFFDirectory *td = &tif->tif_dir; uint32 dx = td->td_tilewidth; uint32 dy = td->td_tilelength; uint32 dz = td->td_tiledepth; ttile_t ntiles; if (dx == (uint32) -1) dx = td->td_imagewidth; if (dy == (uint32) -1) dy = td->td_imagelength; if (dz == (uint32) -1) dz = td->td_imagedepth; ntiles = (dx == 0 || dy == 0 || dz == 0) ? 0 : multiply(tif, multiply(tif, TIFFhowmany(td->td_imagewidth, dx), TIFFhowmany(td->td_imagelength, dy), "TIFFNumberOfTiles"), TIFFhowmany(td->td_imagedepth, dz), "TIFFNumberOfTiles"); if (td->td_planarconfig == PLANARCONFIG_SEPARATE) ntiles = multiply(tif, ntiles, td->td_samplesperpixel, "TIFFNumberOfTiles"); return (ntiles); } /* * Compute the # bytes in each row of a tile. 
 */
tsize_t
TIFFTileRowSize(TIFF* tif)
{
	TIFFDirectory *td = &tif->tif_dir;
	tsize_t rowsize;

	/* No tiling configured: row size is zero. */
	if (td->td_tilelength == 0 || td->td_tilewidth == 0)
		return ((tsize_t) 0);
	/* Bits per row = bits-per-sample * width (* samples when the
	 * samples are interleaved in one plane). */
	rowsize = multiply(tif, td->td_bitspersample, td->td_tilewidth,
			   "TIFFTileRowSize");
	if (td->td_planarconfig == PLANARCONFIG_CONTIG)
		rowsize = multiply(tif, rowsize, td->td_samplesperpixel,
				   "TIFFTileRowSize");
	/* Convert bits to bytes, rounding up. */
	return ((tsize_t) TIFFhowmany8(rowsize));
}

/*
 * Compute the # bytes in a variable length, row-aligned tile.
 * nrows overrides the directory's tile length; packed (subsampled,
 * non-upsampled) YCbCr tiles get the special chroma accounting below.
 */
tsize_t
TIFFVTileSize(TIFF* tif, uint32 nrows)
{
	TIFFDirectory *td = &tif->tif_dir;
	tsize_t tilesize;

	if (td->td_tilelength == 0 || td->td_tilewidth == 0 ||
	    td->td_tiledepth == 0)
		return ((tsize_t) 0);
	if (td->td_planarconfig == PLANARCONFIG_CONTIG &&
	    td->td_photometric == PHOTOMETRIC_YCBCR &&
	    !isUpSampled(tif)) {
		/*
		 * Packed YCbCr data contain one Cb+Cr for every
		 * HorizontalSampling*VerticalSampling Y values.
		 * Must also roundup width and height when calculating
		 * since images that are not a multiple of the
		 * horizontal/vertical subsampling area include
		 * YCbCr data for the extended image.
		 */
		tsize_t w =
		    TIFFroundup(td->td_tilewidth, td->td_ycbcrsubsampling[0]);
		tsize_t rowsize =
		    TIFFhowmany8(multiply(tif, w, td->td_bitspersample,
					  "TIFFVTileSize"));
		tsize_t samplingarea =
		    td->td_ycbcrsubsampling[0]*td->td_ycbcrsubsampling[1];
		/* Guard the division below against corrupt subsampling tags. */
		if (samplingarea == 0) {
			TIFFErrorExt(tif->tif_clientdata, tif->tif_name,
				     "Invalid YCbCr subsampling");
			return 0;
		}
		nrows = TIFFroundup(nrows, td->td_ycbcrsubsampling[1]);
		/* NB: don't need TIFFhowmany here 'cuz everything is rounded */
		tilesize = multiply(tif, nrows, rowsize, "TIFFVTileSize");
		/* Add the two chroma planes: 2 bytes-worth per sampling area. */
		tilesize = summarize(tif, tilesize,
				     multiply(tif, 2, tilesize / samplingarea,
					      "TIFFVTileSize"),
				     "TIFFVTileSize");
	} else
		tilesize = multiply(tif, nrows, TIFFTileRowSize(tif),
				    "TIFFVTileSize");
	/* Scale by tile depth for volumetric (3-D) tiles. */
	return ((tsize_t)
	    multiply(tif, tilesize, td->td_tiledepth, "TIFFVTileSize"));
}

/*
 * Compute the # bytes in a row-aligned tile.
*/ tsize_t TIFFTileSize(TIFF* tif) { return (TIFFVTileSize(tif, tif->tif_dir.td_tilelength)); } /* * Compute a default tile size based on the image * characteristics and a requested value. If a * request is <1 then we choose a size according * to certain heuristics. */ void TIFFDefaultTileSize(TIFF* tif, uint32* tw, uint32* th) { (*tif->tif_deftilesize)(tif, tw, th); } void _TIFFDefaultTileSize(TIFF* tif, uint32* tw, uint32* th) { (void) tif; if (*(int32*) tw < 1) *tw = 256; if (*(int32*) th < 1) *th = 256; /* roundup to a multiple of 16 per the spec */ if (*tw & 0xf) *tw = TIFFroundup(*tw, 16); if (*th & 0xf) *th = TIFFroundup(*th, 16); } /* vim: set ts=8 sts=8 sw=8 noet: */ /* * Local Variables: * mode: c * c-basic-offset: 8 * fill-column: 78 * End: */
mit
yan97ao/nanomsg
src/transports/ws/sha1.c
36
4563
/*
    Copyright (c) 2014 Wirebird Labs LLC. All rights reserved.

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom
    the Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included
    in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
    CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
    TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
    SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#include "sha1.h"

/*  32-bit left rotate; 'bits' must be in (0,32) or the shifts are
    undefined behavior. */
#define sha1_rol32(num,bits) ((num << bits) | (num >> (32 - bits)))

/*  Reset 'self' so a fresh message can be hashed. Must be called before
    the first nn_sha1_hashbyte(). */
void nn_sha1_init (struct nn_sha1 *self)
{
    /*  Detect endianness at run time. nn_sha1_add uses the result to
        place incoming bytes so each 32-bit buffer word holds the
        message bytes in big-endian order, as SHA-1 requires. */
    union {
        uint32_t i;
        char c [4];
    } test = { 0x00000001 };
    self->is_little_endian = test.c [0];

    /*  Initial state of the hash.
        These are the standard SHA-1 initial chaining values (h0..h4). */
    self->state [0] = 0x67452301;
    self->state [1] = 0xefcdab89;
    self->state [2] = 0x98badcfe;
    self->state [3] = 0x10325476;
    self->state [4] = 0xc3d2e1f0;

    self->bytes_hashed = 0;
    self->buffer_offset = 0;
}

/*  Append one byte to the 64-byte block buffer; when the buffer fills,
    run the 80-round SHA-1 compression over it and fold the result into
    self->state. Does NOT update bytes_hashed (padding uses this too). */
static void nn_sha1_add (struct nn_sha1 *self, uint8_t data)
{
    uint8_t i;
    uint32_t a, b, c, d, e, t;
    uint8_t * const buf = (uint8_t*) self->buffer;

    /*  XOR-ing the offset with 3 on little-endian hosts stores bytes so
        each uint32 buffer word reads back big-endian. */
    if (self->is_little_endian)
        buf [self->buffer_offset ^ 3] = data;
    else
        buf [self->buffer_offset] = data;

    self->buffer_offset++;
    if (self->buffer_offset == SHA1_BLOCK_LEN) {

        a = self->state [0];
        b = self->state [1];
        c = self->state [2];
        d = self->state [3];
        e = self->state [4];

        for (i = 0; i < 80; i++) {
            /*  Rounds 16-79: extend the schedule in place, keeping only
                a 16-word sliding window of W. */
            if (i >= 16) {
                t = self->buffer [(i + 13) & 15] ^
                    self->buffer [(i + 8) & 15] ^
                    self->buffer [(i + 2) & 15] ^
                    self->buffer [i & 15];
                self->buffer [i & 15] = sha1_rol32 (t, 1);
            }

            /*  Round function and additive constant, per 20-round stage. */
            if (i < 20)
                t = (d ^ (b & (c ^ d))) + 0x5A827999;
            else if (i < 40)
                t = (b ^ c ^ d) + 0x6ED9EBA1;
            else if (i < 60)
                t = ((b & c) | (d & (b | c))) + 0x8F1BBCDC;
            else
                t = (b ^ c ^ d) + 0xCA62C1D6;

            t += sha1_rol32 (a, 5) + e + self->buffer [i & 15];
            e = d;
            d = c;
            c = sha1_rol32 (b, 30);
            b = a;
            a = t;
        }

        self->state [0] += a;
        self->state [1] += b;
        self->state [2] += c;
        self->state [3] += d;
        self->state [4] += e;

        self->buffer_offset = 0;
    }
}

/*  Public per-byte entry point: counts the byte as message data, then
    feeds it to the block buffer. */
void nn_sha1_hashbyte (struct nn_sha1 *self, uint8_t data)
{
    ++self->bytes_hashed;
    nn_sha1_add (self, data);
}

/*  Finalize the hash: pad, append the message length, byte-swap the
    state on little-endian hosts, and return a pointer to the 20-byte
    digest (which aliases self->state; valid until the next init). */
uint8_t* nn_sha1_result (struct nn_sha1 *self)
{
    int i;

    /*  Pad to complete the last block. */
    nn_sha1_add (self, 0x80);
    while (self->buffer_offset != 56)
        nn_sha1_add (self, 0x00);

    /*  Append length in the last 8 bytes. SHA-1 supports 64-bit hashes,
        so zero-pad the top bits. Shifting to multiply by 8 as SHA-1 supports
        bit- as well as byte-streams.
        The length is the 64-bit big-endian bit count; bytes_hashed is a
        32-bit byte count, so bit count = bytes_hashed << 3 and the top
        three bytes are always zero. The shift amounts (29, 21, 13, 5,
        <<3) are the byte-wise decomposition of that <<3. */
    nn_sha1_add (self, 0);
    nn_sha1_add (self, 0);
    nn_sha1_add (self, 0);
    nn_sha1_add (self, self->bytes_hashed >> 29);
    nn_sha1_add (self, self->bytes_hashed >> 21);
    nn_sha1_add (self, self->bytes_hashed >> 13);
    nn_sha1_add (self, self->bytes_hashed >> 5);
    nn_sha1_add (self, self->bytes_hashed << 3);

    /*  Correct byte order for little-endian systems so the digest reads
        as the standard big-endian SHA-1 byte sequence. */
    if (self->is_little_endian) {
        for (i = 0; i < 5; i++) {
            self->state [i] =
                (((self->state [i]) << 24) & 0xFF000000) |
                (((self->state [i]) << 8) & 0x00FF0000) |
                (((self->state [i]) >> 8) & 0x0000FF00) |
                (((self->state [i]) >> 24) & 0x000000FF);
        }
    }

    /*  20-octet pointer to hash. */
    return (uint8_t*) self->state;
}
mit
CodeClubJUSL/codeclubjusl.github.io
vendor/bundle/gems/ffi-1.9.18/ext/ffi_c/libffi/testsuite/libffi.call/cls_multi_sshort.c
805
1685
/* Area:	ffi_call, closure_call
   Purpose:	Check passing of multiple signed short values.
   Limitations:	none.
   PR:		PR13221.
   Originator:	<andreast@gcc.gnu.org> 20031129  */

/* { dg-do run } */
#include "ffitest.h"

/* Reference function: adds two signed shorts and echoes the operands and
   the sum to stdout (the dg-output patterns below match this output). */
signed short test_func_fn(signed short a1, signed short a2)
{
  signed short result;

  result = a1 + a2;
  printf("%d %d: %d\n", a1, a2, result);
  return result;
}

/* Closure stub: unpacks the two signed-short arguments from AVALS and
   forwards them to test_func_fn, widening the result to ffi_arg as the
   closure ABI requires for small integer return types. */
static void test_func_gn(ffi_cif *cif __UNUSED__, void *rval, void **avals,
			 void *data __UNUSED__)
{
  signed short a1, a2;

  a1 = *(signed short *)avals[0];
  a2 = *(signed short *)avals[1];

  *(ffi_arg *)rval = test_func_fn(a1, a2);
}

typedef signed short (*test_type)(signed short, signed short);

int main (void)
{
  ffi_cif cif;
  void *code;
  ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code);
  void *call_args[3];
  ffi_type *arg_types[3];
  ffi_arg direct_result;
  unsigned short lhs, rhs, closure_result;

  /* Describe a (sshort, sshort) -> sshort signature. */
  arg_types[0] = &ffi_type_sshort;
  arg_types[1] = &ffi_type_sshort;
  arg_types[2] = NULL;

  lhs = 2;
  rhs = 32765;
  call_args[0] = &lhs;
  call_args[1] = &rhs;
  call_args[2] = NULL;

  /* Initialize the cif */
  CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sshort,
		     arg_types) == FFI_OK);

  /* First: drive the function directly through ffi_call. */
  ffi_call(&cif, FFI_FN(test_func_fn), &direct_result, call_args);
  /* { dg-output "2 32765: 32767" } */
  printf("res: %d\n", (unsigned short)direct_result);
  /* { dg-output "\nres: 32767" } */

  /* Second: route the same computation through a generated closure. */
  CHECK(ffi_prep_closure_loc(pcl, &cif, test_func_gn, NULL, code) == FFI_OK);

  closure_result = (*((test_type)code))(2, 32765);
  /* { dg-output "\n2 32765: 32767" } */
  printf("res: %d\n", closure_result);
  /* { dg-output "\nres: 32767" } */

  exit(0);
}
mit
Kogser/bitcoin
db-4.8.30.NC/db/db_pr.c
42
39244
/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996-2009 Oracle. All rights reserved.
 *
 * $Id$
 */

#include "db_config.h"

#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/btree.h"
#include "dbinc/hash.h"
#include "dbinc/mp.h"
#include "dbinc/partition.h"
#include "dbinc/qam.h"
#include "dbinc/db_verify.h"

/*
 * __db_loadme --
 *	A nice place to put a breakpoint.
 *
 * The body exists only so the function is not optimized away; it has no
 * useful side effects.
 *
 * PUBLIC: void __db_loadme __P((void));
 */
void
__db_loadme()
{
	pid_t pid;

	__os_id(NULL, &pid, NULL);
}

#ifdef HAVE_STATISTICS
/* Forward declarations for the statistics-only static helpers below. */
static int	 __db_bmeta __P((DB *, BTMETA *, u_int32_t));
static int	 __db_hmeta __P((DB *, HMETA *, u_int32_t));
static void	 __db_meta __P((DB *, DBMETA *, FN const *, u_int32_t));
static const char *__db_pagetype_to_string __P((u_int32_t));
static void	 __db_prdb __P((DB *, u_int32_t));
static void	 __db_proff __P((ENV *, DB_MSGBUF *, void *));
static int	 __db_prtree __P((DB *, DB_TXN *, u_int32_t));
static int	 __db_qmeta __P((DB *, QMETA *, u_int32_t));

/*
 * __db_dumptree --
 *	Dump the tree to a file.
* * PUBLIC: int __db_dumptree __P((DB *, DB_TXN *, char *, char *)); */ int __db_dumptree(dbp, txn, op, name) DB *dbp; DB_TXN *txn; char *op, *name; { ENV *env; FILE *fp, *orig_fp; u_int32_t flags; int ret; env = dbp->env; for (flags = 0; *op != '\0'; ++op) switch (*op) { case 'a': LF_SET(DB_PR_PAGE); break; case 'h': break; case 'r': LF_SET(DB_PR_RECOVERYTEST); break; default: return (EINVAL); } if (name != NULL) { if ((fp = fopen(name, "w")) == NULL) return (__os_get_errno()); orig_fp = dbp->dbenv->db_msgfile; dbp->dbenv->db_msgfile = fp; } else fp = orig_fp = NULL; __db_prdb(dbp, flags); __db_msg(env, "%s", DB_GLOBAL(db_line)); ret = __db_prtree(dbp, txn, flags); if (fp != NULL) { (void)fclose(fp); env->dbenv->db_msgfile = orig_fp; } return (ret); } static const FN __db_flags_fn[] = { { DB_AM_CHKSUM, "checksumming" }, { DB_AM_COMPENSATE, "created by compensating transaction" }, { DB_AM_CREATED, "database created" }, { DB_AM_CREATED_MSTR, "encompassing file created" }, { DB_AM_DBM_ERROR, "dbm/ndbm error" }, { DB_AM_DELIMITER, "variable length" }, { DB_AM_DISCARD, "discard cached pages" }, { DB_AM_DUP, "duplicates" }, { DB_AM_DUPSORT, "sorted duplicates" }, { DB_AM_ENCRYPT, "encrypted" }, { DB_AM_FIXEDLEN, "fixed-length records" }, { DB_AM_INMEM, "in-memory" }, { DB_AM_IN_RENAME, "file is being renamed" }, { DB_AM_NOT_DURABLE, "changes not logged" }, { DB_AM_OPEN_CALLED, "open called" }, { DB_AM_PAD, "pad value" }, { DB_AM_PGDEF, "default page size" }, { DB_AM_RDONLY, "read-only" }, { DB_AM_READ_UNCOMMITTED, "read-uncommitted" }, { DB_AM_RECNUM, "Btree record numbers" }, { DB_AM_RECOVER, "opened for recovery" }, { DB_AM_RENUMBER, "renumber" }, { DB_AM_REVSPLITOFF, "no reverse splits" }, { DB_AM_SECONDARY, "secondary" }, { DB_AM_SNAPSHOT, "load on open" }, { DB_AM_SUBDB, "subdatabases" }, { DB_AM_SWAP, "needswap" }, { DB_AM_TXN, "transactional" }, { DB_AM_VERIFYING, "verifier" }, { 0, NULL } }; /* * __db_get_flags_fn -- * Return the __db_flags_fn array. 
 *
 * PUBLIC: const FN * __db_get_flags_fn __P((void));
 */
const FN *
__db_get_flags_fn()
{
	return (__db_flags_fn);
}

/*
 * __db_prdb --
 *	Print out the DB structure information.
 *
 * Dumps the common header, then the access-method-specific internal
 * structure (btree/recno, hash or queue). In recovery-test mode,
 * function pointers are suppressed so dumps diff cleanly across runs.
 */
static void
__db_prdb(dbp, flags)
	DB *dbp;
	u_int32_t flags;
{
	BTREE *bt;
	DB_MSGBUF mb;
	ENV *env;
	HASH *h;
	QUEUE *q;

	env = dbp->env;

	DB_MSGBUF_INIT(&mb);
	__db_msg(env, "In-memory DB structure:");
	__db_msgadd(env, &mb, "%s: %#lx",
	    __db_dbtype_to_string(dbp->type), (u_long)dbp->flags);
	__db_prflags(env, &mb, dbp->flags, __db_flags_fn, " (", ")");
	DB_MSGBUF_FLUSH(env, &mb);

	switch (dbp->type) {
	case DB_BTREE:
	case DB_RECNO:
		bt = dbp->bt_internal;
		__db_msg(env, "bt_meta: %lu bt_root: %lu",
		    (u_long)bt->bt_meta, (u_long)bt->bt_root);
		__db_msg(env, "bt_minkey: %lu", (u_long)bt->bt_minkey);
		if (!LF_ISSET(DB_PR_RECOVERYTEST))
			__db_msg(env, "bt_compare: %#lx bt_prefix: %#lx",
			    P_TO_ULONG(bt->bt_compare),
			    P_TO_ULONG(bt->bt_prefix));
#ifdef HAVE_COMPRESSION
		if (!LF_ISSET(DB_PR_RECOVERYTEST))
			__db_msg(env, "bt_compress: %#lx bt_decompress: %#lx",
			    P_TO_ULONG(bt->bt_compress),
			    P_TO_ULONG(bt->bt_decompress));
#endif
		__db_msg(env, "bt_lpgno: %lu", (u_long)bt->bt_lpgno);
		if (dbp->type == DB_RECNO) {
			__db_msg(env,
		    "re_pad: %#lx re_delim: %#lx re_len: %lu re_source: %s",
			    (u_long)bt->re_pad, (u_long)bt->re_delim,
			    (u_long)bt->re_len,
			    bt->re_source == NULL ? "" : bt->re_source);
			__db_msg(env,
			    "re_modified: %d re_eof: %d re_last: %lu",
			    bt->re_modified, bt->re_eof, (u_long)bt->re_last);
		}
		break;
	case DB_HASH:
		h = dbp->h_internal;
		__db_msg(env, "meta_pgno: %lu", (u_long)h->meta_pgno);
		__db_msg(env, "h_ffactor: %lu", (u_long)h->h_ffactor);
		__db_msg(env, "h_nelem: %lu", (u_long)h->h_nelem);
		if (!LF_ISSET(DB_PR_RECOVERYTEST))
			__db_msg(env, "h_hash: %#lx", P_TO_ULONG(h->h_hash));
		break;
	case DB_QUEUE:
		q = dbp->q_internal;
		__db_msg(env, "q_meta: %lu", (u_long)q->q_meta);
		__db_msg(env, "q_root: %lu", (u_long)q->q_root);
		__db_msg(env, "re_pad: %#lx re_len: %lu",
		    (u_long)q->re_pad, (u_long)q->re_len);
		__db_msg(env, "rec_page: %lu", (u_long)q->rec_page);
		__db_msg(env, "page_ext: %lu", (u_long)q->page_ext);
		break;
	case DB_UNKNOWN:
	default:
		break;
	}
}

/*
 * __db_prtree --
 *	Print out the entire tree.
 *
 * Queue databases are handled by __db_prqueue; for everything else,
 * every page from 0 through the last page is fetched and printed.
 */
static int
__db_prtree(dbp, txn, flags)
	DB *dbp;
	DB_TXN *txn;
	u_int32_t flags;
{
	DB_MPOOLFILE *mpf;
	PAGE *h;
	db_pgno_t i, last;
	int ret;

	mpf = dbp->mpf;

	if (dbp->type == DB_QUEUE)
		return (__db_prqueue(dbp, flags));

	/*
	 * Find out the page number of the last page in the database, then
	 * dump each page.
	 */
	if ((ret = __memp_get_last_pgno(mpf, &last)) != 0)
		return (ret);
	for (i = 0; i <= last; ++i) {
		if ((ret = __memp_fget(mpf, &i, NULL, txn, 0, &h)) != 0)
			return (ret);
		(void)__db_prpage(dbp, h, flags);
		if ((ret = __memp_fput(mpf, NULL, h, dbp->priority)) != 0)
			return (ret);
	}

	return (0);
}

/*
 * __db_meta --
 *	Print out common metadata information.
 *
 * Shared by the btree/hash/queue meta-page printers: dumps the fields
 * every DBMETA carries, walks and prints the free list (skipped in
 * recovery-test mode so dumps diff cleanly), prints the flags using the
 * caller-supplied FN table, and prints the file's unique id.
 */
static void
__db_meta(dbp, dbmeta, fn, flags)
	DB *dbp;
	DBMETA *dbmeta;
	FN const *fn;
	u_int32_t flags;
{
	DB_MPOOLFILE *mpf;
	DB_MSGBUF mb;
	ENV *env;
	PAGE *h;
	db_pgno_t pgno;
	u_int8_t *p;
	int cnt, ret;
	const char *sep;

	env = dbp->env;
	mpf = dbp->mpf;
	DB_MSGBUF_INIT(&mb);

	__db_msg(env, "\tmagic: %#lx", (u_long)dbmeta->magic);
	__db_msg(env, "\tversion: %lu", (u_long)dbmeta->version);
	__db_msg(env, "\tpagesize: %lu", (u_long)dbmeta->pagesize);
	__db_msg(env, "\ttype: %lu", (u_long)dbmeta->type);
	__db_msg(env, "\tmetaflags %#lx", (u_long)dbmeta->metaflags);
	__db_msg(env, "\tkeys: %lu\trecords: %lu",
	    (u_long)dbmeta->key_count, (u_long)dbmeta->record_count);
	if (dbmeta->nparts)
		__db_msg(env, "\tnparts: %lu", (u_long)dbmeta->nparts);

	/*
	 * If we're doing recovery testing, don't display the free list,
	 * it may have changed and that makes the dump diff not work.
	 */
	if (!LF_ISSET(DB_PR_RECOVERYTEST)) {
		__db_msgadd(
		    env, &mb, "\tfree list: %lu", (u_long)dbmeta->free);
		/* Follow next_pgno links through the free list, printing
		 * ten page numbers per output line. */
		for (pgno = dbmeta->free,
		    cnt = 0, sep = ", "; pgno != PGNO_INVALID;) {
			if ((ret = __memp_fget(mpf,
			    &pgno, NULL, NULL, 0, &h)) != 0) {
				DB_MSGBUF_FLUSH(env, &mb);
				__db_msg(env,
			    "Unable to retrieve free-list page: %lu: %s",
				    (u_long)pgno, db_strerror(ret));
				break;
			}
			pgno = h->next_pgno;
			(void)__memp_fput(mpf, NULL, h, dbp->priority);
			__db_msgadd(env, &mb, "%s%lu", sep, (u_long)pgno);
			if (++cnt % 10 == 0) {
				DB_MSGBUF_FLUSH(env, &mb);
				cnt = 0;
				sep = "\t";
			} else
				sep = ", ";
		}
		DB_MSGBUF_FLUSH(env, &mb);
		__db_msg(env, "\tlast_pgno: %lu", (u_long)dbmeta->last_pgno);
	}

	if (fn != NULL) {
		DB_MSGBUF_FLUSH(env, &mb);
		__db_msgadd(env, &mb, "\tflags: %#lx", (u_long)dbmeta->flags);
		__db_prflags(env, &mb, dbmeta->flags, fn, " (", ")");
	}
	DB_MSGBUF_FLUSH(env, &mb);

	__db_msgadd(env, &mb, "\tuid: ");
	for (p = (u_int8_t *)dbmeta->uid,
	    cnt = 0; cnt < DB_FILE_ID_LEN; ++cnt) {
		__db_msgadd(env, &mb, "%x", *p++);
		if (cnt < DB_FILE_ID_LEN - 1)
			__db_msgadd(env, &mb, " ");
	}
	DB_MSGBUF_FLUSH(env, &mb);
}

/*
 * __db_bmeta --
 *	Print out the
btree meta-data page. */ static int __db_bmeta(dbp, h, flags) DB *dbp; BTMETA *h; u_int32_t flags; { static const FN fn[] = { { BTM_DUP, "duplicates" }, { BTM_RECNO, "recno" }, { BTM_RECNUM, "btree:recnum" }, { BTM_FIXEDLEN, "recno:fixed-length" }, { BTM_RENUMBER, "recno:renumber" }, { BTM_SUBDB, "multiple-databases" }, { BTM_DUPSORT, "sorted duplicates" }, { BTM_COMPRESS, "compressed" }, { 0, NULL } }; ENV *env; env = dbp->env; __db_meta(dbp, (DBMETA *)h, fn, flags); __db_msg(env, "\tminkey: %lu", (u_long)h->minkey); if (dbp->type == DB_RECNO) __db_msg(env, "\tre_len: %#lx re_pad: %#lx", (u_long)h->re_len, (u_long)h->re_pad); __db_msg(env, "\troot: %lu", (u_long)h->root); return (0); } /* * __db_hmeta -- * Print out the hash meta-data page. */ static int __db_hmeta(dbp, h, flags) DB *dbp; HMETA *h; u_int32_t flags; { static const FN fn[] = { { DB_HASH_DUP, "duplicates" }, { DB_HASH_SUBDB, "multiple-databases" }, { DB_HASH_DUPSORT, "sorted duplicates" }, { 0, NULL } }; ENV *env; DB_MSGBUF mb; int i; env = dbp->env; DB_MSGBUF_INIT(&mb); __db_meta(dbp, (DBMETA *)h, fn, flags); __db_msg(env, "\tmax_bucket: %lu", (u_long)h->max_bucket); __db_msg(env, "\thigh_mask: %#lx", (u_long)h->high_mask); __db_msg(env, "\tlow_mask: %#lx", (u_long)h->low_mask); __db_msg(env, "\tffactor: %lu", (u_long)h->ffactor); __db_msg(env, "\tnelem: %lu", (u_long)h->nelem); __db_msg(env, "\th_charkey: %#lx", (u_long)h->h_charkey); __db_msgadd(env, &mb, "\tspare points: "); for (i = 0; i < NCACHED; i++) __db_msgadd(env, &mb, "%lu ", (u_long)h->spares[i]); DB_MSGBUF_FLUSH(env, &mb); return (0); } /* * __db_qmeta -- * Print out the queue meta-data page. 
 */
static int
__db_qmeta(dbp, h, flags)
	DB *dbp;
	QMETA *h;
	u_int32_t flags;
{
	ENV *env;

	env = dbp->env;

	/* Queue has no flag-name table, hence the NULL fn argument. */
	__db_meta(dbp, (DBMETA *)h, NULL, flags);

	__db_msg(env, "\tfirst_recno: %lu", (u_long)h->first_recno);
	__db_msg(env, "\tcur_recno: %lu", (u_long)h->cur_recno);
	__db_msg(env, "\tre_len: %#lx re_pad: %lu",
	    (u_long)h->re_len, (u_long)h->re_pad);
	__db_msg(env, "\trec_page: %lu", (u_long)h->rec_page);
	__db_msg(env, "\tpage_ext: %lu", (u_long)h->page_ext);

	return (0);
}

/*
 * __db_prnpage
 *	-- Print out a specific page.
 *
 * Fetches the page by number through the memory pool, prints it with
 * __db_prpage, and returns the page to the pool.
 *
 * PUBLIC: int __db_prnpage __P((DB *, DB_TXN *, db_pgno_t));
 */
int
__db_prnpage(dbp, txn, pgno)
	DB *dbp;
	DB_TXN *txn;
	db_pgno_t pgno;
{
	DB_MPOOLFILE *mpf;
	PAGE *h;
	int ret, t_ret;

	mpf = dbp->mpf;

	if ((ret = __memp_fget(mpf, &pgno, NULL, txn, 0, &h)) != 0)
		return (ret);

	ret = __db_prpage(dbp, h, DB_PR_PAGE);

	/* Always return the page; report the first error encountered. */
	if ((t_ret = __memp_fput(mpf, NULL, h, dbp->priority)) != 0 && ret == 0)
		ret = t_ret;

	return (ret);
}

/*
 * __db_prpage
 *	-- Print out a page.
 *
 * PUBLIC: int __db_prpage __P((DB *, PAGE *, u_int32_t));
 */
int
__db_prpage(dbp, h, flags)
	DB *dbp;
	PAGE *h;
	u_int32_t flags;
{
	BINTERNAL *bi;
	BKEYDATA *bk;
	DB_MSGBUF mb;
	ENV *env;
	HOFFPAGE a_hkd;
	QAMDATA *qp, *qep;
	RINTERNAL *ri;
	db_indx_t dlen, len, i, *inp;
	db_pgno_t pgno;
	db_recno_t recno;
	u_int32_t pagesize, qlen;
	u_int8_t *ep, *hk, *p;
	int deleted, ret;
	const char *s;
	void *sp;

	env = dbp->env;
	DB_MSGBUF_INIT(&mb);

	/*
	 * If we're doing recovery testing and this page is P_INVALID,
	 * assume it's a page that's on the free list, and don't display it.
	 */
	if (LF_ISSET(DB_PR_RECOVERYTEST) && TYPE(h) == P_INVALID)
		return (0);

	/* A page type we can't name means a corrupt page. */
	if ((s = __db_pagetype_to_string(TYPE(h))) == NULL) {
		__db_msg(env, "ILLEGAL PAGE TYPE: page: %lu type: %lu",
		    (u_long)h->pgno, (u_long)TYPE(h));
		return (EINVAL);
	}

	/*
	 * !!!
	 * Find out the page size.  We don't want to do it the "right" way,
	 * by reading the value from the meta-data page, that's going to be
	 * slow.  Reach down into the mpool region.
*/ pagesize = (u_int32_t)dbp->mpf->mfp->stat.st_pagesize; /* Page number, page type. */ __db_msgadd(env, &mb, "page %lu: %s:", (u_long)h->pgno, s); /* * LSNs on a metadata page will be different from the original after an * abort, in some cases. Don't display them if we're testing recovery. */ if (!LF_ISSET(DB_PR_RECOVERYTEST) || (TYPE(h) != P_BTREEMETA && TYPE(h) != P_HASHMETA && TYPE(h) != P_QAMMETA && TYPE(h) != P_QAMDATA)) __db_msgadd(env, &mb, " LSN [%lu][%lu]:", (u_long)LSN(h).file, (u_long)LSN(h).offset); /* * Page level (only applicable for Btree/Recno, but we always display * it, for no particular reason. */ __db_msgadd(env, &mb, " level %lu", (u_long)h->level); /* Record count. */ if (TYPE(h) == P_IBTREE || TYPE(h) == P_IRECNO || (TYPE(h) == P_LRECNO && h->pgno == ((BTREE *)dbp->bt_internal)->bt_root)) __db_msgadd(env, &mb, " records: %lu", (u_long)RE_NREC(h)); DB_MSGBUF_FLUSH(env, &mb); switch (TYPE(h)) { case P_BTREEMETA: return (__db_bmeta(dbp, (BTMETA *)h, flags)); case P_HASHMETA: return (__db_hmeta(dbp, (HMETA *)h, flags)); case P_QAMMETA: return (__db_qmeta(dbp, (QMETA *)h, flags)); case P_QAMDATA: /* Should be meta->start. */ if (!LF_ISSET(DB_PR_PAGE)) return (0); qlen = ((QUEUE *)dbp->q_internal)->re_len; recno = (h->pgno - 1) * QAM_RECNO_PER_PAGE(dbp) + 1; i = 0; qep = (QAMDATA *)((u_int8_t *)h + pagesize - qlen); for (qp = QAM_GET_RECORD(dbp, h, i); qp < qep; recno++, i++, qp = QAM_GET_RECORD(dbp, h, i)) { if (!F_ISSET(qp, QAM_SET)) continue; __db_msgadd(env, &mb, "%s", F_ISSET(qp, QAM_VALID) ? 
"\t" : " D"); __db_msgadd(env, &mb, "[%03lu] %4lu ", (u_long)recno, (u_long)((u_int8_t *)qp - (u_int8_t *)h)); __db_prbytes(env, &mb, qp->data, qlen); } return (0); default: break; } s = "\t"; if (TYPE(h) != P_IBTREE && TYPE(h) != P_IRECNO) { __db_msgadd(env, &mb, "%sprev: %4lu next: %4lu", s, (u_long)PREV_PGNO(h), (u_long)NEXT_PGNO(h)); s = " "; } if (TYPE(h) == P_OVERFLOW) { __db_msgadd(env, &mb, "%sref cnt: %4lu ", s, (u_long)OV_REF(h)); __db_prbytes(env, &mb, (u_int8_t *)h + P_OVERHEAD(dbp), OV_LEN(h)); return (0); } __db_msgadd(env, &mb, "%sentries: %4lu", s, (u_long)NUM_ENT(h)); __db_msgadd(env, &mb, " offset: %4lu", (u_long)HOFFSET(h)); DB_MSGBUF_FLUSH(env, &mb); if (TYPE(h) == P_INVALID || !LF_ISSET(DB_PR_PAGE)) return (0); ret = 0; inp = P_INP(dbp, h); for (i = 0; i < NUM_ENT(h); i++) { if ((uintptr_t)(P_ENTRY(dbp, h, i) - (u_int8_t *)h) < (uintptr_t)(P_OVERHEAD(dbp)) || (size_t)(P_ENTRY(dbp, h, i) - (u_int8_t *)h) >= pagesize) { __db_msg(env, "ILLEGAL PAGE OFFSET: indx: %lu of %lu", (u_long)i, (u_long)inp[i]); ret = EINVAL; continue; } deleted = 0; switch (TYPE(h)) { case P_HASH_UNSORTED: case P_HASH: case P_IBTREE: case P_IRECNO: sp = P_ENTRY(dbp, h, i); break; case P_LBTREE: sp = P_ENTRY(dbp, h, i); deleted = i % 2 == 0 && B_DISSET(GET_BKEYDATA(dbp, h, i + O_INDX)->type); break; case P_LDUP: case P_LRECNO: sp = P_ENTRY(dbp, h, i); deleted = B_DISSET(GET_BKEYDATA(dbp, h, i)->type); break; default: goto type_err; } __db_msgadd(env, &mb, "%s", deleted ? " D" : "\t"); __db_msgadd( env, &mb, "[%03lu] %4lu ", (u_long)i, (u_long)inp[i]); switch (TYPE(h)) { case P_HASH_UNSORTED: case P_HASH: hk = sp; switch (HPAGE_PTYPE(hk)) { case H_OFFDUP: memcpy(&pgno, HOFFDUP_PGNO(hk), sizeof(db_pgno_t)); __db_msgadd(env, &mb, "%4lu [offpage dups]", (u_long)pgno); DB_MSGBUF_FLUSH(env, &mb); break; case H_DUPLICATE: /* * If this is the first item on a page, then * we cannot figure out how long it is, so * we only print the first one in the duplicate * set. 
*/ if (i != 0) len = LEN_HKEYDATA(dbp, h, 0, i); else len = 1; __db_msgadd(env, &mb, "Duplicates:"); DB_MSGBUF_FLUSH(env, &mb); for (p = HKEYDATA_DATA(hk), ep = p + len; p < ep;) { memcpy(&dlen, p, sizeof(db_indx_t)); p += sizeof(db_indx_t); __db_msgadd(env, &mb, "\t\t"); __db_prbytes(env, &mb, p, dlen); p += sizeof(db_indx_t) + dlen; } break; case H_KEYDATA: __db_prbytes(env, &mb, HKEYDATA_DATA(hk), LEN_HKEYDATA(dbp, h, i == 0 ? pagesize : 0, i)); break; case H_OFFPAGE: memcpy(&a_hkd, hk, HOFFPAGE_SIZE); __db_msgadd(env, &mb, "overflow: total len: %4lu page: %4lu", (u_long)a_hkd.tlen, (u_long)a_hkd.pgno); DB_MSGBUF_FLUSH(env, &mb); break; default: DB_MSGBUF_FLUSH(env, &mb); __db_msg(env, "ILLEGAL HASH PAGE TYPE: %lu", (u_long)HPAGE_PTYPE(hk)); ret = EINVAL; break; } break; case P_IBTREE: bi = sp; if (F_ISSET(dbp, DB_AM_RECNUM)) __db_msgadd(env, &mb, "count: %4lu ", (u_long)bi->nrecs); __db_msgadd(env, &mb, "pgno: %4lu type: %lu ", (u_long)bi->pgno, (u_long)bi->type); switch (B_TYPE(bi->type)) { case B_KEYDATA: __db_prbytes(env, &mb, bi->data, bi->len); break; case B_DUPLICATE: case B_OVERFLOW: __db_proff(env, &mb, bi->data); break; default: DB_MSGBUF_FLUSH(env, &mb); __db_msg(env, "ILLEGAL BINTERNAL TYPE: %lu", (u_long)B_TYPE(bi->type)); ret = EINVAL; break; } break; case P_IRECNO: ri = sp; __db_msgadd(env, &mb, "entries %4lu pgno %4lu", (u_long)ri->nrecs, (u_long)ri->pgno); DB_MSGBUF_FLUSH(env, &mb); break; case P_LBTREE: case P_LDUP: case P_LRECNO: bk = sp; switch (B_TYPE(bk->type)) { case B_KEYDATA: __db_prbytes(env, &mb, bk->data, bk->len); break; case B_DUPLICATE: case B_OVERFLOW: __db_proff(env, &mb, bk); break; default: DB_MSGBUF_FLUSH(env, &mb); __db_msg(env, "ILLEGAL DUPLICATE/LBTREE/LRECNO TYPE: %lu", (u_long)B_TYPE(bk->type)); ret = EINVAL; break; } break; default: type_err: DB_MSGBUF_FLUSH(env, &mb); __db_msg(env, "ILLEGAL PAGE TYPE: %lu", (u_long)TYPE(h)); ret = EINVAL; continue; } } return (ret); } /* * __db_prbytes -- * Print out a data element. 
* * PUBLIC: void __db_prbytes __P((ENV *, DB_MSGBUF *, u_int8_t *, u_int32_t)); */ void __db_prbytes(env, mbp, bytes, len) ENV *env; DB_MSGBUF *mbp; u_int8_t *bytes; u_int32_t len; { u_int8_t *p; u_int32_t i; int msg_truncated; __db_msgadd(env, mbp, "len: %3lu", (u_long)len); if (len != 0) { __db_msgadd(env, mbp, " data: "); /* * Print the first 20 bytes of the data. If that chunk is * all printable characters, print it as text, else print it * in hex. We have this heuristic because we're displaying * things like lock objects that could be either text or data. */ if (len > 20) { len = 20; msg_truncated = 1; } else msg_truncated = 0; for (p = bytes, i = len; i > 0; --i, ++p) if (!isprint((int)*p) && *p != '\t' && *p != '\n') break; if (i == 0) for (p = bytes, i = len; i > 0; --i, ++p) __db_msgadd(env, mbp, "%c", *p); else for (p = bytes, i = len; i > 0; --i, ++p) __db_msgadd(env, mbp, "%#.2x", (u_int)*p); if (msg_truncated) __db_msgadd(env, mbp, "..."); } DB_MSGBUF_FLUSH(env, mbp); } /* * __db_proff -- * Print out an off-page element. */ static void __db_proff(env, mbp, vp) ENV *env; DB_MSGBUF *mbp; void *vp; { BOVERFLOW *bo; bo = vp; switch (B_TYPE(bo->type)) { case B_OVERFLOW: __db_msgadd(env, mbp, "overflow: total len: %4lu page: %4lu", (u_long)bo->tlen, (u_long)bo->pgno); break; case B_DUPLICATE: __db_msgadd( env, mbp, "duplicate: page: %4lu", (u_long)bo->pgno); break; default: /* NOTREACHED */ break; } DB_MSGBUF_FLUSH(env, mbp); } /* * __db_prflags -- * Print out flags values. 
* * PUBLIC: void __db_prflags __P((ENV *, DB_MSGBUF *, * PUBLIC: u_int32_t, const FN *, const char *, const char *)); */ void __db_prflags(env, mbp, flags, fn, prefix, suffix) ENV *env; DB_MSGBUF *mbp; u_int32_t flags; FN const *fn; const char *prefix, *suffix; { DB_MSGBUF mb; const FN *fnp; int found, standalone; const char *sep; if (fn == NULL) return; /* * If it's a standalone message, output the suffix (which will be the * label), regardless of whether we found anything or not, and flush * the line. */ if (mbp == NULL) { standalone = 1; mbp = &mb; DB_MSGBUF_INIT(mbp); } else standalone = 0; sep = prefix == NULL ? "" : prefix; for (found = 0, fnp = fn; fnp->mask != 0; ++fnp) if (LF_ISSET(fnp->mask)) { __db_msgadd(env, mbp, "%s%s", sep, fnp->name); sep = ", "; found = 1; } if ((standalone || found) && suffix != NULL) __db_msgadd(env, mbp, "%s", suffix); if (standalone) DB_MSGBUF_FLUSH(env, mbp); } /* * __db_lockmode_to_string -- * Return the name of the lock mode. * * PUBLIC: const char * __db_lockmode_to_string __P((db_lockmode_t)); */ const char * __db_lockmode_to_string(mode) db_lockmode_t mode; { switch (mode) { case DB_LOCK_NG: return ("Not granted"); case DB_LOCK_READ: return ("Shared/read"); case DB_LOCK_WRITE: return ("Exclusive/write"); case DB_LOCK_WAIT: return ("Wait for event"); case DB_LOCK_IWRITE: return ("Intent exclusive/write"); case DB_LOCK_IREAD: return ("Intent shared/read"); case DB_LOCK_IWR: return ("Intent to read/write"); case DB_LOCK_READ_UNCOMMITTED: return ("Read uncommitted"); case DB_LOCK_WWRITE: return ("Was written"); default: break; } return ("UNKNOWN LOCK MODE"); } /* * __db_pagetype_to_string -- * Return the name of the specified page type. 
*/ static const char * __db_pagetype_to_string(type) u_int32_t type; { char *s; s = NULL; switch (type) { case P_BTREEMETA: s = "btree metadata"; break; case P_LDUP: s = "duplicate"; break; case P_HASH_UNSORTED: s = "hash unsorted"; break; case P_HASH: s = "hash"; break; case P_HASHMETA: s = "hash metadata"; break; case P_IBTREE: s = "btree internal"; break; case P_INVALID: s = "invalid"; break; case P_IRECNO: s = "recno internal"; break; case P_LBTREE: s = "btree leaf"; break; case P_LRECNO: s = "recno leaf"; break; case P_OVERFLOW: s = "overflow"; break; case P_QAMMETA: s = "queue metadata"; break; case P_QAMDATA: s = "queue"; break; default: /* Just return a NULL. */ break; } return (s); } #else /* !HAVE_STATISTICS */ /* * __db_dumptree -- * Dump the tree to a file. * * PUBLIC: int __db_dumptree __P((DB *, DB_TXN *, char *, char *)); */ int __db_dumptree(dbp, txn, op, name) DB *dbp; DB_TXN *txn; char *op, *name; { COMPQUIET(txn, NULL); COMPQUIET(op, NULL); COMPQUIET(name, NULL); return (__db_stat_not_built(dbp->env)); } /* * __db_get_flags_fn -- * Return the __db_flags_fn array. * * PUBLIC: const FN * __db_get_flags_fn __P((void)); */ const FN * __db_get_flags_fn() { /* * !!! * The Tcl API uses this interface, stub it off. */ return (NULL); } #endif /* * __db_dump_pp -- * DB->dump pre/post processing. * * PUBLIC: int __db_dump_pp __P((DB *, const char *, * PUBLIC: int (*)(void *, const void *), void *, int, int)); */ int __db_dump_pp(dbp, subname, callback, handle, pflag, keyflag) DB *dbp; const char *subname; int (*callback) __P((void *, const void *)); void *handle; int pflag, keyflag; { DB_THREAD_INFO *ip; ENV *env; int handle_check, ret, t_ret; env = dbp->env; DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->dump"); ENV_ENTER(env, ip); /* Check for replication block. 
*/ handle_check = IS_ENV_REPLICATED(env); if (handle_check && (ret = __db_rep_enter(dbp, 1, 0, 1)) != 0) { handle_check = 0; goto err; } ret = __db_dump(dbp, subname, callback, handle, pflag, keyflag); /* Release replication block. */ if (handle_check && (t_ret = __env_db_rep_exit(env)) != 0 && ret == 0) ret = t_ret; err: ENV_LEAVE(env, ip); return (ret); } /* * __db_dump -- * DB->dump. * * PUBLIC: int __db_dump __P((DB *, const char *, * PUBLIC: int (*)(void *, const void *), void *, int, int)); */ int __db_dump(dbp, subname, callback, handle, pflag, keyflag) DB *dbp; const char *subname; int (*callback) __P((void *, const void *)); void *handle; int pflag, keyflag; { DBC *dbcp; DBT key, data; DBT keyret, dataret; ENV *env; db_recno_t recno; int is_recno, ret, t_ret; void *pointer; env = dbp->env; if ((ret = __db_prheader( dbp, subname, pflag, keyflag, handle, callback, NULL, 0)) != 0) return (ret); /* * Get a cursor and step through the database, printing out each * key/data pair. */ if ((ret = __db_cursor(dbp, NULL, NULL, &dbcp, 0)) != 0) return (ret); memset(&key, 0, sizeof(key)); memset(&data, 0, sizeof(data)); if ((ret = __os_malloc(env, 1024 * 1024, &data.data)) != 0) goto err; data.ulen = 1024 * 1024; data.flags = DB_DBT_USERMEM; is_recno = (dbp->type == DB_RECNO || dbp->type == DB_QUEUE); keyflag = is_recno ? 
keyflag : 1; if (is_recno) { keyret.data = &recno; keyret.size = sizeof(recno); } retry: while ((ret = __dbc_get(dbcp, &key, &data, DB_NEXT | DB_MULTIPLE_KEY)) == 0) { DB_MULTIPLE_INIT(pointer, &data); for (;;) { if (is_recno) DB_MULTIPLE_RECNO_NEXT(pointer, &data, recno, dataret.data, dataret.size); else DB_MULTIPLE_KEY_NEXT(pointer, &data, keyret.data, keyret.size, dataret.data, dataret.size); if (dataret.data == NULL) break; if ((keyflag && (ret = __db_prdbt(&keyret, pflag, " ", handle, callback, is_recno)) != 0) || (ret = __db_prdbt(&dataret, pflag, " ", handle, callback, 0)) != 0) goto err; } } if (ret == DB_BUFFER_SMALL) { data.size = (u_int32_t)DB_ALIGN(data.size, 1024); if ((ret = __os_realloc(env, data.size, &data.data)) != 0) goto err; data.ulen = data.size; goto retry; } if (ret == DB_NOTFOUND) ret = 0; if ((t_ret = __db_prfooter(handle, callback)) != 0 && ret == 0) ret = t_ret; err: if ((t_ret = __dbc_close(dbcp)) != 0 && ret == 0) ret = t_ret; if (data.data != NULL) __os_free(env, data.data); return (ret); } /* * __db_prdbt -- * Print out a DBT data element. * * PUBLIC: int __db_prdbt __P((DBT *, int, const char *, void *, * PUBLIC: int (*)(void *, const void *), int)); */ int __db_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno) DBT *dbtp; int checkprint; const char *prefix; void *handle; int (*callback) __P((void *, const void *)); int is_recno; { static const u_char hex[] = "0123456789abcdef"; db_recno_t recno; size_t len; int ret; #define DBTBUFLEN 100 u_int8_t *p, *hp; char buf[DBTBUFLEN], hbuf[DBTBUFLEN]; /* * !!! * This routine is the routine that dumps out items in the format * used by db_dump(1) and db_load(1). This means that the format * cannot change. */ if (prefix != NULL && (ret = callback(handle, prefix)) != 0) return (ret); if (is_recno) { /* * We're printing a record number, and this has to be done * in a platform-independent way. So we use the numeral in * straight ASCII. 
*/ (void)__ua_memcpy(&recno, dbtp->data, sizeof(recno)); snprintf(buf, DBTBUFLEN, "%lu", (u_long)recno); /* If we're printing data as hex, print keys as hex too. */ if (!checkprint) { for (len = strlen(buf), p = (u_int8_t *)buf, hp = (u_int8_t *)hbuf; len-- > 0; ++p) { *hp++ = hex[(u_int8_t)(*p & 0xf0) >> 4]; *hp++ = hex[*p & 0x0f]; } *hp = '\0'; ret = callback(handle, hbuf); } else ret = callback(handle, buf); if (ret != 0) return (ret); } else if (checkprint) { for (len = dbtp->size, p = dbtp->data; len--; ++p) if (isprint((int)*p)) { if (*p == '\\' && (ret = callback(handle, "\\")) != 0) return (ret); snprintf(buf, DBTBUFLEN, "%c", *p); if ((ret = callback(handle, buf)) != 0) return (ret); } else { snprintf(buf, DBTBUFLEN, "\\%c%c", hex[(u_int8_t)(*p & 0xf0) >> 4], hex[*p & 0x0f]); if ((ret = callback(handle, buf)) != 0) return (ret); } } else for (len = dbtp->size, p = dbtp->data; len--; ++p) { snprintf(buf, DBTBUFLEN, "%c%c", hex[(u_int8_t)(*p & 0xf0) >> 4], hex[*p & 0x0f]); if ((ret = callback(handle, buf)) != 0) return (ret); } return (callback(handle, "\n")); } /* * __db_prheader -- * Write out header information in the format expected by db_load. * * PUBLIC: int __db_prheader __P((DB *, const char *, int, int, void *, * PUBLIC: int (*)(void *, const void *), VRFY_DBINFO *, db_pgno_t)); */ int __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno) DB *dbp; const char *subname; int pflag, keyflag; void *handle; int (*callback) __P((void *, const void *)); VRFY_DBINFO *vdp; db_pgno_t meta_pgno; { DBT dbt; DBTYPE dbtype; ENV *env; VRFY_PAGEINFO *pip; u_int32_t flags, tmp_u_int32; size_t buflen; char *buf; int using_vdp, ret, t_ret, tmp_int; ret = 0; buf = NULL; COMPQUIET(buflen, 0); /* * If dbp is NULL, then pip is guaranteed to be non-NULL; we only ever * call __db_prheader with a NULL dbp from one case inside __db_prdbt, * and this is a special subdatabase for "lost" items. In this case * we have a vdp (from which we'll get a pip). 
In all other cases, we * will have a non-NULL dbp (and vdp may or may not be NULL depending * on whether we're salvaging). */ if (dbp == NULL) env = NULL; else env = dbp->env; DB_ASSERT(env, dbp != NULL || vdp != NULL); /* * If we've been passed a verifier statistics object, use that; we're * being called in a context where dbp->stat is unsafe. * * Also, the verifier may set the pflag on a per-salvage basis. If so, * respect that. */ if (vdp != NULL) { if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &pip)) != 0) return (ret); if (F_ISSET(vdp, SALVAGE_PRINTABLE)) pflag = 1; using_vdp = 1; } else { pip = NULL; using_vdp = 0; } /* * If dbp is NULL, make it a btree. Otherwise, set dbtype to whatever * appropriate type for the specified meta page, or the type of the dbp. */ if (dbp == NULL) dbtype = DB_BTREE; else if (using_vdp) switch (pip->type) { case P_BTREEMETA: if (F_ISSET(pip, VRFY_IS_RECNO)) dbtype = DB_RECNO; else dbtype = DB_BTREE; break; case P_HASHMETA: dbtype = DB_HASH; break; case P_QAMMETA: dbtype = DB_QUEUE; break; default: /* * If the meta page is of a bogus type, it's because * we have a badly corrupt database. (We must be in * the verifier for pip to be non-NULL.) Pretend we're * a Btree and salvage what we can. */ DB_ASSERT(env, F_ISSET(dbp, DB_AM_VERIFYING)); dbtype = DB_BTREE; break; } else dbtype = dbp->type; if ((ret = callback(handle, "VERSION=3\n")) != 0) goto err; if (pflag) { if ((ret = callback(handle, "format=print\n")) != 0) goto err; } else if ((ret = callback(handle, "format=bytevalue\n")) != 0) goto err; /* * 64 bytes is long enough, as a minimum bound, for any of the * fields besides subname. Subname uses __db_prdbt and therefore * does not need buffer space here. 
*/ buflen = 64; if ((ret = __os_malloc(env, buflen, &buf)) != 0) goto err; if (subname != NULL) { snprintf(buf, buflen, "database="); if ((ret = callback(handle, buf)) != 0) goto err; DB_INIT_DBT(dbt, subname, strlen(subname)); if ((ret = __db_prdbt(&dbt, 1, NULL, handle, callback, 0)) != 0) goto err; } switch (dbtype) { case DB_BTREE: if ((ret = callback(handle, "type=btree\n")) != 0) goto err; if (using_vdp) tmp_int = F_ISSET(pip, VRFY_HAS_RECNUMS) ? 1 : 0; else { if ((ret = __db_get_flags(dbp, &flags)) != 0) { __db_err(env, ret, "DB->get_flags"); goto err; } tmp_int = F_ISSET(dbp, DB_AM_RECNUM) ? 1 : 0; } if (tmp_int && (ret = callback(handle, "recnum=1\n")) != 0) goto err; if (using_vdp) tmp_u_int32 = pip->bt_minkey; else if ((ret = __bam_get_bt_minkey(dbp, &tmp_u_int32)) != 0) { __db_err(env, ret, "DB->get_bt_minkey"); goto err; } if (tmp_u_int32 != 0 && tmp_u_int32 != DEFMINKEYPAGE) { snprintf(buf, buflen, "bt_minkey=%lu\n", (u_long)tmp_u_int32); if ((ret = callback(handle, buf)) != 0) goto err; } break; case DB_HASH: #ifdef HAVE_HASH if ((ret = callback(handle, "type=hash\n")) != 0) goto err; if (using_vdp) tmp_u_int32 = pip->h_ffactor; else if ((ret = __ham_get_h_ffactor(dbp, &tmp_u_int32)) != 0) { __db_err(env, ret, "DB->get_h_ffactor"); goto err; } if (tmp_u_int32 != 0) { snprintf(buf, buflen, "h_ffactor=%lu\n", (u_long)tmp_u_int32); if ((ret = callback(handle, buf)) != 0) goto err; } if (using_vdp) tmp_u_int32 = pip->h_nelem; else if ((ret = __ham_get_h_nelem(dbp, &tmp_u_int32)) != 0) { __db_err(env, ret, "DB->get_h_nelem"); goto err; } /* * Hash databases have an h_nelem field of 0 or 1, neither * of those values is interesting. 
*/ if (tmp_u_int32 > 1) { snprintf(buf, buflen, "h_nelem=%lu\n", (u_long)tmp_u_int32); if ((ret = callback(handle, buf)) != 0) goto err; } break; #else ret = __db_no_hash_am(env); goto err; #endif case DB_QUEUE: #ifdef HAVE_QUEUE if ((ret = callback(handle, "type=queue\n")) != 0) goto err; if (using_vdp) tmp_u_int32 = vdp->re_len; else if ((ret = __ram_get_re_len(dbp, &tmp_u_int32)) != 0) { __db_err(env, ret, "DB->get_re_len"); goto err; } snprintf(buf, buflen, "re_len=%lu\n", (u_long)tmp_u_int32); if ((ret = callback(handle, buf)) != 0) goto err; if (using_vdp) tmp_int = (int)vdp->re_pad; else if ((ret = __ram_get_re_pad(dbp, &tmp_int)) != 0) { __db_err(env, ret, "DB->get_re_pad"); goto err; } if (tmp_int != 0 && tmp_int != ' ') { snprintf(buf, buflen, "re_pad=%#x\n", tmp_int); if ((ret = callback(handle, buf)) != 0) goto err; } if (using_vdp) tmp_u_int32 = vdp->page_ext; else if ((ret = __qam_get_extentsize(dbp, &tmp_u_int32)) != 0) { __db_err(env, ret, "DB->get_q_extentsize"); goto err; } if (tmp_u_int32 != 0) { snprintf(buf, buflen, "extentsize=%lu\n", (u_long)tmp_u_int32); if ((ret = callback(handle, buf)) != 0) goto err; } break; #else ret = __db_no_queue_am(env); goto err; #endif case DB_RECNO: if ((ret = callback(handle, "type=recno\n")) != 0) goto err; if (using_vdp) tmp_int = F_ISSET(pip, VRFY_IS_RRECNO) ? 1 : 0; else tmp_int = F_ISSET(dbp, DB_AM_RENUMBER) ? 1 : 0; if (tmp_int != 0 && (ret = callback(handle, "renumber=1\n")) != 0) goto err; if (using_vdp) tmp_int = F_ISSET(pip, VRFY_IS_FIXEDLEN) ? 1 : 0; else tmp_int = F_ISSET(dbp, DB_AM_FIXEDLEN) ? 
1 : 0; if (tmp_int) { if (using_vdp) tmp_u_int32 = pip->re_len; else if ((ret = __ram_get_re_len(dbp, &tmp_u_int32)) != 0) { __db_err(env, ret, "DB->get_re_len"); goto err; } snprintf(buf, buflen, "re_len=%lu\n", (u_long)tmp_u_int32); if ((ret = callback(handle, buf)) != 0) goto err; if (using_vdp) tmp_int = (int)pip->re_pad; else if ((ret = __ram_get_re_pad(dbp, &tmp_int)) != 0) { __db_err(env, ret, "DB->get_re_pad"); goto err; } if (tmp_int != 0 && tmp_int != ' ') { snprintf(buf, buflen, "re_pad=%#x\n", (u_int)tmp_int); if ((ret = callback(handle, buf)) != 0) goto err; } } break; case DB_UNKNOWN: /* Impossible. */ ret = __db_unknown_path(env, "__db_prheader"); goto err; } if (using_vdp) { if (F_ISSET(pip, VRFY_HAS_CHKSUM)) if ((ret = callback(handle, "chksum=1\n")) != 0) goto err; if (F_ISSET(pip, VRFY_HAS_DUPS)) if ((ret = callback(handle, "duplicates=1\n")) != 0) goto err; if (F_ISSET(pip, VRFY_HAS_DUPSORT)) if ((ret = callback(handle, "dupsort=1\n")) != 0) goto err; #ifdef HAVE_COMPRESSION if (F_ISSET(pip, VRFY_HAS_COMPRESS)) if ((ret = callback(handle, "compressed=1\n")) != 0) goto err; #endif /* * !!! * We don't know if the page size was the default if we're * salvaging. It doesn't seem that interesting to have, so * we ignore it for now. 
*/ } else { if (F_ISSET(dbp, DB_AM_CHKSUM)) if ((ret = callback(handle, "chksum=1\n")) != 0) goto err; if (F_ISSET(dbp, DB_AM_DUP)) if ((ret = callback(handle, "duplicates=1\n")) != 0) goto err; if (F_ISSET(dbp, DB_AM_DUPSORT)) if ((ret = callback(handle, "dupsort=1\n")) != 0) goto err; #ifdef HAVE_COMPRESSION if (DB_IS_COMPRESSED(dbp)) if ((ret = callback(handle, "compressed=1\n")) != 0) goto err; #endif if (!F_ISSET(dbp, DB_AM_PGDEF)) { snprintf(buf, buflen, "db_pagesize=%lu\n", (u_long)dbp->pgsize); if ((ret = callback(handle, buf)) != 0) goto err; } } #ifdef HAVE_PARTITION if (DB_IS_PARTITIONED(dbp) && F_ISSET((DB_PARTITION *)dbp->p_internal, PART_RANGE)) { DBT *keys; u_int32_t i; if ((ret = __partition_get_keys(dbp, &tmp_u_int32, &keys)) != 0) goto err; if (tmp_u_int32 != 0) { snprintf(buf, buflen, "nparts=%lu\n", (u_long)tmp_u_int32); if ((ret = callback(handle, buf)) != 0) goto err; for (i = 0; i < tmp_u_int32 - 1; i++) if ((ret = __db_prdbt(&keys[i], pflag, " ", handle, callback, 0)) != 0) goto err; } } #endif if (keyflag && (ret = callback(handle, "keys=1\n")) != 0) goto err; ret = callback(handle, "HEADER=END\n"); err: if (using_vdp && (t_ret = __db_vrfy_putpageinfo(env, vdp, pip)) != 0 && ret == 0) ret = t_ret; if (buf != NULL) __os_free(env, buf); return (ret); } /* * __db_prfooter -- * Print the footer that marks the end of a DB dump. This is trivial, * but for consistency's sake we don't want to put its literal contents * in multiple places. * * PUBLIC: int __db_prfooter __P((void *, int (*)(void *, const void *))); */ int __db_prfooter(handle, callback) void *handle; int (*callback) __P((void *, const void *)); { return (callback(handle, "DATA=END\n")); } /* * __db_pr_callback -- * Callback function for using pr_* functions from C. 
*
 * PUBLIC: int __db_pr_callback __P((void *, const void *));
 */
int
__db_pr_callback(handle, str_arg)
	void *handle;
	const void *str_arg;
{
	FILE *fp;
	char *msg;

	msg = (char *)str_arg;
	fp = (FILE *)handle;

	/* A short write (or write error) is reported to the caller as EIO. */
	if (fprintf(fp, "%s", msg) != (int)strlen(msg))
		return (EIO);

	return (0);
}

/*
 * __db_dbtype_to_string --
 *	Return the name of the database type.
 *
 * PUBLIC: const char * __db_dbtype_to_string __P((DBTYPE));
 */
const char *
__db_dbtype_to_string(type)
	DBTYPE type;
{
	if (type == DB_BTREE)
		return ("btree");
	if (type == DB_HASH)
		return ("hash");
	if (type == DB_RECNO)
		return ("recno");
	if (type == DB_QUEUE)
		return ("queue");

	/* DB_UNKNOWN and anything else fall through here. */
	return ("UNKNOWN TYPE");
}
mit
Kogser/bitcoin
db-4.8.30.NC/tcl/tcl_db_pkg.c
42
108684
/*- * See the file LICENSE for redistribution information. * * Copyright (c) 1999-2009 Oracle. All rights reserved. * * $Id$ */ #include "db_config.h" #ifdef CONFIG_TEST #define DB_DBM_HSEARCH 1 #endif #include "db_int.h" #ifdef HAVE_SYSTEM_INCLUDE_FILES #include <tcl.h> #endif #include "dbinc/db_page.h" #include "dbinc/hash.h" #include "dbinc/tcl_db.h" /* XXX we must declare global data in just one place */ DBTCL_GLOBAL __dbtcl_global; /* * Prototypes for procedures defined later in this file: */ static int berkdb_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*)); static int bdb_EnvOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBTCL_INFO *, DB_ENV **)); static int bdb_DbOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBTCL_INFO *, DB **)); static int bdb_DbRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); static int bdb_DbRename __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); static int bdb_Version __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); #ifdef HAVE_64BIT_TYPES static int bdb_SeqOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBTCL_INFO *, DB_SEQUENCE **)); #endif #ifdef CONFIG_TEST static int bdb_DbUpgrade __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); static int bdb_DbVerify __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBTCL_INFO *)); static int bdb_GetConfig __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); static int bdb_Handles __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); static int bdb_MsgType __P((Tcl_Interp *, int, Tcl_Obj * CONST*)); static int tcl_bt_compare __P((DB *, const DBT *, const DBT *)); static int tcl_compare_callback __P((DB *, const DBT *, const DBT *, Tcl_Obj *, char *)); static void tcl_db_free __P((void *)); static void * tcl_db_malloc __P((size_t)); static void * tcl_db_realloc __P((void *, size_t)); static int tcl_dup_compare __P((DB *, const DBT *, const DBT *)); static u_int32_t tcl_h_hash __P((DB *, const void *, u_int32_t)); static int tcl_isalive __P((DB_ENV *, pid_t, db_threadid_t, u_int32_t)); static u_int32_t tcl_part_callback 
__P((DB *, DBT *));
static int	tcl_set_partition_dirs
		    __P((Tcl_Interp *, DB *, Tcl_Obj *));
static int	tcl_set_partition_keys
		    __P((Tcl_Interp *, DB *, Tcl_Obj *, DBT **));
#endif

int Db_tcl_Init __P((Tcl_Interp *));

/*
 * Db_tcl_Init --
 *
 * This is a package initialization procedure, which is called by Tcl when
 * this package is to be added to an interpreter.  The name is based on the
 * name of the shared library, currently libdb_tcl-X.Y.so, which Tcl uses
 * to determine the name of this function.
 */
int
Db_tcl_Init(interp)
	Tcl_Interp *interp;		/* Interpreter in which the package is
					 * to be made available. */
{
	int code;
	char pkg[12];

	/* Advertise the package as "Db_tcl <major>.<minor>". */
	snprintf(pkg, sizeof(pkg),
	    "%d.%d", DB_VERSION_MAJOR, DB_VERSION_MINOR);
	code = Tcl_PkgProvide(interp, "Db_tcl", pkg);
	if (code != TCL_OK)
		return (code);

	/*
	 * Don't allow setuid/setgid scripts for the Tcl API because some Tcl
	 * functions evaluate the arguments and could otherwise allow a user
	 * to inject Tcl commands.
	 */
#if defined(HAVE_SETUID) && defined(HAVE_GETUID)
	(void)setuid(getuid());
#endif
#if defined(HAVE_SETGID) && defined(HAVE_GETGID)
	(void)setgid(getgid());
#endif

	/* "berkdb" is the single entry point; it dispatches subcommands. */
	(void)Tcl_CreateObjCommand(interp,
	    "berkdb", (Tcl_ObjCmdProc *)berkdb_Cmd, (ClientData)0, NULL);

	/*
	 * Create shared global debugging variables
	 */
	(void)Tcl_LinkVar(
	    interp, "__debug_on", (char *)&__debug_on, TCL_LINK_INT);
	(void)Tcl_LinkVar(
	    interp, "__debug_print", (char *)&__debug_print, TCL_LINK_INT);
	(void)Tcl_LinkVar(
	    interp, "__debug_stop", (char *)&__debug_stop, TCL_LINK_INT);
	(void)Tcl_LinkVar(
	    interp, "__debug_test", (char *)&__debug_test, TCL_LINK_INT);
	LIST_INIT(&__db_infohead);
	return (TCL_OK);
}

/*
 * berkdb_cmd --
 *	Implements the "berkdb" command.
 *	This command supports three sub commands:
 *	berkdb version - Returns a list {major minor patch}
 *	berkdb env - Creates a new DB_ENV and returns a binding
 *	  to a new command of the form dbenvX, where X is an
 *	  integer starting at 0 (dbenv0, dbenv1, ...)
 *	berkdb open - Creates a new DB (optionally within
 *	  the given environment).  Returns a binding to a new
 *	  command of the form dbX, where X is an integer
 *	  starting at 0 (db0, db1, ...)
 */
static int
berkdb_Cmd(notused, interp, objc, objv)
    ClientData notused;        /* Not used. */
    Tcl_Interp *interp;        /* Interpreter */
    int objc;                  /* How many arguments? */
    Tcl_Obj *CONST objv[];     /* The argument objects */
{
    static const char *berkdbcmds[] = {
#ifdef CONFIG_TEST
        "dbverify",
        "getconfig",
        "handles",
        "msgtype",
        "upgrade",
#endif
        "dbremove",
        "dbrename",
        "env",
        "envremove",
        "open",
#ifdef HAVE_64BIT_TYPES
        "sequence",
#endif
        "version",
#ifdef CONFIG_TEST
        /* All below are compatibility functions */
        "hcreate", "hsearch", "hdestroy",
        "dbminit", "fetch", "store",
        "delete", "firstkey", "nextkey",
        "ndbm_open", "dbmclose",
#endif
        /* All below are convenience functions */
        "rand", "random_int", "srand",
        "debug_check",
        NULL
    };
    /*
     * All commands enums below ending in X are compatibility
     */
    enum berkdbcmds {
#ifdef CONFIG_TEST
        BDB_DBVERIFY,
        BDB_GETCONFIG,
        BDB_HANDLES,
        BDB_MSGTYPE,
        BDB_UPGRADE,
#endif
        BDB_DBREMOVE,
        BDB_DBRENAME,
        BDB_ENV,
        BDB_ENVREMOVE,
        BDB_OPEN,
#ifdef HAVE_64BIT_TYPES
        BDB_SEQUENCE,
#endif
        BDB_VERSION,
#ifdef CONFIG_TEST
        BDB_HCREATEX, BDB_HSEARCHX, BDB_HDESTROYX,
        BDB_DBMINITX, BDB_FETCHX, BDB_STOREX,
        BDB_DELETEX, BDB_FIRSTKEYX, BDB_NEXTKEYX,
        BDB_NDBMOPENX, BDB_DBMCLOSEX,
#endif
        BDB_RANDX, BDB_RAND_INTX, BDB_SRANDX,
        BDB_DBGCKX
    };
    /*
     * Monotonically increasing suffixes for the generated widget command
     * names (env0, env1, ..., db0, db1, ..., seq0, ...).
     */
    static int env_id = 0;
    static int db_id = 0;
#ifdef HAVE_64BIT_TYPES
    static int seq_id = 0;
#endif
    DB *dbp;
#ifdef HAVE_64BIT_TYPES
    DB_SEQUENCE *seq;
#endif
#ifdef CONFIG_TEST
    DBM *ndbmp;
    static int ndbm_id = 0;
#endif
    DBTCL_INFO *ip;
    DB_ENV *dbenv;
    Tcl_Obj *res;
    int cmdindex, result;
    char newname[MSG_SIZE];

    COMPQUIET(notused, NULL);

    Tcl_ResetResult(interp);
    memset(newname, 0, MSG_SIZE);
    result = TCL_OK;
    if (objc <= 1) {
        Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
        return (TCL_ERROR);
    }

    /*
     * Get the command name index from the object based on the berkdbcmds
     * defined above.
     */
    if (Tcl_GetIndexFromObj(interp,
        objv[1], berkdbcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
        /* Not a known subcommand; let IS_HELP pick the return code. */
        return (IS_HELP(objv[1]));
    res = NULL;
    switch ((enum berkdbcmds)cmdindex) {
#ifdef CONFIG_TEST
    case BDB_DBVERIFY:
        snprintf(newname, sizeof(newname), "db%d", db_id);
        ip = _NewInfo(interp, NULL, newname, I_DB);
        if (ip != NULL) {
            result = bdb_DbVerify(interp, objc, objv, ip);
            _DeleteInfo(ip);
        } else {
            Tcl_SetResult(interp, "Could not set up info", TCL_STATIC);
            result = TCL_ERROR;
        }
        break;
    case BDB_GETCONFIG:
        result = bdb_GetConfig(interp, objc, objv);
        break;
    case BDB_HANDLES:
        result = bdb_Handles(interp, objc, objv);
        break;
    case BDB_MSGTYPE:
        result = bdb_MsgType(interp, objc, objv);
        break;
    case BDB_UPGRADE:
        result = bdb_DbUpgrade(interp, objc, objv);
        break;
#endif
    case BDB_VERSION:
        _debug_check();
        result = bdb_Version(interp, objc, objv);
        break;
    case BDB_ENV:
        snprintf(newname, sizeof(newname), "env%d", env_id);
        ip = _NewInfo(interp, NULL, newname, I_ENV);
        if (ip != NULL) {
            result = bdb_EnvOpen(interp, objc, objv, ip, &dbenv);
            if (result == TCL_OK && dbenv != NULL) {
                /* Only consume the id once the open has succeeded. */
                env_id++;
                (void)Tcl_CreateObjCommand(interp, newname,
                    (Tcl_ObjCmdProc *)env_Cmd,
                    (ClientData)dbenv, NULL);
                /* Use ip->i_name - newname is overwritten */
                res = NewStringObj(newname, strlen(newname));
                _SetInfoData(ip, dbenv);
            } else
                _DeleteInfo(ip);
        } else {
            Tcl_SetResult(interp, "Could not set up info", TCL_STATIC);
            result = TCL_ERROR;
        }
        break;
    case BDB_DBREMOVE:
        result = bdb_DbRemove(interp, objc, objv);
        break;
    case BDB_DBRENAME:
        result = bdb_DbRename(interp, objc, objv);
        break;
    case BDB_ENVREMOVE:
        result = tcl_EnvRemove(interp, objc, objv, NULL, NULL);
        break;
    case BDB_OPEN:
        snprintf(newname, sizeof(newname), "db%d", db_id);
        ip = _NewInfo(interp, NULL, newname, I_DB);
        if (ip != NULL) {
            result = bdb_DbOpen(interp, objc, objv, ip, &dbp);
            if (result == TCL_OK && dbp != NULL) {
                db_id++;
                (void)Tcl_CreateObjCommand(interp, newname,
                    (Tcl_ObjCmdProc *)db_Cmd,
                    (ClientData)dbp, NULL);
                /* Use ip->i_name - newname is overwritten */
                res = NewStringObj(newname, strlen(newname));
                _SetInfoData(ip, dbp);
            } else
                _DeleteInfo(ip);
        } else {
            Tcl_SetResult(interp, "Could not set up info", TCL_STATIC);
            result = TCL_ERROR;
        }
        break;
#ifdef HAVE_64BIT_TYPES
    case BDB_SEQUENCE:
        snprintf(newname, sizeof(newname), "seq%d", seq_id);
        ip = _NewInfo(interp, NULL, newname, I_SEQ);
        if (ip != NULL) {
            result = bdb_SeqOpen(interp, objc, objv, ip, &seq);
            if (result == TCL_OK && seq != NULL) {
                seq_id++;
                (void)Tcl_CreateObjCommand(interp, newname,
                    (Tcl_ObjCmdProc *)seq_Cmd,
                    (ClientData)seq, NULL);
                /* Use ip->i_name - newname is overwritten */
                res = NewStringObj(newname, strlen(newname));
                _SetInfoData(ip, seq);
            } else
                _DeleteInfo(ip);
        } else {
            Tcl_SetResult(interp, "Could not set up info", TCL_STATIC);
            result = TCL_ERROR;
        }
        break;
#endif
#ifdef CONFIG_TEST
    case BDB_HCREATEX:
    case BDB_HSEARCHX:
    case BDB_HDESTROYX:
        result = bdb_HCommand(interp, objc, objv);
        break;
    case BDB_DBMINITX:
    case BDB_DBMCLOSEX:
    case BDB_FETCHX:
    case BDB_STOREX:
    case BDB_DELETEX:
    case BDB_FIRSTKEYX:
    case BDB_NEXTKEYX:
        result = bdb_DbmCommand(interp, objc, objv, DBTCL_DBM, NULL);
        break;
    case BDB_NDBMOPENX:
        snprintf(newname, sizeof(newname), "ndbm%d", ndbm_id);
        ip = _NewInfo(interp, NULL, newname, I_NDBM);
        if (ip != NULL) {
            result = bdb_NdbmOpen(interp, objc, objv, &ndbmp);
            if (result == TCL_OK) {
                ndbm_id++;
                (void)Tcl_CreateObjCommand(interp, newname,
                    (Tcl_ObjCmdProc *)ndbm_Cmd,
                    (ClientData)ndbmp, NULL);
                /* Use ip->i_name - newname is overwritten */
                res = NewStringObj(newname, strlen(newname));
                _SetInfoData(ip, ndbmp);
            } else
                _DeleteInfo(ip);
        } else {
            Tcl_SetResult(interp, "Could not set up info", TCL_STATIC);
            result = TCL_ERROR;
        }
        break;
#endif
    case BDB_RANDX:
    case BDB_RAND_INTX:
    case BDB_SRANDX:
        result = bdb_RandCommand(interp, objc, objv);
        break;
    case BDB_DBGCKX:
        _debug_check();
        res = Tcl_NewIntObj(0);
        break;
    }

    /*
     * For each different arg call different function to create
     * new commands (or if version, get/return it).
     */
    if (result == TCL_OK && res != NULL)
        Tcl_SetObjResult(interp, res);
    return (result);
}

/*
 * bdb_EnvOpen -
 *	Implements the environment open command.
 *	There are many, many options to the open command.
 *	Here is the general flow:
 *
 *	1.  Call db_env_create to create the env handle.
 *	2.  Parse args tracking options.
 *	3.  Make any pre-open setup calls necessary.
 *	4.  Call DB_ENV->open to open the env.
 *	5.  Return env widget handle to user.
 */
static int
bdb_EnvOpen(interp, objc, objv, ip, dbenvp)
    Tcl_Interp *interp;        /* Interpreter */
    int objc;                  /* How many arguments? */
    Tcl_Obj *CONST objv[];     /* The argument objects */
    DBTCL_INFO *ip;            /* Our internal info */
    DB_ENV **dbenvp;           /* Environment pointer */
{
    static const char *envopen[] = {
#ifdef CONFIG_TEST
        "-alloc",
        "-auto_commit",
        "-cdb",
        "-cdb_alldb",
        "-client_timeout",
        "-event",
        "-failchk",
        "-isalive",
        "-lock",
        "-lock_conflict",
        "-lock_detect",
        "-lock_max_locks",
        "-lock_max_lockers",
        "-lock_max_objects",
        "-lock_partitions",
        "-lock_timeout",
        "-log",
        "-log_filemode",
        "-log_buffer",
        "-log_inmemory",
        "-log_max",
        "-log_regionmax",
        "-log_remove",
        "-mpool_max_openfd",
        "-mpool_max_write",
        "-mpool_mmap_size",
        "-mpool_nommap",
        "-multiversion",
        "-mutex_set_align",
        "-mutex_set_incr",
        "-mutex_set_max",
        "-mutex_set_tas_spins",
        "-overwrite",
        "-pagesize",
        "-register",
        "-reg_timeout",
        "-region_init",
        "-rep",
        "-rep_client",
        "-rep_inmem_files",
        "-rep_lease",
        "-rep_master",
        "-rep_transport",
        "-server",
        "-server_timeout",
        "-set_intermediate_dir_mode",
        "-snapshot",
        "-tablesize",
        "-thread",
        "-time_notgranted",
        "-txn_nowait",
        "-txn_timeout",
        "-txn_timestamp",
        "-verbose",
        "-wrnosync",
        "-zero_log",
#endif
        "-add_dir",
        "-cachesize",
        "-cache_max",
        "-create",
        "-create_dir",
        "-data_dir",
        "-encryptaes",
        "-encryptany",
        "-errfile",
        "-errpfx",
        "-home",
        "-log_dir",
        "-mode",
        "-private",
        "-recover",
        "-recover_fatal",
        "-shm_key",
        "-system_mem",
        "-tmp_dir",
        "-txn",
        "-txn_max",
        "-use_environ",
        "-use_environ_root",
        NULL
    };
    /*
     * !!!
     * These have to be in the same order as the above,
     * which is close to but not quite alphabetical.
     */
    enum envopen {
#ifdef CONFIG_TEST
        TCL_ENV_ALLOC,
        TCL_ENV_AUTO_COMMIT,
        TCL_ENV_CDB,
        TCL_ENV_CDB_ALLDB,
        TCL_ENV_CLIENT_TO,
        TCL_ENV_EVENT,
        TCL_ENV_FAILCHK,
        TCL_ENV_ISALIVE,
        TCL_ENV_LOCK,
        TCL_ENV_CONFLICT,
        TCL_ENV_DETECT,
        TCL_ENV_LOCK_MAX_LOCKS,
        TCL_ENV_LOCK_MAX_LOCKERS,
        TCL_ENV_LOCK_MAX_OBJECTS,
        TCL_ENV_LOCK_PARTITIONS,
        TCL_ENV_LOCK_TIMEOUT,
        TCL_ENV_LOG,
        TCL_ENV_LOG_FILEMODE,
        TCL_ENV_LOG_BUFFER,
        TCL_ENV_LOG_INMEMORY,
        TCL_ENV_LOG_MAX,
        TCL_ENV_LOG_REGIONMAX,
        TCL_ENV_LOG_REMOVE,
        TCL_ENV_MPOOL_MAX_OPENFD,
        TCL_ENV_MPOOL_MAX_WRITE,
        TCL_ENV_MPOOL_MMAP_SIZE,
        TCL_ENV_MPOOL_NOMMAP,
        TCL_ENV_MULTIVERSION,
        TCL_ENV_MUTSETALIGN,
        TCL_ENV_MUTSETINCR,
        TCL_ENV_MUTSETMAX,
        TCL_ENV_MUTSETTAS,
        TCL_ENV_OVERWRITE,
        TCL_ENV_PAGESIZE,
        TCL_ENV_REGISTER,
        TCL_ENV_REG_TIMEOUT,
        TCL_ENV_REGION_INIT,
        TCL_ENV_REP,
        TCL_ENV_REP_CLIENT,
        TCL_ENV_REP_INMEM_FILES,
        TCL_ENV_REP_LEASE,
        TCL_ENV_REP_MASTER,
        TCL_ENV_REP_TRANSPORT,
        TCL_ENV_SERVER,
        TCL_ENV_SERVER_TO,
        TCL_ENV_SET_INTERMEDIATE_DIR,
        TCL_ENV_SNAPSHOT,
        TCL_ENV_TABLESIZE,
        TCL_ENV_THREAD,
        TCL_ENV_TIME_NOTGRANTED,
        TCL_ENV_TXN_NOWAIT,
        TCL_ENV_TXN_TIMEOUT,
        TCL_ENV_TXN_TIME,
        TCL_ENV_VERBOSE,
        TCL_ENV_WRNOSYNC,
        TCL_ENV_ZEROLOG,
#endif
        TCL_ENV_ADD_DIR,
        TCL_ENV_CACHESIZE,
        TCL_ENV_CACHE_MAX,
        TCL_ENV_CREATE,
        TCL_ENV_CREATE_DIR,
        TCL_ENV_DATA_DIR,
        TCL_ENV_ENCRYPT_AES,
        TCL_ENV_ENCRYPT_ANY,
        TCL_ENV_ERRFILE,
        TCL_ENV_ERRPFX,
        TCL_ENV_HOME,
        TCL_ENV_LOG_DIR,
        TCL_ENV_MODE,
        TCL_ENV_PRIVATE,
        TCL_ENV_RECOVER,
        TCL_ENV_RECOVER_FATAL,
        TCL_ENV_SHM_KEY,
        TCL_ENV_SYSTEM_MEM,
        TCL_ENV_TMP_DIR,
        TCL_ENV_TXN,
        TCL_ENV_TXN_MAX,
        TCL_ENV_USE_ENVIRON,
        TCL_ENV_USE_ENVIRON_ROOT
    };
    DB_ENV *dbenv;
    Tcl_Obj **myobjv;
    u_int32_t cr_flags, gbytes, bytes, logbufset, logmaxset;
    u_int32_t open_flags, rep_flags, set_flags, uintarg;
    int i, mode, myobjc, ncaches, optindex, result, ret;
    long client_to, server_to, shm;
    char *arg, *home, *passwd, *server;
#ifdef CONFIG_TEST
    Tcl_Obj **myobjv1;
    time_t timestamp;
    long v;
    u_int32_t detect, time_flag;
    u_int8_t *conflicts;
    int intarg, intarg2, j, nmodes, temp;
#endif

    result = TCL_OK;
    mode = 0;
    rep_flags = set_flags = cr_flags = 0;
    home = NULL;

    /*
     * XXX
     * If/when our Tcl interface becomes thread-safe, we should enable
     * DB_THREAD here in all cases.  For now, we turn it on later in this
     * function, and only when we're in testing and we specify the
     * -thread flag, so that we can exercise MUTEX_THREAD_LOCK cases.
     *
     * In order to become truly thread-safe, we need to look at making sure
     * DBTCL_INFO structs are safe to share across threads (they're not
     * mutex-protected) before we declare the Tcl interface thread-safe.
     * Meanwhile, there's no strong reason to enable DB_THREAD when not
     * testing.
     */
    open_flags = 0;
    logmaxset = logbufset = 0;

    if (objc <= 2) {
        Tcl_WrongNumArgs(interp, 2, objv, "?args?");
        return (TCL_ERROR);
    }

    /*
     * Server code must go before the call to db_env_create.
     *
     * First pass: recognize only the RPC server options; any other word
     * fails Tcl_GetIndexFromObj, which we deliberately ignore (reset the
     * result and continue) so the real parse happens in the second pass.
     */
    server = NULL;
    server_to = client_to = 0;
    i = 2;
    while (i < objc) {
        if (Tcl_GetIndexFromObj(interp, objv[i++], envopen, "option",
            TCL_EXACT, &optindex) != TCL_OK) {
            Tcl_ResetResult(interp);
            continue;
        }
#ifdef CONFIG_TEST
        switch ((enum envopen)optindex) {
        case TCL_ENV_SERVER:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-server hostname");
                result = TCL_ERROR;
                break;
            }
            FLD_SET(cr_flags, DB_RPCCLIENT);
            server = Tcl_GetStringFromObj(objv[i++], NULL);
            break;
        case TCL_ENV_SERVER_TO:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-server_to secs");
                result = TCL_ERROR;
                break;
            }
            FLD_SET(cr_flags, DB_RPCCLIENT);
            result = Tcl_GetLongFromObj(interp, objv[i++], &server_to);
            break;
        case TCL_ENV_CLIENT_TO:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-client_to secs");
                result = TCL_ERROR;
                break;
            }
            FLD_SET(cr_flags, DB_RPCCLIENT);
            result = Tcl_GetLongFromObj(interp, objv[i++], &client_to);
            break;
        default:
            break;
        }
#endif
    }
    if (result != TCL_OK)
        return (TCL_ERROR);
    if ((ret = db_env_create(&dbenv, cr_flags)) != 0)
        return (_ReturnSetup(interp, ret,
            DB_RETOK_STD(ret), "db_env_create"));
    *dbenvp = dbenv;

    /*
     * From here on we must 'goto error' in order to clean up the
     * dbenv from db_env_create.
     */
    dbenv->set_errpfx(dbenv, ip->i_name);
    dbenv->set_errcall(dbenv, _ErrorFunc);
    if (server != NULL && (ret = dbenv->set_rpc_server(dbenv, NULL, server,
        client_to, server_to, 0)) != 0) {
        result = TCL_ERROR;
        goto error;
    }

    /* Hang our info pointer on the dbenv handle, so we can do callbacks. */
    dbenv->app_private = ip;

    /*
     * Get the command name index from the object based on the bdbcmds
     * defined above.
     */
    i = 2;
    while (i < objc) {
        Tcl_ResetResult(interp);
        if (Tcl_GetIndexFromObj(interp, objv[i], envopen, "option",
            TCL_EXACT, &optindex) != TCL_OK) {
            result = IS_HELP(objv[i]);
            goto error;
        }
        i++;
        switch ((enum envopen)optindex) {
#ifdef CONFIG_TEST
        case TCL_ENV_SERVER:
        case TCL_ENV_SERVER_TO:
        case TCL_ENV_CLIENT_TO:
            /*
             * Already handled these, skip them and their arg.
             */
            i++;
            break;
        case TCL_ENV_ALLOC:
            /*
             * Use a Tcl-local alloc and free function so that
             * we're sure to test whether we use umalloc/ufree in
             * the right places.
             */
            (void)dbenv->set_alloc(dbenv,
                tcl_db_malloc, tcl_db_realloc, tcl_db_free);
            break;
        case TCL_ENV_AUTO_COMMIT:
            FLD_SET(set_flags, DB_AUTO_COMMIT);
            break;
        case TCL_ENV_CDB:
            FLD_SET(open_flags, DB_INIT_CDB | DB_INIT_MPOOL);
            break;
        case TCL_ENV_CDB_ALLDB:
            FLD_SET(set_flags, DB_CDB_ALLDB);
            break;
        case TCL_ENV_EVENT:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "-event eventproc");
                result = TCL_ERROR;
                break;
            }
            result = tcl_EventNotify(interp, dbenv, objv[i++], ip);
            break;
        case TCL_ENV_FAILCHK:
            FLD_SET(open_flags, DB_FAILCHK);
            break;
        case TCL_ENV_ISALIVE:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "-isalive aliveproc");
                result = TCL_ERROR;
                break;
            }
            ip->i_isalive = objv[i++];
            Tcl_IncrRefCount(ip->i_isalive);
            _debug_check();
            /* Choose an arbitrary thread count, for testing. */
            if ((ret = dbenv->set_thread_count(dbenv, 5)) == 0)
                ret = dbenv->set_isalive(dbenv, tcl_isalive);
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "set_isalive");
            break;
        case TCL_ENV_LOCK:
            FLD_SET(open_flags, DB_INIT_LOCK | DB_INIT_MPOOL);
            break;
        case TCL_ENV_CONFLICT:
            /*
             * Get conflict list.  List is:
             * {nmodes {matrix}}
             *
             * Where matrix must be nmodes*nmodes big.
             * Set up conflicts array to pass.
             */
            result = Tcl_ListObjGetElements(interp, objv[i],
                &myobjc, &myobjv);
            if (result == TCL_OK)
                i++;
            else
                break;
            if (myobjc != 2) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-lock_conflict {nmodes {matrix}}?");
                result = TCL_ERROR;
                break;
            }
            result = Tcl_GetIntFromObj(interp, myobjv[0], &nmodes);
            if (result != TCL_OK)
                break;
            /*
             * NOTE(review): the result of this Tcl_ListObjGetElements is
             * not checked before myobjc is compared below -- confirm a
             * failure here cannot leave myobjc/myobjv1 stale.
             */
            result = Tcl_ListObjGetElements(interp, myobjv[1],
                &myobjc, &myobjv1);
            if (myobjc != (nmodes * nmodes)) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-lock_conflict {nmodes {matrix}}?");
                result = TCL_ERROR;
                break;
            }
            ret = __os_malloc(dbenv->env, sizeof(u_int8_t) *
                (size_t)nmodes * (size_t)nmodes, &conflicts);
            if (ret != 0) {
                result = TCL_ERROR;
                break;
            }
            for (j = 0; j < myobjc; j++) {
                result = Tcl_GetIntFromObj(interp, myobjv1[j],
                    &temp);
                conflicts[j] = temp;
                if (result != TCL_OK) {
                    __os_free(NULL, conflicts);
                    break;
                }
            }
            /*
             * NOTE(review): if Tcl_GetIntFromObj failed above, conflicts
             * was already freed, yet it is still passed to
             * set_lk_conflicts and freed a second time below --
             * use-after-free/double-free on that error path; needs a fix.
             */
            _debug_check();
            ret = dbenv->set_lk_conflicts(dbenv,
                (u_int8_t *)conflicts, nmodes);
            __os_free(NULL, conflicts);
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "set_lk_conflicts");
            break;
        case TCL_ENV_DETECT:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-lock_detect policy?");
                result = TCL_ERROR;
                break;
            }
            /* Map the policy keyword to its DB_LOCK_* constant. */
            arg = Tcl_GetStringFromObj(objv[i++], NULL);
            if (strcmp(arg, "default") == 0)
                detect = DB_LOCK_DEFAULT;
            else if (strcmp(arg, "expire") == 0)
                detect = DB_LOCK_EXPIRE;
            else if (strcmp(arg, "maxlocks") == 0)
                detect = DB_LOCK_MAXLOCKS;
            else if (strcmp(arg, "maxwrites") == 0)
                detect = DB_LOCK_MAXWRITE;
            else if (strcmp(arg, "minlocks") == 0)
                detect = DB_LOCK_MINLOCKS;
            else if (strcmp(arg, "minwrites") == 0)
                detect = DB_LOCK_MINWRITE;
            else if (strcmp(arg, "oldest") == 0)
                detect = DB_LOCK_OLDEST;
            else if (strcmp(arg, "youngest") == 0)
                detect = DB_LOCK_YOUNGEST;
            else if (strcmp(arg, "random") == 0)
                detect = DB_LOCK_RANDOM;
            else {
                Tcl_AddErrorInfo(interp,
                    "lock_detect: illegal policy");
                result = TCL_ERROR;
                break;
            }
            _debug_check();
            ret = dbenv->set_lk_detect(dbenv, detect);
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "lock_detect");
            break;
        case TCL_ENV_LOCK_MAX_LOCKS:
        case TCL_ENV_LOCK_MAX_LOCKERS:
        case TCL_ENV_LOCK_MAX_OBJECTS:
        case TCL_ENV_LOCK_PARTITIONS:
            /* Four lock-limit options share one u_int32_t argument parse. */
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-lock_max max?");
                result = TCL_ERROR;
                break;
            }
            result = _GetUInt32(interp, objv[i++], &uintarg);
            if (result == TCL_OK) {
                _debug_check();
                switch ((enum envopen)optindex) {
                case TCL_ENV_LOCK_MAX_LOCKS:
                    ret = dbenv->set_lk_max_locks(dbenv,
                        uintarg);
                    break;
                case TCL_ENV_LOCK_MAX_LOCKERS:
                    ret = dbenv->set_lk_max_lockers(dbenv,
                        uintarg);
                    break;
                case TCL_ENV_LOCK_MAX_OBJECTS:
                    ret = dbenv->set_lk_max_objects(dbenv,
                        uintarg);
                    break;
                case TCL_ENV_LOCK_PARTITIONS:
                    ret = dbenv->set_lk_partitions(dbenv,
                        uintarg);
                    break;
                default:
                    break;
                }
                result = _ReturnSetup(interp, ret,
                    DB_RETOK_STD(ret), "lock_max");
            }
            break;
        case TCL_ENV_MUTSETALIGN:
        case TCL_ENV_MUTSETINCR:
        case TCL_ENV_MUTSETMAX:
        case TCL_ENV_MUTSETTAS:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-mutex_set val");
                result = TCL_ERROR;
                break;
            }
            /* Select which mutex parameter tcl_MutSet should set. */
            intarg = 0;
            switch ((enum envopen)optindex) {
            case TCL_ENV_MUTSETALIGN:
                intarg = DBTCL_MUT_ALIGN;
                break;
            case TCL_ENV_MUTSETINCR:
                intarg = DBTCL_MUT_INCR;
                break;
            case TCL_ENV_MUTSETMAX:
                intarg = DBTCL_MUT_MAX;
                break;
            case TCL_ENV_MUTSETTAS:
                intarg = DBTCL_MUT_TAS;
                break;
            default:
                break;
            }
            result = tcl_MutSet(interp, objv[i++], dbenv, intarg);
            break;
        case TCL_ENV_TXN_NOWAIT:
            FLD_SET(set_flags, DB_TXN_NOWAIT);
            break;
        case TCL_ENV_TXN_TIME:
        case TCL_ENV_TXN_TIMEOUT:
        case TCL_ENV_LOCK_TIMEOUT:
        case TCL_ENV_REG_TIMEOUT:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-xxx_timeout time?");
                result = TCL_ERROR;
                break;
            }

            if ((result = Tcl_GetLongFromObj(
                interp, objv[i++], &v)) != TCL_OK)
                break;
            timestamp = (time_t)v;

            /*
             * -txn_timestamp takes a time_t; the other three timeout
             * options route through set_timeout with the matching flag.
             */
            _debug_check();
            if ((enum envopen)optindex == TCL_ENV_TXN_TIME)
                ret = dbenv->set_tx_timestamp(dbenv, &timestamp);
            else {
                if ((enum envopen)optindex == TCL_ENV_LOCK_TIMEOUT)
                    time_flag = DB_SET_LOCK_TIMEOUT;
                else if ((enum envopen)optindex == TCL_ENV_REG_TIMEOUT)
                    time_flag = DB_SET_REG_TIMEOUT;
                else
                    time_flag = DB_SET_TXN_TIMEOUT;

                ret = dbenv->set_timeout(dbenv,
                    (db_timeout_t)timestamp, time_flag);
            }
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "txn_timestamp");
            break;
        case TCL_ENV_LOG:
            FLD_SET(open_flags, DB_INIT_LOG | DB_INIT_MPOOL);
            break;
        case TCL_ENV_LOG_BUFFER:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-log_buffer size?");
                result = TCL_ERROR;
                break;
            }
            result = _GetUInt32(interp, objv[i++], &uintarg);
            if (result == TCL_OK) {
                _debug_check();
                ret = dbenv->set_lg_bsize(dbenv, uintarg);
                result = _ReturnSetup(interp, ret,
                    DB_RETOK_STD(ret), "log_bsize");
                logbufset = 1;
                /*
                 * If -log_max was seen earlier, its value was deferred
                 * in logmaxset; apply it now that the buffer size is set.
                 */
                if (logmaxset) {
                    _debug_check();
                    ret = dbenv->set_lg_max(dbenv,
                        logmaxset);
                    result = _ReturnSetup(interp, ret,
                        DB_RETOK_STD(ret), "log_max");
                    logmaxset = 0;
                    logbufset = 0;
                }
            }
            break;
        case TCL_ENV_LOG_FILEMODE:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-log_filemode mode?");
                result = TCL_ERROR;
                break;
            }
            result = _GetUInt32(interp, objv[i++], &uintarg);
            if (result == TCL_OK) {
                _debug_check();
                ret = dbenv->set_lg_filemode(dbenv, (int)uintarg);
                result = _ReturnSetup(interp, ret,
                    DB_RETOK_STD(ret), "log_filemode");
            }
            break;
        case TCL_ENV_LOG_INMEMORY:
            ret = dbenv->log_set_config(dbenv, DB_LOG_IN_MEMORY, 1);
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "log_inmemory");
            break;
        case TCL_ENV_LOG_MAX:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-log_max max?");
                result = TCL_ERROR;
                break;
            }
            result = _GetUInt32(interp, objv[i++], &uintarg);
            if (result == TCL_OK && logbufset) {
                _debug_check();
                ret = dbenv->set_lg_max(dbenv, uintarg);
                result =
                    _ReturnSetup(interp, ret,
                    DB_RETOK_STD(ret), "log_max");
                logbufset = 0;
            } else
                /*
                 * Defer until after the parse loop (or until -log_buffer
                 * is seen).  NOTE(review): also reached when _GetUInt32
                 * fails, storing an indeterminate uintarg -- confirm.
                 */
                logmaxset = uintarg;
            break;
        case TCL_ENV_LOG_REGIONMAX:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-log_regionmax size?");
                result = TCL_ERROR;
                break;
            }
            result = _GetUInt32(interp, objv[i++], &uintarg);
            if (result == TCL_OK) {
                _debug_check();
                ret = dbenv->set_lg_regionmax(dbenv, uintarg);
                result = _ReturnSetup(interp, ret,
                    DB_RETOK_STD(ret), "log_regionmax");
            }
            break;
        case TCL_ENV_LOG_REMOVE:
            ret = dbenv->log_set_config(dbenv, DB_LOG_AUTO_REMOVE, 1);
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "log_remove");
            break;
        case TCL_ENV_MPOOL_MAX_OPENFD:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-mpool_max_openfd fd_count?");
                result = TCL_ERROR;
                break;
            }
            result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
            if (result == TCL_OK) {
                _debug_check();
                ret = dbenv->set_mp_max_openfd(dbenv, intarg);
                result = _ReturnSetup(interp, ret,
                    DB_RETOK_STD(ret), "mpool_max_openfd");
            }
            break;
        case TCL_ENV_MPOOL_MAX_WRITE:
            /* Argument is a two-element list: {nwrite nsleep}. */
            result = Tcl_ListObjGetElements(interp, objv[i],
                &myobjc, &myobjv);
            if (result == TCL_OK)
                i++;
            else
                break;
            if (myobjc != 2) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-mpool_max_write {nwrite nsleep}?");
                result = TCL_ERROR;
                break;
            }
            result = Tcl_GetIntFromObj(interp, myobjv[0], &intarg);
            if (result != TCL_OK)
                break;
            result = Tcl_GetIntFromObj(interp, myobjv[1], &intarg2);
            if (result != TCL_OK)
                break;
            _debug_check();
            ret = dbenv->set_mp_max_write(
                dbenv, intarg, (db_timeout_t)intarg2);
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "set_mp_max_write");
            break;
        case TCL_ENV_MPOOL_MMAP_SIZE:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-mpool_mmap_size size?");
                result = TCL_ERROR;
                break;
            }
            result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
            if (result == TCL_OK) {
                _debug_check();
                ret = dbenv->set_mp_mmapsize(dbenv, (size_t)intarg);
                result = _ReturnSetup(interp, ret,
                    DB_RETOK_STD(ret), "mpool_mmap_size");
            }
            break;
        case TCL_ENV_MPOOL_NOMMAP:
            FLD_SET(set_flags, DB_NOMMAP);
            break;
        case TCL_ENV_MULTIVERSION:
            FLD_SET(set_flags, DB_MULTIVERSION);
            break;
        case TCL_ENV_OVERWRITE:
            FLD_SET(set_flags, DB_OVERWRITE);
            break;
        case TCL_ENV_PAGESIZE:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-pagesize size?");
                result = TCL_ERROR;
                break;
            }
            result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
            if (result == TCL_OK) {
                _debug_check();
                ret = dbenv->set_mp_pagesize(dbenv,
                    (u_int32_t)intarg);
                result = _ReturnSetup(interp, ret,
                    DB_RETOK_STD(ret), "pagesize");
            }
            break;
        case TCL_ENV_TABLESIZE:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-tablesize size?");
                result = TCL_ERROR;
                break;
            }
            result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
            if (result == TCL_OK) {
                _debug_check();
                ret = dbenv->set_mp_tablesize(dbenv,
                    (u_int32_t)intarg);
                result = _ReturnSetup(interp, ret,
                    DB_RETOK_STD(ret), "tablesize");
            }
            break;
        case TCL_ENV_REGISTER:
            FLD_SET(open_flags, DB_REGISTER);
            break;
        case TCL_ENV_REGION_INIT:
            _debug_check();
            ret = dbenv->set_flags(dbenv, DB_REGION_INIT, 1);
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "region_init");
            break;
        case TCL_ENV_SET_INTERMEDIATE_DIR:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-set_intermediate_dir_mode mode?");
                result = TCL_ERROR;
                break;
            }
            arg = Tcl_GetStringFromObj(objv[i++], NULL);
            _debug_check();
            ret = dbenv->set_intermediate_dir_mode(dbenv, arg);
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "set_intermediate_dir_mode");
            break;
        case TCL_ENV_REP:
            FLD_SET(open_flags, DB_INIT_REP);
            break;
        case TCL_ENV_REP_CLIENT:
            /* rep_flags selects the rep_start role after open succeeds. */
            rep_flags = DB_REP_CLIENT;
            FLD_SET(open_flags, DB_INIT_REP);
            break;
        case TCL_ENV_REP_MASTER:
            rep_flags = DB_REP_MASTER;
            FLD_SET(open_flags, DB_INIT_REP);
            break;
        case TCL_ENV_REP_INMEM_FILES:
            result = tcl_RepInmemFiles(interp,dbenv);
            if (result == TCL_OK)
                FLD_SET(open_flags, DB_INIT_REP);
            break;
        case TCL_ENV_REP_LEASE:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "-rep_lease {nsites timeout clockskew}");
                result = TCL_ERROR;
                break;
            }
            result = Tcl_ListObjGetElements(interp, objv[i],
                &myobjc, &myobjv);
            if (result == TCL_OK)
                i++;
            else
                break;
            result = tcl_RepLease(interp, myobjc, myobjv, dbenv);
            if (result == TCL_OK)
                FLD_SET(open_flags, DB_INIT_REP);
            break;
        case TCL_ENV_REP_TRANSPORT:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "-rep_transport {envid sendproc}");
                result = TCL_ERROR;
                break;
            }
            result = Tcl_ListObjGetElements(interp, objv[i],
                &myobjc, &myobjv);
            if (result == TCL_OK)
                i++;
            else
                break;
            result = tcl_RepTransport(
                interp, myobjc, myobjv, dbenv, ip);
            if (result == TCL_OK)
                FLD_SET(open_flags, DB_INIT_REP);
            break;
        case TCL_ENV_SNAPSHOT:
            FLD_SET(set_flags, DB_TXN_SNAPSHOT);
            break;
        case TCL_ENV_THREAD:
            /* Enable DB_THREAD when specified in testing. */
            FLD_SET(open_flags, DB_THREAD);
            break;
        case TCL_ENV_TIME_NOTGRANTED:
            FLD_SET(set_flags, DB_TIME_NOTGRANTED);
            break;
        case TCL_ENV_VERBOSE:
            /* Argument is a two-element list: {which on|off}. */
            result = Tcl_ListObjGetElements(interp, objv[i],
                &myobjc, &myobjv);
            if (result == TCL_OK)
                i++;
            else
                break;
            if (myobjc != 2) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-verbose {which on|off}?");
                result = TCL_ERROR;
                break;
            }
            result = tcl_EnvVerbose(
                interp, dbenv, myobjv[0], myobjv[1]);
            break;
        case TCL_ENV_WRNOSYNC:
            FLD_SET(set_flags, DB_TXN_WRITE_NOSYNC);
            break;
        case TCL_ENV_ZEROLOG:
            /*
             * NOTE(review): this early return bypasses the 'goto error'
             * cleanup below, so the dbenv handle created by
             * db_env_create is not closed on this failure path.
             */
            if ((ret = dbenv->log_set_config(dbenv,
                DB_LOG_ZERO, 1)) != 0)
                return (
                    _ReturnSetup(interp, ret,
                    DB_RETOK_STD(ret), "set_log_config"));
            break;
#endif
        case TCL_ENV_TXN:
            FLD_SET(open_flags, DB_INIT_LOCK |
                DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN);
            /* Make sure we have an arg to check against! */
            /* Consume any optional "nosync"/"snapshot" modifier words. */
            while (i < objc) {
                arg = Tcl_GetStringFromObj(objv[i], NULL);
                if (strcmp(arg, "nosync") == 0) {
                    FLD_SET(set_flags, DB_TXN_NOSYNC);
                    i++;
                } else if (strcmp(arg, "snapshot") == 0) {
                    FLD_SET(set_flags, DB_TXN_SNAPSHOT);
                    i++;
                } else
                    break;
            }
            break;
        case TCL_ENV_CREATE:
            FLD_SET(open_flags, DB_CREATE | DB_INIT_MPOOL);
            break;
        case TCL_ENV_ENCRYPT_AES:
            /* Make sure we have an arg to check against! */
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-encryptaes passwd?");
                result = TCL_ERROR;
                break;
            }
            passwd = Tcl_GetStringFromObj(objv[i++], NULL);
            _debug_check();
            ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES);
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "set_encrypt");
            break;
        case TCL_ENV_ENCRYPT_ANY:
            /* Make sure we have an arg to check against! */
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-encryptany passwd?");
                result = TCL_ERROR;
                break;
            }
            passwd = Tcl_GetStringFromObj(objv[i++], NULL);
            _debug_check();
            ret = dbenv->set_encrypt(dbenv, passwd, 0);
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "set_encrypt");
            break;
        case TCL_ENV_HOME:
            /* Make sure we have an arg to check against! */
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-home dir?");
                result = TCL_ERROR;
                break;
            }
            /* Remembered here; passed to dbenv->open below. */
            home = Tcl_GetStringFromObj(objv[i++], NULL);
            break;
        case TCL_ENV_MODE:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-mode mode?");
                result = TCL_ERROR;
                break;
            }
            /*
             * Don't need to check result here because
             * if TCL_ERROR, the error message is already
             * set up, and we'll bail out below.  If ok,
             * the mode is set and we go on.
             */
            result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
            break;
        case TCL_ENV_PRIVATE:
            FLD_SET(open_flags, DB_PRIVATE | DB_INIT_MPOOL);
            break;
        case TCL_ENV_RECOVER:
            FLD_SET(open_flags, DB_RECOVER);
            break;
        case TCL_ENV_RECOVER_FATAL:
            FLD_SET(open_flags, DB_RECOVER_FATAL);
            break;
        case TCL_ENV_SYSTEM_MEM:
            FLD_SET(open_flags, DB_SYSTEM_MEM);
            break;
        case TCL_ENV_USE_ENVIRON_ROOT:
            FLD_SET(open_flags, DB_USE_ENVIRON_ROOT);
            break;
        case TCL_ENV_USE_ENVIRON:
            FLD_SET(open_flags, DB_USE_ENVIRON);
            break;
        case TCL_ENV_CACHESIZE:
            /* Argument is a three-element list: {gbytes bytes ncaches}. */
            result = Tcl_ListObjGetElements(interp, objv[i],
                &myobjc, &myobjv);
            if (result == TCL_OK)
                i++;
            else
                break;
            if (myobjc != 3) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-cachesize {gbytes bytes ncaches}?");
                result = TCL_ERROR;
                break;
            }
            result = _GetUInt32(interp, myobjv[0], &gbytes);
            if (result != TCL_OK)
                break;
            result = _GetUInt32(interp, myobjv[1], &bytes);
            if (result != TCL_OK)
                break;
            result = Tcl_GetIntFromObj(interp, myobjv[2], &ncaches);
            if (result != TCL_OK)
                break;
            _debug_check();
            ret = dbenv->set_cachesize(dbenv, gbytes, bytes,
                ncaches);
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "set_cachesize");
            break;
        case TCL_ENV_CACHE_MAX:
            result = Tcl_ListObjGetElements(interp, objv[i],
                &myobjc, &myobjv);
            if (result == TCL_OK)
                i++;
            else
                break;
            if (myobjc != 2) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-cache_max {gbytes bytes}?");
                result = TCL_ERROR;
                break;
            }
            result = _GetUInt32(interp, myobjv[0], &gbytes);
            if (result != TCL_OK)
                break;
            result = _GetUInt32(interp, myobjv[1], &bytes);
            if (result != TCL_OK)
                break;
            _debug_check();
            ret = dbenv->set_cache_max(dbenv, gbytes, bytes);
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "set_cache_max");
            break;
        case TCL_ENV_SHM_KEY:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-shm_key key?");
                result = TCL_ERROR;
                break;
            }
            result = Tcl_GetLongFromObj(interp, objv[i++], &shm);
            if (result == TCL_OK) {
                _debug_check();
                ret = dbenv->set_shm_key(dbenv, shm);
                result = _ReturnSetup(interp, ret,
                    DB_RETOK_STD(ret),
                    "shm_key");
            }
            break;
        case TCL_ENV_TXN_MAX:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "?-txn_max max?");
                result = TCL_ERROR;
                break;
            }
            result = _GetUInt32(interp, objv[i++], &uintarg);
            if (result == TCL_OK) {
                _debug_check();
                ret = dbenv->set_tx_max(dbenv, uintarg);
                result = _ReturnSetup(interp, ret,
                    DB_RETOK_STD(ret), "txn_max");
            }
            break;
        case TCL_ENV_ERRFILE:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "-errfile file");
                result = TCL_ERROR;
                break;
            }
            arg = Tcl_GetStringFromObj(objv[i++], NULL);
            tcl_EnvSetErrfile(interp, dbenv, ip, arg);
            break;
        case TCL_ENV_ERRPFX:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "-errpfx prefix");
                result = TCL_ERROR;
                break;
            }
            arg = Tcl_GetStringFromObj(objv[i++], NULL);
            _debug_check();
            result = tcl_EnvSetErrpfx(interp, dbenv, ip, arg);
            break;
        case TCL_ENV_DATA_DIR:
        case TCL_ENV_ADD_DIR:
        case TCL_ENV_CREATE_DIR:
            /* Three directory options share one string-argument parse. */
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "-xxx_dir dir");
                result = TCL_ERROR;
                break;
            }
            arg = Tcl_GetStringFromObj(objv[i++], NULL);
            _debug_check();
            switch ((enum envopen)optindex) {
            case TCL_ENV_DATA_DIR:
                ret = dbenv->set_data_dir(dbenv, arg);
                break;
            case TCL_ENV_ADD_DIR:
                ret = dbenv->add_data_dir(dbenv, arg);
                break;
            case TCL_ENV_CREATE_DIR:
                ret = dbenv->set_create_dir(dbenv, arg);
                break;
            default:
                break;
            }
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "xxx_dir");
            break;
        case TCL_ENV_LOG_DIR:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "-log_dir dir");
                result = TCL_ERROR;
                break;
            }
            arg = Tcl_GetStringFromObj(objv[i++], NULL);
            _debug_check();
            ret = dbenv->set_lg_dir(dbenv, arg);
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "set_lg_dir");
            break;
        case TCL_ENV_TMP_DIR:
            if (i >= objc) {
                Tcl_WrongNumArgs(interp, 2, objv,
                    "-tmp_dir dir");
                result = TCL_ERROR;
                break;
            }
            arg = Tcl_GetStringFromObj(objv[i++], NULL);
            _debug_check();
            ret = dbenv->set_tmp_dir(dbenv, arg);
            result = _ReturnSetup(interp, ret,
                DB_RETOK_STD(ret), "set_tmp_dir");
            break;
        }
        /*
         * If, at any time, parsing the args we get an error,
         * bail out and return.
         */
        if (result != TCL_OK)
            goto error;
    }

    /*
     * We have to check this here.  We want to set the log buffer
     * size first, if it is specified.  So if the user did so,
     * then we took care of it above.  But, if we get out here and
     * logmaxset is non-zero, then they set the log_max without
     * resetting the log buffer size, so we now have to do the
     * call to set_lg_max, since we didn't do it above.
     */
    if (logmaxset) {
        _debug_check();
        ret = dbenv->set_lg_max(dbenv, (u_int32_t)logmaxset);
        result = _ReturnSetup(interp, ret,
            DB_RETOK_STD(ret), "log_max");
    }

    if (result != TCL_OK)
        goto error;

    if (set_flags) {
        ret = dbenv->set_flags(dbenv, set_flags, 1);
        result = _ReturnSetup(interp, ret,
            DB_RETOK_STD(ret), "set_flags");
        if (result == TCL_ERROR)
            goto error;
        /*
         * If we are successful, clear the result so that the
         * return from set_flags isn't part of the result.
         */
        Tcl_ResetResult(interp);
    }
    /*
     * When we get here, we have already parsed all of our args
     * and made all our calls to set up the environment.  Everything
     * is okay so far, no errors, if we get here.
     *
     * Now open the environment.
     */
    _debug_check();
    ret = dbenv->open(dbenv, home, open_flags, mode);
    result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "dbenv open");

    /* If a replication role was requested, start it now the env is open. */
    if (rep_flags != 0 && result == TCL_OK) {
        _debug_check();
        ret = dbenv->rep_start(dbenv, NULL, rep_flags);
        result = _ReturnSetup(interp, ret,
            DB_RETOK_STD(ret), "rep_start");
    }

    /*
     * On failure, close the private error file if one was opened and
     * discard the partially configured environment handle.
     */
error:	if (result == TCL_ERROR) {
        if (ip->i_err && ip->i_err != stdout && ip->i_err != stderr) {
            (void)fclose(ip->i_err);
            ip->i_err = NULL;
        }
        (void)dbenv->close(dbenv, 0);
    }
    return (result);
}

/*
 * bdb_DbOpen --
 *	Implements the "db_create/db_open" command.
 *	There are many, many options to the open command.
 *	Here is the general flow:
 *
 *	0.  Preparse args to determine if we have -env.
 *	1.  Call db_create to create the db handle.
 *	2.  Parse args tracking options.
 *	3.  Make any pre-open setup calls necessary.
 *	4.  Call DB->open to open the database.
 *	5.
Return db widget handle to user. */ static int bdb_DbOpen(interp, objc, objv, ip, dbp) Tcl_Interp *interp; /* Interpreter */ int objc; /* How many arguments? */ Tcl_Obj *CONST objv[]; /* The argument objects */ DBTCL_INFO *ip; /* Our internal info */ DB **dbp; /* DB handle */ { static const char *bdbenvopen[] = { "-env", NULL }; enum bdbenvopen { TCL_DB_ENV0 }; static const char *bdbopen[] = { #ifdef CONFIG_TEST "-btcompare", "-dupcompare", "-hashcompare", "-hashproc", "-lorder", "-minkey", "-nommap", "-notdurable", "-partition", "-partition_dirs", "-partition_callback", "-read_uncommitted", "-revsplitoff", "-test", "-thread", #endif "-auto_commit", "-btree", "-cachesize", "-chksum", "-compress", "-create", "-create_dir", "-delim", "-dup", "-dupsort", "-encrypt", "-encryptaes", "-encryptany", "-env", "-errfile", "-errpfx", "-excl", "-extent", "-ffactor", "-hash", "-inorder", "-len", "-maxsize", "-mode", "-multiversion", "-nelem", "-pad", "-pagesize", "-queue", "-rdonly", "-recno", "-recnum", "-renumber", "-snapshot", "-source", "-truncate", "-txn", "-unknown", "--", NULL }; enum bdbopen { #ifdef CONFIG_TEST TCL_DB_BTCOMPARE, TCL_DB_DUPCOMPARE, TCL_DB_HASHCOMPARE, TCL_DB_HASHPROC, TCL_DB_LORDER, TCL_DB_MINKEY, TCL_DB_NOMMAP, TCL_DB_NOTDURABLE, TCL_DB_PARTITION, TCL_DB_PART_DIRS, TCL_DB_PART_CALLBACK, TCL_DB_READ_UNCOMMITTED, TCL_DB_REVSPLIT, TCL_DB_TEST, TCL_DB_THREAD, #endif TCL_DB_AUTO_COMMIT, TCL_DB_BTREE, TCL_DB_CACHESIZE, TCL_DB_CHKSUM, TCL_DB_COMPRESS, TCL_DB_CREATE, TCL_DB_CREATE_DIR, TCL_DB_DELIM, TCL_DB_DUP, TCL_DB_DUPSORT, TCL_DB_ENCRYPT, TCL_DB_ENCRYPT_AES, TCL_DB_ENCRYPT_ANY, TCL_DB_ENV, TCL_DB_ERRFILE, TCL_DB_ERRPFX, TCL_DB_EXCL, TCL_DB_EXTENT, TCL_DB_FFACTOR, TCL_DB_HASH, TCL_DB_INORDER, TCL_DB_LEN, TCL_DB_MAXSIZE, TCL_DB_MODE, TCL_DB_MULTIVERSION, TCL_DB_NELEM, TCL_DB_PAD, TCL_DB_PAGESIZE, TCL_DB_QUEUE, TCL_DB_RDONLY, TCL_DB_RECNO, TCL_DB_RECNUM, TCL_DB_RENUMBER, TCL_DB_SNAPSHOT, TCL_DB_SOURCE, TCL_DB_TRUNCATE, TCL_DB_TXN, TCL_DB_UNKNOWN, 
TCL_DB_ENDARG }; DBT *keys; DBTCL_INFO *envip, *errip; DBTYPE type; DB_ENV *dbenv; DB_TXN *txn; ENV *env; Tcl_Obj **myobjv; u_int32_t gbytes, bytes, open_flags, set_flags, uintarg; int endarg, i, intarg, mode, myobjc, ncaches; int optindex, result, ret, set_err, set_pfx, subdblen; u_char *subdbtmp; char *arg, *db, *passwd, *subdb, msg[MSG_SIZE]; type = DB_UNKNOWN; endarg = mode = set_err = set_flags = set_pfx = 0; result = TCL_OK; subdbtmp = NULL; keys = NULL; db = subdb = NULL; /* * XXX * If/when our Tcl interface becomes thread-safe, we should enable * DB_THREAD here in all cases. For now, we turn it on later in this * function, and only when we're in testing and we specify the * -thread flag, so that we can exercise MUTEX_THREAD_LOCK cases. * * In order to become truly thread-safe, we need to look at making sure * DBTCL_INFO structs are safe to share across threads (they're not * mutex-protected) before we declare the Tcl interface thread-safe. * Meanwhile, there's no strong reason to enable DB_THREAD when not * testing. */ open_flags = 0; dbenv = NULL; txn = NULL; env = NULL; if (objc < 2) { Tcl_WrongNumArgs(interp, 2, objv, "?args?"); return (TCL_ERROR); } /* * We must first parse for the environment flag, since that * is needed for db_create. Then create the db handle. */ i = 2; while (i < objc) { if (Tcl_GetIndexFromObj(interp, objv[i++], bdbenvopen, "option", TCL_EXACT, &optindex) != TCL_OK) { /* * Reset the result so we don't get * an errant error message if there is another error. */ Tcl_ResetResult(interp); continue; } switch ((enum bdbenvopen)optindex) { case TCL_DB_ENV0: arg = Tcl_GetStringFromObj(objv[i], NULL); dbenv = NAME_TO_ENV(arg); if (dbenv == NULL) { Tcl_SetResult(interp, "db open: illegal environment", TCL_STATIC); return (TCL_ERROR); } } break; } /* * Create the db handle before parsing the args * since we'll be modifying the database options as we parse. 
*/
	ret = db_create(dbp, dbenv, 0);
	if (ret)
		return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
		    "db_create"));

	/* Hang our info pointer on the DB handle, so we can do callbacks. */
	(*dbp)->api_internal = ip;

	/*
	 * XXX
	 * Remove restriction if error handling not tied to env.
	 *
	 * The DB->set_err* functions overwrite the environment.  So, if
	 * we are using an env, don't overwrite it; if not using an env,
	 * then configure error handling.
	 */
	if (dbenv == NULL) {
		env = NULL;
		(*dbp)->set_errpfx((*dbp), ip->i_name);
		(*dbp)->set_errcall((*dbp), _ErrorFunc);
	} else
		env = dbenv->env;

	/*
	 * If we are using an env, we keep track of err info in the env's ip.
	 * Otherwise use the DB's ip.
	 */
	envip = _PtrToInfo(dbenv); /* XXX */
	if (envip)
		errip = envip;
	else
		errip = ip;

	/*
	 * Get the option name index from the object based on the args
	 * defined above.
	 */
	i = 2;
	while (i < objc) {
		Tcl_ResetResult(interp);
		if (Tcl_GetIndexFromObj(interp, objv[i], bdbopen, "option",
		    TCL_EXACT, &optindex) != TCL_OK) {
			arg = Tcl_GetStringFromObj(objv[i], NULL);
			/*
			 * A word starting with '-' that is not a known
			 * option is either a help request or an error; any
			 * other word ends option parsing (it's a file name).
			 */
			if (arg[0] == '-') {
				result = IS_HELP(objv[i]);
				goto error;
			} else
				Tcl_ResetResult(interp);
			break;
		}
		i++;
		switch ((enum bdbopen)optindex) {
#ifdef CONFIG_TEST
		case TCL_DB_BTCOMPARE:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-btcompare compareproc");
				result = TCL_ERROR;
				break;
			}
			/*
			 * Store the object containing the procedure name.
			 * We don't need to crack it out now--we'll want
			 * to bundle it up to pass into Tcl_EvalObjv anyway.
			 * Tcl's object refcounting will--I hope--take care
			 * of the memory management here.
			 */
			ip->i_compare = objv[i++];
			Tcl_IncrRefCount(ip->i_compare);
			_debug_check();
			ret = (*dbp)->set_bt_compare(*dbp, tcl_bt_compare);
			result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
			    "set_bt_compare");
			break;
		case TCL_DB_DUPCOMPARE:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-dupcompare compareproc");
				result = TCL_ERROR;
				break;
			}
			/*
			 * Store the object containing the procedure name.
			 * See TCL_DB_BTCOMPARE.
			 */
			ip->i_dupcompare = objv[i++];
			Tcl_IncrRefCount(ip->i_dupcompare);
			_debug_check();
			ret = (*dbp)->set_dup_compare(*dbp, tcl_dup_compare);
			result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
			    "set_dup_compare");
			break;
		case TCL_DB_HASHCOMPARE:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-hashcompare compareproc");
				result = TCL_ERROR;
				break;
			}
			/*
			 * Store the object containing the procedure name.
			 * We don't need to crack it out now--we'll want
			 * to bundle it up to pass into Tcl_EvalObjv anyway.
			 * Tcl's object refcounting will--I hope--take care
			 * of the memory management here.
			 */
			ip->i_compare = objv[i++];
			Tcl_IncrRefCount(ip->i_compare);
			_debug_check();
			ret = (*dbp)->set_h_compare(*dbp, tcl_bt_compare);
			result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
			    "set_h_compare");
			break;
		case TCL_DB_HASHPROC:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-hashproc hashproc");
				result = TCL_ERROR;
				break;
			}
			/*
			 * Store the object containing the procedure name.
			 * See TCL_DB_BTCOMPARE.
			 */
			ip->i_hashproc = objv[i++];
			Tcl_IncrRefCount(ip->i_hashproc);
			_debug_check();
			ret = (*dbp)->set_h_hash(*dbp, tcl_h_hash);
			result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
			    "set_h_hash");
			break;
		case TCL_DB_LORDER:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-lorder 1234|4321");
				result = TCL_ERROR;
				break;
			}
			result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
			if (result == TCL_OK) {
				_debug_check();
				ret = (*dbp)->set_lorder(*dbp, intarg);
				result = _ReturnSetup(interp, ret,
				    DB_RETOK_STD(ret), "set_lorder");
			}
			break;
		case TCL_DB_MINKEY:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-minkey minkey");
				result = TCL_ERROR;
				break;
			}
			result = _GetUInt32(interp, objv[i++], &uintarg);
			if (result == TCL_OK) {
				_debug_check();
				ret = (*dbp)->set_bt_minkey(*dbp, uintarg);
				result = _ReturnSetup(interp, ret,
				    DB_RETOK_STD(ret), "set_bt_minkey");
			}
			break;
		case TCL_DB_NOMMAP:
			open_flags |= DB_NOMMAP;
			break;
		case TCL_DB_NOTDURABLE:
			set_flags |= DB_TXN_NOT_DURABLE;
			break;
		case TCL_DB_PART_CALLBACK:
			/* This option consumes two arguments. */
			if (i + 1 >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-partition_callback numparts callback");
				result = TCL_ERROR;
				break;
			}
			/*
			 * Store the object containing the procedure name.
			 * See TCL_DB_BTCOMPARE.
			 */
			result = _GetUInt32(interp, objv[i++], &uintarg);
			if (result != TCL_OK)
				break;
			ip->i_part_callback = objv[i++];
			Tcl_IncrRefCount(ip->i_part_callback);
			_debug_check();
			ret = (*dbp)->set_partition(
			    *dbp, uintarg, NULL, tcl_part_callback);
			result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
			    "set_partition_callback");
			break;
		case TCL_DB_PART_DIRS:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-partition {dir list}");
				result = TCL_ERROR;
				break;
			}
			ret = tcl_set_partition_dirs(interp, *dbp, objv[i++]);
			result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
			    "set_partition_dirs");
			break;
		case TCL_DB_PARTITION:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-partition {key list}");
				result = TCL_ERROR;
				break;
			}
			_debug_check();
			/* keys is freed at the error label below. */
			ret = tcl_set_partition_keys(interp, *dbp, objv[i++],
			    &keys);
			result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
			    "set_partition_keys");
			break;
		case TCL_DB_READ_UNCOMMITTED:
			open_flags |= DB_READ_UNCOMMITTED;
			break;
		case TCL_DB_REVSPLIT:
			set_flags |= DB_REVSPLITOFF;
			break;
		case TCL_DB_TEST:
			ret = (*dbp)->set_h_hash(*dbp, __ham_test);
			result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
			    "set_h_hash");
			break;
		case TCL_DB_THREAD:
			/* Enable DB_THREAD when specified in testing. */
			open_flags |= DB_THREAD;
			break;
#endif
		case TCL_DB_AUTO_COMMIT:
			open_flags |= DB_AUTO_COMMIT;
			break;
		case TCL_DB_ENV:
			/*
			 * Already parsed this, skip it and the env pointer.
*/
			i++;
			continue;
		case TCL_DB_TXN:
			if (i > (objc - 1)) {
				Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
				result = TCL_ERROR;
				break;
			}
			arg = Tcl_GetStringFromObj(objv[i++], NULL);
			txn = NAME_TO_TXN(arg);
			if (txn == NULL) {
				snprintf(msg, MSG_SIZE,
				    "Open: Invalid txn: %s\n", arg);
				Tcl_SetResult(interp, msg, TCL_VOLATILE);
				result = TCL_ERROR;
			}
			break;
		case TCL_DB_BTREE:
			/* Exactly one access-method type may be given. */
			if (type != DB_UNKNOWN) {
				Tcl_SetResult(interp,
				    "Too many DB types specified", TCL_STATIC);
				result = TCL_ERROR;
				goto error;
			}
			type = DB_BTREE;
			break;
		case TCL_DB_HASH:
			if (type != DB_UNKNOWN) {
				Tcl_SetResult(interp,
				    "Too many DB types specified", TCL_STATIC);
				result = TCL_ERROR;
				goto error;
			}
			type = DB_HASH;
			break;
		case TCL_DB_RECNO:
			if (type != DB_UNKNOWN) {
				Tcl_SetResult(interp,
				    "Too many DB types specified", TCL_STATIC);
				result = TCL_ERROR;
				goto error;
			}
			type = DB_RECNO;
			break;
		case TCL_DB_QUEUE:
			if (type != DB_UNKNOWN) {
				Tcl_SetResult(interp,
				    "Too many DB types specified", TCL_STATIC);
				result = TCL_ERROR;
				goto error;
			}
			type = DB_QUEUE;
			break;
		case TCL_DB_UNKNOWN:
			if (type != DB_UNKNOWN) {
				Tcl_SetResult(interp,
				    "Too many DB types specified", TCL_STATIC);
				result = TCL_ERROR;
				goto error;
			}
			break;
		case TCL_DB_CREATE:
			open_flags |= DB_CREATE;
			break;
		case TCL_DB_CREATE_DIR:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-create_dir dir");
				result = TCL_ERROR;
				break;
			}
			arg = Tcl_GetStringFromObj(objv[i++], NULL);
			_debug_check();
			ret = (*dbp)->set_create_dir(*dbp, arg);
			result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
			    "set_create_dir");
			break;
		case TCL_DB_EXCL:
			open_flags |= DB_EXCL;
			break;
		case TCL_DB_RDONLY:
			open_flags |= DB_RDONLY;
			break;
		case TCL_DB_TRUNCATE:
			open_flags |= DB_TRUNCATE;
			break;
		case TCL_DB_MODE:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "?-mode mode?");
				result = TCL_ERROR;
				break;
			}
			/*
			 * Don't need to check result here because
			 * if TCL_ERROR, the error message is already
			 * set up, and we'll bail out below.  If ok,
			 * the mode is set and we go on.
			 */
			result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
			break;
		case TCL_DB_DUP:
			set_flags |= DB_DUP;
			break;
		case TCL_DB_DUPSORT:
			set_flags |= DB_DUPSORT;
			break;
		case TCL_DB_INORDER:
			set_flags |= DB_INORDER;
			break;
		case TCL_DB_RECNUM:
			set_flags |= DB_RECNUM;
			break;
		case TCL_DB_RENUMBER:
			set_flags |= DB_RENUMBER;
			break;
		case TCL_DB_SNAPSHOT:
			set_flags |= DB_SNAPSHOT;
			break;
		case TCL_DB_CHKSUM:
			set_flags |= DB_CHKSUM;
			break;
		case TCL_DB_ENCRYPT:
			set_flags |= DB_ENCRYPT;
			break;
		case TCL_DB_ENCRYPT_AES:
			/* Make sure we have an arg to check against! */
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "?-encryptaes passwd?");
				result = TCL_ERROR;
				break;
			}
			passwd = Tcl_GetStringFromObj(objv[i++], NULL);
			_debug_check();
			ret = (*dbp)->set_encrypt(*dbp, passwd,
			    DB_ENCRYPT_AES);
			result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
			    "set_encrypt");
			break;
		case TCL_DB_ENCRYPT_ANY:
			/* Make sure we have an arg to check against! */
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "?-encryptany passwd?");
				result = TCL_ERROR;
				break;
			}
			passwd = Tcl_GetStringFromObj(objv[i++], NULL);
			_debug_check();
			ret = (*dbp)->set_encrypt(*dbp, passwd, 0);
			result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
			    "set_encrypt");
			break;
		case TCL_DB_COMPRESS:
			/* NULL callbacks select the default compression. */
			ret = (*dbp)->set_bt_compress(*dbp, 0, 0);
			result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
			    "set_bt_compress");
			break;
		case TCL_DB_FFACTOR:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-ffactor density");
				result = TCL_ERROR;
				break;
			}
			result = _GetUInt32(interp, objv[i++], &uintarg);
			if (result == TCL_OK) {
				_debug_check();
				ret = (*dbp)->set_h_ffactor(*dbp, uintarg);
				result = _ReturnSetup(interp, ret,
				    DB_RETOK_STD(ret), "set_h_ffactor");
			}
			break;
		case TCL_DB_MULTIVERSION:
			open_flags |= DB_MULTIVERSION;
			break;
		case TCL_DB_NELEM:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-nelem nelem");
				result = TCL_ERROR;
				break;
			}
			result = _GetUInt32(interp, objv[i++], &uintarg);
			if (result == TCL_OK) {
				_debug_check();
				ret = (*dbp)->set_h_nelem(*dbp, uintarg);
				result = _ReturnSetup(interp, ret,
				    DB_RETOK_STD(ret), "set_h_nelem");
			}
			break;
		case TCL_DB_DELIM:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-delim delim");
				result = TCL_ERROR;
				break;
			}
			result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
			if (result == TCL_OK) {
				_debug_check();
				ret = (*dbp)->set_re_delim(*dbp, intarg);
				result = _ReturnSetup(interp, ret,
				    DB_RETOK_STD(ret), "set_re_delim");
			}
			break;
		case TCL_DB_LEN:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-len length");
				result = TCL_ERROR;
				break;
			}
			result = _GetUInt32(interp, objv[i++], &uintarg);
			if (result == TCL_OK) {
				_debug_check();
				ret = (*dbp)->set_re_len(*dbp, uintarg);
				result = _ReturnSetup(interp, ret,
				    DB_RETOK_STD(ret), "set_re_len");
			}
			break;
		case TCL_DB_MAXSIZE:
			if (i >= objc) {
				/*
				 * NOTE(review): this usage string says
				 * "-len length"; it presumably should read
				 * "-maxsize size" -- confirm against the
				 * Tcl-level docs before changing.
				 */
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-len length");
				result = TCL_ERROR;
				break;
			}
			result = _GetUInt32(interp, objv[i++], &uintarg);
			if (result == TCL_OK) {
				_debug_check();
				ret = (*dbp)->mpf->set_maxsize(
				    (*dbp)->mpf, 0, uintarg);
				result = _ReturnSetup(interp, ret,
				    DB_RETOK_STD(ret), "set_maxsize");
			}
			break;
		case TCL_DB_PAD:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv, "-pad pad");
				result = TCL_ERROR;
				break;
			}
			result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
			if (result == TCL_OK) {
				_debug_check();
				ret = (*dbp)->set_re_pad(*dbp, intarg);
				result = _ReturnSetup(interp, ret,
				    DB_RETOK_STD(ret), "set_re_pad");
			}
			break;
		case TCL_DB_SOURCE:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-source file");
				result = TCL_ERROR;
				break;
			}
			arg = Tcl_GetStringFromObj(objv[i++], NULL);
			_debug_check();
			ret = (*dbp)->set_re_source(*dbp, arg);
			result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
			    "set_re_source");
			break;
		case TCL_DB_EXTENT:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "-extent size");
				result = TCL_ERROR;
				break;
			}
			result = _GetUInt32(interp, objv[i++], &uintarg);
			if (result == TCL_OK) {
				_debug_check();
				ret = (*dbp)->set_q_extentsize(*dbp, uintarg);
				result =
_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set_q_extentsize"); } break; case TCL_DB_CACHESIZE: result = Tcl_ListObjGetElements(interp, objv[i++], &myobjc, &myobjv); if (result != TCL_OK) break; if (myobjc != 3) { Tcl_WrongNumArgs(interp, 2, objv, "?-cachesize {gbytes bytes ncaches}?"); result = TCL_ERROR; break; } result = _GetUInt32(interp, myobjv[0], &gbytes); if (result != TCL_OK) break; result = _GetUInt32(interp, myobjv[1], &bytes); if (result != TCL_OK) break; result = Tcl_GetIntFromObj(interp, myobjv[2], &ncaches); if (result != TCL_OK) break; _debug_check(); ret = (*dbp)->set_cachesize(*dbp, gbytes, bytes, ncaches); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set_cachesize"); break; case TCL_DB_PAGESIZE: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "?-pagesize size?"); result = TCL_ERROR; break; } result = Tcl_GetIntFromObj(interp, objv[i++], &intarg); if (result == TCL_OK) { _debug_check(); ret = (*dbp)->set_pagesize(*dbp, (size_t)intarg); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set pagesize"); } break; case TCL_DB_ERRFILE: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "-errfile file"); result = TCL_ERROR; break; } arg = Tcl_GetStringFromObj(objv[i++], NULL); /* * If the user already set one, close it. */ if (errip->i_err != NULL && errip->i_err != stdout && errip->i_err != stderr) (void)fclose(errip->i_err); if (strcmp(arg, "/dev/stdout") == 0) errip->i_err = stdout; else if (strcmp(arg, "/dev/stderr") == 0) errip->i_err = stderr; else errip->i_err = fopen(arg, "a"); if (errip->i_err != NULL) { _debug_check(); (*dbp)->set_errfile(*dbp, errip->i_err); set_err = 1; } break; case TCL_DB_ERRPFX: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "-errpfx prefix"); result = TCL_ERROR; break; } arg = Tcl_GetStringFromObj(objv[i++], NULL); /* * If the user already set one, free it. 
*/ if (errip->i_errpfx != NULL) __os_free(NULL, errip->i_errpfx); if ((ret = __os_strdup((*dbp)->env, arg, &errip->i_errpfx)) != 0) { result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "__os_strdup"); break; } if (errip->i_errpfx != NULL) { _debug_check(); (*dbp)->set_errpfx(*dbp, errip->i_errpfx); set_pfx = 1; } break; case TCL_DB_ENDARG: endarg = 1; break; } /* switch */ /* * If, at any time, parsing the args we get an error, * bail out and return. */ if (result != TCL_OK) goto error; if (endarg) break; } if (result != TCL_OK) goto error; /* * Any args we have left, (better be 0, 1 or 2 left) are * file names. If we have 0, then an in-memory db. If * there is 1, a db name, if 2 a db and subdb name. */ if (i != objc) { /* * Dbs must be NULL terminated file names, but subdbs can * be anything. Use Strings for the db name and byte * arrays for the subdb. */ db = Tcl_GetStringFromObj(objv[i++], NULL); if (strcmp(db, "") == 0) db = NULL; if (i != objc) { subdbtmp = Tcl_GetByteArrayFromObj(objv[i++], &subdblen); if ((ret = __os_malloc(env, (size_t)subdblen + 1, &subdb)) != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); return (0); } memcpy(subdb, subdbtmp, (size_t)subdblen); subdb[subdblen] = '\0'; } } if (set_flags) { ret = (*dbp)->set_flags(*dbp, set_flags); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set_flags"); if (result == TCL_ERROR) goto error; /* * If we are successful, clear the result so that the * return from set_flags isn't part of the result. */ Tcl_ResetResult(interp); } /* * When we get here, we have already parsed all of our args and made * all our calls to set up the database. Everything is okay so far, * no errors, if we get here. */ _debug_check(); /* Open the database. 
*/ ret = (*dbp)->open(*dbp, txn, db, subdb, type, open_flags, mode); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db open"); error: if (keys != NULL) __os_free(NULL, keys); if (subdb) __os_free(env, subdb); if (result == TCL_ERROR) { (void)(*dbp)->close(*dbp, 0); /* * If we opened and set up the error file in the environment * on this open, but we failed for some other reason, clean * up and close the file. * * XXX when err stuff isn't tied to env, change to use ip, * instead of envip. Also, set_err is irrelevant when that * happens. It will just read: * if (ip->i_err) * fclose(ip->i_err); */ if (set_err && errip && errip->i_err != NULL && errip->i_err != stdout && errip->i_err != stderr) { (void)fclose(errip->i_err); errip->i_err = NULL; } if (set_pfx && errip && errip->i_errpfx != NULL) { __os_free(env, errip->i_errpfx); errip->i_errpfx = NULL; } *dbp = NULL; } return (result); } #ifdef HAVE_64BIT_TYPES /* * bdb_SeqOpen -- * Implements the "Seq_create/Seq_open" command. */ static int bdb_SeqOpen(interp, objc, objv, ip, seqp) Tcl_Interp *interp; /* Interpreter */ int objc; /* How many arguments? 
*/
	Tcl_Obj *CONST objv[];		/* The argument objects */
	DBTCL_INFO *ip;			/* Our internal info */
	DB_SEQUENCE **seqp;		/* DB_SEQUENCE handle */
{
	static const char *seqopen[] = {
		"-cachesize",
		"-create",
		"-inc",
		"-init",
		"-dec",
		"-max",
		"-min",
		"-thread",
		"-txn",
		"-wrap",
		"--",
		NULL
	};
	enum seqopen {
		TCL_SEQ_CACHESIZE,
		TCL_SEQ_CREATE,
		TCL_SEQ_INC,
		TCL_SEQ_INIT,
		TCL_SEQ_DEC,
		TCL_SEQ_MAX,
		TCL_SEQ_MIN,
		TCL_SEQ_THREAD,
		TCL_SEQ_TXN,
		TCL_SEQ_WRAP,
		TCL_SEQ_ENDARG
	};
	DB *dbp;
	DBT key;
	DBTYPE type;
	DB_TXN *txn;
	db_recno_t recno;
	db_seq_t min, max, value;
	Tcl_WideInt tcl_value;
	u_int32_t flags, oflags;
	int cache, endarg, i, optindex, result, ret, setrange, setvalue, v;
	char *arg, *db, msg[MSG_SIZE];

	COMPQUIET(ip, NULL);
	COMPQUIET(value, 0);
	*seqp = NULL;

	if (objc < 2) {
		Tcl_WrongNumArgs(interp, 2, objv, "?args?");
		return (TCL_ERROR);
	}

	txn = NULL;
	endarg = 0;
	/* flags feed set_flags, oflags feed DB_SEQUENCE->open. */
	flags = oflags = 0;
	setrange = setvalue = 0;
	min = INT64_MIN;
	max = INT64_MAX;
	cache = 0;

	for (i = 2; i < objc;) {
		Tcl_ResetResult(interp);
		if (Tcl_GetIndexFromObj(interp, objv[i], seqopen, "option",
		    TCL_EXACT, &optindex) != TCL_OK) {
			arg = Tcl_GetStringFromObj(objv[i], NULL);
			if (arg[0] == '-') {
				result = IS_HELP(objv[i]);
				goto error;
			} else
				Tcl_ResetResult(interp);
			break;
		}
		i++;
		result = TCL_OK;
		switch ((enum seqopen)optindex) {
		case TCL_SEQ_CREATE:
			oflags |= DB_CREATE;
			break;
		case TCL_SEQ_INC:
			LF_SET(DB_SEQ_INC);
			break;
		case TCL_SEQ_CACHESIZE:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "?-cachesize value?");
				result = TCL_ERROR;
				break;
			}
			result = Tcl_GetIntFromObj(interp, objv[i++], &cache);
			break;
		case TCL_SEQ_INIT:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "?-init value?");
				result = TCL_ERROR;
				break;
			}
			result = Tcl_GetWideIntFromObj(
			    interp, objv[i++], &tcl_value);
			value = tcl_value;
			setvalue = 1;
			break;
		case TCL_SEQ_DEC:
			LF_SET(DB_SEQ_DEC);
			break;
		case TCL_SEQ_MAX:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "?-max value?");
				result = TCL_ERROR;
				break;
			}
			if ((result = Tcl_GetWideIntFromObj(interp,
			    objv[i++], &tcl_value)) != TCL_OK)
				goto error;
			max = tcl_value;
			setrange = 1;
			break;
		case TCL_SEQ_MIN:
			if (i >= objc) {
				Tcl_WrongNumArgs(interp, 2, objv,
				    "?-min value?");
				result = TCL_ERROR;
				break;
			}
			if ((result = Tcl_GetWideIntFromObj(interp,
			    objv[i++], &tcl_value)) != TCL_OK)
				goto error;
			min = tcl_value;
			setrange = 1;
			break;
		case TCL_SEQ_THREAD:
			oflags |= DB_THREAD;
			break;
		case TCL_SEQ_TXN:
			if (i > (objc - 1)) {
				Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
				result = TCL_ERROR;
				break;
			}
			arg = Tcl_GetStringFromObj(objv[i++], NULL);
			txn = NAME_TO_TXN(arg);
			if (txn == NULL) {
				snprintf(msg, MSG_SIZE,
				    "Sequence: Invalid txn: %s\n", arg);
				Tcl_SetResult(interp, msg, TCL_VOLATILE);
				result = TCL_ERROR;
			}
			break;
		case TCL_SEQ_WRAP:
			LF_SET(DB_SEQ_WRAP);
			break;
		case TCL_SEQ_ENDARG:
			endarg = 1;
			break;
		}
		/*
		 * If, at any time, parsing the args we get an error,
		 * bail out and return.
		 */
		if (result != TCL_OK)
			goto error;
		if (endarg)
			break;
	}
	/* Exactly two args must remain: the db widget and the key. */
	if (objc - i != 2) {
		Tcl_WrongNumArgs(interp, 2, objv, "?args?");
		return (TCL_ERROR);
	}

	/*
	 * The db must be a string but the sequence key may
	 * be anything.
	 */
	db = Tcl_GetStringFromObj(objv[i++], NULL);
	if ((dbp = NAME_TO_DB(db)) == NULL) {
		Tcl_SetResult(interp, "No such dbp", TCL_STATIC);
		return (TCL_ERROR);
	}
	(void)dbp->get_type(dbp, &type);
	/* Record-number access methods take a recno key, others raw bytes. */
	if (type == DB_QUEUE || type == DB_RECNO) {
		result = _GetUInt32(interp, objv[i++], &recno);
		if (result != TCL_OK)
			return (result);
		DB_INIT_DBT(key, &recno, sizeof(recno));
	} else
		DB_INIT_DBT(key, Tcl_GetByteArrayFromObj(objv[i++], &v), v);
	ret = db_sequence_create(seqp, dbp, 0);
	if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
	    "sequence create")) != TCL_OK) {
		*seqp = NULL;
		return (result);
	}

	ret = (*seqp)->set_flags(*seqp, flags);
	if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
	    "sequence set_flags")) != TCL_OK)
		goto error;
	if (setrange) {
		ret = (*seqp)->set_range(*seqp, min, max);
		if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
		    "sequence set_range")) != TCL_OK)
			goto error;
	}
	if (cache) {
		ret = (*seqp)->set_cachesize(*seqp, cache);
		if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
		    "sequence cachesize")) != TCL_OK)
			goto error;
	}
	if (setvalue) {
		ret = (*seqp)->initial_value(*seqp, value);
		if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
		    "sequence init")) != TCL_OK)
			goto error;
	}
	ret = (*seqp)->open(*seqp, txn, &key, oflags);
	if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
	    "sequence open")) != TCL_OK)
		goto error;

	/* The error label is only reachable via goto, never fallthrough. */
	if (0) {
error:		if (*seqp != NULL)
			(void)(*seqp)->close(*seqp, 0);
		*seqp = NULL;
	}
	return (result);
}
#endif

/*
 * bdb_DbRemove --
 *	Implements the DB_ENV->remove and DB->remove command.
 */
static int
bdb_DbRemove(interp, objc, objv)
	Tcl_Interp *interp;		/* Interpreter */
	int objc;			/* How many arguments?
*/ Tcl_Obj *CONST objv[]; /* The argument objects */ { static const char *bdbrem[] = { "-auto_commit", "-encrypt", "-encryptaes", "-encryptany", "-env", "-txn", "--", NULL }; enum bdbrem { TCL_DBREM_AUTOCOMMIT, TCL_DBREM_ENCRYPT, TCL_DBREM_ENCRYPT_AES, TCL_DBREM_ENCRYPT_ANY, TCL_DBREM_ENV, TCL_DBREM_TXN, TCL_DBREM_ENDARG }; DB *dbp; DB_ENV *dbenv; DB_TXN *txn; ENV *env; u_int32_t enc_flag, iflags, set_flags; int endarg, i, optindex, result, ret, subdblen; u_char *subdbtmp; char *arg, *db, msg[MSG_SIZE], *passwd, *subdb; dbp = NULL; dbenv = NULL; txn = NULL; env = NULL; enc_flag = iflags = set_flags = 0; endarg = 0; result = TCL_OK; subdbtmp = NULL; db = passwd = subdb = NULL; if (objc < 2) { Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?"); return (TCL_ERROR); } /* * We must first parse for the environment flag, since that * is needed for db_create. Then create the db handle. */ i = 2; while (i < objc) { if (Tcl_GetIndexFromObj(interp, objv[i], bdbrem, "option", TCL_EXACT, &optindex) != TCL_OK) { arg = Tcl_GetStringFromObj(objv[i], NULL); if (arg[0] == '-') { result = IS_HELP(objv[i]); goto error; } else Tcl_ResetResult(interp); break; } i++; switch ((enum bdbrem)optindex) { case TCL_DBREM_AUTOCOMMIT: iflags |= DB_AUTO_COMMIT; _debug_check(); break; case TCL_DBREM_ENCRYPT: set_flags |= DB_ENCRYPT; _debug_check(); break; case TCL_DBREM_ENCRYPT_AES: /* Make sure we have an arg to check against! */ if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "?-encryptaes passwd?"); result = TCL_ERROR; break; } passwd = Tcl_GetStringFromObj(objv[i++], NULL); enc_flag = DB_ENCRYPT_AES; break; case TCL_DBREM_ENCRYPT_ANY: /* Make sure we have an arg to check against! 
*/ if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "?-encryptany passwd?"); result = TCL_ERROR; break; } passwd = Tcl_GetStringFromObj(objv[i++], NULL); enc_flag = 0; break; case TCL_DBREM_ENV: arg = Tcl_GetStringFromObj(objv[i++], NULL); dbenv = NAME_TO_ENV(arg); if (dbenv == NULL) { Tcl_SetResult(interp, "db remove: illegal environment", TCL_STATIC); return (TCL_ERROR); } env = dbenv->env; break; case TCL_DBREM_ENDARG: endarg = 1; break; case TCL_DBREM_TXN: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); result = TCL_ERROR; break; } arg = Tcl_GetStringFromObj(objv[i++], NULL); txn = NAME_TO_TXN(arg); if (txn == NULL) { snprintf(msg, MSG_SIZE, "Put: Invalid txn: %s\n", arg); Tcl_SetResult(interp, msg, TCL_VOLATILE); result = TCL_ERROR; } break; } /* * If, at any time, parsing the args we get an error, * bail out and return. */ if (result != TCL_OK) goto error; if (endarg) break; } if (result != TCL_OK) goto error; /* * Any args we have left, (better be 1 or 2 left) are * file names. If there is 1, a db name, if 2 a db and subdb name. */ if ((i != (objc - 1)) || (i != (objc - 2))) { /* * Dbs must be NULL terminated file names, but subdbs can * be anything. Use Strings for the db name and byte * arrays for the subdb. */ db = Tcl_GetStringFromObj(objv[i++], NULL); if (strcmp(db, "") == 0) db = NULL; if (i != objc) { subdbtmp = Tcl_GetByteArrayFromObj(objv[i++], &subdblen); if ((ret = __os_malloc(env, (size_t)subdblen + 1, &subdb)) != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); return (0); } memcpy(subdb, subdbtmp, (size_t)subdblen); subdb[subdblen] = '\0'; } } else { Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?"); result = TCL_ERROR; goto error; } if (dbenv == NULL) { ret = db_create(&dbp, dbenv, 0); if (ret) { result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db_create"); goto error; } /* * XXX * Remove restriction if error handling not tied to env. * * The DB->set_err* functions overwrite the environment. 
So, if * we are using an env, don't overwrite it; if not using an env, * then configure error handling. */ dbp->set_errpfx(dbp, "DbRemove"); dbp->set_errcall(dbp, _ErrorFunc); if (passwd != NULL) { ret = dbp->set_encrypt(dbp, passwd, enc_flag); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set_encrypt"); } if (set_flags != 0) { ret = dbp->set_flags(dbp, set_flags); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set_flags"); } } /* * The dbremove method is a destructor, NULL out the dbp. */ _debug_check(); if (dbp == NULL) ret = dbenv->dbremove(dbenv, txn, db, subdb, iflags); else ret = dbp->remove(dbp, db, subdb, 0); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db remove"); dbp = NULL; error: if (subdb) __os_free(env, subdb); if (result == TCL_ERROR && dbp != NULL) (void)dbp->close(dbp, 0); return (result); } /* * bdb_DbRename -- * Implements the DB_ENV->dbrename and DB->rename commands. */ static int bdb_DbRename(interp, objc, objv) Tcl_Interp *interp; /* Interpreter */ int objc; /* How many arguments? */ Tcl_Obj *CONST objv[]; /* The argument objects */ { static const char *bdbmv[] = { "-auto_commit", "-encrypt", "-encryptaes", "-encryptany", "-env", "-txn", "--", NULL }; enum bdbmv { TCL_DBMV_AUTOCOMMIT, TCL_DBMV_ENCRYPT, TCL_DBMV_ENCRYPT_AES, TCL_DBMV_ENCRYPT_ANY, TCL_DBMV_ENV, TCL_DBMV_TXN, TCL_DBMV_ENDARG }; DB *dbp; DB_ENV *dbenv; DB_TXN *txn; ENV *env; u_int32_t enc_flag, iflags, set_flags; int endarg, i, newlen, optindex, result, ret, subdblen; u_char *subdbtmp; char *arg, *db, msg[MSG_SIZE], *newname, *passwd, *subdb; dbp = NULL; dbenv = NULL; txn = NULL; env = NULL; enc_flag = iflags = set_flags = 0; result = TCL_OK; endarg = 0; db = newname = passwd = subdb = NULL; subdbtmp = NULL; if (objc < 2) { Tcl_WrongNumArgs(interp, 3, objv, "?args? filename ?database? ?newname?"); return (TCL_ERROR); } /* * We must first parse for the environment flag, since that * is needed for db_create. Then create the db handle. 
*/ i = 2; while (i < objc) { if (Tcl_GetIndexFromObj(interp, objv[i], bdbmv, "option", TCL_EXACT, &optindex) != TCL_OK) { arg = Tcl_GetStringFromObj(objv[i], NULL); if (arg[0] == '-') { result = IS_HELP(objv[i]); goto error; } else Tcl_ResetResult(interp); break; } i++; switch ((enum bdbmv)optindex) { case TCL_DBMV_AUTOCOMMIT: iflags |= DB_AUTO_COMMIT; _debug_check(); break; case TCL_DBMV_ENCRYPT: set_flags |= DB_ENCRYPT; _debug_check(); break; case TCL_DBMV_ENCRYPT_AES: /* Make sure we have an arg to check against! */ if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "?-encryptaes passwd?"); result = TCL_ERROR; break; } passwd = Tcl_GetStringFromObj(objv[i++], NULL); enc_flag = DB_ENCRYPT_AES; break; case TCL_DBMV_ENCRYPT_ANY: /* Make sure we have an arg to check against! */ if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "?-encryptany passwd?"); result = TCL_ERROR; break; } passwd = Tcl_GetStringFromObj(objv[i++], NULL); enc_flag = 0; break; case TCL_DBMV_ENV: arg = Tcl_GetStringFromObj(objv[i++], NULL); dbenv = NAME_TO_ENV(arg); if (dbenv == NULL) { Tcl_SetResult(interp, "db rename: illegal environment", TCL_STATIC); return (TCL_ERROR); } env = dbenv->env; break; case TCL_DBMV_ENDARG: endarg = 1; break; case TCL_DBMV_TXN: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?"); result = TCL_ERROR; break; } arg = Tcl_GetStringFromObj(objv[i++], NULL); txn = NAME_TO_TXN(arg); if (txn == NULL) { snprintf(msg, MSG_SIZE, "Put: Invalid txn: %s\n", arg); Tcl_SetResult(interp, msg, TCL_VOLATILE); result = TCL_ERROR; } break; } /* * If, at any time, parsing the args we get an error, * bail out and return. */ if (result != TCL_OK) goto error; if (endarg) break; } if (result != TCL_OK) goto error; /* * Any args we have left, (better be 2 or 3 left) are * file names. If there is 2, a file name, if 3 a file and db name. */ if ((i != (objc - 2)) || (i != (objc - 3))) { /* * Dbs must be NULL terminated file names, but subdbs can * be anything. 
Use Strings for the db name and byte * arrays for the subdb. */ db = Tcl_GetStringFromObj(objv[i++], NULL); if (strcmp(db, "") == 0) db = NULL; if (i == objc - 2) { subdbtmp = Tcl_GetByteArrayFromObj(objv[i++], &subdblen); if ((ret = __os_malloc(env, (size_t)subdblen + 1, &subdb)) != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); return (0); } memcpy(subdb, subdbtmp, (size_t)subdblen); subdb[subdblen] = '\0'; } subdbtmp = Tcl_GetByteArrayFromObj(objv[i++], &newlen); if ((ret = __os_malloc( env, (size_t)newlen + 1, &newname)) != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); return (0); } memcpy(newname, subdbtmp, (size_t)newlen); newname[newlen] = '\0'; } else { Tcl_WrongNumArgs( interp, 3, objv, "?args? filename ?database? ?newname?"); result = TCL_ERROR; goto error; } if (dbenv == NULL) { ret = db_create(&dbp, dbenv, 0); if (ret) { result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db_create"); goto error; } /* * XXX * Remove restriction if error handling not tied to env. * * The DB->set_err* functions overwrite the environment. So, if * we are using an env, don't overwrite it; if not using an env, * then configure error handling. */ dbp->set_errpfx(dbp, "DbRename"); dbp->set_errcall(dbp, _ErrorFunc); if (passwd != NULL) { ret = dbp->set_encrypt(dbp, passwd, enc_flag); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set_encrypt"); } if (set_flags != 0) { ret = dbp->set_flags(dbp, set_flags); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set_flags"); } } /* * The dbrename method is a destructor, NULL out the dbp. 
*/ _debug_check(); if (dbp == NULL) ret = dbenv->dbrename(dbenv, txn, db, subdb, newname, iflags); else ret = dbp->rename(dbp, db, subdb, newname, 0); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db rename"); dbp = NULL; error: if (subdb) __os_free(env, subdb); if (newname) __os_free(env, newname); if (result == TCL_ERROR && dbp != NULL) (void)dbp->close(dbp, 0); return (result); } #ifdef CONFIG_TEST /* * bdb_DbVerify -- * Implements the DB->verify command. */ static int bdb_DbVerify(interp, objc, objv, ip) Tcl_Interp *interp; /* Interpreter */ int objc; /* How many arguments? */ Tcl_Obj *CONST objv[]; /* The argument objects */ DBTCL_INFO *ip; /* Our internal info */ { static const char *bdbverify[] = { "-btcompare", "-dupcompare", "-hashcompare", "-hashproc", "-encrypt", "-encryptaes", "-encryptany", "-env", "-errfile", "-errpfx", "-noorderchk", "-orderchkonly", "-unref", "--", NULL }; enum bdbvrfy { TCL_DBVRFY_BTCOMPARE, TCL_DBVRFY_DUPCOMPARE, TCL_DBVRFY_HASHCOMPARE, TCL_DBVRFY_HASHPROC, TCL_DBVRFY_ENCRYPT, TCL_DBVRFY_ENCRYPT_AES, TCL_DBVRFY_ENCRYPT_ANY, TCL_DBVRFY_ENV, TCL_DBVRFY_ERRFILE, TCL_DBVRFY_ERRPFX, TCL_DBVRFY_NOORDERCHK, TCL_DBVRFY_ORDERCHKONLY, TCL_DBVRFY_UNREF, TCL_DBVRFY_ENDARG }; DB_ENV *dbenv; DB *dbp; FILE *errf; int (*bt_compare) __P((DB *, const DBT *, const DBT *)); int (*dup_compare) __P((DB *, const DBT *, const DBT *)); int (*h_compare) __P((DB *, const DBT *, const DBT *)); u_int32_t (*h_hash)__P((DB *, const void *, u_int32_t)); u_int32_t enc_flag, flags, set_flags; int endarg, i, optindex, result, ret, subdblen; char *arg, *db, *errpfx, *passwd, *subdb; u_char *subdbtmp; dbenv = NULL; dbp = NULL; passwd = NULL; result = TCL_OK; db = errpfx = subdb = NULL; errf = NULL; bt_compare = NULL; dup_compare = NULL; h_compare = NULL; h_hash = NULL; flags = endarg = 0; enc_flag = set_flags = 0; if (objc < 2) { Tcl_WrongNumArgs(interp, 2, objv, "?args? 
filename"); return (TCL_ERROR); } /* * We must first parse for the environment flag, since that * is needed for db_create. Then create the db handle. */ i = 2; while (i < objc) { if (Tcl_GetIndexFromObj(interp, objv[i], bdbverify, "option", TCL_EXACT, &optindex) != TCL_OK) { arg = Tcl_GetStringFromObj(objv[i], NULL); if (arg[0] == '-') { result = IS_HELP(objv[i]); goto error; } else Tcl_ResetResult(interp); break; } i++; switch ((enum bdbvrfy)optindex) { case TCL_DBVRFY_BTCOMPARE: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "-btcompare compareproc"); result = TCL_ERROR; break; } /* * Store the object containing the procedure name. * We don't need to crack it out now--we'll want * to bundle it up to pass into Tcl_EvalObjv anyway. * Tcl's object refcounting will--I hope--take care * of the memory management here. */ ip->i_compare = objv[i++]; Tcl_IncrRefCount(ip->i_compare); _debug_check(); bt_compare = tcl_bt_compare; break; case TCL_DBVRFY_DUPCOMPARE: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "-dupcompare compareproc"); result = TCL_ERROR; break; } /* * Store the object containing the procedure name. * See TCL_DBVRFY_BTCOMPARE. */ ip->i_dupcompare = objv[i++]; Tcl_IncrRefCount(ip->i_dupcompare); _debug_check(); dup_compare = tcl_dup_compare; break; case TCL_DBVRFY_HASHCOMPARE: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "-hashcompare compareproc"); result = TCL_ERROR; break; } /* * Store the object containing the procedure name. * We don't need to crack it out now--we'll want * to bundle it up to pass into Tcl_EvalObjv anyway. * Tcl's object refcounting will--I hope--take care * of the memory management here. */ ip->i_compare = objv[i++]; Tcl_IncrRefCount(ip->i_compare); _debug_check(); h_compare = tcl_bt_compare; break; case TCL_DBVRFY_HASHPROC: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "-hashproc hashproc"); result = TCL_ERROR; break; } /* * Store the object containing the procedure name. * See TCL_DBVRFY_BTCOMPARE. 
*/ ip->i_hashproc = objv[i++]; Tcl_IncrRefCount(ip->i_hashproc); _debug_check(); h_hash = tcl_h_hash; break; case TCL_DBVRFY_ENCRYPT: set_flags |= DB_ENCRYPT; _debug_check(); break; case TCL_DBVRFY_ENCRYPT_AES: /* Make sure we have an arg to check against! */ if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "?-encryptaes passwd?"); result = TCL_ERROR; break; } passwd = Tcl_GetStringFromObj(objv[i++], NULL); enc_flag = DB_ENCRYPT_AES; break; case TCL_DBVRFY_ENCRYPT_ANY: /* Make sure we have an arg to check against! */ if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "?-encryptany passwd?"); result = TCL_ERROR; break; } passwd = Tcl_GetStringFromObj(objv[i++], NULL); enc_flag = 0; break; case TCL_DBVRFY_ENV: arg = Tcl_GetStringFromObj(objv[i++], NULL); dbenv = NAME_TO_ENV(arg); if (dbenv == NULL) { Tcl_SetResult(interp, "db verify: illegal environment", TCL_STATIC); result = TCL_ERROR; break; } break; case TCL_DBVRFY_ERRFILE: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "-errfile file"); result = TCL_ERROR; break; } arg = Tcl_GetStringFromObj(objv[i++], NULL); /* * If the user already set one, close it. */ if (errf != NULL && errf != stdout && errf != stderr) (void)fclose(errf); if (strcmp(arg, "/dev/stdout") == 0) errf = stdout; else if (strcmp(arg, "/dev/stderr") == 0) errf = stderr; else errf = fopen(arg, "a"); break; case TCL_DBVRFY_ERRPFX: if (i >= objc) { Tcl_WrongNumArgs(interp, 2, objv, "-errpfx prefix"); result = TCL_ERROR; break; } arg = Tcl_GetStringFromObj(objv[i++], NULL); /* * If the user already set one, free it. 
*/ if (errpfx != NULL) __os_free(dbenv->env, errpfx); if ((ret = __os_strdup(NULL, arg, &errpfx)) != 0) { result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "__os_strdup"); break; } break; case TCL_DBVRFY_NOORDERCHK: flags |= DB_NOORDERCHK; break; case TCL_DBVRFY_ORDERCHKONLY: flags |= DB_ORDERCHKONLY; break; case TCL_DBVRFY_UNREF: flags |= DB_UNREF; break; case TCL_DBVRFY_ENDARG: endarg = 1; break; } /* * If, at any time, parsing the args we get an error, * bail out and return. */ if (result != TCL_OK) goto error; if (endarg) break; } if (result != TCL_OK) goto error; /* * The remaining arg is the db filename. */ /* * Any args we have left, (better be 1 or 2 left) are * file names. If there is 1, a db name, if 2 a db and subdb name. */ if (i != objc) { /* * Dbs must be NULL terminated file names, but subdbs can * be anything. Use Strings for the db name and byte * arrays for the subdb. */ db = Tcl_GetStringFromObj(objv[i++], NULL); if (strcmp(db, "") == 0) db = NULL; if (i != objc) { subdbtmp = Tcl_GetByteArrayFromObj(objv[i++], &subdblen); if ((ret = __os_malloc(dbenv->env, (size_t)subdblen + 1, &subdb)) != 0) { Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC); return (0); } memcpy(subdb, subdbtmp, (size_t)subdblen); subdb[subdblen] = '\0'; } } else { Tcl_WrongNumArgs(interp, 2, objv, "?args? filename"); result = TCL_ERROR; goto error; } ret = db_create(&dbp, dbenv, 0); if (ret) { result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db_create"); goto error; } /* Hang our info pointer on the DB handle, so we can do callbacks. 
*/ dbp->api_internal = ip; if (errf != NULL) dbp->set_errfile(dbp, errf); if (errpfx != NULL) dbp->set_errpfx(dbp, errpfx); if (passwd != NULL && (ret = dbp->set_encrypt(dbp, passwd, enc_flag)) != 0) { result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set_encrypt"); goto error; } if (set_flags != 0 && (ret = dbp->set_flags(dbp, set_flags)) != 0) { result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set_flags"); goto error; } if (bt_compare != NULL && (ret = dbp->set_bt_compare(dbp, bt_compare)) != 0) { result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set_bt_compare"); goto error; } if (dup_compare != NULL && (ret = dbp->set_dup_compare(dbp, dup_compare)) != 0) { result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set_dup_compare"); goto error; } if (h_compare != NULL && (ret = dbp->set_h_compare(dbp, h_compare)) != 0) { result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set_h_compare"); goto error; } if (h_hash != NULL && (ret = dbp->set_h_hash(dbp, h_hash)) != 0) { result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "set_h_hash"); goto error; } /* * The verify method is a destructor, NULL out the dbp. */ ret = dbp->verify(dbp, db, subdb, NULL, flags); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db verify"); dbp = NULL; error: if (errf != NULL && errf != stdout && errf != stderr) (void)fclose(errf); if (errpfx != NULL) __os_free(dbenv->env, errpfx); if (dbp) (void)dbp->close(dbp, 0); return (result); } #endif /* * bdb_Version -- * Implements the version command. */ static int bdb_Version(interp, objc, objv) Tcl_Interp *interp; /* Interpreter */ int objc; /* How many arguments? 
*/ Tcl_Obj *CONST objv[]; /* The argument objects */ { static const char *bdbver[] = { "-string", NULL }; enum bdbver { TCL_VERSTRING }; int i, optindex, maj, min, patch, result, string, verobjc; char *arg, *v; Tcl_Obj *res, *verobjv[3]; result = TCL_OK; string = 0; if (objc < 2) { Tcl_WrongNumArgs(interp, 2, objv, "?args?"); return (TCL_ERROR); } /* * We must first parse for the environment flag, since that * is needed for db_create. Then create the db handle. */ i = 2; while (i < objc) { if (Tcl_GetIndexFromObj(interp, objv[i], bdbver, "option", TCL_EXACT, &optindex) != TCL_OK) { arg = Tcl_GetStringFromObj(objv[i], NULL); if (arg[0] == '-') { result = IS_HELP(objv[i]); goto error; } else Tcl_ResetResult(interp); break; } i++; switch ((enum bdbver)optindex) { case TCL_VERSTRING: string = 1; break; } /* * If, at any time, parsing the args we get an error, * bail out and return. */ if (result != TCL_OK) goto error; } if (result != TCL_OK) goto error; v = db_version(&maj, &min, &patch); if (string) res = NewStringObj(v, strlen(v)); else { verobjc = 3; verobjv[0] = Tcl_NewIntObj(maj); verobjv[1] = Tcl_NewIntObj(min); verobjv[2] = Tcl_NewIntObj(patch); res = Tcl_NewListObj(verobjc, verobjv); } Tcl_SetObjResult(interp, res); error: return (result); } #ifdef CONFIG_TEST /* * bdb_GetConfig -- * Implements the getconfig command. */ #define ADD_CONFIG_NAME(name) \ conf = NewStringObj(name, strlen(name)); \ if (Tcl_ListObjAppendElement(interp, res, conf) != TCL_OK) \ return (TCL_ERROR); static int bdb_GetConfig(interp, objc, objv) Tcl_Interp *interp; /* Interpreter */ int objc; /* How many arguments? */ Tcl_Obj *CONST objv[]; /* The argument objects */ { Tcl_Obj *res, *conf; /* * No args. Error if we have some */ if (objc != 2) { Tcl_WrongNumArgs(interp, 2, objv, ""); return (TCL_ERROR); } res = Tcl_NewListObj(0, NULL); conf = NULL; /* * This command conditionally adds strings in based on * how DB is configured so that the test suite can make * decisions based on that. 
For now only implement the * configuration pieces we need. */ #ifdef DEBUG ADD_CONFIG_NAME("debug"); #endif #ifdef DEBUG_ROP ADD_CONFIG_NAME("debug_rop"); #endif #ifdef DEBUG_WOP ADD_CONFIG_NAME("debug_wop"); #endif #ifdef DIAGNOSTIC ADD_CONFIG_NAME("diagnostic"); #endif #ifdef HAVE_PARTITION ADD_CONFIG_NAME("partition"); #endif #ifdef HAVE_HASH ADD_CONFIG_NAME("hash"); #endif #ifdef HAVE_QUEUE ADD_CONFIG_NAME("queue"); #endif #ifdef HAVE_REPLICATION ADD_CONFIG_NAME("rep"); #endif #ifdef HAVE_REPLICATION_THREADS ADD_CONFIG_NAME("repmgr"); #endif #ifdef HAVE_RPC ADD_CONFIG_NAME("rpc"); #endif #ifdef HAVE_VERIFY ADD_CONFIG_NAME("verify"); #endif Tcl_SetObjResult(interp, res); return (TCL_OK); } /* * bdb_Handles -- * Implements the handles command. */ static int bdb_Handles(interp, objc, objv) Tcl_Interp *interp; /* Interpreter */ int objc; /* How many arguments? */ Tcl_Obj *CONST objv[]; /* The argument objects */ { DBTCL_INFO *p; Tcl_Obj *res, *handle; /* * No args. Error if we have some */ if (objc != 2) { Tcl_WrongNumArgs(interp, 2, objv, ""); return (TCL_ERROR); } res = Tcl_NewListObj(0, NULL); LIST_FOREACH(p, &__db_infohead, entries) { handle = NewStringObj(p->i_name, strlen(p->i_name)); if (Tcl_ListObjAppendElement(interp, res, handle) != TCL_OK) return (TCL_ERROR); } Tcl_SetObjResult(interp, res); return (TCL_OK); } /* * bdb_MsgType - * Implements the msgtype command. * Given a replication message return its message type name. */ static int bdb_MsgType(interp, objc, objv) Tcl_Interp *interp; /* Interpreter */ int objc; /* How many arguments? */ Tcl_Obj *CONST objv[]; /* The argument objects */ { __rep_control_args *rp; Tcl_Obj *msgname; u_int32_t len, msgtype, swaptype; int freerp, ret; /* * If the messages in rep.h change, this must change too! * Add "no_type" for 0 so that we directly index. 
*/ static const char *msgnames[] = { "no_type", "alive", "alive_req", "all_req", "bulk_log", "bulk_page", "dupmaster", "file", "file_fail", "file_req", "lease_grant", "log", "log_more", "log_req", "master_req", "newclient", "newfile", "newmaster", "newsite", "page", "page_fail", "page_more", "page_req", "rerequest", "startsync", "update", "update_req", "verify", "verify_fail", "verify_req", "vote1", "vote2", NULL }; /* * 1 arg, the message. Error if different. */ if (objc != 3) { Tcl_WrongNumArgs(interp, 3, objv, "msgtype msg"); return (TCL_ERROR); } ret = _CopyObjBytes(interp, objv[2], &rp, &len, &freerp); if (ret != TCL_OK) { Tcl_SetResult(interp, "msgtype: bad control message", TCL_STATIC); return (TCL_ERROR); } swaptype = msgtype = rp->rectype; /* * We have no DB_ENV or ENV here. The message type may be * swapped. Get both and use the one that is in the message range. */ M_32_SWAP(swaptype); if (msgtype > REP_MAX_MSG && swaptype <= REP_MAX_MSG) msgtype = swaptype; msgname = NewStringObj(msgnames[msgtype], strlen(msgnames[msgtype])); Tcl_SetObjResult(interp, msgname); if (rp != NULL && freerp) __os_free(NULL, rp); return (TCL_OK); } /* * bdb_DbUpgrade -- * Implements the DB->upgrade command. */ static int bdb_DbUpgrade(interp, objc, objv) Tcl_Interp *interp; /* Interpreter */ int objc; /* How many arguments? */ Tcl_Obj *CONST objv[]; /* The argument objects */ { static const char *bdbupg[] = { "-dupsort", "-env", "--", NULL }; enum bdbupg { TCL_DBUPG_DUPSORT, TCL_DBUPG_ENV, TCL_DBUPG_ENDARG }; DB_ENV *dbenv; DB *dbp; u_int32_t flags; int endarg, i, optindex, result, ret; char *arg, *db; dbenv = NULL; dbp = NULL; result = TCL_OK; db = NULL; flags = endarg = 0; if (objc < 2) { Tcl_WrongNumArgs(interp, 2, objv, "?args? 
filename"); return (TCL_ERROR); } i = 2; while (i < objc) { if (Tcl_GetIndexFromObj(interp, objv[i], bdbupg, "option", TCL_EXACT, &optindex) != TCL_OK) { arg = Tcl_GetStringFromObj(objv[i], NULL); if (arg[0] == '-') { result = IS_HELP(objv[i]); goto error; } else Tcl_ResetResult(interp); break; } i++; switch ((enum bdbupg)optindex) { case TCL_DBUPG_DUPSORT: flags |= DB_DUPSORT; break; case TCL_DBUPG_ENV: arg = Tcl_GetStringFromObj(objv[i++], NULL); dbenv = NAME_TO_ENV(arg); if (dbenv == NULL) { Tcl_SetResult(interp, "db upgrade: illegal environment", TCL_STATIC); return (TCL_ERROR); } break; case TCL_DBUPG_ENDARG: endarg = 1; break; } /* * If, at any time, parsing the args we get an error, * bail out and return. */ if (result != TCL_OK) goto error; if (endarg) break; } if (result != TCL_OK) goto error; /* * The remaining arg is the db filename. */ if (i == (objc - 1)) db = Tcl_GetStringFromObj(objv[i++], NULL); else { Tcl_WrongNumArgs(interp, 2, objv, "?args? filename"); result = TCL_ERROR; goto error; } ret = db_create(&dbp, dbenv, 0); if (ret) { result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db_create"); goto error; } /* * XXX * Remove restriction if error handling not tied to env. * * The DB->set_err* functions overwrite the environment. So, if * we are using an env, don't overwrite it; if not using an env, * then configure error handling. */ if (dbenv == NULL) { dbp->set_errpfx(dbp, "DbUpgrade"); dbp->set_errcall(dbp, _ErrorFunc); } ret = dbp->upgrade(dbp, db, flags); result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db upgrade"); error: if (dbp) (void)dbp->close(dbp, 0); return (result); } /* * tcl_bt_compare and tcl_dup_compare -- * These two are basically identical internally, so may as well * share code. The only differences are the name used in error * reporting and the Tcl_Obj representing their respective procs. 
*/ static int tcl_bt_compare(dbp, dbta, dbtb) DB *dbp; const DBT *dbta, *dbtb; { return (tcl_compare_callback(dbp, dbta, dbtb, ((DBTCL_INFO *)dbp->api_internal)->i_compare, "bt_compare")); } static int tcl_dup_compare(dbp, dbta, dbtb) DB *dbp; const DBT *dbta, *dbtb; { return (tcl_compare_callback(dbp, dbta, dbtb, ((DBTCL_INFO *)dbp->api_internal)->i_dupcompare, "dup_compare")); } /* * tcl_compare_callback -- * Tcl callback for set_bt_compare and set_dup_compare. What this * function does is stuff the data fields of the two DBTs into Tcl ByteArray * objects, then call the procedure stored in ip->i_compare on the two * objects. Then we return that procedure's result as the comparison. */ static int tcl_compare_callback(dbp, dbta, dbtb, procobj, errname) DB *dbp; const DBT *dbta, *dbtb; Tcl_Obj *procobj; char *errname; { DBTCL_INFO *ip; Tcl_Interp *interp; Tcl_Obj *a, *b, *resobj, *objv[3]; int result, cmp; ip = (DBTCL_INFO *)dbp->api_internal; interp = ip->i_interp; objv[0] = procobj; /* * Create two ByteArray objects, with the two data we've been passed. * This will involve a copy, which is unpleasantly slow, but there's * little we can do to avoid this (I think). */ a = Tcl_NewByteArrayObj(dbta->data, (int)dbta->size); Tcl_IncrRefCount(a); b = Tcl_NewByteArrayObj(dbtb->data, (int)dbtb->size); Tcl_IncrRefCount(b); objv[1] = a; objv[2] = b; result = Tcl_EvalObjv(interp, 3, objv, 0); if (result != TCL_OK) { /* * XXX * If this or the next Tcl call fails, we're doomed. * There's no way to return an error from comparison functions, * no way to determine what the correct sort order is, and * so no way to avoid corrupting the database if we proceed. * We could play some games stashing return values on the * DB handle, but it's not worth the trouble--no one with * any sense is going to be using this other than for testing, * and failure typically means that the bt_compare proc * had a syntax error in it or something similarly dumb. * * So, drop core. 
If we're not running with diagnostic * mode, panic--and always return a negative number. :-) */ panic: __db_errx(dbp->env, "Tcl %s callback failed", errname); return (__env_panic(dbp->env, DB_RUNRECOVERY)); } resobj = Tcl_GetObjResult(interp); result = Tcl_GetIntFromObj(interp, resobj, &cmp); if (result != TCL_OK) goto panic; Tcl_DecrRefCount(a); Tcl_DecrRefCount(b); return (cmp); } /* * tcl_h_hash -- * Tcl callback for the hashing function. See tcl_compare_callback-- * this works much the same way, only we're given a buffer and a length * instead of two DBTs. */ static u_int32_t tcl_h_hash(dbp, buf, len) DB *dbp; const void *buf; u_int32_t len; { DBTCL_INFO *ip; Tcl_Interp *interp; Tcl_Obj *objv[2]; int result, hval; ip = (DBTCL_INFO *)dbp->api_internal; interp = ip->i_interp; objv[0] = ip->i_hashproc; /* * Create a ByteArray for the buffer. */ objv[1] = Tcl_NewByteArrayObj((void *)buf, (int)len); Tcl_IncrRefCount(objv[1]); result = Tcl_EvalObjv(interp, 2, objv, 0); if (result != TCL_OK) goto panic; result = Tcl_GetIntFromObj(interp, Tcl_GetObjResult(interp), &hval); if (result != TCL_OK) goto panic; Tcl_DecrRefCount(objv[1]); return ((u_int32_t)hval); panic: __db_errx(dbp->env, "Tcl h_hash callback failed"); (void)__env_panic(dbp->env, DB_RUNRECOVERY); return (0); } static int tcl_isalive(dbenv, pid, tid, flags) DB_ENV *dbenv; pid_t pid; db_threadid_t tid; u_int32_t flags; { ENV *env; DBTCL_INFO *ip; Tcl_Interp *interp; Tcl_Obj *objv[2]; pid_t mypid; db_threadid_t mytid; int answer, result; __os_id(dbenv, &mypid, &mytid); if (mypid == pid && (LF_ISSET(DB_MUTEX_PROCESS_ONLY) || mytid == tid)) return (1); /* * We only support the PROCESS_ONLY case for now, because that seems * easiest, and that's all we need for our tests for the moment. 
*/ if (!LF_ISSET(DB_MUTEX_PROCESS_ONLY)) return (1); ip = (DBTCL_INFO *)dbenv->app_private; interp = ip->i_interp; objv[0] = ip->i_isalive; objv[1] = Tcl_NewLongObj((long)pid); Tcl_IncrRefCount(objv[1]); result = Tcl_EvalObjv(interp, 2, objv, 0); if (result != TCL_OK) goto panic; Tcl_DecrRefCount(objv[1]); result = Tcl_GetIntFromObj(interp, Tcl_GetObjResult(interp), &answer); if (result != TCL_OK) goto panic; return (answer); panic: env = dbenv->env; __db_errx(env, "Tcl isalive callback failed: %s", Tcl_GetStringResult(interp)); (void)__env_panic(env, DB_RUNRECOVERY); return (0); } /* * tcl_part_callback -- */ static u_int32_t tcl_part_callback(dbp, data) DB *dbp; DBT *data; { DBTCL_INFO *ip; Tcl_Interp *interp; Tcl_Obj *objv[2]; int result, hval; ip = (DBTCL_INFO *)dbp->api_internal; interp = ip->i_interp; objv[0] = ip->i_part_callback; objv[1] = Tcl_NewByteArrayObj(data->data, (int)data->size); Tcl_IncrRefCount(objv[1]); result = Tcl_EvalObjv(interp, 2, objv, 0); if (result != TCL_OK) goto panic; result = Tcl_GetIntFromObj(interp, Tcl_GetObjResult(interp), &hval); if (result != TCL_OK) goto panic; Tcl_DecrRefCount(objv[1]); return ((u_int32_t)hval); panic: __db_errx(dbp->env, "Tcl part_callback callback failed"); (void)__env_panic(dbp->env, DB_RUNRECOVERY); return (0); } /* * tcl_rep_send -- * Replication send callback. 
* * PUBLIC: int tcl_rep_send __P((DB_ENV *, * PUBLIC: const DBT *, const DBT *, const DB_LSN *, int, u_int32_t)); */ int tcl_rep_send(dbenv, control, rec, lsnp, eid, flags) DB_ENV *dbenv; const DBT *control, *rec; const DB_LSN *lsnp; int eid; u_int32_t flags; { #define TCLDB_SENDITEMS 7 #define TCLDB_MAXREPFLAGS 32 DBTCL_INFO *ip; Tcl_Interp *interp; Tcl_Obj *control_o, *eid_o, *flags_o, *lsn_o, *origobj, *rec_o; Tcl_Obj *lsnobj[2], *myobjv[TCLDB_MAXREPFLAGS], *objv[TCLDB_SENDITEMS]; Tcl_Obj *resobj; int i, myobjc, result, ret; ip = (DBTCL_INFO *)dbenv->app_private; interp = ip->i_interp; objv[0] = ip->i_rep_send; control_o = Tcl_NewByteArrayObj(control->data, (int)control->size); Tcl_IncrRefCount(control_o); rec_o = Tcl_NewByteArrayObj(rec->data, (int)rec->size); Tcl_IncrRefCount(rec_o); eid_o = Tcl_NewIntObj(eid); Tcl_IncrRefCount(eid_o); myobjv[myobjc = 0] = NULL; if (flags == 0) myobjv[myobjc++] = NewStringObj("none", strlen("none")); if (LF_ISSET(DB_REP_ANYWHERE)) myobjv[myobjc++] = NewStringObj("any", strlen("any")); if (LF_ISSET(DB_REP_NOBUFFER)) myobjv[myobjc++] = NewStringObj("nobuffer", strlen("nobuffer")); if (LF_ISSET(DB_REP_PERMANENT)) myobjv[myobjc++] = NewStringObj("perm", strlen("perm")); if (LF_ISSET(DB_REP_REREQUEST)) myobjv[myobjc++] = NewStringObj("rerequest", strlen("rerequest")); /* * If we're given an unrecognized flag send "unknown". 
*/ if (myobjc == 0) myobjv[myobjc++] = NewStringObj("unknown", strlen("unknown")); for (i = 0; i < myobjc; i++) Tcl_IncrRefCount(myobjv[i]); flags_o = Tcl_NewListObj(myobjc, myobjv); Tcl_IncrRefCount(flags_o); lsnobj[0] = Tcl_NewLongObj((long)lsnp->file); Tcl_IncrRefCount(lsnobj[0]); lsnobj[1] = Tcl_NewLongObj((long)lsnp->offset); Tcl_IncrRefCount(lsnobj[1]); lsn_o = Tcl_NewListObj(2, lsnobj); Tcl_IncrRefCount(lsn_o); objv[1] = control_o; objv[2] = rec_o; objv[3] = ip->i_rep_eid; /* From ID */ objv[4] = eid_o; /* To ID */ objv[5] = flags_o; /* Flags */ objv[6] = lsn_o; /* LSN */ /* * We really want to return the original result to the * user. So, save the result obj here, and then after * we've taken care of the Tcl_EvalObjv, set the result * back to this original result. */ origobj = Tcl_GetObjResult(interp); Tcl_IncrRefCount(origobj); result = Tcl_EvalObjv(interp, TCLDB_SENDITEMS, objv, 0); if (result != TCL_OK) { /* * XXX * This probably isn't the right error behavior, but * this error should only happen if the Tcl callback is * somehow invalid, which is a fatal scripting bug. */ err: __db_errx(dbenv->env, "Tcl rep_send failure: %s", Tcl_GetStringResult(interp)); return (EINVAL); } resobj = Tcl_GetObjResult(interp); result = Tcl_GetIntFromObj(interp, resobj, &ret); if (result != TCL_OK) goto err; Tcl_SetObjResult(interp, origobj); Tcl_DecrRefCount(origobj); Tcl_DecrRefCount(control_o); Tcl_DecrRefCount(rec_o); Tcl_DecrRefCount(eid_o); for (i = 0; i < myobjc; i++) Tcl_DecrRefCount(myobjv[i]); Tcl_DecrRefCount(flags_o); Tcl_DecrRefCount(lsnobj[0]); Tcl_DecrRefCount(lsnobj[1]); Tcl_DecrRefCount(lsn_o); return (ret); } #endif #ifdef CONFIG_TEST /* * tcl_db_malloc, tcl_db_realloc, tcl_db_free -- * Tcl-local malloc, realloc, and free functions to use for user data * to exercise umalloc/urealloc/ufree. Allocate the memory as a Tcl object * so we're sure to exacerbate and catch any shared-library issues. 
*/ static void * tcl_db_malloc(size) size_t size; { Tcl_Obj *obj; void *buf; obj = Tcl_NewObj(); if (obj == NULL) return (NULL); Tcl_IncrRefCount(obj); Tcl_SetObjLength(obj, (int)(size + sizeof(Tcl_Obj *))); buf = Tcl_GetString(obj); memcpy(buf, &obj, sizeof(&obj)); buf = (Tcl_Obj **)buf + 1; return (buf); } static void * tcl_db_realloc(ptr, size) void *ptr; size_t size; { Tcl_Obj *obj; if (ptr == NULL) return (tcl_db_malloc(size)); obj = *(Tcl_Obj **)((Tcl_Obj **)ptr - 1); Tcl_SetObjLength(obj, (int)(size + sizeof(Tcl_Obj *))); ptr = Tcl_GetString(obj); memcpy(ptr, &obj, sizeof(&obj)); ptr = (Tcl_Obj **)ptr + 1; return (ptr); } static void tcl_db_free(ptr) void *ptr; { Tcl_Obj *obj; obj = *(Tcl_Obj **)((Tcl_Obj **)ptr - 1); Tcl_DecrRefCount(obj); } static int tcl_set_partition_keys(interp, dbp, obj, keyp) Tcl_Interp *interp; DB *dbp; Tcl_Obj *obj; DBT **keyp; { DBT *keys, *kp; Tcl_Obj **obj_list; u_int32_t i, count; int ret; *keyp = NULL; if ((ret = Tcl_ListObjGetElements(interp, obj, (int *)&count, &obj_list)) != TCL_OK) return (EINVAL); if ((ret = __os_calloc(NULL, count, sizeof(DBT), &keys)) != 0) return (ret); *keyp = keys; kp = keys; for (i = 0; i < count; i++, kp++) kp->data = Tcl_GetStringFromObj(obj_list[i], (int*)&kp->size); if ((ret = dbp->set_partition(dbp, (u_int32_t)count + 1, keys, NULL)) != 0) return (ret); return (0); } static int tcl_set_partition_dirs(interp, dbp, obj) Tcl_Interp *interp; DB *dbp; Tcl_Obj *obj; { char **dp, **dirs; Tcl_Obj **obj_list; u_int32_t i, count; int ret; if ((ret = Tcl_ListObjGetElements(interp, obj, (int*)&count, &obj_list)) != TCL_OK) return (EINVAL); if ((ret = __os_calloc(NULL, count + 1, sizeof(char *), &dirs)) != 0) return (ret); dp = dirs; for (i = 0; i < count; i++, dp++) *dp = Tcl_GetStringFromObj(obj_list[i], NULL); if ((ret = dbp->set_partition_dirs(dbp, (const char **)dirs)) != 0) return (ret); __os_free(NULL, dirs); return (0); } #endif
mit
manopaul/Autonomous-Vehicles
11 - Path Planning/src/Eigen-3.3/bench/benchFFT.cpp
301
2806
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Mark Borgerding mark a borgerding net // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #include <iostream> #include <bench/BenchUtil.h> #include <complex> #include <vector> #include <Eigen/Core> #include <unsupported/Eigen/FFT> using namespace Eigen; using namespace std; template <typename T> string nameof(); template <> string nameof<float>() {return "float";} template <> string nameof<double>() {return "double";} template <> string nameof<long double>() {return "long double";} #ifndef TYPE #define TYPE float #endif #ifndef NFFT #define NFFT 1024 #endif #ifndef NDATA #define NDATA 1000000 #endif using namespace Eigen; template <typename T> void bench(int nfft,bool fwd,bool unscaled=false, bool halfspec=false) { typedef typename NumTraits<T>::Real Scalar; typedef typename std::complex<Scalar> Complex; int nits = NDATA/nfft; vector<T> inbuf(nfft); vector<Complex > outbuf(nfft); FFT< Scalar > fft; if (unscaled) { fft.SetFlag(fft.Unscaled); cout << "unscaled "; } if (halfspec) { fft.SetFlag(fft.HalfSpectrum); cout << "halfspec "; } std::fill(inbuf.begin(),inbuf.end(),0); fft.fwd( outbuf , inbuf); BenchTimer timer; timer.reset(); for (int k=0;k<8;++k) { timer.start(); if (fwd) for(int i = 0; i < nits; i++) fft.fwd( outbuf , inbuf); else for(int i = 0; i < nits; i++) fft.inv(inbuf,outbuf); timer.stop(); } cout << nameof<Scalar>() << " "; double mflops = 5.*nfft*log2((double)nfft) / (1e6 * timer.value() / (double)nits ); if ( NumTraits<T>::IsComplex ) { cout << "complex"; }else{ cout << "real "; mflops /= 2; } if (fwd) cout << " fwd"; else cout << " inv"; cout << " NFFT=" << nfft << " " << (double(1e-6*nfft*nits)/timer.value()) << " MS/s " << mflops << "MFLOPS\n"; } int main(int argc,char ** argv) { 
bench<complex<float> >(NFFT,true); bench<complex<float> >(NFFT,false); bench<float>(NFFT,true); bench<float>(NFFT,false); bench<float>(NFFT,false,true); bench<float>(NFFT,false,true,true); bench<complex<double> >(NFFT,true); bench<complex<double> >(NFFT,false); bench<double>(NFFT,true); bench<double>(NFFT,false); bench<complex<long double> >(NFFT,true); bench<complex<long double> >(NFFT,false); bench<long double>(NFFT,true); bench<long double>(NFFT,false); return 0; }
mit
calvinfarias/IC2015-2
BOOST/boost_1_61_0/libs/config/test/no_auto_multidecl_fail.cpp
47
1080
// This file was automatically generated on Thu Mar 12 17:32:04 2009 // by libs/config/tools/generate.cpp // Copyright John Maddock 2002-4. // Use, modification and distribution are subject to the // Boost Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // See http://www.boost.org/libs/config for the most recent version.// // Revision $Id$ // // Test file for macro BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS // This file should not compile, if it does then // BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS should not be defined. // See file boost_no_auto_multidecl.ipp for details // Must not have BOOST_ASSERT_CONFIG set; it defeats // the objective of this file: #ifdef BOOST_ASSERT_CONFIG # undef BOOST_ASSERT_CONFIG #endif #include <boost/config.hpp> #include "test.hpp" #ifdef BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS #include "boost_no_auto_multidecl.ipp" #else #error "this file should not compile" #endif int main( int, char *[] ) { return boost_no_cxx11_auto_multideclarations::test(); }
mit
Shockblast/godot
thirdparty/icu4c/common/dictionarydata.cpp
49
8292
// © 2016 and later: Unicode, Inc. and others. // License & terms of use: http://www.unicode.org/copyright.html /* ******************************************************************************* * Copyright (C) 2014-2016, International Business Machines * Corporation and others. All Rights Reserved. ******************************************************************************* * dictionarydata.h * * created on: 2012may31 * created by: Markus W. Scherer & Maxime Serrano */ #include "dictionarydata.h" #include "unicode/ucharstrie.h" #include "unicode/bytestrie.h" #include "unicode/udata.h" #include "cmemory.h" #if !UCONFIG_NO_BREAK_ITERATION U_NAMESPACE_BEGIN const int32_t DictionaryData::TRIE_TYPE_BYTES = 0; const int32_t DictionaryData::TRIE_TYPE_UCHARS = 1; const int32_t DictionaryData::TRIE_TYPE_MASK = 7; const int32_t DictionaryData::TRIE_HAS_VALUES = 8; const int32_t DictionaryData::TRANSFORM_NONE = 0; const int32_t DictionaryData::TRANSFORM_TYPE_OFFSET = 0x1000000; const int32_t DictionaryData::TRANSFORM_TYPE_MASK = 0x7f000000; const int32_t DictionaryData::TRANSFORM_OFFSET_MASK = 0x1fffff; DictionaryMatcher::~DictionaryMatcher() { } UCharsDictionaryMatcher::~UCharsDictionaryMatcher() { udata_close(file); } int32_t UCharsDictionaryMatcher::getType() const { return DictionaryData::TRIE_TYPE_UCHARS; } int32_t UCharsDictionaryMatcher::matches(UText *text, int32_t maxLength, int32_t limit, int32_t *lengths, int32_t *cpLengths, int32_t *values, int32_t *prefix) const { UCharsTrie uct(characters); int32_t startingTextIndex = (int32_t)utext_getNativeIndex(text); int32_t wordCount = 0; int32_t codePointsMatched = 0; for (UChar32 c = utext_next32(text); c >= 0; c=utext_next32(text)) { UStringTrieResult result = (codePointsMatched == 0) ? 
uct.first(c) : uct.next(c); int32_t lengthMatched = (int32_t)utext_getNativeIndex(text) - startingTextIndex; codePointsMatched += 1; if (USTRINGTRIE_HAS_VALUE(result)) { if (wordCount < limit) { if (values != NULL) { values[wordCount] = uct.getValue(); } if (lengths != NULL) { lengths[wordCount] = lengthMatched; } if (cpLengths != NULL) { cpLengths[wordCount] = codePointsMatched; } ++wordCount; } if (result == USTRINGTRIE_FINAL_VALUE) { break; } } else if (result == USTRINGTRIE_NO_MATCH) { break; } if (lengthMatched >= maxLength) { break; } } if (prefix != NULL) { *prefix = codePointsMatched; } return wordCount; } BytesDictionaryMatcher::~BytesDictionaryMatcher() { udata_close(file); } UChar32 BytesDictionaryMatcher::transform(UChar32 c) const { if ((transformConstant & DictionaryData::TRANSFORM_TYPE_MASK) == DictionaryData::TRANSFORM_TYPE_OFFSET) { if (c == 0x200D) { return 0xFF; } else if (c == 0x200C) { return 0xFE; } int32_t delta = c - (transformConstant & DictionaryData::TRANSFORM_OFFSET_MASK); if (delta < 0 || 0xFD < delta) { return U_SENTINEL; } return (UChar32)delta; } return c; } int32_t BytesDictionaryMatcher::getType() const { return DictionaryData::TRIE_TYPE_BYTES; } int32_t BytesDictionaryMatcher::matches(UText *text, int32_t maxLength, int32_t limit, int32_t *lengths, int32_t *cpLengths, int32_t *values, int32_t *prefix) const { BytesTrie bt(characters); int32_t startingTextIndex = (int32_t)utext_getNativeIndex(text); int32_t wordCount = 0; int32_t codePointsMatched = 0; for (UChar32 c = utext_next32(text); c >= 0; c=utext_next32(text)) { UStringTrieResult result = (codePointsMatched == 0) ? 
bt.first(transform(c)) : bt.next(transform(c)); int32_t lengthMatched = (int32_t)utext_getNativeIndex(text) - startingTextIndex; codePointsMatched += 1; if (USTRINGTRIE_HAS_VALUE(result)) { if (wordCount < limit) { if (values != NULL) { values[wordCount] = bt.getValue(); } if (lengths != NULL) { lengths[wordCount] = lengthMatched; } if (cpLengths != NULL) { cpLengths[wordCount] = codePointsMatched; } ++wordCount; } if (result == USTRINGTRIE_FINAL_VALUE) { break; } } else if (result == USTRINGTRIE_NO_MATCH) { break; } if (lengthMatched >= maxLength) { break; } } if (prefix != NULL) { *prefix = codePointsMatched; } return wordCount; } U_NAMESPACE_END U_NAMESPACE_USE U_CAPI int32_t U_EXPORT2 udict_swap(const UDataSwapper *ds, const void *inData, int32_t length, void *outData, UErrorCode *pErrorCode) { const UDataInfo *pInfo; int32_t headerSize; const uint8_t *inBytes; uint8_t *outBytes; const int32_t *inIndexes; int32_t indexes[DictionaryData::IX_COUNT]; int32_t i, offset, size; headerSize = udata_swapDataHeader(ds, inData, length, outData, pErrorCode); if (pErrorCode == NULL || U_FAILURE(*pErrorCode)) return 0; pInfo = (const UDataInfo *)((const char *)inData + 4); if (!(pInfo->dataFormat[0] == 0x44 && pInfo->dataFormat[1] == 0x69 && pInfo->dataFormat[2] == 0x63 && pInfo->dataFormat[3] == 0x74 && pInfo->formatVersion[0] == 1)) { udata_printError(ds, "udict_swap(): data format %02x.%02x.%02x.%02x (format version %02x) is not recognized as dictionary data\n", pInfo->dataFormat[0], pInfo->dataFormat[1], pInfo->dataFormat[2], pInfo->dataFormat[3], pInfo->formatVersion[0]); *pErrorCode = U_UNSUPPORTED_ERROR; return 0; } inBytes = (const uint8_t *)inData + headerSize; outBytes = (uint8_t *)outData + headerSize; inIndexes = (const int32_t *)inBytes; if (length >= 0) { length -= headerSize; if (length < (int32_t)(sizeof(indexes))) { udata_printError(ds, "udict_swap(): too few bytes (%d after header) for dictionary data\n", length); *pErrorCode = U_INDEX_OUTOFBOUNDS_ERROR; 
return 0; } } for (i = 0; i < DictionaryData::IX_COUNT; i++) { indexes[i] = udata_readInt32(ds, inIndexes[i]); } size = indexes[DictionaryData::IX_TOTAL_SIZE]; if (length >= 0) { if (length < size) { udata_printError(ds, "udict_swap(): too few bytes (%d after header) for all of dictionary data\n", length); *pErrorCode = U_INDEX_OUTOFBOUNDS_ERROR; return 0; } if (inBytes != outBytes) { uprv_memcpy(outBytes, inBytes, size); } offset = 0; ds->swapArray32(ds, inBytes, sizeof(indexes), outBytes, pErrorCode); offset = (int32_t)sizeof(indexes); int32_t trieType = indexes[DictionaryData::IX_TRIE_TYPE] & DictionaryData::TRIE_TYPE_MASK; int32_t nextOffset = indexes[DictionaryData::IX_RESERVED1_OFFSET]; if (trieType == DictionaryData::TRIE_TYPE_UCHARS) { ds->swapArray16(ds, inBytes + offset, nextOffset - offset, outBytes + offset, pErrorCode); } else if (trieType == DictionaryData::TRIE_TYPE_BYTES) { // nothing to do } else { udata_printError(ds, "udict_swap(): unknown trie type!\n"); *pErrorCode = U_UNSUPPORTED_ERROR; return 0; } // these next two sections are empty in the current format, // but may be used later. offset = nextOffset; nextOffset = indexes[DictionaryData::IX_RESERVED2_OFFSET]; offset = nextOffset; nextOffset = indexes[DictionaryData::IX_TOTAL_SIZE]; offset = nextOffset; } return headerSize + size; } #endif
mit
alexhenrie/poedit
deps/boost/libs/local_function/test/typeof_template.cpp
53
1154
// Copyright (C) 2009-2012 Lorenzo Caminiti // Distributed under the Boost Software License, Version 1.0 // (see accompanying file LICENSE_1_0.txt or a copy at // http://www.boost.org/LICENSE_1_0.txt) // Home at http://www.boost.org/libs/local_function #include <boost/config.hpp> #ifdef BOOST_NO_CXX11_VARIADIC_MACROS # error "variadic macros required" #else #include "addable.hpp" #include <boost/local_function.hpp> #include <boost/type_traits/remove_reference.hpp> #include <boost/concept_check.hpp> #include <boost/detail/lightweight_test.hpp> #include <algorithm> //[typeof_template template<typename T> T calculate(const T& factor) { T sum = 0; void BOOST_LOCAL_FUNCTION_TPL(const bind factor, bind& sum, T num) { // Local function `TYPEOF` does not need `typename`. BOOST_CONCEPT_ASSERT((Addable<typename boost::remove_reference< BOOST_LOCAL_FUNCTION_TYPEOF(sum)>::type>)); sum += factor * num; } BOOST_LOCAL_FUNCTION_NAME_TPL(add) add(6); return sum; } //] int main(void) { BOOST_TEST(calculate(10) == 60); return boost::report_errors(); } #endif // VARIADIC_MACROS
mit
huziyizero/godot
drivers/builtin_openssl2/ssl/s23_clnt.c
54
27000
/* ssl/s23_clnt.c */ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. 
If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] */ /* ==================================================================== * Copyright (c) 1998-2006 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. 
All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * openssl-core@openssl.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.openssl.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). 
* */ #include <stdio.h> #include "ssl_locl.h" #include <openssl/buffer.h> #include <openssl/rand.h> #include <openssl/objects.h> #include <openssl/evp.h> static const SSL_METHOD *ssl23_get_client_method(int ver); static int ssl23_client_hello(SSL *s); static int ssl23_get_server_hello(SSL *s); static const SSL_METHOD *ssl23_get_client_method(int ver) { #ifndef OPENSSL_NO_SSL2 if (ver == SSL2_VERSION) return (SSLv2_client_method()); #endif #ifndef OPENSSL_NO_SSL3 if (ver == SSL3_VERSION) return (SSLv3_client_method()); #endif if (ver == TLS1_VERSION) return (TLSv1_client_method()); else if (ver == TLS1_1_VERSION) return (TLSv1_1_client_method()); else if (ver == TLS1_2_VERSION) return (TLSv1_2_client_method()); else return (NULL); } IMPLEMENT_ssl23_meth_func(SSLv23_client_method, ssl_undefined_function, ssl23_connect, ssl23_get_client_method) int ssl23_connect(SSL *s) { BUF_MEM *buf = NULL; unsigned long Time = (unsigned long)time(NULL); void (*cb) (const SSL *ssl, int type, int val) = NULL; int ret = -1; int new_state, state; RAND_add(&Time, sizeof(Time), 0); ERR_clear_error(); clear_sys_error(); if (s->info_callback != NULL) cb = s->info_callback; else if (s->ctx->info_callback != NULL) cb = s->ctx->info_callback; s->in_handshake++; if (!SSL_in_init(s) || SSL_in_before(s)) SSL_clear(s); for (;;) { state = s->state; switch (s->state) { case SSL_ST_BEFORE: case SSL_ST_CONNECT: case SSL_ST_BEFORE | SSL_ST_CONNECT: case SSL_ST_OK | SSL_ST_CONNECT: if (s->session != NULL) { SSLerr(SSL_F_SSL23_CONNECT, SSL_R_SSL23_DOING_SESSION_ID_REUSE); ret = -1; goto end; } s->server = 0; if (cb != NULL) cb(s, SSL_CB_HANDSHAKE_START, 1); /* s->version=TLS1_VERSION; */ s->type = SSL_ST_CONNECT; if (s->init_buf == NULL) { if ((buf = BUF_MEM_new()) == NULL) { ret = -1; goto end; } if (!BUF_MEM_grow(buf, SSL3_RT_MAX_PLAIN_LENGTH)) { ret = -1; goto end; } s->init_buf = buf; buf = NULL; } if (!ssl3_setup_buffers(s)) { ret = -1; goto end; } ssl3_init_finished_mac(s); s->state = 
SSL23_ST_CW_CLNT_HELLO_A; s->ctx->stats.sess_connect++; s->init_num = 0; break; case SSL23_ST_CW_CLNT_HELLO_A: case SSL23_ST_CW_CLNT_HELLO_B: s->shutdown = 0; ret = ssl23_client_hello(s); if (ret <= 0) goto end; s->state = SSL23_ST_CR_SRVR_HELLO_A; s->init_num = 0; break; case SSL23_ST_CR_SRVR_HELLO_A: case SSL23_ST_CR_SRVR_HELLO_B: ret = ssl23_get_server_hello(s); if (ret >= 0) cb = NULL; goto end; /* break; */ default: SSLerr(SSL_F_SSL23_CONNECT, SSL_R_UNKNOWN_STATE); ret = -1; goto end; /* break; */ } if (s->debug) { (void)BIO_flush(s->wbio); } if ((cb != NULL) && (s->state != state)) { new_state = s->state; s->state = state; cb(s, SSL_CB_CONNECT_LOOP, 1); s->state = new_state; } } end: s->in_handshake--; if (buf != NULL) BUF_MEM_free(buf); if (cb != NULL) cb(s, SSL_CB_CONNECT_EXIT, ret); return (ret); } static int ssl23_no_ssl2_ciphers(SSL *s) { SSL_CIPHER *cipher; STACK_OF(SSL_CIPHER) *ciphers; int i; ciphers = SSL_get_ciphers(s); for (i = 0; i < sk_SSL_CIPHER_num(ciphers); i++) { cipher = sk_SSL_CIPHER_value(ciphers, i); if (cipher->algorithm_ssl == SSL_SSLV2) return 0; } return 1; } /* * Fill a ClientRandom or ServerRandom field of length len. Returns <= 0 on * failure, 1 on success. 
*/ int ssl_fill_hello_random(SSL *s, int server, unsigned char *result, int len) { int send_time = 0; if (len < 4) return 0; if (server) send_time = (s->mode & SSL_MODE_SEND_SERVERHELLO_TIME) != 0; else send_time = (s->mode & SSL_MODE_SEND_CLIENTHELLO_TIME) != 0; if (send_time) { unsigned long Time = (unsigned long)time(NULL); unsigned char *p = result; l2n(Time, p); return RAND_pseudo_bytes(p, len - 4); } else return RAND_pseudo_bytes(result, len); } static int ssl23_client_hello(SSL *s) { unsigned char *buf; unsigned char *p, *d; int i, ch_len; unsigned long l; int ssl2_compat; int version = 0, version_major, version_minor; int al = 0; #ifndef OPENSSL_NO_COMP int j; SSL_COMP *comp; #endif int ret; unsigned long mask, options = s->options; ssl2_compat = (options & SSL_OP_NO_SSLv2) ? 0 : 1; if (ssl2_compat && ssl23_no_ssl2_ciphers(s)) ssl2_compat = 0; /* * SSL_OP_NO_X disables all protocols above X *if* there are * some protocols below X enabled. This is required in order * to maintain "version capability" vector contiguous. So * that if application wants to disable TLS1.0 in favour of * TLS1>=1, it would be insufficient to pass SSL_NO_TLSv1, the * answer is SSL_OP_NO_TLSv1|SSL_OP_NO_SSLv3|SSL_OP_NO_SSLv2. */ mask = SSL_OP_NO_TLSv1_1 | SSL_OP_NO_TLSv1 #if !defined(OPENSSL_NO_SSL3) | SSL_OP_NO_SSLv3 #endif #if !defined(OPENSSL_NO_SSL2) | (ssl2_compat ? 
SSL_OP_NO_SSLv2 : 0) #endif ; #if !defined(OPENSSL_NO_TLS1_2_CLIENT) version = TLS1_2_VERSION; if ((options & SSL_OP_NO_TLSv1_2) && (options & mask) != mask) version = TLS1_1_VERSION; #else version = TLS1_1_VERSION; #endif mask &= ~SSL_OP_NO_TLSv1_1; if ((options & SSL_OP_NO_TLSv1_1) && (options & mask) != mask) version = TLS1_VERSION; mask &= ~SSL_OP_NO_TLSv1; #if !defined(OPENSSL_NO_SSL3) if ((options & SSL_OP_NO_TLSv1) && (options & mask) != mask) version = SSL3_VERSION; mask &= ~SSL_OP_NO_SSLv3; #endif #if !defined(OPENSSL_NO_SSL2) if ((options & SSL_OP_NO_SSLv3) && (options & mask) != mask) version = SSL2_VERSION; #endif #ifndef OPENSSL_NO_TLSEXT if (version != SSL2_VERSION) { /* * have to disable SSL 2.0 compatibility if we need TLS extensions */ if (s->tlsext_hostname != NULL) ssl2_compat = 0; if (s->tlsext_status_type != -1) ssl2_compat = 0; # ifdef TLSEXT_TYPE_opaque_prf_input if (s->ctx->tlsext_opaque_prf_input_callback != 0 || s->tlsext_opaque_prf_input != NULL) ssl2_compat = 0; # endif if (s->cert->cli_ext.meths_count != 0) ssl2_compat = 0; } #endif buf = (unsigned char *)s->init_buf->data; if (s->state == SSL23_ST_CW_CLNT_HELLO_A) { /* * Since we're sending s23 client hello, we're not reusing a session, as * we'd be using the method from the saved session instead */ if (!ssl_get_new_session(s, 0)) { return -1; } p = s->s3->client_random; if (ssl_fill_hello_random(s, 0, p, SSL3_RANDOM_SIZE) <= 0) return -1; if (version == TLS1_2_VERSION) { version_major = TLS1_2_VERSION_MAJOR; version_minor = TLS1_2_VERSION_MINOR; } else if (tls1_suiteb(s)) { SSLerr(SSL_F_SSL23_CLIENT_HELLO, SSL_R_ONLY_TLS_1_2_ALLOWED_IN_SUITEB_MODE); return -1; } else if (version == TLS1_1_VERSION) { version_major = TLS1_1_VERSION_MAJOR; version_minor = TLS1_1_VERSION_MINOR; } else if (version == TLS1_VERSION) { version_major = TLS1_VERSION_MAJOR; version_minor = TLS1_VERSION_MINOR; } #ifdef OPENSSL_FIPS else if (FIPS_mode()) { SSLerr(SSL_F_SSL23_CLIENT_HELLO, 
SSL_R_ONLY_TLS_ALLOWED_IN_FIPS_MODE); return -1; } #endif else if (version == SSL3_VERSION) { version_major = SSL3_VERSION_MAJOR; version_minor = SSL3_VERSION_MINOR; } else if (version == SSL2_VERSION) { version_major = SSL2_VERSION_MAJOR; version_minor = SSL2_VERSION_MINOR; } else { SSLerr(SSL_F_SSL23_CLIENT_HELLO, SSL_R_NO_PROTOCOLS_AVAILABLE); return (-1); } s->client_version = version; if (ssl2_compat) { /* create SSL 2.0 compatible Client Hello */ /* two byte record header will be written last */ d = &(buf[2]); p = d + 9; /* leave space for message type, version, * individual length fields */ *(d++) = SSL2_MT_CLIENT_HELLO; *(d++) = version_major; *(d++) = version_minor; /* Ciphers supported */ i = ssl_cipher_list_to_bytes(s, SSL_get_ciphers(s), p, 0); if (i == 0) { /* no ciphers */ SSLerr(SSL_F_SSL23_CLIENT_HELLO, SSL_R_NO_CIPHERS_AVAILABLE); return -1; } s2n(i, d); p += i; /* * put in the session-id length (zero since there is no reuse) */ s2n(0, d); if (s->options & SSL_OP_NETSCAPE_CHALLENGE_BUG) ch_len = SSL2_CHALLENGE_LENGTH; else ch_len = SSL2_MAX_CHALLENGE_LENGTH; /* write out sslv2 challenge */ /* * Note that ch_len must be <= SSL3_RANDOM_SIZE (32), because it * is one of SSL2_MAX_CHALLENGE_LENGTH (32) or * SSL2_MAX_CHALLENGE_LENGTH (16), but leave the check in for * futurproofing */ if (SSL3_RANDOM_SIZE < ch_len) i = SSL3_RANDOM_SIZE; else i = ch_len; s2n(i, d); memset(&(s->s3->client_random[0]), 0, SSL3_RANDOM_SIZE); if (RAND_pseudo_bytes (&(s->s3->client_random[SSL3_RANDOM_SIZE - i]), i) <= 0) return -1; memcpy(p, &(s->s3->client_random[SSL3_RANDOM_SIZE - i]), i); p += i; i = p - &(buf[2]); buf[0] = ((i >> 8) & 0xff) | 0x80; buf[1] = (i & 0xff); /* number of bytes to write */ s->init_num = i + 2; s->init_off = 0; ssl3_finish_mac(s, &(buf[2]), i); } else { /* create Client Hello in SSL 3.0/TLS 1.0 format */ /* * do the record header (5 bytes) and handshake message header (4 * bytes) last */ d = p = &(buf[9]); *(p++) = version_major; *(p++) = 
version_minor; /* Random stuff */ memcpy(p, s->s3->client_random, SSL3_RANDOM_SIZE); p += SSL3_RANDOM_SIZE; /* Session ID (zero since there is no reuse) */ *(p++) = 0; /* Ciphers supported (using SSL 3.0/TLS 1.0 format) */ i = ssl_cipher_list_to_bytes(s, SSL_get_ciphers(s), &(p[2]), ssl3_put_cipher_by_char); if (i == 0) { SSLerr(SSL_F_SSL23_CLIENT_HELLO, SSL_R_NO_CIPHERS_AVAILABLE); return -1; } #ifdef OPENSSL_MAX_TLS1_2_CIPHER_LENGTH /* * Some servers hang if client hello > 256 bytes as hack * workaround chop number of supported ciphers to keep it well * below this if we use TLS v1.2 */ if (TLS1_get_version(s) >= TLS1_2_VERSION && i > OPENSSL_MAX_TLS1_2_CIPHER_LENGTH) i = OPENSSL_MAX_TLS1_2_CIPHER_LENGTH & ~1; #endif s2n(i, p); p += i; /* COMPRESSION */ #ifdef OPENSSL_NO_COMP *(p++) = 1; #else if ((s->options & SSL_OP_NO_COMPRESSION) || !s->ctx->comp_methods) j = 0; else j = sk_SSL_COMP_num(s->ctx->comp_methods); *(p++) = 1 + j; for (i = 0; i < j; i++) { comp = sk_SSL_COMP_value(s->ctx->comp_methods, i); *(p++) = comp->id; } #endif *(p++) = 0; /* Add the NULL method */ #ifndef OPENSSL_NO_TLSEXT /* TLS extensions */ if (ssl_prepare_clienthello_tlsext(s) <= 0) { SSLerr(SSL_F_SSL23_CLIENT_HELLO, SSL_R_CLIENTHELLO_TLSEXT); return -1; } if ((p = ssl_add_clienthello_tlsext(s, p, buf + SSL3_RT_MAX_PLAIN_LENGTH, &al)) == NULL) { ssl3_send_alert(s, SSL3_AL_FATAL, al); SSLerr(SSL_F_SSL23_CLIENT_HELLO, ERR_R_INTERNAL_ERROR); return -1; } #endif l = p - d; /* fill in 4-byte handshake header */ d = &(buf[5]); *(d++) = SSL3_MT_CLIENT_HELLO; l2n3(l, d); l += 4; if (l > SSL3_RT_MAX_PLAIN_LENGTH) { SSLerr(SSL_F_SSL23_CLIENT_HELLO, ERR_R_INTERNAL_ERROR); return -1; } /* fill in 5-byte record header */ d = buf; *(d++) = SSL3_RT_HANDSHAKE; *(d++) = version_major; /* * Some servers hang if we use long client hellos and a record * number > TLS 1.0. 
*/ if (TLS1_get_client_version(s) > TLS1_VERSION) *(d++) = 1; else *(d++) = version_minor; s2n((int)l, d); /* number of bytes to write */ s->init_num = p - buf; s->init_off = 0; ssl3_finish_mac(s, &(buf[5]), s->init_num - 5); } s->state = SSL23_ST_CW_CLNT_HELLO_B; s->init_off = 0; } /* SSL3_ST_CW_CLNT_HELLO_B */ ret = ssl23_write_bytes(s); if ((ret >= 2) && s->msg_callback) { /* Client Hello has been sent; tell msg_callback */ if (ssl2_compat) s->msg_callback(1, SSL2_VERSION, 0, s->init_buf->data + 2, ret - 2, s, s->msg_callback_arg); else { s->msg_callback(1, version, SSL3_RT_HEADER, s->init_buf->data, 5, s, s->msg_callback_arg); s->msg_callback(1, version, SSL3_RT_HANDSHAKE, s->init_buf->data + 5, ret - 5, s, s->msg_callback_arg); } } return ret; } static int ssl23_get_server_hello(SSL *s) { char buf[8]; unsigned char *p; int i; int n; n = ssl23_read_bytes(s, 7); if (n != 7) return (n); p = s->packet; memcpy(buf, p, n); if ((p[0] & 0x80) && (p[2] == SSL2_MT_SERVER_HELLO) && (p[5] == 0x00) && (p[6] == 0x02)) { #ifdef OPENSSL_NO_SSL2 SSLerr(SSL_F_SSL23_GET_SERVER_HELLO, SSL_R_UNSUPPORTED_PROTOCOL); goto err; #else /* we are talking sslv2 */ /* * we need to clean up the SSLv3 setup and put in the sslv2 stuff. */ int ch_len; if (s->options & SSL_OP_NO_SSLv2) { SSLerr(SSL_F_SSL23_GET_SERVER_HELLO, SSL_R_UNSUPPORTED_PROTOCOL); goto err; } if (s->s2 == NULL) { if (!ssl2_new(s)) goto err; } else ssl2_clear(s); if (s->options & SSL_OP_NETSCAPE_CHALLENGE_BUG) ch_len = SSL2_CHALLENGE_LENGTH; else ch_len = SSL2_MAX_CHALLENGE_LENGTH; /* write out sslv2 challenge */ /* * Note that ch_len must be <= SSL3_RANDOM_SIZE (32), because it is * one of SSL2_MAX_CHALLENGE_LENGTH (32) or SSL2_MAX_CHALLENGE_LENGTH * (16), but leave the check in for futurproofing */ i = (SSL3_RANDOM_SIZE < ch_len) ? 
SSL3_RANDOM_SIZE : ch_len; s->s2->challenge_length = i; memcpy(s->s2->challenge, &(s->s3->client_random[SSL3_RANDOM_SIZE - i]), i); if (s->s3 != NULL) ssl3_free(s); if (!BUF_MEM_grow_clean(s->init_buf, SSL2_MAX_RECORD_LENGTH_3_BYTE_HEADER)) { SSLerr(SSL_F_SSL23_GET_SERVER_HELLO, ERR_R_BUF_LIB); goto err; } s->state = SSL2_ST_GET_SERVER_HELLO_A; if (!(s->client_version == SSL2_VERSION)) /* * use special padding (SSL 3.0 draft/RFC 2246, App. E.2) */ s->s2->ssl2_rollback = 1; /* * setup the 7 bytes we have read so we get them from the sslv2 * buffer */ s->rstate = SSL_ST_READ_HEADER; s->packet_length = n; s->packet = &(s->s2->rbuf[0]); memcpy(s->packet, buf, n); s->s2->rbuf_left = n; s->s2->rbuf_offs = 0; /* we have already written one */ s->s2->write_sequence = 1; s->method = SSLv2_client_method(); s->handshake_func = s->method->ssl_connect; #endif } else if (p[1] == SSL3_VERSION_MAJOR && p[2] <= TLS1_2_VERSION_MINOR && ((p[0] == SSL3_RT_HANDSHAKE && p[5] == SSL3_MT_SERVER_HELLO) || (p[0] == SSL3_RT_ALERT && p[3] == 0 && p[4] == 2))) { /* we have sslv3 or tls1 (server hello or alert) */ #ifndef OPENSSL_NO_SSL3 if ((p[2] == SSL3_VERSION_MINOR) && !(s->options & SSL_OP_NO_SSLv3)) { # ifdef OPENSSL_FIPS if (FIPS_mode()) { SSLerr(SSL_F_SSL23_GET_SERVER_HELLO, SSL_R_ONLY_TLS_ALLOWED_IN_FIPS_MODE); goto err; } # endif s->version = SSL3_VERSION; s->method = SSLv3_client_method(); } else #endif if ((p[2] == TLS1_VERSION_MINOR) && !(s->options & SSL_OP_NO_TLSv1)) { s->version = TLS1_VERSION; s->method = TLSv1_client_method(); } else if ((p[2] == TLS1_1_VERSION_MINOR) && !(s->options & SSL_OP_NO_TLSv1_1)) { s->version = TLS1_1_VERSION; s->method = TLSv1_1_client_method(); } else if ((p[2] == TLS1_2_VERSION_MINOR) && !(s->options & SSL_OP_NO_TLSv1_2)) { s->version = TLS1_2_VERSION; s->method = TLSv1_2_client_method(); } else { SSLerr(SSL_F_SSL23_GET_SERVER_HELLO, SSL_R_UNSUPPORTED_PROTOCOL); goto err; } s->session->ssl_version = s->version; /* ensure that TLS_MAX_VERSION is 
up-to-date */ OPENSSL_assert(s->version <= TLS_MAX_VERSION); if (p[0] == SSL3_RT_ALERT && p[5] != SSL3_AL_WARNING) { /* fatal alert */ void (*cb) (const SSL *ssl, int type, int val) = NULL; int j; if (s->info_callback != NULL) cb = s->info_callback; else if (s->ctx->info_callback != NULL) cb = s->ctx->info_callback; i = p[5]; if (cb != NULL) { j = (i << 8) | p[6]; cb(s, SSL_CB_READ_ALERT, j); } if (s->msg_callback) { s->msg_callback(0, s->version, SSL3_RT_HEADER, p, 5, s, s->msg_callback_arg); s->msg_callback(0, s->version, SSL3_RT_ALERT, p + 5, 2, s, s->msg_callback_arg); } s->rwstate = SSL_NOTHING; SSLerr(SSL_F_SSL23_GET_SERVER_HELLO, SSL_AD_REASON_OFFSET + p[6]); goto err; } if (!ssl_init_wbio_buffer(s, 1)) goto err; /* we are in this state */ s->state = SSL3_ST_CR_SRVR_HELLO_A; /* * put the 7 bytes we have read into the input buffer for SSLv3 */ s->rstate = SSL_ST_READ_HEADER; s->packet_length = n; if (s->s3->rbuf.buf == NULL) if (!ssl3_setup_read_buffer(s)) goto err; s->packet = &(s->s3->rbuf.buf[0]); memcpy(s->packet, buf, n); s->s3->rbuf.left = n; s->s3->rbuf.offset = 0; s->handshake_func = s->method->ssl_connect; } else { SSLerr(SSL_F_SSL23_GET_SERVER_HELLO, SSL_R_UNKNOWN_PROTOCOL); goto err; } s->init_num = 0; return (SSL_connect(s)); err: return (-1); }
mit
egoitzro/poedit
deps/boost/libs/spirit/test/karma/stream.cpp
59
4525
// Copyright (c) 2001-2011 Hartmut Kaiser // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include <cwchar> #include <streambuf> #include <iostream> #include <boost/config/warning_disable.hpp> #include <boost/detail/lightweight_test.hpp> #include <boost/cstdint.hpp> #include <boost/spirit/include/karma_char.hpp> #include <boost/spirit/include/karma_string.hpp> #include <boost/spirit/include/karma_stream.hpp> #include <boost/spirit/include/karma_directive.hpp> #include <boost/spirit/include/phoenix_core.hpp> #include <boost/spirit/include/phoenix_operator.hpp> #include "test.hpp" using namespace spirit_test; // a simple complex number representation z = a + bi struct complex { complex (double a, double b) : a(a), b(b) {} double a; double b; template <typename Char> friend std::basic_ostream<Char>& operator<< (std::basic_ostream<Char>& os, complex z) { os << "{" << z.a << "," << z.b << "}"; return os; } }; /////////////////////////////////////////////////////////////////////////////// int main() { using namespace boost::spirit; { BOOST_TEST(test("x", stream, 'x')); BOOST_TEST(test("xyz", stream, "xyz")); BOOST_TEST(test("xyz", stream, std::string("xyz"))); BOOST_TEST(test("1", stream, 1)); BOOST_TEST(test("1.1", stream, 1.1)); BOOST_TEST(test("{1.2,2.4}", stream, complex(1.2, 2.4))); } { BOOST_TEST(test("x", stream('x'))); BOOST_TEST(test("xyz", stream("xyz"))); BOOST_TEST(test("xyz", stream(std::string("xyz")))); BOOST_TEST(test("1", stream(1))); BOOST_TEST(test("1.1", stream(1.1))); BOOST_TEST(test("{1.2,2.4}", stream(complex(1.2, 2.4)))); } { using namespace boost::spirit::ascii; BOOST_TEST(test("x", lower[stream], 'X')); BOOST_TEST(test("xyz", lower[stream], "XYZ")); BOOST_TEST(test("xyz", lower[stream], std::string("XYZ"))); BOOST_TEST(test("X", upper[stream], 'x')); BOOST_TEST(test("XYZ", upper[stream], "xyz")); BOOST_TEST(test("XYZ", upper[stream], 
std::string("xyz"))); } { BOOST_TEST(test_delimited("x ", stream, 'x', ' ')); BOOST_TEST(test_delimited("xyz ", stream, "xyz", ' ')); BOOST_TEST(test_delimited("xyz ", stream, std::string("xyz"), ' ')); BOOST_TEST(test_delimited("1 ", stream, 1, ' ')); BOOST_TEST(test_delimited("1.1 ", stream, 1.1, ' ')); BOOST_TEST(test_delimited("{1.2,2.4} ", stream, complex(1.2, 2.4), ' ')); } { typedef karma::stream_generator<utf8_char> utf8_stream_type; utf8_stream_type const utf8_stream = utf8_stream_type(); BOOST_TEST(test_delimited("x ", utf8_stream, 'x', ' ')); BOOST_TEST(test_delimited("xyz ", utf8_stream, "xyz", ' ')); BOOST_TEST(test_delimited("xyz ", utf8_stream, std::string("xyz"), ' ')); BOOST_TEST(test_delimited("1 ", utf8_stream, 1, ' ')); BOOST_TEST(test_delimited("1.1 ", utf8_stream, 1.1, ' ')); BOOST_TEST(test_delimited("{1.2,2.4} ", utf8_stream, complex(1.2, 2.4), ' ')); BOOST_TEST(test("x", utf8_stream('x'))); BOOST_TEST(test("xyz", utf8_stream("xyz"))); BOOST_TEST(test("xyz", utf8_stream(std::string("xyz")))); BOOST_TEST(test("1", utf8_stream(1))); BOOST_TEST(test("1.1", utf8_stream(1.1))); BOOST_TEST(test("{1.2,2.4}", utf8_stream(complex(1.2, 2.4)))); } { using namespace boost::spirit::ascii; BOOST_TEST(test_delimited("x ", lower[stream], 'X', ' ')); BOOST_TEST(test_delimited("xyz ", lower[stream], "XYZ", ' ')); BOOST_TEST(test_delimited("xyz ", lower[stream], std::string("XYZ"), ' ')); BOOST_TEST(test_delimited("X ", upper[stream], 'x', ' ')); BOOST_TEST(test_delimited("XYZ ", upper[stream], "xyz", ' ')); BOOST_TEST(test_delimited("XYZ ", upper[stream], std::string("xyz"), ' ')); } { // lazy streams namespace phx = boost::phoenix; std::basic_string<char> s("abc"); BOOST_TEST((test("abc", stream(phx::val(s))))); BOOST_TEST((test("abc", stream(phx::ref(s))))); } { boost::optional<char> c; BOOST_TEST(!test("", stream, c)); c = 'x'; BOOST_TEST(test("x", stream, c)); } return boost::report_errors(); }
mit
valavanisleonidas/Automatic_Image_Classification
Libs/VLFEAT/src/mser.c
64
17322
/** @file mser-driver.c ** @author Andrea Vedaldi ** @brief MSER command line driver - Definition ** @internal **/ /* Copyright (C) 2007-12 Andrea Vedaldi and Brian Fulkerson. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ #define VL_MSER_DRIVER_VERSION 0.2 #include "generic-driver.h" #include <vl/generic.h> #include <vl/stringop.h> #include <vl/pgm.h> #include <vl/mser.h> #include <vl/getopt_long.h> #include <stdlib.h> #include <stdio.h> #include <assert.h> /* ----------------------------------------------------------------- */ /* help message */ char const help_message [] = "Usage: %s [options] files ...\n" "\n" "Options include:\n" " --verbose -v Be verbose\n" " --help -h Print this help message\n" " --seeds Specify seeds file\n" " --frames Specify frames file\n" " --meta Specify meta file\n" " --delta -d Specify MSER delta paramter\n" " --epsilon -e Specify MSER epsilon parameter\n" " --no-dups Remove duplicate\n" " --dups Keep duplicates\n" " --max-area Specify maximum region (relative) area\n" " --min-area Specify minimum region (relative) area\n" " --max-variation Specify maximum absolute region stability\n" " --bright-on-dark Enable or disable bright-on-dark regions (default 1)\n" " --dark-on-bright Enable or disable dark-on-bright regions (default 1)\n" "\n" ; /* ----------------------------------------------------------------- */ /* long options codes */ enum { opt_seed = 1000, opt_frame, opt_meta, opt_max_area, opt_min_area, opt_max_variation, opt_min_diversity, opt_bright, opt_dark } ; /* short options */ char const opts [] = "vhd:" ; /* long options */ struct option const longopts [] = { { "verbose", no_argument, 0, 'v' }, { "help", no_argument, 0, 'h' }, { "delta", required_argument, 0, 'd' }, { "seeds", optional_argument, 0, opt_seed }, { "frames", optional_argument, 0, opt_frame }, { "meta", optional_argument, 0, opt_meta }, { "max-area", 
required_argument, 0, opt_max_area }, { "min-area", required_argument, 0, opt_min_area }, { "max-variation", required_argument, 0, opt_max_variation }, { "min-diversity", required_argument, 0, opt_min_diversity }, { "bright-on-dark", required_argument, 0, opt_bright }, { "dark-on-bright", required_argument, 0, opt_dark }, { 0, 0, 0, 0 } } ; /* ----------------------------------------------------------------- */ /** @brief MSER driver entry point **/ int main(int argc, char **argv) { /* algorithm parameters */ double delta = -1 ; double max_area = -1 ; double min_area = -1 ; double max_variation = -1 ; double min_diversity = -1 ; int bright_on_dark = 1 ; int dark_on_bright = 1 ; vl_bool err = VL_ERR_OK ; char err_msg [1024] ; int n ; int exit_code = 0 ; int verbose = 0 ; VlFileMeta frm = {0, "%.frame", VL_PROT_ASCII, "", 0} ; VlFileMeta piv = {0, "%.mser", VL_PROT_ASCII, "", 0} ; VlFileMeta met = {0, "%.meta", VL_PROT_ASCII, "", 0} ; #define ERRF(msg, arg) { \ err = VL_ERR_BAD_ARG ; \ snprintf(err_msg, sizeof(err_msg), msg, arg) ; \ break ; \ } #define ERR(msg) { \ err = VL_ERR_BAD_ARG ; \ snprintf(err_msg, sizeof(err_msg), msg) ; \ break ; \ } /* ------------------------------------------------------------------ * Parse options * --------------------------------------------------------------- */ while (!err) { int ch = getopt_long(argc, argv, opts, longopts, 0) ; /* If there are no files passed as input, print the help and settings */ if (ch == -1 && argc - optind == 0) ch = 'h'; /* end of option list? */ if (ch == -1) break; /* process options */ switch (ch) { /* .......................................................... */ case '?' 
: ERRF("Invalid option '%s'.", argv [optind - 1]) ; break ; case ':' : ERRF("Missing mandatory argument for option '%s'.", argv [optind - 1]) ; break ; case 'h' : printf (help_message, argv [0]) ; printf ("MSERs filespec: `%s'\n", piv.pattern) ; printf ("Frames filespec: `%s'\n", frm.pattern) ; printf ("Meta filespec: `%s'\n", met.pattern) ; printf ("Version: driver %s; libvl %s\n", VL_XSTRINGIFY(VL_MSER_DRIVER_VERSION), vl_get_version_string()) ; exit (0) ; break ; case 'v' : ++ verbose ; break ; /* .......................................................... */ case 'd' : n = sscanf (optarg, "%lf", &delta) ; if (n == 0 || delta < 0) ERRF("The argument of '%s' must be a non-negative number.", argv [optind - 1]) ; break ; /* ........................................................... */ case opt_max_area : n = sscanf (optarg, "%lf", &max_area) ; if (n == 0 || max_area < 0 || max_area > 1) ERR("max-area argument must be in the [0,1] range.") ; break ; case opt_min_area : n = sscanf (optarg, "%lf", &min_area) ; if (n == 0 || min_area < 0 || min_area > 1) ERR("min-area argument must be in the [0,1] range.") ; break ; case opt_max_variation : n = sscanf (optarg, "%lf", &max_variation) ; if (n == 0 || max_variation < 0) ERR("max-variation argument must be non-negative.") ; break ; case opt_min_diversity : n = sscanf (optarg, "%lf", &min_diversity) ; if (n == 0 || min_diversity < 0 || min_diversity > 1) ERR("min-diversity argument must be in the [0,1] range.") ; break ; /* ........................................................... 
*/ case opt_frame : err = vl_file_meta_parse (&frm, optarg) ; if (err) ERRF("The arguments of '%s' is invalid.", argv [optind - 1]) ; break ; case opt_seed : err = vl_file_meta_parse (&piv, optarg) ; if (err) ERRF("The arguments of '%s' is invalid.", argv [optind - 1]) ; break ; case opt_meta : err = vl_file_meta_parse (&met, optarg) ; if (err) ERRF("The arguments of '%s' is invalid.", argv [optind - 1]) ; if (met.protocol != VL_PROT_ASCII) ERR("meta file supports only ASCII protocol") ; break ; case opt_bright : n = sscanf (optarg, "%d", &bright_on_dark) ; if (n == 0 || (bright_on_dark != 0 && bright_on_dark != 1)) ERR("bright_on_dark must be 0 or 1.") ; break ; case opt_dark : n = sscanf (optarg, "%d", &dark_on_bright) ; if (n == 0 || (dark_on_bright != 0 && dark_on_bright != 1)) ERR("dark_on_bright must be 0 or 1.") ; break ; /* .......................................................... */ case 0 : default : abort() ; } } /* check for parsing errors */ if (err) { fprintf(stderr, "%s: error: %s (%d)\n", argv [0], err_msg, err) ; exit (1) ; } /* parse other arguments (filenames) */ argc -= optind ; argv += optind ; /* make sure at least one file */ if (piv.active == 0 && frm.active == 0) { frm.active = 1 ; } if (verbose > 1) { printf("mser: frames output\n") ; printf("mser: active %d\n", frm.active ) ; printf("mser: pattern %s\n", frm.pattern) ; printf("mser: protocol %s\n", vl_string_protocol_name (frm.protocol)) ; printf("mser: seeds output\n") ; printf("mser: active %d\n", piv.active ) ; printf("mser: pattern %s\n", piv.pattern) ; printf("mser: protocol %s\n", vl_string_protocol_name (piv.protocol)) ; printf("mser: meta output\n") ; printf("mser: active %d\n", met.active ) ; printf("mser: pattern %s\n", met.pattern) ; printf("mser: protocol %s\n", vl_string_protocol_name (met.protocol)) ; } /* ------------------------------------------------------------------ * Process one image per time * --------------------------------------------------------------- */ while 
(argc--) { char basename [1024] ; char const *name = *argv++ ; VlMserFilt *filt = 0 ; VlMserFilt *filtinv = 0 ; vl_uint8 *data = 0 ; vl_uint8 *datainv = 0 ; VlPgmImage pim ; vl_uint const *regions ; vl_uint const *regionsinv ; float const *frames ; float const *framesinv ; enum {ndims = 2} ; int dims [ndims] ; int nregions = 0, nregionsinv = 0, nframes = 0, nframesinv =0; int i, j, dof ; vl_size q ; FILE *in = 0 ; /* Open files ------------------------------------------------ */ /* get basenmae from filename */ q = vl_string_basename (basename, sizeof(basename), name, 1) ; err = (q >= sizeof(basename)) ; if (err) { snprintf(err_msg, sizeof(err_msg), "Basename of '%s' is too long", name); err = VL_ERR_OVERFLOW ; goto done ; } if (verbose) { printf("mser: processing '%s'\n", name) ; } if (verbose > 1) { printf("mser: basename is '%s'\n", basename) ; } #define WERR(name) \ if (err == VL_ERR_OVERFLOW) { \ snprintf(err_msg, sizeof(err_msg), \ "Output file name too long.") ; \ goto done ; \ } else if (err) { \ snprintf(err_msg, sizeof(err_msg), \ "Could not open '%s' for writing.", name) ; \ goto done ; \ } /* open input file */ in = fopen (name, "rb") ; if (!in) { err = VL_ERR_IO ; snprintf(err_msg, sizeof(err_msg), "Could not open '%s' for reading.", name) ; goto done ; } /* open output files */ err = vl_file_meta_open (&piv, basename, "w") ; WERR(piv.name) ; err = vl_file_meta_open (&frm, basename, "w") ; WERR(frm.name) ; err = vl_file_meta_open (&met, basename, "w") ; WERR(met.name) ; if (verbose > 1) { if (piv.active) printf("mser: writing seeds to '%s'\n", piv.name); if (frm.active) printf("mser: writing frames to '%s'\n", frm.name); if (met.active) printf("mser: writing meta to '%s'\n", met.name); } /* Read image data -------------------------------------------- */ /* read source image header */ err = vl_pgm_extract_head (in, &pim) ; if (err) { err = VL_ERR_IO ; snprintf(err_msg, sizeof(err_msg), "PGM header corrputed.") ; goto done ; } if (verbose) { 
printf("mser: image is %" VL_FMT_SIZE " by %" VL_FMT_SIZE " pixels\n", pim. width, pim. height) ; } /* allocate buffer */ data = malloc(vl_pgm_get_npixels (&pim) * vl_pgm_get_bpp (&pim)) ; if (!data) { err = VL_ERR_ALLOC ; snprintf(err_msg, sizeof(err_msg), "Could not allocate enough memory.") ; goto done ; } /* read PGM */ err = vl_pgm_extract_data (in, &pim, data) ; if (err) { snprintf(err_msg, sizeof(err_msg), "PGM body corrputed.") ; goto done ; } /* Process data ---------------------------------------------- */ dims[0] = pim.width ; dims[1] = pim.height ; filt = vl_mser_new (ndims, dims) ; filtinv = vl_mser_new (ndims, dims) ; if (!filt || !filtinv) { snprintf(err_msg, sizeof(err_msg), "Could not create an MSER filter.") ; goto done ; } if (delta >= 0) vl_mser_set_delta (filt, (vl_mser_pix) delta) ; if (max_area >= 0) vl_mser_set_max_area (filt, max_area) ; if (min_area >= 0) vl_mser_set_min_area (filt, min_area) ; if (max_variation >= 0) vl_mser_set_max_variation (filt, max_variation) ; if (min_diversity >= 0) vl_mser_set_min_diversity (filt, min_diversity) ; if (delta >= 0) vl_mser_set_delta (filtinv, (vl_mser_pix) delta) ; if (max_area >= 0) vl_mser_set_max_area (filtinv, max_area) ; if (min_area >= 0) vl_mser_set_min_area (filtinv, min_area) ; if (max_variation >= 0) vl_mser_set_max_variation (filtinv, max_variation) ; if (min_diversity >= 0) vl_mser_set_min_diversity (filtinv, min_diversity) ; if (verbose) { printf("mser: parameters:\n") ; printf("mser: delta = %d\n", vl_mser_get_delta (filt)) ; printf("mser: max_area = %g\n", vl_mser_get_max_area (filt)) ; printf("mser: min_area = %g\n", vl_mser_get_min_area (filt)) ; printf("mser: max_variation = %g\n", vl_mser_get_max_variation (filt)) ; printf("mser: min_diversity = %g\n", vl_mser_get_min_diversity (filt)) ; } if (dark_on_bright) { vl_mser_process (filt, (vl_mser_pix*) data) ; /* Save result ----------------------------------------------- */ nregions = vl_mser_get_regions_num (filt) ; regions = 
vl_mser_get_regions (filt) ; if (piv.active) { for (i = 0 ; i < nregions ; ++i) { fprintf(piv.file, "%d ", regions [i]) ; } } if (frm.active) { vl_mser_ell_fit (filt) ; nframes = vl_mser_get_ell_num (filt) ; dof = vl_mser_get_ell_dof (filt) ; frames = vl_mser_get_ell (filt) ; for (i = 0 ; i < nframes ; ++i) { for (j = 0 ; j < dof ; ++j) { fprintf(frm.file, "%f ", *frames++) ; } fprintf(frm.file, "\n") ; } } } if (bright_on_dark) { /* allocate buffer */ datainv = malloc(vl_pgm_get_npixels (&pim) * vl_pgm_get_bpp (&pim)) ; for (i = 0; i < (signed)vl_pgm_get_npixels (&pim); i++) { datainv[i] = ~data[i]; /* 255 - data[i] */ } if (!datainv) { err = VL_ERR_ALLOC ; snprintf(err_msg, sizeof(err_msg), "Could not allocate enough memory.") ; goto done ; } vl_mser_process (filtinv, (vl_mser_pix*) datainv) ; /* Save result ----------------------------------------------- */ nregionsinv = vl_mser_get_regions_num (filtinv) ; regionsinv = vl_mser_get_regions (filtinv) ; if (piv.active) { for (i = 0 ; i < nregionsinv ; ++i) { fprintf(piv.file, "%d ", -regionsinv [i]) ; } } if (frm.active) { vl_mser_ell_fit (filtinv) ; nframesinv = vl_mser_get_ell_num (filtinv) ; dof = vl_mser_get_ell_dof (filtinv) ; framesinv = vl_mser_get_ell (filtinv) ; for (i = 0 ; i < nframesinv ; ++i) { for (j = 0 ; j < dof ; ++j) { fprintf(frm.file, "%f ", *framesinv++) ; } fprintf(frm.file, "\n") ; } } } if (met.active) { fprintf(met.file, "<mser\n") ; fprintf(met.file, " input = '%s'\n", name) ; if (piv.active) { fprintf(met.file, " seeds = '%s'\n", piv.name) ; } if (frm.active) { fprintf(met.file," frames = '%s'\n", frm.name) ; } fprintf(met.file, ">\n") ; } /* Next guy ----------------------------------------------- */ done : /* release filter */ if (filt) { vl_mser_delete (filt) ; filt = 0 ; } if (filtinv) { vl_mser_delete (filtinv) ; filtinv = 0 ; } /* release image data */ if (data) { free (data) ; data = 0 ; } if (datainv) { free (datainv) ; datainv = 0 ; } /* close files */ if (in) { fclose (in) ; in 
= 0 ; } vl_file_meta_close (&frm) ; vl_file_meta_close (&piv) ; vl_file_meta_close (&met) ; /* if bad print error message */ if (err) { fprintf (stderr, "mser: err: %s (%d)\n", err_msg, err) ; exit_code = 1 ; } } /* quit */ return exit_code ; }
mit
BenKeyFSI/poedit
deps/boost/libs/iostreams/test/stream_offset_32bit_test.cpp
64
1640
/* * Distributed under the Boost Software License, Version 1.0.(See accompanying * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.) * * See http://www.boost.org/libs/iostreams for documentation. * File: libs/iostreams/test/stream_offset_32bit_test.cpp * Date: Sun Dec 23 21:11:23 MST 2007 * Copyright: 2007-2008 CodeRage, LLC * Author: Jonathan Turkanis * Contact: turkanis at coderage dot com * * Tests the functions defined in the header "boost/iostreams/positioning.hpp" * with small (32-bit) file offsets. */ #include <boost/iostreams/positioning.hpp> #include <boost/test/test_tools.hpp> #include <boost/test/unit_test.hpp> #include <boost/type_traits/is_integral.hpp> using namespace std; using namespace boost; using namespace boost::iostreams; using boost::unit_test::test_suite; void stream_offset_32bit_test() { stream_offset small_file = 1000000; stream_offset off = -small_file; streampos pos = offset_to_position(off); while (off < small_file) { BOOST_CHECK(off == position_to_offset(offset_to_position(off))); BOOST_CHECK(pos == offset_to_position(position_to_offset(pos))); off += 20000; pos += 20000; BOOST_CHECK(off == position_to_offset(offset_to_position(off))); BOOST_CHECK(pos == offset_to_position(position_to_offset(pos))); off -= 10000; pos -= 10000; } } test_suite* init_unit_test_suite(int, char* []) { test_suite* test = BOOST_TEST_SUITE("stream_offset 32-bit test"); test->add(BOOST_TEST_CASE(&stream_offset_32bit_test)); return test; }
mit
rollenrolm/godot
drivers/freetype/src/base/ftapi.c
66
4042
/***************************************************************************/ /* */ /* ftapi.c */ /* */ /* The FreeType compatibility functions (body). */ /* */ /* Copyright 2002-2016 by */ /* David Turner, Robert Wilhelm, and Werner Lemberg. */ /* */ /* This file is part of the FreeType project, and may only be used, */ /* modified, and distributed under the terms of the FreeType project */ /* license, LICENSE.TXT. By continuing to use, modify, or distribute */ /* this file you indicate that you have read the license and */ /* understand and accept it fully. */ /* */ /***************************************************************************/ #include <ft2build.h> #include FT_LIST_H #include FT_OUTLINE_H #include FT_INTERNAL_OBJECTS_H #include FT_INTERNAL_DEBUG_H #include FT_INTERNAL_STREAM_H #include FT_TRUETYPE_TABLES_H #include FT_OUTLINE_H /*************************************************************************/ /*************************************************************************/ /*************************************************************************/ /**** ****/ /**** ****/ /**** C O M P A T I B I L I T Y ****/ /**** ****/ /**** ****/ /*************************************************************************/ /*************************************************************************/ /*************************************************************************/ /* backwards compatibility API */ FT_BASE_DEF( void ) FT_New_Memory_Stream( FT_Library library, FT_Byte* base, FT_ULong size, FT_Stream stream ) { FT_UNUSED( library ); FT_Stream_OpenMemory( stream, base, size ); } FT_BASE_DEF( FT_Error ) FT_Seek_Stream( FT_Stream stream, FT_ULong pos ) { return FT_Stream_Seek( stream, pos ); } FT_BASE_DEF( FT_Error ) FT_Skip_Stream( FT_Stream stream, FT_Long distance ) { return FT_Stream_Skip( stream, distance ); } FT_BASE_DEF( FT_Error ) FT_Read_Stream( FT_Stream stream, FT_Byte* buffer, FT_ULong count ) { return FT_Stream_Read( stream, buffer, count ); } 
FT_BASE_DEF( FT_Error ) FT_Read_Stream_At( FT_Stream stream, FT_ULong pos, FT_Byte* buffer, FT_ULong count ) { return FT_Stream_ReadAt( stream, pos, buffer, count ); } FT_BASE_DEF( FT_Error ) FT_Extract_Frame( FT_Stream stream, FT_ULong count, FT_Byte** pbytes ) { return FT_Stream_ExtractFrame( stream, count, pbytes ); } FT_BASE_DEF( void ) FT_Release_Frame( FT_Stream stream, FT_Byte** pbytes ) { FT_Stream_ReleaseFrame( stream, pbytes ); } FT_BASE_DEF( FT_Error ) FT_Access_Frame( FT_Stream stream, FT_ULong count ) { return FT_Stream_EnterFrame( stream, count ); } FT_BASE_DEF( void ) FT_Forget_Frame( FT_Stream stream ) { FT_Stream_ExitFrame( stream ); } /* END */
mit
stephaneAG/PengPod700
QtEsrc/qt-everywhere-opensource-src-4.8.5/src/3rdparty/webkit/Source/WebCore/dom/ScriptRunner.cpp
67
3947
/* * Copyright (C) 2010 Google, Inc. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "config.h" #include "ScriptRunner.h" #include "CachedScript.h" #include "Document.h" #include "Element.h" #include "PendingScript.h" #include "ScriptElement.h" namespace WebCore { ScriptRunner::ScriptRunner(Document* document) : m_document(document) , m_timer(this, &ScriptRunner::timerFired) { ASSERT(document); } ScriptRunner::~ScriptRunner() { for (size_t i = 0; i < m_scriptsToExecuteSoon.size(); ++i) m_document->decrementLoadEventDelayCount(); for (size_t i = 0; i < m_scriptsToExecuteInOrder.size(); ++i) m_document->decrementLoadEventDelayCount(); } void ScriptRunner::queueScriptForExecution(ScriptElement* scriptElement, CachedResourceHandle<CachedScript> cachedScript, ExecutionType executionType) { ASSERT(scriptElement); Element* element = scriptElement->element(); ASSERT(element); ASSERT(element->inDocument()); m_document->incrementLoadEventDelayCount(); switch (executionType) { case ASYNC_EXECUTION: m_scriptsToExecuteSoon.append(PendingScript(element, cachedScript.get())); if (!m_timer.isActive()) m_timer.startOneShot(0); break; case IN_ORDER_EXECUTION: m_scriptsToExecuteInOrder.append(PendingScript(element, cachedScript.get())); break; default: ASSERT_NOT_REACHED(); } } void ScriptRunner::suspend() { m_timer.stop(); } void ScriptRunner::resume() { if (hasPendingScripts()) m_timer.startOneShot(0); } void ScriptRunner::notifyInOrderScriptReady() { ASSERT(!m_scriptsToExecuteInOrder.isEmpty()); m_timer.startOneShot(0); } void ScriptRunner::timerFired(Timer<ScriptRunner>* timer) { ASSERT_UNUSED(timer, timer == &m_timer); RefPtr<Document> protect(m_document); Vector<PendingScript> scripts; scripts.swap(m_scriptsToExecuteSoon); size_t numInOrderScriptsToExecute = 0; for (; numInOrderScriptsToExecute < m_scriptsToExecuteInOrder.size() && m_scriptsToExecuteInOrder[numInOrderScriptsToExecute].cachedScript()->isLoaded(); ++numInOrderScriptsToExecute) scripts.append(m_scriptsToExecuteInOrder[numInOrderScriptsToExecute]); if (numInOrderScriptsToExecute) 
m_scriptsToExecuteInOrder.remove(0, numInOrderScriptsToExecute); size_t size = scripts.size(); for (size_t i = 0; i < size; ++i) { CachedScript* cachedScript = scripts[i].cachedScript(); RefPtr<Element> element = scripts[i].releaseElementAndClear(); toScriptElement(element.get())->execute(cachedScript); m_document->decrementLoadEventDelayCount(); } } }
mit
mosafwat/InterviewTool
node_modules/node-sass/src/libsass/src/values.cpp
324
4873
#include "sass.hpp" #include "sass.h" #include "values.hpp" #include <stdint.h> namespace Sass { // convert value from C++ side to C-API union Sass_Value* ast_node_to_sass_value (const Expression_Ptr val) { if (val->concrete_type() == Expression::NUMBER) { Number_Ptr_Const res = Cast<Number>(val); return sass_make_number(res->value(), res->unit().c_str()); } else if (val->concrete_type() == Expression::COLOR) { Color_Ptr_Const col = Cast<Color>(val); return sass_make_color(col->r(), col->g(), col->b(), col->a()); } else if (val->concrete_type() == Expression::LIST) { List_Ptr_Const l = Cast<List>(val); union Sass_Value* list = sass_make_list(l->size(), l->separator(), l->is_bracketed()); for (size_t i = 0, L = l->length(); i < L; ++i) { Expression_Obj obj = l->at(i); auto val = ast_node_to_sass_value(obj); sass_list_set_value(list, i, val); } return list; } else if (val->concrete_type() == Expression::MAP) { Map_Ptr_Const m = Cast<Map>(val); union Sass_Value* map = sass_make_map(m->length()); size_t i = 0; for (Expression_Obj key : m->keys()) { sass_map_set_key(map, i, ast_node_to_sass_value(key)); sass_map_set_value(map, i, ast_node_to_sass_value(m->at(key))); ++ i; } return map; } else if (val->concrete_type() == Expression::NULL_VAL) { return sass_make_null(); } else if (val->concrete_type() == Expression::BOOLEAN) { Boolean_Ptr_Const res = Cast<Boolean>(val); return sass_make_boolean(res->value()); } else if (val->concrete_type() == Expression::STRING) { if (String_Quoted_Ptr_Const qstr = Cast<String_Quoted>(val)) { return sass_make_qstring(qstr->value().c_str()); } else if (String_Constant_Ptr_Const cstr = Cast<String_Constant>(val)) { return sass_make_string(cstr->value().c_str()); } } return sass_make_error("unknown sass value type"); } // convert value from C-API to C++ side Value_Ptr sass_value_to_ast_node (const union Sass_Value* val) { switch (sass_value_get_tag(val)) { case SASS_NUMBER: return SASS_MEMORY_NEW(Number, ParserState("[C-VALUE]"), 
sass_number_get_value(val), sass_number_get_unit(val)); break; case SASS_BOOLEAN: return SASS_MEMORY_NEW(Boolean, ParserState("[C-VALUE]"), sass_boolean_get_value(val)); break; case SASS_COLOR: return SASS_MEMORY_NEW(Color, ParserState("[C-VALUE]"), sass_color_get_r(val), sass_color_get_g(val), sass_color_get_b(val), sass_color_get_a(val)); break; case SASS_STRING: if (sass_string_is_quoted(val)) { return SASS_MEMORY_NEW(String_Quoted, ParserState("[C-VALUE]"), sass_string_get_value(val)); } else { return SASS_MEMORY_NEW(String_Constant, ParserState("[C-VALUE]"), sass_string_get_value(val)); } break; case SASS_LIST: { List_Ptr l = SASS_MEMORY_NEW(List, ParserState("[C-VALUE]"), sass_list_get_length(val), sass_list_get_separator(val)); for (size_t i = 0, L = sass_list_get_length(val); i < L; ++i) { l->append(sass_value_to_ast_node(sass_list_get_value(val, i))); } l->is_bracketed(sass_list_get_is_bracketed(val)); return l; } break; case SASS_MAP: { Map_Ptr m = SASS_MEMORY_NEW(Map, ParserState("[C-VALUE]")); for (size_t i = 0, L = sass_map_get_length(val); i < L; ++i) { *m << std::make_pair( sass_value_to_ast_node(sass_map_get_key(val, i)), sass_value_to_ast_node(sass_map_get_value(val, i))); } return m; } break; case SASS_NULL: return SASS_MEMORY_NEW(Null, ParserState("[C-VALUE]")); break; case SASS_ERROR: return SASS_MEMORY_NEW(Custom_Error, ParserState("[C-VALUE]"), sass_error_get_message(val)); break; case SASS_WARNING: return SASS_MEMORY_NEW(Custom_Warning, ParserState("[C-VALUE]"), sass_warning_get_message(val)); break; } return 0; } }
mit
fractalcoin/fractalcoin
src/test/multisig_tests.cpp
71
9964
#include "key.h" #include "keystore.h" #include "main.h" #include "script.h" #include "uint256.h" #include <boost/assign/std/vector.hpp> #include <boost/foreach.hpp> #include <boost/test/unit_test.hpp> using namespace std; using namespace boost::assign; typedef vector<unsigned char> valtype; extern uint256 SignatureHash(const CScript &scriptCode, const CTransaction& txTo, unsigned int nIn, int nHashType); BOOST_AUTO_TEST_SUITE(multisig_tests) CScript sign_multisig(CScript scriptPubKey, vector<CKey> keys, CTransaction transaction, int whichIn) { uint256 hash = SignatureHash(scriptPubKey, transaction, whichIn, SIGHASH_ALL); CScript result; result << OP_0; // CHECKMULTISIG bug workaround BOOST_FOREACH(const CKey &key, keys) { vector<unsigned char> vchSig; BOOST_CHECK(key.Sign(hash, vchSig)); vchSig.push_back((unsigned char)SIGHASH_ALL); result << vchSig; } return result; } BOOST_AUTO_TEST_CASE(multisig_verify) { unsigned int flags = SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_STRICTENC; CKey key[4]; for (int i = 0; i < 4; i++) key[i].MakeNewKey(true); CScript a_and_b; a_and_b << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG; CScript a_or_b; a_or_b << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG; CScript escrow; escrow << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << OP_3 << OP_CHECKMULTISIG; CTransaction txFrom; // Funding transaction txFrom.vout.resize(3); txFrom.vout[0].scriptPubKey = a_and_b; txFrom.vout[1].scriptPubKey = a_or_b; txFrom.vout[2].scriptPubKey = escrow; CTransaction txTo[3]; // Spending transaction for (int i = 0; i < 3; i++) { txTo[i].vin.resize(1); txTo[i].vout.resize(1); txTo[i].vin[0].prevout.n = i; txTo[i].vin[0].prevout.hash = txFrom.GetHash(); txTo[i].vout[0].nValue = 1; } vector<CKey> keys; CScript s; // Test a AND b: keys.clear(); keys += key[0],key[1]; // magic operator+= from boost.assign s = sign_multisig(a_and_b, keys, txTo[0], 0); 
BOOST_CHECK(VerifyScript(s, a_and_b, txTo[0], 0, flags, 0)); for (int i = 0; i < 4; i++) { keys.clear(); keys += key[i]; s = sign_multisig(a_and_b, keys, txTo[0], 0); BOOST_CHECK_MESSAGE(!VerifyScript(s, a_and_b, txTo[0], 0, flags, 0), strprintf("a&b 1: %d", i)); keys.clear(); keys += key[1],key[i]; s = sign_multisig(a_and_b, keys, txTo[0], 0); BOOST_CHECK_MESSAGE(!VerifyScript(s, a_and_b, txTo[0], 0, flags, 0), strprintf("a&b 2: %d", i)); } // Test a OR b: for (int i = 0; i < 4; i++) { keys.clear(); keys += key[i]; s = sign_multisig(a_or_b, keys, txTo[1], 0); if (i == 0 || i == 1) BOOST_CHECK_MESSAGE(VerifyScript(s, a_or_b, txTo[1], 0, flags, 0), strprintf("a|b: %d", i)); else BOOST_CHECK_MESSAGE(!VerifyScript(s, a_or_b, txTo[1], 0, flags, 0), strprintf("a|b: %d", i)); } s.clear(); s << OP_0 << OP_0; BOOST_CHECK(!VerifyScript(s, a_or_b, txTo[1], 0, flags, 0)); s.clear(); s << OP_0 << OP_1; BOOST_CHECK(!VerifyScript(s, a_or_b, txTo[1], 0, flags, 0)); for (int i = 0; i < 4; i++) for (int j = 0; j < 4; j++) { keys.clear(); keys += key[i],key[j]; s = sign_multisig(escrow, keys, txTo[2], 0); if (i < j && i < 3 && j < 3) BOOST_CHECK_MESSAGE(VerifyScript(s, escrow, txTo[2], 0, flags, 0), strprintf("escrow 1: %d %d", i, j)); else BOOST_CHECK_MESSAGE(!VerifyScript(s, escrow, txTo[2], 0, flags, 0), strprintf("escrow 2: %d %d", i, j)); } } BOOST_AUTO_TEST_CASE(multisig_IsStandard) { CKey key[4]; for (int i = 0; i < 4; i++) key[i].MakeNewKey(true); txnouttype whichType; CScript a_and_b; a_and_b << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG; BOOST_CHECK(::IsStandard(a_and_b, whichType)); CScript a_or_b; a_or_b << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG; BOOST_CHECK(::IsStandard(a_or_b, whichType)); CScript escrow; escrow << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << OP_3 << OP_CHECKMULTISIG; BOOST_CHECK(::IsStandard(escrow, whichType)); CScript one_of_four; one_of_four << OP_1 
<< key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << key[3].GetPubKey() << OP_4 << OP_CHECKMULTISIG; BOOST_CHECK(!::IsStandard(one_of_four, whichType)); CScript malformed[6]; malformed[0] << OP_3 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG; malformed[1] << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_3 << OP_CHECKMULTISIG; malformed[2] << OP_0 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG; malformed[3] << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_0 << OP_CHECKMULTISIG; malformed[4] << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_CHECKMULTISIG; malformed[5] << OP_1 << key[0].GetPubKey() << key[1].GetPubKey(); for (int i = 0; i < 6; i++) BOOST_CHECK(!::IsStandard(malformed[i], whichType)); } BOOST_AUTO_TEST_CASE(multisig_Solver1) { // Tests Solver() that returns lists of keys that are // required to satisfy a ScriptPubKey // // Also tests IsMine() and ExtractAddress() // // Note: ExtractAddress for the multisignature transactions // always returns false for this release, even if you have // one key that would satisfy an (a|b) or 2-of-3 keys needed // to spend an escrow transaction. 
// CBasicKeyStore keystore, emptykeystore, partialkeystore; CKey key[3]; CTxDestination keyaddr[3]; for (int i = 0; i < 3; i++) { key[i].MakeNewKey(true); keystore.AddKey(key[i]); keyaddr[i] = key[i].GetPubKey().GetID(); } partialkeystore.AddKey(key[0]); { vector<valtype> solutions; txnouttype whichType; CScript s; s << key[0].GetPubKey() << OP_CHECKSIG; BOOST_CHECK(Solver(s, whichType, solutions)); BOOST_CHECK(solutions.size() == 1); CTxDestination addr; BOOST_CHECK(ExtractDestination(s, addr)); BOOST_CHECK(addr == keyaddr[0]); BOOST_CHECK(IsMine(keystore, s)); BOOST_CHECK(!IsMine(emptykeystore, s)); } { vector<valtype> solutions; txnouttype whichType; CScript s; s << OP_DUP << OP_HASH160 << key[0].GetPubKey().GetID() << OP_EQUALVERIFY << OP_CHECKSIG; BOOST_CHECK(Solver(s, whichType, solutions)); BOOST_CHECK(solutions.size() == 1); CTxDestination addr; BOOST_CHECK(ExtractDestination(s, addr)); BOOST_CHECK(addr == keyaddr[0]); BOOST_CHECK(IsMine(keystore, s)); BOOST_CHECK(!IsMine(emptykeystore, s)); } { vector<valtype> solutions; txnouttype whichType; CScript s; s << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG; BOOST_CHECK(Solver(s, whichType, solutions)); BOOST_CHECK_EQUAL(solutions.size(), 4U); CTxDestination addr; BOOST_CHECK(!ExtractDestination(s, addr)); BOOST_CHECK(IsMine(keystore, s)); BOOST_CHECK(!IsMine(emptykeystore, s)); BOOST_CHECK(!IsMine(partialkeystore, s)); } { vector<valtype> solutions; txnouttype whichType; CScript s; s << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG; BOOST_CHECK(Solver(s, whichType, solutions)); BOOST_CHECK_EQUAL(solutions.size(), 4U); vector<CTxDestination> addrs; int nRequired; BOOST_CHECK(ExtractDestinations(s, whichType, addrs, nRequired)); BOOST_CHECK(addrs[0] == keyaddr[0]); BOOST_CHECK(addrs[1] == keyaddr[1]); BOOST_CHECK(nRequired == 1); BOOST_CHECK(IsMine(keystore, s)); BOOST_CHECK(!IsMine(emptykeystore, s)); BOOST_CHECK(!IsMine(partialkeystore, s)); } { 
vector<valtype> solutions; txnouttype whichType; CScript s; s << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << OP_3 << OP_CHECKMULTISIG; BOOST_CHECK(Solver(s, whichType, solutions)); BOOST_CHECK(solutions.size() == 5); } } BOOST_AUTO_TEST_CASE(multisig_Sign) { // Test SignSignature() (and therefore the version of Solver() that signs transactions) CBasicKeyStore keystore; CKey key[4]; for (int i = 0; i < 4; i++) { key[i].MakeNewKey(true); keystore.AddKey(key[i]); } CScript a_and_b; a_and_b << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG; CScript a_or_b; a_or_b << OP_1 << key[0].GetPubKey() << key[1].GetPubKey() << OP_2 << OP_CHECKMULTISIG; CScript escrow; escrow << OP_2 << key[0].GetPubKey() << key[1].GetPubKey() << key[2].GetPubKey() << OP_3 << OP_CHECKMULTISIG; CTransaction txFrom; // Funding transaction txFrom.vout.resize(3); txFrom.vout[0].scriptPubKey = a_and_b; txFrom.vout[1].scriptPubKey = a_or_b; txFrom.vout[2].scriptPubKey = escrow; CTransaction txTo[3]; // Spending transaction for (int i = 0; i < 3; i++) { txTo[i].vin.resize(1); txTo[i].vout.resize(1); txTo[i].vin[0].prevout.n = i; txTo[i].vin[0].prevout.hash = txFrom.GetHash(); txTo[i].vout[0].nValue = 1; } for (int i = 0; i < 3; i++) { BOOST_CHECK_MESSAGE(SignSignature(keystore, txFrom, txTo[i], 0), strprintf("SignSignature %d", i)); } } BOOST_AUTO_TEST_SUITE_END()
mit
arca1n/cocos2d-x_nextpeer_integration
external/lua/luajit/src/src/host/buildvm_fold.c
73
6473
/* ** LuaJIT VM builder: IR folding hash table generator. ** Copyright (C) 2005-2013 Mike Pall. See Copyright Notice in luajit.h */ #include "buildvm.h" #include "lj_obj.h" #include "lj_ir.h" /* Context for the folding hash table generator. */ static int lineno; static int funcidx; static uint32_t foldkeys[BUILD_MAX_FOLD]; static uint32_t nkeys; /* Try to fill the hash table with keys using the hash parameters. */ static int tryhash(uint32_t *htab, uint32_t sz, uint32_t r, int dorol) { uint32_t i; if (dorol && ((r & 31) == 0 || (r>>5) == 0)) return 0; /* Avoid zero rotates. */ memset(htab, 0xff, (sz+1)*sizeof(uint32_t)); for (i = 0; i < nkeys; i++) { uint32_t key = foldkeys[i]; uint32_t k = key & 0xffffff; uint32_t h = (dorol ? lj_rol(lj_rol(k, r>>5) - k, r&31) : (((k << (r>>5)) - k) << (r&31))) % sz; if (htab[h] != 0xffffffff) { /* Collision on primary slot. */ if (htab[h+1] != 0xffffffff) { /* Collision on secondary slot. */ /* Try to move the colliding key, if possible. */ if (h < sz-1 && htab[h+2] == 0xffffffff) { uint32_t k2 = htab[h+1] & 0xffffff; uint32_t h2 = (dorol ? lj_rol(lj_rol(k2, r>>5) - k2, r&31) : (((k2 << (r>>5)) - k2) << (r&31))) % sz; if (h2 != h+1) return 0; /* Cannot resolve collision. */ htab[h+2] = htab[h+1]; /* Move colliding key to secondary slot. */ } else { return 0; /* Collision. */ } } htab[h+1] = key; } else { htab[h] = key; } } return 1; /* Success, all keys could be stored. */ } /* Print the generated hash table. */ static void printhash(BuildCtx *ctx, uint32_t *htab, uint32_t sz) { uint32_t i; fprintf(ctx->fp, "static const uint32_t fold_hash[%d] = {\n0x%08x", sz+1, htab[0]); for (i = 1; i < sz+1; i++) fprintf(ctx->fp, ",\n0x%08x", htab[i]); fprintf(ctx->fp, "\n};\n\n"); } /* Exhaustive search for the shortest semi-perfect hash table. */ static void makehash(BuildCtx *ctx) { uint32_t htab[BUILD_MAX_FOLD*2+1]; uint32_t sz, r; /* Search for the smallest hash table with an odd size. 
*/ for (sz = (nkeys|1); sz < BUILD_MAX_FOLD*2; sz += 2) { /* First try all shift hash combinations. */ for (r = 0; r < 32*32; r++) { if (tryhash(htab, sz, r, 0)) { printhash(ctx, htab, sz); fprintf(ctx->fp, "#define fold_hashkey(k)\t(((((k)<<%u)-(k))<<%u)%%%u)\n\n", r>>5, r&31, sz); return; } } /* Then try all rotate hash combinations. */ for (r = 0; r < 32*32; r++) { if (tryhash(htab, sz, r, 1)) { printhash(ctx, htab, sz); fprintf(ctx->fp, "#define fold_hashkey(k)\t(lj_rol(lj_rol((k),%u)-(k),%u)%%%u)\n\n", r>>5, r&31, sz); return; } } } fprintf(stderr, "Error: search for perfect hash failed\n"); exit(1); } /* Parse one token of a fold rule. */ static uint32_t nexttoken(char **pp, int allowlit, int allowany) { char *p = *pp; if (p) { uint32_t i; char *q = strchr(p, ' '); if (q) *q++ = '\0'; *pp = q; if (allowlit && !strncmp(p, "IRFPM_", 6)) { for (i = 0; irfpm_names[i]; i++) if (!strcmp(irfpm_names[i], p+6)) return i; } else if (allowlit && !strncmp(p, "IRFL_", 5)) { for (i = 0; irfield_names[i]; i++) if (!strcmp(irfield_names[i], p+5)) return i; } else if (allowlit && !strncmp(p, "IRCALL_", 7)) { for (i = 0; ircall_names[i]; i++) if (!strcmp(ircall_names[i], p+7)) return i; } else if (allowlit && !strncmp(p, "IRCONV_", 7)) { for (i = 0; irt_names[i]; i++) { const char *r = strchr(p+7, '_'); if (r && !strncmp(irt_names[i], p+7, r-(p+7))) { uint32_t j; for (j = 0; irt_names[j]; j++) if (!strcmp(irt_names[j], r+1)) return (i << 5) + j; } } } else if (allowlit && *p >= '0' && *p <= '9') { for (i = 0; *p >= '0' && *p <= '9'; p++) i = i*10 + (*p - '0'); if (*p == '\0') return i; } else if (allowany && !strcmp("any", p)) { return allowany; } else { for (i = 0; ir_names[i]; i++) if (!strcmp(ir_names[i], p)) return i; } fprintf(stderr, "Error: bad fold definition token \"%s\" at line %d\n", p, lineno); exit(1); } return 0; } /* Parse a fold rule. 
*/ static void foldrule(char *p) { uint32_t op = nexttoken(&p, 0, 0); uint32_t left = nexttoken(&p, 0, 0x7f); uint32_t right = nexttoken(&p, 1, 0x3ff); uint32_t key = (funcidx << 24) | (op << 17) | (left << 10) | right; uint32_t i; if (nkeys >= BUILD_MAX_FOLD) { fprintf(stderr, "Error: too many fold rules, increase BUILD_MAX_FOLD.\n"); exit(1); } /* Simple insertion sort to detect duplicates. */ for (i = nkeys; i > 0; i--) { if ((foldkeys[i-1]&0xffffff) < (key & 0xffffff)) break; if ((foldkeys[i-1]&0xffffff) == (key & 0xffffff)) { fprintf(stderr, "Error: duplicate fold definition at line %d\n", lineno); exit(1); } foldkeys[i] = foldkeys[i-1]; } foldkeys[i] = key; nkeys++; } /* Emit C source code for IR folding hash table. */ void emit_fold(BuildCtx *ctx) { char buf[256]; /* We don't care about analyzing lines longer than that. */ const char *fname = ctx->args[0]; FILE *fp; if (fname == NULL) { fprintf(stderr, "Error: missing input filename\n"); exit(1); } if (fname[0] == '-' && fname[1] == '\0') { fp = stdin; } else { fp = fopen(fname, "r"); if (!fp) { fprintf(stderr, "Error: cannot open input file '%s': %s\n", fname, strerror(errno)); exit(1); } } fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n"); fprintf(ctx->fp, "static const FoldFunc fold_func[] = {\n"); lineno = 0; funcidx = 0; nkeys = 0; while (fgets(buf, sizeof(buf), fp) != NULL) { lineno++; /* The prefix must be at the start of a line, otherwise it's ignored. 
*/ if (!strncmp(buf, FOLDDEF_PREFIX, sizeof(FOLDDEF_PREFIX)-1)) { char *p = buf+sizeof(FOLDDEF_PREFIX)-1; char *q = strchr(p, ')'); if (p[0] == '(' && q) { p++; *q = '\0'; foldrule(p); } else if ((p[0] == 'F' || p[0] == 'X') && p[1] == '(' && q) { p += 2; *q = '\0'; if (funcidx) fprintf(ctx->fp, ",\n"); if (p[-2] == 'X') fprintf(ctx->fp, " %s", p); else fprintf(ctx->fp, " fold_%s", p); funcidx++; } else { buf[strlen(buf)-1] = '\0'; fprintf(stderr, "Error: unknown fold definition tag %s%s at line %d\n", FOLDDEF_PREFIX, p, lineno); exit(1); } } } fclose(fp); fprintf(ctx->fp, "\n};\n\n"); makehash(ctx); }
mit
andrewrk/zig
lib/libc/wasi/libc-top-half/musl/src/math/cos.c
87
2111
/* origin: FreeBSD /usr/src/lib/msun/src/s_cos.c */
/*
 * ====================================================
 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
 *
 * Developed at SunPro, a Sun Microsystems, Inc. business.
 * Permission to use, copy, modify, and distribute this
 * software is freely granted, provided that this notice
 * is preserved.
 * ====================================================
 */
/* cos(x)
 * Return cosine function of x.
 *
 * kernel function:
 *      __sin        ... sine function on [-pi/4,pi/4]
 *      __cos        ... cosine function on [-pi/4,pi/4]
 *      __rem_pio2   ... argument reduction routine
 *
 * Method.
 *      Let S,C and T denote the sin, cos and tan respectively on
 *      [-PI/4, +PI/4].  Reduce the argument x to y1+y2 = x-k*pi/2
 *      in [-pi/4 , +pi/4], and let n = k mod 4.
 *      We have
 *
 *          n        sin(x)      cos(x)        tan(x)
 *     ----------------------------------------------------------
 *          0          S           C             T
 *          1          C          -S            -1/T
 *          2         -S          -C             T
 *          3         -C           S            -1/T
 *     ----------------------------------------------------------
 *
 * Special cases:
 *      Let trig be any of sin, cos, or tan.
 *      trig(+-INF)  is NaN, with signals;
 *      trig(NaN)    is that NaN;
 *
 * Accuracy:
 *      TRIG(x) returns trig(x) nearly rounded
 */

#include "libm.h"

double cos(double x)
{
	double y[2];
	uint32_t hx;

	/* Work with the high word of |x|: cos is an even function,
	   so the sign bit can be discarded up front. */
	GET_HIGH_WORD(hx, x);
	hx &= 0x7fffffff;

	/* |x| ~< pi/4: no argument reduction needed. */
	if (hx <= 0x3fe921fb) {
		if (hx < 0x3e46a09e) {
			/* |x| < 2**-27 * sqrt(2): result rounds to 1.
			   Force an inexact exception when x != 0. */
			FORCE_EVAL(x + 0x1p120f);
			return 1.0;
		}
		return __cos(x, 0);
	}

	/* cos(Inf or NaN) is NaN; x-x produces it with the right signals. */
	if (hx >= 0x7ff00000)
		return x-x;

	/* Reduce x to y[0]+y[1] in [-pi/4, pi/4] and dispatch on the
	   quadrant n mod 4 per the table in the header comment. */
	switch (__rem_pio2(x, y) & 3) {
	case 0:
		return  __cos(y[0], y[1]);
	case 1:
		return -__sin(y[0], y[1], 1);
	case 2:
		return -__cos(y[0], y[1]);
	default:
		return  __sin(y[0], y[1], 1);
	}
}
mit
AdrianoRuseler/LAUNCHXL-F28377S
ControlSuite/F2837xS_examples_Cpu1/sdfm_filter_sync_dma_cpu01/F2837xS_PieVect.c
92
14768
//########################################################################### // // FILE: F2837xS_PieVect.c // // TITLE: F2837xS Device PIE Vector Initialization Functions // //########################################################################### // $TI Release: F2837xS Support Library v210 $ // $Release Date: Tue Nov 1 15:35:23 CDT 2016 $ // $Copyright: Copyright (C) 2014-2016 Texas Instruments Incorporated - // http://www.ti.com/ ALL RIGHTS RESERVED $ //########################################################################### // // Included Files // #include "F2837xS_device.h" #include "F2837xS_Examples.h" // // Globals // const struct PIE_VECT_TABLE PieVectTableInit = { PIE_RESERVED_ISR, // Reserved PIE_RESERVED_ISR, // Reserved PIE_RESERVED_ISR, // Reserved PIE_RESERVED_ISR, // Reserved PIE_RESERVED_ISR, // Reserved PIE_RESERVED_ISR, // Reserved PIE_RESERVED_ISR, // Reserved PIE_RESERVED_ISR, // Reserved PIE_RESERVED_ISR, // Reserved PIE_RESERVED_ISR, // Reserved PIE_RESERVED_ISR, // Reserved PIE_RESERVED_ISR, // Reserved PIE_RESERVED_ISR, // Reserved TIMER1_ISR, // CPU Timer 1 Interrupt TIMER2_ISR, // CPU Timer 2 Interrupt DATALOG_ISR, // Datalogging Interrupt RTOS_ISR, // RTOS Interrupt EMU_ISR, // Emulation Interrupt NMI_ISR, // Non-Maskable Interrupt ILLEGAL_ISR, // Illegal Operation Trap USER1_ISR, // User Defined Trap 1 USER2_ISR, // User Defined Trap 2 USER3_ISR, // User Defined Trap 3 USER4_ISR, // User Defined Trap 4 USER5_ISR, // User Defined Trap 5 USER6_ISR, // User Defined Trap 6 USER7_ISR, // User Defined Trap 7 USER8_ISR, // User Defined Trap 8 USER9_ISR, // User Defined Trap 9 USER10_ISR, // User Defined Trap 10 USER11_ISR, // User Defined Trap 11 USER12_ISR, // User Defined Trap 12 ADCA1_ISR, // 1.1 - ADCA Interrupt 1 ADCB1_ISR, // 1.2 - ADCB Interrupt 1 ADCC1_ISR, // 1.3 - ADCC Interrupt 1 XINT1_ISR, // 1.4 - XINT1 Interrupt XINT2_ISR, // 1.5 - XINT2 Interrupt ADCD1_ISR, // 1.6 - ADCD Interrupt 1 TIMER0_ISR, // 1.7 - Timer 0 Interrupt 
WAKE_ISR, // 1.8 - Standby and Halt Wakeup Interrupt EPWM1_TZ_ISR, // 2.1 - ePWM1 Trip Zone Interrupt EPWM2_TZ_ISR, // 2.2 - ePWM2 Trip Zone Interrupt EPWM3_TZ_ISR, // 2.3 - ePWM3 Trip Zone Interrupt EPWM4_TZ_ISR, // 2.4 - ePWM4 Trip Zone Interrupt EPWM5_TZ_ISR, // 2.5 - ePWM5 Trip Zone Interrupt EPWM6_TZ_ISR, // 2.6 - ePWM6 Trip Zone Interrupt EPWM7_TZ_ISR, // 2.7 - ePWM7 Trip Zone Interrupt EPWM8_TZ_ISR, // 2.8 - ePWM8 Trip Zone Interrupt EPWM1_ISR, // 3.1 - ePWM1 Interrupt EPWM2_ISR, // 3.2 - ePWM2 Interrupt EPWM3_ISR, // 3.3 - ePWM3 Interrupt EPWM4_ISR, // 3.4 - ePWM4 Interrupt EPWM5_ISR, // 3.5 - ePWM5 Interrupt EPWM6_ISR, // 3.6 - ePWM6 Interrupt EPWM7_ISR, // 3.7 - ePWM7 Interrupt EPWM8_ISR, // 3.8 - ePWM8 Interrupt ECAP1_ISR, // 4.1 - eCAP1 Interrupt ECAP2_ISR, // 4.2 - eCAP2 Interrupt ECAP3_ISR, // 4.3 - eCAP3 Interrupt ECAP4_ISR, // 4.4 - eCAP4 Interrupt ECAP5_ISR, // 4.5 - eCAP5 Interrupt ECAP6_ISR, // 4.6 - eCAP6 Interrupt PIE_RESERVED_ISR, // 4.7 - Reserved PIE_RESERVED_ISR, // 4.8 - Reserved EQEP1_ISR, // 5.1 - eQEP1 Interrupt EQEP2_ISR, // 5.2 - eQEP2 Interrupt EQEP3_ISR, // 5.3 - eQEP3 Interrupt PIE_RESERVED_ISR, // 5.4 - Reserved PIE_RESERVED_ISR, // 5.5 - Reserved PIE_RESERVED_ISR, // 5.6 - Reserved PIE_RESERVED_ISR, // 5.7 - Reserved PIE_RESERVED_ISR, // 5.8 - Reserved SPIA_RX_ISR, // 6.1 - SPIA Receive Interrupt SPIA_TX_ISR, // 6.2 - SPIA Transmit Interrupt SPIB_RX_ISR, // 6.3 - SPIB Receive Interrupt SPIB_TX_ISR, // 6.4 - SPIB Transmit Interrupt MCBSPA_RX_ISR, // 6.5 - McBSPA Receive Interrupt MCBSPA_TX_ISR, // 6.6 - McBSPA Transmit Interrupt MCBSPB_RX_ISR, // 6.7 - McBSPB Receive Interrupt MCBSPB_TX_ISR, // 6.8 - McBSPB Transmit Interrupt DMA_CH1_ISR, // 7.1 - DMA Channel 1 Interrupt DMA_CH2_ISR, // 7.2 - DMA Channel 2 Interrupt DMA_CH3_ISR, // 7.3 - DMA Channel 3 Interrupt DMA_CH4_ISR, // 7.4 - DMA Channel 4 Interrupt DMA_CH5_ISR, // 7.5 - DMA Channel 5 Interrupt DMA_CH6_ISR, // 7.6 - DMA Channel 6 Interrupt PIE_RESERVED_ISR, // 7.7 - 
Reserved PIE_RESERVED_ISR, // 7.8 - Reserved I2CA_ISR, // 8.1 - I2CA Interrupt 1 I2CA_FIFO_ISR, // 8.2 - I2CA Interrupt 2 I2CB_ISR, // 8.3 - I2CB Interrupt 1 I2CB_FIFO_ISR, // 8.4 - I2CB Interrupt 2 SCIC_RX_ISR, // 8.5 - SCIC Receive Interrupt SCIC_TX_ISR, // 8.6 - SCIC Transmit Interrupt SCID_RX_ISR, // 8.7 - SCID Receive Interrupt SCID_TX_ISR, // 8.8 - SCID Transmit Interrupt SCIA_RX_ISR, // 9.1 - SCIA Receive Interrupt SCIA_TX_ISR, // 9.2 - SCIA Transmit Interrupt SCIB_RX_ISR, // 9.3 - SCIB Receive Interrupt SCIB_TX_ISR, // 9.4 - SCIB Transmit Interrupt CANA0_ISR, // 9.5 - CANA Interrupt 0 CANA1_ISR, // 9.6 - CANA Interrupt 1 CANB0_ISR, // 9.7 - CANB Interrupt 0 CANB1_ISR, // 9.8 - CANB Interrupt 1 ADCA_EVT_ISR, // 10.1 - ADCA Event Interrupt ADCA2_ISR, // 10.2 - ADCA Interrupt 2 ADCA3_ISR, // 10.3 - ADCA Interrupt 3 ADCA4_ISR, // 10.4 - ADCA Interrupt 4 ADCB_EVT_ISR, // 10.5 - ADCB Event Interrupt ADCB2_ISR, // 10.6 - ADCB Interrupt 2 ADCB3_ISR, // 10.7 - ADCB Interrupt 3 ADCB4_ISR, // 10.8 - ADCB Interrupt 4 CLA1_1_ISR, // 11.1 - CLA1 Interrupt 1 CLA1_2_ISR, // 11.2 - CLA1 Interrupt 2 CLA1_3_ISR, // 11.3 - CLA1 Interrupt 3 CLA1_4_ISR, // 11.4 - CLA1 Interrupt 4 CLA1_5_ISR, // 11.5 - CLA1 Interrupt 5 CLA1_6_ISR, // 11.6 - CLA1 Interrupt 6 CLA1_7_ISR, // 11.7 - CLA1 Interrupt 7 CLA1_8_ISR, // 11.8 - CLA1 Interrupt 8 XINT3_ISR, // 12.1 - XINT3 Interrupt XINT4_ISR, // 12.2 - XINT4 Interrupt XINT5_ISR, // 12.3 - XINT5 Interrupt PIE_RESERVED_ISR, // 12.4 - Reserved PIE_RESERVED_ISR, // 12.5 - Reserved VCU_ISR, // 12.6 - VCU Interrupt FPU_OVERFLOW_ISR, // 12.7 - FPU Overflow Interrupt FPU_UNDERFLOW_ISR, // 12.8 - FPU Underflow Interrupt PIE_RESERVED_ISR, // 1.9 - Reserved PIE_RESERVED_ISR, // 1.10 - Reserved PIE_RESERVED_ISR, // 1.11 - Reserved PIE_RESERVED_ISR, // 1.12 - Reserved IPC0_ISR, // 1.13 - IPC Interrupt 0 IPC1_ISR, // 1.14 - IPC Interrupt 1 IPC2_ISR, // 1.15 - IPC Interrupt 2 IPC3_ISR, // 1.16 - IPC Interrupt 3 EPWM9_TZ_ISR, // 2.9 - ePWM9 Trip Zone 
Interrupt EPWM10_TZ_ISR, // 2.10 - ePWM10 Trip Zone Interrupt EPWM11_TZ_ISR, // 2.11 - ePWM11 Trip Zone Interrupt EPWM12_TZ_ISR, // 2.12 - ePWM12 Trip Zone Interrupt PIE_RESERVED_ISR, // 2.13 - Reserved PIE_RESERVED_ISR, // 2.14 - Reserved PIE_RESERVED_ISR, // 2.15 - Reserved PIE_RESERVED_ISR, // 2.16 - Reserved EPWM9_ISR, // 3.9 - ePWM9 Interrupt EPWM10_ISR, // 3.10 - ePWM10 Interrupt EPWM11_ISR, // 3.11 - ePWM11 Interrupt EPWM12_ISR, // 3.12 - ePWM12 Interrupt PIE_RESERVED_ISR, // 3.13 - Reserved PIE_RESERVED_ISR, // 3.14 - Reserved PIE_RESERVED_ISR, // 3.15 - Reserved PIE_RESERVED_ISR, // 3.16 - Reserved PIE_RESERVED_ISR, // 4.9 - Reserved PIE_RESERVED_ISR, // 4.10 - Reserved PIE_RESERVED_ISR, // 4.11 - Reserved PIE_RESERVED_ISR, // 4.12 - Reserved PIE_RESERVED_ISR, // 4.13 - Reserved PIE_RESERVED_ISR, // 4.14 - Reserved PIE_RESERVED_ISR, // 4.15 - Reserved PIE_RESERVED_ISR, // 4.16 - Reserved SD1_ISR, // 5.9 - SD1 Interrupt SD2_ISR, // 5.10 - SD2 Interrupt PIE_RESERVED_ISR, // 5.11 - Reserved PIE_RESERVED_ISR, // 5.12 - Reserved PIE_RESERVED_ISR, // 5.13 - Reserved PIE_RESERVED_ISR, // 5.14 - Reserved PIE_RESERVED_ISR, // 5.15 - Reserved PIE_RESERVED_ISR, // 5.16 - Reserved SPIC_RX_ISR, // 6.9 - SPIC Receive Interrupt SPIC_TX_ISR, // 6.10 - SPIC Transmit Interrupt PIE_RESERVED_ISR, // 6.11 - Reserved PIE_RESERVED_ISR, // 6.12 - Reserved PIE_RESERVED_ISR, // 6.13 - Reserved PIE_RESERVED_ISR, // 6.14 - Reserved PIE_RESERVED_ISR, // 6.15 - Reserved PIE_RESERVED_ISR, // 6.16 - Reserved PIE_RESERVED_ISR, // 7.9 - Reserved PIE_RESERVED_ISR, // 7.10 - Reserved PIE_RESERVED_ISR, // 7.11 - Reserved PIE_RESERVED_ISR, // 7.12 - Reserved PIE_RESERVED_ISR, // 7.13 - Reserved PIE_RESERVED_ISR, // 7.14 - Reserved PIE_RESERVED_ISR, // 7.15 - Reserved PIE_RESERVED_ISR, // 7.16 - Reserved PIE_RESERVED_ISR, // 8.9 - Reserved PIE_RESERVED_ISR, // 8.10 - Reserved PIE_RESERVED_ISR, // 8.11 - Reserved PIE_RESERVED_ISR, // 8.12 - Reserved PIE_RESERVED_ISR, // 8.13 - Reserved 
PIE_RESERVED_ISR, // 8.14 - Reserved #ifdef CPU1 UPPA_ISR, // 8.15 - uPPA Interrupt PIE_RESERVED_ISR, // 8.16 - Reserved #elif defined(CPU2) PIE_RESERVED_ISR, // 8.15 - Reserved PIE_RESERVED_ISR, // 8.16 - Reserved #endif PIE_RESERVED_ISR, // 9.9 - Reserved PIE_RESERVED_ISR, // 9.10 - Reserved PIE_RESERVED_ISR, // 9.11 - Reserved PIE_RESERVED_ISR, // 9.12 - Reserved PIE_RESERVED_ISR, // 9.13 - Reserved PIE_RESERVED_ISR, // 9.14 - Reserved #ifdef CPU1 USBA_ISR, // 9.15 - USBA Interrupt #elif defined(CPU2) PIE_RESERVED_ISR, // 9.15 - Reserved #endif PIE_RESERVED_ISR, // 9.16 - Reserved ADCC_EVT_ISR, // 10.9 - ADCC Event Interrupt ADCC2_ISR, // 10.10 - ADCC Interrupt 2 ADCC3_ISR, // 10.11 - ADCC Interrupt 3 ADCC4_ISR, // 10.12 - ADCC Interrupt 4 ADCD_EVT_ISR, // 10.13 - ADCD Event Interrupt ADCD2_ISR, // 10.14 - ADCD Interrupt 2 ADCD3_ISR, // 10.15 - ADCD Interrupt 3 ADCD4_ISR, // 10.16 - ADCD Interrupt 4 PIE_RESERVED_ISR, // 11.9 - Reserved PIE_RESERVED_ISR, // 11.10 - Reserved PIE_RESERVED_ISR, // 11.11 - Reserved PIE_RESERVED_ISR, // 11.12 - Reserved PIE_RESERVED_ISR, // 11.13 - Reserved PIE_RESERVED_ISR, // 11.14 - Reserved PIE_RESERVED_ISR, // 11.15 - Reserved PIE_RESERVED_ISR, // 11.16 - Reserved EMIF_ERROR_ISR, // 12.9 - EMIF Error Interrupt RAM_CORRECTABLE_ERROR_ISR, // 12.10 - RAM Correctable Error Interrupt FLASH_CORRECTABLE_ERROR_ISR, // 12.11 - Flash Correctable Error Interrupt RAM_ACCESS_VIOLATION_ISR, // 12.12 - RAM Access Violation Interrupt SYS_PLL_SLIP_ISR, // 12.13 - System PLL Slip Interrupt AUX_PLL_SLIP_ISR, // 12.14 - Auxiliary PLL Slip Interrupt CLA_OVERFLOW_ISR, // 12.15 - CLA Overflow Interrupt CLA_UNDERFLOW_ISR // 12.16 - CLA Underflow Interrupt }; // // InitPieVectTable - This function initializes the PIE vector table to a // known state and must be executed after boot time. 
// void InitPieVectTable(void) { Uint16 i; Uint32 *Source = (void *) &PieVectTableInit; Uint32 *Dest = (void *) &PieVectTable; // // Do not write over first 3 32-bit locations (these locations are // initialized by Boot ROM with boot variables) // Source = Source + 3; Dest = Dest + 3; EALLOW; for(i = 0; i < 221; i++) { *Dest++ = *Source++; } EDIS; // // Enable the PIE Vector Table // PieCtrlRegs.PIECTRL.bit.ENPIE = 1; } // // End of file //
mit
pentix/mongoose
mongoose.c
104
141876
// Copyright (c) 2004-2011 Sergey Lyubka // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
#if defined(_WIN32) #define _CRT_SECURE_NO_WARNINGS // Disable deprecation warning in VS2005 #else #define _XOPEN_SOURCE 600 // For flockfile() on Linux #define _LARGEFILE_SOURCE // Enable 64-bit file offsets #define __STDC_FORMAT_MACROS // <inttypes.h> wants this for C++ #define __STDC_LIMIT_MACROS // C++ wants that for INT64_MAX #endif #if defined(__SYMBIAN32__) #define NO_SSL // SSL is not supported #define NO_CGI // CGI is not supported #define PATH_MAX FILENAME_MAX #endif // __SYMBIAN32__ #ifndef _WIN32_WCE // Some ANSI #includes are not available on Windows CE #include <sys/types.h> #include <sys/stat.h> #include <errno.h> #include <signal.h> #include <fcntl.h> #endif // !_WIN32_WCE #include <time.h> #include <stdlib.h> #include <stdarg.h> #include <assert.h> #include <string.h> #include <ctype.h> #include <limits.h> #include <stddef.h> #include <stdio.h> #if defined(_WIN32) && !defined(__SYMBIAN32__) // Windows specific #define _WIN32_WINNT 0x0400 // To make it link in VS2005 #include <windows.h> #ifndef PATH_MAX #define PATH_MAX MAX_PATH #endif #ifndef _WIN32_WCE #include <process.h> #include <direct.h> #include <io.h> #else // _WIN32_WCE #include <winsock2.h> #include <ws2tcpip.h> #define NO_CGI // WinCE has no pipes typedef long off_t; #define errno GetLastError() #define strerror(x) _ultoa(x, (char *) _alloca(sizeof(x) *3 ), 10) #endif // _WIN32_WCE #define MAKEUQUAD(lo, hi) ((uint64_t)(((uint32_t)(lo)) | \ ((uint64_t)((uint32_t)(hi))) << 32)) #define RATE_DIFF 10000000 // 100 nsecs #define EPOCH_DIFF MAKEUQUAD(0xd53e8000, 0x019db1de) #define SYS2UNIX_TIME(lo, hi) \ (time_t) ((MAKEUQUAD((lo), (hi)) - EPOCH_DIFF) / RATE_DIFF) // Visual Studio 6 does not know __func__ or __FUNCTION__ // The rest of MS compilers use __FUNCTION__, not C99 __func__ // Also use _strtoui64 on modern M$ compilers #if defined(_MSC_VER) && _MSC_VER < 1300 #define STRX(x) #x #define STR(x) STRX(x) #define __func__ "line " STR(__LINE__) #define strtoull(x, y, z) strtoul(x, y, z) 
#define strtoll(x, y, z) strtol(x, y, z) #else #define __func__ __FUNCTION__ #define strtoull(x, y, z) _strtoui64(x, y, z) #define strtoll(x, y, z) _strtoi64(x, y, z) #endif // _MSC_VER #define ERRNO GetLastError() #define NO_SOCKLEN_T #define SSL_LIB "ssleay32.dll" #define CRYPTO_LIB "libeay32.dll" #define DIRSEP '\\' #define IS_DIRSEP_CHAR(c) ((c) == '/' || (c) == '\\') #define O_NONBLOCK 0 #if !defined(EWOULDBLOCK) #define EWOULDBLOCK WSAEWOULDBLOCK #endif // !EWOULDBLOCK #define _POSIX_ #define INT64_FMT "I64d" #define WINCDECL __cdecl #define SHUT_WR 1 #define snprintf _snprintf #define vsnprintf _vsnprintf #define mg_sleep(x) Sleep(x) #define pipe(x) _pipe(x, MG_BUF_LEN, _O_BINARY) #define popen(x, y) _popen(x, y) #define pclose(x) _pclose(x) #define close(x) _close(x) #define dlsym(x,y) GetProcAddress((HINSTANCE) (x), (y)) #define RTLD_LAZY 0 #define fseeko(x, y, z) _lseeki64(_fileno(x), (y), (z)) #define fdopen(x, y) _fdopen((x), (y)) #define write(x, y, z) _write((x), (y), (unsigned) z) #define read(x, y, z) _read((x), (y), (unsigned) z) #define flockfile(x) EnterCriticalSection(&global_log_file_lock) #define funlockfile(x) LeaveCriticalSection(&global_log_file_lock) #if !defined(fileno) #define fileno(x) _fileno(x) #endif // !fileno MINGW #defines fileno typedef HANDLE pthread_mutex_t; typedef struct {HANDLE signal, broadcast;} pthread_cond_t; typedef DWORD pthread_t; #define pid_t HANDLE // MINGW typedefs pid_t to int. Using #define here. 
struct timespec { long tv_nsec; long tv_sec; }; static int pthread_mutex_lock(pthread_mutex_t *); static int pthread_mutex_unlock(pthread_mutex_t *); static FILE *mg_fopen(const char *path, const char *mode); #if defined(HAVE_STDINT) #include <stdint.h> #else typedef unsigned int uint32_t; typedef unsigned short uint16_t; typedef unsigned __int64 uint64_t; typedef __int64 int64_t; #define INT64_MAX 9223372036854775807 #endif // HAVE_STDINT // POSIX dirent interface struct dirent { char d_name[PATH_MAX]; }; typedef struct DIR { HANDLE handle; WIN32_FIND_DATAW info; struct dirent result; } DIR; #else // UNIX specific #include <sys/wait.h> #include <sys/socket.h> #include <sys/select.h> #include <netinet/in.h> #include <arpa/inet.h> #include <sys/time.h> #include <stdint.h> #include <inttypes.h> #include <netdb.h> #include <pwd.h> #include <unistd.h> #include <dirent.h> #if !defined(NO_SSL_DL) && !defined(NO_SSL) #include <dlfcn.h> #endif #include <pthread.h> #if defined(__MACH__) #define SSL_LIB "libssl.dylib" #define CRYPTO_LIB "libcrypto.dylib" #else #if !defined(SSL_LIB) #define SSL_LIB "libssl.so" #endif #if !defined(CRYPTO_LIB) #define CRYPTO_LIB "libcrypto.so" #endif #endif #define DIRSEP '/' #define IS_DIRSEP_CHAR(c) ((c) == '/') #ifndef O_BINARY #define O_BINARY 0 #endif // O_BINARY #define closesocket(a) close(a) #define mg_fopen(x, y) fopen(x, y) #define mg_mkdir(x, y) mkdir(x, y) #define mg_remove(x) remove(x) #define mg_rename(x, y) rename(x, y) #define mg_sleep(x) usleep((x) * 1000) #define ERRNO errno #define INVALID_SOCKET (-1) #define INT64_FMT PRId64 typedef int SOCKET; #define WINCDECL #endif // End of Windows and UNIX specific includes #include "mongoose.h" #define MONGOOSE_VERSION "3.2" #define PASSWORDS_FILE_NAME ".htpasswd" #define CGI_ENVIRONMENT_SIZE 4096 #define MAX_CGI_ENVIR_VARS 64 #define MG_BUF_LEN 8192 #define ARRAY_SIZE(array) (sizeof(array) / sizeof(array[0])) #ifdef _WIN32 static CRITICAL_SECTION global_log_file_lock; static pthread_t 
pthread_self(void) { return GetCurrentThreadId(); } #endif // _WIN32 #if defined(DEBUG) #define DEBUG_TRACE(x) do { \ flockfile(stdout); \ printf("*** %lu.%p.%s.%d: ", \ (unsigned long) time(NULL), (void *) pthread_self(), \ __func__, __LINE__); \ printf x; \ putchar('\n'); \ fflush(stdout); \ funlockfile(stdout); \ } while (0) #else #define DEBUG_TRACE(x) #endif // DEBUG // Darwin prior to 7.0 and Win32 do not have socklen_t #ifdef NO_SOCKLEN_T typedef int socklen_t; #endif // NO_SOCKLEN_T #if !defined(MSG_NOSIGNAL) #define MSG_NOSIGNAL 0 #endif static const char *http_500_error = "Internal Server Error"; // Snatched from OpenSSL includes. I put the prototypes here to be independent // from the OpenSSL source installation. Having this, mongoose + SSL can be // built on any system with binary SSL libraries installed. typedef struct ssl_st SSL; typedef struct ssl_method_st SSL_METHOD; typedef struct ssl_ctx_st SSL_CTX; #define SSL_ERROR_WANT_READ 2 #define SSL_ERROR_WANT_WRITE 3 #define SSL_FILETYPE_PEM 1 #define CRYPTO_LOCK 1 #if defined(NO_SSL_DL) extern void SSL_free(SSL *); extern int SSL_accept(SSL *); extern int SSL_connect(SSL *); extern int SSL_read(SSL *, void *, int); extern int SSL_write(SSL *, const void *, int); extern int SSL_get_error(const SSL *, int); extern int SSL_set_fd(SSL *, int); extern SSL *SSL_new(SSL_CTX *); extern SSL_CTX *SSL_CTX_new(SSL_METHOD *); extern SSL_METHOD *SSLv23_server_method(void); extern SSL_METHOD *SSLv23_client_method(void); extern int SSL_library_init(void); extern void SSL_load_error_strings(void); extern int SSL_CTX_use_PrivateKey_file(SSL_CTX *, const char *, int); extern int SSL_CTX_use_certificate_file(SSL_CTX *, const char *, int); extern int SSL_CTX_use_certificate_chain_file(SSL_CTX *, const char *); extern void SSL_CTX_set_default_passwd_cb(SSL_CTX *, mg_callback_t); extern void SSL_CTX_free(SSL_CTX *); extern unsigned long ERR_get_error(void); extern char *ERR_error_string(unsigned long, char *); extern int 
CRYPTO_num_locks(void); extern void CRYPTO_set_locking_callback(void (*)(int, int, const char *, int)); extern void CRYPTO_set_id_callback(unsigned long (*)(void)); #else // Dynamically loaded SSL functionality struct ssl_func { const char *name; // SSL function name void (*ptr)(void); // Function pointer }; #define SSL_free (* (void (*)(SSL *)) ssl_sw[0].ptr) #define SSL_accept (* (int (*)(SSL *)) ssl_sw[1].ptr) #define SSL_connect (* (int (*)(SSL *)) ssl_sw[2].ptr) #define SSL_read (* (int (*)(SSL *, void *, int)) ssl_sw[3].ptr) #define SSL_write (* (int (*)(SSL *, const void *,int)) ssl_sw[4].ptr) #define SSL_get_error (* (int (*)(SSL *, int)) ssl_sw[5].ptr) #define SSL_set_fd (* (int (*)(SSL *, SOCKET)) ssl_sw[6].ptr) #define SSL_new (* (SSL * (*)(SSL_CTX *)) ssl_sw[7].ptr) #define SSL_CTX_new (* (SSL_CTX * (*)(SSL_METHOD *)) ssl_sw[8].ptr) #define SSLv23_server_method (* (SSL_METHOD * (*)(void)) ssl_sw[9].ptr) #define SSL_library_init (* (int (*)(void)) ssl_sw[10].ptr) #define SSL_CTX_use_PrivateKey_file (* (int (*)(SSL_CTX *, \ const char *, int)) ssl_sw[11].ptr) #define SSL_CTX_use_certificate_file (* (int (*)(SSL_CTX *, \ const char *, int)) ssl_sw[12].ptr) #define SSL_CTX_set_default_passwd_cb \ (* (void (*)(SSL_CTX *, mg_callback_t)) ssl_sw[13].ptr) #define SSL_CTX_free (* (void (*)(SSL_CTX *)) ssl_sw[14].ptr) #define SSL_load_error_strings (* (void (*)(void)) ssl_sw[15].ptr) #define SSL_CTX_use_certificate_chain_file \ (* (int (*)(SSL_CTX *, const char *)) ssl_sw[16].ptr) #define SSLv23_client_method (* (SSL_METHOD * (*)(void)) ssl_sw[17].ptr) #define CRYPTO_num_locks (* (int (*)(void)) crypto_sw[0].ptr) #define CRYPTO_set_locking_callback \ (* (void (*)(void (*)(int, int, const char *, int))) crypto_sw[1].ptr) #define CRYPTO_set_id_callback \ (* (void (*)(unsigned long (*)(void))) crypto_sw[2].ptr) #define ERR_get_error (* (unsigned long (*)(void)) crypto_sw[3].ptr) #define ERR_error_string (* (char * (*)(unsigned long,char *)) crypto_sw[4].ptr) // 
set_ssl_option() function updates this array. // It loads SSL library dynamically and changes NULLs to the actual addresses // of respective functions. The macros above (like SSL_connect()) are really // just calling these functions indirectly via the pointer. static struct ssl_func ssl_sw[] = { {"SSL_free", NULL}, {"SSL_accept", NULL}, {"SSL_connect", NULL}, {"SSL_read", NULL}, {"SSL_write", NULL}, {"SSL_get_error", NULL}, {"SSL_set_fd", NULL}, {"SSL_new", NULL}, {"SSL_CTX_new", NULL}, {"SSLv23_server_method", NULL}, {"SSL_library_init", NULL}, {"SSL_CTX_use_PrivateKey_file", NULL}, {"SSL_CTX_use_certificate_file",NULL}, {"SSL_CTX_set_default_passwd_cb",NULL}, {"SSL_CTX_free", NULL}, {"SSL_load_error_strings", NULL}, {"SSL_CTX_use_certificate_chain_file", NULL}, {"SSLv23_client_method", NULL}, {NULL, NULL} }; // Similar array as ssl_sw. These functions could be located in different lib. #if !defined(NO_SSL) static struct ssl_func crypto_sw[] = { {"CRYPTO_num_locks", NULL}, {"CRYPTO_set_locking_callback", NULL}, {"CRYPTO_set_id_callback", NULL}, {"ERR_get_error", NULL}, {"ERR_error_string", NULL}, {NULL, NULL} }; #endif // NO_SSL #endif // NO_SSL_DL static const char *month_names[] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" }; // Unified socket address. For IPv6 support, add IPv6 address structure // in the union u. union usa { struct sockaddr sa; struct sockaddr_in sin; #if defined(USE_IPV6) struct sockaddr_in6 sin6; #endif }; // Describes a string (chunk of memory). struct vec { const char *ptr; size_t len; }; // Structure used by mg_stat() function. Uses 64 bit file length. struct mgstat { int is_directory; // Directory marker int64_t size; // File size time_t mtime; // Modification time }; // Describes listening socket, or socket which was accept()-ed by the master // thread and queued for future handling by the worker thread. 
struct socket { struct socket *next; // Linkage SOCKET sock; // Listening socket union usa lsa; // Local socket address union usa rsa; // Remote socket address int is_ssl; // Is socket SSL-ed }; // NOTE(lsm): this enum shoulds be in sync with the config_options below. enum { CGI_EXTENSIONS, CGI_ENVIRONMENT, PUT_DELETE_PASSWORDS_FILE, CGI_INTERPRETER, PROTECT_URI, AUTHENTICATION_DOMAIN, SSI_EXTENSIONS, ACCESS_LOG_FILE, SSL_CHAIN_FILE, ENABLE_DIRECTORY_LISTING, ERROR_LOG_FILE, GLOBAL_PASSWORDS_FILE, INDEX_FILES, ENABLE_KEEP_ALIVE, ACCESS_CONTROL_LIST, MAX_REQUEST_SIZE, EXTRA_MIME_TYPES, LISTENING_PORTS, DOCUMENT_ROOT, SSL_CERTIFICATE, NUM_THREADS, RUN_AS_USER, REWRITE, HIDE_FILES, NUM_OPTIONS }; static const char *config_options[] = { "C", "cgi_pattern", "**.cgi$|**.pl$|**.php$", "E", "cgi_environment", NULL, "G", "put_delete_passwords_file", NULL, "I", "cgi_interpreter", NULL, "P", "protect_uri", NULL, "R", "authentication_domain", "mydomain.com", "S", "ssi_pattern", "**.shtml$|**.shtm$", "a", "access_log_file", NULL, "c", "ssl_chain_file", NULL, "d", "enable_directory_listing", "yes", "e", "error_log_file", NULL, "g", "global_passwords_file", NULL, "i", "index_files", "index.html,index.htm,index.cgi,index.shtml,index.php", "k", "enable_keep_alive", "no", "l", "access_control_list", NULL, "M", "max_request_size", "16384", "m", "extra_mime_types", NULL, "p", "listening_ports", "8080", "r", "document_root", ".", "s", "ssl_certificate", NULL, "t", "num_threads", "10", "u", "run_as_user", NULL, "w", "url_rewrite_patterns", NULL, "x", "hide_files_patterns", NULL, NULL }; #define ENTRIES_PER_CONFIG_OPTION 3 struct mg_context { volatile int stop_flag; // Should we stop event loop SSL_CTX *ssl_ctx; // SSL context SSL_CTX *client_ssl_ctx; // Client SSL context char *config[NUM_OPTIONS]; // Mongoose configuration parameters mg_callback_t user_callback; // User-defined callback function void *user_data; // User-defined data struct socket *listening_sockets; volatile int 
num_threads; // Number of threads pthread_mutex_t mutex; // Protects (max|num)_threads pthread_cond_t cond; // Condvar for tracking workers terminations struct socket queue[20]; // Accepted sockets volatile int sq_head; // Head of the socket queue volatile int sq_tail; // Tail of the socket queue pthread_cond_t sq_full; // Singaled when socket is produced pthread_cond_t sq_empty; // Signaled when socket is consumed }; struct mg_connection { struct mg_request_info request_info; struct mg_context *ctx; SSL *ssl; // SSL descriptor struct socket client; // Connected client time_t birth_time; // Time connection was accepted int64_t num_bytes_sent; // Total bytes sent to client int64_t content_len; // Content-Length header value int64_t consumed_content; // How many bytes of content is already read char *buf; // Buffer for received data char *path_info; // PATH_INFO part of the URL int must_close; // 1 if connection must be closed int buf_size; // Buffer size int request_len; // Size of the request + headers in a buffer int data_len; // Total size of data in a buffer }; const char **mg_get_valid_option_names(void) { return config_options; } static void *call_user(struct mg_connection *conn, enum mg_event event) { conn->request_info.user_data = conn->ctx->user_data; return conn->ctx->user_callback == NULL ? 
NULL : conn->ctx->user_callback(event, conn); } static int get_option_index(const char *name) { int i; for (i = 0; config_options[i] != NULL; i += ENTRIES_PER_CONFIG_OPTION) { if (strcmp(config_options[i], name) == 0 || strcmp(config_options[i + 1], name) == 0) { return i / ENTRIES_PER_CONFIG_OPTION; } } return -1; } const char *mg_get_option(const struct mg_context *ctx, const char *name) { int i; if ((i = get_option_index(name)) == -1) { return NULL; } else if (ctx->config[i] == NULL) { return ""; } else { return ctx->config[i]; } } static void sockaddr_to_string(char *buf, size_t len, const union usa *usa) { buf[0] = '\0'; #if defined(USE_IPV6) inet_ntop(usa->sa.sa_family, usa->sa.sa_family == AF_INET ? (void *) &usa->sin.sin_addr : (void *) &usa->sin6.sin6_addr, buf, len); #elif defined(_WIN32) // Only Windoze Vista (and newer) have inet_ntop() strncpy(buf, inet_ntoa(usa->sin.sin_addr), len); #else inet_ntop(usa->sa.sa_family, (void *) &usa->sin.sin_addr, buf, len); #endif } // Print error message to the opened error log stream. static void cry(struct mg_connection *conn, const char *fmt, ...) { char buf[MG_BUF_LEN], src_addr[20]; va_list ap; FILE *fp; time_t timestamp; va_start(ap, fmt); (void) vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); // Do not lock when getting the callback value, here and below. // I suppose this is fine, since function cannot disappear in the // same way string option can. conn->request_info.log_message = buf; if (call_user(conn, MG_EVENT_LOG) == NULL) { fp = conn->ctx->config[ERROR_LOG_FILE] == NULL ? 
NULL : mg_fopen(conn->ctx->config[ERROR_LOG_FILE], "a+"); if (fp != NULL) { flockfile(fp); timestamp = time(NULL); sockaddr_to_string(src_addr, sizeof(src_addr), &conn->client.rsa); fprintf(fp, "[%010lu] [error] [client %s] ", (unsigned long) timestamp, src_addr); if (conn->request_info.request_method != NULL) { fprintf(fp, "%s %s: ", conn->request_info.request_method, conn->request_info.uri); } (void) fprintf(fp, "%s", buf); fputc('\n', fp); funlockfile(fp); if (fp != stderr) { fclose(fp); } } } conn->request_info.log_message = NULL; } // Return fake connection structure. Used for logging, if connection // is not applicable at the moment of logging. static struct mg_connection *fc(struct mg_context *ctx) { static struct mg_connection fake_connection; fake_connection.ctx = ctx; return &fake_connection; } const char *mg_version(void) { return MONGOOSE_VERSION; } const struct mg_request_info *mg_get_request_info(struct mg_connection *conn) { return &conn->request_info; } static void mg_strlcpy(register char *dst, register const char *src, size_t n) { for (; *src != '\0' && n > 1; n--) { *dst++ = *src++; } *dst = '\0'; } static int lowercase(const char *s) { return tolower(* (const unsigned char *) s); } static int mg_strncasecmp(const char *s1, const char *s2, size_t len) { int diff = 0; if (len > 0) do { diff = lowercase(s1++) - lowercase(s2++); } while (diff == 0 && s1[-1] != '\0' && --len > 0); return diff; } static int mg_strcasecmp(const char *s1, const char *s2) { int diff; do { diff = lowercase(s1++) - lowercase(s2++); } while (diff == 0 && s1[-1] != '\0'); return diff; } static char * mg_strndup(const char *ptr, size_t len) { char *p; if ((p = (char *) malloc(len + 1)) != NULL) { mg_strlcpy(p, ptr, len + 1); } return p; } static char * mg_strdup(const char *str) { return mg_strndup(str, strlen(str)); } // Like snprintf(), but never returns negative value, or the value // that is larger than a supplied buffer. 
// Thanks to Adam Zeldis to pointing snprintf()-caused vulnerability // in his audit report. static int mg_vsnprintf(struct mg_connection *conn, char *buf, size_t buflen, const char *fmt, va_list ap) { int n; if (buflen == 0) return 0; n = vsnprintf(buf, buflen, fmt, ap); if (n < 0) { cry(conn, "vsnprintf error"); n = 0; } else if (n >= (int) buflen) { cry(conn, "truncating vsnprintf buffer: [%.*s]", n > 200 ? 200 : n, buf); n = (int) buflen - 1; } buf[n] = '\0'; return n; } static int mg_snprintf(struct mg_connection *conn, char *buf, size_t buflen, const char *fmt, ...) { va_list ap; int n; va_start(ap, fmt); n = mg_vsnprintf(conn, buf, buflen, fmt, ap); va_end(ap); return n; } // Skip the characters until one of the delimiters characters found. // 0-terminate resulting word. Skip the delimiter and following whitespaces if any. // Advance pointer to buffer to the next word. Return found 0-terminated word. // Delimiters can be quoted with quotechar. static char *skip_quoted(char **buf, const char *delimiters, const char *whitespace, char quotechar) { char *p, *begin_word, *end_word, *end_whitespace; begin_word = *buf; end_word = begin_word + strcspn(begin_word, delimiters); // Check for quotechar if (end_word > begin_word) { p = end_word - 1; while (*p == quotechar) { // If there is anything beyond end_word, copy it if (*end_word == '\0') { *p = '\0'; break; } else { size_t end_off = strcspn(end_word + 1, delimiters); memmove (p, end_word, end_off + 1); p += end_off; // p must correspond to end_word - 1 end_word += end_off + 1; } } for (p++; p < end_word; p++) { *p = '\0'; } } if (*end_word == '\0') { *buf = end_word; } else { end_whitespace = end_word + 1 + strspn(end_word + 1, whitespace); for (p = end_word; p < end_whitespace; p++) { *p = '\0'; } *buf = end_whitespace; } return begin_word; } // Simplified version of skip_quoted without quote char // and whitespace == delimiters static char *skip(char **buf, const char *delimiters) { return skip_quoted(buf, 
delimiters, delimiters, 0); } // Return HTTP header value, or NULL if not found. static const char *get_header(const struct mg_request_info *ri, const char *name) { int i; for (i = 0; i < ri->num_headers; i++) if (!mg_strcasecmp(name, ri->http_headers[i].name)) return ri->http_headers[i].value; return NULL; } const char *mg_get_header(const struct mg_connection *conn, const char *name) { return get_header(&conn->request_info, name); } // A helper function for traversing comma separated list of values. // It returns a list pointer shifted to the next value, of NULL if the end // of the list found. // Value is stored in val vector. If value has form "x=y", then eq_val // vector is initialized to point to the "y" part, and val vector length // is adjusted to point only to "x". static const char *next_option(const char *list, struct vec *val, struct vec *eq_val) { if (list == NULL || *list == '\0') { // End of the list list = NULL; } else { val->ptr = list; if ((list = strchr(val->ptr, ',')) != NULL) { // Comma found. Store length and shift the list ptr val->len = list - val->ptr; list++; } else { // This value is the last one list = val->ptr + strlen(val->ptr); val->len = list - val->ptr; } if (eq_val != NULL) { // Value has form "x=y", adjust pointers and lengths // so that val points to "x", and eq_val points to "y". eq_val->len = 0; eq_val->ptr = (const char *) memchr(val->ptr, '=', val->len); if (eq_val->ptr != NULL) { eq_val->ptr++; // Skip over '=' character eq_val->len = val->ptr + val->len - eq_val->ptr; val->len = (eq_val->ptr - val->ptr) - 1; } } } return list; } static int match_prefix(const char *pattern, int pattern_len, const char *str) { const char *or_str; int i, j, len, res; if ((or_str = (const char *) memchr(pattern, '|', pattern_len)) != NULL) { res = match_prefix(pattern, or_str - pattern, str); return res > 0 ? 
res : match_prefix(or_str + 1, (pattern + pattern_len) - (or_str + 1), str); } i = j = 0; res = -1; for (; i < pattern_len; i++, j++) { if (pattern[i] == '?' && str[j] != '\0') { continue; } else if (pattern[i] == '$') { return str[j] == '\0' ? j : -1; } else if (pattern[i] == '*') { i++; if (pattern[i] == '*') { i++; len = strlen(str + j); } else { len = strcspn(str + j, "/"); } if (i == pattern_len) { return j + len; } do { res = match_prefix(pattern + i, pattern_len - i, str + j + len); } while (res == -1 && len-- > 0); return res == -1 ? -1 : j + res + len; } else if (pattern[i] != str[j]) { return -1; } } return j; } // HTTP 1.1 assumes keep alive if "Connection:" header is not set // This function must tolerate situations when connection info is not // set up, for example if request parsing failed. static int should_keep_alive(const struct mg_connection *conn) { const char *http_version = conn->request_info.http_version; const char *header = mg_get_header(conn, "Connection"); if (conn->must_close || conn->request_info.status_code == 401 || mg_strcasecmp(conn->ctx->config[ENABLE_KEEP_ALIVE], "yes") != 0 || (header != NULL && mg_strcasecmp(header, "keep-alive") != 0) || (header == NULL && http_version && strcmp(http_version, "1.1"))) { return 0; } return 1; } static const char *suggest_connection_header(const struct mg_connection *conn) { return should_keep_alive(conn) ? "keep-alive" : "close"; } static void send_http_error(struct mg_connection *conn, int status, const char *reason, const char *fmt, ...) 
{ char buf[MG_BUF_LEN]; va_list ap; int len; conn->request_info.status_code = status; if (call_user(conn, MG_HTTP_ERROR) == NULL) { buf[0] = '\0'; len = 0; // Errors 1xx, 204 and 304 MUST NOT send a body if (status > 199 && status != 204 && status != 304) { len = mg_snprintf(conn, buf, sizeof(buf), "Error %d: %s", status, reason); buf[len++] = '\n'; va_start(ap, fmt); len += mg_vsnprintf(conn, buf + len, sizeof(buf) - len, fmt, ap); va_end(ap); } DEBUG_TRACE(("[%s]", buf)); mg_printf(conn, "HTTP/1.1 %d %s\r\n" "Content-Type: text/plain\r\n" "Content-Length: %d\r\n" "Connection: %s\r\n\r\n", status, reason, len, suggest_connection_header(conn)); conn->num_bytes_sent += mg_printf(conn, "%s", buf); } } #if defined(_WIN32) && !defined(__SYMBIAN32__) static int pthread_mutex_init(pthread_mutex_t *mutex, void *unused) { unused = NULL; *mutex = CreateMutex(NULL, FALSE, NULL); return *mutex == NULL ? -1 : 0; } static int pthread_mutex_destroy(pthread_mutex_t *mutex) { return CloseHandle(*mutex) == 0 ? -1 : 0; } static int pthread_mutex_lock(pthread_mutex_t *mutex) { return WaitForSingleObject(*mutex, INFINITE) == WAIT_OBJECT_0? 0 : -1; } static int pthread_mutex_unlock(pthread_mutex_t *mutex) { return ReleaseMutex(*mutex) == 0 ? -1 : 0; } static int pthread_cond_init(pthread_cond_t *cv, const void *unused) { unused = NULL; cv->signal = CreateEvent(NULL, FALSE, FALSE, NULL); cv->broadcast = CreateEvent(NULL, TRUE, FALSE, NULL); return cv->signal != NULL && cv->broadcast != NULL ? 0 : -1; } static int pthread_cond_wait(pthread_cond_t *cv, pthread_mutex_t *mutex) { HANDLE handles[] = {cv->signal, cv->broadcast}; ReleaseMutex(*mutex); WaitForMultipleObjects(2, handles, FALSE, INFINITE); return WaitForSingleObject(*mutex, INFINITE) == WAIT_OBJECT_0? 0 : -1; } static int pthread_cond_signal(pthread_cond_t *cv) { return SetEvent(cv->signal) == 0 ? 
-1 : 0; } static int pthread_cond_broadcast(pthread_cond_t *cv) { // Implementation with PulseEvent() has race condition, see // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html return PulseEvent(cv->broadcast) == 0 ? -1 : 0; } static int pthread_cond_destroy(pthread_cond_t *cv) { return CloseHandle(cv->signal) && CloseHandle(cv->broadcast) ? 0 : -1; } // For Windows, change all slashes to backslashes in path names. static void change_slashes_to_backslashes(char *path) { int i; for (i = 0; path[i] != '\0'; i++) { if (path[i] == '/') path[i] = '\\'; // i > 0 check is to preserve UNC paths, like \\server\file.txt if (path[i] == '\\' && i > 0) while (path[i + 1] == '\\' || path[i + 1] == '/') (void) memmove(path + i + 1, path + i + 2, strlen(path + i + 1)); } } // Encode 'path' which is assumed UTF-8 string, into UNICODE string. // wbuf and wbuf_len is a target buffer and its length. static void to_unicode(const char *path, wchar_t *wbuf, size_t wbuf_len) { char buf[PATH_MAX], buf2[PATH_MAX], *p; mg_strlcpy(buf, path, sizeof(buf)); change_slashes_to_backslashes(buf); // Point p to the end of the file name p = buf + strlen(buf) - 1; // Trim trailing backslash character while (p > buf && *p == '\\' && p[-1] != ':') { *p-- = '\0'; } // Protect from CGI code disclosure. // This is very nasty hole. Windows happily opens files with // some garbage in the end of file name. So fopen("a.cgi ", "r") // actually opens "a.cgi", and does not return an error! if (*p == 0x20 || // No space at the end (*p == 0x2e && p > buf) || // No '.' but allow '.' as full path *p == 0x2b || // No '+' (*p & ~0x7f)) { // And generally no non-ascii chars (void) fprintf(stderr, "Rejecting suspicious path: [%s]", buf); wbuf[0] = L'\0'; } else { // Convert to Unicode and back. If doubly-converted string does not // match the original, something is fishy, reject. 
memset(wbuf, 0, wbuf_len * sizeof(wchar_t)); MultiByteToWideChar(CP_UTF8, 0, buf, -1, wbuf, (int) wbuf_len); WideCharToMultiByte(CP_UTF8, 0, wbuf, (int) wbuf_len, buf2, sizeof(buf2), NULL, NULL); if (strcmp(buf, buf2) != 0) { wbuf[0] = L'\0'; } } } #if defined(_WIN32_WCE) static time_t time(time_t *ptime) { time_t t; SYSTEMTIME st; FILETIME ft; GetSystemTime(&st); SystemTimeToFileTime(&st, &ft); t = SYS2UNIX_TIME(ft.dwLowDateTime, ft.dwHighDateTime); if (ptime != NULL) { *ptime = t; } return t; } static struct tm *localtime(const time_t *ptime, struct tm *ptm) { int64_t t = ((int64_t) *ptime) * RATE_DIFF + EPOCH_DIFF; FILETIME ft, lft; SYSTEMTIME st; TIME_ZONE_INFORMATION tzinfo; if (ptm == NULL) { return NULL; } * (int64_t *) &ft = t; FileTimeToLocalFileTime(&ft, &lft); FileTimeToSystemTime(&lft, &st); ptm->tm_year = st.wYear - 1900; ptm->tm_mon = st.wMonth - 1; ptm->tm_wday = st.wDayOfWeek; ptm->tm_mday = st.wDay; ptm->tm_hour = st.wHour; ptm->tm_min = st.wMinute; ptm->tm_sec = st.wSecond; ptm->tm_yday = 0; // hope nobody uses this ptm->tm_isdst = GetTimeZoneInformation(&tzinfo) == TIME_ZONE_ID_DAYLIGHT ? 1 : 0; return ptm; } static struct tm *gmtime(const time_t *ptime, struct tm *ptm) { // FIXME(lsm): fix this. return localtime(ptime, ptm); } static size_t strftime(char *dst, size_t dst_size, const char *fmt, const struct tm *tm) { (void) snprintf(dst, dst_size, "implement strftime() for WinCE"); return 0; } #endif static int mg_rename(const char* oldname, const char* newname) { wchar_t woldbuf[PATH_MAX]; wchar_t wnewbuf[PATH_MAX]; to_unicode(oldname, woldbuf, ARRAY_SIZE(woldbuf)); to_unicode(newname, wnewbuf, ARRAY_SIZE(wnewbuf)); return MoveFileW(woldbuf, wnewbuf) ? 
0 : -1; } static FILE *mg_fopen(const char *path, const char *mode) { wchar_t wbuf[PATH_MAX], wmode[20]; to_unicode(path, wbuf, ARRAY_SIZE(wbuf)); MultiByteToWideChar(CP_UTF8, 0, mode, -1, wmode, ARRAY_SIZE(wmode)); return _wfopen(wbuf, wmode); } static int mg_stat(const char *path, struct mgstat *stp) { int ok = -1; // Error wchar_t wbuf[PATH_MAX]; WIN32_FILE_ATTRIBUTE_DATA info; to_unicode(path, wbuf, ARRAY_SIZE(wbuf)); if (GetFileAttributesExW(wbuf, GetFileExInfoStandard, &info) != 0) { stp->size = MAKEUQUAD(info.nFileSizeLow, info.nFileSizeHigh); stp->mtime = SYS2UNIX_TIME(info.ftLastWriteTime.dwLowDateTime, info.ftLastWriteTime.dwHighDateTime); stp->is_directory = info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY; ok = 0; // Success } return ok; } static int mg_remove(const char *path) { wchar_t wbuf[PATH_MAX]; to_unicode(path, wbuf, ARRAY_SIZE(wbuf)); return DeleteFileW(wbuf) ? 0 : -1; } static int mg_mkdir(const char *path, int mode) { char buf[PATH_MAX]; wchar_t wbuf[PATH_MAX]; mode = 0; // Unused mg_strlcpy(buf, path, sizeof(buf)); change_slashes_to_backslashes(buf); (void) MultiByteToWideChar(CP_UTF8, 0, buf, -1, wbuf, sizeof(wbuf)); return CreateDirectoryW(wbuf, NULL) ? 0 : -1; } // Implementation of POSIX opendir/closedir/readdir for Windows. 
static DIR * opendir(const char *name) { DIR *dir = NULL; wchar_t wpath[PATH_MAX]; DWORD attrs; if (name == NULL) { SetLastError(ERROR_BAD_ARGUMENTS); } else if ((dir = (DIR *) malloc(sizeof(*dir))) == NULL) { SetLastError(ERROR_NOT_ENOUGH_MEMORY); } else { to_unicode(name, wpath, ARRAY_SIZE(wpath)); attrs = GetFileAttributesW(wpath); if (attrs != 0xFFFFFFFF && ((attrs & FILE_ATTRIBUTE_DIRECTORY) == FILE_ATTRIBUTE_DIRECTORY)) { (void) wcscat(wpath, L"\\*"); dir->handle = FindFirstFileW(wpath, &dir->info); dir->result.d_name[0] = '\0'; } else { free(dir); dir = NULL; } } return dir; } static int closedir(DIR *dir) { int result = 0; if (dir != NULL) { if (dir->handle != INVALID_HANDLE_VALUE) result = FindClose(dir->handle) ? 0 : -1; free(dir); } else { result = -1; SetLastError(ERROR_BAD_ARGUMENTS); } return result; } static struct dirent *readdir(DIR *dir) { struct dirent *result = 0; if (dir) { if (dir->handle != INVALID_HANDLE_VALUE) { result = &dir->result; (void) WideCharToMultiByte(CP_UTF8, 0, dir->info.cFileName, -1, result->d_name, sizeof(result->d_name), NULL, NULL); if (!FindNextFileW(dir->handle, &dir->info)) { (void) FindClose(dir->handle); dir->handle = INVALID_HANDLE_VALUE; } } else { SetLastError(ERROR_FILE_NOT_FOUND); } } else { SetLastError(ERROR_BAD_ARGUMENTS); } return result; } #define set_close_on_exec(fd) // No FD_CLOEXEC on Windows int mg_start_thread(mg_thread_func_t f, void *p) { return _beginthread((void (__cdecl *)(void *)) f, 0, p) == -1L ? 
-1 : 0; } static HANDLE dlopen(const char *dll_name, int flags) { wchar_t wbuf[PATH_MAX]; flags = 0; // Unused to_unicode(dll_name, wbuf, ARRAY_SIZE(wbuf)); return LoadLibraryW(wbuf); } #if !defined(NO_CGI) #define SIGKILL 0 static int kill(pid_t pid, int sig_num) { (void) TerminateProcess(pid, sig_num); (void) CloseHandle(pid); return 0; } static pid_t spawn_process(struct mg_connection *conn, const char *prog, char *envblk, char *envp[], int fd_stdin, int fd_stdout, const char *dir) { HANDLE me; char *p, *interp, cmdline[PATH_MAX], buf[PATH_MAX]; FILE *fp; STARTUPINFOA si = { sizeof(si) }; PROCESS_INFORMATION pi = { 0 }; envp = NULL; // Unused // TODO(lsm): redirect CGI errors to the error log file si.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW; si.wShowWindow = SW_HIDE; me = GetCurrentProcess(); (void) DuplicateHandle(me, (HANDLE) _get_osfhandle(fd_stdin), me, &si.hStdInput, 0, TRUE, DUPLICATE_SAME_ACCESS); (void) DuplicateHandle(me, (HANDLE) _get_osfhandle(fd_stdout), me, &si.hStdOutput, 0, TRUE, DUPLICATE_SAME_ACCESS); // If CGI file is a script, try to read the interpreter line interp = conn->ctx->config[CGI_INTERPRETER]; if (interp == NULL) { buf[2] = '\0'; mg_snprintf(conn, cmdline, sizeof(cmdline), "%s%c%s", dir, DIRSEP, prog); if ((fp = fopen(cmdline, "r")) != NULL) { (void) fgets(buf, sizeof(buf), fp); if (buf[0] != '#' || buf[1] != '!') { // First line does not start with "#!". Do not set interpreter. buf[2] = '\0'; } else { // Trim whitespaces in interpreter name for (p = &buf[strlen(buf) - 1]; p > buf && isspace(*p); p--) { *p = '\0'; } } (void) fclose(fp); } interp = buf + 2; } (void) mg_snprintf(conn, cmdline, sizeof(cmdline), "%s%s%s%c%s", interp, interp[0] == '\0' ? 
"" : " ", dir, DIRSEP, prog); DEBUG_TRACE(("Running [%s]", cmdline)); if (CreateProcessA(NULL, cmdline, NULL, NULL, TRUE, CREATE_NEW_PROCESS_GROUP, envblk, dir, &si, &pi) == 0) { cry(conn, "%s: CreateProcess(%s): %d", __func__, cmdline, ERRNO); pi.hProcess = (pid_t) -1; } else { (void) close(fd_stdin); (void) close(fd_stdout); } (void) CloseHandle(si.hStdOutput); (void) CloseHandle(si.hStdInput); (void) CloseHandle(pi.hThread); return (pid_t) pi.hProcess; } #endif // !NO_CGI static int set_non_blocking_mode(SOCKET sock) { unsigned long on = 1; return ioctlsocket(sock, FIONBIO, &on); } #else static int mg_stat(const char *path, struct mgstat *stp) { struct stat st; int ok; if (stat(path, &st) == 0) { ok = 0; stp->size = st.st_size; stp->mtime = st.st_mtime; stp->is_directory = S_ISDIR(st.st_mode); } else { ok = -1; } return ok; } static void set_close_on_exec(int fd) { (void) fcntl(fd, F_SETFD, FD_CLOEXEC); } int mg_start_thread(mg_thread_func_t func, void *param) { pthread_t thread_id; pthread_attr_t attr; (void) pthread_attr_init(&attr); (void) pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); // TODO(lsm): figure out why mongoose dies on Linux if next line is enabled // (void) pthread_attr_setstacksize(&attr, sizeof(struct mg_connection) * 5); return pthread_create(&thread_id, &attr, func, param); } #ifndef NO_CGI static pid_t spawn_process(struct mg_connection *conn, const char *prog, char *envblk, char *envp[], int fd_stdin, int fd_stdout, const char *dir) { pid_t pid; const char *interp; envblk = NULL; // Unused if ((pid = fork()) == -1) { // Parent send_http_error(conn, 500, http_500_error, "fork(): %s", strerror(ERRNO)); } else if (pid == 0) { // Child if (chdir(dir) != 0) { cry(conn, "%s: chdir(%s): %s", __func__, dir, strerror(ERRNO)); } else if (dup2(fd_stdin, 0) == -1) { cry(conn, "%s: dup2(%d, 0): %s", __func__, fd_stdin, strerror(ERRNO)); } else if (dup2(fd_stdout, 1) == -1) { cry(conn, "%s: dup2(%d, 1): %s", __func__, fd_stdout, 
strerror(ERRNO)); } else { (void) dup2(fd_stdout, 2); (void) close(fd_stdin); (void) close(fd_stdout); interp = conn->ctx->config[CGI_INTERPRETER]; if (interp == NULL) { (void) execle(prog, prog, NULL, envp); cry(conn, "%s: execle(%s): %s", __func__, prog, strerror(ERRNO)); } else { (void) execle(interp, interp, prog, NULL, envp); cry(conn, "%s: execle(%s %s): %s", __func__, interp, prog, strerror(ERRNO)); } } exit(EXIT_FAILURE); } else { // Parent. Close stdio descriptors (void) close(fd_stdin); (void) close(fd_stdout); } return pid; } #endif // !NO_CGI static int set_non_blocking_mode(SOCKET sock) { int flags; flags = fcntl(sock, F_GETFL, 0); (void) fcntl(sock, F_SETFL, flags | O_NONBLOCK); return 0; } #endif // _WIN32 // Write data to the IO channel - opened file descriptor, socket or SSL // descriptor. Return number of bytes written. static int64_t push(FILE *fp, SOCKET sock, SSL *ssl, const char *buf, int64_t len) { int64_t sent; int n, k; sent = 0; while (sent < len) { // How many bytes we send in this iteration k = len - sent > INT_MAX ? INT_MAX : (int) (len - sent); if (ssl != NULL) { n = SSL_write(ssl, buf + sent, k); } else if (fp != NULL) { n = fwrite(buf + sent, 1, (size_t) k, fp); if (ferror(fp)) n = -1; } else { n = send(sock, buf + sent, (size_t) k, MSG_NOSIGNAL); } if (n < 0) break; sent += n; } return sent; } // Read from IO channel - opened file descriptor, socket, or SSL descriptor. // Return number of bytes read. static int pull(FILE *fp, SOCKET sock, SSL *ssl, char *buf, int len) { int nread; if (ssl != NULL) { nread = SSL_read(ssl, buf, len); } else if (fp != NULL) { // Use read() instead of fread(), because if we're reading from the CGI // pipe, fread() may block until IO buffer is filled up. We cannot afford // to block and must pass all read bytes immediately to the client. 
nread = read(fileno(fp), buf, (size_t) len); if (ferror(fp)) nread = -1; } else { nread = recv(sock, buf, (size_t) len, 0); } return nread; } int mg_read(struct mg_connection *conn, void *buf, size_t len) { int n, buffered_len, nread; const char *buffered; assert((conn->content_len == -1 && conn->consumed_content == 0) || conn->consumed_content <= conn->content_len); DEBUG_TRACE(("%p %zu %lld %lld", buf, len, conn->content_len, conn->consumed_content)); nread = 0; if (conn->consumed_content < conn->content_len) { // Adjust number of bytes to read. int64_t to_read = conn->content_len - conn->consumed_content; if (to_read < (int64_t) len) { len = (int) to_read; } // How many bytes of data we have buffered in the request buffer? buffered = conn->buf + conn->request_len + conn->consumed_content; buffered_len = conn->data_len - conn->request_len; assert(buffered_len >= 0); // Return buffered data back if we haven't done that yet. if (conn->consumed_content < (int64_t) buffered_len) { buffered_len -= (int) conn->consumed_content; if (len < (size_t) buffered_len) { buffered_len = len; } memcpy(buf, buffered, (size_t)buffered_len); len -= buffered_len; buf = (char *) buf + buffered_len; conn->consumed_content += buffered_len; nread = buffered_len; } // We have returned all buffered data. Read new data from the remote socket. while (len > 0) { n = pull(NULL, conn->client.sock, conn->ssl, (char *) buf, (int) len); if (n <= 0) { break; } buf = (char *) buf + n; conn->consumed_content += n; nread += n; len -= n; } } return nread; } int mg_write(struct mg_connection *conn, const void *buf, size_t len) { return (int) push(NULL, conn->client.sock, conn->ssl, (const char *) buf, (int64_t) len); } int mg_printf(struct mg_connection *conn, const char *fmt, ...) { char buf[MG_BUF_LEN]; int len; va_list ap; va_start(ap, fmt); len = mg_vsnprintf(conn, buf, sizeof(buf), fmt, ap); va_end(ap); return mg_write(conn, buf, (size_t)len); } // URL-decode input buffer into destination buffer. 
// 0-terminate the destination buffer. Return the length of decoded data.
// form-url-encoded data differs from URI encoding in a way that it
// uses '+' as character for space, see RFC 1866 section 8.2.1
// http://ftp.ics.uci.edu/pub/ietf/html/rfc1866.txt
static size_t url_decode(const char *src, size_t src_len, char *dst,
                         size_t dst_len, int is_form_url_encoded) {
  size_t i, j;
  int a, b;
// Hex digit to value; relies on tolower(): 'a' - 'W' == 10.
#define HEXTOI(x) (isdigit(x) ? x - '0' : x - 'W')

  for (i = j = 0; i < src_len && j < dst_len - 1; i++, j++) {
    if (src[i] == '%' &&
        isxdigit(* (const unsigned char *) (src + i + 1)) &&
        isxdigit(* (const unsigned char *) (src + i + 2))) {
      a = tolower(* (const unsigned char *) (src + i + 1));
      b = tolower(* (const unsigned char *) (src + i + 2));
      dst[j] = (char) ((HEXTOI(a) << 4) | HEXTOI(b));
      i += 2;
    } else if (is_form_url_encoded && src[i] == '+') {
      dst[j] = ' ';
    } else {
      dst[j] = src[i];
    }
  }

  dst[j] = '\0'; // Null-terminate the destination
  return j;
}

// Scan given buffer and fetch the value of the given variable.
// It can be specified in query string, or in the POST data.
// Decode the value into dst (0-terminated). Return the decoded length,
// or -1 if the variable was not found or does not fit into dst.
int mg_get_var(const char *buf, size_t buf_len, const char *name,
               char *dst, size_t dst_len) {
  const char *p, *e, *s;
  size_t name_len, len;

  name_len = strlen(name);
  e = buf + buf_len;
  // NOTE(review): len is size_t; -1 here is SIZE_MAX and is narrowed back to
  // int -1 by the return statement — works, but relies on the conversion.
  len = -1;
  dst[0] = '\0';

  // buf is "var1=val1&var2=val2...". Find variable first
  for (p = buf; p != NULL && p + name_len < e; p++) {
    if ((p == buf || p[-1] == '&') && p[name_len] == '=' &&
        !mg_strncasecmp(name, p, name_len)) {

      // Point p to variable value
      p += name_len + 1;

      // Point s to the end of the value
      s = (const char *) memchr(p, '&', (size_t)(e - p));
      if (s == NULL) {
        s = e;
      }
      assert(s >= p);

      // Decode variable into destination buffer
      if ((size_t) (s - p) < dst_len) {
        len = url_decode(p, (size_t)(s - p), dst, dst_len, 1);
      }
      break;
    }
  }

  return len;
}

// Fetch a cookie value from the Cookie: header into dst. Return the stored
// length on success, 0 when there is no Cookie header, -1 when the named
// cookie is absent or does not fit into dst.
int mg_get_cookie(const struct mg_connection *conn, const char *cookie_name,
                  char *dst, size_t dst_size) {
  const char *s, *p, *end;
  int name_len, len = -1;

  dst[0] = '\0';
  if ((s = mg_get_header(conn, "Cookie")) == NULL) {
    return 0;
  }

  name_len = strlen(cookie_name);
  end = s + strlen(s);

  for (; (s = strstr(s, cookie_name)) != NULL; s += name_len)
    if (s[name_len] == '=') {
      s += name_len + 1;
      if ((p = strchr(s, ' ')) == NULL)
        p = end;
      if (p[-1] == ';')
        p--;
      // Strip surrounding double quotes, if present.
      if (*s == '"' && p[-1] == '"' && p > s + 1) {
        s++;
        p--;
      }
      if ((size_t) (p - s) < dst_size) {
        len = (p - s) + 1;
        mg_strlcpy(dst, s, (size_t)len);
      }
      break;
    }
  return len;
}

// Map a request URI to a filesystem path: document_root + uri, with the
// url_rewrite_patterns option applied. Stats the result into *st and
// returns the mg_stat() result (0 on success).
static int convert_uri_to_file_name(struct mg_connection *conn, char *buf,
                                    size_t buf_len, struct mgstat *st) {
  struct vec a, b;
  const char *rewrite, *uri = conn->request_info.uri;
  char *p;
  int match_len, stat_result;

  buf_len--;  // This is because memmove() for PATH_INFO may shift part
              // of the path one byte on the right.
  mg_snprintf(conn, buf, buf_len, "%s%s", conn->ctx->config[DOCUMENT_ROOT],
              uri);

  rewrite = conn->ctx->config[REWRITE];
  while ((rewrite = next_option(rewrite, &a, &b)) != NULL) {
    if ((match_len = match_prefix(a.ptr, a.len, uri)) > 0) {
      mg_snprintf(conn, buf, buf_len, "%.*s%s", b.len, b.ptr,
                  uri + match_len);
      break;
    }
  }

#if defined(_WIN32) && !defined(__SYMBIAN32__)
  //change_slashes_to_backslashes(buf);
#endif // _WIN32

  if ((stat_result = mg_stat(buf, st)) != 0) {
    // Support PATH_INFO for CGI scripts.
for (p = buf + strlen(buf); p > buf + 1; p--) { if (*p == '/') { *p = '\0'; if (match_prefix(conn->ctx->config[CGI_EXTENSIONS], strlen(conn->ctx->config[CGI_EXTENSIONS]), buf) > 0 && (stat_result = mg_stat(buf, st)) == 0) { // Shift PATH_INFO block one character right, e.g. // "/x.cgi/foo/bar\x00" => "/x.cgi\x00/foo/bar\x00" // conn->path_info is pointing to the local variable "path" declared // in handle_request(), so PATH_INFO not valid after // handle_request returns. conn->path_info = p + 1; memmove(p + 2, p + 1, strlen(p + 1) + 1); // +1 is for trailing \0 p[1] = '/'; break; } else { *p = '/'; stat_result = -1; } } } } return stat_result; } static int sslize(struct mg_connection *conn, SSL_CTX *s, int (*func)(SSL *)) { return (conn->ssl = SSL_new(s)) != NULL && SSL_set_fd(conn->ssl, conn->client.sock) == 1 && func(conn->ssl) == 1; } // Check whether full request is buffered. Return: // -1 if request is malformed // 0 if request is not yet fully buffered // >0 actual request length, including last \r\n\r\n static int get_request_len(const char *buf, int buflen) { const char *s, *e; int len = 0; DEBUG_TRACE(("buf: %p, len: %d", buf, buflen)); for (s = buf, e = s + buflen - 1; len <= 0 && s < e; s++) // Control characters are not allowed but >=128 is. if (!isprint(* (const unsigned char *) s) && *s != '\r' && *s != '\n' && * (const unsigned char *) s < 128) { len = -1; } else if (s[0] == '\n' && s[1] == '\n') { len = (int) (s - buf) + 2; } else if (s[0] == '\n' && &s[1] < e && s[1] == '\r' && s[2] == '\n') { len = (int) (s - buf) + 3; } return len; } // Convert month to the month number. Return -1 on error, or month number static int get_month_index(const char *s) { size_t i; for (i = 0; i < ARRAY_SIZE(month_names); i++) if (!strcmp(s, month_names[i])) return (int) i; return -1; } // Parse UTC date-time string, and return the corresponding time_t value. 
static time_t parse_date_string(const char *datetime) { static const unsigned short days_before_month[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; char month_str[32]; int second, minute, hour, day, month, year, leap_days, days; time_t result = (time_t) 0; if (((sscanf(datetime, "%d/%3s/%d %d:%d:%d", &day, month_str, &year, &hour, &minute, &second) == 6) || (sscanf(datetime, "%d %3s %d %d:%d:%d", &day, month_str, &year, &hour, &minute, &second) == 6) || (sscanf(datetime, "%*3s, %d %3s %d %d:%d:%d", &day, month_str, &year, &hour, &minute, &second) == 6) || (sscanf(datetime, "%d-%3s-%d %d:%d:%d", &day, month_str, &year, &hour, &minute, &second) == 6)) && year > 1970 && (month = get_month_index(month_str)) != -1) { year -= 1970; leap_days = year / 4 - year / 100 + year / 400; days = year * 365 + days_before_month[month] + (day - 1) + leap_days; result = days * 24 * 3600 + hour * 3600 + minute * 60 + second; } return result; } // Protect against directory disclosure attack by removing '..', // excessive '/' and '\' characters static void remove_double_dots_and_double_slashes(char *s) { char *p = s; while (*s != '\0') { *p++ = *s++; if (IS_DIRSEP_CHAR(s[-1])) { // Skip all following slashes and backslashes while (IS_DIRSEP_CHAR(s[0])) { s++; } // Skip all double-dots while (*s == '.' 
&& s[1] == '.') { s += 2; } } } *p = '\0'; } static const struct { const char *extension; size_t ext_len; const char *mime_type; } builtin_mime_types[] = { {".html", 5, "text/html"}, {".htm", 4, "text/html"}, {".shtm", 5, "text/html"}, {".shtml", 6, "text/html"}, {".css", 4, "text/css"}, {".js", 3, "application/x-javascript"}, {".ico", 4, "image/x-icon"}, {".gif", 4, "image/gif"}, {".jpg", 4, "image/jpeg"}, {".jpeg", 5, "image/jpeg"}, {".png", 4, "image/png"}, {".svg", 4, "image/svg+xml"}, {".txt", 4, "text/plain"}, {".torrent", 8, "application/x-bittorrent"}, {".wav", 4, "audio/x-wav"}, {".mp3", 4, "audio/x-mp3"}, {".mid", 4, "audio/mid"}, {".m3u", 4, "audio/x-mpegurl"}, {".ram", 4, "audio/x-pn-realaudio"}, {".xml", 4, "text/xml"}, {".json", 5, "text/json"}, {".xslt", 5, "application/xml"}, {".ra", 3, "audio/x-pn-realaudio"}, {".doc", 4, "application/msword"}, {".exe", 4, "application/octet-stream"}, {".zip", 4, "application/x-zip-compressed"}, {".xls", 4, "application/excel"}, {".tgz", 4, "application/x-tar-gz"}, {".tar", 4, "application/x-tar"}, {".gz", 3, "application/x-gunzip"}, {".arj", 4, "application/x-arj-compressed"}, {".rar", 4, "application/x-arj-compressed"}, {".rtf", 4, "application/rtf"}, {".pdf", 4, "application/pdf"}, {".swf", 4, "application/x-shockwave-flash"}, {".mpg", 4, "video/mpeg"}, {".webm", 5, "video/webm"}, {".mpeg", 5, "video/mpeg"}, {".mp4", 4, "video/mp4"}, {".m4v", 4, "video/x-m4v"}, {".asf", 4, "video/x-ms-asf"}, {".avi", 4, "video/x-msvideo"}, {".bmp", 4, "image/bmp"}, {NULL, 0, NULL} }; const char *mg_get_builtin_mime_type(const char *path) { const char *ext; size_t i, path_len; path_len = strlen(path); for (i = 0; builtin_mime_types[i].extension != NULL; i++) { ext = path + (path_len - builtin_mime_types[i].ext_len); if (path_len > builtin_mime_types[i].ext_len && mg_strcasecmp(ext, builtin_mime_types[i].extension) == 0) { return builtin_mime_types[i].mime_type; } } return "text/plain"; } // Look at the "path" extension and figure 
what mime type it has. // Store mime type in the vector. static void get_mime_type(struct mg_context *ctx, const char *path, struct vec *vec) { struct vec ext_vec, mime_vec; const char *list, *ext; size_t path_len; path_len = strlen(path); // Scan user-defined mime types first, in case user wants to // override default mime types. list = ctx->config[EXTRA_MIME_TYPES]; while ((list = next_option(list, &ext_vec, &mime_vec)) != NULL) { // ext now points to the path suffix ext = path + path_len - ext_vec.len; if (mg_strncasecmp(ext, ext_vec.ptr, ext_vec.len) == 0) { *vec = mime_vec; return; } } vec->ptr = mg_get_builtin_mime_type(path); vec->len = strlen(vec->ptr); } #ifndef HAVE_MD5 typedef struct MD5Context { uint32_t buf[4]; uint32_t bits[2]; unsigned char in[64]; } MD5_CTX; #if defined(__BYTE_ORDER) && (__BYTE_ORDER == 1234) #define byteReverse(buf, len) // Do nothing #else static void byteReverse(unsigned char *buf, unsigned longs) { uint32_t t; do { t = (uint32_t) ((unsigned) buf[3] << 8 | buf[2]) << 16 | ((unsigned) buf[1] << 8 | buf[0]); *(uint32_t *) buf = t; buf += 4; } while (--longs); } #endif #define F1(x, y, z) (z ^ (x & (y ^ z))) #define F2(x, y, z) F1(z, x, y) #define F3(x, y, z) (x ^ y ^ z) #define F4(x, y, z) (y ^ (x | ~z)) #define MD5STEP(f, w, x, y, z, data, s) \ ( w += f(x, y, z) + data, w = w<<s | w>>(32-s), w += x ) // Start MD5 accumulation. Set bit count to 0 and buffer to mysterious // initialization constants. 
static void MD5Init(MD5_CTX *ctx) {
  // Magic initialization constants from RFC 1321.
  ctx->buf[0] = 0x67452301;
  ctx->buf[1] = 0xefcdab89;
  ctx->buf[2] = 0x98badcfe;
  ctx->buf[3] = 0x10325476;

  ctx->bits[0] = 0;
  ctx->bits[1] = 0;
}

// The core of MD5: process one 64-byte block (16 little-endian words in
// 'in'), updating the running digest state in 'buf'.  The step order,
// constants and shift amounts follow RFC 1321 exactly.
static void MD5Transform(uint32_t buf[4], uint32_t const in[16]) {
  register uint32_t a, b, c, d;

  a = buf[0];
  b = buf[1];
  c = buf[2];
  d = buf[3];

  // Round 1
  MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
  MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
  MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
  MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
  MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
  MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
  MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
  MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
  MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
  MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
  MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
  MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
  MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
  MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
  MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
  MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);

  // Round 2
  MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
  MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
  MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
  MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
  MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
  MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
  MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
  MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
  MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
  MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
  MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
  MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
  MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
  MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
  MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
  MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);

  // Round 3
  MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
  MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
  MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
  MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
  MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
  MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
  MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
  MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
  MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
  MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
  MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
  MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
  MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
  MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
  MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
  MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);

  // Round 4
  MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
  MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
  MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
  MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
  MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
  MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
  MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
  MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
  MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
  MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
  MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
  MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
  MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
  MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
  MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
  MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);

  buf[0] += a;
  buf[1] += b;
  buf[2] += c;
  buf[3] += d;
}

// Feed 'len' bytes of message data into the digest.  Buffers partial
// blocks in ctx->in and runs MD5Transform on each full 64-byte block.
static void MD5Update(MD5_CTX *ctx, unsigned char const *buf, unsigned len) {
  uint32_t t;

  // Update the 64-bit bit counter (bits[0] low word, bits[1] high word).
  t = ctx->bits[0];
  if ((ctx->bits[0] = t + ((uint32_t) len << 3)) < t)
    ctx->bits[1]++;  // carry from low word
  ctx->bits[1] += len >> 29;

  t = (t >> 3) & 0x3f;  // bytes already buffered in ctx->in

  // Top up a previously started partial block first.
  if (t) {
    unsigned char *p = (unsigned char *) ctx->in + t;

    t = 64 - t;
    if (len < t) {
      memcpy(p, buf, len);
      return;
    }
    memcpy(p, buf, t);
    byteReverse(ctx->in, 16);
    MD5Transform(ctx->buf, (uint32_t *) ctx->in);
    buf += t;
    len -= t;
  }

  // Process the data in complete 64-byte chunks.
  while (len >= 64) {
    memcpy(ctx->in, buf, 64);
    byteReverse(ctx->in, 16);
    MD5Transform(ctx->buf, (uint32_t *) ctx->in);
    buf += 64;
    len -= 64;
  }

  // Stash the remaining bytes for the next call.
  memcpy(ctx->in, buf, len);
}

// Finalize the digest: append the 0x80 pad byte, zero-fill, append the
// 64-bit length, run the last transform(s) and emit the 16-byte digest.
// The context is wiped afterwards.
static void MD5Final(unsigned char digest[16], MD5_CTX *ctx) {
  unsigned count;
  unsigned char *p;

  count = (ctx->bits[0] >> 3) & 0x3F;  // bytes in the partial block

  p = ctx->in + count;
  *p++ = 0x80;  // mandatory padding byte
  count = 64 - 1 - count;  // bytes of padding still available

  // Not enough room for the 8-byte length: pad out this block,
  // transform, then fill a fresh block.
  if (count < 8) {
    memset(p, 0, count);
    byteReverse(ctx->in, 16);
    MD5Transform(ctx->buf, (uint32_t *) ctx->in);
    memset(ctx->in, 0, 56);
  } else {
    memset(p, 0, count - 8);
  }
  byteReverse(ctx->in, 14);

  // Append the message length in bits to the last two words.
  ((uint32_t *) ctx->in)[14] = ctx->bits[0];
  ((uint32_t *) ctx->in)[15] = ctx->bits[1];

  MD5Transform(ctx->buf, (uint32_t *) ctx->in);
  byteReverse((unsigned char *) ctx->buf, 4);
  memcpy(digest, ctx->buf, 16);
  memset((char *) ctx, 0, sizeof(*ctx));  // erase sensitive state
}
#endif // !HAVE_MD5

// Stringify binary data. Output buffer must be twice as big as input,
// because each byte takes 2 bytes in string representation
static void bin2str(char *to, const unsigned char *p, size_t len) {
  static const char *hex = "0123456789abcdef";

  for (; len--; p++) {
    *to++ = hex[p[0] >> 4];
    *to++ = hex[p[0] & 0x0f];
  }
  *to = '\0';
}

// Return stringified MD5 hash for list of vectors. Buffer must be 33 bytes.
void mg_md5(char *buf, ...)
{
  // Hash the NULL-terminated va_list of strings and hex-encode the
  // 16-byte digest into buf (32 hex chars + '\0').
  unsigned char hash[16];
  const char *p;
  va_list ap;
  MD5_CTX ctx;

  MD5Init(&ctx);

  va_start(ap, buf);
  while ((p = va_arg(ap, const char *)) != NULL) {
    MD5Update(&ctx, (const unsigned char *) p, (unsigned) strlen(p));
  }
  va_end(ap);

  MD5Final(hash, &ctx);
  bin2str(buf, hash, sizeof(hash));
}

// Check the user's password, return 1 if OK
// Implements HTTP Digest verification: HA2 = MD5(method:uri), expected
// response = MD5(HA1:nonce:nc:cnonce:qop:HA2), compared case-insensitively
// against the client-supplied response.
static int check_password(const char *method, const char *ha1, const char *uri,
                          const char *nonce, const char *nc, const char *cnonce,
                          const char *qop, const char *response) {
  char ha2[32 + 1], expected_response[32 + 1];

  // Some of the parameters may be NULL
  if (method == NULL || nonce == NULL || nc == NULL || cnonce == NULL ||
      qop == NULL || response == NULL) {
    return 0;
  }

  // NOTE(lsm): due to a bug in MSIE, we do not compare the URI
  // TODO(lsm): check for authentication timeout
  if (// strcmp(dig->uri, c->ouri) != 0 ||
      strlen(response) != 32
      // || now - strtoul(dig->nonce, NULL, 10) > 3600
      ) {
    return 0;
  }

  mg_md5(ha2, method, ":", uri, NULL);
  mg_md5(expected_response, ha1, ":", nonce, ":", nc,
      ":", cnonce, ":", qop, ":", ha2, NULL);

  return mg_strcasecmp(response, expected_response) == 0;
}

// Use the global passwords file, if specified by auth_gpass option,
// or search for .htpasswd in the requested directory.
// Returns an open FILE* (caller closes it) or NULL if none is found.
static FILE *open_auth_file(struct mg_connection *conn, const char *path) {
  struct mg_context *ctx = conn->ctx;
  char name[PATH_MAX];
  const char *p, *e;
  struct mgstat st;
  FILE *fp;

  if (ctx->config[GLOBAL_PASSWORDS_FILE] != NULL) {
    // Use global passwords file
    fp = mg_fopen(ctx->config[GLOBAL_PASSWORDS_FILE], "r");
    if (fp == NULL)
      cry(fc(ctx), "fopen(%s): %s",
          ctx->config[GLOBAL_PASSWORDS_FILE], strerror(ERRNO));
  } else if (!mg_stat(path, &st) && st.is_directory) {
    // Requested path is a directory: look for the passwords file inside it.
    (void) mg_snprintf(conn, name, sizeof(name), "%s%c%s",
        path, DIRSEP, PASSWORDS_FILE_NAME);
    fp = mg_fopen(name, "r");
  } else {
    // Try to find .htpasswd in requested directory.
    // Walk back to the last directory separator to isolate the directory
    // part of the path.
    for (p = path, e = p + strlen(p) - 1; e > p; e--)
      if (IS_DIRSEP_CHAR(*e))
        break;
    (void) mg_snprintf(conn, name, sizeof(name), "%.*s%c%s",
        (int) (e - p), p, DIRSEP, PASSWORDS_FILE_NAME);
    fp = mg_fopen(name, "r");
  }

  return fp;
}

// Parsed Authorization header
struct ah {
  char *user, *uri, *cnonce, *response, *qop, *nc, *nonce;
};

// Parse a "Digest ..." Authorization header into ah.  The header value is
// copied into the caller-provided buf, and all ah fields point into that
// buffer (they stay valid only as long as buf does).  Also duplicates the
// user name into conn->request_info.remote_user for CGI.
// Returns 1 on success, 0 if the header is missing, not Digest, or has
// no username.
static int parse_auth_header(struct mg_connection *conn, char *buf,
                             size_t buf_size, struct ah *ah) {
  char *name, *value, *s;
  const char *auth_header;

  if ((auth_header = mg_get_header(conn, "Authorization")) == NULL ||
      mg_strncasecmp(auth_header, "Digest ", 7) != 0) {
    return 0;
  }

  // Make modifiable copy of the auth header
  (void) mg_strlcpy(buf, auth_header + 7, buf_size);

  s = buf;
  (void) memset(ah, 0, sizeof(*ah));

  // Parse authorization header
  for (;;) {
    // Gobble initial spaces
    while (isspace(* (unsigned char *) s)) {
      s++;
    }
    name = skip_quoted(&s, "=", " ", 0);
    // Value is either quote-delimited, or ends at first comma or space.
    if (s[0] == '\"') {
      s++;
      value = skip_quoted(&s, "\"", " ", '\\');
      if (s[0] == ',') {
        s++;
      }
    } else {
      value = skip_quoted(&s, ", ", " ", 0);  // IE uses commas, FF uses spaces
    }
    if (*name == '\0') {
      break;
    }

    if (!strcmp(name, "username")) {
      ah->user = value;
    } else if (!strcmp(name, "cnonce")) {
      ah->cnonce = value;
    } else if (!strcmp(name, "response")) {
      ah->response = value;
    } else if (!strcmp(name, "uri")) {
      ah->uri = value;
    } else if (!strcmp(name, "qop")) {
      ah->qop = value;
    } else if (!strcmp(name, "nc")) {
      ah->nc = value;
    } else if (!strcmp(name, "nonce")) {
      ah->nonce = value;
    }
  }

  // CGI needs it as REMOTE_USER
  if (ah->user != NULL) {
    conn->request_info.remote_user = mg_strdup(ah->user);
  } else {
    return 0;
  }

  return 1;
}

// Authorize against the opened passwords file. Return 1 if authorized.
static int authorize(struct mg_connection *conn, FILE *fp) {
  struct ah ah;
  char line[256], f_user[256], ha1[256], f_domain[256], buf[MG_BUF_LEN];

  if (!parse_auth_header(conn, buf, sizeof(buf), &ah)) {
    return 0;
  }

  // Loop over passwords file
  while (fgets(line, sizeof(line), fp) != NULL) {
    // Each line has the htdigest format "user:domain:ha1".
    if (sscanf(line, "%[^:]:%[^:]:%s", f_user, f_domain, ha1) != 3) {
      continue;
    }

    if (!strcmp(ah.user, f_user) &&
        !strcmp(conn->ctx->config[AUTHENTICATION_DOMAIN], f_domain))
      return check_password(
            conn->request_info.request_method,
            ha1, ah.uri, ah.nonce, ah.nc, ah.cnonce, ah.qop,
            ah.response);
  }

  return 0;
}

// Return 1 if request is authorised, 0 otherwise.
// A URI matching a PROTECT_URI entry is checked against that entry's
// passwords file; otherwise the per-directory/global file (if any) is used.
// With no applicable passwords file at all, access is allowed.
static int check_authorization(struct mg_connection *conn, const char *path) {
  FILE *fp;
  char fname[PATH_MAX];
  struct vec uri_vec, filename_vec;
  const char *list;
  int authorized;

  fp = NULL;
  authorized = 1;

  list = conn->ctx->config[PROTECT_URI];
  while ((list = next_option(list, &uri_vec, &filename_vec)) != NULL) {
    if (!memcmp(conn->request_info.uri, uri_vec.ptr, uri_vec.len)) {
      (void) mg_snprintf(conn, fname, sizeof(fname), "%.*s",
          filename_vec.len, filename_vec.ptr);
      if ((fp = mg_fopen(fname, "r")) == NULL) {
        cry(conn, "%s: cannot open %s: %s", __func__, fname, strerror(errno));
      }
      break;
    }
  }

  if (fp == NULL) {
    fp = open_auth_file(conn, path);
  }

  if (fp != NULL) {
    authorized = authorize(conn, fp);
    (void) fclose(fp);
  }

  return authorized;
}

// Send a 401 response with a Digest WWW-Authenticate challenge.  The
// current time is used as the nonce.
static void send_authorization_request(struct mg_connection *conn) {
  conn->request_info.status_code = 401;
  (void) mg_printf(conn,
      "HTTP/1.1 401 Unauthorized\r\n"
      "Content-Length: 0\r\n"
      "WWW-Authenticate: Digest qop=\"auth\", "
      "realm=\"%s\", nonce=\"%lu\"\r\n\r\n",
      conn->ctx->config[AUTHENTICATION_DOMAIN],
      (unsigned long) time(NULL));
}

// Return 1 if the PUT/DELETE passwords file authorizes this request.
// With no PUT_DELETE_PASSWORDS_FILE configured, PUT/DELETE is denied.
static int is_authorized_for_put(struct mg_connection *conn) {
  FILE *fp;
  int ret = 0;

  fp = conn->ctx->config[PUT_DELETE_PASSWORDS_FILE] == NULL ? NULL :
    mg_fopen(conn->ctx->config[PUT_DELETE_PASSWORDS_FILE], "r");

  if (fp != NULL) {
    ret = authorize(conn, fp);
    (void) fclose(fp);
  }

  return ret;
}

// Add, update or (with an empty password) remove a user record in an
// htdigest-style passwords file.  Rewrites the file via a .tmp copy.
// Returns 1 on success, 0 if the file cannot be opened/created.
int mg_modify_passwords_file(const char *fname, const char *domain,
                             const char *user, const char *pass) {
  int found;
  char line[512], u[512], d[512], ha1[33], tmp[PATH_MAX];
  FILE *fp, *fp2;

  found = 0;
  fp = fp2 = NULL;

  // Regard empty password as no password - remove user record.
  if (pass != NULL && pass[0] == '\0') {
    pass = NULL;
  }

  (void) snprintf(tmp, sizeof(tmp), "%s.tmp", fname);

  // Create the file if does not exist
  if ((fp = mg_fopen(fname, "a+")) != NULL) {
    (void) fclose(fp);
  }

  // Open the given file and temporary file
  if ((fp = mg_fopen(fname, "r")) == NULL) {
    return 0;
  } else if ((fp2 = mg_fopen(tmp, "w+")) == NULL) {
    fclose(fp);
    return 0;
  }

  // Copy the stuff to temporary file
  while (fgets(line, sizeof(line), fp) != NULL) {
    if (sscanf(line, "%[^:]:%[^:]:%*s", u, d) != 2) {
      continue;
    }

    if (!strcmp(u, user) && !strcmp(d, domain)) {
      found++;
      if (pass != NULL) {
        // Replace the record with a freshly computed HA1.
        mg_md5(ha1, user, ":", domain, ":", pass, NULL);
        fprintf(fp2, "%s:%s:%s\n", user, domain, ha1);
      }
      // pass == NULL: drop the record (deletion).
    } else {
      (void) fprintf(fp2, "%s", line);
    }
  }

  // If new user, just add it
  if (!found && pass != NULL) {
    mg_md5(ha1, user, ":", domain, ":", pass, NULL);
    (void) fprintf(fp2, "%s:%s:%s\n", user, domain, ha1);
  }

  // Close files
  (void) fclose(fp);
  (void) fclose(fp2);

  // Put the temp file in place of real file
  (void) mg_remove(fname);
  (void) mg_rename(tmp, fname);

  return 1;
}

// One directory-listing entry, as produced by scan_directory().
struct de {
  struct mg_connection *conn;
  char *file_name;
  struct mgstat st;
};

// Percent-encode src into dst (dst_len includes room for the '\0').
// Alphanumerics and the characters in dont_escape pass through unchanged.
static void url_encode(const char *src, char *dst, size_t dst_len) {
  static const char *dont_escape = "._-$,;~()";
  static const char *hex = "0123456789abcdef";
  const char *end = dst + dst_len - 1;

  for (; *src != '\0' && dst < end; src++, dst++) {
    if (isalnum(*(const unsigned char *) src) ||
        strchr(dont_escape, * (const unsigned char *) src) != NULL) {
      *dst = *src;
    } else if (dst + 2 < end) {
      dst[0] = '%';
      dst[1] = hex[(* (const unsigned char *) src) >> 4];
      dst[2] = hex[(* (const unsigned char *) src) & 0xf];
      dst += 2;
    }
  }

  *dst = '\0';
}

// Emit one HTML table row for a directory listing entry: link, size
// (human-readable), and modification time.
static void print_dir_entry(struct de *de) {
  char size[64], mod[64], href[PATH_MAX];

  if (de->st.is_directory) {
    (void) mg_snprintf(de->conn, size, sizeof(size), "%s", "[DIRECTORY]");
  } else {
     // We use (signed) cast below because MSVC 6 compiler cannot
     // convert unsigned __int64 to double. Sigh.
    if (de->st.size < 1024) {
      (void) mg_snprintf(de->conn, size, sizeof(size),
          "%lu", (unsigned long) de->st.size);
    } else if (de->st.size < 1024 * 1024) {
      (void) mg_snprintf(de->conn, size, sizeof(size),
          "%.1fk", (double) de->st.size / 1024.0);
    } else if (de->st.size < 1024 * 1024 * 1024) {
      (void) mg_snprintf(de->conn, size, sizeof(size),
          "%.1fM", (double) de->st.size / 1048576);
    } else {
      (void) mg_snprintf(de->conn, size, sizeof(size),
          "%.1fG", (double) de->st.size / 1073741824);
    }
  }
  (void) strftime(mod, sizeof(mod), "%d-%b-%Y %H:%M",
      localtime(&de->st.mtime));
  url_encode(de->file_name, href, sizeof(href));
  de->conn->num_bytes_sent += mg_printf(de->conn,
      "<tr><td><a href=\"%s%s%s\">%s%s</a></td>"
      "<td>&nbsp;%s</td><td>&nbsp;&nbsp;%s</td></tr>\n",
      de->conn->request_info.uri, href, de->st.is_directory ? "/" : "",
      de->file_name, de->st.is_directory ? "/" : "", mod, size);
}

// This function is called from send_directory() and used for
// sorting directory entries by size, or name, or modification time.
// On windows, __cdecl specification is needed in case if project is built
// with __stdcall convention. qsort always requires __cdecl callback.
// qsort() comparator for directory entries.  The sort key and direction
// come from the first two characters of the query string: 'n' name,
// 's' size, 'd' date; a second character of 'd' reverses the order.
// Directories always sort before files.
static int WINCDECL compare_dir_entries(const void *p1, const void *p2) {
  const struct de *a = (const struct de *) p1, *b = (const struct de *) p2;
  const char *query_string = a->conn->request_info.query_string;
  int cmp_result = 0;

  if (query_string == NULL) {
    query_string = "na";  // default: by name, ascending
  }

  if (a->st.is_directory && !b->st.is_directory) {
    return -1;  // Always put directories on top
  } else if (!a->st.is_directory && b->st.is_directory) {
    return 1;   // Always put directories on top
  } else if (*query_string == 'n') {
    cmp_result = strcmp(a->file_name, b->file_name);
  } else if (*query_string == 's') {
    cmp_result = a->st.size == b->st.size ? 0 :
      a->st.size > b->st.size ? 1 : -1;
  } else if (*query_string == 'd') {
    cmp_result = a->st.mtime == b->st.mtime ? 0 :
      a->st.mtime > b->st.mtime ? 1 : -1;
  }

  return query_string[1] == 'd' ? -cmp_result : cmp_result;
}

// Return non-zero if 'path' must be hidden from listings: either it is a
// passwords file, or it matches the user-configured HIDE_FILES patterns.
static int must_hide_file(struct mg_connection *conn, const char *path) {
  const char *pw_pattern = "**" PASSWORDS_FILE_NAME "$";
  const char *pattern = conn->ctx->config[HIDE_FILES];
  return match_prefix(pw_pattern, strlen(pw_pattern), path) > 0 ||
    (pattern != NULL && match_prefix(pattern, strlen(pattern), path) > 0);
}

// Iterate over the entries of 'dir', invoking cb(&de, data) for each
// visible entry.  Returns 1 on success, 0 if the directory can't be opened.
static int scan_directory(struct mg_connection *conn, const char *dir,
                          void *data, void (*cb)(struct de *, void *)) {
  char path[PATH_MAX];
  struct dirent *dp;
  DIR *dirp;
  struct de de;

  if ((dirp = opendir(dir)) == NULL) {
    return 0;
  } else {
    de.conn = conn;

    while ((dp = readdir(dirp)) != NULL) {
      // Do not show current dir and hidden files
      if (!strcmp(dp->d_name, ".") ||
          !strcmp(dp->d_name, "..") ||
          must_hide_file(conn, dp->d_name)) {
        continue;
      }

      mg_snprintf(conn, path, sizeof(path), "%s%c%s", dir, DIRSEP,
          dp->d_name);

      // If we don't memset stat structure to zero, mtime will have
      // garbage and strftime() will segfault later on in
      // print_dir_entry(). memset is required only if mg_stat()
      // fails. For more details, see
      // http://code.google.com/p/mongoose/issues/detail?id=79
      if (mg_stat(path, &de.st) != 0) {
        memset(&de.st, 0, sizeof(de.st));
      }
      de.file_name = dp->d_name;

      cb(&de, data);
    }
    (void) closedir(dirp);
  }
  return 1;
}

// Growable array of directory entries collected by dir_scan_callback().
struct dir_scan_data {
  struct de *entries;
  int num_entries;
  int arr_size;
};

// scan_directory() callback: append a copy of the entry to the array,
// doubling its capacity when full.
static void dir_scan_callback(struct de *de, void *data) {
  struct dir_scan_data *dsd = (struct dir_scan_data *) data;

  if (dsd->entries == NULL || dsd->num_entries >= dsd->arr_size) {
    dsd->arr_size *= 2;
    // NOTE(review): if realloc fails, the previous block is leaked and
    // all collected entries are dropped — see TODO below.
    dsd->entries = (struct de *) realloc(dsd->entries, dsd->arr_size *
        sizeof(dsd->entries[0]));
  }
  if (dsd->entries == NULL) {
    // TODO(lsm): propagate an error to the caller
    dsd->num_entries = 0;
  } else {
    dsd->entries[dsd->num_entries].file_name = mg_strdup(de->file_name);
    dsd->entries[dsd->num_entries].st = de->st;
    dsd->entries[dsd->num_entries].conn = de->conn;
    dsd->num_entries++;
  }
}

// Generate and send an HTML directory index for 'dir', with sortable
// Name/Modified/Size column headers.
static void handle_directory_request(struct mg_connection *conn,
                                     const char *dir) {
  int i, sort_direction;
  struct dir_scan_data data = { NULL, 0, 128 };

  if (!scan_directory(conn, dir, &data, dir_scan_callback)) {
    send_http_error(conn, 500, "Cannot open directory",
        "Error: opendir(%s): %s", dir, strerror(ERRNO));
    return;
  }

  // Each column-header link toggles between ascending and descending.
  sort_direction = conn->request_info.query_string != NULL &&
    conn->request_info.query_string[1] == 'd' ? 'a' : 'd';

  conn->must_close = 1;
  mg_printf(conn, "%s",
            "HTTP/1.1 200 OK\r\n"
            "Connection: close\r\n"
            "Content-Type: text/html; charset=utf-8\r\n\r\n");

  conn->num_bytes_sent += mg_printf(conn,
      "<html><head><title>Index of %s</title>"
      "<style>th {text-align: left;}</style></head>"
      "<body><h1>Index of %s</h1><pre><table cellpadding=\"0\">"
      "<tr><th><a href=\"?n%c\">Name</a></th>"
      "<th><a href=\"?d%c\">Modified</a></th>"
      "<th><a href=\"?s%c\">Size</a></th></tr>"
      "<tr><td colspan=\"3\"><hr></td></tr>",
      conn->request_info.uri, conn->request_info.uri,
      sort_direction, sort_direction, sort_direction);

  // Print first entry - link to a parent directory
  conn->num_bytes_sent += mg_printf(conn,
      "<tr><td><a href=\"%s%s\">%s</a></td>"
      "<td>&nbsp;%s</td><td>&nbsp;&nbsp;%s</td></tr>\n",
      conn->request_info.uri, "..", "Parent directory", "-", "-");

  // Sort and print directory entries
  qsort(data.entries, (size_t) data.num_entries, sizeof(data.entries[0]),
        compare_dir_entries);
  for (i = 0; i < data.num_entries; i++) {
    print_dir_entry(&data.entries[i]);
    free(data.entries[i].file_name);
  }
  free(data.entries);

  conn->num_bytes_sent += mg_printf(conn, "%s", "</table></body></html>");
  conn->request_info.status_code = 200;
}

// Send len bytes from the opened file to the client.
static void send_file_data(struct mg_connection *conn, FILE *fp, int64_t len) {
  char buf[MG_BUF_LEN];
  int to_read, num_read, num_written;

  while (len > 0) {
    // Calculate how much to read from the file in the buffer
    to_read = sizeof(buf);
    if ((int64_t) to_read > len)
      to_read = (int) len;

    // Read from file, exit the loop on error
    if ((num_read = fread(buf, 1, (size_t)to_read, fp)) == 0)
      break;

    // Send read bytes to the client, exit the loop on error
    if ((num_written = mg_write(conn, buf, (size_t)num_read)) != num_read)
      break;

    // Both read and write were successful, adjust counters
    conn->num_bytes_sent += num_written;
    len -= num_written;
  }
}

// Parse a "Range: bytes=a-b" header into *a and *b.  Returns the number
// of successfully scanned values (1 = start only, 2 = start and end).
static int parse_range_header(const char *header, int64_t *a, int64_t *b) {
  return sscanf(header, "bytes=%" INT64_FMT "-%" INT64_FMT, a, b);
}

// Format *t as an RFC 1123 HTTP-date in UTC, e.g.
// "Sun, 06 Nov 1994 08:49:37 GMT".
static void gmt_time_string(char *buf, size_t buf_len, time_t *t) {
  strftime(buf, buf_len, "%a, %d %b %Y %H:%M:%S GMT", gmtime(t));
}

// Build a weak validator from the file's mtime and size, e.g. "4e5b2c.1a0".
static void construct_etag(char *buf, size_t buf_len,
                           const struct mgstat *stp) {
  snprintf(buf, buf_len, "\"%lx.%" INT64_FMT "\"",
      (unsigned long) stp->mtime, stp->size);
}

// Serve a regular file: honors Range requests (206), emits Date,
// Last-Modified, Etag and Content-Type headers, and skips the body for
// HEAD requests.
static void handle_file_request(struct mg_connection *conn, const char *path,
                                struct mgstat *stp) {
  char date[64], lm[64], etag[64], range[64];
  const char *msg = "OK", *hdr;
  time_t curtime = time(NULL);
  int64_t cl, r1, r2;
  struct vec mime_vec;
  FILE *fp;
  int n;

  get_mime_type(conn->ctx, path, &mime_vec);
  cl = stp->size;
  conn->request_info.status_code = 200;
  range[0] = '\0';

  if ((fp = mg_fopen(path, "rb")) == NULL) {
    send_http_error(conn, 500, http_500_error,
        "fopen(%s): %s", path, strerror(ERRNO));
    return;
  }
  set_close_on_exec(fileno(fp));

  // If Range: header specified, act accordingly
  r1 = r2 = 0;
  hdr = mg_get_header(conn, "Range");
  if (hdr != NULL && (n = parse_range_header(hdr, &r1, &r2)) > 0) {
    conn->request_info.status_code = 206;
    (void) fseeko(fp, r1, SEEK_SET);
    // n == 2: explicit end given; otherwise serve to end of file.
    cl = n == 2 ? r2 - r1 + 1: cl - r1;
    (void) mg_snprintf(conn, range, sizeof(range),
        "Content-Range: bytes "
        "%" INT64_FMT "-%"
        INT64_FMT "/%" INT64_FMT "\r\n",
        r1, r1 + cl - 1, stp->size);
    msg = "Partial Content";
  }

  // Prepare Etag, Date, Last-Modified headers. Must be in UTC, according to
  // http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3
  gmt_time_string(date, sizeof(date), &curtime);
  gmt_time_string(lm, sizeof(lm), &stp->mtime);
  construct_etag(etag, sizeof(etag), stp);

  (void) mg_printf(conn,
      "HTTP/1.1 %d %s\r\n"
      "Date: %s\r\n"
      "Last-Modified: %s\r\n"
      "Etag: %s\r\n"
      "Content-Type: %.*s\r\n"
      "Content-Length: %" INT64_FMT "\r\n"
      "Connection: %s\r\n"
      "Accept-Ranges: bytes\r\n"
      "%s\r\n",
      conn->request_info.status_code, msg, date, lm, etag,
      (int) mime_vec.len, mime_vec.ptr, cl,
      suggest_connection_header(conn), range);

  if (strcmp(conn->request_info.request_method, "HEAD") != 0) {
    send_file_data(conn, fp, cl);
  }
  (void) fclose(fp);
}

// Public API: stat the path and either serve the file or reply 404.
void mg_send_file(struct mg_connection *conn, const char *path) {
  struct mgstat st;
  if (mg_stat(path, &st) == 0) {
    handle_file_request(conn, path, &st);
  } else {
    send_http_error(conn, 404, "Not Found", "%s", "File not found");
  }
}

// Parse HTTP headers from the given buffer, advance buffer to the point
// where parsing stopped.
// Header names and values point into the (nul-terminated in place) buffer.
static void parse_http_headers(char **buf, struct mg_request_info *ri) {
  int i;

  for (i = 0; i < (int) ARRAY_SIZE(ri->http_headers); i++) {
    ri->http_headers[i].name = skip_quoted(buf, ":", " ", 0);
    ri->http_headers[i].value = skip(buf, "\r\n");
    if (ri->http_headers[i].name[0] == '\0')
      break;
    ri->num_headers = i + 1;
  }
}

// Return non-zero if 'method' is one of the HTTP methods this server
// understands.
static int is_valid_http_method(const char *method) {
  return !strcmp(method, "GET") || !strcmp(method, "POST") ||
    !strcmp(method, "HEAD") || !strcmp(method, "CONNECT") ||
    !strcmp(method, "PUT") || !strcmp(method, "DELETE") ||
    !strcmp(method, "OPTIONS") || !strcmp(method, "PROPFIND");
}

// Parse HTTP request, fill in mg_request_info structure.
// This function modifies the buffer with HTTP request by nul-terminating // HTTP request components, header names and header values. static int parse_http_message(char *buf, int len, struct mg_request_info *ri) { int request_length = get_request_len(buf, len); if (request_length > 0) { // Reset attributes. DO NOT TOUCH is_ssl, remote_ip, remote_port ri->remote_user = ri->request_method = ri->uri = ri->http_version = NULL; ri->num_headers = 0; ri->status_code = -1; buf[request_length - 1] = '\0'; // RFC says that all initial whitespaces should be ingored while (*buf != '\0' && isspace(* (unsigned char *) buf)) { buf++; } ri->request_method = skip(&buf, " "); ri->uri = skip(&buf, " "); ri->http_version = skip(&buf, "\r\n"); parse_http_headers(&buf, ri); } return request_length; } static int parse_http_request(char *buf, int len, struct mg_request_info *ri) { int result = parse_http_message(buf, len, ri); if (result > 0 && is_valid_http_method(ri->request_method) && !strncmp(ri->http_version, "HTTP/", 5)) { ri->http_version += 5; // Skip "HTTP/" } else { result = -1; } return result; } static int parse_http_response(char *buf, int len, struct mg_request_info *ri) { int result = parse_http_message(buf, len, ri); return result > 0 && !strncmp(ri->request_method, "HTTP/", 5) ? result : -1; } // Keep reading the input (either opened file descriptor fd, or socket sock, // or SSL descriptor ssl) into buffer buf, until \r\n\r\n appears in the // buffer (which marks the end of HTTP request). Buffer buf may already // have some data. The length of the data is stored in nread. // Upon every read operation, increase nread by the number of bytes read. 
static int read_request(FILE *fp, SOCKET sock, SSL *ssl, char *buf,
                        int bufsiz, int *nread) {
  int request_len, n = 0;

  // Pull more data until the buffer holds a complete request, the buffer
  // fills up, or the peer stops sending.
  do {
    request_len = get_request_len(buf, *nread);
    if (request_len == 0 &&
        (n = pull(fp, sock, ssl, buf + *nread, bufsiz - *nread)) > 0) {
      *nread += n;
    }
  } while (*nread <= bufsiz && request_len == 0 && n > 0);

  return request_len;
}

// For given directory path, substitute it to valid index file.
// Return 0 if index file has been found, -1 if not found.
// If the file is found, it's stats is returned in stp.
static int substitute_index_file(struct mg_connection *conn, char *path,
                                 size_t path_len, struct mgstat *stp) {
  const char *list = conn->ctx->config[INDEX_FILES];
  struct mgstat st;
  struct vec filename_vec;
  size_t n = strlen(path);
  int found = 0;

  // The 'path' given to us points to the directory. Remove all trailing
  // directory separator characters from the end of the path, and
  // then append single directory separator character.
  while (n > 0 && IS_DIRSEP_CHAR(path[n - 1])) {
    n--;
  }
  path[n] = DIRSEP;

  // Traverse index files list. For each entry, append it to the given
  // path and see if the file exists. If it exists, break the loop
  while ((list = next_option(list, &filename_vec, NULL)) != NULL) {

    // Ignore too long entries that may overflow path buffer
    if (filename_vec.len > path_len - (n + 2))
      continue;

    // Prepare full path to the index file
    (void) mg_strlcpy(path + n + 1, filename_vec.ptr, filename_vec.len + 1);

    // Does it exist?
    if (mg_stat(path, &st) == 0) {
      // Yes it does, break the loop
      *stp = st;
      found = 1;
      break;
    }
  }

  // If no index file exists, restore directory path
  if (!found) {
    path[n] = '\0';
  }

  return found;
}

// Return True if we should reply 304 Not Modified.
// Checks the request's If-None-Match header against the file's Etag and
// If-Modified-Since against its mtime.
static int is_not_modified(const struct mg_connection *conn,
                           const struct mgstat *stp) {
  char etag[40];
  const char *ims = mg_get_header(conn, "If-Modified-Since");
  const char *inm = mg_get_header(conn, "If-None-Match");
  construct_etag(etag, sizeof(etag), stp);
  return (inm != NULL && !mg_strcasecmp(etag, inm)) ||
    (ims != NULL && stp->mtime <= parse_date_string(ims));
}

// Forward the request body from the client to the given destination
// (file fp, socket sock, or SSL ssl).  Flushes any body bytes already
// buffered with the headers first, honors "Expect: 100-continue", and
// sends an HTTP error itself on every failure path.
// Returns 1 when exactly content_len bytes were forwarded, 0 otherwise.
static int forward_body_data(struct mg_connection *conn, FILE *fp,
                             SOCKET sock, SSL *ssl) {
  const char *expect, *buffered;
  char buf[MG_BUF_LEN];
  int to_read, nread, buffered_len, success = 0;

  expect = mg_get_header(conn, "Expect");
  assert(fp != NULL);

  if (conn->content_len == -1) {
    send_http_error(conn, 411, "Length Required", "");
  } else if (expect != NULL && mg_strcasecmp(expect, "100-continue")) {
    // An Expect header we do not understand.
    send_http_error(conn, 417, "Expectation Failed", "");
  } else {
    if (expect != NULL) {
      (void) mg_printf(conn, "%s", "HTTP/1.1 100 Continue\r\n\r\n");
    }

    // Body bytes that arrived together with the request headers.
    buffered = conn->buf + conn->request_len;
    buffered_len = conn->data_len - conn->request_len;
    assert(buffered_len >= 0);
    assert(conn->consumed_content == 0);

    if (buffered_len > 0) {
      if ((int64_t) buffered_len > conn->content_len) {
        buffered_len = (int) conn->content_len;
      }
      push(fp, sock, ssl, buffered, (int64_t) buffered_len);
      conn->consumed_content += buffered_len;
    }

    // Pump the remainder of the body from the client connection.
    while (conn->consumed_content < conn->content_len) {
      to_read = sizeof(buf);
      if ((int64_t) to_read > conn->content_len - conn->consumed_content) {
        to_read = (int) (conn->content_len - conn->consumed_content);
      }
      nread = pull(NULL, conn->client.sock, conn->ssl, buf, to_read);
      if (nread <= 0 || push(fp, sock, ssl, buf, nread) != nread) {
        break;
      }
      conn->consumed_content += nread;
    }

    if (conn->consumed_content == conn->content_len) {
      success = 1;
    }

    // Each error code path in this function must send an error
    if (!success) {
      send_http_error(conn, 577, http_500_error, "");
    }
  }

  return success;
}

#if !defined(NO_CGI)
// This structure helps to create an environment for the spawned CGI program.
// Environment is an array of "VARIABLE=VALUE\0" ASCIIZ strings,
// last element must be NULL.
// However, on Windows there is a requirement that all these VARIABLE=VALUE\0
// strings must reside in a contiguous buffer. The end of the buffer is
// marked by two '\0' characters.
// We satisfy both worlds: we create an envp array (which is vars), all
// entries are actually pointers inside buf.
struct cgi_env_block {
  struct mg_connection *conn;      // Connection this environment is built for
  char buf[CGI_ENVIRONMENT_SIZE];  // Environment buffer
  int len;                         // Space taken
  char *vars[MAX_CGI_ENVIR_VARS];  // char **envp
  int nvars;                       // Number of variables
};

// Append VARIABLE=VALUE\0 string to the buffer, and add a respective
// pointer into the vars array. Entries that would overflow the buffer or
// the vars array are silently dropped. Returns a pointer to the position
// where the string was (or would have been) written.
static char *addenv(struct cgi_env_block *block, const char *fmt, ...) {
  int n, space;
  char *added;
  va_list ap;

  // Calculate how much space is left in the buffer
  // (reserve 2 bytes for the double-'\0' terminator required on Windows)
  space = sizeof(block->buf) - block->len - 2;
  assert(space >= 0);

  // Make a pointer to the free space in the buffer
  added = block->buf + block->len;

  // Copy VARIABLE=VALUE\0 string into the free space
  va_start(ap, fmt);
  n = mg_vsnprintf(block->conn, added, (size_t) space, fmt, ap);
  va_end(ap);

  // Make sure we do not overflow buffer and the envp array
  // (-2 keeps room for the trailing NULL entry added by the caller)
  if (n > 0 && n < space &&
      block->nvars < (int) ARRAY_SIZE(block->vars) - 2) {
    // Append a pointer to the added string into the envp array
    block->vars[block->nvars++] = block->buf + block->len;
    // Bump up used length counter. Include \0 terminator
    block->len += n + 1;
  }

  return added;
}

// Fill blk with the standard CGI/1.1 environment variables for the given
// connection and program path 'prog', plus HTTP_* variables for every
// request header and any user-configured extras.
static void prepare_cgi_environment(struct mg_connection *conn,
                                    const char *prog,
                                    struct cgi_env_block *blk) {
  const char *s, *slash;
  struct vec var_vec;
  char *p, src_addr[20];
  int i;

  blk->len = blk->nvars = 0;
  blk->conn = conn;
  sockaddr_to_string(src_addr, sizeof(src_addr), &conn->client.rsa);

  addenv(blk, "SERVER_NAME=%s", conn->ctx->config[AUTHENTICATION_DOMAIN]);
  addenv(blk, "SERVER_ROOT=%s", conn->ctx->config[DOCUMENT_ROOT]);
  addenv(blk, "DOCUMENT_ROOT=%s", conn->ctx->config[DOCUMENT_ROOT]);

  // Prepare the environment block
  addenv(blk, "%s", "GATEWAY_INTERFACE=CGI/1.1");
  addenv(blk, "%s", "SERVER_PROTOCOL=HTTP/1.1");
  addenv(blk, "%s", "REDIRECT_STATUS=200"); // For PHP

  // TODO(lsm): fix this for IPv6 case
  addenv(blk, "SERVER_PORT=%d", ntohs(conn->client.lsa.sin.sin_port));

  addenv(blk, "REQUEST_METHOD=%s", conn->request_info.request_method);
  addenv(blk, "REMOTE_ADDR=%s", src_addr);
  addenv(blk, "REMOTE_PORT=%d", conn->request_info.remote_port);
  addenv(blk, "REQUEST_URI=%s", conn->request_info.uri);

  // SCRIPT_NAME: URI up to the last slash, plus the program's base name
  assert(conn->request_info.uri[0] == '/');
  slash = strrchr(conn->request_info.uri, '/');
  if ((s = strrchr(prog, '/')) == NULL)
    s = prog;
  addenv(blk, "SCRIPT_NAME=%.*s%s", slash - conn->request_info.uri,
         conn->request_info.uri, s);

  addenv(blk, "SCRIPT_FILENAME=%s", prog);
  addenv(blk, "PATH_TRANSLATED=%s", prog);
  addenv(blk, "HTTPS=%s", conn->ssl == NULL ? "off" : "on");

  if ((s = mg_get_header(conn, "Content-Type")) != NULL)
    addenv(blk, "CONTENT_TYPE=%s", s);

  if (conn->request_info.query_string != NULL)
    addenv(blk, "QUERY_STRING=%s", conn->request_info.query_string);

  if ((s = mg_get_header(conn, "Content-Length")) != NULL)
    addenv(blk, "CONTENT_LENGTH=%s", s);

  // Pass selected variables from the server's own environment through
  if ((s = getenv("PATH")) != NULL)
    addenv(blk, "PATH=%s", s);

  if (conn->path_info != NULL) {
    addenv(blk, "PATH_INFO=%s", conn->path_info);
  }

#if defined(_WIN32)
  if ((s = getenv("COMSPEC")) != NULL) {
    addenv(blk, "COMSPEC=%s", s);
  }
  if ((s = getenv("SYSTEMROOT")) != NULL) {
    addenv(blk, "SYSTEMROOT=%s", s);
  }
  if ((s = getenv("SystemDrive")) != NULL) {
    addenv(blk, "SystemDrive=%s", s);
  }
#else
  if ((s = getenv("LD_LIBRARY_PATH")) != NULL)
    addenv(blk, "LD_LIBRARY_PATH=%s", s);
#endif // _WIN32

  if ((s = getenv("PERLLIB")) != NULL)
    addenv(blk, "PERLLIB=%s", s);

  if (conn->request_info.remote_user != NULL) {
    addenv(blk, "REMOTE_USER=%s", conn->request_info.remote_user);
    addenv(blk, "%s", "AUTH_TYPE=Digest");
  }

  // Add all headers as HTTP_* variables
  for (i = 0; i < conn->request_info.num_headers; i++) {
    p = addenv(blk, "HTTP_%s=%s",
               conn->request_info.http_headers[i].name,
               conn->request_info.http_headers[i].value);

    // Convert variable name into uppercase, and change - to _
    for (; *p != '=' && *p != '\0'; p++) {
      if (*p == '-')
        *p = '_';
      *p = (char) toupper(* (unsigned char *) p);
    }
  }

  // Add user-specified variables (from the cgi_environment config option)
  s = conn->ctx->config[CGI_ENVIRONMENT];
  while ((s = next_option(s, &var_vec, NULL)) != NULL) {
    addenv(blk, "%.*s", var_vec.len, var_vec.ptr);
  }

  // Terminate: NULL entry in the envp array, extra '\0' in the buffer
  blk->vars[blk->nvars++] = NULL;
  blk->buf[blk->len++] = '\0';

  assert(blk->nvars < (int) ARRAY_SIZE(blk->vars));
  assert(blk->len > 0);
  assert(blk->len < (int) sizeof(blk->buf));
}

// Run the CGI program 'prog' for this request: spawn it with stdin/stdout
// connected through pipes, forward the POST body to it, buffer and parse
// the headers it produces, then relay status line, headers, and body back
// to the client. All resources are released via the 'done' cleanup block.
static void handle_cgi_request(struct mg_connection *conn, const char *prog) {
  int headers_len, data_len, i, fd_stdin[2], fd_stdout[2];
  const char *status, *status_text;
  char buf[16384], *pbuf, dir[PATH_MAX], *p;
  struct mg_request_info ri;
  struct cgi_env_block blk;
  FILE *in, *out;
  pid_t pid;

  prepare_cgi_environment(conn, prog, &blk);

  // CGI must be executed in its own directory. 'dir' must point to the
  // directory containing executable program, 'p' must point to the
  // executable program name relative to 'dir'.
  (void) mg_snprintf(conn, dir, sizeof(dir), "%s", prog);
  if ((p = strrchr(dir, DIRSEP)) != NULL) {
    *p++ = '\0';
  } else {
    dir[0] = '.', dir[1] = '\0';
    p = (char *) prog;
  }

  // Initialize everything so the 'done' block can clean up unconditionally
  pid = (pid_t) -1;
  fd_stdin[0] = fd_stdin[1] = fd_stdout[0] = fd_stdout[1] = -1;
  in = out = NULL;

  if (pipe(fd_stdin) != 0 || pipe(fd_stdout) != 0) {
    send_http_error(conn, 500, http_500_error,
                    "Cannot create CGI pipe: %s", strerror(ERRNO));
    goto done;
  } else if ((pid = spawn_process(conn, p, blk.buf, blk.vars,
                                  fd_stdin[0], fd_stdout[1],
                                  dir)) == (pid_t) -1) {
    goto done;
  } else if ((in = fdopen(fd_stdin[1], "wb")) == NULL ||
             (out = fdopen(fd_stdout[0], "rb")) == NULL) {
    send_http_error(conn, 500, http_500_error,
                    "fopen: %s", strerror(ERRNO));
    goto done;
  }

  setbuf(in, NULL);
  setbuf(out, NULL);

  // spawn_process() must close those!
  // If we don't mark them as closed, close() attempt before
  // return from this function throws an exception on Windows.
  // Windows does not like when closed descriptor is closed again.
  fd_stdin[0] = fd_stdout[1] = -1;

  // Send POST data to the CGI process if needed
  if (!strcmp(conn->request_info.request_method, "POST") &&
      !forward_body_data(conn, in, INVALID_SOCKET, NULL)) {
    goto done;
  }

  // Close so child gets an EOF.
  fclose(in);
  in = NULL;

  // Now read CGI reply into a buffer. We need to set correct
  // status code, thus we need to see all HTTP headers first.
  // Do not send anything back to client, until we buffer in all
  // HTTP headers.
  data_len = 0;
  headers_len = read_request(out, INVALID_SOCKET, NULL,
                             buf, sizeof(buf), &data_len);
  if (headers_len <= 0) {
    send_http_error(conn, 500, http_500_error,
                    "CGI program sent malformed HTTP headers: [%.*s]",
                    data_len, buf);
    goto done;
  }
  pbuf = buf;
  buf[headers_len - 1] = '\0';  // Terminate the header block for parsing
  parse_http_headers(&pbuf, &ri);

  // Make up and send the status line
  status_text = "OK";
  if ((status = get_header(&ri, "Status")) != NULL) {
    conn->request_info.status_code = atoi(status);
    // Skip the numeric code and spaces to get the reason phrase
    status_text = status;
    while (isdigit(* (unsigned char *) status_text) || *status_text == ' ') {
      status_text++;
    }
  } else if (get_header(&ri, "Location") != NULL) {
    conn->request_info.status_code = 302;
  } else {
    conn->request_info.status_code = 200;
  }
  // NOTE(review): this sets must_close when the CGI program sends
  // "Connection: keep-alive" — verify the intended polarity against the
  // upstream mongoose sources; it reads inverted.
  if (get_header(&ri, "Connection") != NULL &&
      !mg_strcasecmp(get_header(&ri, "Connection"), "keep-alive")) {
    conn->must_close = 1;
  }
  (void) mg_printf(conn, "HTTP/1.1 %d %s\r\n", conn->request_info.status_code,
                   status_text);

  // Send headers
  for (i = 0; i < ri.num_headers; i++) {
    mg_printf(conn, "%s: %s\r\n",
              ri.http_headers[i].name, ri.http_headers[i].value);
  }
  (void) mg_write(conn, "\r\n", 2);

  // Send chunk of data that may be read after the headers
  conn->num_bytes_sent += mg_write(conn, buf + headers_len,
                                   (size_t)(data_len - headers_len));

  // Read the rest of CGI output and send to the client
  send_file_data(conn, out, INT64_MAX);

done:
  if (pid != (pid_t) -1) {
    kill(pid, SIGKILL);  // Make sure the child does not outlive the request
  }
  if (fd_stdin[0] != -1) {
    (void) close(fd_stdin[0]);
  }
  if (fd_stdout[1] != -1) {
    (void) close(fd_stdout[1]);
  }

  // fclose() also closes the underlying descriptor, so close the raw fd
  // only when the FILE wrapper was never created
  if (in != NULL) {
    (void) fclose(in);
  } else if (fd_stdin[1] != -1) {
    (void) close(fd_stdin[1]);
  }

  if (out != NULL) {
    (void) fclose(out);
  } else if (fd_stdout[0] != -1) {
    (void) close(fd_stdout[0]);
  }
}
#endif // !NO_CGI

// For a given PUT path, create all intermediate subdirectories
// for given path. Return 0 if the path itself is a directory,
// or -1 on error, 1 if OK.
static int put_dir(const char *path) {
  char buf[PATH_MAX];
  const char *s, *p;
  struct mgstat st;
  int len, res = 1;

  // Walk each DIRSEP-separated component. Start at path + 2 to skip the
  // leading root separator (or a Windows drive prefix like "C:").
  for (s = p = path + 2; (p = strchr(s, DIRSEP)) != NULL; s = ++p) {
    len = p - path;
    if (len >= (int) sizeof(buf)) {
      res = -1;
      break;
    }
    memcpy(buf, path, len);
    buf[len] = '\0';

    // Try to create intermediate directory
    DEBUG_TRACE(("mkdir(%s)", buf));
    if (mg_stat(buf, &st) == -1 && mg_mkdir(buf, 0755) != 0) {
      res = -1;
      break;
    }

    // Is path itself a directory? (i.e. the path ends with a separator)
    if (p[1] == '\0') {
      res = 0;
    }
  }

  return res;
}

// Handle a PUT request: create intermediate directories, then write the
// request body into the target file. Supports Content-Range for partial
// updates (replies 206). 201 is returned when the file is created, 200
// when an existing file is overwritten.
static void put_file(struct mg_connection *conn, const char *path) {
  struct mgstat st;
  const char *range;
  int64_t r1, r2;
  FILE *fp;
  int rc;

  // 200 if the target already exists, 201 if we are creating it
  conn->request_info.status_code = mg_stat(path, &st) == 0 ? 200 : 201;

  if ((rc = put_dir(path)) == 0) {
    // The path itself is a directory — nothing more to write
    mg_printf(conn, "HTTP/1.1 %d OK\r\n\r\n", conn->request_info.status_code);
  } else if (rc == -1) {
    send_http_error(conn, 500, http_500_error,
                    "put_dir(%s): %s", path, strerror(ERRNO));
  } else if ((fp = mg_fopen(path, "wb+")) == NULL) {
    send_http_error(conn, 500, http_500_error,
                    "fopen(%s): %s", path, strerror(ERRNO));
  } else {
    set_close_on_exec(fileno(fp));
    range = mg_get_header(conn, "Content-Range");
    r1 = r2 = 0;
    if (range != NULL && parse_range_header(range, &r1, &r2) > 0) {
      conn->request_info.status_code = 206;
      // TODO(lsm): handle seek error
      // NOTE(review): "wb+" truncates the file before the seek — confirm
      // that is the intended semantics for ranged PUTs.
      (void) fseeko(fp, r1, SEEK_SET);
    }
    if (forward_body_data(conn, fp, INVALID_SOCKET, NULL))
      (void) mg_printf(conn, "HTTP/1.1 %d OK\r\n\r\n",
                       conn->request_info.status_code);
    (void) fclose(fp);
  }
}

static void send_ssi_file(struct mg_connection *, const char *, FILE *, int);

// Handle an SSI "#include" tag: resolve the referenced file (virtual=,
// file=, or bare quoted path) and stream it to the client, recursing via
// send_ssi_file() when the included file is itself an SSI file.
static void do_ssi_include(struct mg_connection *conn, const char *ssi,
                           char *tag, int include_level) {
  char file_name[MG_BUF_LEN], path[PATH_MAX], *p;
  FILE *fp;

  // sscanf() is safe here, since send_ssi_file() also uses buffer
  // of size MG_BUF_LEN to get the tag. So strlen(tag) is always < MG_BUF_LEN.
  if (sscanf(tag, " virtual=\"%[^\"]\"", file_name) == 1) {
    // File name is relative to the webserver root
    (void) mg_snprintf(conn, path, sizeof(path), "%s%c%s",
        conn->ctx->config[DOCUMENT_ROOT], DIRSEP, file_name);
  } else if (sscanf(tag, " file=\"%[^\"]\"", file_name) == 1) {
    // File name is relative to the webserver working directory
    // or it is absolute system path
    (void) mg_snprintf(conn, path, sizeof(path), "%s", file_name);
  } else if (sscanf(tag, " \"%[^\"]\"", file_name) == 1) {
    // File name is relative to the current document
    (void) mg_snprintf(conn, path, sizeof(path), "%s", ssi);
    if ((p = strrchr(path, DIRSEP)) != NULL) {
      p[1] = '\0';
    }
    (void) mg_snprintf(conn, path + strlen(path),
        sizeof(path) - strlen(path), "%s", file_name);
  } else {
    cry(conn, "Bad SSI #include: [%s]", tag);
    return;
  }

  if ((fp = mg_fopen(path, "rb")) == NULL) {
    cry(conn, "Cannot open SSI #include: [%s]: fopen(%s): %s",
        tag, path, strerror(ERRNO));
  } else {
    set_close_on_exec(fileno(fp));
    if (match_prefix(conn->ctx->config[SSI_EXTENSIONS],
                     strlen(conn->ctx->config[SSI_EXTENSIONS]), path) > 0) {
      // Included file is itself an SSI file — process it recursively
      send_ssi_file(conn, path, fp, include_level + 1);
    } else {
      send_file_data(conn, fp, INT64_MAX);
    }
    (void) fclose(fp);
  }
}

#if !defined(NO_POPEN)
// Handle an SSI "#exec" tag: run the quoted shell command via popen() and
// stream its stdout to the client.
static void do_ssi_exec(struct mg_connection *conn, char *tag) {
  char cmd[MG_BUF_LEN];
  FILE *fp;

  if (sscanf(tag, " \"%[^\"]\"", cmd) != 1) {
    cry(conn, "Bad SSI #exec: [%s]", tag);
  } else if ((fp = popen(cmd, "r")) == NULL) {
    cry(conn, "Cannot SSI #exec: [%s]: %s", cmd, strerror(ERRNO));
  } else {
    send_file_data(conn, fp, INT64_MAX);
    (void) pclose(fp);
  }
}
#endif // !NO_POPEN

// Stream an SSI file to the client, scanning for "<!--# ... -->" tags and
// dispatching #include / #exec directives. Plain content is passed through
// unchanged. Recursion depth is capped at 10 to stop include loops.
static void send_ssi_file(struct mg_connection *conn, const char *path,
                          FILE *fp, int include_level) {
  char buf[MG_BUF_LEN];
  int ch, len, in_ssi_tag;

  if (include_level > 10) {
    cry(conn, "SSI #include level is too deep (%s)", path);
    return;
  }

  in_ssi_tag = 0;  // True while buffering a candidate "<!--#...>" tag
  len = 0;

  while ((ch = fgetc(fp)) != EOF) {
    if (in_ssi_tag && ch == '>') {
      // End of a buffered tag — decide what it is and dispatch
      in_ssi_tag = 0;
      buf[len++] = (char) ch;
      buf[len] = '\0';
      assert(len <= (int) sizeof(buf));
      if (len < 6 || memcmp(buf, "<!--#", 5) != 0) {
        // Not an SSI tag, pass it
        (void) mg_write(conn, buf, (size_t)len);
      } else {
        // Offsets 12 / 9 skip "<!--#include" / "<!--#exec" respectively
        if (!memcmp(buf + 5, "include", 7)) {
          do_ssi_include(conn, path, buf + 12, include_level);
#if !defined(NO_POPEN)
        } else if (!memcmp(buf + 5, "exec", 4)) {
          do_ssi_exec(conn, buf + 9);
#endif // !NO_POPEN
        } else {
          cry(conn, "%s: unknown SSI " "command: \"%s\"", path, buf);
        }
      }
      len = 0;
    } else if (in_ssi_tag) {
      // Once 5 chars are buffered, bail out early if they are not "<!--#"
      if (len == 5 && memcmp(buf, "<!--#", 5) != 0) {
        // Not an SSI tag
        in_ssi_tag = 0;
      } else if (len == (int) sizeof(buf) - 2) {
        cry(conn, "%s: SSI tag is too large", path);
        len = 0;
      }
      buf[len++] = ch & 0xff;
    } else if (ch == '<') {
      // Possible start of a tag — flush plain content, start buffering
      in_ssi_tag = 1;
      if (len > 0) {
        (void) mg_write(conn, buf, (size_t)len);
      }
      len = 0;
      buf[len++] = ch & 0xff;
    } else {
      buf[len++] = ch & 0xff;
      if (len == (int) sizeof(buf)) {
        // Flush full buffer of plain content
        (void) mg_write(conn, buf, (size_t)len);
        len = 0;
      }
    }
  }

  // Send the rest of buffered data
  if (len > 0) {
    (void) mg_write(conn, buf, (size_t)len);
  }
}

// Serve a file through the SSI processor. The connection is marked
// must_close because no Content-Length can be computed up front.
static void handle_ssi_file_request(struct mg_connection *conn,
                                    const char *path) {
  FILE *fp;

  if ((fp = mg_fopen(path, "rb")) == NULL) {
    send_http_error(conn, 500, http_500_error,
                    "fopen(%s): %s", path, strerror(ERRNO));
  } else {
    conn->must_close = 1;
    set_close_on_exec(fileno(fp));
    mg_printf(conn, "HTTP/1.1 200 OK\r\n"
              "Content-Type: text/html\r\nConnection: %s\r\n\r\n",
              suggest_connection_header(conn));
    send_ssi_file(conn, path, fp, 0);
    (void) fclose(fp);
  }
}

// Reply to an OPTIONS request with the methods this server supports.
static void send_options(struct mg_connection *conn) {
  conn->request_info.status_code = 200;
  (void) mg_printf(conn, "HTTP/1.1 200 OK\r\n"
                   "Allow: GET, POST, HEAD, CONNECT, PUT, DELETE, OPTIONS\r\n"
                   "DAV: 1\r\n\r\n");
}

// Writes PROPFIND properties for a collection element
static void print_props(struct mg_connection *conn, const char* uri,
                        struct mgstat* st) {
  char mtime[64];

  gmt_time_string(mtime, sizeof(mtime), &st->mtime);
  conn->num_bytes_sent += mg_printf(conn,
      "<d:response>"
       "<d:href>%s</d:href>"
       "<d:propstat>"
        "<d:prop>"
         "<d:resourcetype>%s</d:resourcetype>"
         "<d:getcontentlength>%" INT64_FMT "</d:getcontentlength>"
         "<d:getlastmodified>%s</d:getlastmodified>"
        "</d:prop>"
        "<d:status>HTTP/1.1 200 OK</d:status>"
       "</d:propstat>"
      "</d:response>\n",
      uri,
      st->is_directory ? "<d:collection/>" : "",
      st->size,
      mtime);
}

// scan_directory() callback: emit PROPFIND properties for one entry.
static void print_dav_dir_entry(struct de *de, void *data) {
  char href[PATH_MAX];
  struct mg_connection *conn = (struct mg_connection *) data;
  mg_snprintf(conn, href, sizeof(href), "%s%s",
              conn->request_info.uri, de->file_name);
  print_props(conn, href, &de->st);
}

// Handle a WebDAV PROPFIND request: emit a 207 Multi-Status document for
// the resource itself and, unless Depth is "0", for directory entries too.
static void handle_propfind(struct mg_connection *conn, const char* path,
                            struct mgstat* st) {
  const char *depth = mg_get_header(conn, "Depth");

  conn->must_close = 1;
  conn->request_info.status_code = 207;
  mg_printf(conn, "HTTP/1.1 207 Multi-Status\r\n"
            "Connection: close\r\n"
            "Content-Type: text/xml; charset=utf-8\r\n\r\n");

  conn->num_bytes_sent += mg_printf(conn,
      "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
      "<d:multistatus xmlns:d='DAV:'>\n");

  // Print properties for the requested resource itself
  print_props(conn, conn->request_info.uri, st);

  // If it is a directory, print directory entries too if Depth is not 0
  if (st->is_directory &&
      !mg_strcasecmp(conn->ctx->config[ENABLE_DIRECTORY_LISTING], "yes") &&
      (depth == NULL || strcmp(depth, "0") != 0)) {
    scan_directory(conn, path, conn, &print_dav_dir_entry);
  }

  conn->num_bytes_sent += mg_printf(conn, "%s\n", "</d:multistatus>");
}

// This is the heart of the Mongoose's logic.
// This function is called when the request is read, parsed and validated,
// and Mongoose must decide what action to take: serve a file, or
// a directory, or call embedded function, etcetera.
static void handle_request(struct mg_connection *conn) {
  struct mg_request_info *ri = &conn->request_info;
  char path[PATH_MAX];
  int stat_result, uri_len;
  struct mgstat st;

  // Split the query string off the URI
  if ((conn->request_info.query_string = strchr(ri->uri, '?')) != NULL) {
    * conn->request_info.query_string++ = '\0';
  }
  // Canonicalize the URI: %XX-decode and collapse "..", "//"
  uri_len = strlen(ri->uri);
  url_decode(ri->uri, (size_t)uri_len, ri->uri, (size_t)(uri_len + 1), 0);
  remove_double_dots_and_double_slashes(ri->uri);
  stat_result = convert_uri_to_file_name(conn, path, sizeof(path), &st);

  DEBUG_TRACE(("%s", ri->uri));
  // Note: the order of these checks matters — authorization comes first,
  // then the user callback, then method-specific handling, then files.
  if (!check_authorization(conn, path)) {
    send_authorization_request(conn);
  } else if (call_user(conn, MG_NEW_REQUEST) != NULL) {
    // Do nothing, callback has served the request
  } else if (!strcmp(ri->request_method, "OPTIONS")) {
    send_options(conn);
  } else if (conn->ctx->config[DOCUMENT_ROOT] == NULL) {
    send_http_error(conn, 404, "Not Found", "Not Found");
  } else if ((!strcmp(ri->request_method, "PUT") ||
        !strcmp(ri->request_method, "DELETE")) &&
      (conn->ctx->config[PUT_DELETE_PASSWORDS_FILE] == NULL ||
       !is_authorized_for_put(conn))) {
    // PUT/DELETE require their own password file and authorization
    send_authorization_request(conn);
  } else if (!strcmp(ri->request_method, "PUT")) {
    put_file(conn, path);
  } else if (!strcmp(ri->request_method, "DELETE")) {
    if (mg_remove(path) == 0) {
      send_http_error(conn, 200, "OK", "");
    } else {
      send_http_error(conn, 500, http_500_error,
                      "remove(%s): %s", path, strerror(ERRNO));
    }
  } else if (stat_result != 0 || must_hide_file(conn, path)) {
    send_http_error(conn, 404, "Not Found", "%s", "File not found");
  } else if (st.is_directory && ri->uri[uri_len - 1] != '/') {
    // Directory URI without trailing slash — redirect so relative links work
    (void) mg_printf(conn, "HTTP/1.1 301 Moved Permanently\r\n"
                     "Location: %s/\r\n\r\n", ri->uri);
  } else if (!strcmp(ri->request_method, "PROPFIND")) {
    handle_propfind(conn, path, &st);
  } else if (st.is_directory &&
             !substitute_index_file(conn, path, sizeof(path), &st)) {
    // No index file — either list the directory or deny
    if (!mg_strcasecmp(conn->ctx->config[ENABLE_DIRECTORY_LISTING], "yes")) {
      handle_directory_request(conn, path);
    } else {
      send_http_error(conn, 403, "Directory Listing Denied",
          "Directory listing denied");
    }
#if !defined(NO_CGI)
  } else if (match_prefix(conn->ctx->config[CGI_EXTENSIONS],
                          strlen(conn->ctx->config[CGI_EXTENSIONS]),
                          path) > 0) {
    if (strcmp(ri->request_method, "POST") &&
        strcmp(ri->request_method, "GET")) {
      send_http_error(conn, 501, "Not Implemented",
                      "Method %s is not implemented", ri->request_method);
    } else {
      handle_cgi_request(conn, path);
    }
#endif // !NO_CGI
  } else if (match_prefix(conn->ctx->config[SSI_EXTENSIONS],
                          strlen(conn->ctx->config[SSI_EXTENSIONS]),
                          path) > 0) {
    handle_ssi_file_request(conn, path);
  } else if (is_not_modified(conn, &st)) {
    send_http_error(conn, 304, "Not Modified", "");
  } else {
    handle_file_request(conn, path, &st);
  }
}

// Close every listening socket and free its struct socket node.
static void close_all_listening_sockets(struct mg_context *ctx) {
  struct socket *sp, *tmp;
  for (sp = ctx->listening_sockets; sp != NULL; sp = tmp) {
    tmp = sp->next;
    (void) closesocket(sp->sock);
    free(sp);
  }
}

// Valid listening port specification is: [ip_address:]port[s]
// Examples: 80, 443s, 127.0.0.1:3128,1.2.3.4:8080s
// TODO(lsm): add parsing of the IPv6 address
static int parse_port_string(const struct vec *vec, struct socket *so) {
  int a, b, c, d, port, len;

  // MacOS needs that. If we do not zero it, subsequent bind() will fail.
  // Also, all-zeroes in the socket address means binding to all addresses
  // for both IPv4 and IPv6 (INADDR_ANY and IN6ADDR_ANY_INIT).
  memset(so, 0, sizeof(*so));

  if (sscanf(vec->ptr, "%d.%d.%d.%d:%d%n", &a, &b, &c, &d, &port, &len) == 5) {
    // Bind to a specific IPv4 address
    so->lsa.sin.sin_addr.s_addr = htonl((a << 24) | (b << 16) | (c << 8) | d);
  } else if (sscanf(vec->ptr, "%d%n", &port, &len) != 1 ||
             len <= 0 ||
             len > (int) vec->len ||
             (vec->ptr[len] && vec->ptr[len] != 's' && vec->ptr[len] != ',')) {
    // Neither a.b.c.d:port nor a bare port number — reject
    return 0;
  }

  so->is_ssl = vec->ptr[len] == 's';  // Trailing 's' marks an SSL port
#if defined(USE_IPV6)
  so->lsa.sin6.sin6_family = AF_INET6;
  so->lsa.sin6.sin6_port = htons((uint16_t) port);
#else
  so->lsa.sin.sin_family = AF_INET;
  so->lsa.sin.sin_port = htons((uint16_t) port);
#endif

  return 1;
}

// Parse the "listening_ports" option and open/bind/listen one socket per
// entry, prepending each to ctx->listening_sockets. On any failure all
// sockets opened so far are closed. Returns 1 on success, 0 on failure.
static int set_ports_option(struct mg_context *ctx) {
  const char *list = ctx->config[LISTENING_PORTS];
  int on = 1, success = 1;
  SOCKET sock;
  struct vec vec;
  struct socket so, *listener;

  while (success && (list = next_option(list, &vec, NULL)) != NULL) {
    if (!parse_port_string(&vec, &so)) {
      cry(fc(ctx), "%s: %.*s: invalid port spec. Expecting list of: %s",
          __func__, vec.len, vec.ptr, "[IP_ADDRESS:]PORT[s|p]");
      success = 0;
    } else if (so.is_ssl &&
               (ctx->ssl_ctx == NULL ||
                ctx->config[SSL_CERTIFICATE] == NULL)) {
      cry(fc(ctx), "Cannot add SSL socket, is -ssl_certificate option set?");
      success = 0;
    } else if ((sock = socket(so.lsa.sa.sa_family, SOCK_STREAM,
                              6)) == INVALID_SOCKET ||
#if !defined(_WIN32)
               // On Windows, SO_REUSEADDR is recommended only for
               // broadcast UDP sockets
               setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &on,
                          sizeof(on)) != 0 ||
#endif // !_WIN32
               // Set TCP keep-alive. This is needed because if HTTP-level
               // keep-alive is enabled, and client resets the connection,
               // server won't get TCP FIN or RST and will keep the connection
               // open forever. With TCP keep-alive, next keep-alive
               // handshake will figure out that the client is down and
               // will close the server end.
               // Thanks to Igor Klopov who suggested the patch.
               setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (void *) &on,
                          sizeof(on)) != 0 ||
               bind(sock, &so.lsa.sa, sizeof(so.lsa)) != 0 ||
               listen(sock, 100) != 0) {
      closesocket(sock);
      cry(fc(ctx), "%s: cannot bind to %.*s: %s", __func__,
          vec.len, vec.ptr, strerror(ERRNO));
      success = 0;
    } else if ((listener = (struct socket *)
                calloc(1, sizeof(*listener))) == NULL) {
      closesocket(sock);
      cry(fc(ctx), "%s: %s", __func__, strerror(ERRNO));
      success = 0;
    } else {
      // Prepend the new listener to the context's list
      *listener = so;
      listener->sock = sock;
      set_close_on_exec(listener->sock);
      listener->next = ctx->listening_sockets;
      ctx->listening_sockets = listener;
    }
  }

  if (!success) {
    close_all_listening_sockets(ctx);
  }

  return success;
}

// Append a request header's value to the access log line, quoted,
// or " -" when the header is absent.
static void log_header(const struct mg_connection *conn, const char *header,
                       FILE *fp) {
  const char *header_value;

  if ((header_value = mg_get_header(conn, header)) == NULL) {
    (void) fprintf(fp, "%s", " -");
  } else {
    (void) fprintf(fp, " \"%s\"", header_value);
  }
}

// Write one Combined-Log-Format line for the finished request to the
// configured access log file. No-op when no access log is configured.
static void log_access(const struct mg_connection *conn) {
  const struct mg_request_info *ri;
  FILE *fp;
  char date[64], src_addr[20];

  fp = conn->ctx->config[ACCESS_LOG_FILE] == NULL ? NULL :
    mg_fopen(conn->ctx->config[ACCESS_LOG_FILE], "a+");

  if (fp == NULL)
    return;

  strftime(date, sizeof(date), "%d/%b/%Y:%H:%M:%S %z",
           localtime(&conn->birth_time));

  ri = &conn->request_info;
  // Lock the stream so concurrent worker threads do not interleave lines
  flockfile(fp);

  sockaddr_to_string(src_addr, sizeof(src_addr), &conn->client.rsa);
  fprintf(fp, "%s - %s [%s] \"%s %s HTTP/%s\" %d %" INT64_FMT,
          src_addr, ri->remote_user == NULL ? "-" : ri->remote_user, date,
          ri->request_method ? ri->request_method : "-",
          ri->uri ? ri->uri : "-", ri->http_version,
          conn->request_info.status_code, conn->num_bytes_sent);
  log_header(conn, "Referer", fp);
  log_header(conn, "User-Agent", fp);
  fputc('\n', fp);
  fflush(fp);

  funlockfile(fp);
  fclose(fp);
}

// True if n fits in an unsigned byte (valid IPv4 octet).
static int isbyte(int n) {
  return n >= 0 && n <= 255;
}

// Verify given socket address against the ACL.
// Return -1 if ACL is malformed, 0 if address is disallowed, 1 if allowed.
static int check_acl(struct mg_context *ctx, const union usa *usa) { int a, b, c, d, n, mask, allowed; char flag; uint32_t acl_subnet, acl_mask, remote_ip; struct vec vec; const char *list = ctx->config[ACCESS_CONTROL_LIST]; if (list == NULL) { return 1; } (void) memcpy(&remote_ip, &usa->sin.sin_addr, sizeof(remote_ip)); // If any ACL is set, deny by default allowed = '-'; while ((list = next_option(list, &vec, NULL)) != NULL) { mask = 32; if (sscanf(vec.ptr, "%c%d.%d.%d.%d%n", &flag, &a, &b, &c, &d, &n) != 5) { cry(fc(ctx), "%s: subnet must be [+|-]x.x.x.x[/x]", __func__); return -1; } else if (flag != '+' && flag != '-') { cry(fc(ctx), "%s: flag must be + or -: [%s]", __func__, vec.ptr); return -1; } else if (!isbyte(a)||!isbyte(b)||!isbyte(c)||!isbyte(d)) { cry(fc(ctx), "%s: bad ip address: [%s]", __func__, vec.ptr); return -1; } else if (sscanf(vec.ptr + n, "/%d", &mask) == 0) { // Do nothing, no mask specified } else if (mask < 0 || mask > 32) { cry(fc(ctx), "%s: bad subnet mask: %d [%s]", __func__, n, vec.ptr); return -1; } acl_subnet = (a << 24) | (b << 16) | (c << 8) | d; acl_mask = mask ? 
0xffffffffU << (32 - mask) : 0; if (acl_subnet == (ntohl(remote_ip) & acl_mask)) { allowed = flag; } } return allowed == '+'; } static void add_to_set(SOCKET fd, fd_set *set, int *max_fd) { FD_SET(fd, set); if (fd > (SOCKET) *max_fd) { *max_fd = (int) fd; } } #if !defined(_WIN32) static int set_uid_option(struct mg_context *ctx) { struct passwd *pw; const char *uid = ctx->config[RUN_AS_USER]; int success = 0; if (uid == NULL) { success = 1; } else { if ((pw = getpwnam(uid)) == NULL) { cry(fc(ctx), "%s: unknown user [%s]", __func__, uid); } else if (setgid(pw->pw_gid) == -1) { cry(fc(ctx), "%s: setgid(%s): %s", __func__, uid, strerror(errno)); } else if (setuid(pw->pw_uid) == -1) { cry(fc(ctx), "%s: setuid(%s): %s", __func__, uid, strerror(errno)); } else { success = 1; } } return success; } #endif // !_WIN32 #if !defined(NO_SSL) static pthread_mutex_t *ssl_mutexes; // Return OpenSSL error message static const char *ssl_error(void) { unsigned long err; err = ERR_get_error(); return err == 0 ? "" : ERR_error_string(err, NULL); } static void ssl_locking_callback(int mode, int mutex_num, const char *file, int line) { line = 0; // Unused file = NULL; // Unused if (mode & CRYPTO_LOCK) { (void) pthread_mutex_lock(&ssl_mutexes[mutex_num]); } else { (void) pthread_mutex_unlock(&ssl_mutexes[mutex_num]); } } static unsigned long ssl_id_callback(void) { return (unsigned long) pthread_self(); } #if !defined(NO_SSL_DL) static int load_dll(struct mg_context *ctx, const char *dll_name, struct ssl_func *sw) { union {void *p; void (*fp)(void);} u; void *dll_handle; struct ssl_func *fp; if ((dll_handle = dlopen(dll_name, RTLD_LAZY)) == NULL) { cry(fc(ctx), "%s: cannot load %s", __func__, dll_name); return 0; } for (fp = sw; fp->name != NULL; fp++) { #ifdef _WIN32 // GetProcAddress() returns pointer to function u.fp = (void (*)(void)) dlsym(dll_handle, fp->name); #else // dlsym() on UNIX returns void *. ISO C forbids casts of data pointers to // function pointers. 
We need to use a union to make a cast. u.p = dlsym(dll_handle, fp->name); #endif // _WIN32 if (u.fp == NULL) { cry(fc(ctx), "%s: %s: cannot find %s", __func__, dll_name, fp->name); return 0; } else { fp->ptr = u.fp; } } return 1; } #endif // NO_SSL_DL // Dynamically load SSL library. Set up ctx->ssl_ctx pointer. static int set_ssl_option(struct mg_context *ctx) { struct mg_request_info request_info; int i, size; const char *pem = ctx->config[SSL_CERTIFICATE]; const char *chain = ctx->config[SSL_CHAIN_FILE]; #if !defined(NO_SSL_DL) if (!load_dll(ctx, SSL_LIB, ssl_sw) || !load_dll(ctx, CRYPTO_LIB, crypto_sw)) { return 0; } #endif // NO_SSL_DL // Initialize SSL crap SSL_library_init(); SSL_load_error_strings(); if ((ctx->client_ssl_ctx = SSL_CTX_new(SSLv23_client_method())) == NULL) { cry(fc(ctx), "SSL_CTX_new error: %s", ssl_error()); } if ((ctx->ssl_ctx = SSL_CTX_new(SSLv23_server_method())) == NULL) { cry(fc(ctx), "SSL_CTX_new error: %s", ssl_error()); } else if (ctx->user_callback != NULL) { memset(&request_info, 0, sizeof(request_info)); request_info.user_data = ctx->user_data; ctx->user_callback(MG_INIT_SSL, (struct mg_connection *) ctx->ssl_ctx); } if (ctx->ssl_ctx != NULL && pem != NULL && SSL_CTX_use_certificate_file(ctx->ssl_ctx, pem, SSL_FILETYPE_PEM) == 0) { cry(fc(ctx), "%s: cannot open %s: %s", __func__, pem, ssl_error()); return 0; } if (ctx->ssl_ctx != NULL && pem != NULL && SSL_CTX_use_PrivateKey_file(ctx->ssl_ctx, pem, SSL_FILETYPE_PEM) == 0) { cry(fc(ctx), "%s: cannot open %s: %s", __func__, pem, ssl_error()); return 0; } if (ctx->ssl_ctx != NULL && chain != NULL && SSL_CTX_use_certificate_chain_file(ctx->ssl_ctx, chain) == 0) { cry(fc(ctx), "%s: cannot open %s: %s", __func__, chain, ssl_error()); return 0; } // Initialize locking callbacks, needed for thread safety. 
// http://www.openssl.org/support/faq.html#PROG1
  size = sizeof(pthread_mutex_t) * CRYPTO_num_locks();
  if ((ssl_mutexes = (pthread_mutex_t *) malloc((size_t)size)) == NULL) {
    cry(fc(ctx), "%s: cannot allocate mutexes: %s", __func__, ssl_error());
    return 0;
  }

  for (i = 0; i < CRYPTO_num_locks(); i++) {
    pthread_mutex_init(&ssl_mutexes[i], NULL);
  }

  CRYPTO_set_locking_callback(&ssl_locking_callback);
  CRYPTO_set_id_callback(&ssl_id_callback);

  return 1;
}

// Undo set_ssl_option(): unregister the OpenSSL callbacks and destroy the
// locking mutexes. The mutex array itself is freed in free_context().
static void uninitialize_ssl(struct mg_context *ctx) {
  int i;
  if (ctx->ssl_ctx != NULL) {
    CRYPTO_set_locking_callback(NULL);
    for (i = 0; i < CRYPTO_num_locks(); i++) {
      pthread_mutex_destroy(&ssl_mutexes[i]);
    }
    // NOTE(review): CRYPTO_set_locking_callback(NULL) is called twice here;
    // the second call is redundant.
    CRYPTO_set_locking_callback(NULL);
    CRYPTO_set_id_callback(NULL);
  }
}
#endif // !NO_SSL

// Verify that the global passwords file, if configured, can be stat()-ed.
// Returns 1 when OK or when the option is not set.
static int set_gpass_option(struct mg_context *ctx) {
  struct mgstat mgstat;
  const char *path = ctx->config[GLOBAL_PASSWORDS_FILE];
  return path == NULL || mg_stat(path, &mgstat) == 0;
}

// Validate the ACL option by running check_acl() once against a dummy
// address; -1 indicates a malformed ACL.
static int set_acl_option(struct mg_context *ctx) {
  union usa fake;
  return check_acl(ctx, &fake) != -1;
}

// Reset connection state that must not leak from one request to the next
// on a keep-alive connection.
static void reset_per_request_attributes(struct mg_connection *conn) {
  conn->path_info = NULL;
  conn->num_bytes_sent = conn->consumed_content = 0;
  conn->content_len = -1;
  conn->request_len = conn->data_len = 0;
  conn->must_close = 0;
}

// Shut down a socket politely: linger, send FIN, drain whatever the peer
// still has in flight, then close.
static void close_socket_gracefully(SOCKET sock) {
  char buf[MG_BUF_LEN];
  struct linger linger;
  int n;

  // Set linger option to avoid socket hanging out after close. This prevent
  // ephemeral port exhaust problem under high QPS.
  linger.l_onoff = 1;
  linger.l_linger = 1;
  setsockopt(sock, SOL_SOCKET, SO_LINGER, (void *) &linger, sizeof(linger));

  // Send FIN to the client
  (void) shutdown(sock, SHUT_WR);
  set_non_blocking_mode(sock);

  // Read and discard pending data. If we do not do that and close the
  // socket, the data in the send buffer may be discarded. This
  // behaviour is seen on Windows, when client keeps sending data
  // when server decide to close the connection; then when client
  // does recv() it gets no data back.
  do {
    n = pull(NULL, sock, NULL, buf, sizeof(buf));
  } while (n > 0);

  // Now we know that our FIN is ACK-ed, safe to close
  (void) closesocket(sock);
}

// Free SSL state attached to the connection and close its socket.
static void close_connection(struct mg_connection *conn) {
  if (conn->ssl) {
    SSL_free(conn->ssl);
    conn->ssl = NULL;
  }

  if (conn->client.sock != INVALID_SOCKET) {
    close_socket_gracefully(conn->client.sock);
  }
}

// Public API: close and deallocate a connection created by mg_connect().
void mg_close_connection(struct mg_connection *conn) {
  close_connection(conn);
  free(conn);
}

// Open a client TCP (optionally SSL) connection to host:port.
// Returns a newly allocated connection, or NULL on failure; failures are
// reported through cry().
struct mg_connection *mg_connect(struct mg_context *ctx,
                                 const char *host, int port, int use_ssl) {
  struct mg_connection *newconn = NULL;
  struct sockaddr_in sin;
  struct hostent *he;
  int sock;

  if (ctx->client_ssl_ctx == NULL && use_ssl) {
    cry(fc(ctx), "%s: SSL is not initialized", __func__);
  } else if ((he = gethostbyname(host)) == NULL) {
    cry(fc(ctx), "%s: gethostbyname(%s): %s", __func__, host, strerror(ERRNO));
  } else if ((sock = socket(PF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET) {
    cry(fc(ctx), "%s: socket: %s", __func__, strerror(ERRNO));
  } else {
    sin.sin_family = AF_INET;
    sin.sin_port = htons((uint16_t) port);
    sin.sin_addr = * (struct in_addr *) he->h_addr_list[0];
    if (connect(sock, (struct sockaddr *) &sin, sizeof(sin)) != 0) {
      cry(fc(ctx), "%s: connect(%s:%d): %s", __func__, host, port,
          strerror(ERRNO));
      closesocket(sock);
    } else if ((newconn = (struct mg_connection *)
                calloc(1, sizeof(*newconn))) == NULL) {
      cry(fc(ctx), "%s: calloc: %s", __func__, strerror(ERRNO));
      closesocket(sock);
    } else {
      newconn->ctx = ctx;
      newconn->client.sock = sock;
      newconn->client.rsa.sin = sin;
      newconn->client.is_ssl = use_ssl;
      if (use_ssl) {
        // NOTE(review): the sslize() return value is ignored here; a failed
        // SSL handshake still yields a non-NULL connection.
        sslize(newconn, ctx->client_ssl_ctx, SSL_connect);
      }
    }
  }

  return newconn;
}

// Download `url` with an HTTP/1.0 GET and store the response body in the
// file at `path`. Response headers are parsed into `ri` using the caller's
// buffer `buf`. Returns the open FILE* on success, NULL on error.
FILE *mg_fetch(struct mg_context *ctx, const char *url, const char *path,
               char *buf, size_t buf_len, struct mg_request_info *ri) {
  struct mg_connection *newconn;
  int n, req_length, data_length, port;
  char host[1025], proto[10], buf2[MG_BUF_LEN];
  FILE *fp = NULL;

  // Note: "%9[htps]" matches only runs of the letters h/t/p/s, i.e. the
  // schemes "http" and "https".
  if (sscanf(url, "%9[htps]://%1024[^:]:%d/%n", proto, host, &port,
&n) == 3) {
  } else if (sscanf(url, "%9[htps]://%1024[^/]/%n", proto, host, &n) == 2) {
    // No explicit port in the URL: default to 443 for https, 80 otherwise.
    port = mg_strcasecmp(proto, "https") == 0 ? 443 : 80;
  } else {
    cry(fc(ctx), "%s: invalid URL: [%s]", __func__, url);
    return NULL;
  }

  if ((newconn = mg_connect(ctx, host, port,
                            !strcmp(proto, "https"))) == NULL) {
    cry(fc(ctx), "%s: mg_connect(%s): %s", __func__, url, strerror(ERRNO));
  } else {
    mg_printf(newconn, "GET /%s HTTP/1.0\r\nHost: %s\r\n\r\n", url + n, host);
    data_length = 0;
    req_length = read_request(NULL, newconn->client.sock, newconn->ssl,
                              buf, buf_len, &data_length);
    if (req_length <= 0) {
      cry(fc(ctx), "%s(%s): invalid HTTP reply", __func__, url);
    } else if (parse_http_response(buf, req_length, ri) <= 0) {
      cry(fc(ctx), "%s(%s): cannot parse HTTP headers", __func__, url);
    } else if ((fp = fopen(path, "w+b")) == NULL) {
      cry(fc(ctx), "%s: fopen(%s): %s", __func__, path, strerror(ERRNO));
    } else {
      // Write chunk of data that may be in the user's buffer
      data_length -= req_length;
      if (data_length > 0 &&
          fwrite(buf + req_length, 1, data_length, fp) != (size_t) data_length) {
        cry(fc(ctx), "%s: fwrite(%s): %s", __func__, path, strerror(ERRNO));
        fclose(fp);
        fp = NULL;
      }

      // Read the rest of the response and write it to the file. Do not use
      // mg_read() cause we didn't set newconn->content_len properly.
      while (fp && (data_length = pull(NULL, newconn->client.sock,
                                       newconn->ssl, buf2,
                                       sizeof(buf2))) > 0) {
        if (fwrite(buf2, 1, data_length, fp) != (size_t) data_length) {
          cry(fc(ctx), "%s: fwrite(%s): %s", __func__, path, strerror(ERRNO));
          fclose(fp);
          fp = NULL;
          break;
        }
      }
    }
    mg_close_connection(newconn);
  }

  return fp;
}

// Remove the request that was just handled (headers plus any body bytes
// already buffered) from conn->buf, shifting pipelined data to the front.
static void discard_current_request_from_buffer(struct mg_connection *conn) {
  char *buffered;
  int buffered_len, body_len;

  // NOTE(review): `buffered` is computed but never used below.
  buffered = conn->buf + conn->request_len;
  buffered_len = conn->data_len - conn->request_len;
  assert(buffered_len >= 0);

  if (conn->content_len <= 0) {
    // Protect from negative Content-Length, too
    body_len = 0;
  } else if (conn->content_len < (int64_t) buffered_len) {
    body_len = (int) conn->content_len;
  } else {
    body_len = buffered_len;
  }

  conn->data_len -= conn->request_len + body_len;
  memmove(conn->buf, conn->buf + conn->request_len + body_len,
          (size_t) conn->data_len);
}

static int is_valid_uri(const char *uri) {
  // Conform to http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
  // URI can be an asterisk (*) or should start with slash.
  return uri[0] == '/' || (uri[0] == '*' && uri[1] == '\0');
}

// Serve all requests arriving on one accepted connection, looping while
// HTTP keep-alive is enabled and applicable.
static void process_new_connection(struct mg_connection *conn) {
  struct mg_request_info *ri = &conn->request_info;
  int keep_alive_enabled;
  const char *cl;

  keep_alive_enabled = !strcmp(conn->ctx->config[ENABLE_KEEP_ALIVE], "yes");

  do {
    reset_per_request_attributes(conn);

    conn->request_len = read_request(NULL, conn->client.sock, conn->ssl,
                                     conn->buf, conn->buf_size,
                                     &conn->data_len);
    assert(conn->data_len >= conn->request_len);
    // Buffer filled up without a complete request: headers are too big.
    if (conn->request_len == 0 && conn->data_len == conn->buf_size) {
      send_http_error(conn, 413, "Request Too Large", "");
      return;
    }
    if (conn->request_len <= 0) {
      return;  // Remote end closed the connection
    }

    if (parse_http_request(conn->buf, conn->buf_size, ri) <= 0 ||
        !is_valid_uri(ri->uri)) {
      // Do not put garbage in the access log, just send it back to the client
      send_http_error(conn, 400, "Bad Request",
                      "Cannot parse HTTP request: [%.*s]",
                      conn->data_len, conn->buf);
    } else if (strcmp(ri->http_version, "1.0") &&
               strcmp(ri->http_version, "1.1")) {
      // Request seems valid, but HTTP version is strange
      send_http_error(conn, 505, "HTTP version not supported", "");
      log_access(conn);
    } else {
      // Request is valid, handle it
      cl = get_header(ri, "Content-Length");
      conn->content_len = cl == NULL ? -1 : strtoll(cl, NULL, 10);
      conn->birth_time = time(NULL);
      handle_request(conn);
      call_user(conn, MG_REQUEST_COMPLETE);
      log_access(conn);
      discard_current_request_from_buffer(conn);
    }
    if (ri->remote_user != NULL) {
      free((void *) ri->remote_user);
    }
  } while (conn->ctx->stop_flag == 0 &&
           keep_alive_enabled &&
           should_keep_alive(conn));
}

// Worker threads take accepted socket from the queue
static int consume_socket(struct mg_context *ctx, struct socket *sp) {
  (void) pthread_mutex_lock(&ctx->mutex);
  DEBUG_TRACE(("going idle"));

  // If the queue is empty, wait. We're idle at this point.
while (ctx->sq_head == ctx->sq_tail && ctx->stop_flag == 0) {
    pthread_cond_wait(&ctx->sq_full, &ctx->mutex);
  }

  // If we're stopping, sq_head may be equal to sq_tail.
  if (ctx->sq_head > ctx->sq_tail) {
    // Copy socket from the queue and increment tail
    *sp = ctx->queue[ctx->sq_tail % ARRAY_SIZE(ctx->queue)];
    ctx->sq_tail++;
    DEBUG_TRACE(("grabbed socket %d, going busy", sp->sock));

    // Wrap pointers if needed
    while (ctx->sq_tail > (int) ARRAY_SIZE(ctx->queue)) {
      ctx->sq_tail -= ARRAY_SIZE(ctx->queue);
      ctx->sq_head -= ARRAY_SIZE(ctx->queue);
    }
  }

  (void) pthread_cond_signal(&ctx->sq_empty);
  (void) pthread_mutex_unlock(&ctx->mutex);

  return !ctx->stop_flag;
}

// Worker thread body: repeatedly take an accepted socket from the queue
// and serve it, until the context is being stopped.
static void worker_thread(struct mg_context *ctx) {
  struct mg_connection *conn;
  int buf_size = atoi(ctx->config[MAX_REQUEST_SIZE]);

  // One allocation holds both the connection struct and its I/O buffer,
  // which lives immediately after the struct.
  conn = (struct mg_connection *) calloc(1, sizeof(*conn) + buf_size);
  if (conn == NULL) {
    cry(fc(ctx), "%s", "Cannot create new connection struct, OOM");
    return;
  }
  conn->buf_size = buf_size;
  conn->buf = (char *) (conn + 1);

  // Call consume_socket() even when ctx->stop_flag > 0, to let it signal
  // sq_empty condvar to wake up the master waiting in produce_socket()
  while (consume_socket(ctx, &conn->client)) {
    conn->birth_time = time(NULL);
    conn->ctx = ctx;

    // Fill in IP, port info early so even if SSL setup below fails,
    // error handler would have the corresponding info.
    // Thanks to Johannes Winkelmann for the patch.
    // TODO(lsm): Fix IPv6 case
    conn->request_info.remote_port = ntohs(conn->client.rsa.sin.sin_port);
    memcpy(&conn->request_info.remote_ip,
           &conn->client.rsa.sin.sin_addr.s_addr, 4);
    conn->request_info.remote_ip = ntohl(conn->request_info.remote_ip);
    conn->request_info.is_ssl = conn->client.is_ssl;

    // Plain connections are served directly; SSL connections only after a
    // successful SSL_accept handshake.
    if (!conn->client.is_ssl ||
        (conn->client.is_ssl &&
         sslize(conn, conn->ctx->ssl_ctx, SSL_accept))) {
      process_new_connection(conn);
    }

    close_connection(conn);
  }
  free(conn);

  // Signal master that we're done with connection and exiting
  (void) pthread_mutex_lock(&ctx->mutex);
  ctx->num_threads--;
  (void) pthread_cond_signal(&ctx->cond);
  assert(ctx->num_threads >= 0);
  (void) pthread_mutex_unlock(&ctx->mutex);

  DEBUG_TRACE(("exiting"));
}

// Master thread adds accepted socket to a queue
static void produce_socket(struct mg_context *ctx, const struct socket *sp) {
  (void) pthread_mutex_lock(&ctx->mutex);

  // If the queue is full, wait
  while (ctx->stop_flag == 0 &&
         ctx->sq_head - ctx->sq_tail >= (int) ARRAY_SIZE(ctx->queue)) {
    (void) pthread_cond_wait(&ctx->sq_empty, &ctx->mutex);
  }

  if (ctx->sq_head - ctx->sq_tail < (int) ARRAY_SIZE(ctx->queue)) {
    // Copy socket to the queue and increment head
    ctx->queue[ctx->sq_head % ARRAY_SIZE(ctx->queue)] = *sp;
    ctx->sq_head++;
    DEBUG_TRACE(("queued socket %d", sp->sock));
  }

  (void) pthread_cond_signal(&ctx->sq_full);
  (void) pthread_mutex_unlock(&ctx->mutex);
}

// Accept one pending connection on a listening socket, check it against
// the ACL, and either queue it for a worker or close it.
static void accept_new_connection(const struct socket *listener,
                                  struct mg_context *ctx) {
  struct socket accepted;
  char src_addr[20];
  socklen_t len;
  int allowed;

  len = sizeof(accepted.rsa);
  accepted.lsa = listener->lsa;
  accepted.sock = accept(listener->sock, &accepted.rsa.sa, &len);
  if (accepted.sock != INVALID_SOCKET) {
    allowed = check_acl(ctx, &accepted.rsa);
    if (allowed) {
      // Put accepted socket structure into the queue
      DEBUG_TRACE(("accepted socket %d", accepted.sock));
      accepted.is_ssl = listener->is_ssl;
      produce_socket(ctx, &accepted);
    } else {
      sockaddr_to_string(src_addr, sizeof(src_addr), &accepted.rsa);
      cry(fc(ctx), "%s: %s is not allowed to connect", __func__, src_addr);
      (void) closesocket(accepted.sock);
    }
  }
}

// Master thread: multiplex over all listening sockets with select(),
// dispatch accepted connections to the worker queue, and orchestrate
// shutdown once stop_flag is raised.
static void master_thread(struct mg_context *ctx) {
  fd_set read_set;
  struct timeval tv;
  struct socket *sp;
  int max_fd;

  // Increase priority of the master thread
#if defined(_WIN32)
  SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_ABOVE_NORMAL);
#endif

#if defined(ISSUE_317)
  struct sched_param sched_param;
  sched_param.sched_priority = sched_get_priority_max(SCHED_RR);
  pthread_setschedparam(pthread_self(), SCHED_RR, &sched_param);
#endif

  while (ctx->stop_flag == 0) {
    FD_ZERO(&read_set);
    max_fd = -1;

    // Add listening sockets to the read set
    for (sp = ctx->listening_sockets; sp != NULL; sp = sp->next) {
      add_to_set(sp->sock, &read_set, &max_fd);
    }

    // 200 ms timeout so the stop_flag is noticed promptly.
    tv.tv_sec = 0;
    tv.tv_usec = 200 * 1000;

    if (select(max_fd + 1, &read_set, NULL, NULL, &tv) < 0) {
#ifdef _WIN32
      // On windows, if read_set and write_set are empty,
      // select() returns "Invalid parameter" error
      // (at least on my Windows XP Pro). So in this case, we sleep here.
      mg_sleep(1000);
#endif // _WIN32
    } else {
      for (sp = ctx->listening_sockets; sp != NULL; sp = sp->next) {
        if (ctx->stop_flag == 0 && FD_ISSET(sp->sock, &read_set)) {
          accept_new_connection(sp, ctx);
        }
      }
    }
  }
  DEBUG_TRACE(("stopping workers"));

  // Stop signal received: somebody called mg_stop. Quit.
  close_all_listening_sockets(ctx);

  // Wakeup workers that are waiting for connections to handle.
  pthread_cond_broadcast(&ctx->sq_full);

  // Wait until all threads finish
  (void) pthread_mutex_lock(&ctx->mutex);
  while (ctx->num_threads > 0) {
    (void) pthread_cond_wait(&ctx->cond, &ctx->mutex);
  }
  (void) pthread_mutex_unlock(&ctx->mutex);

  // All threads exited, no sync is needed.
// Destroy mutex and condvars
  (void) pthread_mutex_destroy(&ctx->mutex);
  (void) pthread_cond_destroy(&ctx->cond);
  (void) pthread_cond_destroy(&ctx->sq_empty);
  (void) pthread_cond_destroy(&ctx->sq_full);

#if !defined(NO_SSL)
  uninitialize_ssl(ctx);
#endif

  // Signal mg_stop() that we're done
  ctx->stop_flag = 2;

  DEBUG_TRACE(("exiting"));
}

// Release everything owned by the context: config strings, SSL contexts,
// the SSL mutex array, and the context struct itself.
static void free_context(struct mg_context *ctx) {
  int i;

  // Deallocate config parameters
  for (i = 0; i < NUM_OPTIONS; i++) {
    if (ctx->config[i] != NULL)
      free(ctx->config[i]);
  }

  // Deallocate SSL context
  if (ctx->ssl_ctx != NULL) {
    SSL_CTX_free(ctx->ssl_ctx);
  }
  if (ctx->client_ssl_ctx != NULL) {
    SSL_CTX_free(ctx->client_ssl_ctx);
  }
#ifndef NO_SSL
  if (ssl_mutexes != NULL) {
    free(ssl_mutexes);
  }
#endif // !NO_SSL

  // Deallocate context itself
  free(ctx);
}

// Public API: stop the server. Raises stop_flag, busy-waits until the
// master thread acknowledges by setting it to 2, then frees the context.
void mg_stop(struct mg_context *ctx) {
  ctx->stop_flag = 1;

  // Wait until mg_fini() stops
  while (ctx->stop_flag != 2) {
    mg_sleep(10);
  }
  free_context(ctx);

#if defined(_WIN32) && !defined(__SYMBIAN32__)
  (void) WSACleanup();
#endif // _WIN32
}

// Public API: create a context from the NULL-terminated name/value option
// list, apply options and defaults, then spawn the master thread and the
// worker thread pool. Returns NULL (and frees everything) on bad options
// or failed initialization.
struct mg_context *mg_start(mg_callback_t user_callback, void *user_data,
                            const char **options) {
  struct mg_context *ctx;
  const char *name, *value, *default_value;
  int i;

#if defined(_WIN32) && !defined(__SYMBIAN32__)
  WSADATA data;
  WSAStartup(MAKEWORD(2,2), &data);
  InitializeCriticalSection(&global_log_file_lock);
#endif // _WIN32

  // Allocate context and initialize reasonable general case defaults.
  // TODO(lsm): do proper error handling here.
  ctx = (struct mg_context *) calloc(1, sizeof(*ctx));
  ctx->user_callback = user_callback;
  ctx->user_data = user_data;

  while (options && (name = *options++) != NULL) {
    if ((i = get_option_index(name)) == -1) {
      cry(fc(ctx), "Invalid option: %s", name);
      free_context(ctx);
      return NULL;
    } else if ((value = *options++) == NULL) {
      cry(fc(ctx), "%s: option value cannot be NULL", name);
      free_context(ctx);
      return NULL;
    }
    if (ctx->config[i] != NULL) {
      cry(fc(ctx), "warning: %s: duplicate option", name);
    }
    ctx->config[i] = mg_strdup(value);
    DEBUG_TRACE(("[%s] -> [%s]", name, value));
  }

  // Set default value if needed
  for (i = 0; config_options[i * ENTRIES_PER_CONFIG_OPTION] != NULL; i++) {
    default_value = config_options[i * ENTRIES_PER_CONFIG_OPTION + 2];
    if (ctx->config[i] == NULL && default_value != NULL) {
      ctx->config[i] = mg_strdup(default_value);
      DEBUG_TRACE(("Setting default: [%s] -> [%s]",
                   config_options[i * ENTRIES_PER_CONFIG_OPTION + 1],
                   default_value));
    }
  }

  // NOTE(lsm): order is important here. SSL certificates must
  // be initialized before listening ports. UID must be set last.
  if (!set_gpass_option(ctx) ||
#if !defined(NO_SSL)
      !set_ssl_option(ctx) ||
#endif
      !set_ports_option(ctx) ||
#if !defined(_WIN32)
      !set_uid_option(ctx) ||
#endif
      !set_acl_option(ctx)) {
    free_context(ctx);
    return NULL;
  }

#if !defined(_WIN32) && !defined(__SYMBIAN32__)
  // Ignore SIGPIPE signal, so if browser cancels the request, it
  // won't kill the whole process.
  (void) signal(SIGPIPE, SIG_IGN);

  // Also ignoring SIGCHLD to let the OS to reap zombies properly.
  (void) signal(SIGCHLD, SIG_IGN);
#endif // !_WIN32

  (void) pthread_mutex_init(&ctx->mutex, NULL);
  (void) pthread_cond_init(&ctx->cond, NULL);
  (void) pthread_cond_init(&ctx->sq_empty, NULL);
  (void) pthread_cond_init(&ctx->sq_full, NULL);

  // Start master (listening) thread
  mg_start_thread((mg_thread_func_t) master_thread, ctx);

  // Start worker threads
  for (i = 0; i < atoi(ctx->config[NUM_THREADS]); i++) {
    if (mg_start_thread((mg_thread_func_t) worker_thread, ctx) != 0) {
      cry(fc(ctx), "Cannot start worker thread: %d", ERRNO);
    } else {
      ctx->num_threads++;
    }
  }

  return ctx;
}
mit
wulin9005/cocosbuilder
Examples/CocosBuilderExample/libs/CCBReader/SSZipArchive/minizip/unzip.c
109
71943
/* unzip.c -- IO for uncompress .zip files using zlib Version 1.1, February 14h, 2010 part of the MiniZip project - ( http://www.winimage.com/zLibDll/minizip.html ) Copyright (C) 1998-2010 Gilles Vollant (minizip) ( http://www.winimage.com/zLibDll/minizip.html ) Modifications of Unzip for Zip64 Copyright (C) 2007-2008 Even Rouault Modifications for Zip64 support on both zip and unzip Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) For more info read MiniZip_info.txt ------------------------------------------------------------------------------------ Decryption code comes from crypt.c by Info-ZIP but has been greatly reduced in terms of compatibility with older software. The following is from the original crypt.c. Code woven in by Terry Thorsen 1/2003. Copyright (c) 1990-2000 Info-ZIP. All rights reserved. See the accompanying file LICENSE, version 2000-Apr-09 or later (the contents of which are also included in zip.h) for terms of use. If, for some reason, all these files are missing, the Info-ZIP license also may be found at: ftp://ftp.info-zip.org/pub/infozip/license.html crypt.c (full version) by Info-ZIP. Last revised: [see crypt.h] The encryption/decryption parts of this source code (as opposed to the non-echoing password parts) were originally written in Europe. The whole source package can be freely distributed, including from the USA. (Prior to January 2000, re-export from the US was a violation of US law.) This encryption code is a direct transcription of the algorithm from Roger Schlafly, described by Phil Katz in the file appnote.txt. This file (appnote.txt) is distributed with the PKZIP program (even in the version without encryption capabilities). 
------------------------------------------------------------------------------------ Changes in unzip.c 2007-2008 - Even Rouault - Addition of cpl_unzGetCurrentFileZStreamPos 2007-2008 - Even Rouault - Decoration of symbol names unz* -> cpl_unz* 2007-2008 - Even Rouault - Remove old C style function prototypes 2007-2008 - Even Rouault - Add unzip support for ZIP64 Copyright (C) 2007-2008 Even Rouault Oct-2009 - Mathias Svensson - Removed cpl_* from symbol names (Even Rouault added them but since this is now moved to a new project (minizip64) I renamed them again). Oct-2009 - Mathias Svensson - Fixed problem if uncompressed size was > 4G and compressed size was <4G should only read the compressed/uncompressed size from the Zip64 format if the size from normal header was 0xFFFFFFFF Oct-2009 - Mathias Svensson - Applied some bug fixes from paches recived from Gilles Vollant Oct-2009 - Mathias Svensson - Applied support to unzip files with compression mathod BZIP2 (bzip2 lib is required) Patch created by Daniel Borca Jan-2010 - back to unzip and minizip 1.0 name scheme, with compatibility layer Copyright (C) 1998 - 2010 Gilles Vollant, Even Rouault, Mathias Svensson */ #include <stdio.h> #include <stdlib.h> #include <string.h> //#ifndef NOUNCRYPT // #define NOUNCRYPT //#endif #include "zlib.h" #include "unzip.h" #ifdef STDC # include <stddef.h> # include <string.h> # include <stdlib.h> #endif #ifdef NO_ERRNO_H extern int errno; #else # include <errno.h> #endif #ifndef local # define local static #endif /* compile with -Dlocal if your debugger can't find static symbols */ #ifndef CASESENSITIVITYDEFAULT_NO # if !defined(unix) && !defined(CASESENSITIVITYDEFAULT_YES) # define CASESENSITIVITYDEFAULT_NO # endif #endif #ifndef UNZ_BUFSIZE #define UNZ_BUFSIZE (16384) #endif #ifndef UNZ_MAXFILENAMEINZIP #define UNZ_MAXFILENAMEINZIP (256) #endif #ifndef ALLOC # define ALLOC(size) (malloc(size)) #endif #ifndef TRYFREE # define TRYFREE(p) {if (p) free(p);} #endif #define 
SIZECENTRALDIRITEM (0x2e)
#define SIZEZIPLOCALHEADER (0x1e)

const char unz_copyright[] =
   " unzip 1.01 Copyright 1998-2004 Gilles Vollant - http://www.winimage.com/zLibDll";

/* unz_file_info64_internal contains internal info about a file in zipfile */
typedef struct unz_file_info64_internal_s
{
    ZPOS64_T offset_curfile;            /* relative offset of local header 8 bytes */
} unz_file_info64_internal;

/* file_in_zip64_read_info_s contains internal information about a file in
   zipfile, when reading and decompressing it */
typedef struct
{
    char  *read_buffer;                 /* internal buffer for compressed data */
    z_stream stream;                    /* zLib stream structure for inflate */
#ifdef HAVE_BZIP2
    bz_stream bstream;                  /* bzLib stream structure for bziped */
#endif
    ZPOS64_T pos_in_zipfile;            /* position in byte on the zipfile, for fseek */
    uLong stream_initialised;           /* flag set if stream structure is initialised */
    ZPOS64_T offset_local_extrafield;   /* offset of the local extra field */
    uInt  size_local_extrafield;        /* size of the local extra field */
    ZPOS64_T pos_local_extrafield;      /* position in the local extra field in read */
    ZPOS64_T total_out_64;
    uLong crc32;                        /* crc32 of all data uncompressed */
    uLong crc32_wait;                   /* crc32 we must obtain after decompress all */
    ZPOS64_T rest_read_compressed;      /* number of byte to be decompressed */
    ZPOS64_T rest_read_uncompressed;    /* number of byte to be obtained after decomp */
    zlib_filefunc64_32_def z_filefunc;
    voidpf filestream;                  /* io structure of the zipfile */
    uLong compression_method;           /* compression method (0==store) */
    ZPOS64_T byte_before_the_zipfile;   /* byte before the zipfile, (>0 for sfx) */
    int   raw;
} file_in_zip64_read_info_s;

/* unz64_s contains internal information about the zipfile */
typedef struct
{
    zlib_filefunc64_32_def z_filefunc;
    int is64bitOpenFunction;
    voidpf filestream;                  /* io structure of the zipfile */
    unz_global_info64 gi;               /* public global information */
    ZPOS64_T byte_before_the_zipfile;   /* byte before the zipfile, (>0 for sfx) */
    ZPOS64_T num_file;                  /* number of the current file in the zipfile */
    ZPOS64_T pos_in_central_dir;        /* pos of the current file in the central dir */
    ZPOS64_T current_file_ok;           /* flag about the usability of the current file */
    ZPOS64_T central_pos;               /* position of the beginning of the central dir */
    ZPOS64_T size_central_dir;          /* size of the central directory */
    ZPOS64_T offset_central_dir;        /* offset of start of central directory with
                                           respect to the starting disk number */
    unz_file_info64 cur_file_info;      /* public info about the current file in zip */
    unz_file_info64_internal cur_file_info_internal; /* private info about it */
    file_in_zip64_read_info_s* pfile_in_zip_read;    /* structure about the current
                                           file if we are decompressing it */
    int encrypted;
    int isZip64;
#    ifndef NOUNCRYPT
    unsigned long keys[3];              /* keys defining the pseudo-random sequence */
    const unsigned long* pcrc_32_tab;
#    endif
} unz64_s;

#ifndef NOUNCRYPT
#include "crypt.h"
#endif

/* ===========================================================================
     Read a byte from a gz_stream; update next_in and avail_in. Return EOF
   for end of file.
   IN assertion: the stream s has been successfully opened for reading.
*/

local int unz64local_getByte OF((
    const zlib_filefunc64_32_def* pzlib_filefunc_def,
    voidpf filestream,
    int *pi));

/* Read exactly one byte through the I/O callback table; UNZ_OK on success,
   UNZ_ERRNO on read error, UNZ_EOF at end of file. */
local int unz64local_getByte(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, int *pi)
{
    unsigned char c;
    int err = (int)ZREAD64(*pzlib_filefunc_def,filestream,&c,1);
    if (err==1)
    {
        *pi = (int)c;
        return UNZ_OK;
    }
    else
    {
        if (ZERROR64(*pzlib_filefunc_def,filestream))
            return UNZ_ERRNO;
        else
            return UNZ_EOF;
    }
}


/* ===========================================================================
   Reads a long in LSB order from the given gz_stream. Sets
*/
local int unz64local_getShort OF((
    const zlib_filefunc64_32_def* pzlib_filefunc_def,
    voidpf filestream,
    uLong *pX));

/* Read a 2-byte little-endian value; *pX is set to 0 on failure. */
local int unz64local_getShort (const zlib_filefunc64_32_def* pzlib_filefunc_def,
                             voidpf filestream,
                             uLong *pX)
{
    uLong x ;
    int i = 0;
    int err;

    err = unz64local_getByte(pzlib_filefunc_def,filestream,&i);
    x = (uLong)i;

    if (err==UNZ_OK)
        err = unz64local_getByte(pzlib_filefunc_def,filestream,&i);
    x |= ((uLong)i)<<8;

    if (err==UNZ_OK)
        *pX = x;
    else
        *pX = 0;
    return err;
}

local int unz64local_getLong OF((
    const zlib_filefunc64_32_def* pzlib_filefunc_def,
    voidpf filestream,
    uLong *pX));

/* Read a 4-byte little-endian value; *pX is set to 0 on failure. */
local int unz64local_getLong (const zlib_filefunc64_32_def* pzlib_filefunc_def,
                            voidpf filestream,
                            uLong *pX)
{
    uLong x ;
    int i = 0;
    int err;

    err = unz64local_getByte(pzlib_filefunc_def,filestream,&i);
    x = (uLong)i;

    if (err==UNZ_OK)
        err = unz64local_getByte(pzlib_filefunc_def,filestream,&i);
    x |= ((uLong)i)<<8;

    if (err==UNZ_OK)
        err = unz64local_getByte(pzlib_filefunc_def,filestream,&i);
    x |= ((uLong)i)<<16;

    if (err==UNZ_OK)
        err = unz64local_getByte(pzlib_filefunc_def,filestream,&i);
    x += ((uLong)i)<<24;

    if (err==UNZ_OK)
        *pX = x;
    else
        *pX = 0;
    return err;
}

local int unz64local_getLong64 OF((
    const zlib_filefunc64_32_def* pzlib_filefunc_def,
    voidpf filestream,
    ZPOS64_T *pX));

/* Read an 8-byte little-endian value; *pX is set to 0 on failure. */
local int unz64local_getLong64 (const zlib_filefunc64_32_def* pzlib_filefunc_def,
                            voidpf filestream,
                            ZPOS64_T *pX)
{
    ZPOS64_T x ;
    int i = 0;
    int err;

    err = unz64local_getByte(pzlib_filefunc_def,filestream,&i);
    x = (ZPOS64_T)i;

    if (err==UNZ_OK)
        err = unz64local_getByte(pzlib_filefunc_def,filestream,&i);
    x |= ((ZPOS64_T)i)<<8;

    if (err==UNZ_OK)
        err = unz64local_getByte(pzlib_filefunc_def,filestream,&i);
    x |= ((ZPOS64_T)i)<<16;

    if (err==UNZ_OK)
        err = unz64local_getByte(pzlib_filefunc_def,filestream,&i);
    x |= ((ZPOS64_T)i)<<24;

    if (err==UNZ_OK)
        err = unz64local_getByte(pzlib_filefunc_def,filestream,&i);
    x |= ((ZPOS64_T)i)<<32;

    if (err==UNZ_OK)
        err = unz64local_getByte(pzlib_filefunc_def,filestream,&i);
x |= ((ZPOS64_T)i)<<40; if (err==UNZ_OK) err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); x |= ((ZPOS64_T)i)<<48; if (err==UNZ_OK) err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); x |= ((ZPOS64_T)i)<<56; if (err==UNZ_OK) *pX = x; else *pX = 0; return err; } /* My own strcmpi / strcasecmp */ local int strcmpcasenosensitive_internal (const char* fileName1, const char* fileName2) { for (;;) { char c1=*(fileName1++); char c2=*(fileName2++); if ((c1>='a') && (c1<='z')) c1 -= 0x20; if ((c2>='a') && (c2<='z')) c2 -= 0x20; if (c1=='\0') return ((c2=='\0') ? 0 : -1); if (c2=='\0') return 1; if (c1<c2) return -1; if (c1>c2) return 1; } } #ifdef CASESENSITIVITYDEFAULT_NO #define CASESENSITIVITYDEFAULTVALUE 2 #else #define CASESENSITIVITYDEFAULTVALUE 1 #endif #ifndef STRCMPCASENOSENTIVEFUNCTION #define STRCMPCASENOSENTIVEFUNCTION strcmpcasenosensitive_internal #endif /* Compare two filename (fileName1,fileName2). If iCaseSenisivity = 1, comparision is case sensitivity (like strcmp) If iCaseSenisivity = 2, comparision is not case sensitivity (like strcmpi or strcasecmp) If iCaseSenisivity = 0, case sensitivity is defaut of your operating system (like 1 on Unix, 2 on Windows) */ extern int ZEXPORT unzStringFileNameCompare (const char* fileName1, const char* fileName2, int iCaseSensitivity) { if (iCaseSensitivity==0) iCaseSensitivity=CASESENSITIVITYDEFAULTVALUE; if (iCaseSensitivity==1) return strcmp(fileName1,fileName2); return STRCMPCASENOSENTIVEFUNCTION(fileName1,fileName2); } #ifndef BUFREADCOMMENT #define BUFREADCOMMENT (0x400) #endif /* Locate the Central directory of a zipfile (at the end, just before the global comment) */ local ZPOS64_T unz64local_SearchCentralDir OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream)); local ZPOS64_T unz64local_SearchCentralDir(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream) { unsigned char* buf; ZPOS64_T uSizeFile; ZPOS64_T uBackRead; ZPOS64_T uMaxBack=0xffff; /* maximum size 
of global comment */ ZPOS64_T uPosFound=0; if (ZSEEK64(*pzlib_filefunc_def,filestream,0,ZLIB_FILEFUNC_SEEK_END) != 0) return 0; uSizeFile = ZTELL64(*pzlib_filefunc_def,filestream); if (uMaxBack>uSizeFile) uMaxBack = uSizeFile; buf = (unsigned char*)ALLOC(BUFREADCOMMENT+4); if (buf==NULL) return 0; uBackRead = 4; while (uBackRead<uMaxBack) { uLong uReadSize; ZPOS64_T uReadPos ; int i; if (uBackRead+BUFREADCOMMENT>uMaxBack) uBackRead = uMaxBack; else uBackRead+=BUFREADCOMMENT; uReadPos = uSizeFile-uBackRead ; uReadSize = ((BUFREADCOMMENT+4) < (uSizeFile-uReadPos)) ? (BUFREADCOMMENT+4) : (uLong)(uSizeFile-uReadPos); if (ZSEEK64(*pzlib_filefunc_def,filestream,uReadPos,ZLIB_FILEFUNC_SEEK_SET)!=0) break; if (ZREAD64(*pzlib_filefunc_def,filestream,buf,uReadSize)!=uReadSize) break; for (i=(int)uReadSize-3; (i--)>0;) if (((*(buf+i))==0x50) && ((*(buf+i+1))==0x4b) && ((*(buf+i+2))==0x05) && ((*(buf+i+3))==0x06)) { uPosFound = uReadPos+i; break; } if (uPosFound!=0) break; } TRYFREE(buf); return uPosFound; } /* Locate the Central directory 64 of a zipfile (at the end, just before the global comment) */ local ZPOS64_T unz64local_SearchCentralDir64 OF(( const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream)); local ZPOS64_T unz64local_SearchCentralDir64(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream) { unsigned char* buf; ZPOS64_T uSizeFile; ZPOS64_T uBackRead; ZPOS64_T uMaxBack=0xffff; /* maximum size of global comment */ ZPOS64_T uPosFound=0; uLong uL; ZPOS64_T relativeOffset; if (ZSEEK64(*pzlib_filefunc_def,filestream,0,ZLIB_FILEFUNC_SEEK_END) != 0) return 0; uSizeFile = ZTELL64(*pzlib_filefunc_def,filestream); if (uMaxBack>uSizeFile) uMaxBack = uSizeFile; buf = (unsigned char*)ALLOC(BUFREADCOMMENT+4); if (buf==NULL) return 0; uBackRead = 4; while (uBackRead<uMaxBack) { uLong uReadSize; ZPOS64_T uReadPos; int i; if (uBackRead+BUFREADCOMMENT>uMaxBack) uBackRead = uMaxBack; else uBackRead+=BUFREADCOMMENT; uReadPos = uSizeFile-uBackRead ; 
uReadSize = ((BUFREADCOMMENT+4) < (uSizeFile-uReadPos)) ? (BUFREADCOMMENT+4) : (uLong)(uSizeFile-uReadPos); if (ZSEEK64(*pzlib_filefunc_def,filestream,uReadPos,ZLIB_FILEFUNC_SEEK_SET)!=0) break; if (ZREAD64(*pzlib_filefunc_def,filestream,buf,uReadSize)!=uReadSize) break; for (i=(int)uReadSize-3; (i--)>0;) if (((*(buf+i))==0x50) && ((*(buf+i+1))==0x4b) && ((*(buf+i+2))==0x06) && ((*(buf+i+3))==0x07)) { uPosFound = uReadPos+i; break; } if (uPosFound!=0) break; } TRYFREE(buf); if (uPosFound == 0) return 0; /* Zip64 end of central directory locator */ if (ZSEEK64(*pzlib_filefunc_def,filestream, uPosFound,ZLIB_FILEFUNC_SEEK_SET)!=0) return 0; /* the signature, already checked */ if (unz64local_getLong(pzlib_filefunc_def,filestream,&uL)!=UNZ_OK) return 0; /* number of the disk with the start of the zip64 end of central directory */ if (unz64local_getLong(pzlib_filefunc_def,filestream,&uL)!=UNZ_OK) return 0; if (uL != 0) return 0; /* relative offset of the zip64 end of central directory record */ if (unz64local_getLong64(pzlib_filefunc_def,filestream,&relativeOffset)!=UNZ_OK) return 0; /* total number of disks */ if (unz64local_getLong(pzlib_filefunc_def,filestream,&uL)!=UNZ_OK) return 0; if (uL != 1) return 0; /* Goto end of central directory record */ if (ZSEEK64(*pzlib_filefunc_def,filestream, relativeOffset,ZLIB_FILEFUNC_SEEK_SET)!=0) return 0; /* the signature */ if (unz64local_getLong(pzlib_filefunc_def,filestream,&uL)!=UNZ_OK) return 0; if (uL != 0x06064b50) return 0; return relativeOffset; } /* Open a Zip file. path contain the full pathname (by example, on a Windows NT computer "c:\\test\\zlib114.zip" or on an Unix computer "zlib/zlib114.zip". If the zipfile cannot be opened (file doesn't exist or in not valid), the return value is NULL. Else, the return value is a unzFile Handle, usable with other function of this unzip package. 
*/
/* Common open path for all the unzOpen* entry points.
   Locates the (Zip64 or classic) end-of-central-directory record, reads the
   global archive metadata into an unz64_s, positions on the first file, and
   returns the handle (NULL on any failure). Multi-disk (spanned) archives
   are rejected. */
local unzFile unzOpenInternal (const void *path,
                               zlib_filefunc64_32_def* pzlib_filefunc64_32_def,
                               int is64bitOpenFunction)
{
    unz64_s us;
    unz64_s *s;
    ZPOS64_T central_pos;
    uLong uL;

    uLong number_disk;          /* number of the current dist, used for
                                   spaning ZIP, unsupported, always 0*/
    uLong number_disk_with_CD;  /* number the the disk with central dir, used
                                   for spaning ZIP, unsupported, always 0*/
    ZPOS64_T number_entry_CD;   /* total number of entries in
                                   the central dir
                                   (same than number_entry on nospan) */

    int err=UNZ_OK;

    /* unz_copyright is patched by some build configurations; an unexpected
       first byte disables the library. */
    if (unz_copyright[0]!=' ')
        return NULL;

    us.z_filefunc.zseek32_file = NULL;
    us.z_filefunc.ztell32_file = NULL;
    if (pzlib_filefunc64_32_def==NULL)
        fill_fopen64_filefunc(&us.z_filefunc.zfile_func64);
    else
        us.z_filefunc = *pzlib_filefunc64_32_def;
    us.is64bitOpenFunction = is64bitOpenFunction;

    us.filestream = ZOPEN64(us.z_filefunc,
                            path,
                            ZLIB_FILEFUNC_MODE_READ |
                            ZLIB_FILEFUNC_MODE_EXISTING);
    if (us.filestream==NULL)
        return NULL;

    /* Prefer the Zip64 end-of-central-directory record; fall back to the
       classic one below if it is absent. */
    central_pos = unz64local_SearchCentralDir64(&us.z_filefunc,us.filestream);
    if (central_pos)
    {
        uLong uS;
        ZPOS64_T uL64;

        us.isZip64 = 1;

        if (ZSEEK64(us.z_filefunc, us.filestream,
                    central_pos,ZLIB_FILEFUNC_SEEK_SET)!=0)
            err=UNZ_ERRNO;

        /* The reads below must stay in exactly this order: they walk the
           fixed layout of the Zip64 EOCD record field by field. */

        /* the signature, already checked */
        if (unz64local_getLong(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK)
            err=UNZ_ERRNO;

        /* size of zip64 end of central directory record */
        if (unz64local_getLong64(&us.z_filefunc, us.filestream,&uL64)!=UNZ_OK)
            err=UNZ_ERRNO;

        /* version made by */
        if (unz64local_getShort(&us.z_filefunc, us.filestream,&uS)!=UNZ_OK)
            err=UNZ_ERRNO;

        /* version needed to extract */
        if (unz64local_getShort(&us.z_filefunc, us.filestream,&uS)!=UNZ_OK)
            err=UNZ_ERRNO;

        /* number of this disk */
        if (unz64local_getLong(&us.z_filefunc, us.filestream,&number_disk)!=UNZ_OK)
            err=UNZ_ERRNO;

        /* number of the disk with the start of the central directory */
        if (unz64local_getLong(&us.z_filefunc, us.filestream,&number_disk_with_CD)!=UNZ_OK)
            err=UNZ_ERRNO;

        /* total number of entries in the central directory on this disk */
        if (unz64local_getLong64(&us.z_filefunc, us.filestream,&us.gi.number_entry)!=UNZ_OK)
            err=UNZ_ERRNO;

        /* total number of entries in the central directory */
        if (unz64local_getLong64(&us.z_filefunc, us.filestream,&number_entry_CD)!=UNZ_OK)
            err=UNZ_ERRNO;

        if ((number_entry_CD!=us.gi.number_entry) ||
            (number_disk_with_CD!=0) ||
            (number_disk!=0))
            err=UNZ_BADZIPFILE;

        /* size of the central directory */
        if (unz64local_getLong64(&us.z_filefunc, us.filestream,&us.size_central_dir)!=UNZ_OK)
            err=UNZ_ERRNO;

        /* offset of start of central directory with respect to the
           starting disk number */
        if (unz64local_getLong64(&us.z_filefunc, us.filestream,&us.offset_central_dir)!=UNZ_OK)
            err=UNZ_ERRNO;

        /* The Zip64 EOCD record carries no comment length field. */
        us.gi.size_comment = 0;
    }
    else
    {
        /* Classic (non-Zip64) end-of-central-directory record. */
        central_pos = unz64local_SearchCentralDir(&us.z_filefunc,us.filestream);
        if (central_pos==0)
            err=UNZ_ERRNO;

        us.isZip64 = 0;

        if (ZSEEK64(us.z_filefunc, us.filestream,
                    central_pos,ZLIB_FILEFUNC_SEEK_SET)!=0)
            err=UNZ_ERRNO;

        /* the signature, already checked */
        if (unz64local_getLong(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK)
            err=UNZ_ERRNO;

        /* number of this disk */
        if (unz64local_getShort(&us.z_filefunc, us.filestream,&number_disk)!=UNZ_OK)
            err=UNZ_ERRNO;

        /* number of the disk with the start of the central directory */
        if (unz64local_getShort(&us.z_filefunc, us.filestream,&number_disk_with_CD)!=UNZ_OK)
            err=UNZ_ERRNO;

        /* total number of entries in the central dir on this disk */
        if (unz64local_getShort(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK)
            err=UNZ_ERRNO;
        us.gi.number_entry = uL;

        /* total number of entries in the central dir */
        if (unz64local_getShort(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK)
            err=UNZ_ERRNO;
        number_entry_CD = uL;

        if ((number_entry_CD!=us.gi.number_entry) ||
            (number_disk_with_CD!=0) ||
            (number_disk!=0))
            err=UNZ_BADZIPFILE;

        /* size of the central directory */
        if (unz64local_getLong(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK)
            err=UNZ_ERRNO;
        us.size_central_dir = uL;

        /* offset of start of central directory with respect to the
           starting disk number */
        if (unz64local_getLong(&us.z_filefunc, us.filestream,&uL)!=UNZ_OK)
            err=UNZ_ERRNO;
        us.offset_central_dir = uL;

        /* zipfile comment length */
        if (unz64local_getShort(&us.z_filefunc, us.filestream,&us.gi.size_comment)!=UNZ_OK)
            err=UNZ_ERRNO;
    }

    /* The EOCD record cannot start before the end of the central directory. */
    if ((central_pos<us.offset_central_dir+us.size_central_dir) &&
        (err==UNZ_OK))
        err=UNZ_BADZIPFILE;

    if (err!=UNZ_OK)
    {
        ZCLOSE64(us.z_filefunc, us.filestream);
        return NULL;
    }

    /* Bytes prepended to the archive (e.g. a self-extractor stub); all
       stored offsets are relative to the start of the zip data proper. */
    us.byte_before_the_zipfile = central_pos -
                            (us.offset_central_dir+us.size_central_dir);
    us.central_pos = central_pos;
    us.pfile_in_zip_read = NULL;
    us.encrypted = 0;

    s=(unz64_s*)ALLOC(sizeof(unz64_s));
    if( s != NULL)
    {
        *s=us;
        unzGoToFirstFile((unzFile)s);
    }
    return (unzFile)s;
}

/* Open with caller-supplied 32-bit I/O callbacks (or defaults when NULL). */
extern unzFile ZEXPORT unzOpen2 (const char *path,
                                 zlib_filefunc_def* pzlib_filefunc32_def)
{
    if (pzlib_filefunc32_def != NULL)
    {
        zlib_filefunc64_32_def zlib_filefunc64_32_def_fill;
        fill_zlib_filefunc64_32_def_from_filefunc32(&zlib_filefunc64_32_def_fill,pzlib_filefunc32_def);
        return unzOpenInternal(path, &zlib_filefunc64_32_def_fill, 0);
    }
    else
        return unzOpenInternal(path, NULL, 0);
}

/* Open with caller-supplied 64-bit I/O callbacks (or defaults when NULL). */
extern unzFile ZEXPORT unzOpen2_64 (const void *path,
                                    zlib_filefunc64_def* pzlib_filefunc_def)
{
    if (pzlib_filefunc_def != NULL)
    {
        zlib_filefunc64_32_def zlib_filefunc64_32_def_fill;
        zlib_filefunc64_32_def_fill.zfile_func64 = *pzlib_filefunc_def;
        zlib_filefunc64_32_def_fill.ztell32_file = NULL;
        zlib_filefunc64_32_def_fill.zseek32_file = NULL;
        return unzOpenInternal(path, &zlib_filefunc64_32_def_fill, 1);
    }
    else
        return unzOpenInternal(path, NULL, 1);
}

/* Open using the default (32-bit path) I/O callbacks. */
extern unzFile ZEXPORT unzOpen (const char *path)
{
    return unzOpenInternal(path, NULL, 0);
}

/* Open using the default 64-bit I/O callbacks. */
extern unzFile ZEXPORT unzOpen64 (const void *path)
{
    return unzOpenInternal(path, NULL, 1);
}

/*
  Close a ZipFile opened with unzipOpen.
  If there is files inside the .Zip opened with unzipOpenCurrentFile (see later),
    these files MUST be closed with unzipCloseCurrentFile before call unzipClose.
  return UNZ_OK if there is no problem.
*/
/* Close the archive handle, first closing any file currently open for
   reading inside it, then the underlying stream, then freeing the state. */
extern int ZEXPORT unzClose (unzFile file)
{
    unz64_s* s;
    if (file==NULL)
        return UNZ_PARAMERROR;
    s=(unz64_s*)file;

    if (s->pfile_in_zip_read!=NULL)
        unzCloseCurrentFile(file);

    ZCLOSE64(s->z_filefunc, s->filestream);
    TRYFREE(s);
    return UNZ_OK;
}

/*
  Write info about the ZipFile in the *pglobal_info structure.
  No preparation of the structure is needed
  return UNZ_OK if there is no problem.
*/
extern int ZEXPORT unzGetGlobalInfo64 (unzFile file, unz_global_info64* pglobal_info)
{
    unz64_s* s;
    if (file==NULL)
        return UNZ_PARAMERROR;
    s=(unz64_s*)file;
    *pglobal_info=s->gi;
    return UNZ_OK;
}

/* 32-bit variant: narrows the 64-bit entry count for legacy callers. */
extern int ZEXPORT unzGetGlobalInfo (unzFile file, unz_global_info* pglobal_info32)
{
    unz64_s* s;
    if (file==NULL)
        return UNZ_PARAMERROR;
    s=(unz64_s*)file;
    /* to do : check if number_entry is not truncated */
    pglobal_info32->number_entry = (uLong)s->gi.number_entry;
    pglobal_info32->size_comment = s->gi.size_comment;
    return UNZ_OK;
}

/*
   Translate date/time from Dos format to tm_unz (readable more easilty)
   DOS packs the date in the high 16 bits (y/m/d) and the time in the low
   16 bits (h/m/s, seconds stored divided by 2).
*/
local void unz64local_DosDateToTmuDate (ZPOS64_T ulDosDate, tm_unz* ptm)
{
    ZPOS64_T uDate;
    uDate = (ZPOS64_T)(ulDosDate>>16);
    ptm->tm_mday = (uInt)(uDate&0x1f) ;
    ptm->tm_mon =  (uInt)((((uDate)&0x1E0)/0x20)-1) ;
    ptm->tm_year = (uInt)(((uDate&0x0FE00)/0x0200)+1980) ;

    ptm->tm_hour = (uInt) ((ulDosDate &0xF800)/0x800);
    ptm->tm_min =  (uInt) ((ulDosDate&0x7E0)/0x20) ;
    ptm->tm_sec =  (uInt) (2*(ulDosDate&0x1f)) ;
}

/*
  Get Info about the current file in the zipfile, with internal only info
*/
local int unz64local_GetCurrentFileInfoInternal OF((unzFile file,
                                                  unz_file_info64 *pfile_info,
                                                  unz_file_info64_internal
                                                  *pfile_info_internal,
                                                  char *szFileName,
                                                  uLong fileNameBufferSize,
                                                  void *extraField,
                                                  uLong extraFieldBufferSize,
                                                  char *szComment,
                                                  uLong commentBufferSize));

/* Parse the central-directory record the handle is currently positioned on.
   Fills *pfile_info / *pfile_info_internal (either may be NULL) and
   optionally copies out the filename, extra field and comment into the
   caller's buffers (truncating to the given sizes; filename/comment are
   NUL-terminated only when they fit). The extra field is additionally
   scanned for the ZIP64 extension (header id 0x0001) to replace any
   0xFFFFFFFF placeholder sizes/offsets with their true 64-bit values.
   lSeek tracks how many bytes of the variable-length tail still have to be
   skipped before the next section can be read. */
local int unz64local_GetCurrentFileInfoInternal (unzFile file,
                                                  unz_file_info64 *pfile_info,
                                                  unz_file_info64_internal
                                                  *pfile_info_internal,
                                                  char *szFileName,
                                                  uLong fileNameBufferSize,
                                                  void *extraField,
                                                  uLong extraFieldBufferSize,
                                                  char *szComment,
                                                  uLong commentBufferSize)
{
    unz64_s* s;
    unz_file_info64 file_info;
    unz_file_info64_internal file_info_internal;
    int err=UNZ_OK;
    uLong uMagic;
    long lSeek=0;
    uLong uL;

    if (file==NULL)
        return UNZ_PARAMERROR;
    s=(unz64_s*)file;
    if (ZSEEK64(s->z_filefunc, s->filestream,
              s->pos_in_central_dir+s->byte_before_the_zipfile,
              ZLIB_FILEFUNC_SEEK_SET)!=0)
        err=UNZ_ERRNO;

    /* we check the magic */
    if (err==UNZ_OK)
    {
        if (unz64local_getLong(&s->z_filefunc, s->filestream,&uMagic) != UNZ_OK)
            err=UNZ_ERRNO;
        else if (uMagic!=0x02014b50) /* central-directory file header signature */
            err=UNZ_BADZIPFILE;
    }

    /* Fixed-layout header fields; the read order mirrors the on-disk format
       and must not change. */
    if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.version) != UNZ_OK)
        err=UNZ_ERRNO;

    if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.version_needed) != UNZ_OK)
        err=UNZ_ERRNO;

    if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.flag) != UNZ_OK)
        err=UNZ_ERRNO;

    if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.compression_method) != UNZ_OK)
        err=UNZ_ERRNO;

    if (unz64local_getLong(&s->z_filefunc, s->filestream,&file_info.dosDate) != UNZ_OK)
        err=UNZ_ERRNO;

    unz64local_DosDateToTmuDate(file_info.dosDate,&file_info.tmu_date);

    if (unz64local_getLong(&s->z_filefunc, s->filestream,&file_info.crc) != UNZ_OK)
        err=UNZ_ERRNO;

    /* 32-bit sizes; 0xFFFFFFFF means "see the ZIP64 extra field" below. */
    if (unz64local_getLong(&s->z_filefunc, s->filestream,&uL) != UNZ_OK)
        err=UNZ_ERRNO;
    file_info.compressed_size = uL;

    if (unz64local_getLong(&s->z_filefunc, s->filestream,&uL) != UNZ_OK)
        err=UNZ_ERRNO;
    file_info.uncompressed_size = uL;

    if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.size_filename) != UNZ_OK)
        err=UNZ_ERRNO;

    if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.size_file_extra) != UNZ_OK)
        err=UNZ_ERRNO;

    if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.size_file_comment) != UNZ_OK)
        err=UNZ_ERRNO;

    if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.disk_num_start) != UNZ_OK)
        err=UNZ_ERRNO;

    if (unz64local_getShort(&s->z_filefunc, s->filestream,&file_info.internal_fa) != UNZ_OK)
        err=UNZ_ERRNO;

    if (unz64local_getLong(&s->z_filefunc, s->filestream,&file_info.external_fa) != UNZ_OK)
        err=UNZ_ERRNO;

    // relative offset of local header
    if (unz64local_getLong(&s->z_filefunc, s->filestream,&uL) != UNZ_OK)
        err=UNZ_ERRNO;
    file_info_internal.offset_curfile = uL;

    lSeek+=file_info.size_filename;
    if ((err==UNZ_OK) && (szFileName!=NULL))
    {
        uLong uSizeRead ;
        if (file_info.size_filename<fileNameBufferSize)
        {
            /* NUL-terminate only when the whole name fits. */
            *(szFileName+file_info.size_filename)='\0';
            uSizeRead = file_info.size_filename;
        }
        else
            uSizeRead = fileNameBufferSize;

        if ((file_info.size_filename>0) && (fileNameBufferSize>0))
            if (ZREAD64(s->z_filefunc, s->filestream,szFileName,uSizeRead)!=uSizeRead)
                err=UNZ_ERRNO;
        lSeek -= uSizeRead;
    }

    // Read extrafield
    if ((err==UNZ_OK) && (extraField!=NULL))
    {
        ZPOS64_T uSizeRead ;
        if (file_info.size_file_extra<extraFieldBufferSize)
            uSizeRead = file_info.size_file_extra;
        else
            uSizeRead = extraFieldBufferSize;

        if (lSeek!=0)
        {
            if (ZSEEK64(s->z_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0)
                lSeek=0;
            else
                err=UNZ_ERRNO;
        }

        if ((file_info.size_file_extra>0) && (extraFieldBufferSize>0))
            if (ZREAD64(s->z_filefunc, s->filestream,extraField,(uLong)uSizeRead)!=uSizeRead)
                err=UNZ_ERRNO;

        lSeek += file_info.size_file_extra - (uLong)uSizeRead;
    }
    else
        lSeek += file_info.size_file_extra;

    /* Walk the extra field looking for the ZIP64 extension block. */
    if ((err==UNZ_OK) && (file_info.size_file_extra != 0))
    {
        uLong acc = 0;

        // since lSeek now points to after the extra field we need to move back
        lSeek -= file_info.size_file_extra;

        if (lSeek!=0)
        {
            if (ZSEEK64(s->z_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0)
                lSeek=0;
            else
                err=UNZ_ERRNO;
        }

        while(acc < file_info.size_file_extra)
        {
            uLong headerId;
            uLong dataSize;

            if (unz64local_getShort(&s->z_filefunc, s->filestream,&headerId) != UNZ_OK)
                err=UNZ_ERRNO;

            if (unz64local_getShort(&s->z_filefunc, s->filestream,&dataSize) != UNZ_OK)
                err=UNZ_ERRNO;

            /* ZIP64 extra fields */
            if (headerId == 0x0001)
            {
                uLong uL;

                /* Each 64-bit value is present only when its 32-bit
                   counterpart was the 0xFFFFFFFF placeholder. */
                if(file_info.uncompressed_size == (ZPOS64_T)(unsigned long)-1)
                {
                    if (unz64local_getLong64(&s->z_filefunc, s->filestream,&file_info.uncompressed_size) != UNZ_OK)
                        err=UNZ_ERRNO;
                }

                if(file_info.compressed_size == (ZPOS64_T)(unsigned long)-1)
                {
                    if (unz64local_getLong64(&s->z_filefunc, s->filestream,&file_info.compressed_size) != UNZ_OK)
                        err=UNZ_ERRNO;
                }

                if(file_info_internal.offset_curfile == (ZPOS64_T)(unsigned long)-1)
                {
                    /* Relative Header offset */
                    if (unz64local_getLong64(&s->z_filefunc, s->filestream,&file_info_internal.offset_curfile) != UNZ_OK)
                        err=UNZ_ERRNO;
                }

                if(file_info.disk_num_start == (unsigned long)-1)
                {
                    /* Disk Start Number */
                    if (unz64local_getLong(&s->z_filefunc, s->filestream,&uL) != UNZ_OK)
                        err=UNZ_ERRNO;
                }
            }
            else
            {
                if (ZSEEK64(s->z_filefunc, s->filestream,dataSize,ZLIB_FILEFUNC_SEEK_CUR)!=0)
                    err=UNZ_ERRNO;
            }

            acc += 2 + 2 + dataSize;
        }
    }

    if ((err==UNZ_OK) && (szComment!=NULL))
    {
        uLong uSizeRead ;
        if (file_info.size_file_comment<commentBufferSize)
        {
            *(szComment+file_info.size_file_comment)='\0';
            uSizeRead = file_info.size_file_comment;
        }
        else
            uSizeRead = commentBufferSize;

        if (lSeek!=0)
        {
            if (ZSEEK64(s->z_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0)
            {
#ifndef __clang_analyzer__
                lSeek=0;
#endif
            }
            else
                err=UNZ_ERRNO;
        }

        if ((file_info.size_file_comment>0) && (commentBufferSize>0))
            if (ZREAD64(s->z_filefunc, s->filestream,szComment,uSizeRead)!=uSizeRead)
                err=UNZ_ERRNO;
#ifndef __clang_analyzer__
        lSeek+=file_info.size_file_comment - uSizeRead;
#endif
    }
#ifndef __clang_analyzer__
    else
        lSeek+=file_info.size_file_comment;
#endif

    if ((err==UNZ_OK) && (pfile_info!=NULL))
        *pfile_info=file_info;

    if ((err==UNZ_OK) && (pfile_info_internal!=NULL))
        *pfile_info_internal=file_info_internal;

    return err;
}

/*
  Write info about the ZipFile in the *pglobal_info structure.
  No preparation of the structure is needed
  return UNZ_OK if there is no problem.
*/ extern int ZEXPORT unzGetCurrentFileInfo64 (unzFile file, unz_file_info64 * pfile_info, char * szFileName, uLong fileNameBufferSize, void *extraField, uLong extraFieldBufferSize, char* szComment, uLong commentBufferSize) { return unz64local_GetCurrentFileInfoInternal(file,pfile_info,NULL, szFileName,fileNameBufferSize, extraField,extraFieldBufferSize, szComment,commentBufferSize); } extern int ZEXPORT unzGetCurrentFileInfo (unzFile file, unz_file_info * pfile_info, char * szFileName, uLong fileNameBufferSize, void *extraField, uLong extraFieldBufferSize, char* szComment, uLong commentBufferSize) { int err; unz_file_info64 file_info64; err = unz64local_GetCurrentFileInfoInternal(file,&file_info64,NULL, szFileName,fileNameBufferSize, extraField,extraFieldBufferSize, szComment,commentBufferSize); if (err==UNZ_OK) { pfile_info->version = file_info64.version; pfile_info->version_needed = file_info64.version_needed; pfile_info->flag = file_info64.flag; pfile_info->compression_method = file_info64.compression_method; pfile_info->dosDate = file_info64.dosDate; pfile_info->crc = file_info64.crc; pfile_info->size_filename = file_info64.size_filename; pfile_info->size_file_extra = file_info64.size_file_extra; pfile_info->size_file_comment = file_info64.size_file_comment; pfile_info->disk_num_start = file_info64.disk_num_start; pfile_info->internal_fa = file_info64.internal_fa; pfile_info->external_fa = file_info64.external_fa; pfile_info->tmu_date = file_info64.tmu_date, pfile_info->compressed_size = (uLong)file_info64.compressed_size; pfile_info->uncompressed_size = (uLong)file_info64.uncompressed_size; } return err; } /* Set the current file of the zipfile to the first file. 
  return UNZ_OK if there is no problem
*/
/* Position the handle on the first central-directory entry and parse it;
   current_file_ok records whether the parse succeeded. */
extern int ZEXPORT unzGoToFirstFile (unzFile file)
{
    int err=UNZ_OK;
    unz64_s* s;
    if (file==NULL)
        return UNZ_PARAMERROR;
    s=(unz64_s*)file;
    s->pos_in_central_dir=s->offset_central_dir;
    s->num_file=0;
    err=unz64local_GetCurrentFileInfoInternal(file,&s->cur_file_info,
                                             &s->cur_file_info_internal,
                                             NULL,0,NULL,0,NULL,0);
    s->current_file_ok = (err == UNZ_OK);
    return err;
}

/*
  Set the current file of the zipfile to the next file.
  return UNZ_OK if there is no problem
  return UNZ_END_OF_LIST_OF_FILE if the actual file was the latest.
*/
extern int ZEXPORT unzGoToNextFile (unzFile file)
{
    unz64_s* s;
    int err;

    if (file==NULL)
        return UNZ_PARAMERROR;
    s=(unz64_s*)file;
    if (!s->current_file_ok)
        return UNZ_END_OF_LIST_OF_FILE;
    if (s->gi.number_entry != 0xffff)    /* 2^16 files overflow hack */
      if (s->num_file+1==s->gi.number_entry)
        return UNZ_END_OF_LIST_OF_FILE;

    /* Advance past this entry's fixed header plus its variable-length
       filename / extra-field / comment. */
    s->pos_in_central_dir += SIZECENTRALDIRITEM + s->cur_file_info.size_filename +
            s->cur_file_info.size_file_extra + s->cur_file_info.size_file_comment ;
    s->num_file++;
    err = unz64local_GetCurrentFileInfoInternal(file,&s->cur_file_info,
                                               &s->cur_file_info_internal,
                                               NULL,0,NULL,0,NULL,0);
    s->current_file_ok = (err == UNZ_OK);
    return err;
}

/*
  Try locate the file szFileName in the zipfile.
  For the iCaseSensitivity signification, see unzipStringFileNameCompare

  return value :
  UNZ_OK if the file is found. It becomes the current file.
  UNZ_END_OF_LIST_OF_FILE if the file is not found
*/
extern int ZEXPORT unzLocateFile (unzFile file, const char *szFileName, int iCaseSensitivity)
{
    unz64_s* s;
    int err;

    /* We remember the 'current' position in the file so that we can jump
     * back there if we fail.
     */
    unz_file_info64 cur_file_infoSaved;
    unz_file_info64_internal cur_file_info_internalSaved;
    ZPOS64_T num_fileSaved;
    ZPOS64_T pos_in_central_dirSaved;

    if (file==NULL)
        return UNZ_PARAMERROR;

    if (strlen(szFileName)>=UNZ_MAXFILENAMEINZIP)
        return UNZ_PARAMERROR;

    s=(unz64_s*)file;
    if (!s->current_file_ok)
        return UNZ_END_OF_LIST_OF_FILE;

    /* Save the current state */
    num_fileSaved = s->num_file;
    pos_in_central_dirSaved = s->pos_in_central_dir;
    cur_file_infoSaved = s->cur_file_info;
    cur_file_info_internalSaved = s->cur_file_info_internal;

    /* Linear scan from the first entry; on a match we return immediately,
       leaving the matched entry as the current file. */
    err = unzGoToFirstFile(file);

    while (err == UNZ_OK)
    {
        char szCurrentFileName[UNZ_MAXFILENAMEINZIP+1];
        err = unzGetCurrentFileInfo64(file,NULL,
                                    szCurrentFileName,sizeof(szCurrentFileName)-1,
                                    NULL,0,NULL,0);
        if (err == UNZ_OK)
        {
            if (unzStringFileNameCompare(szCurrentFileName,
                                            szFileName,iCaseSensitivity)==0)
                return UNZ_OK;
            err = unzGoToNextFile(file);
        }
    }

    /* We failed, so restore the state of the 'current file' to where we
     * were.
     */
    s->num_file = num_fileSaved ;
    s->pos_in_central_dir = pos_in_central_dirSaved ;
    s->cur_file_info = cur_file_infoSaved;
    s->cur_file_info_internal = cur_file_info_internalSaved;
    return err;
}

/*
///////////////////////////////////////////
// Contributed by Ryan Haksi (mailto://cryogen@infoserve.net)
// I need random access
//
// Further optimization could be realized by adding an ability
// to cache the directory in memory. The goal being a single
// comprehensive file read to put the file I need in a memory.
*/ /* typedef struct unz_file_pos_s { ZPOS64_T pos_in_zip_directory; // offset in file ZPOS64_T num_of_file; // # of file } unz_file_pos; */ extern int ZEXPORT unzGetFilePos64(unzFile file, unz64_file_pos* file_pos) { unz64_s* s; if (file==NULL || file_pos==NULL) return UNZ_PARAMERROR; s=(unz64_s*)file; if (!s->current_file_ok) return UNZ_END_OF_LIST_OF_FILE; file_pos->pos_in_zip_directory = s->pos_in_central_dir; file_pos->num_of_file = s->num_file; return UNZ_OK; } extern int ZEXPORT unzGetFilePos( unzFile file, unz_file_pos* file_pos) { unz64_file_pos file_pos64; int err = unzGetFilePos64(file,&file_pos64); if (err==UNZ_OK) { file_pos->pos_in_zip_directory = (uLong)file_pos64.pos_in_zip_directory; file_pos->num_of_file = (uLong)file_pos64.num_of_file; } return err; } extern int ZEXPORT unzGoToFilePos64(unzFile file, const unz64_file_pos* file_pos) { unz64_s* s; int err; if (file==NULL || file_pos==NULL) return UNZ_PARAMERROR; s=(unz64_s*)file; /* jump to the right spot */ s->pos_in_central_dir = file_pos->pos_in_zip_directory; s->num_file = file_pos->num_of_file; /* set the current file */ err = unz64local_GetCurrentFileInfoInternal(file,&s->cur_file_info, &s->cur_file_info_internal, NULL,0,NULL,0,NULL,0); /* return results */ s->current_file_ok = (err == UNZ_OK); return err; } extern int ZEXPORT unzGoToFilePos( unzFile file, unz_file_pos* file_pos) { unz64_file_pos file_pos64; if (file_pos == NULL) return UNZ_PARAMERROR; file_pos64.pos_in_zip_directory = file_pos->pos_in_zip_directory; file_pos64.num_of_file = file_pos->num_of_file; return unzGoToFilePos64(file,&file_pos64); } /* // Unzip Helper Functions - should be here? 
///////////////////////////////////////////
*/

/*
  Read the local header of the current zipfile
  Check the coherency of the local header and info in the end of central
        directory about this file
  store in *piSizeVar the size of extra info in local header
        (filename and size of extra field data)
*/
/* On success *piSizeVar is the number of bytes between the fixed local
   header and the file data (filename + local extra field), and
   *poffset_local_extrafield / *psize_local_extrafield describe the local
   extra field. Sizes/CRC are not compared when bit 3 of the flags is set
   (values deferred to the data descriptor) or when a field holds the
   0xFFFFFFFF ZIP64 placeholder. */
local int unz64local_CheckCurrentFileCoherencyHeader (unz64_s* s, uInt* piSizeVar,
                                                    ZPOS64_T * poffset_local_extrafield,
                                                    uInt  * psize_local_extrafield)
{
    uLong uMagic,uData,uFlags;
    uLong size_filename;
    uLong size_extra_field;
    int err=UNZ_OK;

    *piSizeVar = 0;
    *poffset_local_extrafield = 0;
    *psize_local_extrafield = 0;

    if (ZSEEK64(s->z_filefunc, s->filestream,s->cur_file_info_internal.offset_curfile +
                              s->byte_before_the_zipfile,ZLIB_FILEFUNC_SEEK_SET)!=0)
        return UNZ_ERRNO;

    if (err==UNZ_OK)
    {
        if (unz64local_getLong(&s->z_filefunc, s->filestream,&uMagic) != UNZ_OK)
            err=UNZ_ERRNO;
        else if (uMagic!=0x04034b50) /* local file header signature */
            err=UNZ_BADZIPFILE;
    }

    if (unz64local_getShort(&s->z_filefunc, s->filestream,&uData) != UNZ_OK)
        err=UNZ_ERRNO;
/*
    else if ((err==UNZ_OK) && (uData!=s->cur_file_info.wVersion))
        err=UNZ_BADZIPFILE;
*/
    if (unz64local_getShort(&s->z_filefunc, s->filestream,&uFlags) != UNZ_OK)
        err=UNZ_ERRNO;

    if (unz64local_getShort(&s->z_filefunc, s->filestream,&uData) != UNZ_OK)
        err=UNZ_ERRNO;
    else if ((err==UNZ_OK) && (uData!=s->cur_file_info.compression_method))
        err=UNZ_BADZIPFILE;

    if ((err==UNZ_OK) && (s->cur_file_info.compression_method!=0) &&
/* #ifdef HAVE_BZIP2 */
                         (s->cur_file_info.compression_method!=Z_BZIP2ED) &&
/* #endif */
                         (s->cur_file_info.compression_method!=Z_DEFLATED))
        err=UNZ_BADZIPFILE;

    if (unz64local_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* date/time */
        err=UNZ_ERRNO;

    if (unz64local_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* crc */
        err=UNZ_ERRNO;
    else if ((err==UNZ_OK) && (uData!=s->cur_file_info.crc) && ((uFlags & 8)==0))
        err=UNZ_BADZIPFILE;

    if (unz64local_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* size compr */
        err=UNZ_ERRNO;
    else if (uData != 0xFFFFFFFF && (err==UNZ_OK) && (uData!=s->cur_file_info.compressed_size) && ((uFlags & 8)==0))
        err=UNZ_BADZIPFILE;

    if (unz64local_getLong(&s->z_filefunc, s->filestream,&uData) != UNZ_OK) /* size uncompr */
        err=UNZ_ERRNO;
    else if (uData != 0xFFFFFFFF && (err==UNZ_OK) && (uData!=s->cur_file_info.uncompressed_size) && ((uFlags & 8)==0))
        err=UNZ_BADZIPFILE;

    if (unz64local_getShort(&s->z_filefunc, s->filestream,&size_filename) != UNZ_OK)
        err=UNZ_ERRNO;
    else if ((err==UNZ_OK) && (size_filename!=s->cur_file_info.size_filename))
        err=UNZ_BADZIPFILE;

    *piSizeVar += (uInt)size_filename;

    if (unz64local_getShort(&s->z_filefunc, s->filestream,&size_extra_field) != UNZ_OK)
        err=UNZ_ERRNO;
    *poffset_local_extrafield= s->cur_file_info_internal.offset_curfile +
                                    SIZEZIPLOCALHEADER + size_filename;
    *psize_local_extrafield = (uInt)size_extra_field;

    *piSizeVar += (uInt)size_extra_field;

    return err;
}

/*
  Open for reading data the current file in the zipfile.
  If there is no error and the file is opened, the return value is UNZ_OK.
*/
/* Open the current file for reading. method/level (optional out) report the
   compression method and an estimated deflate level recovered from the flag
   bits; raw!=0 reads the compressed bytes without decompressing; password
   (optional) enables traditional PKWARE decryption. Allocates the per-file
   read state stored in s->pfile_in_zip_read. */
extern int ZEXPORT unzOpenCurrentFile3 (unzFile file, int* method,
                                            int* level, int raw, const char* password)
{
    int err=UNZ_OK;
    uInt iSizeVar;
    unz64_s* s;
    file_in_zip64_read_info_s* pfile_in_zip_read_info;
    ZPOS64_T offset_local_extrafield;  /* offset of the local extra field */
    uInt  size_local_extrafield;    /* size of the local extra field */
#    ifndef NOUNCRYPT
    char source[12];
#    else
    if (password != NULL)
        return UNZ_PARAMERROR;
#    endif

    if (file==NULL)
        return UNZ_PARAMERROR;
    s=(unz64_s*)file;
    if (!s->current_file_ok)
        return UNZ_PARAMERROR;

    /* Only one file can be open inside the archive at a time. */
    if (s->pfile_in_zip_read != NULL)
        unzCloseCurrentFile(file);

    if (unz64local_CheckCurrentFileCoherencyHeader(s,&iSizeVar, &offset_local_extrafield,&size_local_extrafield)!=UNZ_OK)
        return UNZ_BADZIPFILE;

    pfile_in_zip_read_info = (file_in_zip64_read_info_s*)ALLOC(sizeof(file_in_zip64_read_info_s));
    if (pfile_in_zip_read_info==NULL)
        return UNZ_INTERNALERROR;

    pfile_in_zip_read_info->read_buffer=(char*)ALLOC(UNZ_BUFSIZE);
    pfile_in_zip_read_info->offset_local_extrafield = offset_local_extrafield;
    pfile_in_zip_read_info->size_local_extrafield = size_local_extrafield;
    pfile_in_zip_read_info->pos_local_extrafield=0;
    pfile_in_zip_read_info->raw=raw;

    if (pfile_in_zip_read_info->read_buffer==NULL)
    {
        TRYFREE(pfile_in_zip_read_info);
        return UNZ_INTERNALERROR;
    }

    pfile_in_zip_read_info->stream_initialised=0;

    if (method!=NULL)
        *method = (int)s->cur_file_info.compression_method;

    if (level!=NULL)
    {
        /* Flag bits 1-2 encode which deflate speed/quality option was used;
           map them back to an approximate compression level. */
        *level = 6;
        switch (s->cur_file_info.flag & 0x06)
        {
          case 6 : *level = 1; break;
          case 4 : *level = 2; break;
          case 2 : *level = 9; break;
        }
    }

    if ((s->cur_file_info.compression_method!=0) &&
/* #ifdef HAVE_BZIP2 */
        (s->cur_file_info.compression_method!=Z_BZIP2ED) &&
/* #endif */
        (s->cur_file_info.compression_method!=Z_DEFLATED))
    {
#ifndef __clang_analyzer__
        err=UNZ_BADZIPFILE;
#endif
    }

    pfile_in_zip_read_info->crc32_wait=s->cur_file_info.crc;
    pfile_in_zip_read_info->crc32=0;
    pfile_in_zip_read_info->total_out_64=0;
    pfile_in_zip_read_info->compression_method = s->cur_file_info.compression_method;
    pfile_in_zip_read_info->filestream=s->filestream;
    pfile_in_zip_read_info->z_filefunc=s->z_filefunc;
#ifndef __clang_analyzer__
    pfile_in_zip_read_info->byte_before_the_zipfile=s->byte_before_the_zipfile;
#endif

    pfile_in_zip_read_info->stream.total_out = 0;

    if ((s->cur_file_info.compression_method==Z_BZIP2ED) && (!raw))
    {
#ifdef HAVE_BZIP2
      pfile_in_zip_read_info->bstream.bzalloc = (void *(*) (void *, int, int))0;
      pfile_in_zip_read_info->bstream.bzfree = (free_func)0;
      pfile_in_zip_read_info->bstream.opaque = (voidpf)0;
      pfile_in_zip_read_info->bstream.state = (voidpf)0;
      pfile_in_zip_read_info->stream.zalloc = (alloc_func)0;
      pfile_in_zip_read_info->stream.zfree = (free_func)0;
      pfile_in_zip_read_info->stream.opaque = (voidpf)0;
      pfile_in_zip_read_info->stream.next_in = (voidpf)0;
      pfile_in_zip_read_info->stream.avail_in = 0;

      err=BZ2_bzDecompressInit(&pfile_in_zip_read_info->bstream, 0, 0);
      if (err == Z_OK)
        pfile_in_zip_read_info->stream_initialised=Z_BZIP2ED;
      else
      {
        TRYFREE(pfile_in_zip_read_info);
        return err;
      }
#else
      /* Without bzip2 support, fall back to handing out the raw bytes. */
      pfile_in_zip_read_info->raw=1;
#endif
    }
    else if ((s->cur_file_info.compression_method==Z_DEFLATED) && (!raw))
    {
      pfile_in_zip_read_info->stream.zalloc = (alloc_func)0;
      pfile_in_zip_read_info->stream.zfree = (free_func)0;
      pfile_in_zip_read_info->stream.opaque = (voidpf)0;
      pfile_in_zip_read_info->stream.next_in = 0;
      pfile_in_zip_read_info->stream.avail_in = 0;

      err=inflateInit2(&pfile_in_zip_read_info->stream, -MAX_WBITS);
      if (err == Z_OK)
        pfile_in_zip_read_info->stream_initialised=Z_DEFLATED;
      else
      {
        TRYFREE(pfile_in_zip_read_info);
        return err;
      }
        /* windowBits is passed < 0 to tell that there is no zlib header.
         * Note that in this case inflate *requires* an extra "dummy" byte
         * after the compressed stream in order to complete decompression and
         * return Z_STREAM_END.
         * In unzip, i don't wait absolutely Z_STREAM_END because I known the
         * size of both compressed and uncompressed data
         */
    }
    pfile_in_zip_read_info->rest_read_compressed =
            s->cur_file_info.compressed_size ;
    pfile_in_zip_read_info->rest_read_uncompressed =
            s->cur_file_info.uncompressed_size ;

    /* File data starts right after the local header and its variable part. */
    pfile_in_zip_read_info->pos_in_zipfile =
            s->cur_file_info_internal.offset_curfile + SIZEZIPLOCALHEADER +
              iSizeVar;

    pfile_in_zip_read_info->stream.avail_in = (uInt)0;

    s->pfile_in_zip_read = pfile_in_zip_read_info;
    s->encrypted = 0;

#    ifndef NOUNCRYPT
    if (password != NULL)
    {
        /* Traditional PKWARE encryption: prime the keys from the password
           and consume the 12-byte encryption header. */
        int i;
        s->pcrc_32_tab = (const unsigned long*)get_crc_table();
        init_keys(password,s->keys,s->pcrc_32_tab);
        if (ZSEEK64(s->z_filefunc, s->filestream,
                  s->pfile_in_zip_read->pos_in_zipfile +
                     s->pfile_in_zip_read->byte_before_the_zipfile,
                  SEEK_SET)!=0)
            return UNZ_INTERNALERROR;
        if(ZREAD64(s->z_filefunc, s->filestream,source, 12)<12)
            return UNZ_INTERNALERROR;

        for (i = 0; i<12; i++)
            zdecode(s->keys,s->pcrc_32_tab,source[i]);

        s->pfile_in_zip_read->pos_in_zipfile+=12;
        s->encrypted=1;
    }
#    endif

    return UNZ_OK;
}

/* Convenience wrapper: open without method/level reporting, decompressed,
   no password. */
extern int ZEXPORT unzOpenCurrentFile (unzFile file)
{
    return unzOpenCurrentFile3(file, NULL, NULL, 0, NULL);
}

/* Convenience wrapper: open with a password. */
extern int ZEXPORT unzOpenCurrentFilePassword (unzFile file, const char*  password)
{
    return unzOpenCurrentFile3(file, NULL, NULL, 0, password);
}

/* Convenience wrapper: open with method/level reporting and optional raw mode. */
extern int ZEXPORT unzOpenCurrentFile2 (unzFile file, int* method, int* level, int raw)
{
    return unzOpenCurrentFile3(file, method, level, raw, NULL);
}

/** Addition for GDAL : START */

/* Absolute position in the underlying stream of the next compressed byte
   to be read for the currently open file, or 0 when nothing is open.
   NOTE(review): 's' is dereferenced via the cast before the NULL check;
   harmless only because the check compares 'file' itself, but the ordering
   is fragile — kept as upstream wrote it. */
extern ZPOS64_T ZEXPORT unzGetCurrentFileZStreamPos64( unzFile file)
{
    unz64_s* s;
    file_in_zip64_read_info_s* pfile_in_zip_read_info;
    s=(unz64_s*)file;
    if (file==NULL)
        return 0; //UNZ_PARAMERROR;
    pfile_in_zip_read_info=s->pfile_in_zip_read;
    if (pfile_in_zip_read_info==NULL)
        return 0; //UNZ_PARAMERROR;
    return pfile_in_zip_read_info->pos_in_zipfile +
                         pfile_in_zip_read_info->byte_before_the_zipfile;
}

/** Addition for GDAL : END */

/*
  Read bytes from the current file.
  buf contain buffer where data must be copied
  len the size of buf.

  return the number of byte copied if somes bytes are copied
  return 0 if the end of file was reached
  return <0 with error code if there is an error
    (UNZ_ERRNO for IO error, or zLib error for uncompress error)
*/
extern int ZEXPORT unzReadCurrentFile (unzFile file, voidp buf, unsigned len)
{
    int err=UNZ_OK;
    uInt iRead = 0;                 /* bytes delivered to the caller so far */
    unz64_s* s;
    file_in_zip64_read_info_s* pfile_in_zip_read_info;
    if (file==NULL)
        return UNZ_PARAMERROR;
    s=(unz64_s*)file;
    pfile_in_zip_read_info=s->pfile_in_zip_read;

    if (pfile_in_zip_read_info==NULL)
        return UNZ_PARAMERROR;

    /* read_buffer is only NULL when no file entry is currently open. */
    if (pfile_in_zip_read_info->read_buffer == NULL)
        return UNZ_END_OF_LIST_OF_FILE;
    if (len==0)
        return 0;

    pfile_in_zip_read_info->stream.next_out = (Bytef*)buf;

    pfile_in_zip_read_info->stream.avail_out = (uInt)len;

    // NOTE:
    // This bit of code seems to try to set the amount of space in the output buffer based on the
    // value stored in the headers stored in the .zip file. However, if those values are incorrect
    // it may result in a loss of data when uncompresssing that file. The compressed data is still
    // legit and will deflate without knowing the uncompressed code so this tidbit is unnecessary and
    // may cause issues for some .zip files.
    //
    // It's removed in here to fix those issues.
    //
    // See: https://github.com/samsoffes/ssziparchive/issues/16
    //
    /*
    if ((len>pfile_in_zip_read_info->rest_read_uncompressed) &&
        (!(pfile_in_zip_read_info->raw)))
        pfile_in_zip_read_info->stream.avail_out =
            (uInt)pfile_in_zip_read_info->rest_read_uncompressed;
    */

    /* In raw mode we can never hand out more than the remaining compressed
       bytes (buffered + still on disk). */
    if ((len>pfile_in_zip_read_info->rest_read_compressed+
           pfile_in_zip_read_info->stream.avail_in) &&
         (pfile_in_zip_read_info->raw))
        pfile_in_zip_read_info->stream.avail_out =
            (uInt)pfile_in_zip_read_info->rest_read_compressed+
            pfile_in_zip_read_info->stream.avail_in;

    /* Main pump: refill the input buffer from the archive, then move data to
       the caller either verbatim (stored/raw), via bzip2, or via zlib. */
    while (pfile_in_zip_read_info->stream.avail_out>0)
    {
        if ((pfile_in_zip_read_info->stream.avail_in==0) &&
            (pfile_in_zip_read_info->rest_read_compressed>0))
        {
            uInt uReadThis = UNZ_BUFSIZE;
            if (pfile_in_zip_read_info->rest_read_compressed<uReadThis)
                uReadThis = (uInt)pfile_in_zip_read_info->rest_read_compressed;
            if (uReadThis == 0)
                return UNZ_EOF;
            if (ZSEEK64(pfile_in_zip_read_info->z_filefunc,
                      pfile_in_zip_read_info->filestream,
                      pfile_in_zip_read_info->pos_in_zipfile +
                         pfile_in_zip_read_info->byte_before_the_zipfile,
                      ZLIB_FILEFUNC_SEEK_SET)!=0)
                return UNZ_ERRNO;
            if (ZREAD64(pfile_in_zip_read_info->z_filefunc,
                      pfile_in_zip_read_info->filestream,
                      pfile_in_zip_read_info->read_buffer,
                      uReadThis)!=uReadThis)
                return UNZ_ERRNO;

# ifndef NOUNCRYPT
            /* Decrypt in place before the block is handed to the inflater. */
            if(s->encrypted)
            {
                uInt i;
                for(i=0;i<uReadThis;i++)
                  pfile_in_zip_read_info->read_buffer[i] =
                      zdecode(s->keys,s->pcrc_32_tab,
                              pfile_in_zip_read_info->read_buffer[i]);
            }
# endif

            pfile_in_zip_read_info->pos_in_zipfile += uReadThis;

            pfile_in_zip_read_info->rest_read_compressed-=uReadThis;

            pfile_in_zip_read_info->stream.next_in =
                (Bytef*)pfile_in_zip_read_info->read_buffer;
            pfile_in_zip_read_info->stream.avail_in = (uInt)uReadThis;
        }

        /* Stored (method 0) or raw: plain memcpy-style transfer, but CRC and
           counters are still maintained. */
        if ((pfile_in_zip_read_info->compression_method==0) || (pfile_in_zip_read_info->raw))
        {
            uInt uDoCopy,i ;

            if ((pfile_in_zip_read_info->stream.avail_in == 0) &&
                (pfile_in_zip_read_info->rest_read_compressed == 0))
                return (iRead==0) ? UNZ_EOF : iRead;

            if (pfile_in_zip_read_info->stream.avail_out <
                            pfile_in_zip_read_info->stream.avail_in)
                uDoCopy = pfile_in_zip_read_info->stream.avail_out ;
            else
                uDoCopy = pfile_in_zip_read_info->stream.avail_in ;

            for (i=0;i<uDoCopy;i++)
                *(pfile_in_zip_read_info->stream.next_out+i) =
                        *(pfile_in_zip_read_info->stream.next_in+i);

            pfile_in_zip_read_info->total_out_64 =
                pfile_in_zip_read_info->total_out_64 + uDoCopy;

            pfile_in_zip_read_info->crc32 = crc32(pfile_in_zip_read_info->crc32,
                                pfile_in_zip_read_info->stream.next_out,
                                uDoCopy);
            pfile_in_zip_read_info->rest_read_uncompressed-=uDoCopy;
            pfile_in_zip_read_info->stream.avail_in -= uDoCopy;
            pfile_in_zip_read_info->stream.avail_out -= uDoCopy;
            pfile_in_zip_read_info->stream.next_out += uDoCopy;
            pfile_in_zip_read_info->stream.next_in += uDoCopy;
            pfile_in_zip_read_info->stream.total_out += uDoCopy;
            iRead += uDoCopy;
        }
        else if (pfile_in_zip_read_info->compression_method==Z_BZIP2ED)
        {
#ifdef HAVE_BZIP2
            uLong uTotalOutBefore,uTotalOutAfter;
            const Bytef *bufBefore;
            uLong uOutThis;

            /* Mirror the zlib stream state into the bz_stream, run one
               decompress step, then copy the state back. */
            pfile_in_zip_read_info->bstream.next_in        = (char*)pfile_in_zip_read_info->stream.next_in;
            pfile_in_zip_read_info->bstream.avail_in       = pfile_in_zip_read_info->stream.avail_in;
            pfile_in_zip_read_info->bstream.total_in_lo32  = pfile_in_zip_read_info->stream.total_in;
            pfile_in_zip_read_info->bstream.total_in_hi32  = 0;
            pfile_in_zip_read_info->bstream.next_out       = (char*)pfile_in_zip_read_info->stream.next_out;
            pfile_in_zip_read_info->bstream.avail_out      = pfile_in_zip_read_info->stream.avail_out;
            pfile_in_zip_read_info->bstream.total_out_lo32 = pfile_in_zip_read_info->stream.total_out;
            pfile_in_zip_read_info->bstream.total_out_hi32 = 0;

            uTotalOutBefore = pfile_in_zip_read_info->bstream.total_out_lo32;
            bufBefore = (const Bytef *)pfile_in_zip_read_info->bstream.next_out;

            err=BZ2_bzDecompress(&pfile_in_zip_read_info->bstream);

            uTotalOutAfter = pfile_in_zip_read_info->bstream.total_out_lo32;
            uOutThis = uTotalOutAfter-uTotalOutBefore;

            pfile_in_zip_read_info->total_out_64 = pfile_in_zip_read_info->total_out_64 + uOutThis;

            pfile_in_zip_read_info->crc32 = crc32(pfile_in_zip_read_info->crc32,bufBefore, (uInt)(uOutThis));
            pfile_in_zip_read_info->rest_read_uncompressed -= uOutThis;
            iRead += (uInt)(uTotalOutAfter - uTotalOutBefore);

            pfile_in_zip_read_info->stream.next_in   = (Bytef*)pfile_in_zip_read_info->bstream.next_in;
            pfile_in_zip_read_info->stream.avail_in  = pfile_in_zip_read_info->bstream.avail_in;
            pfile_in_zip_read_info->stream.total_in  = pfile_in_zip_read_info->bstream.total_in_lo32;
            pfile_in_zip_read_info->stream.next_out  = (Bytef*)pfile_in_zip_read_info->bstream.next_out;
            pfile_in_zip_read_info->stream.avail_out = pfile_in_zip_read_info->bstream.avail_out;
            pfile_in_zip_read_info->stream.total_out = pfile_in_zip_read_info->bstream.total_out_lo32;

            if (err==BZ_STREAM_END)
              return (iRead==0) ? UNZ_EOF : iRead;
            if (err!=BZ_OK)
              break;
#endif
        } // end Z_BZIP2ED
        else
        {
            ZPOS64_T uTotalOutBefore,uTotalOutAfter;
            const Bytef *bufBefore;
            ZPOS64_T uOutThis;
            int flush=Z_SYNC_FLUSH;

            uTotalOutBefore = pfile_in_zip_read_info->stream.total_out;
            bufBefore = pfile_in_zip_read_info->stream.next_out;

            /*
            if ((pfile_in_zip_read_info->rest_read_uncompressed ==
                     pfile_in_zip_read_info->stream.avail_out) &&
                (pfile_in_zip_read_info->rest_read_compressed == 0))
                flush = Z_FINISH;
            */
            err=inflate(&pfile_in_zip_read_info->stream,flush);

            /* inflate() reporting a message with a non-error code is still
               treated as corrupt data. */
            if ((err>=0) && (pfile_in_zip_read_info->stream.msg!=NULL))
              err = Z_DATA_ERROR;

            uTotalOutAfter = pfile_in_zip_read_info->stream.total_out;
            uOutThis = uTotalOutAfter-uTotalOutBefore;

            pfile_in_zip_read_info->total_out_64 = pfile_in_zip_read_info->total_out_64 + uOutThis;

            pfile_in_zip_read_info->crc32 =
                crc32(pfile_in_zip_read_info->crc32,bufBefore,
                        (uInt)(uOutThis));

            pfile_in_zip_read_info->rest_read_uncompressed -=
                uOutThis;

            iRead += (uInt)(uTotalOutAfter - uTotalOutBefore);

            if (err==Z_STREAM_END)
                return (iRead==0) ? UNZ_EOF : iRead;
            if (err!=Z_OK)
                break;
        }
    }

    if (err==Z_OK)
        return iRead;
    return err;
}


/*
  Give the current position in uncompressed data
*/
extern z_off_t ZEXPORT unztell (unzFile file)
{
    unz64_s* s;
    file_in_zip64_read_info_s* pfile_in_zip_read_info;
    if (file==NULL)
        return UNZ_PARAMERROR;
    s=(unz64_s*)file;
    pfile_in_zip_read_info=s->pfile_in_zip_read;

    if (pfile_in_zip_read_info==NULL)
        return UNZ_PARAMERROR;

    /* NOTE(review): total_out is a uLong, so this truncates for entries
       larger than z_off_t; use unztell64 for large files. */
    return (z_off_t)pfile_in_zip_read_info->stream.total_out;
}

/* 64-bit variant of unztell; returns (ZPOS64_T)-1 on bad arguments. */
extern ZPOS64_T ZEXPORT unztell64 (unzFile file)
{

    unz64_s* s;
    file_in_zip64_read_info_s* pfile_in_zip_read_info;
    if (file==NULL)
        return (ZPOS64_T)-1;
    s=(unz64_s*)file;
    pfile_in_zip_read_info=s->pfile_in_zip_read;

    if (pfile_in_zip_read_info==NULL)
        return (ZPOS64_T)-1;

    return pfile_in_zip_read_info->total_out_64;
}


/*
  return 1 if the end of file was reached, 0 elsewhere
*/
extern int ZEXPORT unzeof (unzFile file)
{
    unz64_s* s;
    file_in_zip64_read_info_s* pfile_in_zip_read_info;
    if (file==NULL)
        return UNZ_PARAMERROR;
    s=(unz64_s*)file;
    pfile_in_zip_read_info=s->pfile_in_zip_read;

    if (pfile_in_zip_read_info==NULL)
        return UNZ_PARAMERROR;

    if (pfile_in_zip_read_info->rest_read_uncompressed == 0)
        return 1;
    else
        return 0;
}



/*
Read extra field from the current file (opened by unzOpenCurrentFile)
This is the local-header version of the extra field (sometimes, there is
more info in the local-header version than in the central-header)

  if buf==NULL, it return the size of the local extra field that can be read

  if buf!=NULL, len is the size of the buffer, the extra header is copied in
    buf.
  the return value is the number of bytes copied in buf, or (if <0)
  the error code
*/
extern int ZEXPORT unzGetLocalExtrafield (unzFile file, voidp buf, unsigned len)
{
    unz64_s* s;
    file_in_zip64_read_info_s* pfile_in_zip_read_info;
    uInt read_now;
    ZPOS64_T size_to_read;

    if (file==NULL)
        return UNZ_PARAMERROR;
    s=(unz64_s*)file;
    pfile_in_zip_read_info=s->pfile_in_zip_read;

    if (pfile_in_zip_read_info==NULL)
        return UNZ_PARAMERROR;

    /* Bytes of the local extra field not yet consumed by earlier calls. */
    size_to_read = (pfile_in_zip_read_info->size_local_extrafield -
                pfile_in_zip_read_info->pos_local_extrafield);

    /* Query mode: report the remaining size without copying. */
    if (buf==NULL)
        return (int)size_to_read;

    if (len>size_to_read)
        read_now = (uInt)size_to_read;
    else
        read_now = (uInt)len ;

    if (read_now==0)
        return 0;

    if (ZSEEK64(pfile_in_zip_read_info->z_filefunc,
              pfile_in_zip_read_info->filestream,
              pfile_in_zip_read_info->offset_local_extrafield +
              pfile_in_zip_read_info->pos_local_extrafield,
              ZLIB_FILEFUNC_SEEK_SET)!=0)
        return UNZ_ERRNO;

    if (ZREAD64(pfile_in_zip_read_info->z_filefunc,
              pfile_in_zip_read_info->filestream,
              buf,read_now)!=read_now)
        return UNZ_ERRNO;

    return (int)read_now;
}

/*
  Close the file in zip opened with unzipOpenCurrentFile
  Return UNZ_CRCERROR if all the file was read but the CRC is not good
*/
extern int ZEXPORT unzCloseCurrentFile (unzFile file)
{
    int err=UNZ_OK;

    unz64_s* s;
    file_in_zip64_read_info_s* pfile_in_zip_read_info;
    if (file==NULL)
        return UNZ_PARAMERROR;
    s=(unz64_s*)file;
    pfile_in_zip_read_info=s->pfile_in_zip_read;

    if (pfile_in_zip_read_info==NULL)
        return UNZ_PARAMERROR;

    /* CRC is only checkable when the whole entry was decompressed
       (raw reads bypass the CRC computation). */
    if ((pfile_in_zip_read_info->rest_read_uncompressed == 0) &&
        (!pfile_in_zip_read_info->raw))
    {
        if (pfile_in_zip_read_info->crc32 != pfile_in_zip_read_info->crc32_wait)
            err=UNZ_CRCERROR;
    }

    /* Release the staging buffer and whichever decompressor was set up. */
    TRYFREE(pfile_in_zip_read_info->read_buffer);
    pfile_in_zip_read_info->read_buffer = NULL;
    if (pfile_in_zip_read_info->stream_initialised == Z_DEFLATED)
        inflateEnd(&pfile_in_zip_read_info->stream);
#ifdef HAVE_BZIP2
    else if (pfile_in_zip_read_info->stream_initialised == Z_BZIP2ED)
        BZ2_bzDecompressEnd(&pfile_in_zip_read_info->bstream);
#endif


    pfile_in_zip_read_info->stream_initialised = 0;
    TRYFREE(pfile_in_zip_read_info);

    s->pfile_in_zip_read=NULL;

    return err;
}


/*
  Get the global comment string of the ZipFile, in the szComment buffer.
  uSizeBuf is the size of the szComment buffer.
  return the number of byte copied or an error code <0
*/
extern int ZEXPORT unzGetGlobalComment (unzFile file, char * szComment, uLong uSizeBuf)
{
    unz64_s* s;
    uLong uReadThis ;
    if (file==NULL)
        return (int)UNZ_PARAMERROR;
    s=(unz64_s*)file;

    uReadThis = uSizeBuf;
    if (uReadThis>s->gi.size_comment)
        uReadThis = s->gi.size_comment;

    /* The comment sits 22 bytes past the end-of-central-directory record. */
    if (ZSEEK64(s->z_filefunc,s->filestream,s->central_pos+22,ZLIB_FILEFUNC_SEEK_SET)!=0)
        return UNZ_ERRNO;

    /* NOTE(review): szComment is dereferenced here but only NULL-checked
       below — callers must not pass NULL with uSizeBuf>0. TODO confirm
       against upstream minizip before changing. */
    if (uReadThis>0)
    {
      *szComment='\0';
      if (ZREAD64(s->z_filefunc,s->filestream,szComment,uReadThis)!=uReadThis)
        return UNZ_ERRNO;
    }

    if ((szComment != NULL) && (uSizeBuf > s->gi.size_comment))
        *(szComment+s->gi.size_comment)='\0';
    return (int)uReadThis;
}

/* Additions by RX '2004 */
/* Return the central-directory offset of the current entry, or 0 when no
   entry is selected / the iterator is past the last entry. */
extern ZPOS64_T ZEXPORT unzGetOffset64(unzFile file)
{
    unz64_s* s;

    if (file==NULL)
          return 0; //UNZ_PARAMERROR;
    s=(unz64_s*)file;
    if (!s->current_file_ok)
      return 0;
    if (s->gi.number_entry != 0 && s->gi.number_entry != 0xffff)
      if (s->num_file==s->gi.number_entry)
         return 0;
    return s->pos_in_central_dir;
}

/* 32-bit wrapper over unzGetOffset64 (truncates for huge archives). */
extern uLong ZEXPORT unzGetOffset (unzFile file)
{
    ZPOS64_T offset64;

    if (file==NULL)
          return 0; //UNZ_PARAMERROR;
    offset64 = unzGetOffset64(file);
    return (uLong)offset64;
}

/* Position the iterator at an offset previously obtained from
   unzGetOffset64 and re-read that entry's central-directory record. */
extern int ZEXPORT unzSetOffset64(unzFile file, ZPOS64_T pos)
{
    unz64_s* s;
    int err;

    if (file==NULL)
        return UNZ_PARAMERROR;
    s=(unz64_s*)file;

    s->pos_in_central_dir = pos;
    s->num_file = s->gi.number_entry;      /* hack */
    err = unz64local_GetCurrentFileInfoInternal(file,&s->cur_file_info,
                                              &s->cur_file_info_internal,
                                              NULL,0,NULL,0,NULL,0);
    s->current_file_ok = (err == UNZ_OK);
    return err;
}

/* 32-bit wrapper over unzSetOffset64. */
extern int ZEXPORT unzSetOffset (unzFile file, uLong pos)
{
    return unzSetOffset64(file,pos);
}
mit
sjsinju/coreclr
src/pal/prebuilt/idl/clrprivbinding_i.cpp
117
2517
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. /* this ALWAYS GENERATED file contains the IIDs and CLSIDs */ /* link this file in with the server and any clients */ /* File created by MIDL compiler version 8.00.0603 */ /* @@MIDL_FILE_HEADING( ) */ #pragma warning( disable: 4049 ) /* more than 64k source lines */ #ifdef __cplusplus extern "C"{ #endif #include <rpc.h> #include <rpcndr.h> #ifdef _MIDL_USE_GUIDDEF_ #ifndef INITGUID #define INITGUID #include <guiddef.h> #undef INITGUID #else #include <guiddef.h> #endif #define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \ DEFINE_GUID(name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) #else // !_MIDL_USE_GUIDDEF_ #ifndef __IID_DEFINED__ #define __IID_DEFINED__ typedef struct _IID { unsigned long x; unsigned short s1; unsigned short s2; unsigned char c[8]; } IID; #endif // __IID_DEFINED__ #ifndef CLSID_DEFINED #define CLSID_DEFINED typedef IID CLSID; #endif // CLSID_DEFINED #define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \ const type name = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}} #endif !_MIDL_USE_GUIDDEF_ MIDL_DEFINE_GUID(IID, IID_ICLRPrivBinder,0x2601F621,0xE462,0x404C,0xB2,0x99,0x3E,0x1D,0xE7,0x2F,0x85,0x42); MIDL_DEFINE_GUID(IID, IID_ICLRPrivAssembly,0x2601F621,0xE462,0x404C,0xB2,0x99,0x3E,0x1D,0xE7,0x2F,0x85,0x43); MIDL_DEFINE_GUID(IID, IID_ICLRPrivResource,0x2601F621,0xE462,0x404C,0xB2,0x99,0x3E,0x1D,0xE7,0x2F,0x85,0x47); MIDL_DEFINE_GUID(IID, IID_ICLRPrivResourcePath,0x2601F621,0xE462,0x404C,0xB2,0x99,0x3E,0x1D,0xE7,0x2F,0x85,0x44); MIDL_DEFINE_GUID(IID, IID_ICLRPrivResourceStream,0x2601F621,0xE462,0x404C,0xB2,0x99,0x3E,0x1D,0xE7,0x2F,0x85,0x45); MIDL_DEFINE_GUID(IID, IID_ICLRPrivResourceHMODULE,0x2601F621,0xE462,0x404C,0xB2,0x99,0x3E,0x1D,0xE7,0x2F,0x85,0x46); MIDL_DEFINE_GUID(IID, 
IID_ICLRPrivResourceAssembly,0x8d2d3cc9,0x1249,0x4ad4,0x97,0x7d,0xb7,0x72,0xbd,0x4e,0x8a,0x94); MIDL_DEFINE_GUID(IID, IID_ICLRPrivAssemblyInfo,0x5653946E,0x800B,0x48B7,0x8B,0x09,0xB1,0xB8,0x79,0xB5,0x4F,0x68); MIDL_DEFINE_GUID(IID, IID_ICLRPrivAssemblyID_WinRT,0x4372D277,0x9906,0x4FED,0xBF,0x53,0x30,0xC0,0xB4,0x01,0x08,0x96); MIDL_DEFINE_GUID(IID, IID_ICLRPrivWinRtTypeBinder,0x6DE2A085,0xEFF4,0x4078,0x9F,0x60,0xB9,0xD3,0x66,0x73,0x63,0x98); #undef MIDL_DEFINE_GUID #ifdef __cplusplus } #endif
mit
Dmitry-Me/coreclr
src/binder/stringlexer.cpp
120
5283
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// ============================================================
//
// StringLexer.cpp
//
//
// Implements the StringLexer class
//
// ============================================================

#define DISABLE_BINDER_DEBUG_LOGGING

#include "stringlexer.hpp"
#include "utils.hpp"
#include "ex.h"

namespace BINDER_SPACE
{
    // Skips leading whitespace, classifies the next character, and — when it
    // begins a string — delegates to ParseString to accumulate the full
    // string lexeme into currentString.
    // Returns the lexeme type, or LEXEME_TYPE_INVALID on malformed input.
    StringLexer::LEXEME_TYPE
    StringLexer::GetNextLexeme(SString &currentString, BOOL fPermitUnescapedQuotes)
    {
        BOOL fIsEscaped = FALSE;
        WCHAR wcCurrentChar = INVALID_CHARACTER;
        BINDER_LOG_ENTER(L"StringLexer::GetNextLexeme");

        // Remove any white spaces
        do
        {
            wcCurrentChar = PopCharacter(&fIsEscaped);
        }
        while (IsWhitespace(wcCurrentChar));

        // Determine lexeme type
        LEXEME_TYPE kLexemeType = LEXEME_TYPE_INVALID;
        // An escaped character can never be a structural token, so
        // classification is skipped for escapes and we fall through to
        // string parsing.
        if (!fIsEscaped)
        {
            kLexemeType = GetLexemeType(wcCurrentChar);
            if (kLexemeType != LEXEME_TYPE_STRING)
            {
                return kLexemeType;
            }
        }

        // First character of string lexeme; push it back
        PushCharacter(wcCurrentChar, fIsEscaped);
        kLexemeType = ParseString(currentString, fPermitUnescapedQuotes);

        if (kLexemeType == LEXEME_TYPE_STRING)
        {
            BINDER_LOG_LEAVE_HR(L"StringLexer::GetNextLexeme(LEXEME_TYPE_STRING)", S_OK);
        }
        else
        {
            BINDER_LOG_LEAVE_HR(L"StringLexer::GetNextLexeme(LEXEME_TYPE_INVALID)", S_FALSE);
        }

        return kLexemeType;
    }

    // Accumulates a (possibly quoted) string lexeme into currentString.
    // Quoted strings end at the matching quote; unquoted strings end at EOS
    // or an unescaped separator (which is pushed back for the caller) and
    // have trailing whitespace trimmed.
    // fPermitUnescapedQuotes: when FALSE, a bare quote inside an unquoted
    // string is an error.
    StringLexer::LEXEME_TYPE
    StringLexer::ParseString(SString &currentString, BOOL fPermitUnescapedQuotes)
    {
        BOOL fIsFirstCharacter = TRUE;
        WCHAR wcCurrentChar = INVALID_CHARACTER;
        WCHAR wcOpeningQuote = INVALID_CHARACTER;

        currentString.Clear();

        // Read until we find another lexeme that's not a string character
        for (;;)
        {
            BOOL fIsEscaped = FALSE;
            wcCurrentChar = PopCharacter(&fIsEscaped);

            if (wcCurrentChar == INVALID_CHARACTER)
            {
                // Found invalid character encoding
                BINDER_LOG(L"StringLexer::ParseString: Invalid character encoding");
                return LEXEME_TYPE_INVALID;
            }

            if (IsEOS(wcCurrentChar))
            {
                if (IsQuoteCharacter(wcOpeningQuote))
                {
                    // EOS and unclosed quotes is an error
                    BINDER_LOG(L"StringLexer::ParseString: EOS and unclosed quotes");
                    return LEXEME_TYPE_INVALID;
                }
                else
                {
                    // Reached end of input and therefore of string
                    break;
                }
            }

            if (fIsFirstCharacter)
            {
                fIsFirstCharacter = FALSE;

                // If first character is quote, then record its quoteness
                if (IsQuoteCharacter(wcCurrentChar))
                {
                    wcOpeningQuote = wcCurrentChar;
                    continue;
                }
            }

            if (wcCurrentChar == wcOpeningQuote)
            {
                // We've found the closing quote for a quoted string
                break;
            }

            if (!fPermitUnescapedQuotes && !fIsEscaped && IsQuoteCharacter(wcCurrentChar) && !IsQuoteCharacter(wcOpeningQuote))
            {
                // Unescaped quotes in the middle of the string are an error
                BINDER_LOG(L"StringLexer::ParseString: Quote in the middle of a string");
                return LEXEME_TYPE_INVALID;
            }

            if (IsSeparatorChar(wcCurrentChar) && !IsQuoteCharacter(wcOpeningQuote) && !fIsEscaped)
            {
                // Unescaped separator char terminates the string
                PushCharacter(wcCurrentChar, fIsEscaped);
                break;
            }

            // Add character to current string
            currentString.Append(wcCurrentChar);
        }

        if (!IsQuoteCharacter(wcOpeningQuote))
        {
            // Remove trailing white spaces from unquoted string
            BINDER_LOG(L"StringLexer::ParseString: Trimming string");
            TrimTrailingWhiteSpaces(currentString);
        }

        BINDER_LOG_STRING(L"string", currentString);

        return LEXEME_TYPE_STRING;
    }

    // Truncates currentString after its last non-whitespace character.
    // NOTE(review): for an empty string, cursor starts at End()-1 < Begin();
    // the (cursor >= begin) guard makes the loop a no-op — assumes SString
    // iterator arithmetic one-before-Begin is well-defined for comparison.
    void StringLexer::TrimTrailingWhiteSpaces(SString &currentString)
    {
        SString::Iterator begin = currentString.Begin();
        SString::Iterator cursor = currentString.End() - 1;
        BOOL fFoundWhiteSpace = FALSE;

        for (;;)
        {
            if ((cursor >= begin) && IsWhitespace(cursor[0]))
            {
                fFoundWhiteSpace = TRUE;
                cursor--;
                continue;
            }
            break;
        }

        if (fFoundWhiteSpace)
        {
            // Truncate keeps everything strictly before cursor + 1.
            currentString.Truncate(cursor + 1);
        }
    }
};
mit
x13945/Android-ImageMagick
library/src/main/jni/libwebp-0.3.1/swig/libwebp_java_wrap.c
121
53665
/* ----------------------------------------------------------------------------
 * This file was automatically generated by SWIG (http://www.swig.org).
 * Version 2.0.4
 *
 * This file is not intended to be easily readable and contains a number of
 * coding conventions designed to improve portability and efficiency. Do not make
 * changes to this file unless you know what you are doing--modify the SWIG
 * interface file instead.
 * ----------------------------------------------------------------------------- */

#define SWIGJAVA

/* -----------------------------------------------------------------------------
 *  This section contains generic SWIG labels for method/variable
 *  declarations/attributes, and other compiler dependent labels.
 * ----------------------------------------------------------------------------- */

/* template workaround for compilers that cannot correctly implement the C++ standard */
#ifndef SWIGTEMPLATEDISAMBIGUATOR
# if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560)
#  define SWIGTEMPLATEDISAMBIGUATOR template
# elif defined(__HP_aCC)
/* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */
/* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */
#  define SWIGTEMPLATEDISAMBIGUATOR template
# else
#  define SWIGTEMPLATEDISAMBIGUATOR
# endif
#endif

/* inline attribute */
#ifndef SWIGINLINE
# if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__))
#   define SWIGINLINE inline
# else
#   define SWIGINLINE
# endif
#endif

/* attribute recognised by some compilers to avoid 'unused' warnings */
#ifndef SWIGUNUSED
# if defined(__GNUC__)
#   if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
#     define SWIGUNUSED __attribute__ ((__unused__))
#   else
#     define SWIGUNUSED
#   endif
# elif defined(__ICC)
#   define SWIGUNUSED __attribute__ ((__unused__))
# else
#   define SWIGUNUSED
# endif
#endif

#ifndef SWIG_MSC_UNSUPPRESS_4505
# if defined(_MSC_VER)
#   pragma warning(disable : 4505) /* unreferenced local function has been removed */
# endif
#endif

#ifndef SWIGUNUSEDPARM
# ifdef __cplusplus
#   define SWIGUNUSEDPARM(p)
# else
#   define SWIGUNUSEDPARM(p) p SWIGUNUSED
# endif
#endif

/* internal SWIG method */
#ifndef SWIGINTERN
# define SWIGINTERN static SWIGUNUSED
#endif

/* internal inline SWIG method */
#ifndef SWIGINTERNINLINE
# define SWIGINTERNINLINE SWIGINTERN SWIGINLINE
#endif

/* exporting methods */
#if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
#  ifndef GCC_HASCLASSVISIBILITY
#    define GCC_HASCLASSVISIBILITY
#  endif
#endif

#ifndef SWIGEXPORT
# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
#   if defined(STATIC_LINKED)
#     define SWIGEXPORT
#   else
#     define SWIGEXPORT __declspec(dllexport)
#   endif
# else
#   if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY)
#     define SWIGEXPORT __attribute__ ((visibility("default")))
#   else
#     define SWIGEXPORT
#   endif
# endif
#endif

/* calling conventions for Windows */
#ifndef SWIGSTDCALL
# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
#   define SWIGSTDCALL __stdcall
# else
#   define SWIGSTDCALL
# endif
#endif

/* Deal with Microsoft's attempt at deprecating C standard runtime functions */
#if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE)
# define _CRT_SECURE_NO_DEPRECATE
#endif

/* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */
#if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE)
# define _SCL_SECURE_NO_DEPRECATE
#endif


/* Fix for jlong on some versions of gcc on Windows */
#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
  typedef long long __int64;
#endif

/* Fix for jlong on 64-bit x86 Solaris */
#if defined(__x86_64)
# ifdef _LP64
#   undef _LP64
# endif
#endif

#include <jni.h>
#include <stdlib.h>
#include <string.h>


/* Support for throwing Java exceptions */
typedef enum {
  SWIG_JavaOutOfMemoryError = 1,
  SWIG_JavaIOException,
  SWIG_JavaRuntimeException,
  SWIG_JavaIndexOutOfBoundsException,
  SWIG_JavaArithmeticException,
  SWIG_JavaIllegalArgumentException,
  SWIG_JavaNullPointerException,
  SWIG_JavaDirectorPureVirtual,
  SWIG_JavaUnknownError
} SWIG_JavaExceptionCodes;

typedef struct {
  SWIG_JavaExceptionCodes code;
  const char *java_exception;
} SWIG_JavaExceptions_t;

/* Maps a SWIG exception code to its Java exception class and throws it on
   the supplied JNI environment. */
static void SWIGUNUSED SWIG_JavaThrowException(JNIEnv *jenv, SWIG_JavaExceptionCodes code, const char *msg) {
  jclass excep;
  static const SWIG_JavaExceptions_t java_exceptions[] = {
    { SWIG_JavaOutOfMemoryError, "java/lang/OutOfMemoryError" },
    { SWIG_JavaIOException, "java/io/IOException" },
    { SWIG_JavaRuntimeException, "java/lang/RuntimeException" },
    { SWIG_JavaIndexOutOfBoundsException, "java/lang/IndexOutOfBoundsException" },
    { SWIG_JavaArithmeticException, "java/lang/ArithmeticException" },
    { SWIG_JavaIllegalArgumentException, "java/lang/IllegalArgumentException" },
    { SWIG_JavaNullPointerException, "java/lang/NullPointerException" },
    { SWIG_JavaDirectorPureVirtual, "java/lang/RuntimeException" },
    { SWIG_JavaUnknownError,  "java/lang/UnknownError" },
    { (SWIG_JavaExceptionCodes)0,  "java/lang/UnknownError" }
  };
  const SWIG_JavaExceptions_t *except_ptr = java_exceptions;

  /* Last table entry (code 0) acts as the fallback. */
  while (except_ptr->code != code && except_ptr->code)
    except_ptr++;

  (*jenv)->ExceptionClear(jenv);
  excep = (*jenv)->FindClass(jenv, except_ptr->java_exception);
  if (excep)
    (*jenv)->ThrowNew(jenv, excep, msg);
}

/* Contract support */

#define SWIG_contract_assert(nullreturn, expr, msg) if (!(expr)) {SWIG_JavaThrowException(jenv, SWIG_JavaIllegalArgumentException, msg); return nullreturn; } else

/* Errors in SWIG */
#define  SWIG_UnknownError    	   -1
#define  SWIG_IOError        	   -2
#define  SWIG_RuntimeError   	   -3
#define  SWIG_IndexError     	   -4
#define  SWIG_TypeError      	   -5
#define  SWIG_DivisionByZero 	   -6
#define  SWIG_OverflowError  	   -7
#define  SWIG_SyntaxError    	   -8
#define  SWIG_ValueError     	   -9
#define  SWIG_SystemError    	   -10
#define  SWIG_AttributeError 	   -11
#define  SWIG_MemoryError    	   -12
#define  SWIG_NullReferenceError   -13


/* Translates a generic SWIG error code into the matching Java exception. */
SWIGINTERN void SWIG_JavaException(JNIEnv *jenv, int code, const char *msg) {
  SWIG_JavaExceptionCodes exception_code = SWIG_JavaUnknownError;
  switch(code) {
  case SWIG_MemoryError:
    exception_code = SWIG_JavaOutOfMemoryError;
    break;
  case SWIG_IOError:
    exception_code = SWIG_JavaIOException;
    break;
  case SWIG_SystemError:
  case SWIG_RuntimeError:
    exception_code = SWIG_JavaRuntimeException;
    break;
  case SWIG_OverflowError:
  case SWIG_IndexError:
    exception_code = SWIG_JavaIndexOutOfBoundsException;
    break;
  case SWIG_DivisionByZero:
    exception_code = SWIG_JavaArithmeticException;
    break;
  case SWIG_SyntaxError:
  case SWIG_ValueError:
  case SWIG_TypeError:
    exception_code = SWIG_JavaIllegalArgumentException;
    break;
  case SWIG_UnknownError:
  default:
    exception_code = SWIG_JavaUnknownError;
    break;
  }
  SWIG_JavaThrowException(jenv, exception_code, msg);
}


/* Primitive-array marshalling helpers. Each trio (In/Argout/Out) copies a
   Java array into a freshly calloc'd C array, writes C results back into
   the Java array, or builds a new Java array from a C array. */
#if defined(SWIG_NOINCLUDE) || defined(SWIG_NOARRAYS)

int SWIG_JavaArrayInSchar (JNIEnv *jenv, jbyte **jarr, signed char **carr, jbyteArray input);
void SWIG_JavaArrayArgoutSchar (JNIEnv *jenv, jbyte *jarr, signed char *carr, jbyteArray input);
jbyteArray SWIG_JavaArrayOutSchar (JNIEnv *jenv, signed char *result, jsize sz);

int SWIG_JavaArrayInUchar (JNIEnv *jenv, jshort **jarr, unsigned char **carr, jshortArray input);
void SWIG_JavaArrayArgoutUchar (JNIEnv *jenv, jshort *jarr, unsigned char *carr, jshortArray input);
jshortArray SWIG_JavaArrayOutUchar (JNIEnv *jenv, unsigned char *result, jsize sz);

int SWIG_JavaArrayInShort (JNIEnv *jenv, jshort **jarr, short **carr, jshortArray input);
void SWIG_JavaArrayArgoutShort (JNIEnv *jenv, jshort *jarr, short *carr, jshortArray input);
jshortArray SWIG_JavaArrayOutShort (JNIEnv *jenv, short *result, jsize sz);

int SWIG_JavaArrayInUshort (JNIEnv *jenv, jint **jarr, unsigned short **carr, jintArray input);
void SWIG_JavaArrayArgoutUshort (JNIEnv *jenv, jint *jarr, unsigned short *carr, jintArray input);
jintArray SWIG_JavaArrayOutUshort (JNIEnv *jenv, unsigned short *result, jsize sz);

int SWIG_JavaArrayInInt (JNIEnv *jenv, jint **jarr, int **carr, jintArray input);
void SWIG_JavaArrayArgoutInt (JNIEnv *jenv, jint *jarr, int *carr, jintArray input);
jintArray SWIG_JavaArrayOutInt (JNIEnv *jenv, int *result, jsize sz);

int SWIG_JavaArrayInUint (JNIEnv *jenv, jlong **jarr, unsigned int **carr, jlongArray input);
void SWIG_JavaArrayArgoutUint (JNIEnv *jenv, jlong *jarr, unsigned int *carr, jlongArray input);
jlongArray SWIG_JavaArrayOutUint (JNIEnv *jenv, unsigned int *result, jsize sz);

int SWIG_JavaArrayInLong (JNIEnv *jenv, jint **jarr, long **carr, jintArray input);
void SWIG_JavaArrayArgoutLong (JNIEnv *jenv, jint *jarr, long *carr, jintArray input);
jintArray SWIG_JavaArrayOutLong (JNIEnv *jenv, long *result, jsize sz);

int SWIG_JavaArrayInUlong (JNIEnv *jenv, jlong **jarr, unsigned long **carr, jlongArray input);
void SWIG_JavaArrayArgoutUlong (JNIEnv *jenv, jlong *jarr, unsigned long *carr, jlongArray input);
jlongArray SWIG_JavaArrayOutUlong (JNIEnv *jenv, unsigned long *result, jsize sz);

int SWIG_JavaArrayInLonglong (JNIEnv *jenv, jlong **jarr, jlong **carr, jlongArray input);
void SWIG_JavaArrayArgoutLonglong (JNIEnv *jenv, jlong *jarr, jlong *carr, jlongArray input);
jlongArray SWIG_JavaArrayOutLonglong (JNIEnv *jenv, jlong *result, jsize sz);

int SWIG_JavaArrayInFloat (JNIEnv *jenv, jfloat **jarr, float **carr, jfloatArray input);
void SWIG_JavaArrayArgoutFloat (JNIEnv *jenv, jfloat *jarr, float *carr, jfloatArray input);
jfloatArray SWIG_JavaArrayOutFloat (JNIEnv *jenv, float *result, jsize sz);

int SWIG_JavaArrayInDouble (JNIEnv *jenv, jdouble **jarr, double **carr, jdoubleArray input);
void SWIG_JavaArrayArgoutDouble (JNIEnv *jenv, jdouble *jarr, double *carr, jdoubleArray input);
jdoubleArray SWIG_JavaArrayOutDouble (JNIEnv *jenv, double *result, jsize sz);

#else

/* signed char[] support */
int SWIG_JavaArrayInSchar (JNIEnv *jenv, jbyte **jarr, signed char **carr, jbyteArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = (*jenv)->GetArrayLength(jenv, input);
  *jarr = (*jenv)->GetByteArrayElements(jenv, input, 0);
  if (!*jarr)
    return 0;
  *carr = (signed char*) calloc(sz, sizeof(signed char));
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (signed char)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutSchar (JNIEnv *jenv, jbyte *jarr, signed char *carr, jbyteArray input) {
  int i;
  jsize sz = (*jenv)->GetArrayLength(jenv, input);
  for (i=0; i<sz; i++)
    jarr[i] = (jbyte)carr[i];
  (*jenv)->ReleaseByteArrayElements(jenv, input, jarr, 0);
}

jbyteArray SWIG_JavaArrayOutSchar (JNIEnv *jenv, signed char *result, jsize sz) {
  jbyte *arr;
  int i;
  jbyteArray jresult = (*jenv)->NewByteArray(jenv, sz);
  if (!jresult)
    return NULL;
  arr = (*jenv)->GetByteArrayElements(jenv, jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jbyte)result[i];
  (*jenv)->ReleaseByteArrayElements(jenv, jresult, arr, 0);
  return jresult;
}


/* unsigned char[] support */
int SWIG_JavaArrayInUchar (JNIEnv *jenv, jshort **jarr, unsigned char **carr, jshortArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = (*jenv)->GetArrayLength(jenv, input);
  *jarr = (*jenv)->GetShortArrayElements(jenv, input, 0);
  if (!*jarr)
    return 0;
  *carr = (unsigned char*) calloc(sz, sizeof(unsigned char));
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (unsigned char)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutUchar (JNIEnv *jenv, jshort *jarr, unsigned char *carr, jshortArray input) {
  int i;
  jsize sz = (*jenv)->GetArrayLength(jenv, input);
  for (i=0; i<sz; i++)
    jarr[i] = (jshort)carr[i];
  (*jenv)->ReleaseShortArrayElements(jenv, input, jarr, 0);
}

jshortArray SWIG_JavaArrayOutUchar (JNIEnv *jenv, unsigned char *result, jsize sz) {
  jshort *arr;
  int i;
  jshortArray jresult = (*jenv)->NewShortArray(jenv, sz);
  if (!jresult)
    return NULL;
  arr = (*jenv)->GetShortArrayElements(jenv, jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jshort)result[i];
  (*jenv)->ReleaseShortArrayElements(jenv, jresult, arr, 0);
  return jresult;
}


/* short[] support */
int SWIG_JavaArrayInShort (JNIEnv *jenv, jshort **jarr, short **carr, jshortArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = (*jenv)->GetArrayLength(jenv, input);
  *jarr = (*jenv)->GetShortArrayElements(jenv, input, 0);
  if (!*jarr)
    return 0;
  *carr = (short*) calloc(sz, sizeof(short));
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (short)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutShort (JNIEnv *jenv, jshort *jarr, short *carr, jshortArray input) {
  int i;
  jsize sz = (*jenv)->GetArrayLength(jenv, input);
  for (i=0; i<sz; i++)
    jarr[i] = (jshort)carr[i];
  (*jenv)->ReleaseShortArrayElements(jenv, input, jarr, 0);
}

jshortArray SWIG_JavaArrayOutShort (JNIEnv *jenv, short *result, jsize sz) {
  jshort *arr;
  int i;
  jshortArray jresult = (*jenv)->NewShortArray(jenv, sz);
  if (!jresult)
    return NULL;
  arr = (*jenv)->GetShortArrayElements(jenv, jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jshort)result[i];
  (*jenv)->ReleaseShortArrayElements(jenv, jresult, arr, 0);
  return jresult;
}


/* unsigned short[] support */
int SWIG_JavaArrayInUshort (JNIEnv *jenv, jint **jarr, unsigned short **carr, jintArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = (*jenv)->GetArrayLength(jenv, input);
  *jarr = (*jenv)->GetIntArrayElements(jenv, input, 0);
  if (!*jarr)
    return 0;
  *carr = (unsigned short*) calloc(sz, sizeof(unsigned short));
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (unsigned short)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutUshort (JNIEnv *jenv, jint *jarr, unsigned short *carr, jintArray input) {
  int i;
  jsize sz = (*jenv)->GetArrayLength(jenv, input);
  for (i=0; i<sz; i++)
    jarr[i] = (jint)carr[i];
  (*jenv)->ReleaseIntArrayElements(jenv, input, jarr, 0);
}

jintArray SWIG_JavaArrayOutUshort (JNIEnv *jenv, unsigned short *result, jsize sz) {
  jint *arr;
  int i;
  jintArray jresult = (*jenv)->NewIntArray(jenv, sz);
  if (!jresult)
    return NULL;
  arr = (*jenv)->GetIntArrayElements(jenv, jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jint)result[i];
  (*jenv)->ReleaseIntArrayElements(jenv, jresult, arr, 0);
  return jresult;
}


/* int[] support */
int SWIG_JavaArrayInInt (JNIEnv *jenv, jint **jarr, int **carr, jintArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = (*jenv)->GetArrayLength(jenv, input);
  *jarr = (*jenv)->GetIntArrayElements(jenv, input, 0);
  if (!*jarr)
    return 0;
  *carr = (int*) calloc(sz, sizeof(int));
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (int)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutInt (JNIEnv *jenv, jint *jarr, int *carr, jintArray input) {
  int i;
  jsize sz = (*jenv)->GetArrayLength(jenv, input);
  for (i=0; i<sz; i++)
    jarr[i] = (jint)carr[i];
  (*jenv)->ReleaseIntArrayElements(jenv, input, jarr, 0);
}

jintArray SWIG_JavaArrayOutInt (JNIEnv *jenv, int *result, jsize sz) {
  jint *arr;
  int i;
  jintArray jresult = (*jenv)->NewIntArray(jenv, sz);
  if (!jresult)
    return NULL;
  arr = (*jenv)->GetIntArrayElements(jenv, jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jint)result[i];
  (*jenv)->ReleaseIntArrayElements(jenv, jresult, arr, 0);
  return jresult;
}


/* unsigned int[] support */
int SWIG_JavaArrayInUint (JNIEnv *jenv, jlong **jarr, unsigned int **carr, jlongArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = (*jenv)->GetArrayLength(jenv, input);
  *jarr = (*jenv)->GetLongArrayElements(jenv, input, 0);
  if (!*jarr)
    return 0;
  *carr = (unsigned int*) calloc(sz, sizeof(unsigned int));
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (unsigned int)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutUint (JNIEnv *jenv, jlong *jarr, unsigned int *carr, jlongArray input) {
  int i;
  jsize sz = (*jenv)->GetArrayLength(jenv, input);
  for (i=0; i<sz; i++)
    jarr[i] = (jlong)carr[i];
  (*jenv)->ReleaseLongArrayElements(jenv, input, jarr, 0);
}

jlongArray SWIG_JavaArrayOutUint (JNIEnv *jenv, unsigned int *result, jsize sz) {
  jlong *arr;
  int i;
  jlongArray jresult = (*jenv)->NewLongArray(jenv, sz);
  if (!jresult)
    return NULL;
  arr = (*jenv)->GetLongArrayElements(jenv, jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jlong)result[i];
  (*jenv)->ReleaseLongArrayElements(jenv, jresult, arr, 0);
  return jresult;
}


/* long[] support */
int SWIG_JavaArrayInLong (JNIEnv *jenv, jint **jarr, long **carr, jintArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = (*jenv)->GetArrayLength(jenv, input);
  *jarr = (*jenv)->GetIntArrayElements(jenv, input, 0);
  if (!*jarr)
    return 0;
  *carr = (long*) calloc(sz, sizeof(long));
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (long)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutLong (JNIEnv *jenv, jint *jarr, long *carr, jintArray input) {
  int i;
  jsize sz = (*jenv)->GetArrayLength(jenv, input);
  for (i=0; i<sz; i++)
    jarr[i] = (jint)carr[i];
  (*jenv)->ReleaseIntArrayElements(jenv, input, jarr, 0);
}

jintArray SWIG_JavaArrayOutLong (JNIEnv *jenv, long *result, jsize sz) {
  jint *arr;
  int i;
  jintArray jresult = (*jenv)->NewIntArray(jenv, sz);
  if (!jresult)
    return NULL;
  arr = (*jenv)->GetIntArrayElements(jenv, jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jint)result[i];
  (*jenv)->ReleaseIntArrayElements(jenv, jresult, arr, 0);
  return jresult;
}


/* unsigned long[] support */
int SWIG_JavaArrayInUlong (JNIEnv *jenv, jlong **jarr, unsigned long **carr, jlongArray input) {
  int i;
  jsize sz;
  if (!input) {
    SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array");
    return 0;
  }
  sz = (*jenv)->GetArrayLength(jenv, input);
  *jarr = (*jenv)->GetLongArrayElements(jenv, input, 0);
  if (!*jarr)
    return 0;
  *carr = (unsigned long*) calloc(sz, sizeof(unsigned long));
  if (!*carr) {
    SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed");
    return 0;
  }
  for (i=0; i<sz; i++)
    (*carr)[i] = (unsigned long)(*jarr)[i];
  return 1;
}

void SWIG_JavaArrayArgoutUlong (JNIEnv *jenv, jlong *jarr, unsigned long *carr, jlongArray input) {
  int i;
  jsize sz = (*jenv)->GetArrayLength(jenv, input);
  for (i=0; i<sz; i++)
    jarr[i] = (jlong)carr[i];
  (*jenv)->ReleaseLongArrayElements(jenv, input, jarr, 0);
}

jlongArray SWIG_JavaArrayOutUlong (JNIEnv *jenv, unsigned long *result, jsize sz) {
  jlong *arr;
  int i;
  jlongArray jresult = (*jenv)->NewLongArray(jenv, sz);
  if (!jresult)
    return NULL;
  arr = (*jenv)->GetLongArrayElements(jenv, jresult, 0);
  if (!arr)
    return NULL;
  for (i=0; i<sz; i++)
    arr[i] = (jlong)result[i];
  (*jenv)->ReleaseLongArrayElements(jenv, jresult, arr, 0);
  return jresult;
}


/* jlong[] support */
int SWIG_JavaArrayInLonglong (JNIEnv *jenv, jlong **jarr, jlong **carr, jlongArray input) {
  int i;
  jsize sz;
  if (!input) {
SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array"); return 0; } sz = (*jenv)->GetArrayLength(jenv, input); *jarr = (*jenv)->GetLongArrayElements(jenv, input, 0); if (!*jarr) return 0; *carr = (jlong*) calloc(sz, sizeof(jlong)); if (!*carr) { SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed"); return 0; } for (i=0; i<sz; i++) (*carr)[i] = (jlong)(*jarr)[i]; return 1; } void SWIG_JavaArrayArgoutLonglong (JNIEnv *jenv, jlong *jarr, jlong *carr, jlongArray input) { int i; jsize sz = (*jenv)->GetArrayLength(jenv, input); for (i=0; i<sz; i++) jarr[i] = (jlong)carr[i]; (*jenv)->ReleaseLongArrayElements(jenv, input, jarr, 0); } jlongArray SWIG_JavaArrayOutLonglong (JNIEnv *jenv, jlong *result, jsize sz) { jlong *arr; int i; jlongArray jresult = (*jenv)->NewLongArray(jenv, sz); if (!jresult) return NULL; arr = (*jenv)->GetLongArrayElements(jenv, jresult, 0); if (!arr) return NULL; for (i=0; i<sz; i++) arr[i] = (jlong)result[i]; (*jenv)->ReleaseLongArrayElements(jenv, jresult, arr, 0); return jresult; } /* float[] support */ int SWIG_JavaArrayInFloat (JNIEnv *jenv, jfloat **jarr, float **carr, jfloatArray input) { int i; jsize sz; if (!input) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array"); return 0; } sz = (*jenv)->GetArrayLength(jenv, input); *jarr = (*jenv)->GetFloatArrayElements(jenv, input, 0); if (!*jarr) return 0; *carr = (float*) calloc(sz, sizeof(float)); if (!*carr) { SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed"); return 0; } for (i=0; i<sz; i++) (*carr)[i] = (float)(*jarr)[i]; return 1; } void SWIG_JavaArrayArgoutFloat (JNIEnv *jenv, jfloat *jarr, float *carr, jfloatArray input) { int i; jsize sz = (*jenv)->GetArrayLength(jenv, input); for (i=0; i<sz; i++) jarr[i] = (jfloat)carr[i]; (*jenv)->ReleaseFloatArrayElements(jenv, input, jarr, 0); } jfloatArray SWIG_JavaArrayOutFloat (JNIEnv *jenv, float *result, jsize sz) { jfloat 
*arr; int i; jfloatArray jresult = (*jenv)->NewFloatArray(jenv, sz); if (!jresult) return NULL; arr = (*jenv)->GetFloatArrayElements(jenv, jresult, 0); if (!arr) return NULL; for (i=0; i<sz; i++) arr[i] = (jfloat)result[i]; (*jenv)->ReleaseFloatArrayElements(jenv, jresult, arr, 0); return jresult; } /* double[] support */ int SWIG_JavaArrayInDouble (JNIEnv *jenv, jdouble **jarr, double **carr, jdoubleArray input) { int i; jsize sz; if (!input) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array"); return 0; } sz = (*jenv)->GetArrayLength(jenv, input); *jarr = (*jenv)->GetDoubleArrayElements(jenv, input, 0); if (!*jarr) return 0; *carr = (double*) calloc(sz, sizeof(double)); if (!*carr) { SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed"); return 0; } for (i=0; i<sz; i++) (*carr)[i] = (double)(*jarr)[i]; return 1; } void SWIG_JavaArrayArgoutDouble (JNIEnv *jenv, jdouble *jarr, double *carr, jdoubleArray input) { int i; jsize sz = (*jenv)->GetArrayLength(jenv, input); for (i=0; i<sz; i++) jarr[i] = (jdouble)carr[i]; (*jenv)->ReleaseDoubleArrayElements(jenv, input, jarr, 0); } jdoubleArray SWIG_JavaArrayOutDouble (JNIEnv *jenv, double *result, jsize sz) { jdouble *arr; int i; jdoubleArray jresult = (*jenv)->NewDoubleArray(jenv, sz); if (!jresult) return NULL; arr = (*jenv)->GetDoubleArrayElements(jenv, jresult, 0); if (!arr) return NULL; for (i=0; i<sz; i++) arr[i] = (jdouble)result[i]; (*jenv)->ReleaseDoubleArrayElements(jenv, jresult, arr, 0); return jresult; } #endif #include "webp/types.h" int SWIG_JavaArrayInUint8 (JNIEnv *jenv, jbyte **jarr, uint8_t **carr, jbyteArray input); void SWIG_JavaArrayArgoutUint8 (JNIEnv *jenv, jbyte *jarr, uint8_t *carr, jbyteArray input); jbyteArray SWIG_JavaArrayOutUint8 (JNIEnv *jenv, uint8_t *result, jsize sz); /* uint8_t[] support */ int SWIG_JavaArrayInUint8 (JNIEnv *jenv, jbyte **jarr, uint8_t **carr, jbyteArray input) { int i; jsize sz; if (!input) { 
SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null array"); return 0; } sz = (*jenv)->GetArrayLength(jenv, input); *jarr = (*jenv)->GetByteArrayElements(jenv, input, 0); if (!*jarr) return 0; *carr = (uint8_t*) calloc(sz, sizeof(uint8_t)); if (!*carr) { SWIG_JavaThrowException(jenv, SWIG_JavaOutOfMemoryError, "array memory allocation failed"); return 0; } for (i=0; i<sz; i++) (*carr)[i] = (uint8_t)(*jarr)[i]; return 1; } void SWIG_JavaArrayArgoutUint8 (JNIEnv *jenv, jbyte *jarr, uint8_t *carr, jbyteArray input) { int i; jsize sz = (*jenv)->GetArrayLength(jenv, input); for (i=0; i<sz; i++) jarr[i] = (jbyte)carr[i]; (*jenv)->ReleaseByteArrayElements(jenv, input, jarr, 0); } jbyteArray SWIG_JavaArrayOutUint8 (JNIEnv *jenv, uint8_t *result, jsize sz) { jbyte *arr; int i; jbyteArray jresult = (*jenv)->NewByteArray(jenv, sz); if (!jresult) return NULL; arr = (*jenv)->GetByteArrayElements(jenv, jresult, 0); if (!arr) return NULL; for (i=0; i<sz; i++) arr[i] = (jbyte)result[i]; (*jenv)->ReleaseByteArrayElements(jenv, jresult, arr, 0); return jresult; } #include "webp/decode.h" #include "webp/encode.h" #define FillMeInAsSizeCannotBeDeterminedAutomatically \ (result ? 
(jint)ReturnedBufferSize(__FUNCTION__, arg3, arg4) : 0)

// Recovers the byte size of a buffer returned by one of the wrapped
// decode/encode functions. For decoders, size = width * height * bytes-per-
// pixel of the output format; for the encode wrappers the two int outputs are
// repurposed (see EncodeLossy/EncodeLossless below) so the multiplier is 1.
// Returns 0 when 'function' is not in the table.
static size_t ReturnedBufferSize(
    const char* function, int* width, int* height) {
  static const struct sizemap {
    const char* function;
    int size_multiplier;
  } size_map[] = {
#ifdef SWIGJAVA
    { "Java_com_google_webp_libwebpJNI_WebPDecodeRGB",  3 },
    { "Java_com_google_webp_libwebpJNI_WebPDecodeRGBA", 4 },
    { "Java_com_google_webp_libwebpJNI_WebPDecodeARGB", 4 },
    { "Java_com_google_webp_libwebpJNI_WebPDecodeBGR",  3 },
    { "Java_com_google_webp_libwebpJNI_WebPDecodeBGRA", 4 },
    { "Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeRGB",  1 },
    { "Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeBGR",  1 },
    { "Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeRGBA", 1 },
    { "Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeBGRA", 1 },
    { "Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeLosslessRGB",  1 },
    { "Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeLosslessBGR",  1 },
    { "Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeLosslessRGBA", 1 },
    { "Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeLosslessBGRA", 1 },
#endif
#ifdef SWIGPYTHON
    { "WebPDecodeRGB",  3 },
    { "WebPDecodeRGBA", 4 },
    { "WebPDecodeARGB", 4 },
    { "WebPDecodeBGR",  3 },
    { "WebPDecodeBGRA", 4 },
    { "wrap_WebPEncodeRGB",  1 },
    { "wrap_WebPEncodeBGR",  1 },
    { "wrap_WebPEncodeRGBA", 1 },
    { "wrap_WebPEncodeBGRA", 1 },
    { "wrap_WebPEncodeLosslessRGB",  1 },
    { "wrap_WebPEncodeLosslessBGR",  1 },
    { "wrap_WebPEncodeLosslessRGBA", 1 },
    { "wrap_WebPEncodeLosslessBGRA", 1 },
#endif
    { NULL, 0 }
  };
  const struct sizemap* p;
  size_t size = 0;

  for (p = size_map; p->function; ++p) {
    if (!strcmp(function, p->function)) {
      // Widen to size_t BEFORE multiplying: width * height * multiplier as
      // plain int overflows for large images (e.g. 16383 * 16383 * 4).
      size = (size_t)*width * (size_t)*height * (size_t)p->size_multiplier;
      break;
    }
  }
  return size;
}

// Signatures of the libwebp lossy / lossless encoders being wrapped.
typedef size_t (*WebPEncodeFunction)(const uint8_t* rgb,
                                     int width, int height, int stride,
                                     float quality_factor, uint8_t** output);
typedef size_t (*WebPEncodeLosslessFunction)(const uint8_t* rgb,
                                             int width, int height, int stride,
                                             uint8_t** output);

// Invokes a lossy encoder and converts its (size, out-param) result to the
// decoder-style "return the buffer" shape. On failure returns NULL.
static uint8_t* EncodeLossy(const uint8_t* rgb, int width, int height,
                            int stride, float quality_factor,
                            WebPEncodeFunction encfn,
                            int* output_size, int* unused) {
  uint8_t* output = NULL;
  const size_t image_size =
      encfn(rgb, width, height, stride, quality_factor, &output);
  // The values of the following two will be interpreted by
  // ReturnedBufferSize() as 'width' and 'height' in the size calculation,
  // so the product output_size * 1 yields the encoded byte count.
  *output_size = image_size;
  *unused = 1;
  return image_size ? output : NULL;
}

// Lossless counterpart of EncodeLossy; identical output-size trick.
static uint8_t* EncodeLossless(const uint8_t* rgb, int width, int height,
                               int stride, WebPEncodeLosslessFunction encfn,
                               int* output_size, int* unused) {
  uint8_t* output = NULL;
  const size_t image_size = encfn(rgb, width, height, stride, &output);
  // The values of the following two will be interpreted by
  // ReturnedBufferSize() as 'width' and 'height' in the size calculation.
  *output_size = image_size;
  *unused = 1;
  return image_size ? output : NULL;
}

// Changes the return type of WebPEncode* to more closely match Decode*.
// This also makes it easier to wrap the output buffer in a native type rather
// than dealing with the return pointer.
// The additional parameters are to allow reuse of ReturnedBufferSize(),
// unused2 and output_size will be used in this case.
#define LOSSY_WRAPPER(FUNC) \ static uint8_t* wrap_##FUNC( \ const uint8_t* rgb, int* unused1, int* unused2, int* output_size, \ int width, int height, int stride, float quality_factor) { \ return EncodeLossy(rgb, width, height, stride, quality_factor, \ FUNC, output_size, unused2); \ } \ LOSSY_WRAPPER(WebPEncodeRGB) LOSSY_WRAPPER(WebPEncodeBGR) LOSSY_WRAPPER(WebPEncodeRGBA) LOSSY_WRAPPER(WebPEncodeBGRA) #undef LOSSY_WRAPPER #define LOSSLESS_WRAPPER(FUNC) \ static uint8_t* wrap_##FUNC( \ const uint8_t* rgb, int* unused1, int* unused2, int* output_size, \ int width, int height, int stride) { \ return EncodeLossless(rgb, width, height, stride, \ FUNC, output_size, unused2); \ } \ LOSSLESS_WRAPPER(WebPEncodeLosslessRGB) LOSSLESS_WRAPPER(WebPEncodeLosslessBGR) LOSSLESS_WRAPPER(WebPEncodeLosslessRGBA) LOSSLESS_WRAPPER(WebPEncodeLosslessBGRA) #undef LOSSLESS_WRAPPER /* Work around broken gcj jni.h */ #ifdef __GCJ_JNI_H__ # undef JNIEXPORT # define JNIEXPORT # undef JNICALL # define JNICALL #endif #ifdef __cplusplus extern "C" { #endif SWIGEXPORT jint JNICALL Java_com_google_webp_libwebpJNI_WebPGetDecoderVersion(JNIEnv *jenv, jclass jcls) { jint jresult = 0 ; int result; (void)jenv; (void)jcls; result = (int)WebPGetDecoderVersion(); jresult = (jint)result; return jresult; } SWIGEXPORT jint JNICALL Java_com_google_webp_libwebpJNI_WebPGetInfo(JNIEnv *jenv, jclass jcls, jbyteArray jarg1, jlong jarg2, jintArray jarg3, jintArray jarg4) { jint jresult = 0 ; uint8_t *arg1 = (uint8_t *) 0 ; size_t arg2 ; int *arg3 = (int *) 0 ; int *arg4 = (int *) 0 ; jbyte *jarr1 ; int temp3 ; int temp4 ; int result; (void)jenv; (void)jcls; if (!SWIG_JavaArrayInUint8(jenv, &jarr1, &arg1, jarg1)) return 0; arg2 = (size_t)jarg2; { if (!jarg3) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg3) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } 
arg3 = &temp3; } { if (!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg4) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg4 = &temp4; } result = (int)WebPGetInfo((uint8_t const *)arg1,arg2,arg3,arg4); jresult = (jint)result; SWIG_JavaArrayArgoutUint8(jenv, jarr1, arg1, jarg1); { jint jvalue = (jint)temp3; (*jenv)->SetIntArrayRegion(jenv, jarg3, 0, 1, &jvalue); } { jint jvalue = (jint)temp4; (*jenv)->SetIntArrayRegion(jenv, jarg4, 0, 1, &jvalue); } free(arg1); return jresult; } SWIGEXPORT jbyteArray JNICALL Java_com_google_webp_libwebpJNI_WebPDecodeRGB(JNIEnv *jenv, jclass jcls, jbyteArray jarg1, jlong jarg2, jintArray jarg3, jintArray jarg4) { jbyteArray jresult = 0 ; uint8_t *arg1 = (uint8_t *) 0 ; size_t arg2 ; int *arg3 = (int *) 0 ; int *arg4 = (int *) 0 ; jbyte *jarr1 ; int temp3 ; int temp4 ; uint8_t *result = 0 ; (void)jenv; (void)jcls; if (!SWIG_JavaArrayInUint8(jenv, &jarr1, &arg1, jarg1)) return 0; arg2 = (size_t)jarg2; { if (!jarg3) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg3) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg3 = &temp3; } { if (!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg4) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg4 = &temp4; } result = (uint8_t *)WebPDecodeRGB((uint8_t const *)arg1,arg2,arg3,arg4); jresult = SWIG_JavaArrayOutUint8(jenv, result, FillMeInAsSizeCannotBeDeterminedAutomatically); SWIG_JavaArrayArgoutUint8(jenv, jarr1, arg1, jarg1); { jint jvalue = (jint)temp3; (*jenv)->SetIntArrayRegion(jenv, jarg3, 0, 1, 
&jvalue); } { jint jvalue = (jint)temp4; (*jenv)->SetIntArrayRegion(jenv, jarg4, 0, 1, &jvalue); } free(arg1); free(result); return jresult; } SWIGEXPORT jbyteArray JNICALL Java_com_google_webp_libwebpJNI_WebPDecodeRGBA(JNIEnv *jenv, jclass jcls, jbyteArray jarg1, jlong jarg2, jintArray jarg3, jintArray jarg4) { jbyteArray jresult = 0 ; uint8_t *arg1 = (uint8_t *) 0 ; size_t arg2 ; int *arg3 = (int *) 0 ; int *arg4 = (int *) 0 ; jbyte *jarr1 ; int temp3 ; int temp4 ; uint8_t *result = 0 ; (void)jenv; (void)jcls; if (!SWIG_JavaArrayInUint8(jenv, &jarr1, &arg1, jarg1)) return 0; arg2 = (size_t)jarg2; { if (!jarg3) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg3) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg3 = &temp3; } { if (!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg4) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg4 = &temp4; } result = (uint8_t *)WebPDecodeRGBA((uint8_t const *)arg1,arg2,arg3,arg4); jresult = SWIG_JavaArrayOutUint8(jenv, result, FillMeInAsSizeCannotBeDeterminedAutomatically); SWIG_JavaArrayArgoutUint8(jenv, jarr1, arg1, jarg1); { jint jvalue = (jint)temp3; (*jenv)->SetIntArrayRegion(jenv, jarg3, 0, 1, &jvalue); } { jint jvalue = (jint)temp4; (*jenv)->SetIntArrayRegion(jenv, jarg4, 0, 1, &jvalue); } free(arg1); free(result); return jresult; } SWIGEXPORT jbyteArray JNICALL Java_com_google_webp_libwebpJNI_WebPDecodeARGB(JNIEnv *jenv, jclass jcls, jbyteArray jarg1, jlong jarg2, jintArray jarg3, jintArray jarg4) { jbyteArray jresult = 0 ; uint8_t *arg1 = (uint8_t *) 0 ; size_t arg2 ; int *arg3 = (int *) 0 ; int *arg4 = (int *) 0 ; jbyte *jarr1 ; int temp3 ; int temp4 ; uint8_t *result = 0 ; (void)jenv; 
(void)jcls; if (!SWIG_JavaArrayInUint8(jenv, &jarr1, &arg1, jarg1)) return 0; arg2 = (size_t)jarg2; { if (!jarg3) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg3) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg3 = &temp3; } { if (!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg4) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg4 = &temp4; } result = (uint8_t *)WebPDecodeARGB((uint8_t const *)arg1,arg2,arg3,arg4); jresult = SWIG_JavaArrayOutUint8(jenv, result, FillMeInAsSizeCannotBeDeterminedAutomatically); SWIG_JavaArrayArgoutUint8(jenv, jarr1, arg1, jarg1); { jint jvalue = (jint)temp3; (*jenv)->SetIntArrayRegion(jenv, jarg3, 0, 1, &jvalue); } { jint jvalue = (jint)temp4; (*jenv)->SetIntArrayRegion(jenv, jarg4, 0, 1, &jvalue); } free(arg1); free(result); return jresult; } SWIGEXPORT jbyteArray JNICALL Java_com_google_webp_libwebpJNI_WebPDecodeBGR(JNIEnv *jenv, jclass jcls, jbyteArray jarg1, jlong jarg2, jintArray jarg3, jintArray jarg4) { jbyteArray jresult = 0 ; uint8_t *arg1 = (uint8_t *) 0 ; size_t arg2 ; int *arg3 = (int *) 0 ; int *arg4 = (int *) 0 ; jbyte *jarr1 ; int temp3 ; int temp4 ; uint8_t *result = 0 ; (void)jenv; (void)jcls; if (!SWIG_JavaArrayInUint8(jenv, &jarr1, &arg1, jarg1)) return 0; arg2 = (size_t)jarg2; { if (!jarg3) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg3) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg3 = &temp3; } { if (!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if 
((*jenv)->GetArrayLength(jenv, jarg4) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg4 = &temp4; } result = (uint8_t *)WebPDecodeBGR((uint8_t const *)arg1,arg2,arg3,arg4); jresult = SWIG_JavaArrayOutUint8(jenv, result, FillMeInAsSizeCannotBeDeterminedAutomatically); SWIG_JavaArrayArgoutUint8(jenv, jarr1, arg1, jarg1); { jint jvalue = (jint)temp3; (*jenv)->SetIntArrayRegion(jenv, jarg3, 0, 1, &jvalue); } { jint jvalue = (jint)temp4; (*jenv)->SetIntArrayRegion(jenv, jarg4, 0, 1, &jvalue); } free(arg1); free(result); return jresult; } SWIGEXPORT jbyteArray JNICALL Java_com_google_webp_libwebpJNI_WebPDecodeBGRA(JNIEnv *jenv, jclass jcls, jbyteArray jarg1, jlong jarg2, jintArray jarg3, jintArray jarg4) { jbyteArray jresult = 0 ; uint8_t *arg1 = (uint8_t *) 0 ; size_t arg2 ; int *arg3 = (int *) 0 ; int *arg4 = (int *) 0 ; jbyte *jarr1 ; int temp3 ; int temp4 ; uint8_t *result = 0 ; (void)jenv; (void)jcls; if (!SWIG_JavaArrayInUint8(jenv, &jarr1, &arg1, jarg1)) return 0; arg2 = (size_t)jarg2; { if (!jarg3) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg3) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg3 = &temp3; } { if (!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg4) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg4 = &temp4; } result = (uint8_t *)WebPDecodeBGRA((uint8_t const *)arg1,arg2,arg3,arg4); jresult = SWIG_JavaArrayOutUint8(jenv, result, FillMeInAsSizeCannotBeDeterminedAutomatically); SWIG_JavaArrayArgoutUint8(jenv, jarr1, arg1, jarg1); { jint jvalue = (jint)temp3; (*jenv)->SetIntArrayRegion(jenv, jarg3, 0, 1, &jvalue); } { jint jvalue = 
(jint)temp4; (*jenv)->SetIntArrayRegion(jenv, jarg4, 0, 1, &jvalue); } free(arg1); free(result); return jresult; } SWIGEXPORT jint JNICALL Java_com_google_webp_libwebpJNI_WebPGetEncoderVersion(JNIEnv *jenv, jclass jcls) { jint jresult = 0 ; int result; (void)jenv; (void)jcls; result = (int)WebPGetEncoderVersion(); jresult = (jint)result; return jresult; } SWIGEXPORT jbyteArray JNICALL Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeRGB(JNIEnv *jenv, jclass jcls, jbyteArray jarg1, jint jarg2, jint jarg3, jintArray jarg4, jint jarg5, jint jarg6, jint jarg7, jfloat jarg8) { jbyteArray jresult = 0 ; uint8_t *arg1 = (uint8_t *) 0 ; int *arg2 = (int *) 0 ; int *arg3 = (int *) 0 ; int *arg4 = (int *) 0 ; int arg5 ; int arg6 ; int arg7 ; float arg8 ; jbyte *jarr1 ; int temp4 ; uint8_t *result = 0 ; (void)jenv; (void)jcls; if (!SWIG_JavaArrayInUint8(jenv, &jarr1, &arg1, jarg1)) return 0; arg2 = (int *)&jarg2; arg3 = (int *)&jarg3; { if (!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg4) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg4 = &temp4; } arg5 = (int)jarg5; arg6 = (int)jarg6; arg7 = (int)jarg7; arg8 = (float)jarg8; result = (uint8_t *)wrap_WebPEncodeRGB((uint8_t const *)arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8); jresult = SWIG_JavaArrayOutUint8(jenv, result, FillMeInAsSizeCannotBeDeterminedAutomatically); SWIG_JavaArrayArgoutUint8(jenv, jarr1, arg1, jarg1); { jint jvalue = (jint)temp4; (*jenv)->SetIntArrayRegion(jenv, jarg4, 0, 1, &jvalue); } free(arg1); free(result); return jresult; } SWIGEXPORT jbyteArray JNICALL Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeBGR(JNIEnv *jenv, jclass jcls, jbyteArray jarg1, jint jarg2, jint jarg3, jintArray jarg4, jint jarg5, jint jarg6, jint jarg7, jfloat jarg8) { jbyteArray jresult = 0 ; uint8_t *arg1 = (uint8_t *) 0 ; int *arg2 = (int *) 0 ; int *arg3 = (int 
*) 0 ; int *arg4 = (int *) 0 ; int arg5 ; int arg6 ; int arg7 ; float arg8 ; jbyte *jarr1 ; int temp4 ; uint8_t *result = 0 ; (void)jenv; (void)jcls; if (!SWIG_JavaArrayInUint8(jenv, &jarr1, &arg1, jarg1)) return 0; arg2 = (int *)&jarg2; arg3 = (int *)&jarg3; { if (!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg4) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg4 = &temp4; } arg5 = (int)jarg5; arg6 = (int)jarg6; arg7 = (int)jarg7; arg8 = (float)jarg8; result = (uint8_t *)wrap_WebPEncodeBGR((uint8_t const *)arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8); jresult = SWIG_JavaArrayOutUint8(jenv, result, FillMeInAsSizeCannotBeDeterminedAutomatically); SWIG_JavaArrayArgoutUint8(jenv, jarr1, arg1, jarg1); { jint jvalue = (jint)temp4; (*jenv)->SetIntArrayRegion(jenv, jarg4, 0, 1, &jvalue); } free(arg1); free(result); return jresult; } SWIGEXPORT jbyteArray JNICALL Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeRGBA(JNIEnv *jenv, jclass jcls, jbyteArray jarg1, jint jarg2, jint jarg3, jintArray jarg4, jint jarg5, jint jarg6, jint jarg7, jfloat jarg8) { jbyteArray jresult = 0 ; uint8_t *arg1 = (uint8_t *) 0 ; int *arg2 = (int *) 0 ; int *arg3 = (int *) 0 ; int *arg4 = (int *) 0 ; int arg5 ; int arg6 ; int arg7 ; float arg8 ; jbyte *jarr1 ; int temp4 ; uint8_t *result = 0 ; (void)jenv; (void)jcls; if (!SWIG_JavaArrayInUint8(jenv, &jarr1, &arg1, jarg1)) return 0; arg2 = (int *)&jarg2; arg3 = (int *)&jarg3; { if (!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg4) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg4 = &temp4; } arg5 = (int)jarg5; arg6 = (int)jarg6; arg7 = (int)jarg7; arg8 = (float)jarg8; result = (uint8_t 
*)wrap_WebPEncodeRGBA((uint8_t const *)arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8); jresult = SWIG_JavaArrayOutUint8(jenv, result, FillMeInAsSizeCannotBeDeterminedAutomatically); SWIG_JavaArrayArgoutUint8(jenv, jarr1, arg1, jarg1); { jint jvalue = (jint)temp4; (*jenv)->SetIntArrayRegion(jenv, jarg4, 0, 1, &jvalue); } free(arg1); free(result); return jresult; } SWIGEXPORT jbyteArray JNICALL Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeBGRA(JNIEnv *jenv, jclass jcls, jbyteArray jarg1, jint jarg2, jint jarg3, jintArray jarg4, jint jarg5, jint jarg6, jint jarg7, jfloat jarg8) { jbyteArray jresult = 0 ; uint8_t *arg1 = (uint8_t *) 0 ; int *arg2 = (int *) 0 ; int *arg3 = (int *) 0 ; int *arg4 = (int *) 0 ; int arg5 ; int arg6 ; int arg7 ; float arg8 ; jbyte *jarr1 ; int temp4 ; uint8_t *result = 0 ; (void)jenv; (void)jcls; if (!SWIG_JavaArrayInUint8(jenv, &jarr1, &arg1, jarg1)) return 0; arg2 = (int *)&jarg2; arg3 = (int *)&jarg3; { if (!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg4) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg4 = &temp4; } arg5 = (int)jarg5; arg6 = (int)jarg6; arg7 = (int)jarg7; arg8 = (float)jarg8; result = (uint8_t *)wrap_WebPEncodeBGRA((uint8_t const *)arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8); jresult = SWIG_JavaArrayOutUint8(jenv, result, FillMeInAsSizeCannotBeDeterminedAutomatically); SWIG_JavaArrayArgoutUint8(jenv, jarr1, arg1, jarg1); { jint jvalue = (jint)temp4; (*jenv)->SetIntArrayRegion(jenv, jarg4, 0, 1, &jvalue); } free(arg1); free(result); return jresult; } SWIGEXPORT jbyteArray JNICALL Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeLosslessRGB(JNIEnv *jenv, jclass jcls, jbyteArray jarg1, jint jarg2, jint jarg3, jintArray jarg4, jint jarg5, jint jarg6, jint jarg7) { jbyteArray jresult = 0 ; uint8_t *arg1 = (uint8_t *) 0 ; int *arg2 = (int *) 0 ; int *arg3 = (int *) 
0 ; int *arg4 = (int *) 0 ; int arg5 ; int arg6 ; int arg7 ; jbyte *jarr1 ; int temp4 ; uint8_t *result = 0 ; (void)jenv; (void)jcls; if (!SWIG_JavaArrayInUint8(jenv, &jarr1, &arg1, jarg1)) return 0; arg2 = (int *)&jarg2; arg3 = (int *)&jarg3; { if (!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg4) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg4 = &temp4; } arg5 = (int)jarg5; arg6 = (int)jarg6; arg7 = (int)jarg7; result = (uint8_t *)wrap_WebPEncodeLosslessRGB((uint8_t const *)arg1,arg2,arg3,arg4,arg5,arg6,arg7); jresult = SWIG_JavaArrayOutUint8(jenv, result, FillMeInAsSizeCannotBeDeterminedAutomatically); SWIG_JavaArrayArgoutUint8(jenv, jarr1, arg1, jarg1); { jint jvalue = (jint)temp4; (*jenv)->SetIntArrayRegion(jenv, jarg4, 0, 1, &jvalue); } free(arg1); free(result); return jresult; } SWIGEXPORT jbyteArray JNICALL Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeLosslessBGR(JNIEnv *jenv, jclass jcls, jbyteArray jarg1, jint jarg2, jint jarg3, jintArray jarg4, jint jarg5, jint jarg6, jint jarg7) { jbyteArray jresult = 0 ; uint8_t *arg1 = (uint8_t *) 0 ; int *arg2 = (int *) 0 ; int *arg3 = (int *) 0 ; int *arg4 = (int *) 0 ; int arg5 ; int arg6 ; int arg7 ; jbyte *jarr1 ; int temp4 ; uint8_t *result = 0 ; (void)jenv; (void)jcls; if (!SWIG_JavaArrayInUint8(jenv, &jarr1, &arg1, jarg1)) return 0; arg2 = (int *)&jarg2; arg3 = (int *)&jarg3; { if (!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg4) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg4 = &temp4; } arg5 = (int)jarg5; arg6 = (int)jarg6; arg7 = (int)jarg7; result = (uint8_t *)wrap_WebPEncodeLosslessBGR((uint8_t const *)arg1,arg2,arg3,arg4,arg5,arg6,arg7); jresult = 
SWIG_JavaArrayOutUint8(jenv, result, FillMeInAsSizeCannotBeDeterminedAutomatically); SWIG_JavaArrayArgoutUint8(jenv, jarr1, arg1, jarg1); { jint jvalue = (jint)temp4; (*jenv)->SetIntArrayRegion(jenv, jarg4, 0, 1, &jvalue); } free(arg1); free(result); return jresult; } SWIGEXPORT jbyteArray JNICALL Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeLosslessRGBA(JNIEnv *jenv, jclass jcls, jbyteArray jarg1, jint jarg2, jint jarg3, jintArray jarg4, jint jarg5, jint jarg6, jint jarg7) { jbyteArray jresult = 0 ; uint8_t *arg1 = (uint8_t *) 0 ; int *arg2 = (int *) 0 ; int *arg3 = (int *) 0 ; int *arg4 = (int *) 0 ; int arg5 ; int arg6 ; int arg7 ; jbyte *jarr1 ; int temp4 ; uint8_t *result = 0 ; (void)jenv; (void)jcls; if (!SWIG_JavaArrayInUint8(jenv, &jarr1, &arg1, jarg1)) return 0; arg2 = (int *)&jarg2; arg3 = (int *)&jarg3; { if (!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg4) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg4 = &temp4; } arg5 = (int)jarg5; arg6 = (int)jarg6; arg7 = (int)jarg7; result = (uint8_t *)wrap_WebPEncodeLosslessRGBA((uint8_t const *)arg1,arg2,arg3,arg4,arg5,arg6,arg7); jresult = SWIG_JavaArrayOutUint8(jenv, result, FillMeInAsSizeCannotBeDeterminedAutomatically); SWIG_JavaArrayArgoutUint8(jenv, jarr1, arg1, jarg1); { jint jvalue = (jint)temp4; (*jenv)->SetIntArrayRegion(jenv, jarg4, 0, 1, &jvalue); } free(arg1); free(result); return jresult; } SWIGEXPORT jbyteArray JNICALL Java_com_google_webp_libwebpJNI_wrap_1WebPEncodeLosslessBGRA(JNIEnv *jenv, jclass jcls, jbyteArray jarg1, jint jarg2, jint jarg3, jintArray jarg4, jint jarg5, jint jarg6, jint jarg7) { jbyteArray jresult = 0 ; uint8_t *arg1 = (uint8_t *) 0 ; int *arg2 = (int *) 0 ; int *arg3 = (int *) 0 ; int *arg4 = (int *) 0 ; int arg5 ; int arg6 ; int arg7 ; jbyte *jarr1 ; int temp4 ; uint8_t *result = 0 ; (void)jenv; 
(void)jcls; if (!SWIG_JavaArrayInUint8(jenv, &jarr1, &arg1, jarg1)) return 0; arg2 = (int *)&jarg2; arg3 = (int *)&jarg3; { if (!jarg4) { SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "array null"); return 0; } if ((*jenv)->GetArrayLength(jenv, jarg4) == 0) { SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, "Array must contain at least 1 element"); return 0; } arg4 = &temp4; } arg5 = (int)jarg5; arg6 = (int)jarg6; arg7 = (int)jarg7; result = (uint8_t *)wrap_WebPEncodeLosslessBGRA((uint8_t const *)arg1,arg2,arg3,arg4,arg5,arg6,arg7); jresult = SWIG_JavaArrayOutUint8(jenv, result, FillMeInAsSizeCannotBeDeterminedAutomatically); SWIG_JavaArrayArgoutUint8(jenv, jarr1, arg1, jarg1); { jint jvalue = (jint)temp4; (*jenv)->SetIntArrayRegion(jenv, jarg4, 0, 1, &jvalue); } free(arg1); free(result); return jresult; } #ifdef __cplusplus } #endif
mit
poizan42/coreclr
src/pal/tests/palsuite/c_runtime/vfprintf/test12/test12.cpp
137
1525
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. /*============================================================================ ** ** Source: test12.c ** ** Purpose: Test #12 for the vfprintf function. Tests the (lowercase) ** hexadecimal specifier (%x) ** ** **==========================================================================*/ #include <palsuite.h> #include "../vfprintf.h" int __cdecl main(int argc, char *argv[]) { int neg = -42; int pos = 0x1234ab; INT64 l = I64(0x1234567887654321); if (PAL_Initialize(argc, argv)) { return FAIL; } DoNumTest("foo %x", pos, "foo 1234ab"); DoNumTest("foo %lx", pos, "foo 1234ab"); DoNumTest("foo %hx", pos, "foo 34ab"); DoNumTest("foo %Lx", pos, "foo 1234ab"); DoI64Test("foo %I64x", l, "0x1234567887654321", "foo 1234567887654321"); DoNumTest("foo %7x", pos, "foo 1234ab"); DoNumTest("foo %-7x", pos, "foo 1234ab "); DoNumTest("foo %.1x", pos, "foo 1234ab"); DoNumTest("foo %.7x", pos, "foo 01234ab"); DoNumTest("foo %07x", pos, "foo 01234ab"); DoNumTest("foo %#x", pos, "foo 0x1234ab"); DoNumTest("foo %+x", pos, "foo 1234ab"); DoNumTest("foo % x", pos, "foo 1234ab"); DoNumTest("foo %+x", neg, "foo ffffffd6"); DoNumTest("foo % x", neg, "foo ffffffd6"); PAL_Terminate(); return PASS; }
mit
mammix2/killercoin-development
src/protocol.cpp
1427
3431
// Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2012 The Bitcoin developers // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "protocol.h" #include "util.h" #include "netbase.h" #include "main.h" #ifndef WIN32 # include <arpa/inet.h> #endif static const char* ppszTypeName[] = { "ERROR", "tx", "block", "filtered block" }; CMessageHeader::CMessageHeader() { memcpy(pchMessageStart, ::pchMessageStart, sizeof(pchMessageStart)); memset(pchCommand, 0, sizeof(pchCommand)); pchCommand[1] = 1; nMessageSize = -1; nChecksum = 0; } CMessageHeader::CMessageHeader(const char* pszCommand, unsigned int nMessageSizeIn) { memcpy(pchMessageStart, ::pchMessageStart, sizeof(pchMessageStart)); strncpy(pchCommand, pszCommand, COMMAND_SIZE); nMessageSize = nMessageSizeIn; nChecksum = 0; } std::string CMessageHeader::GetCommand() const { if (pchCommand[COMMAND_SIZE-1] == 0) return std::string(pchCommand, pchCommand + strlen(pchCommand)); else return std::string(pchCommand, pchCommand + COMMAND_SIZE); } bool CMessageHeader::IsValid() const { // Check start string if (memcmp(pchMessageStart, ::pchMessageStart, sizeof(pchMessageStart)) != 0) return false; // Check the command string for errors for (const char* p1 = pchCommand; p1 < pchCommand + COMMAND_SIZE; p1++) { if (*p1 == 0) { // Must be all zeros after the first zero for (; p1 < pchCommand + COMMAND_SIZE; p1++) if (*p1 != 0) return false; } else if (*p1 < ' ' || *p1 > 0x7E) return false; } // Message size if (nMessageSize > MAX_SIZE) { printf("CMessageHeader::IsValid() : (%s, %u bytes) nMessageSize > MAX_SIZE\n", GetCommand().c_str(), nMessageSize); return false; } return true; } CAddress::CAddress() : CService() { Init(); } CAddress::CAddress(CService ipIn, uint64 nServicesIn) : CService(ipIn) { Init(); nServices = nServicesIn; } void CAddress::Init() { nServices = NODE_NETWORK; nTime = 100000000; nLastTry = 0; } 
CInv::CInv() { type = 0; hash = 0; } CInv::CInv(int typeIn, const uint256& hashIn) { type = typeIn; hash = hashIn; } CInv::CInv(const std::string& strType, const uint256& hashIn) { unsigned int i; for (i = 1; i < ARRAYLEN(ppszTypeName); i++) { if (strType == ppszTypeName[i]) { type = i; break; } } if (i == ARRAYLEN(ppszTypeName)) throw std::out_of_range(strprintf("CInv::CInv(string, uint256) : unknown type '%s'", strType.c_str())); hash = hashIn; } bool operator<(const CInv& a, const CInv& b) { return (a.type < b.type || (a.type == b.type && a.hash < b.hash)); } bool CInv::IsKnownType() const { return (type >= 1 && type < (int)ARRAYLEN(ppszTypeName)); } const char* CInv::GetCommand() const { if (!IsKnownType()) throw std::out_of_range(strprintf("CInv::GetCommand() : type=%d unknown type", type)); return ppszTypeName[type]; } std::string CInv::ToString() const { return strprintf("%s %s", GetCommand(), hash.ToString().c_str()); } void CInv::print() const { printf("CInv(%s)\n", ToString().c_str()); }
mit
kasperdokter/Reo-compiler
FreeRTOSv9.0.0/FreeRTOS/Demo/Common/ethernet/lwIP_132/src/netif/ppp/chpms.c
148
11840
/*** WARNING - THIS CODE HAS NOT BEEN FINISHED! ***/ /*** The original PPPD code is written in a way to require either the UNIX DES encryption functions encrypt(3) and setkey(3) or the DES library libdes. Since both is not included in lwIP, MSCHAP currently does not work! */ /***************************************************************************** * chpms.c - Network MicroSoft Challenge Handshake Authentication Protocol program file. * * Copyright (c) 2003 by Marc Boucher, Services Informatiques (MBSI) inc. * Copyright (c) 1997 by Global Election Systems Inc. All rights reserved. * * The authors hereby grant permission to use, copy, modify, distribute, * and license this software and its documentation for any purpose, provided * that existing copyright notices are retained in all copies and that this * notice and the following disclaimer are included verbatim in any * distributions. No written agreement, license, or royalty fee is required * for any of the authorized uses. * * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS *AS IS* AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************** * REVISION HISTORY * * 03-01-01 Marc Boucher <marc@mbsi.ca> * Ported to lwIP. * 97-12-08 Guy Lancaster <lancasterg@acm.org>, Global Election Systems Inc. * Original based on BSD chap_ms.c. 
*****************************************************************************/ /* * chap_ms.c - Microsoft MS-CHAP compatible implementation. * * Copyright (c) 1995 Eric Rosenquist, Strata Software Limited. * http://www.strataware.com/ * * All rights reserved. * * Redistribution and use in source and binary forms are permitted * provided that the above copyright notice and this paragraph are * duplicated in all such forms and that any documentation, * advertising materials, and other materials related to such * distribution and use acknowledge that the software was developed * by Eric Rosenquist. The name of the author may not be used to * endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* * Modifications by Lauri Pesonen / lpesonen@clinet.fi, april 1997 * * Implemented LANManager type password response to MS-CHAP challenges. * Now pppd provides both NT style and LANMan style blocks, and the * prefered is set by option "ms-lanman". Default is to use NT. * The hash text (StdText) was taken from Win95 RASAPI32.DLL. 
 *
 * You should also use DOMAIN\\USERNAME as described in README.MSCHAP80
 */

#define USE_CRYPT

#include "lwip/opt.h"

#if PPP_SUPPORT /* don't build if not configured for use in lwipopts.h */
#if MSCHAP_SUPPORT /* don't build if not configured for use in lwipopts.h */

#include "ppp.h"
#include "pppdebug.h"

#include "md4.h"
#ifndef USE_CRYPT
#include "des.h"
#endif
#include "chap.h"
#include "chpms.h"

/*************************/
/*** LOCAL DEFINITIONS ***/
/*************************/

/************************/
/*** LOCAL DATA TYPES ***/
/************************/

/* Wire layout of an MS-CHAP response: 24-octet LANManager response,
 * 24-octet NT response, and a flag selecting which one the peer uses. */
typedef struct {
    u_char LANManResp[24];
    u_char NTResp[24];
    u_char UseNT; /* If 1, ignore the LANMan response field */
} MS_ChapResponse;
/* We use MS_CHAP_RESPONSE_LEN, rather than sizeof(MS_ChapResponse),
   in case this struct gets padded. */

/***********************************/
/*** LOCAL FUNCTION DECLARATIONS ***/
/***********************************/

/* XXX Don't know what to do with these. */
/* NOTE(review): these are the UNIX crypt(3)-style DES primitives; per the
 * warning at the top of this file they are NOT provided by lwIP, so this
 * module cannot link until a DES implementation is supplied. */
extern void setkey(const char *);
extern void encrypt(char *, int);

static void DesEncrypt (u_char *, u_char *, u_char *);
static void MakeKey (u_char *, u_char *);

#ifdef USE_CRYPT
static void Expand (u_char *, u_char *);
static void Collapse (u_char *, u_char *);
#endif

static void ChallengeResponse(
    u_char *challenge, /* IN   8 octets */
    u_char *pwHash,    /* IN  16 octets */
    u_char *response   /* OUT 24 octets */
);

static void ChapMS_NT(
    char *rchallenge,
    int rchallenge_len,
    char *secret,
    int secret_len,
    MS_ChapResponse *response
);

static u_char Get7Bits(
    u_char *input,
    int startBit
);

/***********************************/
/*** PUBLIC FUNCTION DEFINITIONS ***/
/***********************************/

/* Build the MS-CHAP response to a received challenge and store it (with its
 * length) into the CHAP state. The NT response is always computed; the
 * LANMan response is only compiled in when MSLANMAN is defined. */
void ChapMS(
    chap_state *cstate,
    char *rchallenge,
    int rchallenge_len,
    char *secret,     /* the password; input to the NT/LANMan hashes */
    int secret_len)
{
    MS_ChapResponse response;
#ifdef MSLANMAN
    extern int ms_lanman;
#endif

#if 0
    CHAPDEBUG((LOG_INFO, "ChapMS: secret is '%.*s'\n", secret_len, secret));
#endif
    BZERO(&response, sizeof(response));

    /* Calculate both always */
    ChapMS_NT(rchallenge, rchallenge_len, secret, secret_len, &response);

#ifdef MSLANMAN
    ChapMS_LANMan(rchallenge, rchallenge_len, secret, secret_len, &response);

    /* prefered method is set by option */
    response.UseNT = !ms_lanman;
#else
    response.UseNT = 1;
#endif

    BCOPY(&response, cstate->response, MS_CHAP_RESPONSE_LEN);
    cstate->resp_length = MS_CHAP_RESPONSE_LEN;
}

/**********************************/
/*** LOCAL FUNCTION DEFINITIONS ***/
/**********************************/

/* Derive the 24-octet response: zero-pad the 16-octet password hash to 21
 * octets, split it into three 7-octet DES keys, and encrypt the 8-octet
 * challenge under each key. */
static void ChallengeResponse(
    u_char *challenge, /* IN   8 octets */
    u_char *pwHash,    /* IN  16 octets */
    u_char *response   /* OUT 24 octets */)
{
    char ZPasswordHash[21];

    BZERO(ZPasswordHash, sizeof(ZPasswordHash));
    BCOPY(pwHash, ZPasswordHash, 16);

#if 0
    log_packet(ZPasswordHash, sizeof(ZPasswordHash),
               "ChallengeResponse - ZPasswordHash", LOG_DEBUG);
#endif

    DesEncrypt(challenge, ZPasswordHash + 0,  response + 0);
    DesEncrypt(challenge, ZPasswordHash + 7,  response + 8);
    DesEncrypt(challenge, ZPasswordHash + 14, response + 16);

#if 0
    log_packet(response, 24, "ChallengeResponse - response", LOG_DEBUG);
#endif
}


#ifdef USE_CRYPT
/* DES-encrypt one 8-octet block via the UNIX setkey(3)/encrypt(3)
 * primitives, which take the key and data as 64 separate 0/1 bytes
 * (hence the Expand/Collapse round trips). */
static void DesEncrypt(
    u_char *clear,  /* IN  8 octets */
    u_char *key,    /* IN  7 octets */
    u_char *cipher  /* OUT 8 octets */)
{
    u_char des_key[8];
    u_char crypt_key[66];
    u_char des_input[66];

    MakeKey(key, des_key);

    Expand(des_key, crypt_key);
    setkey(crypt_key);

#if 0
    CHAPDEBUG((LOG_INFO,
               "DesEncrypt: 8 octet input : %02X%02X%02X%02X%02X%02X%02X%02X\n",
               clear[0], clear[1], clear[2], clear[3],
               clear[4], clear[5], clear[6], clear[7]));
#endif

    Expand(clear, des_input);
    encrypt(des_input, 0);
    Collapse(des_input, cipher);

#if 0
    CHAPDEBUG((LOG_INFO,
               "DesEncrypt: 8 octet output: %02X%02X%02X%02X%02X%02X%02X%02X\n",
               cipher[0], cipher[1], cipher[2], cipher[3],
               cipher[4], cipher[5], cipher[6], cipher[7]));
#endif
}

#else /* USE_CRYPT */

/* DES-encrypt one 8-octet block via the libdes API. */
static void DesEncrypt(
    u_char *clear,  /* IN  8 octets */
    u_char *key,    /* IN  7 octets */
    u_char *cipher  /* OUT 8 octets */)
{
    des_cblock des_key;
    des_key_schedule key_schedule;

    MakeKey(key, des_key);

    des_set_key(&des_key, key_schedule);

#if 0
    CHAPDEBUG((LOG_INFO,
               "DesEncrypt: 8 octet input : %02X%02X%02X%02X%02X%02X%02X%02X\n",
               clear[0], clear[1], clear[2], clear[3],
               clear[4], clear[5], clear[6], clear[7]));
#endif

    des_ecb_encrypt((des_cblock *)clear, (des_cblock *)cipher, key_schedule, 1);

#if 0
    CHAPDEBUG((LOG_INFO,
               "DesEncrypt: 8 octet output: %02X%02X%02X%02X%02X%02X%02X%02X\n",
               cipher[0], cipher[1], cipher[2], cipher[3],
               cipher[4], cipher[5], cipher[6], cipher[7]));
#endif
}

#endif /* USE_CRYPT */


/* Extract 7 bits starting at bit offset startBit (MSB-first across octet
 * boundaries) and return them shifted into the top 7 bits of the result;
 * the low bit (DES parity position) is left clear. */
static u_char Get7Bits(
    u_char *input,
    int startBit)
{
    register unsigned int word;

    word  = (unsigned)input[startBit / 8] << 8;
    word |= (unsigned)input[startBit / 8 + 1];

    word >>= 15 - (startBit % 8 + 7);

    return word & 0xFE;
}

#ifdef USE_CRYPT
/* in == 8-byte string (expanded version of the 56-bit key)
 * out == 64-byte string where each byte is either 1 or 0
 * Note that the low-order "bit" is always ignored by setkey()
 */
static void Expand(u_char *in, u_char *out)
{
    int j, c;
    int i;

    for(i = 0; i < 64; in++){
        c = *in;
        for(j = 7; j >= 0; j--) {
            *out++ = (c >> j) & 01;
        }
        i += 8;
    }
}

/* The inverse of Expand */
static void Collapse(u_char *in, u_char *out)
{
    int j;
    int i;
    unsigned int c;

    for (i = 0; i < 64; i += 8, out++) {
        c = 0;
        for (j = 7; j >= 0; j--, in++) {
            c |= *in << j;
        }
        *out = c & 0xff;
    }
}
#endif

/* Spread a 56-bit key across 8 octets, 7 bits per octet, leaving the DES
 * parity position (low bit) free. Only the libdes build actually sets the
 * parity bits afterwards. */
static void MakeKey(
    u_char *key,     /* IN  56 bit DES key missing parity bits */
    u_char *des_key  /* OUT 64 bit DES key with parity bits added */)
{
    des_key[0] = Get7Bits(key,  0);
    des_key[1] = Get7Bits(key,  7);
    des_key[2] = Get7Bits(key, 14);
    des_key[3] = Get7Bits(key, 21);
    des_key[4] = Get7Bits(key, 28);
    des_key[5] = Get7Bits(key, 35);
    des_key[6] = Get7Bits(key, 42);
    des_key[7] = Get7Bits(key, 49);

#ifndef USE_CRYPT
    des_set_odd_parity((des_cblock *)des_key);
#endif

#if 0
    CHAPDEBUG((LOG_INFO,
               "MakeKey: 56-bit input : %02X%02X%02X%02X%02X%02X%02X\n",
               key[0], key[1], key[2], key[3], key[4], key[5], key[6]));
    CHAPDEBUG((LOG_INFO,
               "MakeKey: 64-bit output: %02X%02X%02X%02X%02X%02X%02X%02X\n",
               des_key[0], des_key[1], des_key[2], des_key[3],
               des_key[4], des_key[5], des_key[6], des_key[7]));
#endif
}

/* Compute the NT response: MD4 over the "Unicode" password (low byte of
 * each UCS-2 code unit only — the odd bytes stay zero), then derive the
 * challenge response from that hash.
 * NOTE(review): rchallenge_len is unused; the challenge is assumed to be
 * 8 octets — confirm against the caller. */
static void ChapMS_NT(
    char *rchallenge,
    int rchallenge_len,
    char *secret,
    int secret_len,
    MS_ChapResponse *response)
{
    int i;
    MDstruct md4Context;
    u_char unicodePassword[MAX_NT_PASSWORD * 2];
    static int low_byte_first = -1;

    /* Initialize the Unicode version of the secret (== password). */
    /* This implicitly supports 8-bit ISO8859/1 characters. */
    BZERO(unicodePassword, sizeof(unicodePassword));
    for (i = 0; i < secret_len; i++) {
        unicodePassword[i * 2] = (u_char)secret[i];
    }
    MDbegin(&md4Context);
    MDupdate(&md4Context, unicodePassword, secret_len * 2 * 8); /* Unicode is 2 bytes/char, *8 for bit count */

    if (low_byte_first == -1) {
        /* Detect host byte order once: htons() is a no-op on big-endian. */
        low_byte_first = (htons((unsigned short int)1) != 1);
    }
    if (low_byte_first == 0) {
        MDreverse((u_long *)&md4Context); /* sfb 961105 */
    }

    MDupdate(&md4Context, NULL, 0); /* Tell MD4 we're done */

    ChallengeResponse(rchallenge, (char *)md4Context.buffer, response->NTResp);
}

#ifdef MSLANMAN
static u_char *StdText = (u_char *)"KGS!@#$%"; /* key from rasapi32.dll */

/* Compute the LANMan response: DES-encrypt the fixed magic string with the
 * upper-cased password as key material, then derive the challenge
 * response from the resulting 16-octet hash. */
static void ChapMS_LANMan(
    char *rchallenge,
    int rchallenge_len,
    char *secret,
    int secret_len,
    MS_ChapResponse *response)
{
    int i;
    u_char UcasePassword[MAX_NT_PASSWORD]; /* max is actually 14 */
    u_char PasswordHash[16];

    /* LANMan password is case insensitive */
    BZERO(UcasePassword, sizeof(UcasePassword));
    for (i = 0; i < secret_len; i++) {
        UcasePassword[i] = (u_char)toupper(secret[i]);
    }
    DesEncrypt( StdText, UcasePassword + 0, PasswordHash + 0 );
    DesEncrypt( StdText, UcasePassword + 7, PasswordHash + 8 );
    ChallengeResponse(rchallenge, PasswordHash, response->LANManResp);
}
#endif

#endif /* MSCHAP_SUPPORT */
#endif /* PPP_SUPPORT */
mit
poetryfar/WinObjC
tools/vsimporter/xib2nib/UINavigationController.cpp
156
2685
//****************************************************************************** // // Copyright (c) 2015 Microsoft Corporation. All rights reserved. // // This code is licensed under the MIT License (MIT). // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // //****************************************************************************** #include "UINavigationController.h" #include "UINavigationBar.h" #include "UINavigationItem.h" #include "UIToolbar.h" UINavigationController::UINavigationController() { _navigationBar = NULL; _toolBar = NULL; } void UINavigationController::InitFromXIB(XIBObject *obj) { UIViewController::InitFromXIB(obj); _navigationBar = (UINavigationBar *) FindMember("IBUINavigationBar"); _toolBar = (UIToolbar *) FindMember("IBUIToolbar"); _outputClassName = "UINavigationController"; } void UINavigationController::InitFromStory(XIBObject *obj) { UIViewController::InitFromStory(obj); _navigationBar = (UINavigationBar *) FindMember("navigationBar"); _outputClassName = "UINavigationController"; } void UINavigationController::Awaken() { UIViewController::Awaken(); if ( _navigationBar ) { _navigationBar->_delegate = this; _navigationBar->_autoresizingMask = UIViewAutoresizingFlexibleWidth; } if ( _toolBar ) { _toolBar->_autoresizingMask = UIViewAutoresizingFlexibleWidth | UIViewAutoresizingFlexibleTopMargin; } for ( int i = 0; i < _childViewControllers->count(); i ++ ) { UIViewController *curController = (UIViewController *) _childViewControllers->objectAtIndex(i); if ( curController->_navigationItem ) { if ( 
!_navigationBar->_items ) { _navigationBar->_items = new XIBArray(); } curController->_navigationItem->_navigationBar = _navigationBar; _navigationBar->_items->AddMember(NULL, curController->_navigationItem); } } } void UINavigationController::ConvertStaticMappings(NIBWriter *writer, XIBObject *obj) { UIViewController::ConvertStaticMappings(writer, obj); if ( _navigationBar ) AddOutputMember(writer, "UINavigationBar", _navigationBar); if ( _toolBar ) AddOutputMember(writer, "UIToolbar", _toolBar); }
mit
robertblackwood/Chipmunk-Spacemanager
Example 2.0/Example 2.0/libs/Chipmunk/src/constraints/cpPivotJoint.c
165
3621
/* Copyright (c) 2007 Scott Lembcke * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
 */

#include "chipmunk_private.h"
#include "constraints/util.h"

/*
 * cpPivotJoint: constrains two bodies to share a single anchor point while
 * leaving their relative rotation free. Solved iteratively with a clamped,
 * accumulated 2D impulse.
 */

/* Pre-solver setup: recompute the body-rotated anchor offsets, the 2x2 mass
   tensor at the pivot, and the bias velocity that corrects position error. */
static void
preStep(cpPivotJoint *joint, cpFloat dt)
{
	cpBody *a = joint->constraint.a;
	cpBody *b = joint->constraint.b;

	joint->r1 = cpvrotate(joint->anchr1, a->rot);
	joint->r2 = cpvrotate(joint->anchr2, b->rot);

	// Calculate mass tensor
	joint->k = k_tensor(a, b, joint->r1, joint->r2);

	// calculate bias velocity: drives the two anchor points back together,
	// clamped to the constraint's maxBias
	cpVect delta = cpvsub(cpvadd(b->p, joint->r2), cpvadd(a->p, joint->r1));
	joint->bias = cpvclamp(cpvmult(delta, -bias_coef(joint->constraint.errorBias, dt)/dt), joint->constraint.maxBias);
}

/* Warm start: re-apply last step's accumulated impulse scaled by dt_coef. */
static void
applyCachedImpulse(cpPivotJoint *joint, cpFloat dt_coef)
{
	cpBody *a = joint->constraint.a;
	cpBody *b = joint->constraint.b;

	apply_impulses(a, b, joint->r1, joint->r2, cpvmult(joint->jAcc, dt_coef));
}

/* One solver iteration: apply the impulse that moves the relative velocity
   at the pivot toward the bias velocity, with the accumulated total clamped
   to the constraint's force limit for this time step. */
static void
applyImpulse(cpPivotJoint *joint, cpFloat dt)
{
	cpBody *a = joint->constraint.a;
	cpBody *b = joint->constraint.b;

	cpVect r1 = joint->r1;
	cpVect r2 = joint->r2;

	// compute relative velocity
	cpVect vr = relative_velocity(a, b, r1, r2);

	// compute normal impulse
	cpVect j = cpMat2x2Transform(joint->k, cpvsub(joint->bias, vr));
	cpVect jOld = joint->jAcc;
	// clamp the *accumulated* impulse, then apply only the delta
	joint->jAcc = cpvclamp(cpvadd(joint->jAcc, j), joint->constraint.maxForce*dt);
	j = cpvsub(joint->jAcc, jOld);

	// apply impulse
	apply_impulses(a, b, joint->r1, joint->r2, j);
}

/* Magnitude of the accumulated impulse (exposed via the class getter). */
static cpFloat
getImpulse(cpConstraint *joint)
{
	return cpvlength(((cpPivotJoint *)joint)->jAcc);
}

/* Virtual table hooking the functions above into the generic solver. */
static const cpConstraintClass klass = {
	(cpConstraintPreStepImpl)preStep,
	(cpConstraintApplyCachedImpulseImpl)applyCachedImpulse,
	(cpConstraintApplyImpulseImpl)applyImpulse,
	(cpConstraintGetImpulseImpl)getImpulse,
};
CP_DefineClassGetter(cpPivotJoint)

/* Allocate a zeroed, uninitialized pivot joint. */
cpPivotJoint *
cpPivotJointAlloc(void)
{
	return (cpPivotJoint *)cpcalloc(1, sizeof(cpPivotJoint));
}

/* Initialize a pivot joint from body-local anchor points. */
cpPivotJoint *
cpPivotJointInit(cpPivotJoint *joint, cpBody *a, cpBody *b, cpVect anchr1, cpVect anchr2)
{
	cpConstraintInit((cpConstraint *)joint, &klass, a, b);

	joint->anchr1 = anchr1;
	joint->anchr2 = anchr2;

	joint->jAcc = cpvzero;

	return joint;
}

/* Allocate + init from body-local anchors. */
cpConstraint *
cpPivotJointNew2(cpBody *a, cpBody *b, cpVect anchr1, cpVect anchr2)
{
	return (cpConstraint *)cpPivotJointInit(cpPivotJointAlloc(), a, b, anchr1, anchr2);
}

/* Allocate + init from a single pivot point in world coordinates; a NULL
   body means the corresponding anchor is the world point itself. */
cpConstraint *
cpPivotJointNew(cpBody *a, cpBody *b, cpVect pivot)
{
	cpVect anchr1 = (a ? cpBodyWorld2Local(a, pivot) : pivot);
	cpVect anchr2 = (b ? cpBodyWorld2Local(b, pivot) : pivot);
	return cpPivotJointNew2(a, b, anchr1, anchr2);
}
mit
snowmap/WinObjC
deps/3rdparty/cairolegacy/src/cairo-atomic.c
186
2953
/* cairo - a vector graphics library with display and print output * * Copyright © 2007 Chris Wilson * * This library is free software; you can redistribute it and/or * modify it either under the terms of the GNU Lesser General Public * License version 2.1 as published by the Free Software Foundation * (the "LGPL") or, at your option, under the terms of the Mozilla * Public License Version 1.1 (the "MPL"). If you do not alter this * notice, a recipient may use your version of this file under either * the MPL or the LGPL. * * You should have received a copy of the LGPL along with this library * in the file COPYING-LGPL-2.1; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA * You should have received a copy of the MPL along with this library * in the file COPYING-MPL-1.1 * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY * OF ANY KIND, either express or implied. See the LGPL or the MPL for * the specific language governing rights and limitations. * * The Original Code is the cairo graphics library. 
 *
 * Contributor(s):
 *	Chris Wilson <chris@chris-wilson.co.uk>
 */

#include "cairoint.h"

#include "cairo-atomic-private.h"
#include "cairo-mutex-private.h"

#ifdef HAS_ATOMIC_OPS
/* With native atomics nothing is compiled here; just verify at compile time
 * that a pointer fits in one of the integer widths the atomic ops cover. */
COMPILE_TIME_ASSERT(sizeof(void*) == sizeof(int) ||
                    sizeof(void*) == sizeof(long) ||
                    sizeof(void*) == sizeof(long long));
#else
/* Fallback implementations for platforms without native atomic ops: every
 * operation is serialized through the single global _cairo_atomic_mutex. */

/* Atomically increment *x. */
void
_cairo_atomic_int_inc (cairo_atomic_intptr_t *x)
{
    CAIRO_MUTEX_LOCK (_cairo_atomic_mutex);
    *x += 1;
    CAIRO_MUTEX_UNLOCK (_cairo_atomic_mutex);
}

/* Atomically decrement *x; returns TRUE when the new value is zero
 * (the usual "last reference released" test). */
cairo_bool_t
_cairo_atomic_int_dec_and_test (cairo_atomic_intptr_t *x)
{
    cairo_bool_t ret;

    CAIRO_MUTEX_LOCK (_cairo_atomic_mutex);
    ret = --*x == 0;
    CAIRO_MUTEX_UNLOCK (_cairo_atomic_mutex);

    return ret;
}

/* Atomic compare-and-exchange on an integer: store newv only when *x still
 * equals oldv. Always returns the value *x had before the call, so the
 * caller can detect success with (returned == oldv). */
cairo_atomic_intptr_t
_cairo_atomic_int_cmpxchg_return_old_impl (cairo_atomic_intptr_t *x,
                                           cairo_atomic_intptr_t oldv,
                                           cairo_atomic_intptr_t newv)
{
    cairo_atomic_intptr_t ret;

    CAIRO_MUTEX_LOCK (_cairo_atomic_mutex);
    ret = *x;
    if (ret == oldv)
        *x = newv;
    CAIRO_MUTEX_UNLOCK (_cairo_atomic_mutex);

    return ret;
}

/* Pointer flavor of the compare-and-exchange above. */
void *
_cairo_atomic_ptr_cmpxchg_return_old_impl (void **x, void *oldv, void *newv)
{
    void *ret;

    CAIRO_MUTEX_LOCK (_cairo_atomic_mutex);
    ret = *x;
    if (ret == oldv)
        *x = newv;
    CAIRO_MUTEX_UNLOCK (_cairo_atomic_mutex);

    return ret;
}

#ifdef ATOMIC_OP_NEEDS_MEMORY_BARRIER
/* Atomic load: the lock/unlock pair doubles as the required memory barrier. */
cairo_atomic_intptr_t
_cairo_atomic_int_get (cairo_atomic_intptr_t *x)
{
    cairo_atomic_intptr_t ret;

    CAIRO_MUTEX_LOCK (_cairo_atomic_mutex);
    ret = *x;
    CAIRO_MUTEX_UNLOCK (_cairo_atomic_mutex);

    return ret;
}
#endif

#endif
mit
johnmyqin/WinObjC
deps/3rdparty/icu/icu/source/tools/genrb/genrb.c
188
27271
/* ******************************************************************************* * * Copyright (C) 1998-2014, International Business Machines * Corporation and others. All Rights Reserved. * ******************************************************************************* * * File genrb.c * * Modification History: * * Date Name Description * 05/25/99 stephen Creation. * 5/10/01 Ram removed ustdio dependency ******************************************************************************* */ #include "genrb.h" #include "unicode/uclean.h" #include "ucmndata.h" /* TODO: for reading the pool bundle */ /* Protos */ void processFile(const char *filename, const char* cp, const char *inputDir, const char *outputDir, const char *packageName, UBool omitBinaryCollation, UErrorCode *status); static char *make_res_filename(const char *filename, const char *outputDir, const char *packageName, UErrorCode *status); /* File suffixes */ #define RES_SUFFIX ".res" #define COL_SUFFIX ".col" static char theCurrentFileName[2048]; const char *gCurrentFileName = theCurrentFileName; #ifdef XP_MAC_CONSOLE #include <console.h> #endif enum { HELP1, HELP2, VERBOSE, QUIET, VERSION, SOURCEDIR, DESTDIR, ENCODING, ICUDATADIR, WRITE_JAVA, COPYRIGHT, JAVA_PACKAGE, BUNDLE_NAME, WRITE_XLIFF, STRICT, NO_BINARY_COLLATION, LANGUAGE, NO_COLLATION_RULES, FORMAT_VERSION, WRITE_POOL_BUNDLE, USE_POOL_BUNDLE, INCLUDE_UNIHAN_COLL }; UOption options[]={ UOPTION_HELP_H, UOPTION_HELP_QUESTION_MARK, UOPTION_VERBOSE, UOPTION_QUIET, UOPTION_VERSION, UOPTION_SOURCEDIR, UOPTION_DESTDIR, UOPTION_ENCODING, UOPTION_ICUDATADIR, UOPTION_WRITE_JAVA, UOPTION_COPYRIGHT, UOPTION_DEF("java-package", '\x01', UOPT_REQUIRES_ARG), UOPTION_BUNDLE_NAME, UOPTION_DEF("write-xliff", 'x', UOPT_OPTIONAL_ARG), UOPTION_DEF("strict", 'k', UOPT_NO_ARG), /* 14 */ UOPTION_DEF("noBinaryCollation", 'C', UOPT_NO_ARG),/* 15 */ UOPTION_DEF("language", 'l', UOPT_REQUIRES_ARG), /* 16 */ UOPTION_DEF("omitCollationRules", 'R', UOPT_NO_ARG),/* 17 */ 
UOPTION_DEF("formatVersion", '\x01', UOPT_REQUIRES_ARG),/* 18 */ UOPTION_DEF("writePoolBundle", '\x01', UOPT_NO_ARG),/* 19 */ UOPTION_DEF("usePoolBundle", '\x01', UOPT_OPTIONAL_ARG),/* 20 */ UOPTION_DEF("includeUnihanColl", '\x01', UOPT_NO_ARG),/* 21 */ /* temporary, don't display in usage info */ }; static UBool write_java = FALSE; static UBool write_xliff = FALSE; static const char* outputEnc =""; static struct SRBRoot *newPoolBundle = NULL; /* TODO: separate header file for ResFile? */ typedef struct ResFile { uint8_t *fBytes; const int32_t *fIndexes; const char *fKeys; int32_t fKeysLength; int32_t fKeysCount; int32_t fChecksum; } ResFile; static ResFile poolBundle = { NULL }; /*added by Jing*/ static const char* language = NULL; static const char* xliffOutputFileName = NULL; int main(int argc, char* argv[]) { UErrorCode status = U_ZERO_ERROR; const char *arg = NULL; const char *outputDir = NULL; /* NULL = no output directory, use current */ const char *inputDir = NULL; const char *encoding = ""; int i; UBool illegalArg = FALSE; U_MAIN_INIT_ARGS(argc, argv); options[JAVA_PACKAGE].value = "com.ibm.icu.impl.data"; options[BUNDLE_NAME].value = "LocaleElements"; argc = u_parseArgs(argc, argv, (int32_t)(sizeof(options)/sizeof(options[0])), options); /* error handling, printing usage message */ if(argc<0) { fprintf(stderr, "%s: error in command line argument \"%s\"\n", argv[0], argv[-argc]); } else if(argc<2) { argc = -1; } if(options[WRITE_POOL_BUNDLE].doesOccur && options[USE_POOL_BUNDLE].doesOccur) { fprintf(stderr, "%s: cannot combine --writePoolBundle and --usePoolBundle\n", argv[0]); argc = -1; } if(options[FORMAT_VERSION].doesOccur) { const char *s = options[FORMAT_VERSION].value; if(uprv_strlen(s) != 1 || (s[0] != '1' && s[0] != '2')) { fprintf(stderr, "%s: unsupported --formatVersion %s\n", argv[0], s); argc = -1; } else if(s[0] == '1' && (options[WRITE_POOL_BUNDLE].doesOccur || options[USE_POOL_BUNDLE].doesOccur) ) { fprintf(stderr, "%s: cannot combine 
--formatVersion 1 with --writePoolBundle or --usePoolBundle\n", argv[0]); argc = -1; } else { setFormatVersion(s[0] - '0'); } } if(options[VERSION].doesOccur) { fprintf(stderr, "%s version %s (ICU version %s).\n" "%s\n", argv[0], GENRB_VERSION, U_ICU_VERSION, U_COPYRIGHT_STRING); return U_ZERO_ERROR; } if(argc<0) { illegalArg = TRUE; } else if((options[JAVA_PACKAGE].doesOccur || options[BUNDLE_NAME].doesOccur) && !options[WRITE_JAVA].doesOccur) { fprintf(stderr, "%s error: command line argument --java-package or --bundle-name " "without --write-java\n", argv[0]); illegalArg = TRUE; } if(illegalArg || options[HELP1].doesOccur || options[HELP2].doesOccur) { /* * Broken into chunks because the C89 standard says the minimum * required supported string length is 509 bytes. */ fprintf(stderr, "Usage: %s [OPTIONS] [FILES]\n" "\tReads the list of resource bundle source files and creates\n" "\tbinary version of resource bundles (.res files)\n", argv[0]); fprintf(stderr, "Options:\n" "\t-h or -? or --help this usage text\n" "\t-q or --quiet do not display warnings\n" "\t-v or --verbose print extra information when processing files\n" "\t-V or --version prints out version number and exits\n" "\t-c or --copyright include copyright notice\n"); fprintf(stderr, "\t-e or --encoding encoding of source files\n" "\t-d of --destdir destination directory, followed by the path, defaults to %s\n" "\t-s or --sourcedir source directory for files followed by path, defaults to %s\n" "\t-i or --icudatadir directory for locating any needed intermediate data files,\n" "\t followed by path, defaults to %s\n", u_getDataDirectory(), u_getDataDirectory(), u_getDataDirectory()); fprintf(stderr, "\t-j or --write-java write a Java ListResourceBundle for ICU4J, followed by optional encoding\n" "\t defaults to ASCII and \\uXXXX format.\n" "\t --java-package For --write-java: package name for writing the ListResourceBundle,\n" "\t defaults to com.ibm.icu.impl.data\n"); fprintf(stderr, "\t-b or 
--bundle-name For --write-java: root resource bundle name for writing the ListResourceBundle,\n" "\t defaults to LocaleElements\n" "\t-x or --write-xliff write an XLIFF file for the resource bundle. Followed by\n" "\t an optional output file name.\n" "\t-k or --strict use pedantic parsing of syntax\n" /*added by Jing*/ "\t-l or --language for XLIFF: language code compliant with BCP 47.\n"); fprintf(stderr, "\t-C or --noBinaryCollation do not generate binary collation image;\n" "\t makes .res file smaller but collator instantiation much slower;\n" "\t maintains ability to get tailoring rules\n" "\t-R or --omitCollationRules do not include collation (tailoring) rules;\n" "\t makes .res file smaller and maintains collator instantiation speed\n" "\t but tailoring rules will not be available (they are rarely used)\n"); fprintf(stderr, "\t --formatVersion write a .res file compatible with the requested formatVersion (single digit);\n" "\t for example, --formatVersion 1\n"); fprintf(stderr, "\t --writePoolBundle write a pool.res file with all of the keys of all input bundles\n" "\t --usePoolBundle [path-to-pool.res] point to keys from the pool.res keys pool bundle if they are available there;\n" "\t makes .res files smaller but dependent on the pool bundle\n" "\t (--writePoolBundle and --usePoolBundle cannot be combined)\n"); return illegalArg ? 
U_ILLEGAL_ARGUMENT_ERROR : U_ZERO_ERROR; } if(options[VERBOSE].doesOccur) { setVerbose(TRUE); } if(options[QUIET].doesOccur) { setShowWarning(FALSE); } if(options[STRICT].doesOccur) { setStrict(TRUE); } if(options[COPYRIGHT].doesOccur){ setIncludeCopyright(TRUE); } if(options[SOURCEDIR].doesOccur) { inputDir = options[SOURCEDIR].value; } if(options[DESTDIR].doesOccur) { outputDir = options[DESTDIR].value; } if(options[ENCODING].doesOccur) { encoding = options[ENCODING].value; } if(options[ICUDATADIR].doesOccur) { u_setDataDirectory(options[ICUDATADIR].value); } /* Initialize ICU */ u_init(&status); if (U_FAILURE(status) && status != U_FILE_ACCESS_ERROR) { /* Note: u_init() will try to open ICU property data. * failures here are expected when building ICU from scratch. * ignore them. */ fprintf(stderr, "%s: can not initialize ICU. status = %s\n", argv[0], u_errorName(status)); exit(1); } status = U_ZERO_ERROR; if(options[WRITE_JAVA].doesOccur) { write_java = TRUE; outputEnc = options[WRITE_JAVA].value; } if(options[WRITE_XLIFF].doesOccur) { write_xliff = TRUE; if(options[WRITE_XLIFF].value != NULL){ xliffOutputFileName = options[WRITE_XLIFF].value; } } initParser(); /*added by Jing*/ if(options[LANGUAGE].doesOccur) { language = options[LANGUAGE].value; } if(options[WRITE_POOL_BUNDLE].doesOccur) { newPoolBundle = bundle_open(NULL, TRUE, &status); if(U_FAILURE(status)) { fprintf(stderr, "unable to create an empty bundle for the pool keys: %s\n", u_errorName(status)); return status; } else { const char *poolResName = "pool.res"; char *nameWithoutSuffix = uprv_malloc(uprv_strlen(poolResName) + 1); if (nameWithoutSuffix == NULL) { fprintf(stderr, "out of memory error\n"); return U_MEMORY_ALLOCATION_ERROR; } uprv_strcpy(nameWithoutSuffix, poolResName); *uprv_strrchr(nameWithoutSuffix, '.') = 0; newPoolBundle->fLocale = nameWithoutSuffix; } } if(options[USE_POOL_BUNDLE].doesOccur) { const char *poolResName = "pool.res"; FileStream *poolFile; int32_t poolFileSize; int32_t 
indexLength; /* * TODO: Consolidate inputDir/filename handling from main() and processFile() * into a common function, and use it here as well. * Try to create toolutil functions for dealing with dir/filenames and * loading ICU data files without udata_open(). * Share code with icupkg? * Also, make_res_filename() seems to be unused. Review and remove. */ if (options[USE_POOL_BUNDLE].value!=NULL) { uprv_strcpy(theCurrentFileName, options[USE_POOL_BUNDLE].value); uprv_strcat(theCurrentFileName, U_FILE_SEP_STRING); } else if (inputDir) { uprv_strcpy(theCurrentFileName, inputDir); uprv_strcat(theCurrentFileName, U_FILE_SEP_STRING); } else { *theCurrentFileName = 0; } uprv_strcat(theCurrentFileName, poolResName); poolFile = T_FileStream_open(theCurrentFileName, "rb"); if (poolFile == NULL) { fprintf(stderr, "unable to open pool bundle file %s\n", theCurrentFileName); return 1; } poolFileSize = T_FileStream_size(poolFile); if (poolFileSize < 32) { fprintf(stderr, "the pool bundle file %s is too small\n", theCurrentFileName); return 1; } poolBundle.fBytes = (uint8_t *)uprv_malloc((poolFileSize + 15) & ~15); if (poolFileSize > 0 && poolBundle.fBytes == NULL) { fprintf(stderr, "unable to allocate memory for the pool bundle file %s\n", theCurrentFileName); return U_MEMORY_ALLOCATION_ERROR; } else { UDataSwapper *ds; const DataHeader *header; int32_t bytesRead = T_FileStream_read(poolFile, poolBundle.fBytes, poolFileSize); int32_t keysBottom; if (bytesRead != poolFileSize) { fprintf(stderr, "unable to read the pool bundle file %s\n", theCurrentFileName); return 1; } /* * Swap the pool bundle so that a single checked-in file can be used. * The swapper functions also test that the data looks like * a well-formed .res file. 
*/ ds = udata_openSwapperForInputData(poolBundle.fBytes, bytesRead, U_IS_BIG_ENDIAN, U_CHARSET_FAMILY, &status); if (U_FAILURE(status)) { fprintf(stderr, "udata_openSwapperForInputData(pool bundle %s) failed: %s\n", theCurrentFileName, u_errorName(status)); return status; } ures_swap(ds, poolBundle.fBytes, bytesRead, poolBundle.fBytes, &status); udata_closeSwapper(ds); if (U_FAILURE(status)) { fprintf(stderr, "ures_swap(pool bundle %s) failed: %s\n", theCurrentFileName, u_errorName(status)); return status; } header = (const DataHeader *)poolBundle.fBytes; if (header->info.formatVersion[0]!=2) { fprintf(stderr, "invalid format of pool bundle file %s\n", theCurrentFileName); return U_INVALID_FORMAT_ERROR; } poolBundle.fKeys = (const char *)header + header->dataHeader.headerSize; poolBundle.fIndexes = (const int32_t *)poolBundle.fKeys + 1; indexLength = poolBundle.fIndexes[URES_INDEX_LENGTH] & 0xff; if (indexLength <= URES_INDEX_POOL_CHECKSUM) { fprintf(stderr, "insufficient indexes[] in pool bundle file %s\n", theCurrentFileName); return U_INVALID_FORMAT_ERROR; } keysBottom = (1 + indexLength) * 4; poolBundle.fKeys += keysBottom; poolBundle.fKeysLength = (poolBundle.fIndexes[URES_INDEX_KEYS_TOP] * 4) - keysBottom; poolBundle.fChecksum = poolBundle.fIndexes[URES_INDEX_POOL_CHECKSUM]; } for (i = 0; i < poolBundle.fKeysLength; ++i) { if (poolBundle.fKeys[i] == 0) { ++poolBundle.fKeysCount; } } T_FileStream_close(poolFile); setUsePoolBundle(TRUE); } if(options[INCLUDE_UNIHAN_COLL].doesOccur) { puts("genrb option --includeUnihanColl ignored: \n" "CLDR 26/ICU 54 unihan data is small, except\n" "the ucadata-unihan.icu version of the collation root data\n" "is about 300kB larger than the ucadata-implicithan.icu version."); } if((argc-1)!=1) { printf("genrb number of files: %d\n", argc - 1); } /* generate the binary files */ for(i = 1; i < argc; ++i) { status = U_ZERO_ERROR; arg = getLongPathname(argv[i]); if (inputDir) { uprv_strcpy(theCurrentFileName, inputDir); 
uprv_strcat(theCurrentFileName, U_FILE_SEP_STRING); } else { *theCurrentFileName = 0; } uprv_strcat(theCurrentFileName, arg); if (isVerbose()) { printf("Processing file \"%s\"\n", theCurrentFileName); } processFile(arg, encoding, inputDir, outputDir, NULL, options[NO_BINARY_COLLATION].doesOccur, &status); } uprv_free(poolBundle.fBytes); if(options[WRITE_POOL_BUNDLE].doesOccur) { char outputFileName[256]; bundle_write(newPoolBundle, outputDir, NULL, outputFileName, sizeof(outputFileName), &status); bundle_close(newPoolBundle, &status); if(U_FAILURE(status)) { fprintf(stderr, "unable to write the pool bundle: %s\n", u_errorName(status)); } } u_cleanup(); /* Dont return warnings as a failure */ if (U_SUCCESS(status)) { return 0; } return status; } /* Process a file */ void processFile( const char *filename, const char *cp, const char *inputDir, const char *outputDir, const char *packageName, UBool omitBinaryCollation, UErrorCode *status) { /*FileStream *in = NULL;*/ struct SRBRoot *data = NULL; UCHARBUF *ucbuf = NULL; char *rbname = NULL; char *openFileName = NULL; char *inputDirBuf = NULL; char outputFileName[256]; int32_t dirlen = 0; int32_t filelen = 0; if (status==NULL || U_FAILURE(*status)) { return; } if(filename==NULL){ *status=U_ILLEGAL_ARGUMENT_ERROR; return; }else{ filelen = (int32_t)uprv_strlen(filename); } if(inputDir == NULL) { const char *filenameBegin = uprv_strrchr(filename, U_FILE_SEP_CHAR); openFileName = (char *) uprv_malloc(dirlen + filelen + 2); openFileName[0] = '\0'; if (filenameBegin != NULL) { /* * When a filename ../../../data/root.txt is specified, * we presume that the input directory is ../../../data * This is very important when the resource file includes * another file, like UCARules.txt or thaidict.brk. 
*/ int32_t filenameSize = (int32_t)(filenameBegin - filename + 1); inputDirBuf = uprv_strncpy((char *)uprv_malloc(filenameSize), filename, filenameSize); /* test for NULL */ if(inputDirBuf == NULL) { *status = U_MEMORY_ALLOCATION_ERROR; goto finish; } inputDirBuf[filenameSize - 1] = 0; inputDir = inputDirBuf; dirlen = (int32_t)uprv_strlen(inputDir); } }else{ dirlen = (int32_t)uprv_strlen(inputDir); if(inputDir[dirlen-1] != U_FILE_SEP_CHAR) { openFileName = (char *) uprv_malloc(dirlen + filelen + 2); /* test for NULL */ if(openFileName == NULL) { *status = U_MEMORY_ALLOCATION_ERROR; goto finish; } openFileName[0] = '\0'; /* * append the input dir to openFileName if the first char in * filename is not file seperation char and the last char input directory is not '.'. * This is to support : * genrb -s. /home/icu/data * genrb -s. icu/data * The user cannot mix notations like * genrb -s. /icu/data --- the absolute path specified. -s redundant * user should use * genrb -s. icu/data --- start from CWD and look in icu/data dir */ if( (filename[0] != U_FILE_SEP_CHAR) && (inputDir[dirlen-1] !='.')){ uprv_strcpy(openFileName, inputDir); openFileName[dirlen] = U_FILE_SEP_CHAR; } openFileName[dirlen + 1] = '\0'; } else { openFileName = (char *) uprv_malloc(dirlen + filelen + 1); /* test for NULL */ if(openFileName == NULL) { *status = U_MEMORY_ALLOCATION_ERROR; goto finish; } uprv_strcpy(openFileName, inputDir); } } uprv_strcat(openFileName, filename); ucbuf = ucbuf_open(openFileName, &cp,getShowWarning(),TRUE, status); if(*status == U_FILE_ACCESS_ERROR) { fprintf(stderr, "couldn't open file %s\n", openFileName == NULL ? filename : openFileName); goto finish; } if (ucbuf == NULL || U_FAILURE(*status)) { fprintf(stderr, "An error occured processing file %s. Error: %s\n", openFileName == NULL ? filename : openFileName,u_errorName(*status)); goto finish; } /* auto detected popular encodings? 
*/ if (cp!=NULL && isVerbose()) { printf("autodetected encoding %s\n", cp); } /* Parse the data into an SRBRoot */ data = parse(ucbuf, inputDir, outputDir, filename, !omitBinaryCollation, options[NO_COLLATION_RULES].doesOccur, status); if (data == NULL || U_FAILURE(*status)) { fprintf(stderr, "couldn't parse the file %s. Error:%s\n", filename,u_errorName(*status)); goto finish; } if(options[WRITE_POOL_BUNDLE].doesOccur) { int32_t newKeysLength; const char *newKeys, *newKeysLimit; bundle_compactKeys(data, status); newKeys = bundle_getKeyBytes(data, &newKeysLength); bundle_addKeyBytes(newPoolBundle, newKeys, newKeysLength, status); if(U_FAILURE(*status)) { fprintf(stderr, "bundle_compactKeys(%s) or bundle_getKeyBytes() failed: %s\n", filename, u_errorName(*status)); goto finish; } /* count the number of just-added key strings */ for(newKeysLimit = newKeys + newKeysLength; newKeys < newKeysLimit; ++newKeys) { if(*newKeys == 0) { ++newPoolBundle->fKeysCount; } } } if(options[USE_POOL_BUNDLE].doesOccur) { data->fPoolBundleKeys = poolBundle.fKeys; data->fPoolBundleKeysLength = poolBundle.fKeysLength; data->fPoolBundleKeysCount = poolBundle.fKeysCount; data->fPoolChecksum = poolBundle.fChecksum; } /* Determine the target rb filename */ rbname = make_res_filename(filename, outputDir, packageName, status); if(U_FAILURE(*status)) { fprintf(stderr, "couldn't make the res fileName for bundle %s. 
Error:%s\n", filename,u_errorName(*status)); goto finish; } if(write_java== TRUE){ bundle_write_java(data,outputDir,outputEnc, outputFileName, sizeof(outputFileName), options[JAVA_PACKAGE].value, options[BUNDLE_NAME].value, status); }else if(write_xliff ==TRUE){ bundle_write_xml(data,outputDir,outputEnc, filename, outputFileName, sizeof(outputFileName),language, xliffOutputFileName,status); }else{ /* Write the data to the file */ bundle_write(data, outputDir, packageName, outputFileName, sizeof(outputFileName), status); } if (U_FAILURE(*status)) { fprintf(stderr, "couldn't write bundle %s. Error:%s\n", outputFileName,u_errorName(*status)); } bundle_close(data, status); finish: if (inputDirBuf != NULL) { uprv_free(inputDirBuf); } if (openFileName != NULL) { uprv_free(openFileName); } if(ucbuf) { ucbuf_close(ucbuf); } if (rbname) { uprv_free(rbname); } } /* Generate the target .res file name from the input file name */ static char* make_res_filename(const char *filename, const char *outputDir, const char *packageName, UErrorCode *status) { char *basename; char *dirname; char *resName; int32_t pkgLen = 0; /* length of package prefix */ if (U_FAILURE(*status)) { return 0; } if(packageName != NULL) { pkgLen = (int32_t)(1 + uprv_strlen(packageName)); } /* setup */ basename = dirname = resName = 0; /* determine basename, and compiled file names */ basename = (char*) uprv_malloc(sizeof(char) * (uprv_strlen(filename) + 1)); if(basename == 0) { *status = U_MEMORY_ALLOCATION_ERROR; goto finish; } get_basename(basename, filename); dirname = (char*) uprv_malloc(sizeof(char) * (uprv_strlen(filename) + 1)); if(dirname == 0) { *status = U_MEMORY_ALLOCATION_ERROR; goto finish; } get_dirname(dirname, filename); if (outputDir == NULL) { /* output in same dir as .txt */ resName = (char*) uprv_malloc(sizeof(char) * (uprv_strlen(dirname) + pkgLen + uprv_strlen(basename) + uprv_strlen(RES_SUFFIX) + 8)); if(resName == 0) { *status = U_MEMORY_ALLOCATION_ERROR; goto finish; } 
uprv_strcpy(resName, dirname); if(packageName != NULL) { uprv_strcat(resName, packageName); uprv_strcat(resName, "_"); } uprv_strcat(resName, basename); } else { int32_t dirlen = (int32_t)uprv_strlen(outputDir); int32_t basenamelen = (int32_t)uprv_strlen(basename); resName = (char*) uprv_malloc(sizeof(char) * (dirlen + pkgLen + basenamelen + 8)); if (resName == NULL) { *status = U_MEMORY_ALLOCATION_ERROR; goto finish; } uprv_strcpy(resName, outputDir); if(outputDir[dirlen] != U_FILE_SEP_CHAR) { resName[dirlen] = U_FILE_SEP_CHAR; resName[dirlen + 1] = '\0'; } if(packageName != NULL) { uprv_strcat(resName, packageName); uprv_strcat(resName, "_"); } uprv_strcat(resName, basename); } finish: uprv_free(basename); uprv_free(dirname); return resName; } /* * Local Variables: * indent-tabs-mode: nil * End: */
mit
jackalchen/WinObjC
deps/3rdparty/cairolegacy/src/cairo-mutex.c
196
2692
/* cairo - a vector graphics library with display and print output * * Copyright © 2007 Mathias Hasselmann * * This library is free software; you can redistribute it and/or * modify it either under the terms of the GNU Lesser General Public * License version 2.1 as published by the Free Software Foundation * (the "LGPL") or, at your option, under the terms of the Mozilla * Public License Version 1.1 (the "MPL"). If you do not alter this * notice, a recipient may use your version of this file under either * the MPL or the LGPL. * * You should have received a copy of the LGPL along with this library * in the file COPYING-LGPL-2.1; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA * You should have received a copy of the MPL along with this library * in the file COPYING-MPL-1.1 * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY * OF ANY KIND, either express or implied. See the LGPL or the MPL for * the specific language governing rights and limitations. * * The Original Code is the cairo graphics library. 
* * Contributor(s): * Mathias Hasselmann <mathias.hasselmann@gmx.de> */ #include "cairoint.h" #include "cairo-mutex-private.h" #define CAIRO_MUTEX_DECLARE(mutex) cairo_mutex_t mutex = CAIRO_MUTEX_NIL_INITIALIZER; #include "cairo-mutex-list-private.h" #undef CAIRO_MUTEX_DECLARE #if _CAIRO_MUTEX_IMPL_USE_STATIC_INITIALIZER || _CAIRO_MUTEX_IMPL_USE_STATIC_FINALIZER # if _CAIRO_MUTEX_IMPL_USE_STATIC_INITIALIZER # define _CAIRO_MUTEX_IMPL_INITIALIZED_DEFAULT_VALUE FALSE # else # define _CAIRO_MUTEX_IMPL_INITIALIZED_DEFAULT_VALUE TRUE # endif cairo_bool_t _cairo_mutex_initialized = _CAIRO_MUTEX_IMPL_INITIALIZED_DEFAULT_VALUE; # undef _CAIRO_MUTEX_IMPL_INITIALIZED_DEFAULT_VALUE #endif #if _CAIRO_MUTEX_IMPL_USE_STATIC_INITIALIZER void _cairo_mutex_initialize (void) { if (_cairo_mutex_initialized) return; _cairo_mutex_initialized = TRUE; #define CAIRO_MUTEX_DECLARE(mutex) CAIRO_MUTEX_INIT (mutex); #include "cairo-mutex-list-private.h" #undef CAIRO_MUTEX_DECLARE } #endif #if _CAIRO_MUTEX_IMPL_USE_STATIC_FINALIZER void _cairo_mutex_finalize (void) { if (!_cairo_mutex_initialized) return; _cairo_mutex_initialized = FALSE; #define CAIRO_MUTEX_DECLARE(mutex) CAIRO_MUTEX_FINI (mutex); #include "cairo-mutex-list-private.h" #undef CAIRO_MUTEX_DECLARE } #endif
mit
homerunlathecut/homerunlathecut.github.io
node_modules/node-sass/src/sass_context_wrapper.cpp
1253
1683
#include "sass_context_wrapper.h" extern "C" { using namespace std; void compile_it(uv_work_t* req) { sass_context_wrapper* ctx_w = (sass_context_wrapper*)req->data; if (ctx_w->dctx) { compile_data(ctx_w->dctx); } else if (ctx_w->fctx) { compile_file(ctx_w->fctx); } } void compile_data(struct Sass_Data_Context* dctx) { sass_compile_data_context(dctx); } void compile_file(struct Sass_File_Context* fctx) { sass_compile_file_context(fctx); } sass_context_wrapper* sass_make_context_wrapper() { return (sass_context_wrapper*)calloc(1, sizeof(sass_context_wrapper)); } void sass_free_context_wrapper(sass_context_wrapper* ctx_w) { if (ctx_w->dctx) { sass_delete_data_context(ctx_w->dctx); } else if (ctx_w->fctx) { sass_delete_file_context(ctx_w->fctx); } delete ctx_w->error_callback; delete ctx_w->success_callback; ctx_w->result.Reset(); free(ctx_w->include_path); free(ctx_w->linefeed); free(ctx_w->out_file); free(ctx_w->source_map); free(ctx_w->source_map_root); free(ctx_w->indent); std::vector<CustomImporterBridge *>::iterator imp_it = ctx_w->importer_bridges.begin(); while (imp_it != ctx_w->importer_bridges.end()) { CustomImporterBridge* p = *imp_it; imp_it = ctx_w->importer_bridges.erase(imp_it); delete p; } std::vector<CustomFunctionBridge *>::iterator func_it = ctx_w->function_bridges.begin(); while (func_it != ctx_w->function_bridges.end()) { CustomFunctionBridge* p = *func_it; func_it = ctx_w->function_bridges.erase(func_it); delete p; } free(ctx_w); } }
mit
windofthesky/WinObjC
deps/3rdparty/icu/icu/source/common/normlzr.cpp
244
14795
/* ************************************************************************* * COPYRIGHT: * Copyright (c) 1996-2012, International Business Machines Corporation and * others. All Rights Reserved. ************************************************************************* */ #include "unicode/utypes.h" #if !UCONFIG_NO_NORMALIZATION #include "unicode/uniset.h" #include "unicode/unistr.h" #include "unicode/chariter.h" #include "unicode/schriter.h" #include "unicode/uchriter.h" #include "unicode/normlzr.h" #include "unicode/utf16.h" #include "cmemory.h" #include "normalizer2impl.h" #include "uprops.h" // for uniset_getUnicode32Instance() U_NAMESPACE_BEGIN UOBJECT_DEFINE_RTTI_IMPLEMENTATION(Normalizer) //------------------------------------------------------------------------- // Constructors and other boilerplate //------------------------------------------------------------------------- Normalizer::Normalizer(const UnicodeString& str, UNormalizationMode mode) : UObject(), fFilteredNorm2(NULL), fNorm2(NULL), fUMode(mode), fOptions(0), text(new StringCharacterIterator(str)), currentIndex(0), nextIndex(0), buffer(), bufferPos(0) { init(); } Normalizer::Normalizer(const UChar *str, int32_t length, UNormalizationMode mode) : UObject(), fFilteredNorm2(NULL), fNorm2(NULL), fUMode(mode), fOptions(0), text(new UCharCharacterIterator(str, length)), currentIndex(0), nextIndex(0), buffer(), bufferPos(0) { init(); } Normalizer::Normalizer(const CharacterIterator& iter, UNormalizationMode mode) : UObject(), fFilteredNorm2(NULL), fNorm2(NULL), fUMode(mode), fOptions(0), text(iter.clone()), currentIndex(0), nextIndex(0), buffer(), bufferPos(0) { init(); } Normalizer::Normalizer(const Normalizer &copy) : UObject(copy), fFilteredNorm2(NULL), fNorm2(NULL), fUMode(copy.fUMode), fOptions(copy.fOptions), text(copy.text->clone()), currentIndex(copy.currentIndex), nextIndex(copy.nextIndex), buffer(copy.buffer), bufferPos(copy.bufferPos) { init(); } void Normalizer::init() { UErrorCode 
errorCode=U_ZERO_ERROR; fNorm2=Normalizer2Factory::getInstance(fUMode, errorCode); if(fOptions&UNORM_UNICODE_3_2) { delete fFilteredNorm2; fNorm2=fFilteredNorm2= new FilteredNormalizer2(*fNorm2, *uniset_getUnicode32Instance(errorCode)); } if(U_FAILURE(errorCode)) { errorCode=U_ZERO_ERROR; fNorm2=Normalizer2Factory::getNoopInstance(errorCode); } } Normalizer::~Normalizer() { delete fFilteredNorm2; delete text; } Normalizer* Normalizer::clone() const { return new Normalizer(*this); } /** * Generates a hash code for this iterator. */ int32_t Normalizer::hashCode() const { return text->hashCode() + fUMode + fOptions + buffer.hashCode() + bufferPos + currentIndex + nextIndex; } UBool Normalizer::operator==(const Normalizer& that) const { return this==&that || (fUMode==that.fUMode && fOptions==that.fOptions && *text==*that.text && buffer==that.buffer && bufferPos==that.bufferPos && nextIndex==that.nextIndex); } //------------------------------------------------------------------------- // Static utility methods //------------------------------------------------------------------------- void U_EXPORT2 Normalizer::normalize(const UnicodeString& source, UNormalizationMode mode, int32_t options, UnicodeString& result, UErrorCode &status) { if(source.isBogus() || U_FAILURE(status)) { result.setToBogus(); if(U_SUCCESS(status)) { status=U_ILLEGAL_ARGUMENT_ERROR; } } else { UnicodeString localDest; UnicodeString *dest; if(&source!=&result) { dest=&result; } else { // the source and result strings are the same object, use a temporary one dest=&localDest; } const Normalizer2 *n2=Normalizer2Factory::getInstance(mode, status); if(U_SUCCESS(status)) { if(options&UNORM_UNICODE_3_2) { FilteredNormalizer2(*n2, *uniset_getUnicode32Instance(status)). 
normalize(source, *dest, status); } else { n2->normalize(source, *dest, status); } } if(dest==&localDest && U_SUCCESS(status)) { result=*dest; } } } void U_EXPORT2 Normalizer::compose(const UnicodeString& source, UBool compat, int32_t options, UnicodeString& result, UErrorCode &status) { normalize(source, compat ? UNORM_NFKC : UNORM_NFC, options, result, status); } void U_EXPORT2 Normalizer::decompose(const UnicodeString& source, UBool compat, int32_t options, UnicodeString& result, UErrorCode &status) { normalize(source, compat ? UNORM_NFKD : UNORM_NFD, options, result, status); } UNormalizationCheckResult Normalizer::quickCheck(const UnicodeString& source, UNormalizationMode mode, int32_t options, UErrorCode &status) { const Normalizer2 *n2=Normalizer2Factory::getInstance(mode, status); if(U_SUCCESS(status)) { if(options&UNORM_UNICODE_3_2) { return FilteredNormalizer2(*n2, *uniset_getUnicode32Instance(status)). quickCheck(source, status); } else { return n2->quickCheck(source, status); } } else { return UNORM_MAYBE; } } UBool Normalizer::isNormalized(const UnicodeString& source, UNormalizationMode mode, int32_t options, UErrorCode &status) { const Normalizer2 *n2=Normalizer2Factory::getInstance(mode, status); if(U_SUCCESS(status)) { if(options&UNORM_UNICODE_3_2) { return FilteredNormalizer2(*n2, *uniset_getUnicode32Instance(status)). 
isNormalized(source, status); } else { return n2->isNormalized(source, status); } } else { return FALSE; } } UnicodeString & U_EXPORT2 Normalizer::concatenate(const UnicodeString &left, const UnicodeString &right, UnicodeString &result, UNormalizationMode mode, int32_t options, UErrorCode &errorCode) { if(left.isBogus() || right.isBogus() || U_FAILURE(errorCode)) { result.setToBogus(); if(U_SUCCESS(errorCode)) { errorCode=U_ILLEGAL_ARGUMENT_ERROR; } } else { UnicodeString localDest; UnicodeString *dest; if(&right!=&result) { dest=&result; } else { // the right and result strings are the same object, use a temporary one dest=&localDest; } *dest=left; const Normalizer2 *n2=Normalizer2Factory::getInstance(mode, errorCode); if(U_SUCCESS(errorCode)) { if(options&UNORM_UNICODE_3_2) { FilteredNormalizer2(*n2, *uniset_getUnicode32Instance(errorCode)). append(*dest, right, errorCode); } else { n2->append(*dest, right, errorCode); } } if(dest==&localDest && U_SUCCESS(errorCode)) { result=*dest; } } return result; } //------------------------------------------------------------------------- // Iteration API //------------------------------------------------------------------------- /** * Return the current character in the normalized text. */ UChar32 Normalizer::current() { if(bufferPos<buffer.length() || nextNormalize()) { return buffer.char32At(bufferPos); } else { return DONE; } } /** * Return the next character in the normalized text and advance * the iteration position by one. If the end * of the text has already been reached, {@link #DONE} is returned. */ UChar32 Normalizer::next() { if(bufferPos<buffer.length() || nextNormalize()) { UChar32 c=buffer.char32At(bufferPos); bufferPos+=U16_LENGTH(c); return c; } else { return DONE; } } /** * Return the previous character in the normalized text and decrement * the iteration position by one. If the beginning * of the text has already been reached, {@link #DONE} is returned. 
*/ UChar32 Normalizer::previous() { if(bufferPos>0 || previousNormalize()) { UChar32 c=buffer.char32At(bufferPos-1); bufferPos-=U16_LENGTH(c); return c; } else { return DONE; } } void Normalizer::reset() { currentIndex=nextIndex=text->setToStart(); clearBuffer(); } void Normalizer::setIndexOnly(int32_t index) { text->setIndex(index); // pins index currentIndex=nextIndex=text->getIndex(); clearBuffer(); } /** * Return the first character in the normalized text. This resets * the <tt>Normalizer's</tt> position to the beginning of the text. */ UChar32 Normalizer::first() { reset(); return next(); } /** * Return the last character in the normalized text. This resets * the <tt>Normalizer's</tt> position to be just before the * the input text corresponding to that normalized character. */ UChar32 Normalizer::last() { currentIndex=nextIndex=text->setToEnd(); clearBuffer(); return previous(); } /** * Retrieve the current iteration position in the input text that is * being normalized. This method is useful in applications such as * searching, where you need to be able to determine the position in * the input text that corresponds to a given normalized output character. * <p> * <b>Note:</b> This method sets the position in the <em>input</em>, while * {@link #next} and {@link #previous} iterate through characters in the * <em>output</em>. This means that there is not necessarily a one-to-one * correspondence between characters returned by <tt>next</tt> and * <tt>previous</tt> and the indices passed to and returned from * <tt>setIndex</tt> and {@link #getIndex}. * */ int32_t Normalizer::getIndex() const { if(bufferPos<buffer.length()) { return currentIndex; } else { return nextIndex; } } /** * Retrieve the index of the start of the input text. This is the begin index * of the <tt>CharacterIterator</tt> or the start (i.e. 
0) of the <tt>String</tt> * over which this <tt>Normalizer</tt> is iterating */ int32_t Normalizer::startIndex() const { return text->startIndex(); } /** * Retrieve the index of the end of the input text. This is the end index * of the <tt>CharacterIterator</tt> or the length of the <tt>String</tt> * over which this <tt>Normalizer</tt> is iterating */ int32_t Normalizer::endIndex() const { return text->endIndex(); } //------------------------------------------------------------------------- // Property access methods //------------------------------------------------------------------------- void Normalizer::setMode(UNormalizationMode newMode) { fUMode = newMode; init(); } UNormalizationMode Normalizer::getUMode() const { return fUMode; } void Normalizer::setOption(int32_t option, UBool value) { if (value) { fOptions |= option; } else { fOptions &= (~option); } init(); } UBool Normalizer::getOption(int32_t option) const { return (fOptions & option) != 0; } /** * Set the input text over which this <tt>Normalizer</tt> will iterate. * The iteration position is set to the beginning of the input text. */ void Normalizer::setText(const UnicodeString& newText, UErrorCode &status) { if (U_FAILURE(status)) { return; } CharacterIterator *newIter = new StringCharacterIterator(newText); if (newIter == NULL) { status = U_MEMORY_ALLOCATION_ERROR; return; } delete text; text = newIter; reset(); } /** * Set the input text over which this <tt>Normalizer</tt> will iterate. * The iteration position is set to the beginning of the string. 
*/ void Normalizer::setText(const CharacterIterator& newText, UErrorCode &status) { if (U_FAILURE(status)) { return; } CharacterIterator *newIter = newText.clone(); if (newIter == NULL) { status = U_MEMORY_ALLOCATION_ERROR; return; } delete text; text = newIter; reset(); } void Normalizer::setText(const UChar* newText, int32_t length, UErrorCode &status) { if (U_FAILURE(status)) { return; } CharacterIterator *newIter = new UCharCharacterIterator(newText, length); if (newIter == NULL) { status = U_MEMORY_ALLOCATION_ERROR; return; } delete text; text = newIter; reset(); } /** * Copies the text under iteration into the UnicodeString referred to by "result". * @param result Receives a copy of the text under iteration. */ void Normalizer::getText(UnicodeString& result) { text->getText(result); } //------------------------------------------------------------------------- // Private utility methods //------------------------------------------------------------------------- void Normalizer::clearBuffer() { buffer.remove(); bufferPos=0; } UBool Normalizer::nextNormalize() { clearBuffer(); currentIndex=nextIndex; text->setIndex(nextIndex); if(!text->hasNext()) { return FALSE; } // Skip at least one character so we make progress. 
UnicodeString segment(text->next32PostInc()); while(text->hasNext()) { UChar32 c; if(fNorm2->hasBoundaryBefore(c=text->next32PostInc())) { text->move32(-1, CharacterIterator::kCurrent); break; } segment.append(c); } nextIndex=text->getIndex(); UErrorCode errorCode=U_ZERO_ERROR; fNorm2->normalize(segment, buffer, errorCode); return U_SUCCESS(errorCode) && !buffer.isEmpty(); } UBool Normalizer::previousNormalize() { clearBuffer(); nextIndex=currentIndex; text->setIndex(currentIndex); if(!text->hasPrevious()) { return FALSE; } UnicodeString segment; while(text->hasPrevious()) { UChar32 c=text->previous32(); segment.insert(0, c); if(fNorm2->hasBoundaryBefore(c)) { break; } } currentIndex=text->getIndex(); UErrorCode errorCode=U_ZERO_ERROR; fNorm2->normalize(segment, buffer, errorCode); bufferPos=buffer.length(); return U_SUCCESS(errorCode) && !buffer.isEmpty(); } U_NAMESPACE_END #endif /* #if !UCONFIG_NO_NORMALIZATION */
mit
supriyasingh01/github_basics
Internetworking Distributed Project/Kernal/linux-3.6.3/drivers/media/dvb/frontends/itd1000.c
5031
11347
/* * Driver for the Integrant ITD1000 "Zero-IF Tuner IC for Direct Broadcast Satellite" * * Copyright (c) 2007-8 Patrick Boettcher <pb@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.= */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/delay.h> #include <linux/dvb/frontend.h> #include <linux/i2c.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "itd1000.h" #include "itd1000_priv.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); #define itd_dbg(args...) do { \ if (debug) { \ printk(KERN_DEBUG "ITD1000: " args);\ } \ } while (0) #define itd_warn(args...) do { \ printk(KERN_WARNING "ITD1000: " args); \ } while (0) #define itd_info(args...) 
do { \ printk(KERN_INFO "ITD1000: " args); \ } while (0) /* don't write more than one byte with flexcop behind */ static int itd1000_write_regs(struct itd1000_state *state, u8 reg, u8 v[], u8 len) { u8 buf[1+len]; struct i2c_msg msg = { .addr = state->cfg->i2c_address, .flags = 0, .buf = buf, .len = len+1 }; buf[0] = reg; memcpy(&buf[1], v, len); /* itd_dbg("wr %02x: %02x\n", reg, v[0]); */ if (i2c_transfer(state->i2c, &msg, 1) != 1) { printk(KERN_WARNING "itd1000 I2C write failed\n"); return -EREMOTEIO; } return 0; } static int itd1000_read_reg(struct itd1000_state *state, u8 reg) { u8 val; struct i2c_msg msg[2] = { { .addr = state->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 }, { .addr = state->cfg->i2c_address, .flags = I2C_M_RD, .buf = &val, .len = 1 }, }; /* ugly flexcop workaround */ itd1000_write_regs(state, (reg - 1) & 0xff, &state->shadow[(reg - 1) & 0xff], 1); if (i2c_transfer(state->i2c, msg, 2) != 2) { itd_warn("itd1000 I2C read failed\n"); return -EREMOTEIO; } return val; } static inline int itd1000_write_reg(struct itd1000_state *state, u8 r, u8 v) { int ret = itd1000_write_regs(state, r, &v, 1); state->shadow[r] = v; return ret; } static struct { u32 symbol_rate; u8 pgaext : 4; /* PLLFH */ u8 bbgvmin : 4; /* BBGVMIN */ } itd1000_lpf_pga[] = { { 0, 0x8, 0x3 }, { 5200000, 0x8, 0x3 }, { 12200000, 0x4, 0x3 }, { 15400000, 0x2, 0x3 }, { 19800000, 0x2, 0x3 }, { 21500000, 0x2, 0x3 }, { 24500000, 0x2, 0x3 }, { 28400000, 0x2, 0x3 }, { 33400000, 0x2, 0x3 }, { 34400000, 0x1, 0x4 }, { 34400000, 0x1, 0x4 }, { 38400000, 0x1, 0x4 }, { 38400000, 0x1, 0x4 }, { 40400000, 0x1, 0x4 }, { 45400000, 0x1, 0x4 }, }; static void itd1000_set_lpf_bw(struct itd1000_state *state, u32 symbol_rate) { u8 i; u8 con1 = itd1000_read_reg(state, CON1) & 0xfd; u8 pllfh = itd1000_read_reg(state, PLLFH) & 0x0f; u8 bbgvmin = itd1000_read_reg(state, BBGVMIN) & 0xf0; u8 bw = itd1000_read_reg(state, BW) & 0xf0; itd_dbg("symbol_rate = %d\n", symbol_rate); /* not sure what is that ? 
- starting to download the table */ itd1000_write_reg(state, CON1, con1 | (1 << 1)); for (i = 0; i < ARRAY_SIZE(itd1000_lpf_pga); i++) if (symbol_rate < itd1000_lpf_pga[i].symbol_rate) { itd_dbg("symrate: index: %d pgaext: %x, bbgvmin: %x\n", i, itd1000_lpf_pga[i].pgaext, itd1000_lpf_pga[i].bbgvmin); itd1000_write_reg(state, PLLFH, pllfh | (itd1000_lpf_pga[i].pgaext << 4)); itd1000_write_reg(state, BBGVMIN, bbgvmin | (itd1000_lpf_pga[i].bbgvmin)); itd1000_write_reg(state, BW, bw | (i & 0x0f)); break; } itd1000_write_reg(state, CON1, con1 | (0 << 1)); } static struct { u8 vcorg; u32 fmax_rg; } itd1000_vcorg[] = { { 1, 920000 }, { 2, 971000 }, { 3, 1031000 }, { 4, 1091000 }, { 5, 1171000 }, { 6, 1281000 }, { 7, 1381000 }, { 8, 500000 }, /* this is intentional. */ { 9, 1451000 }, { 10, 1531000 }, { 11, 1631000 }, { 12, 1741000 }, { 13, 1891000 }, { 14, 2071000 }, { 15, 2250000 }, }; static void itd1000_set_vco(struct itd1000_state *state, u32 freq_khz) { u8 i; u8 gvbb_i2c = itd1000_read_reg(state, GVBB_I2C) & 0xbf; u8 vco_chp1_i2c = itd1000_read_reg(state, VCO_CHP1_I2C) & 0x0f; u8 adcout; /* reserved bit again (reset ?) 
*/ itd1000_write_reg(state, GVBB_I2C, gvbb_i2c | (1 << 6)); for (i = 0; i < ARRAY_SIZE(itd1000_vcorg); i++) { if (freq_khz < itd1000_vcorg[i].fmax_rg) { itd1000_write_reg(state, VCO_CHP1_I2C, vco_chp1_i2c | (itd1000_vcorg[i].vcorg << 4)); msleep(1); adcout = itd1000_read_reg(state, PLLLOCK) & 0x0f; itd_dbg("VCO: %dkHz: %d -> ADCOUT: %d %02x\n", freq_khz, itd1000_vcorg[i].vcorg, adcout, vco_chp1_i2c); if (adcout > 13) { if (!(itd1000_vcorg[i].vcorg == 7 || itd1000_vcorg[i].vcorg == 15)) itd1000_write_reg(state, VCO_CHP1_I2C, vco_chp1_i2c | ((itd1000_vcorg[i].vcorg + 1) << 4)); } else if (adcout < 2) { if (!(itd1000_vcorg[i].vcorg == 1 || itd1000_vcorg[i].vcorg == 9)) itd1000_write_reg(state, VCO_CHP1_I2C, vco_chp1_i2c | ((itd1000_vcorg[i].vcorg - 1) << 4)); } break; } } } static const struct { u32 freq; u8 values[10]; /* RFTR, RFST1 - RFST9 */ } itd1000_fre_values[] = { { 1075000, { 0x59, 0x1d, 0x1c, 0x17, 0x16, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1250000, { 0x89, 0x1e, 0x1d, 0x17, 0x15, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1450000, { 0x89, 0x1e, 0x1d, 0x17, 0x15, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1650000, { 0x69, 0x1e, 0x1d, 0x17, 0x15, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1750000, { 0x69, 0x1e, 0x17, 0x15, 0x14, 0x0f, 0x0e, 0x0c, 0x0b, 0x0a } }, { 1850000, { 0x69, 0x1d, 0x17, 0x16, 0x14, 0x0f, 0x0e, 0x0d, 0x0b, 0x0a } }, { 1900000, { 0x69, 0x1d, 0x17, 0x15, 0x14, 0x0f, 0x0e, 0x0d, 0x0b, 0x0a } }, { 1950000, { 0x69, 0x1d, 0x17, 0x16, 0x14, 0x13, 0x0e, 0x0d, 0x0b, 0x0a } }, { 2050000, { 0x69, 0x1e, 0x1d, 0x17, 0x16, 0x14, 0x13, 0x0e, 0x0b, 0x0a } }, { 2150000, { 0x69, 0x1d, 0x1c, 0x17, 0x15, 0x14, 0x13, 0x0f, 0x0e, 0x0b } } }; #define FREF 16 static void itd1000_set_lo(struct itd1000_state *state, u32 freq_khz) { int i, j; u32 plln, pllf; u64 tmp; plln = (freq_khz * 1000) / 2 / FREF; /* Compute the factional part times 1000 */ tmp = plln % 1000000; plln /= 1000000; tmp *= 1048576; do_div(tmp, 1000000); pllf = (u32) tmp; state->frequency = ((plln * 1000) + (pllf * 
1000)/1048576) * 2*FREF; itd_dbg("frequency: %dkHz (wanted) %dkHz (set), PLLF = %d, PLLN = %d\n", freq_khz, state->frequency, pllf, plln); itd1000_write_reg(state, PLLNH, 0x80); /* PLLNH */; itd1000_write_reg(state, PLLNL, plln & 0xff); itd1000_write_reg(state, PLLFH, (itd1000_read_reg(state, PLLFH) & 0xf0) | ((pllf >> 16) & 0x0f)); itd1000_write_reg(state, PLLFM, (pllf >> 8) & 0xff); itd1000_write_reg(state, PLLFL, (pllf >> 0) & 0xff); for (i = 0; i < ARRAY_SIZE(itd1000_fre_values); i++) { if (freq_khz <= itd1000_fre_values[i].freq) { itd_dbg("fre_values: %d\n", i); itd1000_write_reg(state, RFTR, itd1000_fre_values[i].values[0]); for (j = 0; j < 9; j++) itd1000_write_reg(state, RFST1+j, itd1000_fre_values[i].values[j+1]); break; } } itd1000_set_vco(state, freq_khz); } static int itd1000_set_parameters(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct itd1000_state *state = fe->tuner_priv; u8 pllcon1; itd1000_set_lo(state, c->frequency); itd1000_set_lpf_bw(state, c->symbol_rate); pllcon1 = itd1000_read_reg(state, PLLCON1) & 0x7f; itd1000_write_reg(state, PLLCON1, pllcon1 | (1 << 7)); itd1000_write_reg(state, PLLCON1, pllcon1); return 0; } static int itd1000_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct itd1000_state *state = fe->tuner_priv; *frequency = state->frequency; return 0; } static int itd1000_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { return 0; } static u8 itd1000_init_tab[][2] = { { PLLCON1, 0x65 }, /* Register does not change */ { PLLNH, 0x80 }, /* Bits [7:6] do not change */ { RESERVED_0X6D, 0x3b }, { VCO_CHP2_I2C, 0x12 }, { 0x72, 0xf9 }, /* No such regsister defined */ { RESERVED_0X73, 0xff }, { RESERVED_0X74, 0xb2 }, { RESERVED_0X75, 0xc7 }, { EXTGVBBRF, 0xf0 }, { DIVAGCCK, 0x80 }, { BBTR, 0xa0 }, { RESERVED_0X7E, 0x4f }, { 0x82, 0x88 }, /* No such regsister defined */ { 0x83, 0x80 }, /* No such regsister defined */ { 0x84, 0x80 }, /* No such regsister defined */ { 
RESERVED_0X85, 0x74 }, { RESERVED_0X86, 0xff }, { RESERVED_0X88, 0x02 }, { RESERVED_0X89, 0x16 }, { RFST0, 0x1f }, { RESERVED_0X94, 0x66 }, { RESERVED_0X95, 0x66 }, { RESERVED_0X96, 0x77 }, { RESERVED_0X97, 0x99 }, { RESERVED_0X98, 0xff }, { RESERVED_0X99, 0xfc }, { RESERVED_0X9A, 0xba }, { RESERVED_0X9B, 0xaa }, }; static u8 itd1000_reinit_tab[][2] = { { VCO_CHP1_I2C, 0x8a }, { BW, 0x87 }, { GVBB_I2C, 0x03 }, { BBGVMIN, 0x03 }, { CON1, 0x2e }, }; static int itd1000_init(struct dvb_frontend *fe) { struct itd1000_state *state = fe->tuner_priv; int i; for (i = 0; i < ARRAY_SIZE(itd1000_init_tab); i++) itd1000_write_reg(state, itd1000_init_tab[i][0], itd1000_init_tab[i][1]); for (i = 0; i < ARRAY_SIZE(itd1000_reinit_tab); i++) itd1000_write_reg(state, itd1000_reinit_tab[i][0], itd1000_reinit_tab[i][1]); return 0; } static int itd1000_sleep(struct dvb_frontend *fe) { return 0; } static int itd1000_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static const struct dvb_tuner_ops itd1000_tuner_ops = { .info = { .name = "Integrant ITD1000", .frequency_min = 950000, .frequency_max = 2150000, .frequency_step = 125, /* kHz for QPSK frontends */ }, .release = itd1000_release, .init = itd1000_init, .sleep = itd1000_sleep, .set_params = itd1000_set_parameters, .get_frequency = itd1000_get_frequency, .get_bandwidth = itd1000_get_bandwidth }; struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct itd1000_config *cfg) { struct itd1000_state *state = NULL; u8 i = 0; state = kzalloc(sizeof(struct itd1000_state), GFP_KERNEL); if (state == NULL) return NULL; state->cfg = cfg; state->i2c = i2c; i = itd1000_read_reg(state, 0); if (i != 0) { kfree(state); return NULL; } itd_info("successfully identified (ID: %d)\n", i); memset(state->shadow, 0xff, sizeof(state->shadow)); for (i = 0x65; i < 0x9c; i++) state->shadow[i] = itd1000_read_reg(state, i); memcpy(&fe->ops.tuner_ops, &itd1000_tuner_ops, sizeof(struct 
dvb_tuner_ops)); fe->tuner_priv = state; return fe; } EXPORT_SYMBOL(itd1000_attach); MODULE_AUTHOR("Patrick Boettcher <pb@linuxtv.org>"); MODULE_DESCRIPTION("Integrant ITD1000 driver"); MODULE_LICENSE("GPL");
cc0-1.0
kaskr/CppAD
test_more/general/mul.cpp
1
4293
/* -------------------------------------------------------------------------- CppAD: C++ Algorithmic Differentiation: Copyright (C) 2003-17 Bradley M. Bell CppAD is distributed under multiple licenses. This distribution is under the terms of the Eclipse Public License Version 1.0. A copy of this license is included in the COPYING file of this distribution. Please visit http://www.coin-or.org/CppAD/ for information on other licenses. -------------------------------------------------------------------------- */ /* Two old Mul examples now used just for valiadation testing */ # include <cppad/cppad.hpp> namespace { // BEGIN empty namespace bool MulTestOne(void) { bool ok = true; using namespace CppAD; // independent variable vector, indices, values, and declaration CPPAD_TESTVECTOR(AD<double>) U(2); size_t s = 0; size_t t = 1; U[s] = 3.; U[t] = 2.; Independent(U); // assign some parameters AD<double> zero = 0.; AD<double> one = 1.; // dependent variable vector and indices CPPAD_TESTVECTOR(AD<double>) Z(5); size_t x = 0; size_t y = 1; size_t z = 2; size_t u = 3; size_t v = 4; // assign the dependent variables Z[x] = U[s] * U[t]; // AD<double> * AD<double> Z[y] = Z[x] * 4.; // AD<double> * double Z[z] = 4. * Z[y]; // double * AD<double> Z[u] = one * Z[z]; // multiplication by parameter equal to one Z[v] = zero * Z[z]; // multiplication by parameter equal to zero // check multipilcation by zero results in a parameter ok &= Parameter(Z[v]); // create f: U -> Z and vectors used for derivative calculations ADFun<double> f(U, Z); CPPAD_TESTVECTOR(double) q( f.Domain() ); CPPAD_TESTVECTOR(double) r( f.Range() ); // check parameter flag ok &= f.Parameter(v); // check values ok &= ( Z[x] == 3. * 2. ); ok &= ( Z[y] == 3. * 2. * 4. ); ok &= ( Z[z] == 4. * 3. * 2. * 4. ); ok &= ( Z[u] == Z[z] ); ok &= ( Z[v] == 0. ); // forward computation of partials w.r.t. s q[s] = 1.; q[t] = 0.; r = f.Forward(1, q); ok &= ( r[x] == U[t] ); // dx/ds ok &= ( r[y] == U[t] * 4. 
); // dy/ds ok &= ( r[z] == 4. * U[t] * 4. ); // dz/ds ok &= ( r[u] == r[z] ); // du/ds ok &= ( r[v] == 0. ); // dv/ds // reverse computation of second partials of z CPPAD_TESTVECTOR(double) d2( f.Domain() * 2 ); r[x] = 0.; r[y] = 0.; r[z] = 1.; r[u] = 0.; r[v] = 0.; d2 = f.Reverse(2, r); // check second order partials ok &= ( d2[2 * s + 1] == 0. ); // d^2 z / (ds ds) ok &= ( d2[2 * t + 1] == 4. * 4. ); // d^2 z / (ds dt) return ok; } bool MulTestTwo(void) { bool ok = true; using namespace CppAD; double eps99 = 99.0 * std::numeric_limits<double>::epsilon(); // independent variable vector double u0 = .5; CPPAD_TESTVECTOR(AD<double>) U(1); U[0] = u0; Independent(U); AD<double> a = U[0] * 1.; // AD<double> * double AD<double> b = a * 2; // AD<double> * int AD<double> c = 3. * b; // double * AD<double> AD<double> d = 4 * c; // int * AD<double> // dependent variable vector CPPAD_TESTVECTOR(AD<double>) Z(1); Z[0] = U[0] * d; // AD<double> * AD<double> // create f: U -> Z and vectors used for derivative calculations ADFun<double> f(U, Z); CPPAD_TESTVECTOR(double) v(1); CPPAD_TESTVECTOR(double) w(1); // check value ok &= NearEqual(Value(Z[0]) , u0*4*3*2*u0, eps99 , eps99); // forward computation of partials w.r.t. u size_t j; size_t p = 5; double jfac = 1.; v[0] = 1.; for(j = 1; j < p; j++) { double value; if( j == 1 ) value = 48. * u0; else if( j == 2 ) value = 48.; else value = 0.; jfac *= double(j); w = f.Forward(j, v); ok &= NearEqual(w[0], value/jfac, eps99, eps99); // d^jz/du^j v[0] = 0.; } // reverse computation of partials of Taylor coefficients CPPAD_TESTVECTOR(double) r(p); w[0] = 1.; r = f.Reverse(p, w); jfac = 1.; for(j = 0; j < p; j++) { double value; if( j == 0 ) value = 48. * u0; else if( j == 1 ) value = 48.; else value = 0.; ok &= NearEqual(r[j], value/jfac, eps99, eps99); // d^jz/du^j jfac *= double(j + 1); } return ok; } } // END empty namespace bool Mul(void) { bool ok = true; ok &= MulTestOne(); ok &= MulTestTwo(); return ok; }
epl-1.0
rizard/bigcode
modules/FME/module/src/fme_config.c
6
4031
/**************************************************************** * * Copyright 2013, Big Switch Networks, Inc. * * Licensed under the Eclipse Public License, Version 1.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.eclipse.org/legal/epl-v10.html * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific * language governing permissions and limitations under the * License. * ***************************************************************/ #include <FME/fme_config.h> #include <FME/fme.h> #include "fme_int.h" #include <stdlib.h> #include <string.h> /* <auto.start.cdefs(FME_CONFIG_HEADER).source> */ #define __fme_config_STRINGIFY_NAME(_x) #_x #define __fme_config_STRINGIFY_VALUE(_x) __fme_config_STRINGIFY_NAME(_x) fme_config_settings_t fme_config_settings[] = { #ifdef FME_CONFIG_INCLUDE_LOGGING { __fme_config_STRINGIFY_NAME(FME_CONFIG_INCLUDE_LOGGING), __fme_config_STRINGIFY_VALUE(FME_CONFIG_INCLUDE_LOGGING) }, #else { FME_CONFIG_INCLUDE_LOGGING(__fme_config_STRINGIFY_NAME), "__undefined__" }, #endif #ifdef FME_CONFIG_LOG_OPTIONS_DEFAULT { __fme_config_STRINGIFY_NAME(FME_CONFIG_LOG_OPTIONS_DEFAULT), __fme_config_STRINGIFY_VALUE(FME_CONFIG_LOG_OPTIONS_DEFAULT) }, #else { FME_CONFIG_LOG_OPTIONS_DEFAULT(__fme_config_STRINGIFY_NAME), "__undefined__" }, #endif #ifdef FME_CONFIG_LOG_BITS_DEFAULT { __fme_config_STRINGIFY_NAME(FME_CONFIG_LOG_BITS_DEFAULT), __fme_config_STRINGIFY_VALUE(FME_CONFIG_LOG_BITS_DEFAULT) }, #else { FME_CONFIG_LOG_BITS_DEFAULT(__fme_config_STRINGIFY_NAME), "__undefined__" }, #endif #ifdef FME_CONFIG_LOG_CUSTOM_BITS_DEFAULT { __fme_config_STRINGIFY_NAME(FME_CONFIG_LOG_CUSTOM_BITS_DEFAULT), __fme_config_STRINGIFY_VALUE(FME_CONFIG_LOG_CUSTOM_BITS_DEFAULT) }, #else { 
FME_CONFIG_LOG_CUSTOM_BITS_DEFAULT(__fme_config_STRINGIFY_NAME), "__undefined__" }, #endif #ifdef FME_CONFIG_PORTING_STDLIB { __fme_config_STRINGIFY_NAME(FME_CONFIG_PORTING_STDLIB), __fme_config_STRINGIFY_VALUE(FME_CONFIG_PORTING_STDLIB) }, #else { FME_CONFIG_PORTING_STDLIB(__fme_config_STRINGIFY_NAME), "__undefined__" }, #endif #ifdef FME_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS { __fme_config_STRINGIFY_NAME(FME_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS), __fme_config_STRINGIFY_VALUE(FME_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS) }, #else { FME_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS(__fme_config_STRINGIFY_NAME), "__undefined__" }, #endif #ifdef FME_CONFIG_INCLUDE_UCLI { __fme_config_STRINGIFY_NAME(FME_CONFIG_INCLUDE_UCLI), __fme_config_STRINGIFY_VALUE(FME_CONFIG_INCLUDE_UCLI) }, #else { FME_CONFIG_INCLUDE_UCLI(__fme_config_STRINGIFY_NAME), "__undefined__" }, #endif #ifdef FME_CONFIG_INCLUDE_UTM { __fme_config_STRINGIFY_NAME(FME_CONFIG_INCLUDE_UTM), __fme_config_STRINGIFY_VALUE(FME_CONFIG_INCLUDE_UTM) }, #else { FME_CONFIG_INCLUDE_UTM(__fme_config_STRINGIFY_NAME), "__undefined__" }, #endif #ifdef FME_CONFIG_KEY_SIZE_WORDS { __fme_config_STRINGIFY_NAME(FME_CONFIG_KEY_SIZE_WORDS), __fme_config_STRINGIFY_VALUE(FME_CONFIG_KEY_SIZE_WORDS) }, #else { FME_CONFIG_KEY_SIZE_WORDS(__fme_config_STRINGIFY_NAME), "__undefined__" }, #endif { NULL, NULL } }; #undef __fme_config_STRINGIFY_VALUE #undef __fme_config_STRINGIFY_NAME const char* fme_config_lookup(const char* setting) { int i; for(i = 0; fme_config_settings[i].name; i++) { if(strcmp(fme_config_settings[i].name, setting)) { return fme_config_settings[i].value; } } return NULL; } int fme_config_show(struct aim_pvs_s* pvs) { int i; for(i = 0; fme_config_settings[i].name; i++) { aim_printf(pvs, "%s = %s\n", fme_config_settings[i].name, fme_config_settings[i].value); } return i; } /* <auto.end.cdefs(FME_CONFIG_HEADER).source> */
epl-1.0
tkelman/graphviz
cmd/smyrna/trackball.c
18
8442
/* * (c) Copyright 1993, 1994, Silicon Graphics, Inc. * ALL RIGHTS RESERVED * Permission to use, copy, modify, and distribute this software for * any purpose and without fee is hereby granted, provided that the above * copyright notice appear in all copies and that both the copyright notice * and this permission notice appear in supporting documentation, and that * the name of Silicon Graphics, Inc. not be used in advertising * or publicity pertaining to distribution of the software without specific, * written prior permission. * * THE MATERIAL EMBODIED ON THIS SOFTWARE IS PROVIDED TO YOU "AS-IS" * AND WITHOUT WARRANTY OF ANY KIND, EXPRESS, IMPLIED OR OTHERWISE, * INCLUDING WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY OR * FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL SILICON * GRAPHICS, INC. BE LIABLE TO YOU OR ANYONE ELSE FOR ANY DIRECT, * SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY * KIND, OR ANY DAMAGES WHATSOEVER, INCLUDING WITHOUT LIMITATION, * LOSS OF PROFIT, LOSS OF USE, SAVINGS OR REVENUE, OR THE CLAIMS OF * THIRD PARTIES, WHETHER OR NOT SILICON GRAPHICS, INC. HAS BEEN * ADVISED OF THE POSSIBILITY OF SUCH LOSS, HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE * POSSESSION, USE OR PERFORMANCE OF THIS SOFTWARE. * * US Government Users Restricted Rights * Use, duplication, or disclosure by the Government is subject to * restrictions set forth in FAR 52.227.19(c)(2) or subparagraph * (c)(1)(ii) of the Rights in Technical Data and Computer Software * clause at DFARS 252.227-7013 and/or in similar or successor * clauses in the FAR or the DOD or NASA FAR Supplement. * Unpublished-- rights reserved under the copyright laws of the * United States. Contractor/manufacturer is Silicon Graphics, * Inc., 2011 N. Shoreline Blvd., Mountain View, CA 94039-7311. * * OpenGL(TM) is a trademark of Silicon Graphics, Inc. */ /* * Trackball code: * * Implementation of a virtual trackball. 
* Implemented by Gavin Bell, lots of ideas from Thant Tessman and * the August '88 issue of Siggraph's "Computer Graphics," pp. 121-129. * * Vector manip code: * * Original code from: * David M. Ciemiewicz, Mark Grossman, Henry Moreton, and Paul Haeberli * * Much mucking with by: * Gavin Bell */ #include <math.h> #include "trackball.h" /* * This size should really be based on the distance from the center of * rotation to the point on the object underneath the mouse. That * point would then track the mouse as closely as possible. This is a * simple example, though, so that is left as an Exercise for the * Programmer. */ #define TRACKBALLSIZE (0.8) /* * Local function prototypes (not defined in trackball.h) */ static float tb_project_to_sphere(float, float, float); static void normalize_quat(float[4]); void vzero(float *v) { v[0] = 0.0; v[1] = 0.0; v[2] = 0.0; } void vset(float *v, float x, float y, float z) { v[0] = x; v[1] = y; v[2] = z; } void vsub(const float *src1, const float *src2, float *dst) { dst[0] = src1[0] - src2[0]; dst[1] = src1[1] - src2[1]; dst[2] = src1[2] - src2[2]; } void vcopy(const float *v1, float *v2) { register int i; for (i = 0; i < 3; i++) v2[i] = v1[i]; } void vcross(const float *v1, const float *v2, float *cross) { float temp[3]; temp[0] = (v1[1] * v2[2]) - (v1[2] * v2[1]); temp[1] = (v1[2] * v2[0]) - (v1[0] * v2[2]); temp[2] = (v1[0] * v2[1]) - (v1[1] * v2[0]); vcopy(temp, cross); } float vlength(const float *v) { return sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]); } void vscale(float *v, float div) { v[0] *= div; v[1] *= div; v[2] *= div; } void vnormal(float *v) { vscale(v, 1.0 / vlength(v)); } float vdot(const float *v1, const float *v2) { return v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2]; } void vadd(const float *src1, const float *src2, float *dst) { dst[0] = src1[0] + src2[0]; dst[1] = src1[1] + src2[1]; dst[2] = src1[2] + src2[2]; } /* * Ok, simulate a track-ball. 
Project the points onto the virtual * trackball, then figure out the axis of rotation, which is the cross * product of P1 P2 and O P1 (O is the center of the ball, 0,0,0) * Note: This is a deformed trackball-- is a trackball in the center, * but is deformed into a hyperbolic sheet of rotation away from the * center. This particular function was chosen after trying out * several variations. * * It is assumed that the arguments to this routine are in the range * (-1.0 ... 1.0) */ void trackball(float q[4], float p1x, float p1y, float p2x, float p2y) { float a[3]; /* Axis of rotation */ float phi; /* how much to rotate about axis */ float p1[3], p2[3], d[3]; float t; if (p1x == p2x && p1y == p2y) { /* Zero rotation */ vzero(q); q[3] = 1.0; return; } /* * First, figure out z-coordinates for projection of P1 and P2 to * deformed sphere */ vset(p1, p1x, p1y, tb_project_to_sphere(TRACKBALLSIZE, p1x, p1y)); vset(p2, p2x, p2y, tb_project_to_sphere(TRACKBALLSIZE, p2x, p2y)); /* * Now, we want the cross product of P1 and P2 */ vcross(p2, p1, a); /* * Figure out how much to rotate around that axis. */ vsub(p1, p2, d); t = vlength(d) / (2.0 * TRACKBALLSIZE); /* * Avoid problems with out-of-control values... */ if (t > 1.0) t = 1.0; if (t < -1.0) t = -1.0; phi = 2.0 * asin(t); axis_to_quat(a, phi, q); } /* * Given an axis and angle, compute quaternion. */ void axis_to_quat(float a[3], float phi, float q[4]) { vnormal(a); vcopy(a, q); vscale(q, sin(phi / 2.0)); q[3] = cos(phi / 2.0); } /* * Project an x,y pair onto a sphere of radius r OR a hyperbolic sheet * if we are away from the center of the sphere. 
*/ static float tb_project_to_sphere(float r, float x, float y) { float d, t, z; d = sqrt(x * x + y * y); if (d < r * 0.70710678118654752440) { /* Inside sphere */ z = sqrt(r * r - d * d); } else { /* On hyperbola */ t = r / 1.41421356237309504880; z = t * t / d; } return z; } /* * Given two rotations, e1 and e2, expressed as quaternion rotations, * figure out the equivalent single rotation and stuff it into dest. * * This routine also normalizes the result every RENORMCOUNT times it is * called, to keep error from creeping in. * * NOTE: This routine is written so that q1 or q2 may be the same * as dest (or each other). */ #define RENORMCOUNT 97 void add_quats(float q1[4], float q2[4], float dest[4]) { static int count = 0; float t1[4], t2[4], t3[4]; float tf[4]; vcopy(q1, t1); vscale(t1, q2[3]); vcopy(q2, t2); vscale(t2, q1[3]); vcross(q2, q1, t3); vadd(t1, t2, tf); vadd(t3, tf, tf); tf[3] = q1[3] * q2[3] - vdot(q1, q2); dest[0] = tf[0]; dest[1] = tf[1]; dest[2] = tf[2]; dest[3] = tf[3]; if (++count > RENORMCOUNT) { count = 0; normalize_quat(dest); } } /* * Quaternions always obey: a^2 + b^2 + c^2 + d^2 = 1.0 * If they don't add up to 1.0, dividing by their magnitued will * renormalize them. * * Note: See the following for more information on quaternions: * * - Shoemake, K., Animating rotation with quaternion curves, Computer * Graphics 19, No 3 (Proc. SIGGRAPH'85), 245-254, 1985. * - Pletinckx, D., Quaternion calculus as a basic tool in computer * graphics, The Visual Computer 5, 2-13, 1989. */ static void normalize_quat(float q[4]) { int i; float mag; mag = (q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3]); for (i = 0; i < 4; i++) q[i] /= mag; } /* * Build a rotation matrix, given a quaternion rotation. 
* */ void build_rotmatrix(float m[4][4], float q[4]) { m[0][0] = 1.0 - 2.0 * (q[1] * q[1] + q[2] * q[2]); m[0][1] = 2.0 * (q[0] * q[1] - q[2] * q[3]); m[0][2] = 2.0 * (q[2] * q[0] + q[1] * q[3]); m[0][3] = 0.0; m[1][0] = 2.0 * (q[0] * q[1] + q[2] * q[3]); m[1][1] = 1.0 - 2.0 * (q[2] * q[2] + q[0] * q[0]); m[1][2] = 2.0 * (q[1] * q[2] - q[0] * q[3]); m[1][3] = 0.0; m[2][0] = 2.0 * (q[2] * q[0] - q[1] * q[3]); m[2][1] = 2.0 * (q[1] * q[2] + q[0] * q[3]); m[2][2] = 1.0 - 2.0 * (q[1] * q[1] + q[0] * q[0]); m[2][3] = 0.0; m[3][0] = 0.0; m[3][1] = 0.0; m[3][2] = 0.0; m[3][3] = 1.0; }
epl-1.0
hunterhu/linux-sunxi
modules/wifi/bcm40181/open-src/src/bcmsdio/sys/bcmsdh.c
256
16076
/* * BCMSDH interface glue * implement bcmsdh API for SDIOH driver * * Copyright (C) 1999-2011, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* * $Id: bcmsdh.c,v 1.57.6.4 2010/12/23 01:13:15 Exp $ */ /* ****************** BCMSDH Interface Functions *************************** */ #include <typedefs.h> #include <bcmdevs.h> #include <bcmendian.h> #include <bcmutils.h> #include <hndsoc.h> #include <siutils.h> #include <osl.h> #include <bcmsdh.h> /* BRCM API for SDIO clients (such as wl, dhd) */ #include <bcmsdbus.h> /* common SDIO/controller interface */ #include <sbsdio.h> /* BRCM sdio device core */ #include <sdio.h> /* sdio spec */ #define SDIOH_API_ACCESS_RETRY_LIMIT 2 const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL; struct bcmsdh_info { bool init_success; /* underlying driver successfully attached */ void *sdioh; /* handler for sdioh */ uint32 vendevid; /* Target Vendor and Device ID on SD bus */ osl_t *osh; bool regfail; /* Save status of last reg_read/reg_write call */ uint32 sbwad; /* Save backplane window address */ }; /* local copy of bcm sd handler */ bcmsdh_info_t * l_bcmsdh = NULL; #if defined(OOB_INTR_ONLY) && defined(HW_OOB) extern int sdioh_enable_hw_oob_intr(void *sdioh, bool enable); void bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable) { sdioh_enable_hw_oob_intr(sdh->sdioh, enable); } #endif bcmsdh_info_t * bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq) { bcmsdh_info_t *bcmsdh; if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) { BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); return NULL; } bzero((char *)bcmsdh, sizeof(bcmsdh_info_t)); /* save the handler locally */ l_bcmsdh = bcmsdh; if (!(bcmsdh->sdioh = sdioh_attach(osh, cfghdl, irq))) { bcmsdh_detach(osh, bcmsdh); return NULL; } bcmsdh->osh = osh; bcmsdh->init_success = TRUE; *regsva = (uint32 *)SI_ENUM_BASE; /* Report the BAR, to fix if needed */ bcmsdh->sbwad = SI_ENUM_BASE; return bcmsdh; } int bcmsdh_detach(osl_t *osh, void *sdh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; if (bcmsdh != NULL) { if (bcmsdh->sdioh) { sdioh_detach(osh, 
bcmsdh->sdioh); bcmsdh->sdioh = NULL; } MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t)); } l_bcmsdh = NULL; return 0; } int bcmsdh_iovar_op(void *sdh, const char *name, void *params, int plen, void *arg, int len, bool set) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set); } bool bcmsdh_intr_query(void *sdh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; bool on; ASSERT(bcmsdh); status = sdioh_interrupt_query(bcmsdh->sdioh, &on); if (SDIOH_API_SUCCESS(status)) return FALSE; else return on; } int bcmsdh_intr_enable(void *sdh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; ASSERT(bcmsdh); status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE); return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); } int bcmsdh_intr_disable(void *sdh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; ASSERT(bcmsdh); status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE); return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); } int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; ASSERT(bcmsdh); status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh); return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); } int bcmsdh_intr_dereg(void *sdh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; ASSERT(bcmsdh); status = sdioh_interrupt_deregister(bcmsdh->sdioh); return (SDIOH_API_SUCCESS(status) ? 
0 : BCME_ERROR); } #if defined(DHD_DEBUG) bool bcmsdh_intr_pending(void *sdh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; ASSERT(sdh); return sdioh_interrupt_pending(bcmsdh->sdioh); } #endif int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh) { ASSERT(sdh); /* don't support yet */ return BCME_UNSUPPORTED; } uint8 bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; #ifdef SDIOH_API_ACCESS_RETRY_LIMIT int32 retry = 0; #endif uint8 data = 0; if (!bcmsdh) bcmsdh = l_bcmsdh; ASSERT(bcmsdh->init_success); #ifdef SDIOH_API_ACCESS_RETRY_LIMIT do { if (retry) /* wait for 1 ms till bus get settled down */ OSL_DELAY(1000); #endif status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data); #ifdef SDIOH_API_ACCESS_RETRY_LIMIT } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT)); #endif if (err) *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__, fnc_num, addr, data)); return data; } void bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; #ifdef SDIOH_API_ACCESS_RETRY_LIMIT int32 retry = 0; #endif if (!bcmsdh) bcmsdh = l_bcmsdh; ASSERT(bcmsdh->init_success); #ifdef SDIOH_API_ACCESS_RETRY_LIMIT do { if (retry) /* wait for 1 ms till bus get settled down */ OSL_DELAY(1000); #endif status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data); #ifdef SDIOH_API_ACCESS_RETRY_LIMIT } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT)); #endif if (err) *err = SDIOH_API_SUCCESS(status) ? 
0 : BCME_SDIO_ERROR; BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__, fnc_num, addr, data)); } uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; uint32 data = 0; if (!bcmsdh) bcmsdh = l_bcmsdh; ASSERT(bcmsdh->init_success); status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num, addr, &data, 4); if (err) *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num, addr, data)); return data; } void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; if (!bcmsdh) bcmsdh = l_bcmsdh; ASSERT(bcmsdh->init_success); status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num, addr, &data, 4); if (err) *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num, addr, data)); } int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; uint8 *tmp_buf, *tmp_ptr; uint8 *ptr; bool ascii = func & ~0xf; func &= 0x7; if (!bcmsdh) bcmsdh = l_bcmsdh; ASSERT(bcmsdh->init_success); ASSERT(cis); ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT); status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length); if (ascii) { /* Move binary bits to tmp and format them into the provided buffer. 
*/ if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) { BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__)); return BCME_NOMEM; } bcopy(cis, tmp_buf, length); for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) { ptr += sprintf((char*)ptr, "%.2x ", *tmp_ptr & 0xff); if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0) ptr += sprintf((char *)ptr, "\n"); } MFREE(bcmsdh->osh, tmp_buf, length); } return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); } static int bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address) { int err = 0; bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err); if (!err) bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, (address >> 16) & SBSDIO_SBADDRMID_MASK, &err); if (!err) bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err); return err; } uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; uint32 word = 0; uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr)); if (!bcmsdh) bcmsdh = l_bcmsdh; ASSERT(bcmsdh->init_success); if (bar0 != bcmsdh->sbwad) { if (bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0)) return 0xFFFFFFFF; bcmsdh->sbwad = bar0; } addr &= SBSDIO_SB_OFT_ADDR_MASK; if (size == 4) addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, SDIO_FUNC_1, addr, &word, size); bcmsdh->regfail = !(SDIOH_API_SUCCESS(status)); BCMSDH_INFO(("uint32data = 0x%x\n", word)); /* if ok, return appropriately masked word */ if (SDIOH_API_SUCCESS(status)) { switch (size) { case sizeof(uint8): return (word & 0xff); case sizeof(uint16): return (word & 0xffff); case sizeof(uint32): return word; default: bcmsdh->regfail = TRUE; } } /* otherwise, bad sdio access or invalid size */ 
BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size)); return 0xFFFFFFFF; } uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; int err = 0; BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n", __FUNCTION__, addr, size*8, data)); if (!bcmsdh) bcmsdh = l_bcmsdh; ASSERT(bcmsdh->init_success); if (bar0 != bcmsdh->sbwad) { if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0))) return err; bcmsdh->sbwad = bar0; } addr &= SBSDIO_SB_OFT_ADDR_MASK; if (size == 4) addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1, addr, &data, size); bcmsdh->regfail = !(SDIOH_API_SUCCESS(status)); if (SDIOH_API_SUCCESS(status)) return 0; BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n", __FUNCTION__, data, addr, size)); return 0xFFFFFFFF; } bool bcmsdh_regfail(void *sdh) { return ((bcmsdh_info_t *)sdh)->regfail; } int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, void *pkt, bcmsdh_cmplt_fn_t complete, void *handle) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; uint incr_fix; uint width; uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; int err = 0; ASSERT(bcmsdh); ASSERT(bcmsdh->init_success); BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", __FUNCTION__, fn, addr, nbytes)); /* Async not implemented yet */ ASSERT(!(flags & SDIO_REQ_ASYNC)); if (flags & SDIO_REQ_ASYNC) return BCME_UNSUPPORTED; if (bar0 != bcmsdh->sbwad) { if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0))) return err; bcmsdh->sbwad = bar0; } addr &= SBSDIO_SB_OFT_ADDR_MASK; incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; width = (flags & SDIO_REQ_4BYTE) ? 
4 : 2; if (width == 4) addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, SDIOH_READ, fn, addr, width, nbytes, buf, pkt); return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); } int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, void *pkt, bcmsdh_cmplt_fn_t complete, void *handle) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; uint incr_fix; uint width; uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; int err = 0; ASSERT(bcmsdh); ASSERT(bcmsdh->init_success); BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", __FUNCTION__, fn, addr, nbytes)); /* Async not implemented yet */ ASSERT(!(flags & SDIO_REQ_ASYNC)); if (flags & SDIO_REQ_ASYNC) return BCME_UNSUPPORTED; if (bar0 != bcmsdh->sbwad) { if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0))) return err; bcmsdh->sbwad = bar0; } addr &= SBSDIO_SB_OFT_ADDR_MASK; incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; if (width == 4) addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt); return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); } int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; ASSERT(bcmsdh); ASSERT(bcmsdh->init_success); ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0); addr &= SBSDIO_SB_OFT_ADDR_MASK; addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC, (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1, addr, 4, nbytes, buf, NULL); return (SDIOH_API_SUCCESS(status) ? 
0 : BCME_ERROR); } int bcmsdh_abort(void *sdh, uint fn) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; return sdioh_abort(bcmsdh->sdioh, fn); } int bcmsdh_start(void *sdh, int stage) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; return sdioh_start(bcmsdh->sdioh, stage); } int bcmsdh_stop(void *sdh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; return sdioh_stop(bcmsdh->sdioh); } int bcmsdh_waitlockfree(void *sdh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; if (!bcmsdh) bcmsdh = l_bcmsdh; return sdioh_waitlockfree(bcmsdh->sdioh); } int bcmsdh_query_device(void *sdh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0; return (bcmsdh->vendevid); } uint bcmsdh_query_iofnum(void *sdh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; if (!bcmsdh) bcmsdh = l_bcmsdh; return (sdioh_query_iofnum(bcmsdh->sdioh)); } int bcmsdh_reset(bcmsdh_info_t *sdh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; return sdioh_sdio_reset(bcmsdh->sdioh); } void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh) { ASSERT(sdh); return sdh->sdioh; } /* Function to pass device-status bits to DHD. */ uint32 bcmsdh_get_dstatus(void *sdh) { return 0; } uint32 bcmsdh_cur_sbwad(void *sdh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; if (!bcmsdh) bcmsdh = l_bcmsdh; return (bcmsdh->sbwad); } void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev) { return; }
gpl-2.0
samno1607/P920-SU760-Source-Differences-GB
fs/ocfs2/refcounttree.c
512
115843
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * refcounttree.c * * Copyright (C) 2009 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/sort.h> #define MLOG_MASK_PREFIX ML_REFCOUNT #include <cluster/masklog.h> #include "ocfs2.h" #include "inode.h" #include "alloc.h" #include "suballoc.h" #include "journal.h" #include "uptodate.h" #include "super.h" #include "buffer_head_io.h" #include "blockcheck.h" #include "refcounttree.h" #include "sysfile.h" #include "dlmglue.h" #include "extent_map.h" #include "aops.h" #include "xattr.h" #include "namei.h" #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/slab.h> #include <linux/writeback.h> #include <linux/pagevec.h> #include <linux/swap.h> #include <linux/security.h> #include <linux/fsnotify.h> #include <linux/quotaops.h> #include <linux/namei.h> #include <linux/mount.h> struct ocfs2_cow_context { struct inode *inode; u32 cow_start; u32 cow_len; struct ocfs2_extent_tree data_et; struct ocfs2_refcount_tree *ref_tree; struct buffer_head *ref_root_bh; struct ocfs2_alloc_context *meta_ac; struct ocfs2_alloc_context *data_ac; struct ocfs2_cached_dealloc_ctxt dealloc; void *cow_object; struct ocfs2_post_refcount *post_refcount; int extra_credits; int (*get_clusters)(struct ocfs2_cow_context *context, u32 v_cluster, u32 *p_cluster, u32 *num_clusters, unsigned int *extent_flags); int (*cow_duplicate_clusters)(handle_t *handle, struct ocfs2_cow_context *context, u32 cpos, u32 old_cluster, u32 new_cluster, u32 new_len); }; static inline struct ocfs2_refcount_tree * 
cache_info_to_refcount(struct ocfs2_caching_info *ci) { return container_of(ci, struct ocfs2_refcount_tree, rf_ci); } static int ocfs2_validate_refcount_block(struct super_block *sb, struct buffer_head *bh) { int rc; struct ocfs2_refcount_block *rb = (struct ocfs2_refcount_block *)bh->b_data; mlog(0, "Validating refcount block %llu\n", (unsigned long long)bh->b_blocknr); BUG_ON(!buffer_uptodate(bh)); /* * If the ecc fails, we return the error but otherwise * leave the filesystem running. We know any error is * local to this block. */ rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check); if (rc) { mlog(ML_ERROR, "Checksum failed for refcount block %llu\n", (unsigned long long)bh->b_blocknr); return rc; } if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) { ocfs2_error(sb, "Refcount block #%llu has bad signature %.*s", (unsigned long long)bh->b_blocknr, 7, rb->rf_signature); return -EINVAL; } if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) { ocfs2_error(sb, "Refcount block #%llu has an invalid rf_blkno " "of %llu", (unsigned long long)bh->b_blocknr, (unsigned long long)le64_to_cpu(rb->rf_blkno)); return -EINVAL; } if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) { ocfs2_error(sb, "Refcount block #%llu has an invalid " "rf_fs_generation of #%u", (unsigned long long)bh->b_blocknr, le32_to_cpu(rb->rf_fs_generation)); return -EINVAL; } return 0; } static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci, u64 rb_blkno, struct buffer_head **bh) { int rc; struct buffer_head *tmp = *bh; rc = ocfs2_read_block(ci, rb_blkno, &tmp, ocfs2_validate_refcount_block); /* If ocfs2_read_block() got us a new bh, pass it up. 
*/ if (!rc && !*bh) *bh = tmp; return rc; } static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci) { struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); return rf->rf_blkno; } static struct super_block * ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci) { struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); return rf->rf_sb; } static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci) { struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); spin_lock(&rf->rf_lock); } static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci) { struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); spin_unlock(&rf->rf_lock); } static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci) { struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); mutex_lock(&rf->rf_io_mutex); } static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci) { struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); mutex_unlock(&rf->rf_io_mutex); } static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = { .co_owner = ocfs2_refcount_cache_owner, .co_get_super = ocfs2_refcount_cache_get_super, .co_cache_lock = ocfs2_refcount_cache_lock, .co_cache_unlock = ocfs2_refcount_cache_unlock, .co_io_lock = ocfs2_refcount_cache_io_lock, .co_io_unlock = ocfs2_refcount_cache_io_unlock, }; static struct ocfs2_refcount_tree * ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno) { struct rb_node *n = osb->osb_rf_lock_tree.rb_node; struct ocfs2_refcount_tree *tree = NULL; while (n) { tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node); if (blkno < tree->rf_blkno) n = n->rb_left; else if (blkno > tree->rf_blkno) n = n->rb_right; else return tree; } return NULL; } /* osb_lock is already locked. 
*/ static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb, struct ocfs2_refcount_tree *new) { u64 rf_blkno = new->rf_blkno; struct rb_node *parent = NULL; struct rb_node **p = &osb->osb_rf_lock_tree.rb_node; struct ocfs2_refcount_tree *tmp; while (*p) { parent = *p; tmp = rb_entry(parent, struct ocfs2_refcount_tree, rf_node); if (rf_blkno < tmp->rf_blkno) p = &(*p)->rb_left; else if (rf_blkno > tmp->rf_blkno) p = &(*p)->rb_right; else { /* This should never happen! */ mlog(ML_ERROR, "Duplicate refcount block %llu found!\n", (unsigned long long)rf_blkno); BUG(); } } rb_link_node(&new->rf_node, parent, p); rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree); } static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree) { ocfs2_metadata_cache_exit(&tree->rf_ci); ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres); ocfs2_lock_res_free(&tree->rf_lockres); kfree(tree); } static inline void ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb, struct ocfs2_refcount_tree *tree) { rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree); if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree) osb->osb_ref_tree_lru = NULL; } static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb, struct ocfs2_refcount_tree *tree) { spin_lock(&osb->osb_lock); ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree); spin_unlock(&osb->osb_lock); } static void ocfs2_kref_remove_refcount_tree(struct kref *kref) { struct ocfs2_refcount_tree *tree = container_of(kref, struct ocfs2_refcount_tree, rf_getcnt); ocfs2_free_refcount_tree(tree); } static inline void ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree) { kref_get(&tree->rf_getcnt); } static inline void ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree) { kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree); } static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new, struct super_block *sb) { ocfs2_metadata_cache_init(&new->rf_ci, 
&ocfs2_refcount_caching_ops); mutex_init(&new->rf_io_mutex); new->rf_sb = sb; spin_lock_init(&new->rf_lock); } static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb, struct ocfs2_refcount_tree *new, u64 rf_blkno, u32 generation) { init_rwsem(&new->rf_sem); ocfs2_refcount_lock_res_init(&new->rf_lockres, osb, rf_blkno, generation); } static struct ocfs2_refcount_tree* ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno) { struct ocfs2_refcount_tree *new; new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS); if (!new) return NULL; new->rf_blkno = rf_blkno; kref_init(&new->rf_getcnt); ocfs2_init_refcount_tree_ci(new, osb->sb); return new; } static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno, struct ocfs2_refcount_tree **ret_tree) { int ret = 0; struct ocfs2_refcount_tree *tree, *new = NULL; struct buffer_head *ref_root_bh = NULL; struct ocfs2_refcount_block *ref_rb; spin_lock(&osb->osb_lock); if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru->rf_blkno == rf_blkno) tree = osb->osb_ref_tree_lru; else tree = ocfs2_find_refcount_tree(osb, rf_blkno); if (tree) goto out; spin_unlock(&osb->osb_lock); new = ocfs2_allocate_refcount_tree(osb, rf_blkno); if (!new) { ret = -ENOMEM; mlog_errno(ret); return ret; } /* * We need the generation to create the refcount tree lock and since * it isn't changed during the tree modification, we are safe here to * read without protection. * We also have to purge the cache after we create the lock since the * refcount block may have the stale data. It can only be trusted when * we hold the refcount lock. 
*/ ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh); if (ret) { mlog_errno(ret); ocfs2_metadata_cache_exit(&new->rf_ci); kfree(new); return ret; } ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; new->rf_generation = le32_to_cpu(ref_rb->rf_generation); ocfs2_init_refcount_tree_lock(osb, new, rf_blkno, new->rf_generation); ocfs2_metadata_cache_purge(&new->rf_ci); spin_lock(&osb->osb_lock); tree = ocfs2_find_refcount_tree(osb, rf_blkno); if (tree) goto out; ocfs2_insert_refcount_tree(osb, new); tree = new; new = NULL; out: *ret_tree = tree; osb->osb_ref_tree_lru = tree; spin_unlock(&osb->osb_lock); if (new) ocfs2_free_refcount_tree(new); brelse(ref_root_bh); return ret; } static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno) { int ret; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; ret = ocfs2_read_inode_block(inode, &di_bh); if (ret) { mlog_errno(ret); goto out; } BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); di = (struct ocfs2_dinode *)di_bh->b_data; *ref_blkno = le64_to_cpu(di->i_refcount_loc); brelse(di_bh); out: return ret; } static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb, struct ocfs2_refcount_tree *tree, int rw) { int ret; ret = ocfs2_refcount_lock(tree, rw); if (ret) { mlog_errno(ret); goto out; } if (rw) down_write(&tree->rf_sem); else down_read(&tree->rf_sem); out: return ret; } /* * Lock the refcount tree pointed by ref_blkno and return the tree. * In most case, we lock the tree and read the refcount block. * So read it here if the caller really needs it. * * If the tree has been re-created by other node, it will free the * old one and re-create it. 
*/ int ocfs2_lock_refcount_tree(struct ocfs2_super *osb, u64 ref_blkno, int rw, struct ocfs2_refcount_tree **ret_tree, struct buffer_head **ref_bh) { int ret, delete_tree = 0; struct ocfs2_refcount_tree *tree = NULL; struct buffer_head *ref_root_bh = NULL; struct ocfs2_refcount_block *rb; again: ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree); if (ret) { mlog_errno(ret); return ret; } ocfs2_refcount_tree_get(tree); ret = __ocfs2_lock_refcount_tree(osb, tree, rw); if (ret) { mlog_errno(ret); ocfs2_refcount_tree_put(tree); goto out; } ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno, &ref_root_bh); if (ret) { mlog_errno(ret); ocfs2_unlock_refcount_tree(osb, tree, rw); ocfs2_refcount_tree_put(tree); goto out; } rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; /* * If the refcount block has been freed and re-created, we may need * to recreate the refcount tree also. * * Here we just remove the tree from the rb-tree, and the last * kref holder will unlock and delete this refcount_tree. * Then we goto "again" and ocfs2_get_refcount_tree will create * the new refcount tree for us. */ if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) { if (!tree->rf_removed) { ocfs2_erase_refcount_tree_from_list(osb, tree); tree->rf_removed = 1; delete_tree = 1; } ocfs2_unlock_refcount_tree(osb, tree, rw); /* * We get an extra reference when we create the refcount * tree, so another put will destroy it. 
*/ if (delete_tree) ocfs2_refcount_tree_put(tree); brelse(ref_root_bh); ref_root_bh = NULL; goto again; } *ret_tree = tree; if (ref_bh) { *ref_bh = ref_root_bh; ref_root_bh = NULL; } out: brelse(ref_root_bh); return ret; } void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb, struct ocfs2_refcount_tree *tree, int rw) { if (rw) up_write(&tree->rf_sem); else up_read(&tree->rf_sem); ocfs2_refcount_unlock(tree, rw); ocfs2_refcount_tree_put(tree); } void ocfs2_purge_refcount_trees(struct ocfs2_super *osb) { struct rb_node *node; struct ocfs2_refcount_tree *tree; struct rb_root *root = &osb->osb_rf_lock_tree; while ((node = rb_last(root)) != NULL) { tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node); mlog(0, "Purge tree %llu\n", (unsigned long long) tree->rf_blkno); rb_erase(&tree->rf_node, root); ocfs2_free_refcount_tree(tree); } } /* * Create a refcount tree for an inode. * We take for granted that the inode is already locked. */ static int ocfs2_create_refcount_tree(struct inode *inode, struct buffer_head *di_bh) { int ret; handle_t *handle = NULL; struct ocfs2_alloc_context *meta_ac = NULL; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct buffer_head *new_bh = NULL; struct ocfs2_refcount_block *rb; struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL; u16 suballoc_bit_start; u32 num_got; u64 suballoc_loc, first_blkno; BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL); mlog(0, "create tree for inode %lu\n", inode->i_ino); ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac); if (ret) { mlog_errno(ret); goto out; } handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } ret = ocfs2_claim_metadata(handle, 
meta_ac, 1, &suballoc_loc, &suballoc_bit_start, &num_got, &first_blkno); if (ret) { mlog_errno(ret); goto out_commit; } new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno); if (!new_tree) { ret = -ENOMEM; mlog_errno(ret); goto out_commit; } new_bh = sb_getblk(inode->i_sb, first_blkno); ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh); ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); goto out_commit; } /* Initialize ocfs2_refcount_block. */ rb = (struct ocfs2_refcount_block *)new_bh->b_data; memset(rb, 0, inode->i_sb->s_blocksize); strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE); rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc); rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start); rb->rf_fs_generation = cpu_to_le32(osb->fs_generation); rb->rf_blkno = cpu_to_le64(first_blkno); rb->rf_count = cpu_to_le32(1); rb->rf_records.rl_count = cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb)); spin_lock(&osb->osb_lock); rb->rf_generation = osb->s_next_generation++; spin_unlock(&osb->osb_lock); ocfs2_journal_dirty(handle, new_bh); spin_lock(&oi->ip_lock); oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL; di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); di->i_refcount_loc = cpu_to_le64(first_blkno); spin_unlock(&oi->ip_lock); mlog(0, "created tree for inode %lu, refblock %llu\n", inode->i_ino, (unsigned long long)first_blkno); ocfs2_journal_dirty(handle, di_bh); /* * We have to init the tree lock here since it will use * the generation number to create it. */ new_tree->rf_generation = le32_to_cpu(rb->rf_generation); ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno, new_tree->rf_generation); spin_lock(&osb->osb_lock); tree = ocfs2_find_refcount_tree(osb, first_blkno); /* * We've just created a new refcount tree in this block. If * we found a refcount tree on the ocfs2_super, it must be * one we just deleted. 
We free the old tree before * inserting the new tree. */ BUG_ON(tree && tree->rf_generation == new_tree->rf_generation); if (tree) ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree); ocfs2_insert_refcount_tree(osb, new_tree); spin_unlock(&osb->osb_lock); new_tree = NULL; if (tree) ocfs2_refcount_tree_put(tree); out_commit: ocfs2_commit_trans(osb, handle); out: if (new_tree) { ocfs2_metadata_cache_exit(&new_tree->rf_ci); kfree(new_tree); } brelse(new_bh); if (meta_ac) ocfs2_free_alloc_context(meta_ac); return ret; } static int ocfs2_set_refcount_tree(struct inode *inode, struct buffer_head *di_bh, u64 refcount_loc) { int ret; handle_t *handle = NULL; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct buffer_head *ref_root_bh = NULL; struct ocfs2_refcount_block *rb; struct ocfs2_refcount_tree *ref_tree; BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL); ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1, &ref_tree, &ref_root_bh); if (ret) { mlog_errno(ret); return ret; } handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; le32_add_cpu(&rb->rf_count, 1); ocfs2_journal_dirty(handle, ref_root_bh); spin_lock(&oi->ip_lock); oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL; di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); di->i_refcount_loc = cpu_to_le64(refcount_loc); spin_unlock(&oi->ip_lock); ocfs2_journal_dirty(handle, di_bh); out_commit: ocfs2_commit_trans(osb, handle); out: ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 
brelse(ref_root_bh); return ret; } int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh) { int ret, delete_tree = 0; handle_t *handle = NULL; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_refcount_block *rb; struct inode *alloc_inode = NULL; struct buffer_head *alloc_bh = NULL; struct buffer_head *blk_bh = NULL; struct ocfs2_refcount_tree *ref_tree; int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS; u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc); u16 bit = 0; if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) return 0; BUG_ON(!ref_blkno); ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh); if (ret) { mlog_errno(ret); return ret; } rb = (struct ocfs2_refcount_block *)blk_bh->b_data; /* * If we are the last user, we need to free the block. * So lock the allocator ahead. */ if (le32_to_cpu(rb->rf_count) == 1) { blk = le64_to_cpu(rb->rf_blkno); bit = le16_to_cpu(rb->rf_suballoc_bit); if (rb->rf_suballoc_loc) bg_blkno = le64_to_cpu(rb->rf_suballoc_loc); else bg_blkno = ocfs2_which_suballoc_group(blk, bit); alloc_inode = ocfs2_get_system_file_inode(osb, EXTENT_ALLOC_SYSTEM_INODE, le16_to_cpu(rb->rf_suballoc_slot)); if (!alloc_inode) { ret = -ENOMEM; mlog_errno(ret); goto out; } mutex_lock(&alloc_inode->i_mutex); ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1); if (ret) { mlog_errno(ret); goto out_mutex; } credits += OCFS2_SUBALLOC_FREE; } handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out_unlock; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } spin_lock(&oi->ip_lock); oi->ip_dyn_features 
&= ~OCFS2_HAS_REFCOUNT_FL; di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); di->i_refcount_loc = 0; spin_unlock(&oi->ip_lock); ocfs2_journal_dirty(handle, di_bh); le32_add_cpu(&rb->rf_count , -1); ocfs2_journal_dirty(handle, blk_bh); if (!rb->rf_count) { delete_tree = 1; ocfs2_erase_refcount_tree_from_list(osb, ref_tree); ret = ocfs2_free_suballoc_bits(handle, alloc_inode, alloc_bh, bit, bg_blkno, 1); if (ret) mlog_errno(ret); } out_commit: ocfs2_commit_trans(osb, handle); out_unlock: if (alloc_inode) { ocfs2_inode_unlock(alloc_inode, 1); brelse(alloc_bh); } out_mutex: if (alloc_inode) { mutex_unlock(&alloc_inode->i_mutex); iput(alloc_inode); } out: ocfs2_unlock_refcount_tree(osb, ref_tree, 1); if (delete_tree) ocfs2_refcount_tree_put(ref_tree); brelse(blk_bh); return ret; } static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci, struct buffer_head *ref_leaf_bh, u64 cpos, unsigned int len, struct ocfs2_refcount_rec *ret_rec, int *index) { int i = 0; struct ocfs2_refcount_block *rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; struct ocfs2_refcount_rec *rec = NULL; for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) { rec = &rb->rf_records.rl_recs[i]; if (le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters) <= cpos) continue; else if (le64_to_cpu(rec->r_cpos) > cpos) break; /* ok, cpos fail in this rec. Just return. */ if (ret_rec) *ret_rec = *rec; goto out; } if (ret_rec) { /* We meet with a hole here, so fake the rec. */ ret_rec->r_cpos = cpu_to_le64(cpos); ret_rec->r_refcount = 0; if (i < le16_to_cpu(rb->rf_records.rl_used) && le64_to_cpu(rec->r_cpos) < cpos + len) ret_rec->r_clusters = cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos); else ret_rec->r_clusters = cpu_to_le32(len); } out: *index = i; } /* * Try to remove refcount tree. The mechanism is: * 1) Check whether i_clusters == 0, if no, exit. * 2) check whether we have i_xattr_loc in dinode. if yes, exit. * 3) Check whether we have inline xattr stored outside, if yes, exit. 
* 4) Remove the tree. */ int ocfs2_try_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh) { int ret; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; down_write(&oi->ip_xattr_sem); down_write(&oi->ip_alloc_sem); if (oi->ip_clusters) goto out; if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc) goto out; if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL && ocfs2_has_inline_xattr_value_outside(inode, di)) goto out; ret = ocfs2_remove_refcount_tree(inode, di_bh); if (ret) mlog_errno(ret); out: up_write(&oi->ip_alloc_sem); up_write(&oi->ip_xattr_sem); return 0; } /* * Find the end range for a leaf refcount block indicated by * el->l_recs[index].e_blkno. */ static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci, struct buffer_head *ref_root_bh, struct ocfs2_extent_block *eb, struct ocfs2_extent_list *el, int index, u32 *cpos_end) { int ret, i, subtree_root; u32 cpos; u64 blkno; struct super_block *sb = ocfs2_metadata_cache_get_super(ci); struct ocfs2_path *left_path = NULL, *right_path = NULL; struct ocfs2_extent_tree et; struct ocfs2_extent_list *tmp_el; if (index < le16_to_cpu(el->l_next_free_rec) - 1) { /* * We have a extent rec after index, so just use the e_cpos * of the next extent rec. */ *cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos); return 0; } if (!eb || (eb && !eb->h_next_leaf_blk)) { /* * We are the last extent rec, so any high cpos should * be stored in this leaf refcount block. */ *cpos_end = UINT_MAX; return 0; } /* * If the extent block isn't the last one, we have to find * the subtree root between this extent block and the next * leaf extent block and get the corresponding e_cpos from * the subroot. Otherwise we may corrupt the b-tree. 
*/ ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); left_path = ocfs2_new_path_from_et(&et); if (!left_path) { ret = -ENOMEM; mlog_errno(ret); goto out; } cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos); ret = ocfs2_find_path(ci, left_path, cpos); if (ret) { mlog_errno(ret); goto out; } right_path = ocfs2_new_path_from_path(left_path); if (!right_path) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_find_path(ci, right_path, cpos); if (ret) { mlog_errno(ret); goto out; } subtree_root = ocfs2_find_subtree_root(&et, left_path, right_path); tmp_el = left_path->p_node[subtree_root].el; blkno = left_path->p_node[subtree_root+1].bh->b_blocknr; for (i = 0; i < le32_to_cpu(tmp_el->l_next_free_rec); i++) { if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) { *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos); break; } } BUG_ON(i == le32_to_cpu(tmp_el->l_next_free_rec)); out: ocfs2_free_path(left_path); ocfs2_free_path(right_path); return ret; } /* * Given a cpos and len, try to find the refcount record which contains cpos. * 1. If cpos can be found in one refcount record, return the record. * 2. If cpos can't be found, return a fake record which start from cpos * and end at a small value between cpos+len and start of the next record. * This fake record has r_refcount = 0. 
 */
static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
				  struct buffer_head *ref_root_bh,
				  u64 cpos, unsigned int len,
				  struct ocfs2_refcount_rec *ret_rec,
				  int *index,
				  struct buffer_head **ret_bh)
{
	int ret = 0, i, found;
	u32 low_cpos, uninitialized_var(cpos_end);
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec = NULL;
	struct ocfs2_extent_block *eb = NULL;
	struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_root_bh->b_data;

	/* Inline root: the records live directly in the root block. */
	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
		ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
					      ret_rec, index);
		*ret_bh = ref_root_bh;
		get_bh(ref_root_bh);
		return 0;
	}

	el = &rb->rf_list;
	/* Only the low 32 bits of cpos are used as the b-tree key. */
	low_cpos = cpos & OCFS2_32BIT_POS_MASK;

	if (el->l_tree_depth) {
		ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		/* A leaf must have depth 0; anything else is corruption. */
		if (el->l_tree_depth) {
			ocfs2_error(sb,
			"refcount tree %llu has non zero tree "
			"depth in leaf btree tree block %llu\n",
			(unsigned long long)ocfs2_metadata_cache_owner(ci),
			(unsigned long long)eb_bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}
	}

	/* Scan backwards for the right-most rec with e_cpos <= low_cpos. */
	found = 0;
	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
			found = 1;
			break;
		}
	}

	if (found) {
		ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
						  eb, el, i, &cpos_end);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* Clamp len so the search stays within this leaf. */
		if (cpos_end < low_cpos + len)
			len = cpos_end - low_cpos;
	}

	/*
	 * NOTE(review): if no rec was found above, rec is still NULL here
	 * and this dereferences it - presumably a well-formed tree always
	 * has a matching record; confirm against on-disk invariants.
	 */
	ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
					&ref_leaf_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
				      ret_rec, index);
	*ret_bh = ref_leaf_bh;
out:
	brelse(eb_bh);
	return ret;
}

/* How a record at some index touches its neighbours. */
enum ocfs2_ref_rec_contig {
	REF_CONTIG_NONE = 0,
	REF_CONTIG_LEFT,
	REF_CONTIG_RIGHT,
	REF_CONTIG_LEFTRIGHT,
};

/*
 * Return REF_CONTIG_RIGHT when rec[index] and rec[index + 1] share the
 * same refcount and are physically adjacent in cpos space.
 */
static enum ocfs2_ref_rec_contig
	ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
				    int index)
{
	if ((rb->rf_records.rl_recs[index].r_refcount ==
	    rb->rf_records.rl_recs[index + 1].r_refcount) &&
	    (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
	    le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
	    le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))

		return REF_CONTIG_RIGHT;

	return REF_CONTIG_NONE;
}

/*
 * Check contiguity of rec[index] with both neighbours, combining the
 * two per-side checks into one of the four enum values.
 */
static enum ocfs2_ref_rec_contig
	ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
				  int index)
{
	enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;

	if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
		ret = ocfs2_refcount_rec_adjacent(rb, index);

	if (index > 0) {
		enum ocfs2_ref_rec_contig tmp;

		tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);

		if (tmp == REF_CONTIG_RIGHT) {
			if (ret == REF_CONTIG_RIGHT)
				ret = REF_CONTIG_LEFTRIGHT;
			else
				ret = REF_CONTIG_LEFT;
		}
	}

	return ret;
}

/*
 * Absorb rec[index + 1] into rec[index] (which must have the same
 * refcount), then slide the remaining records left and shrink rl_used.
 */
static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
					   int index)
{
	BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
	       rb->rf_records.rl_recs[index+1].r_refcount);

	le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
		     le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));

	if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
		memmove(&rb->rf_records.rl_recs[index + 1],
			&rb->rf_records.rl_recs[index + 2],
			sizeof(struct ocfs2_refcount_rec) *
			(le16_to_cpu(rb->rf_records.rl_used) - index - 2));

	/* Clear the now-unused last slot. */
	memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
	       0, sizeof(struct ocfs2_refcount_rec));
	le16_add_cpu(&rb->rf_records.rl_used, -1);
}

/*
 * Merge the refcount rec if we are contiguous with the adjacent recs.
*/ static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb, int index) { enum ocfs2_ref_rec_contig contig = ocfs2_refcount_rec_contig(rb, index); if (contig == REF_CONTIG_NONE) return; if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) { BUG_ON(index == 0); index--; } ocfs2_rotate_refcount_rec_left(rb, index); if (contig == REF_CONTIG_LEFTRIGHT) ocfs2_rotate_refcount_rec_left(rb, index); } /* * Change the refcount indexed by "index" in ref_bh. * If refcount reaches 0, remove it. */ static int ocfs2_change_refcount_rec(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *ref_leaf_bh, int index, int merge, int change) { int ret; struct ocfs2_refcount_block *rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; struct ocfs2_refcount_list *rl = &rb->rf_records; struct ocfs2_refcount_rec *rec = &rl->rl_recs[index]; ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } mlog(0, "change index %d, old count %u, change %d\n", index, le32_to_cpu(rec->r_refcount), change); le32_add_cpu(&rec->r_refcount, change); if (!rec->r_refcount) { if (index != le16_to_cpu(rl->rl_used) - 1) { memmove(rec, rec + 1, (le16_to_cpu(rl->rl_used) - index - 1) * sizeof(struct ocfs2_refcount_rec)); memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1], 0, sizeof(struct ocfs2_refcount_rec)); } le16_add_cpu(&rl->rl_used, -1); } else if (merge) ocfs2_refcount_rec_merge(rb, index); ocfs2_journal_dirty(handle, ref_leaf_bh); out: return ret; } static int ocfs2_expand_inline_ref_root(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *ref_root_bh, struct buffer_head **ref_leaf_bh, struct ocfs2_alloc_context *meta_ac) { int ret; u16 suballoc_bit_start; u32 num_got; u64 suballoc_loc, blkno; struct super_block *sb = ocfs2_metadata_cache_get_super(ci); struct buffer_head *new_bh = NULL; struct ocfs2_refcount_block *new_rb; struct ocfs2_refcount_block *root_rb = (struct 
ocfs2_refcount_block *)ref_root_bh->b_data; ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc, &suballoc_bit_start, &num_got, &blkno); if (ret) { mlog_errno(ret); goto out; } new_bh = sb_getblk(sb, blkno); if (new_bh == NULL) { ret = -EIO; mlog_errno(ret); goto out; } ocfs2_set_new_buffer_uptodate(ci, new_bh); ret = ocfs2_journal_access_rb(handle, ci, new_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); goto out; } /* * Initialize ocfs2_refcount_block. * It should contain the same information as the old root. * so just memcpy it and change the corresponding field. */ memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize); new_rb = (struct ocfs2_refcount_block *)new_bh->b_data; new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc); new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start); new_rb->rf_blkno = cpu_to_le64(blkno); new_rb->rf_cpos = cpu_to_le32(0); new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr); new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL); ocfs2_journal_dirty(handle, new_bh); /* Now change the root. 
*/ memset(&root_rb->rf_list, 0, sb->s_blocksize - offsetof(struct ocfs2_refcount_block, rf_list)); root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb)); root_rb->rf_clusters = cpu_to_le32(1); root_rb->rf_list.l_next_free_rec = cpu_to_le16(1); root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno); root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1); root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL); ocfs2_journal_dirty(handle, ref_root_bh); mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno, le16_to_cpu(new_rb->rf_records.rl_used)); *ref_leaf_bh = new_bh; new_bh = NULL; out: brelse(new_bh); return ret; } static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev, struct ocfs2_refcount_rec *next) { if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <= ocfs2_get_ref_rec_low_cpos(next)) return 1; return 0; } static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b) { const struct ocfs2_refcount_rec *l = a, *r = b; u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l); u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r); if (l_cpos > r_cpos) return 1; if (l_cpos < r_cpos) return -1; return 0; } static int cmp_refcount_rec_by_cpos(const void *a, const void *b) { const struct ocfs2_refcount_rec *l = a, *r = b; u64 l_cpos = le64_to_cpu(l->r_cpos); u64 r_cpos = le64_to_cpu(r->r_cpos); if (l_cpos > r_cpos) return 1; if (l_cpos < r_cpos) return -1; return 0; } static void swap_refcount_rec(void *a, void *b, int size) { struct ocfs2_refcount_rec *l = a, *r = b, tmp; tmp = *(struct ocfs2_refcount_rec *)l; *(struct ocfs2_refcount_rec *)l = *(struct ocfs2_refcount_rec *)r; *(struct ocfs2_refcount_rec *)r = tmp; } /* * The refcount cpos are ordered by their 64bit cpos, * But we will use the low 32 bit to be the e_cpos in the b-tree. * So we need to make sure that this pos isn't intersected with others. 
* * Note: The refcount block is already sorted by their low 32 bit cpos, * So just try the middle pos first, and we will exit when we find * the good position. */ static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl, u32 *split_pos, int *split_index) { int num_used = le16_to_cpu(rl->rl_used); int delta, middle = num_used / 2; for (delta = 0; delta < middle; delta++) { /* Let's check delta earlier than middle */ if (ocfs2_refcount_rec_no_intersect( &rl->rl_recs[middle - delta - 1], &rl->rl_recs[middle - delta])) { *split_index = middle - delta; break; } /* For even counts, don't walk off the end */ if ((middle + delta + 1) == num_used) continue; /* Now try delta past middle */ if (ocfs2_refcount_rec_no_intersect( &rl->rl_recs[middle + delta], &rl->rl_recs[middle + delta + 1])) { *split_index = middle + delta + 1; break; } } if (delta >= middle) return -ENOSPC; *split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]); return 0; } static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh, struct buffer_head *new_bh, u32 *split_cpos) { int split_index = 0, num_moved, ret; u32 cpos = 0; struct ocfs2_refcount_block *rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; struct ocfs2_refcount_list *rl = &rb->rf_records; struct ocfs2_refcount_block *new_rb = (struct ocfs2_refcount_block *)new_bh->b_data; struct ocfs2_refcount_list *new_rl = &new_rb->rf_records; mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n", (unsigned long long)ref_leaf_bh->b_blocknr, le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used)); /* * XXX: Improvement later. * If we know all the high 32 bit cpos is the same, no need to sort. * * In order to make the whole process safe, we do: * 1. sort the entries by their low 32 bit cpos first so that we can * find the split cpos easily. * 2. call ocfs2_insert_extent to insert the new refcount block. * 3. move the refcount rec to the new block. * 4. sort the entries by their 64 bit cpos. * 5. 
dirty the new_rb and rb. */ sort(&rl->rl_recs, le16_to_cpu(rl->rl_used), sizeof(struct ocfs2_refcount_rec), cmp_refcount_rec_by_low_cpos, swap_refcount_rec); ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index); if (ret) { mlog_errno(ret); return ret; } new_rb->rf_cpos = cpu_to_le32(cpos); /* move refcount records starting from split_index to the new block. */ num_moved = le16_to_cpu(rl->rl_used) - split_index; memcpy(new_rl->rl_recs, &rl->rl_recs[split_index], num_moved * sizeof(struct ocfs2_refcount_rec)); /*ok, remove the entries we just moved over to the other block. */ memset(&rl->rl_recs[split_index], 0, num_moved * sizeof(struct ocfs2_refcount_rec)); /* change old and new rl_used accordingly. */ le16_add_cpu(&rl->rl_used, -num_moved); new_rl->rl_used = cpu_to_le16(num_moved); sort(&rl->rl_recs, le16_to_cpu(rl->rl_used), sizeof(struct ocfs2_refcount_rec), cmp_refcount_rec_by_cpos, swap_refcount_rec); sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used), sizeof(struct ocfs2_refcount_rec), cmp_refcount_rec_by_cpos, swap_refcount_rec); *split_cpos = cpos; return 0; } static int ocfs2_new_leaf_refcount_block(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *ref_root_bh, struct buffer_head *ref_leaf_bh, struct ocfs2_alloc_context *meta_ac) { int ret; u16 suballoc_bit_start; u32 num_got, new_cpos; u64 suballoc_loc, blkno; struct super_block *sb = ocfs2_metadata_cache_get_super(ci); struct ocfs2_refcount_block *root_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; struct buffer_head *new_bh = NULL; struct ocfs2_refcount_block *new_rb; struct ocfs2_extent_tree ref_et; BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)); ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_claim_metadata(handle, meta_ac, 1, 
&suballoc_loc, &suballoc_bit_start, &num_got, &blkno); if (ret) { mlog_errno(ret); goto out; } new_bh = sb_getblk(sb, blkno); if (new_bh == NULL) { ret = -EIO; mlog_errno(ret); goto out; } ocfs2_set_new_buffer_uptodate(ci, new_bh); ret = ocfs2_journal_access_rb(handle, ci, new_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); goto out; } /* Initialize ocfs2_refcount_block. */ new_rb = (struct ocfs2_refcount_block *)new_bh->b_data; memset(new_rb, 0, sb->s_blocksize); strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE); new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc); new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start); new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation); new_rb->rf_blkno = cpu_to_le64(blkno); new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr); new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL); new_rb->rf_records.rl_count = cpu_to_le16(ocfs2_refcount_recs_per_rb(sb)); new_rb->rf_generation = root_rb->rf_generation; ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos); if (ret) { mlog_errno(ret); goto out; } ocfs2_journal_dirty(handle, ref_leaf_bh); ocfs2_journal_dirty(handle, new_bh); ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh); mlog(0, "insert new leaf block %llu at %u\n", (unsigned long long)new_bh->b_blocknr, new_cpos); /* Insert the new leaf block with the specific offset cpos. */ ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr, 1, 0, meta_ac); if (ret) mlog_errno(ret); out: brelse(new_bh); return ret; } static int ocfs2_expand_refcount_tree(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *ref_root_bh, struct buffer_head *ref_leaf_bh, struct ocfs2_alloc_context *meta_ac) { int ret; struct buffer_head *expand_bh = NULL; if (ref_root_bh == ref_leaf_bh) { /* * the old root bh hasn't been expanded to a b-tree, * so expand it first. 
		 */
		ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
						   &expand_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	} else {
		/* Already a b-tree; just split this full leaf. */
		expand_bh = ref_leaf_bh;
		get_bh(expand_bh);
	}


	/* Now add a new refcount block into the tree.*/
	ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
					    expand_bh, meta_ac);
	if (ret)
		mlog_errno(ret);
out:
	brelse(expand_bh);
	return ret;
}

/*
 * Adjust the extent rec in b-tree representing ref_leaf_bh.
 *
 * Only called when we have inserted a new refcount rec at index 0
 * which means ocfs2_extent_rec.e_cpos may need some change.
 */
static int ocfs2_adjust_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     struct buffer_head *ref_leaf_bh,
				     struct ocfs2_refcount_rec *rec)
{
	int ret = 0, i;
	u32 new_cpos, old_cpos;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_tree et;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_root_bh->b_data;
	struct ocfs2_extent_list *el;

	/* Inline root: there is no b-tree rec to adjust. */
	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
		goto out;

	rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	old_cpos = le32_to_cpu(rb->rf_cpos);
	new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
	/* The leaf's key only moves down when a smaller cpos was inserted. */
	if (old_cpos <= new_cpos)
		goto out;

	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);

	path = ocfs2_new_path_from_et(&et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(ci, path, old_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * 2 more credits, one for the leaf refcount block, one for
	 * the extent block contains the extent rec.
	 */
	ret = ocfs2_extend_trans(handle, 2);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/* change the leaf extent block first. */
	el = path_leaf_el(path);

	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
		if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
			break;

	BUG_ON(i == le16_to_cpu(el->l_next_free_rec));

	el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);

	/* change the r_cpos in the leaf block. */
	rb->rf_cpos = cpu_to_le32(new_cpos);

	ocfs2_journal_dirty(handle, path_leaf_bh(path));
	ocfs2_journal_dirty(handle, ref_leaf_bh);

out:
	ocfs2_free_path(path);
	return ret;
}

/*
 * Insert *rec into ref_leaf_bh at position "index", expanding the tree
 * first if the leaf is full, and optionally merging with neighbours.
 */
static int ocfs2_insert_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     struct buffer_head *ref_leaf_bh,
				     struct ocfs2_refcount_rec *rec,
				     int index, int merge,
				     struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
	struct buffer_head *new_bh = NULL;

	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);

	if (rf_list->rl_used == rf_list->rl_count) {
		/* Leaf is full - split it and re-locate our position. */
		u64 cpos = le64_to_cpu(rec->r_cpos);
		u32 len = le32_to_cpu(rec->r_clusters);

		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
						 ref_leaf_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, NULL, &index,
					     &new_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ref_leaf_bh = new_bh;
		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
		rf_list = &rb->rf_records;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Shift later records right to open a slot at "index". */
	if (index < le16_to_cpu(rf_list->rl_used))
		memmove(&rf_list->rl_recs[index + 1],
			&rf_list->rl_recs[index],
			(le16_to_cpu(rf_list->rl_used) - index) *
			 sizeof(struct ocfs2_refcount_rec));

	mlog(0, "insert refcount record start %llu, len %u, count %u "
	     "to leaf block %llu at index %d\n",
	     (unsigned long long)le64_to_cpu(rec->r_cpos),
	     le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount),
	     (unsigned long long)ref_leaf_bh->b_blocknr, index);

	rf_list->rl_recs[index] = *rec;

	le16_add_cpu(&rf_list->rl_used, 1);

	if (merge)
		ocfs2_refcount_rec_merge(rb, index);

	ocfs2_journal_dirty(handle, ref_leaf_bh);

	/* Inserting at index 0 may lower the leaf's key in the b-tree. */
	if (index == 0) {
		ret = ocfs2_adjust_refcount_rec(handle, ci,
						ref_root_bh,
						ref_leaf_bh, rec);
		if (ret)
			mlog_errno(ret);
	}
out:
	brelse(new_bh);
	return ret;
}

/*
 * Split the refcount_rec indexed by "index" in ref_leaf_bh.
 * This is much simple than our b-tree code.
 * split_rec is the new refcount rec we want to insert.
 * If split_rec->r_refcount > 0, we are changing the refcount(in case we
 * increase refcount or decrease a refcount to non-zero).
 * If split_rec->r_refcount == 0, we are punching a hole in current refcount
 * rec( in case we decrease a refcount to zero).
 */
static int ocfs2_split_refcount_rec(handle_t *handle,
				    struct ocfs2_caching_info *ci,
				    struct buffer_head *ref_root_bh,
				    struct buffer_head *ref_leaf_bh,
				    struct ocfs2_refcount_rec *split_rec,
				    int index, int merge,
				    struct ocfs2_alloc_context *meta_ac,
				    struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret, recs_need;
	u32 len;
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
	struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
	struct ocfs2_refcount_rec *tail_rec = NULL;
	struct buffer_head *new_bh = NULL;

	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);

	mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n",
	     le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters),
	     le64_to_cpu(split_rec->r_cpos),
	     le32_to_cpu(split_rec->r_clusters));

	/*
	 * If we just need to split the header or tail clusters,
	 * no more recs are needed, just split is OK.
	 * Otherwise we at least need one new recs.
	 */
	if (!split_rec->r_refcount &&
	    (split_rec->r_cpos == orig_rec->r_cpos ||
	     le64_to_cpu(split_rec->r_cpos) +
	     le32_to_cpu(split_rec->r_clusters) ==
	     le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
		recs_need = 0;
	else
		recs_need = 1;

	/*
	 * We need one more rec if we split in the middle and the new rec have
	 * some refcount in it.
	 */
	if (split_rec->r_refcount &&
	    (split_rec->r_cpos != orig_rec->r_cpos &&
	     le64_to_cpu(split_rec->r_cpos) +
	     le32_to_cpu(split_rec->r_clusters) !=
	     le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
		recs_need++;

	/* If the leaf block don't have enough record, expand it. */
	if (le16_to_cpu(rf_list->rl_used) + recs_need >
					 le16_to_cpu(rf_list->rl_count)) {
		struct ocfs2_refcount_rec tmp_rec;
		u64 cpos = le64_to_cpu(orig_rec->r_cpos);
		len = le32_to_cpu(orig_rec->r_clusters);
		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
						 ref_leaf_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * We have to re-get it since now cpos may be moved to
		 * another leaf block.
		 */
		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, &tmp_rec, &index,
					     &new_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ref_leaf_bh = new_bh;
		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
		rf_list = &rb->rf_records;
		orig_rec = &rf_list->rl_recs[index];
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * We have calculated out how many new records we need and store
	 * in recs_need, so spare enough space first by moving the records
	 * after "index" to the end.
	 */
	if (index != le16_to_cpu(rf_list->rl_used) - 1)
		memmove(&rf_list->rl_recs[index + 1 + recs_need],
			&rf_list->rl_recs[index + 1],
			(le16_to_cpu(rf_list->rl_used) - index - 1) *
			 sizeof(struct ocfs2_refcount_rec));

	len = (le64_to_cpu(orig_rec->r_cpos) +
	      le32_to_cpu(orig_rec->r_clusters)) -
	      (le64_to_cpu(split_rec->r_cpos) +
	      le32_to_cpu(split_rec->r_clusters));

	/*
	 * If we have "len", the we will split in the tail and move it
	 * to the end of the space we have just spared.
	 */
	if (len) {
		tail_rec = &rf_list->rl_recs[index + recs_need];

		memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
		le64_add_cpu(&tail_rec->r_cpos,
			     le32_to_cpu(tail_rec->r_clusters) - len);
		tail_rec->r_clusters = cpu_to_le32(len);
	}

	/*
	 * If the split pos isn't the same as the original one, we need to
	 * split in the head.
	 *
	 * Note: We have the chance that split_rec.r_refcount = 0,
	 * recs_need = 0 and len > 0, which means we just cut the head from
	 * the orig_rec and in that case we have done some modification in
	 * orig_rec above, so the check for r_cpos is faked.
	 */
	if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
		len = le64_to_cpu(split_rec->r_cpos) -
		      le64_to_cpu(orig_rec->r_cpos);
		orig_rec->r_clusters = cpu_to_le32(len);
		index++;
	}

	le16_add_cpu(&rf_list->rl_used, recs_need);

	if (split_rec->r_refcount) {
		rf_list->rl_recs[index] = *split_rec;
		mlog(0, "insert refcount record start %llu, len %u, count %u "
		     "to leaf block %llu at index %d\n",
		     (unsigned long long)le64_to_cpu(split_rec->r_cpos),
		     le32_to_cpu(split_rec->r_clusters),
		     le32_to_cpu(split_rec->r_refcount),
		     (unsigned long long)ref_leaf_bh->b_blocknr, index);

		if (merge)
			ocfs2_refcount_rec_merge(rb, index);
	}

	ocfs2_journal_dirty(handle, ref_leaf_bh);

out:
	brelse(new_bh);
	return ret;
}

/*
 * Raise the refcount of [cpos, cpos + len) by one, record by record,
 * inserting new records for holes and splitting partially covered ones.
 */
static int __ocfs2_increase_refcount(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     u64 cpos, u32 len, int merge,
				     struct ocfs2_alloc_context *meta_ac,
				     struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret = 0, index;
	struct buffer_head *ref_leaf_bh = NULL;
	struct ocfs2_refcount_rec rec;
	unsigned int set_len = 0;

	mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n",
	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
	     (unsigned long long)cpos, len);

	while (len) {
		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, &rec, &index,
					     &ref_leaf_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		set_len = le32_to_cpu(rec.r_clusters);

		/*
		 * Here we may meet with 3 situations:
		 *
		 * 1. If we find an already existing record, and the length
		 *    is the same, cool, we just need to increase the r_refcount
		 *    and it is OK.
		 * 2. If we find a hole, just insert it with r_refcount = 1.
		 * 3. If we are in the middle of one extent record, split
		 *    it.
		 */
		if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
		    set_len <= len) {
			/* Case 1: exact record - just bump the count. */
			mlog(0, "increase refcount rec, start %llu, len %u, "
			     "count %u\n", (unsigned long long)cpos, set_len,
			     le32_to_cpu(rec.r_refcount));
			ret = ocfs2_change_refcount_rec(handle, ci,
							ref_leaf_bh, index,
							merge, 1);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		} else if (!rec.r_refcount) {
			/* Case 2: a hole - insert a fresh rec with count 1. */
			rec.r_refcount = cpu_to_le32(1);

			mlog(0, "insert refcount rec, start %llu, len %u\n",
			     (unsigned long long)le64_to_cpu(rec.r_cpos),
			     set_len);
			ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
							ref_leaf_bh,
							&rec, index,
							merge, meta_ac);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		} else {
			/* Case 3: partial overlap - split the record. */
			set_len = min((u64)(cpos + len),
				      le64_to_cpu(rec.r_cpos) + set_len) - cpos;
			rec.r_cpos = cpu_to_le64(cpos);
			rec.r_clusters = cpu_to_le32(set_len);
			le32_add_cpu(&rec.r_refcount, 1);

			mlog(0, "split refcount rec, start %llu, "
			     "len %u, count %u\n",
			     (unsigned long long)le64_to_cpu(rec.r_cpos),
			     set_len, le32_to_cpu(rec.r_refcount));
			ret = ocfs2_split_refcount_rec(handle, ci,
						       ref_root_bh, ref_leaf_bh,
						       &rec, index, merge,
						       meta_ac, dealloc);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}

		cpos += set_len;
		len -= set_len;
		brelse(ref_leaf_bh);
		ref_leaf_bh = NULL;
	}

out:
	brelse(ref_leaf_bh);
	return ret;
}

/*
 * Remove an empty leaf refcount block from the refcount b-tree and
 * queue its metadata block for deallocation.
 */
static int ocfs2_remove_refcount_extent(handle_t *handle,
				struct ocfs2_caching_info *ci,
				struct buffer_head *ref_root_bh,
				struct buffer_head *ref_leaf_bh,
				struct ocfs2_alloc_context *meta_ac,
				struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_extent_tree et;

	/* Only an empty leaf may be removed. */
	BUG_ON(rb->rf_records.rl_used);

	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
	ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
				  1, meta_ac, dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_remove_from_cache(ci, ref_leaf_bh);

	/*
	 * add the freed block to the dealloc so that it will be freed
	 * when we run dealloc.
	 */
	ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
					le16_to_cpu(rb->rf_suballoc_slot),
					le64_to_cpu(rb->rf_suballoc_loc),
					le64_to_cpu(rb->rf_blkno),
					le16_to_cpu(rb->rf_suballoc_bit));
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;

	le32_add_cpu(&rb->rf_clusters, -1);

	/*
	 * check whether we need to restore the root refcount block if
	 * there is no leaf extent block at atll.
	 */
	if (!rb->rf_list.l_next_free_rec) {
		BUG_ON(rb->rf_clusters);

		mlog(0, "reset refcount tree root %llu to be a record block.\n",
		     (unsigned long long)ref_root_bh->b_blocknr);

		rb->rf_flags = 0;
		rb->rf_parent = 0;
		rb->rf_cpos = 0;
		memset(&rb->rf_records, 0, sb->s_blocksize -
		       offsetof(struct ocfs2_refcount_block, rf_records));
		rb->rf_records.rl_count =
				cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
	}

	ocfs2_journal_dirty(handle, ref_root_bh);

out:
	return ret;
}

/* Public wrapper: increase with neighbour-merging always enabled. */
int ocfs2_increase_refcount(handle_t *handle,
			    struct ocfs2_caching_info *ci,
			    struct buffer_head *ref_root_bh,
			    u64 cpos, u32 len,
			    struct ocfs2_alloc_context *meta_ac,
			    struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	return __ocfs2_increase_refcount(handle, ci, ref_root_bh,
					 cpos, len, 1,
					 meta_ac, dealloc);
}

/*
 * Drop the refcount of [cpos, cpos + len), which must lie entirely
 * inside the record at "index"; splits the record when it is only
 * partially covered, and removes the leaf if it becomes empty.
 */
static int ocfs2_decrease_refcount_rec(handle_t *handle,
				struct ocfs2_caching_info *ci,
				struct buffer_head *ref_root_bh,
				struct buffer_head *ref_leaf_bh,
				int index, u64 cpos, unsigned int len,
				struct ocfs2_alloc_context *meta_ac,
				struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];

	BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
	BUG_ON(cpos + len >
	       le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));

	if (cpos == le64_to_cpu(rec->r_cpos) &&
	    len == le32_to_cpu(rec->r_clusters))
		ret =
ocfs2_change_refcount_rec(handle, ci,
					      ref_leaf_bh, index, 1, -1);
	else {
		/* Partial coverage: split out the affected range first. */
		struct ocfs2_refcount_rec split = *rec;
		split.r_cpos = cpu_to_le64(cpos);
		split.r_clusters = cpu_to_le32(len);

		le32_add_cpu(&split.r_refcount, -1);

		mlog(0, "split refcount rec, start %llu, "
		     "len %u, count %u, original start %llu, len %u\n",
		     (unsigned long long)le64_to_cpu(split.r_cpos),
		     len, le32_to_cpu(split.r_refcount),
		     (unsigned long long)le64_to_cpu(rec->r_cpos),
		     le32_to_cpu(rec->r_clusters));
		ret = ocfs2_split_refcount_rec(handle, ci,
					       ref_root_bh, ref_leaf_bh,
					       &split, index, 1,
					       meta_ac, dealloc);
	}

	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Remove the leaf refcount block if it contains no refcount record. */
	if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
		ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
						   ref_leaf_bh, meta_ac,
						   dealloc);
		if (ret)
			mlog_errno(ret);
	}

out:
	return ret;
}

/*
 * Walk [cpos, cpos + len) record by record and drop each refcount by
 * one. When "delete" is set, clusters whose count drops to zero are
 * queued on "dealloc" for freeing.
 */
static int __ocfs2_decrease_refcount(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     u64 cpos, u32 len,
				     struct ocfs2_alloc_context *meta_ac,
				     struct ocfs2_cached_dealloc_ctxt *dealloc,
				     int delete)
{
	int ret = 0, index = 0;
	struct ocfs2_refcount_rec rec;
	unsigned int r_count = 0, r_len;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct buffer_head *ref_leaf_bh = NULL;

	mlog(0, "Tree owner %llu, decrease refcount start %llu, "
	     "len %u, delete %u\n",
	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
	     (unsigned long long)cpos, len, delete);

	while (len) {
		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, &rec, &index,
					     &ref_leaf_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		r_count = le32_to_cpu(rec.r_refcount);
		BUG_ON(r_count == 0);
		/* Without delete, only singly-referenced ranges may shrink. */
		if (!delete)
			BUG_ON(r_count > 1);

		r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
			      le32_to_cpu(rec.r_clusters)) - cpos;

		ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
						  ref_leaf_bh, index,
						  cpos, r_len,
						  meta_ac, dealloc);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* Count hit zero - the clusters themselves can be freed. */
		if (le32_to_cpu(rec.r_refcount) == 1 && delete) {
			ret = ocfs2_cache_cluster_dealloc(dealloc,
					  ocfs2_clusters_to_blocks(sb, cpos),
					  r_len);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}

		cpos += r_len;
		len -= r_len;
		brelse(ref_leaf_bh);
		ref_leaf_bh = NULL;
	}

out:
	brelse(ref_leaf_bh);
	return ret;
}

/* Caller must hold refcount tree lock. */
int ocfs2_decrease_refcount(struct inode *inode,
			    handle_t *handle, u32 cpos, u32 len,
			    struct ocfs2_alloc_context *meta_ac,
			    struct ocfs2_cached_dealloc_ctxt *dealloc,
			    int delete)
{
	int ret;
	u64 ref_blkno;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_tree *tree;

	BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));

	ret = ocfs2_get_refcount_block(inode, &ref_blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
					&ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
					cpos, len, meta_ac, dealloc, delete);
	if (ret)
		mlog_errno(ret);
out:
	brelse(ref_root_bh);
	return ret;
}

/*
 * Mark the already-existing extent at cpos as refcounted for len clusters.
 * This adds the refcount extent flag.
 *
 * If the existing extent is larger than the request, initiate a
 * split. An attempt will be made at merging with adjacent extents.
 *
 * The caller is responsible for passing down meta_ac if we'll need it.
*/ static int ocfs2_mark_extent_refcounted(struct inode *inode, struct ocfs2_extent_tree *et, handle_t *handle, u32 cpos, u32 len, u32 phys, struct ocfs2_alloc_context *meta_ac, struct ocfs2_cached_dealloc_ctxt *dealloc) { int ret; mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n", inode->i_ino, cpos, len, phys); if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { ocfs2_error(inode->i_sb, "Inode %lu want to use refcount " "tree, but the feature bit is not set in the " "super block.", inode->i_ino); ret = -EROFS; goto out; } ret = ocfs2_change_extent_flag(handle, et, cpos, len, phys, meta_ac, dealloc, OCFS2_EXT_REFCOUNTED, 0); if (ret) mlog_errno(ret); out: return ret; } /* * Given some contiguous physical clusters, calculate what we need * for modifying their refcount. */ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb, struct ocfs2_caching_info *ci, struct buffer_head *ref_root_bh, u64 start_cpos, u32 clusters, int *meta_add, int *credits) { int ret = 0, index, ref_blocks = 0, recs_add = 0; u64 cpos = start_cpos; struct ocfs2_refcount_block *rb; struct ocfs2_refcount_rec rec; struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL; u32 len; mlog(0, "start_cpos %llu, clusters %u\n", (unsigned long long)start_cpos, clusters); while (clusters) { ret = ocfs2_get_refcount_rec(ci, ref_root_bh, cpos, clusters, &rec, &index, &ref_leaf_bh); if (ret) { mlog_errno(ret); goto out; } if (ref_leaf_bh != prev_bh) { /* * Now we encounter a new leaf block, so calculate * whether we need to extend the old leaf. 
*/ if (prev_bh) { rb = (struct ocfs2_refcount_block *) prev_bh->b_data; if (le64_to_cpu(rb->rf_records.rl_used) + recs_add > le16_to_cpu(rb->rf_records.rl_count)) ref_blocks++; } recs_add = 0; *credits += 1; brelse(prev_bh); prev_bh = ref_leaf_bh; get_bh(prev_bh); } rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; mlog(0, "recs_add %d,cpos %llu, clusters %u, rec->r_cpos %llu," "rec->r_clusters %u, rec->r_refcount %u, index %d\n", recs_add, (unsigned long long)cpos, clusters, (unsigned long long)le64_to_cpu(rec.r_cpos), le32_to_cpu(rec.r_clusters), le32_to_cpu(rec.r_refcount), index); len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) + le32_to_cpu(rec.r_clusters)) - cpos; /* * We record all the records which will be inserted to the * same refcount block, so that we can tell exactly whether * we need a new refcount block or not. * * If we will insert a new one, this is easy and only happens * during adding refcounted flag to the extent, so we don't * have a chance of spliting. We just need one record. * * If the refcount rec already exists, that would be a little * complicated. we may have to: * 1) split at the beginning if the start pos isn't aligned. * we need 1 more record in this case. * 2) split int the end if the end pos isn't aligned. * we need 1 more record in this case. * 3) split in the middle because of file system fragmentation. * we need 2 more records in this case(we can't detect this * beforehand, so always think of the worst case). */ if (rec.r_refcount) { recs_add += 2; /* Check whether we need a split at the beginning. */ if (cpos == start_cpos && cpos != le64_to_cpu(rec.r_cpos)) recs_add++; /* Check whether we need a split in the end. 
*/ if (cpos + clusters < le64_to_cpu(rec.r_cpos) + le32_to_cpu(rec.r_clusters)) recs_add++; } else recs_add++; brelse(ref_leaf_bh); ref_leaf_bh = NULL; clusters -= len; cpos += len; } if (prev_bh) { rb = (struct ocfs2_refcount_block *)prev_bh->b_data; if (le64_to_cpu(rb->rf_records.rl_used) + recs_add > le16_to_cpu(rb->rf_records.rl_count)) ref_blocks++; *credits += 1; } if (!ref_blocks) goto out; mlog(0, "we need ref_blocks %d\n", ref_blocks); *meta_add += ref_blocks; *credits += ref_blocks; /* * So we may need ref_blocks to insert into the tree. * That also means we need to change the b-tree and add that number * of records since we never merge them. * We need one more block for expansion since the new created leaf * block is also full and needs split. */ rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) { struct ocfs2_extent_tree et; ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); *meta_add += ocfs2_extend_meta_needed(et.et_root_el); *credits += ocfs2_calc_extend_credits(sb, et.et_root_el, ref_blocks); } else { *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS; *meta_add += 1; } out: brelse(ref_leaf_bh); brelse(prev_bh); return ret; } /* * For refcount tree, we will decrease some contiguous clusters * refcount count, so just go through it to see how many blocks * we gonna touch and whether we need to create new blocks. * * Normally the refcount blocks store these refcount should be * contiguous also, so that we can get the number easily. * We will at most add split 2 refcount records and 2 more * refcount blocks, so just check it in a rough way. * * Caller must hold refcount tree lock. 
*/ int ocfs2_prepare_refcount_change_for_del(struct inode *inode, u64 refcount_loc, u64 phys_blkno, u32 clusters, int *credits, int *ref_blocks) { int ret; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct buffer_head *ref_root_bh = NULL; struct ocfs2_refcount_tree *tree; u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno); if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { ocfs2_error(inode->i_sb, "Inode %lu want to use refcount " "tree, but the feature bit is not set in the " "super block.", inode->i_ino); ret = -EROFS; goto out; } BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), refcount_loc, &tree); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc, &ref_root_bh); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_calc_refcount_meta_credits(inode->i_sb, &tree->rf_ci, ref_root_bh, start_cpos, clusters, ref_blocks, credits); if (ret) { mlog_errno(ret); goto out; } mlog(0, "reserve new metadata %d blocks, credits = %d\n", *ref_blocks, *credits); out: brelse(ref_root_bh); return ret; } #define MAX_CONTIG_BYTES 1048576 static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb) { return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES); } static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb) { return ~(ocfs2_cow_contig_clusters(sb) - 1); } /* * Given an extent that starts at 'start' and an I/O that starts at 'cpos', * find an offset (start + (n * contig_clusters)) that is closest to cpos * while still being less than or equal to it. * * The goal is to break the extent at a multiple of contig_clusters. */ static inline unsigned int ocfs2_cow_align_start(struct super_block *sb, unsigned int start, unsigned int cpos) { BUG_ON(start > cpos); return start + ((cpos - start) & ocfs2_cow_contig_mask(sb)); } /* * Given a cluster count of len, pad it out so that it is a multiple * of contig_clusters. 
 */
static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
						  unsigned int len)
{
	unsigned int padded =
		(len + (ocfs2_cow_contig_clusters(sb) - 1)) &
		ocfs2_cow_contig_mask(sb);

	/* Did we wrap? */
	if (padded < len)
		padded = UINT_MAX;

	return padded;
}

/*
 * Calculate out the start and number of virtual clusters we need to to CoW.
 *
 * cpos is vitual start cluster position we want to do CoW in a
 * file and write_len is the cluster length.
 * max_cpos is the place where we want to stop CoW intentionally.
 *
 * Normal we will start CoW from the beginning of extent record cotaining cpos.
 * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we
 * get good I/O from the resulting extent tree.
 */
static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
					   struct ocfs2_extent_list *el,
					   u32 cpos,
					   u32 write_len,
					   u32 max_cpos,
					   u32 *cow_start,
					   u32 *cow_len)
{
	int ret = 0;
	int tree_height = le16_to_cpu(el->l_tree_depth), i;
	struct buffer_head *eb_bh = NULL;
	struct ocfs2_extent_block *eb = NULL;
	struct ocfs2_extent_rec *rec;
	unsigned int want_clusters, rec_end = 0;
	int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
	int leaf_clusters;

	BUG_ON(cpos + write_len > max_cpos);

	/* Descend to the leaf extent list that contains cpos. */
	if (tree_height > 0) {
		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ocfs2_error(inode->i_sb,
				    "Inode %lu has non zero tree depth in "
				    "leaf block %llu\n", inode->i_ino,
				    (unsigned long long)eb_bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}
	}

	*cow_len = 0;
	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
		rec = &el->l_recs[i];

		if (ocfs2_is_empty_extent(rec)) {
			mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
					"index %d\n", inode->i_ino, i);
			continue;
		}

		/* Skip records that end before cpos. */
		if (le32_to_cpu(rec->e_cpos) +
		    le16_to_cpu(rec->e_leaf_clusters) <= cpos)
			continue;

		if (*cow_len == 0) {
			/*
			 * We should find a refcounted record in the
			 * first pass.
			 */
			BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
			*cow_start = le32_to_cpu(rec->e_cpos);
		}

		/*
		 * If we encounter a hole, a non-refcounted record or
		 * pass the max_cpos, stop the search.
		 */
		if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
		    (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) ||
		    (max_cpos <= le32_to_cpu(rec->e_cpos)))
			break;

		leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
		rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
		if (rec_end > max_cpos) {
			rec_end = max_cpos;
			leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos);
		}

		/*
		 * How many clusters do we actually need from
		 * this extent?  First we see how many we actually
		 * need to complete the write.  If that's smaller
		 * than contig_clusters, we try for contig_clusters.
		 */
		if (!*cow_len)
			want_clusters = write_len;
		else
			want_clusters = (cpos + write_len) -
				(*cow_start + *cow_len);
		if (want_clusters < contig_clusters)
			want_clusters = contig_clusters;

		/*
		 * If the write does not cover the whole extent, we
		 * need to calculate how we're going to split the extent.
		 * We try to do it on contig_clusters boundaries.
		 *
		 * Any extent smaller than contig_clusters will be
		 * CoWed in its entirety.
		 */
		if (leaf_clusters <= contig_clusters)
			*cow_len += leaf_clusters;
		else if (*cow_len || (*cow_start == cpos)) {
			/*
			 * This extent needs to be CoW'd from its
			 * beginning, so all we have to do is compute
			 * how many clusters to grab.  We align
			 * want_clusters to the edge of contig_clusters
			 * to get better I/O.
			 */
			want_clusters = ocfs2_cow_align_length(inode->i_sb,
							       want_clusters);

			if (leaf_clusters < want_clusters)
				*cow_len += leaf_clusters;
			else
				*cow_len += want_clusters;
		} else if ((*cow_start + contig_clusters) >=
			   (cpos + write_len)) {
			/*
			 * Breaking off contig_clusters at the front
			 * of the extent will cover our write.  That's
			 * easy.
			 */
			*cow_len = contig_clusters;
		} else if ((rec_end - cpos) <= contig_clusters) {
			/*
			 * Breaking off contig_clusters at the tail of
			 * this extent will cover cpos.
			 */
			*cow_start = rec_end - contig_clusters;
			*cow_len = contig_clusters;
		} else if ((rec_end - cpos) <= want_clusters) {
			/*
			 * While we can't fit the entire write in this
			 * extent, we know that the write goes from cpos
			 * to the end of the extent.  Break that off.
			 * We try to break it at some multiple of
			 * contig_clusters from the front of the extent.
			 * Failing that (ie, cpos is within
			 * contig_clusters of the front), we'll CoW the
			 * entire extent.
			 */
			*cow_start = ocfs2_cow_align_start(inode->i_sb,
							   *cow_start, cpos);
			*cow_len = rec_end - *cow_start;
		} else {
			/*
			 * Ok, the entire write lives in the middle of
			 * this extent.  Let's try to slice the extent up
			 * nicely.  Optimally, our CoW region starts at
			 * m*contig_clusters from the beginning of the
			 * extent and goes for n*contig_clusters,
			 * covering the entire write.
			 */
			*cow_start = ocfs2_cow_align_start(inode->i_sb,
							   *cow_start, cpos);

			want_clusters = (cpos + write_len) - *cow_start;
			want_clusters = ocfs2_cow_align_length(inode->i_sb,
							       want_clusters);
			if (*cow_start + want_clusters <= rec_end)
				*cow_len = want_clusters;
			else
				*cow_len = rec_end - *cow_start;
		}

		/* Have we covered our entire write yet? */
		if ((*cow_start + *cow_len) >= (cpos + write_len))
			break;

		/*
		 * If we reach the end of the extent block and don't get enough
		 * clusters, continue with the next extent block if possible.
		 *
		 * NOTE(review): eb->h_next_leaf_blk is read after
		 * brelse(eb_bh); this presumably relies on the buffer data
		 * staying valid after the ref drop — confirm, or snapshot the
		 * block number before the brelse.
		 */
		if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
		    eb && eb->h_next_leaf_blk) {
			brelse(eb_bh);
			eb_bh = NULL;

			ret = ocfs2_read_extent_block(INODE_CACHE(inode),
					    le64_to_cpu(eb->h_next_leaf_blk),
					    &eb_bh);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			eb = (struct ocfs2_extent_block *) eb_bh->b_data;
			el = &eb->h_list;
			i = -1;
		}
	}

out:
	brelse(eb_bh);
	return ret;
}

/*
 * Prepare meta_ac, data_ac and calculate credits when we want to add some
 * num_clusters in data_tree "et" and change the refcount for the old
 * clusters(starting form p_cluster) in the refcount tree.
 *
 * Note:
 * 1.
since we may split the old tree, so we at most will need num_clusters + 2
 *    more new leaf records.
 * 2. In some case, we may not need to reserve new clusters(e.g, reflink), so
 *    just give data_ac = NULL.
 */
static int ocfs2_lock_refcount_allocators(struct super_block *sb,
					u32 p_cluster, u32 num_clusters,
					struct ocfs2_extent_tree *et,
					struct ocfs2_caching_info *ref_ci,
					struct buffer_head *ref_root_bh,
					struct ocfs2_alloc_context **meta_ac,
					struct ocfs2_alloc_context **data_ac,
					int *credits)
{
	int ret = 0, meta_add = 0;
	int num_free_extents = ocfs2_num_free_extents(OCFS2_SB(sb), et);

	if (num_free_extents < 0) {
		ret = num_free_extents;
		mlog_errno(ret);
		goto out;
	}

	if (num_free_extents < num_clusters + 2)
		meta_add =
			ocfs2_extend_meta_needed(et->et_root_el);

	*credits += ocfs2_calc_extend_credits(sb, et->et_root_el,
					      num_clusters + 2);

	ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
					       p_cluster, num_clusters,
					       &meta_add, credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n",
	     meta_add, num_clusters, *credits);
	ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
						meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (data_ac) {
		ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
					     data_ac);
		if (ret)
			mlog_errno(ret);
	}

out:
	/* On failure, release any meta allocator we already reserved. */
	if (ret) {
		if (*meta_ac) {
			ocfs2_free_alloc_context(*meta_ac);
			*meta_ac = NULL;
		}
	}

	return ret;
}

/* walk_page_buffers() callback: drop the stale mapping before CoW. */
static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
{
	BUG_ON(buffer_dirty(bh));

	clear_buffer_mapped(bh);

	return 0;
}

/*
 * Copy the data of the old clusters to the newly-allocated ones through
 * the page cache, page by page, then mark the pages dirty.
 */
static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
					    struct ocfs2_cow_context *context,
					    u32 cpos, u32 old_cluster,
					    u32 new_cluster, u32 new_len)
{
	int ret = 0, partial;
	struct ocfs2_caching_info *ci = context->data_et.et_ci;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
	struct page *page;
	pgoff_t page_index;
	unsigned int from, to;
	loff_t offset, end, map_end;
	struct address_space *mapping = context->inode->i_mapping;

	mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster,
	     new_cluster, new_len, cpos);

	offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
	end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
	/*
	 * We only duplicate pages until we reach the page contains i_size - 1.
	 * So trim 'end' to i_size.
	 */
	if (end > i_size_read(context->inode))
		end = i_size_read(context->inode);

	while (offset < end) {
		page_index = offset >> PAGE_CACHE_SHIFT;
		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
		if (map_end > end)
			map_end = end;

		/* from, to is the offset within the page. */
		from = offset & (PAGE_CACHE_SIZE - 1);
		to = PAGE_CACHE_SIZE;
		if (map_end & (PAGE_CACHE_SIZE - 1))
			to = map_end & (PAGE_CACHE_SIZE - 1);

		page = grab_cache_page(mapping, page_index);

		/*
		 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
		 * can't be dirtied before we CoW it out.
		 */
		if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
			BUG_ON(PageDirty(page));

		if (!PageUptodate(page)) {
			/* block_read_full_page() unlocks the page; re-lock. */
			ret = block_read_full_page(page, ocfs2_get_block);
			if (ret) {
				mlog_errno(ret);
				goto unlock;
			}
			lock_page(page);
		}

		if (page_has_buffers(page)) {
			ret = walk_page_buffers(handle, page_buffers(page),
						from, to, &partial,
						ocfs2_clear_cow_buffer);
			if (ret) {
				mlog_errno(ret);
				goto unlock;
			}
		}

		ocfs2_map_and_dirty_page(context->inode,
					 handle, from, to,
					 page, 0, &new_block);
		mark_page_accessed(page);
unlock:
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
		offset = map_end;
		if (ret)
			break;
	}

	return ret;
}

/*
 * Copy old clusters to new ones directly through the journal (used for
 * xattr values, which have no page-cache backing).
 */
static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
					   struct ocfs2_cow_context *context,
					   u32 cpos, u32 old_cluster,
					   u32 new_cluster, u32 new_len)
{
	int ret = 0;
	struct super_block *sb = context->inode->i_sb;
	struct ocfs2_caching_info *ci = context->data_et.et_ci;
	int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
	u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
	u64 new_block = ocfs2_clusters_to_blocks(sb,
						 new_cluster);
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct buffer_head *old_bh = NULL;
	struct buffer_head *new_bh = NULL;

	mlog(0, "old_cluster %u, new %u, len %u\n", old_cluster,
	     new_cluster, new_len);

	for (i = 0; i < blocks; i++, old_block++, new_block++) {
		new_bh = sb_getblk(osb->sb, new_block);
		if (new_bh == NULL) {
			ret = -EIO;
			mlog_errno(ret);
			break;
		}

		/* Freshly allocated block: mark it uptodate, don't read it. */
		ocfs2_set_new_buffer_uptodate(ci, new_bh);

		ret = ocfs2_read_block(ci, old_block, &old_bh, NULL);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		ret = ocfs2_journal_access(handle, ci, new_bh,
					   OCFS2_JOURNAL_ACCESS_CREATE);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
		ocfs2_journal_dirty(handle, new_bh);

		brelse(new_bh);
		brelse(old_bh);
		new_bh = NULL;
		old_bh = NULL;
	}

	brelse(new_bh);
	brelse(old_bh);
	return ret;
}

/*
 * Replace the extent record covering [cpos, cpos + len) with one that
 * points at p_cluster and has the OCFS2_EXT_REFCOUNTED flag cleared,
 * splitting the original record as needed.
 */
static int ocfs2_clear_ext_refcount(handle_t *handle,
				    struct ocfs2_extent_tree *et,
				    u32 cpos, u32 p_cluster, u32 len,
				    unsigned int ext_flags,
				    struct ocfs2_alloc_context *meta_ac,
				    struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret, index;
	struct ocfs2_extent_rec replace_rec;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el;
	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
	u64 ino = ocfs2_metadata_cache_owner(et->et_ci);

	mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n",
	     (unsigned long long)ino, cpos, len, p_cluster, ext_flags);

	memset(&replace_rec, 0, sizeof(replace_rec));
	replace_rec.e_cpos = cpu_to_le32(cpos);
	replace_rec.e_leaf_clusters = cpu_to_le16(len);
	replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
								   p_cluster));
	replace_rec.e_flags = ext_flags;
	replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;

	path = ocfs2_new_path_from_et(et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(et->et_ci, path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	el = path_leaf_el(path);

	index = ocfs2_search_extent_list(el, cpos);
	if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
		ocfs2_error(sb,
			    "Inode %llu has an extent at cpos %u which can no "
			    "longer be found.\n",
			    (unsigned long long)ino, cpos);
		ret = -EROFS;
		goto out;
	}

	ret = ocfs2_split_extent(handle, et, path, index,
				 &replace_rec, meta_ac, dealloc);
	if (ret)
		mlog_errno(ret);

out:
	ocfs2_free_path(path);
	return ret;
}

/*
 * Duplicate the old clusters into the new ones (unless they were
 * unwritten) and clear the refcounted flag on the extent record.
 */
static int ocfs2_replace_clusters(handle_t *handle,
				  struct ocfs2_cow_context *context,
				  u32 cpos, u32 old,
				  u32 new, u32 len,
				  unsigned int ext_flags)
{
	int ret;
	struct ocfs2_caching_info *ci = context->data_et.et_ci;
	u64 ino = ocfs2_metadata_cache_owner(ci);

	mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n",
	     (unsigned long long)ino, cpos, old, new, len, ext_flags);

	/*If the old clusters is unwritten, no need to duplicate. */
	if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
		ret = context->cow_duplicate_clusters(handle, context, cpos,
						      old, new, len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_clear_ext_refcount(handle, &context->data_et,
				       cpos, new, len, ext_flags,
				       context->meta_ac, &context->dealloc);
	if (ret)
		mlog_errno(ret);
out:
	return ret;
}

/*
 * Push the CoWed pages to disk and wait for writeback.  Only needed in
 * data=writeback mode; in ordered mode the data is already handled by
 * the journal.
 */
static int ocfs2_cow_sync_writeback(struct super_block *sb,
				    struct ocfs2_cow_context *context,
				    u32 cpos, u32 num_clusters)
{
	int ret = 0;
	loff_t offset, end, map_end;
	pgoff_t page_index;
	struct page *page;

	if (ocfs2_should_order_data(context->inode))
		return 0;

	offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
	end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);

	ret = filemap_fdatawrite_range(context->inode->i_mapping,
				       offset, end - 1);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	while (offset < end) {
		page_index = offset >> PAGE_CACHE_SHIFT;
		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
		if (map_end > end)
			map_end = end;

		page = grab_cache_page(context->inode->i_mapping, page_index);
		BUG_ON(!page);

		wait_on_page_writeback(page);
		if (PageError(page)) {
			ret = -EIO;
			mlog_errno(ret);
		} else
			mark_page_accessed(page);

		unlock_page(page);
		page_cache_release(page);
		page = NULL;
		offset =
			 map_end;
		if (ret)
			break;
	}

	return ret;
}

/* get_clusters callback for regular file data (dinode extent tree). */
static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context,
				 u32 v_cluster, u32 *p_cluster,
				 u32 *num_clusters,
				 unsigned int *extent_flags)
{
	return ocfs2_get_clusters(context->inode, v_cluster, p_cluster,
				  num_clusters, extent_flags);
}

/*
 * CoW one physically-contiguous chunk: for each refcount record covering
 * it, either clear the refcounted flag (refcount == 1) or allocate new
 * clusters, copy the data over, and decrease the old clusters' refcount.
 */
static int ocfs2_make_clusters_writable(struct super_block *sb,
					struct ocfs2_cow_context *context,
					u32 cpos, u32 p_cluster,
					u32 num_clusters, unsigned int e_flags)
{
	int ret, delete, index, credits =  0;
	u32 new_bit, new_len;
	unsigned int set_len;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	handle_t *handle;
	struct buffer_head *ref_leaf_bh = NULL;
	struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
	struct ocfs2_refcount_rec rec;

	mlog(0, "cpos %u, p_cluster %u, num_clusters %u, e_flags %u\n",
	     cpos, p_cluster, num_clusters, e_flags);

	ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
					     &context->data_et,
					     ref_ci,
					     context->ref_root_bh,
					     &context->meta_ac,
					     &context->data_ac, &credits);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	if (context->post_refcount)
		credits += context->post_refcount->credits;

	credits += context->extra_credits;
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	while (num_clusters) {
		ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
					     p_cluster, num_clusters,
					     &rec, &index, &ref_leaf_bh);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}

		BUG_ON(!rec.r_refcount);
		/* Clamp this pass to the end of the current record. */
		set_len = min((u64)p_cluster + num_clusters,
			      le64_to_cpu(rec.r_cpos) +
			      le32_to_cpu(rec.r_clusters)) - p_cluster;

		/*
		 * There are many different situation here.
		 * 1. If refcount == 1, remove the flag and don't COW.
		 * 2. If refcount > 1, allocate clusters.
		 *    Here we may not allocate r_len once at a time, so continue
		 *    until we reach num_clusters.
		 */
		if (le32_to_cpu(rec.r_refcount) == 1) {
			delete = 0;
			ret = ocfs2_clear_ext_refcount(handle,
						       &context->data_et,
						       cpos, p_cluster,
						       set_len, e_flags,
						       context->meta_ac,
						       &context->dealloc);
			if (ret) {
				mlog_errno(ret);
				goto out_commit;
			}
		} else {
			delete = 1;

			ret = __ocfs2_claim_clusters(handle,
						     context->data_ac,
						     1, set_len,
						     &new_bit, &new_len);
			if (ret) {
				mlog_errno(ret);
				goto out_commit;
			}

			ret = ocfs2_replace_clusters(handle, context,
						     cpos, p_cluster, new_bit,
						     new_len, e_flags);
			if (ret) {
				mlog_errno(ret);
				goto out_commit;
			}
			set_len = new_len;
		}

		ret = __ocfs2_decrease_refcount(handle, ref_ci,
						context->ref_root_bh,
						p_cluster, set_len,
						context->meta_ac,
						&context->dealloc, delete);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}

		cpos += set_len;
		p_cluster += set_len;
		num_clusters -= set_len;
		brelse(ref_leaf_bh);
		ref_leaf_bh = NULL;
	}

	/* handle any post_cow action. */
	if (context->post_refcount && context->post_refcount->func) {
		ret = context->post_refcount->func(context->inode, handle,
						   context->post_refcount->para);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}
	}

	/*
	 * Here we should write the new page out first if we are
	 * in write-back mode.
	 *
	 * NOTE(review): at this point cpos has been advanced and
	 * num_clusters is 0 — confirm whether the original range was
	 * intended here.
	 */
	if (context->get_clusters == ocfs2_di_get_clusters) {
		ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
		if (ret)
			mlog_errno(ret);
	}

out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	if (context->data_ac) {
		ocfs2_free_alloc_context(context->data_ac);
		context->data_ac = NULL;
	}
	if (context->meta_ac) {
		ocfs2_free_alloc_context(context->meta_ac);
		context->meta_ac = NULL;
	}
	brelse(ref_leaf_bh);

	return ret;
}

/*
 * Drive the whole CoW: walk [cow_start, cow_start + cow_len) one
 * physically-contiguous chunk at a time and make each writable, then
 * flush any deallocations queued along the way.
 */
static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
{
	int ret = 0;
	struct inode *inode = context->inode;
	u32 cow_start = context->cow_start, cow_len = context->cow_len;
	u32 p_cluster, num_clusters;
	unsigned int ext_flags;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
		ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
			    "tree, but the feature bit is not set in the "
			    "super block.", inode->i_ino);
		return -EROFS;
	}

	ocfs2_init_dealloc_ctxt(&context->dealloc);

	while (cow_len) {
		ret = context->get_clusters(context, cow_start, &p_cluster,
					    &num_clusters, &ext_flags);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));

		if (cow_len < num_clusters)
			num_clusters = cow_len;

		ret = ocfs2_make_clusters_writable(inode->i_sb, context,
						   cow_start, p_cluster,
						   num_clusters, ext_flags);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		cow_len -= num_clusters;
		cow_start += num_clusters;
	}

	if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
		ocfs2_schedule_truncate_log_flush(osb, 1);
		ocfs2_run_deallocs(osb, &context->dealloc);
	}

	return ret;
}

/*
 * Starting at cpos, try to CoW write_len clusters.  Don't CoW
 * past max_cpos.  This will stop when it runs into a hole or an
 * unrefcounted extent.
*/ static int ocfs2_refcount_cow_hunk(struct inode *inode, struct buffer_head *di_bh, u32 cpos, u32 write_len, u32 max_cpos) { int ret; u32 cow_start = 0, cow_len = 0; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct buffer_head *ref_root_bh = NULL; struct ocfs2_refcount_tree *ref_tree; struct ocfs2_cow_context *context = NULL; BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list, cpos, write_len, max_cpos, &cow_start, &cow_len); if (ret) { mlog_errno(ret); goto out; } mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, " "cow_len %u\n", inode->i_ino, cpos, write_len, cow_start, cow_len); BUG_ON(cow_len == 0); context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); if (!context) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), 1, &ref_tree, &ref_root_bh); if (ret) { mlog_errno(ret); goto out; } context->inode = inode; context->cow_start = cow_start; context->cow_len = cow_len; context->ref_tree = ref_tree; context->ref_root_bh = ref_root_bh; context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page; context->get_clusters = ocfs2_di_get_clusters; ocfs2_init_dinode_extent_tree(&context->data_et, INODE_CACHE(inode), di_bh); ret = ocfs2_replace_cow(context); if (ret) mlog_errno(ret); /* * truncate the extent map here since no matter whether we meet with * any error during the action, we shouldn't trust cached extent map * any more. */ ocfs2_extent_map_trunc(inode, cow_start); ocfs2_unlock_refcount_tree(osb, ref_tree, 1); brelse(ref_root_bh); out: kfree(context); return ret; } /* * CoW any and all clusters between cpos and cpos+write_len. * Don't CoW past max_cpos. If this returns successfully, all * clusters between cpos and cpos+write_len are safe to modify. 
*/ int ocfs2_refcount_cow(struct inode *inode, struct buffer_head *di_bh, u32 cpos, u32 write_len, u32 max_cpos) { int ret = 0; u32 p_cluster, num_clusters; unsigned int ext_flags; while (write_len) { ret = ocfs2_get_clusters(inode, cpos, &p_cluster, &num_clusters, &ext_flags); if (ret) { mlog_errno(ret); break; } if (write_len < num_clusters) num_clusters = write_len; if (ext_flags & OCFS2_EXT_REFCOUNTED) { ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos, num_clusters, max_cpos); if (ret) { mlog_errno(ret); break; } } write_len -= num_clusters; cpos += num_clusters; } return ret; } static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context, u32 v_cluster, u32 *p_cluster, u32 *num_clusters, unsigned int *extent_flags) { struct inode *inode = context->inode; struct ocfs2_xattr_value_root *xv = context->cow_object; return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster, num_clusters, &xv->xr_list, extent_flags); } /* * Given a xattr value root, calculate the most meta/credits we need for * refcount tree change if we truncate it to 0. 
*/ int ocfs2_refcounted_xattr_delete_need(struct inode *inode, struct ocfs2_caching_info *ref_ci, struct buffer_head *ref_root_bh, struct ocfs2_xattr_value_root *xv, int *meta_add, int *credits) { int ret = 0, index, ref_blocks = 0; u32 p_cluster, num_clusters; u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters); struct ocfs2_refcount_block *rb; struct ocfs2_refcount_rec rec; struct buffer_head *ref_leaf_bh = NULL; while (cpos < clusters) { ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, &num_clusters, &xv->xr_list, NULL); if (ret) { mlog_errno(ret); goto out; } cpos += num_clusters; while (num_clusters) { ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh, p_cluster, num_clusters, &rec, &index, &ref_leaf_bh); if (ret) { mlog_errno(ret); goto out; } BUG_ON(!rec.r_refcount); rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; /* * We really don't know whether the other clusters is in * this refcount block or not, so just take the worst * case that all the clusters are in this block and each * one will split a refcount rec, so totally we need * clusters * 2 new refcount rec. */ if (le64_to_cpu(rb->rf_records.rl_used) + clusters * 2 > le16_to_cpu(rb->rf_records.rl_count)) ref_blocks++; *credits += 1; brelse(ref_leaf_bh); ref_leaf_bh = NULL; if (num_clusters <= le32_to_cpu(rec.r_clusters)) break; else num_clusters -= le32_to_cpu(rec.r_clusters); p_cluster += num_clusters; } } *meta_add += ref_blocks; if (!ref_blocks) goto out; rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS; else { struct ocfs2_extent_tree et; ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh); *credits += ocfs2_calc_extend_credits(inode->i_sb, et.et_root_el, ref_blocks); } out: brelse(ref_leaf_bh); return ret; } /* * Do CoW for xattr. 
*/ int ocfs2_refcount_cow_xattr(struct inode *inode, struct ocfs2_dinode *di, struct ocfs2_xattr_value_buf *vb, struct ocfs2_refcount_tree *ref_tree, struct buffer_head *ref_root_bh, u32 cpos, u32 write_len, struct ocfs2_post_refcount *post) { int ret; struct ocfs2_xattr_value_root *xv = vb->vb_xv; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_cow_context *context = NULL; u32 cow_start, cow_len; BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list, cpos, write_len, UINT_MAX, &cow_start, &cow_len); if (ret) { mlog_errno(ret); goto out; } BUG_ON(cow_len == 0); context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); if (!context) { ret = -ENOMEM; mlog_errno(ret); goto out; } context->inode = inode; context->cow_start = cow_start; context->cow_len = cow_len; context->ref_tree = ref_tree; context->ref_root_bh = ref_root_bh;; context->cow_object = xv; context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd; /* We need the extra credits for duplicate_clusters by jbd. */ context->extra_credits = ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len; context->get_clusters = ocfs2_xattr_value_get_clusters; context->post_refcount = post; ocfs2_init_xattr_value_extent_tree(&context->data_et, INODE_CACHE(inode), vb); ret = ocfs2_replace_cow(context); if (ret) mlog_errno(ret); out: kfree(context); return ret; } /* * Insert a new extent into refcount tree and mark a extent rec * as refcounted in the dinode tree. 
 */
int ocfs2_add_refcount_flag(struct inode *inode,
			    struct ocfs2_extent_tree *data_et,
			    struct ocfs2_caching_info *ref_ci,
			    struct buffer_head *ref_root_bh,
			    u32 cpos, u32 p_cluster, u32 num_clusters,
			    struct ocfs2_cached_dealloc_ctxt *dealloc,
			    struct ocfs2_post_refcount *post)
{
	int ret;
	handle_t *handle;
	int credits = 1, ref_blocks = 0;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_alloc_context *meta_ac = NULL;

	ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
					       ref_ci, ref_root_bh,
					       p_cluster, num_clusters,
					       &ref_blocks, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	mlog(0, "reserve new metadata %d, credits = %d\n",
	     ref_blocks, credits);

	if (ref_blocks) {
		ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
							ref_blocks, &meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (post)
		credits += post->credits;

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	/* Flag the data extent, then bump its clusters' refcount to 1. */
	ret = ocfs2_mark_extent_refcounted(inode, data_et, handle,
					   cpos, num_clusters,
					   p_cluster, meta_ac, dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
					p_cluster, num_clusters, 0,
					meta_ac, dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	if (post && post->func) {
		ret = post->func(inode, handle, post->para);
		if (ret)
			mlog_errno(ret);
	}

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	return ret;
}

/* Update the inode's ctime (in memory and on disk) in its own transaction. */
static int ocfs2_change_ctime(struct inode *inode,
			      struct buffer_head *di_bh)
{
	int ret;
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
				   OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	inode->i_ctime = CURRENT_TIME;
	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);

	ocfs2_journal_dirty(handle, di_bh);

out_commit:
	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
	return ret;
}

/*
 * Attach a refcount tree to the inode (creating one if needed) and mark
 * every allocated data extent — and any refcounted xattr clusters — as
 * refcounted.  ctime is bumped if any data extent was changed.
 */
static int ocfs2_attach_refcount_tree(struct inode *inode,
				      struct buffer_head *di_bh)
{
	int ret, data_changed = 0;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_refcount_tree *ref_tree;
	unsigned int ext_flags;
	loff_t size;
	u32 cpos, num_clusters, clusters, p_cluster;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_extent_tree di_et;

	ocfs2_init_dealloc_ctxt(&dealloc);

	if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) {
		ret = ocfs2_create_refcount_tree(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	BUG_ON(!di->i_refcount_loc);
	ret = ocfs2_lock_refcount_tree(osb,
				       le64_to_cpu(di->i_refcount_loc), 1,
				       &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Inline data has no extents to flag; only xattrs may need work. */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto attach_xattr;

	ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);

	size = i_size_read(inode);
	clusters = ocfs2_clusters_for_bytes(inode->i_sb, size);

	cpos = 0;
	while (cpos < clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
					 &num_clusters, &ext_flags);

		if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
			ret = ocfs2_add_refcount_flag(inode, &di_et,
						      &ref_tree->rf_ci,
						      ref_root_bh,
						      cpos, p_cluster,
						      num_clusters,
						      &dealloc, NULL);
			if (ret) {
				mlog_errno(ret);
				goto unlock;
			}

			data_changed = 1;
		}
		cpos += num_clusters;
	}

attach_xattr:
	if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
		ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
						       &ref_tree->rf_ci,
						       ref_root_bh,
						       &dealloc);
		if (ret) {
			mlog_errno(ret);
			goto unlock;
		}
	}

	if (data_changed) {
		ret = ocfs2_change_ctime(inode, di_bh);
		if (ret)
			mlog_errno(ret);
	}

unlock:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);

	if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) {
		ocfs2_schedule_truncate_log_flush(osb, 1);
		ocfs2_run_deallocs(osb, &dealloc);
	}

out:
	/*
	 * Empty the extent map so that we may get the right extent
	 * record from the disk.
	 */
	ocfs2_extent_map_trunc(inode, 0);

	return ret;
}

/*
 * Insert an extent that already has refcounted clusters into the target
 * tree and increase those clusters' refcount accordingly.
 */
static int ocfs2_add_refcounted_extent(struct inode *inode,
				   struct ocfs2_extent_tree *et,
				   struct ocfs2_caching_info *ref_ci,
				   struct buffer_head *ref_root_bh,
				   u32 cpos, u32 p_cluster, u32 num_clusters,
				   unsigned int ext_flags,
				   struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;
	handle_t *handle;
	int credits = 0;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_alloc_context *meta_ac = NULL;

	ret = ocfs2_lock_refcount_allocators(inode->i_sb,
					     p_cluster, num_clusters,
					     et, ref_ci, ref_root_bh,
					     &meta_ac, NULL, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_insert_extent(handle, et, cpos,
			ocfs2_clusters_to_blocks(inode->i_sb, p_cluster),
			num_clusters, ext_flags, meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
				      p_cluster, num_clusters,
				      meta_ac, dealloc);
	if (ret)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	return ret;
}

/* Copy inline data from a source inode's dinode into the target's. */
static int ocfs2_duplicate_inline_data(struct inode *s_inode,
				       struct buffer_head *s_bh,
				       struct inode *t_inode,
				       struct buffer_head *t_bh)
{
	int ret;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
	struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
	struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;

	BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret =
ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } t_di->id2.i_data.id_count = s_di->id2.i_data.id_count; memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data, le16_to_cpu(s_di->id2.i_data.id_count)); spin_lock(&OCFS2_I(t_inode)->ip_lock); OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL; t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features); spin_unlock(&OCFS2_I(t_inode)->ip_lock); ocfs2_journal_dirty(handle, t_bh); out_commit: ocfs2_commit_trans(osb, handle); out: return ret; } static int ocfs2_duplicate_extent_list(struct inode *s_inode, struct inode *t_inode, struct buffer_head *t_bh, struct ocfs2_caching_info *ref_ci, struct buffer_head *ref_root_bh, struct ocfs2_cached_dealloc_ctxt *dealloc) { int ret = 0; u32 p_cluster, num_clusters, clusters, cpos; loff_t size; unsigned int ext_flags; struct ocfs2_extent_tree et; ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh); size = i_size_read(s_inode); clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size); cpos = 0; while (cpos < clusters) { ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster, &num_clusters, &ext_flags); if (p_cluster) { ret = ocfs2_add_refcounted_extent(t_inode, &et, ref_ci, ref_root_bh, cpos, p_cluster, num_clusters, ext_flags, dealloc); if (ret) { mlog_errno(ret); goto out; } } cpos += num_clusters; } out: return ret; } /* * change the new file's attributes to the src. * * reflink creates a snapshot of a file, that means the attributes * must be identical except for three exceptions - nlink, ino, and ctime. 
*/
static int ocfs2_complete_reflink(struct inode *s_inode,
				  struct buffer_head *s_bh,
				  struct inode *t_inode,
				  struct buffer_head *t_bh,
				  bool preserve)
{
	int ret;
	handle_t *handle;
	struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data;
	loff_t size = i_size_read(s_inode);

	handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb),
				   OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		return ret;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Copy the in-memory inode fields under ip_lock, then mirror the
	 * same values into the on-disk dinode of the target.
	 */
	spin_lock(&OCFS2_I(t_inode)->ip_lock);
	OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters;
	OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr;
	OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
	spin_unlock(&OCFS2_I(t_inode)->ip_lock);
	i_size_write(t_inode, size);
	t_inode->i_blocks = s_inode->i_blocks;

	di->i_xattr_inline_size = s_di->i_xattr_inline_size;
	di->i_clusters = s_di->i_clusters;
	di->i_size = s_di->i_size;
	di->i_dyn_features = s_di->i_dyn_features;
	di->i_attr = s_di->i_attr;

	if (preserve) {
		t_inode->i_uid = s_inode->i_uid;
		t_inode->i_gid = s_inode->i_gid;
		t_inode->i_mode = s_inode->i_mode;
		di->i_uid = s_di->i_uid;
		di->i_gid = s_di->i_gid;
		di->i_mode = s_di->i_mode;

		/*
		 * update time.
		 * we want mtime to appear identical to the source and
		 * update ctime.
		 */
		t_inode->i_ctime = CURRENT_TIME;

		di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
		di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);

		t_inode->i_mtime = s_inode->i_mtime;
		di->i_mtime = s_di->i_mtime;
		di->i_mtime_nsec = s_di->i_mtime_nsec;
	}

	ocfs2_journal_dirty(handle, t_bh);

out_commit:
	ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle);
	return ret;
}

/*
 * Point the target inode at the source's refcount tree, then duplicate
 * the source's data into the target: inline data is memcpy'd, extent
 * data is shared by inserting refcounted extents under the tree lock.
 */
static int ocfs2_create_reflink_node(struct inode *s_inode,
				     struct buffer_head *s_bh,
				     struct inode *t_inode,
				     struct buffer_head *t_bh,
				     bool preserve)
{
	int ret;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
	struct ocfs2_refcount_tree *ref_tree;

	ocfs2_init_dealloc_ctxt(&dealloc);

	ret = ocfs2_set_refcount_tree(t_inode, t_bh,
				      le64_to_cpu(di->i_refcount_loc));
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Inline data does not share clusters; just copy it and be done. */
	if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
						  t_inode, t_bh);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
				       1, &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Note: the previous version read ref_root_bh->b_data into a local
	 * "rb" that was never used; the dead store has been removed.
	 */
	ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
					  &ref_tree->rf_ci, ref_root_bh,
					  &dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock_refcount;
	}

out_unlock_refcount:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);
out:
	if (ocfs2_dealloc_has_cluster(&dealloc)) {
		ocfs2_schedule_truncate_log_flush(osb, 1);
		ocfs2_run_deallocs(osb, &dealloc);
	}

	return ret;
}

static int __ocfs2_reflink(struct dentry *old_dentry,
			   struct buffer_head *old_bh,
			   struct inode *new_inode,
			   bool preserve)
{
	int ret;
	struct inode *inode = old_dentry->d_inode;
	struct buffer_head *new_bh = NULL;

	/* Reflinking system files is not allowed. */
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
		ret = -EINVAL;
		mlog_errno(ret);
goto out; } ret = filemap_fdatawrite(inode->i_mapping); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_attach_refcount_tree(inode, old_bh); if (ret) { mlog_errno(ret); goto out; } mutex_lock(&new_inode->i_mutex); ret = ocfs2_inode_lock(new_inode, &new_bh, 1); if (ret) { mlog_errno(ret); goto out_unlock; } ret = ocfs2_create_reflink_node(inode, old_bh, new_inode, new_bh, preserve); if (ret) { mlog_errno(ret); goto inode_unlock; } if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) { ret = ocfs2_reflink_xattrs(inode, old_bh, new_inode, new_bh, preserve); if (ret) { mlog_errno(ret); goto inode_unlock; } } ret = ocfs2_complete_reflink(inode, old_bh, new_inode, new_bh, preserve); if (ret) mlog_errno(ret); inode_unlock: ocfs2_inode_unlock(new_inode, 1); brelse(new_bh); out_unlock: mutex_unlock(&new_inode->i_mutex); out: if (!ret) { ret = filemap_fdatawait(inode->i_mapping); if (ret) mlog_errno(ret); } return ret; } static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, bool preserve) { int error; struct inode *inode = old_dentry->d_inode; struct buffer_head *old_bh = NULL; struct inode *new_orphan_inode = NULL; if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) return -EOPNOTSUPP; error = ocfs2_create_inode_in_orphan(dir, inode->i_mode, &new_orphan_inode); if (error) { mlog_errno(error); goto out; } error = ocfs2_inode_lock(inode, &old_bh, 1); if (error) { mlog_errno(error); goto out; } down_write(&OCFS2_I(inode)->ip_xattr_sem); down_write(&OCFS2_I(inode)->ip_alloc_sem); error = __ocfs2_reflink(old_dentry, old_bh, new_orphan_inode, preserve); up_write(&OCFS2_I(inode)->ip_alloc_sem); up_write(&OCFS2_I(inode)->ip_xattr_sem); ocfs2_inode_unlock(inode, 1); brelse(old_bh); if (error) { mlog_errno(error); goto out; } /* If the security isn't preserved, we need to re-initialize them. 
*/ if (!preserve) { error = ocfs2_init_security_and_acl(dir, new_orphan_inode); if (error) mlog_errno(error); } out: if (!error) { error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode, new_dentry); if (error) mlog_errno(error); } if (new_orphan_inode) { /* * We need to open_unlock the inode no matter whether we * succeed or not, so that other nodes can delete it later. */ ocfs2_open_unlock(new_orphan_inode); if (error) iput(new_orphan_inode); } return error; } /* * Below here are the bits used by OCFS2_IOC_REFLINK() to fake * sys_reflink(). This will go away when vfs_reflink() exists in * fs/namei.c. */ /* copied from may_create in VFS. */ static inline int ocfs2_may_create(struct inode *dir, struct dentry *child) { if (child->d_inode) return -EEXIST; if (IS_DEADDIR(dir)) return -ENOENT; return inode_permission(dir, MAY_WRITE | MAY_EXEC); } /* copied from user_path_parent. */ static int ocfs2_user_path_parent(const char __user *path, struct nameidata *nd, char **name) { char *s = getname(path); int error; if (IS_ERR(s)) return PTR_ERR(s); error = path_lookup(s, LOOKUP_PARENT, nd); if (error) putname(s); else *name = s; return error; } /** * ocfs2_vfs_reflink - Create a reference-counted link * * @old_dentry: source dentry + inode * @dir: directory to create the target * @new_dentry: target dentry * @preserve: if true, preserve all file attributes */ static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, bool preserve) { struct inode *inode = old_dentry->d_inode; int error; if (!inode) return -ENOENT; error = ocfs2_may_create(dir, new_dentry); if (error) return error; if (dir->i_sb != inode->i_sb) return -EXDEV; /* * A reflink to an append-only or immutable file cannot be created. */ if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; /* Only regular files can be reflinked. */ if (!S_ISREG(inode->i_mode)) return -EPERM; /* * If the caller wants to preserve ownership, they require the * rights to do so. 
*/ if (preserve) { if ((current_fsuid() != inode->i_uid) && !capable(CAP_CHOWN)) return -EPERM; if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN)) return -EPERM; } /* * If the caller is modifying any aspect of the attributes, they * are not creating a snapshot. They need read permission on the * file. */ if (!preserve) { error = inode_permission(inode, MAY_READ); if (error) return error; } mutex_lock(&inode->i_mutex); dquot_initialize(dir); error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve); mutex_unlock(&inode->i_mutex); if (!error) fsnotify_create(dir, new_dentry); return error; } /* * Most codes are copied from sys_linkat. */ int ocfs2_reflink_ioctl(struct inode *inode, const char __user *oldname, const char __user *newname, bool preserve) { struct dentry *new_dentry; struct nameidata nd; struct path old_path; int error; char *to = NULL; if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) return -EOPNOTSUPP; error = user_path_at(AT_FDCWD, oldname, 0, &old_path); if (error) { mlog_errno(error); return error; } error = ocfs2_user_path_parent(newname, &nd, &to); if (error) { mlog_errno(error); goto out; } error = -EXDEV; if (old_path.mnt != nd.path.mnt) goto out_release; new_dentry = lookup_create(&nd, 0); error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) { mlog_errno(error); goto out_unlock; } error = mnt_want_write(nd.path.mnt); if (error) { mlog_errno(error); goto out_dput; } error = ocfs2_vfs_reflink(old_path.dentry, nd.path.dentry->d_inode, new_dentry, preserve); mnt_drop_write(nd.path.mnt); out_dput: dput(new_dentry); out_unlock: mutex_unlock(&nd.path.dentry->d_inode->i_mutex); out_release: path_put(&nd.path); putname(to); out: path_put(&old_path); return error; }
gpl-2.0
zeroprobe/ZeroMHL-Overclocked-V3
drivers/gpu/drm/i915/intel_display.c
512
224646
/* * Copyright © 2006-2007 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Eric Anholt <eric@anholt.net> */ #include <linux/module.h> #include <linux/input.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/vgaarb.h> #include "drmP.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" #include "i915_trace.h" #include "drm_dp_helper.h" #include "drm_crtc_helper.h" #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) bool intel_pipe_has_type (struct drm_crtc *crtc, int type); static void intel_update_watermarks(struct drm_device *dev); static void intel_increase_pllclock(struct drm_crtc *crtc); static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); typedef struct { /* given values */ int n; int m1, m2; int p1, p2; /* derived values */ int dot; int vco; int m; int p; } intel_clock_t; typedef struct { int min, max; } intel_range_t; typedef struct { int dot_limit; int p2_slow, p2_fast; } intel_p2_t; #define INTEL_P2_NUM 2 typedef struct intel_limit intel_limit_t; struct intel_limit { intel_range_t dot, vco, n, m, m1, m2, p, p1; intel_p2_t p2; bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, int, int, intel_clock_t *); }; /* FDI */ #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ static bool intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock); static bool intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock); static bool intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock); static bool intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock); static inline u32 /* units of 100MHz */ intel_fdi_link_freq(struct drm_device *dev) { if (IS_GEN5(dev)) { struct drm_i915_private *dev_priv = dev->dev_private; return (I915_READ(FDI_PLL_BIOS_0) & 
FDI_PLL_FB_CLOCK_MASK) + 2; } else return 27; } static const intel_limit_t intel_limits_i8xx_dvo = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 930000, .max = 1400000 }, .n = { .min = 3, .max = 16 }, .m = { .min = 96, .max = 140 }, .m1 = { .min = 18, .max = 26 }, .m2 = { .min = 6, .max = 16 }, .p = { .min = 4, .max = 128 }, .p1 = { .min = 2, .max = 33 }, .p2 = { .dot_limit = 165000, .p2_slow = 4, .p2_fast = 2 }, .find_pll = intel_find_best_PLL, }; static const intel_limit_t intel_limits_i8xx_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 930000, .max = 1400000 }, .n = { .min = 3, .max = 16 }, .m = { .min = 96, .max = 140 }, .m1 = { .min = 18, .max = 26 }, .m2 = { .min = 6, .max = 16 }, .p = { .min = 4, .max = 128 }, .p1 = { .min = 1, .max = 6 }, .p2 = { .dot_limit = 165000, .p2_slow = 14, .p2_fast = 7 }, .find_pll = intel_find_best_PLL, }; static const intel_limit_t intel_limits_i9xx_sdvo = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1400000, .max = 2800000 }, .n = { .min = 1, .max = 6 }, .m = { .min = 70, .max = 120 }, .m1 = { .min = 10, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 5, .max = 80 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 200000, .p2_slow = 10, .p2_fast = 5 }, .find_pll = intel_find_best_PLL, }; static const intel_limit_t intel_limits_i9xx_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1400000, .max = 2800000 }, .n = { .min = 1, .max = 6 }, .m = { .min = 70, .max = 120 }, .m1 = { .min = 10, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 7, .max = 98 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 112000, .p2_slow = 14, .p2_fast = 7 }, .find_pll = intel_find_best_PLL, }; static const intel_limit_t intel_limits_g4x_sdvo = { .dot = { .min = 25000, .max = 270000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 4 }, .m = { .min = 104, .max = 138 }, .m1 = { .min = 17, .max = 23 }, .m2 = { .min = 5, .max = 11 }, .p = { .min = 10, .max = 
30 }, .p1 = { .min = 1, .max = 3}, .p2 = { .dot_limit = 270000, .p2_slow = 10, .p2_fast = 10 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_g4x_hdmi = { .dot = { .min = 22000, .max = 400000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 4 }, .m = { .min = 104, .max = 138 }, .m1 = { .min = 16, .max = 23 }, .m2 = { .min = 5, .max = 11 }, .p = { .min = 5, .max = 80 }, .p1 = { .min = 1, .max = 8}, .p2 = { .dot_limit = 165000, .p2_slow = 10, .p2_fast = 5 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_g4x_single_channel_lvds = { .dot = { .min = 20000, .max = 115000 }, .vco = { .min = 1750000, .max = 3500000 }, .n = { .min = 1, .max = 3 }, .m = { .min = 104, .max = 138 }, .m1 = { .min = 17, .max = 23 }, .m2 = { .min = 5, .max = 11 }, .p = { .min = 28, .max = 112 }, .p1 = { .min = 2, .max = 8 }, .p2 = { .dot_limit = 0, .p2_slow = 14, .p2_fast = 14 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { .dot = { .min = 80000, .max = 224000 }, .vco = { .min = 1750000, .max = 3500000 }, .n = { .min = 1, .max = 3 }, .m = { .min = 104, .max = 138 }, .m1 = { .min = 17, .max = 23 }, .m2 = { .min = 5, .max = 11 }, .p = { .min = 14, .max = 42 }, .p1 = { .min = 2, .max = 6 }, .p2 = { .dot_limit = 0, .p2_slow = 7, .p2_fast = 7 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_g4x_display_port = { .dot = { .min = 161670, .max = 227000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 2 }, .m = { .min = 97, .max = 108 }, .m1 = { .min = 0x10, .max = 0x12 }, .m2 = { .min = 0x05, .max = 0x06 }, .p = { .min = 10, .max = 20 }, .p1 = { .min = 1, .max = 2}, .p2 = { .dot_limit = 0, .p2_slow = 10, .p2_fast = 10 }, .find_pll = intel_find_pll_g4x_dp, }; static const intel_limit_t intel_limits_pineview_sdvo = { .dot = { .min = 20000, .max = 400000}, .vco = { .min = 1700000, .max = 3500000 }, /* 
Pineview's Ncounter is a ring counter */ .n = { .min = 3, .max = 6 }, .m = { .min = 2, .max = 256 }, /* Pineview only has one combined m divider, which we treat as m2. */ .m1 = { .min = 0, .max = 0 }, .m2 = { .min = 0, .max = 254 }, .p = { .min = 5, .max = 80 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 200000, .p2_slow = 10, .p2_fast = 5 }, .find_pll = intel_find_best_PLL, }; static const intel_limit_t intel_limits_pineview_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1700000, .max = 3500000 }, .n = { .min = 3, .max = 6 }, .m = { .min = 2, .max = 256 }, .m1 = { .min = 0, .max = 0 }, .m2 = { .min = 0, .max = 254 }, .p = { .min = 7, .max = 112 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 112000, .p2_slow = 14, .p2_fast = 14 }, .find_pll = intel_find_best_PLL, }; /* Ironlake / Sandybridge * * We calculate clock using (register_value + 2) for N/M1/M2, so here * the range value for them is (actual_value - 2). */ static const intel_limit_t intel_limits_ironlake_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 5 }, .m = { .min = 79, .max = 127 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 5, .max = 80 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 10, .p2_fast = 5 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_ironlake_single_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, .m = { .min = 79, .max = 118 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 28, .max = 112 }, .p1 = { .min = 2, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 14, .p2_fast = 14 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_ironlake_dual_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, .m = { 
.min = 79, .max = 127 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 14, .max = 56 }, .p1 = { .min = 2, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 7, .p2_fast = 7 }, .find_pll = intel_g4x_find_best_PLL, }; /* LVDS 100mhz refclk limits. */ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 2 }, .m = { .min = 79, .max = 126 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 28, .max = 112 }, .p1 = { .min = 2,.max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 14, .p2_fast = 14 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, .m = { .min = 79, .max = 126 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 14, .max = 42 }, .p1 = { .min = 2,.max = 6 }, .p2 = { .dot_limit = 225000, .p2_slow = 7, .p2_fast = 7 }, .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_ironlake_display_port = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000}, .n = { .min = 1, .max = 2 }, .m = { .min = 81, .max = 90 }, .m1 = { .min = 12, .max = 22 }, .m2 = { .min = 5, .max = 9 }, .p = { .min = 10, .max = 20 }, .p1 = { .min = 1, .max = 2}, .p2 = { .dot_limit = 0, .p2_slow = 10, .p2_fast = 10 }, .find_pll = intel_find_pll_ironlake_dp, }; static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, int refclk) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; const intel_limit_t *limit; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) { /* LVDS dual channel */ if (refclk == 100000) limit = 
&intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
		   HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

/*
 * Pick the PLL divisor limit table for a G4X pipe based on the type of
 * output driven by the pipe (LVDS channel mode is read back from the
 * LVDS register).
 */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

/*
 * Top-level limit-table dispatch: PCH-split (Ironlake+) and G4X have
 * their own helpers; older generations are selected inline here.
 */
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else
			limit = &intel_limits_i8xx_dvo;
	}
	return
limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	/* Pineview uses a single combined m divider (stored in m2). */
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

/*
 * Derive m, p, vco and dot clock from the divisor fields.  Non-Pineview
 * hardware programs N/M1/M2 as (actual - 2), hence the "+ 2" terms; see
 * the comment above the Ironlake limit tables.
 */
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* Scan every encoder; true if one is attached to us with this type. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}

/* Bail out of the enclosing validity check; the debug print is disabled. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
*/ static bool intel_PLL_is_valid(struct drm_device *dev, const intel_limit_t *limit, const intel_clock_t *clock) { if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) INTELPllInvalid ("p1 out of range\n"); if (clock->p < limit->p.min || limit->p.max < clock->p) INTELPllInvalid ("p out of range\n"); if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) INTELPllInvalid ("m2 out of range\n"); if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) INTELPllInvalid ("m1 out of range\n"); if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) INTELPllInvalid ("m1 <= m2\n"); if (clock->m < limit->m.min || limit->m.max < clock->m) INTELPllInvalid ("m out of range\n"); if (clock->n < limit->n.min || limit->n.max < clock->n) INTELPllInvalid ("n out of range\n"); if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) INTELPllInvalid ("vco out of range\n"); /* XXX: We may need to be checking "Dot clock" depending on the multiplier, * connector, etc., rather than just a single range. */ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) INTELPllInvalid ("dot out of range\n"); return true; } static bool intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; intel_clock_t clock; int err = target; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && (I915_READ(LVDS)) != 0) { /* * For LVDS, if the panel is on, just rely on its current * settings for dual-channel. We haven't figured out how to * reliably set up different single/dual channel state, if we * even can. 
*/ if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) clock.p2 = limit->p2.p2_fast; else clock.p2 = limit->p2.p2_slow; } else { if (target < limit->p2.dot_limit) clock.p2 = limit->p2.p2_slow; else clock.p2 = limit->p2.p2_fast; } memset (best_clock, 0, sizeof (*best_clock)); for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { /* m1 is always 0 in Pineview */ if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev)) break; for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; clock.p1++) { int this_err; intel_clock(dev, refclk, &clock); if (!intel_PLL_is_valid(dev, limit, &clock)) continue; this_err = abs(clock.dot - target); if (this_err < err) { *best_clock = clock; err = this_err; } } } } } return (err != target); } static bool intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; intel_clock_t clock; int max_n; bool found; /* approximately equals target * 0.00585 */ int err_most = (target >> 8) + (target >> 9); found = false; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { int lvds_reg; if (HAS_PCH_SPLIT(dev)) lvds_reg = PCH_LVDS; else lvds_reg = LVDS; if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) clock.p2 = limit->p2.p2_fast; else clock.p2 = limit->p2.p2_slow; } else { if (target < limit->p2.dot_limit) clock.p2 = limit->p2.p2_slow; else clock.p2 = limit->p2.p2_fast; } memset(best_clock, 0, sizeof(*best_clock)); max_n = limit->n.max; /* based on hardware requirement, prefer smaller n to precision */ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { /* based on hardware requirement, prefere larger m1,m2 */ for (clock.m1 = limit->m1.max; clock.m1 >= limit->m1.min; clock.m1--) { for (clock.m2 = 
			     limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* tighten the n ceiling so later
						 * iterations only try smaller n */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Ironlake eDP/DP: fixed divider tables, no search needed.  The two entries
 * correspond to link clocks below/above 200 MHz.
 */
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit,
			   struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}

	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *best_clock)
{
	intel_clock_t clock;

	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 = 8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}

	/* Derived fields computed by hand here rather than via intel_clock(). */
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
* */ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_INFO(dev)->gen >= 4) { int reg = PIPECONF(pipe); /* Wait for the Pipe State to go off */ if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100)) DRM_DEBUG_KMS("pipe_off wait timed out\n"); } else { u32 last_line; int reg = PIPEDSL(pipe); unsigned long timeout = jiffies + msecs_to_jiffies(100); /* Wait for the display line to settle */ do { last_line = I915_READ(reg) & DSL_LINEMASK; mdelay(5); } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) && time_after(timeout, jiffies)); if (time_after(jiffies, timeout)) DRM_DEBUG_KMS("pipe_off wait timed out\n"); } } static const char *state_string(bool enabled) { return enabled ? "on" : "off"; } /* Only for pre-ILK configs */ static void assert_pll(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { int reg; u32 val; bool cur_state; reg = DPLL(pipe); val = I915_READ(reg); cur_state = !!(val & DPLL_VCO_ENABLE); WARN(cur_state != state, "PLL state assertion failure (expected %s, current %s)\n", state_string(state), state_string(cur_state)); } #define assert_pll_enabled(d, p) assert_pll(d, p, true) #define assert_pll_disabled(d, p) assert_pll(d, p, false) /* For ILK+ */ static void assert_pch_pll(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { int reg; u32 val; bool cur_state; reg = PCH_DPLL(pipe); val = I915_READ(reg); cur_state = !!(val & DPLL_VCO_ENABLE); WARN(cur_state != state, "PCH PLL state assertion failure (expected %s, current %s)\n", state_string(state), state_string(cur_state)); } #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true) #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false) static void assert_fdi_tx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { int reg; u32 val; bool cur_state; reg = FDI_TX_CTL(pipe); val = I915_READ(reg); cur_state = !!(val & FDI_TX_ENABLE); WARN(cur_state != state, "FDI 
TX state assertion failure (expected %s, current %s)\n", state_string(state), state_string(cur_state)); } #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) static void assert_fdi_rx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { int reg; u32 val; bool cur_state; reg = FDI_RX_CTL(pipe); val = I915_READ(reg); cur_state = !!(val & FDI_RX_ENABLE); WARN(cur_state != state, "FDI RX state assertion failure (expected %s, current %s)\n", state_string(state), state_string(cur_state)); } #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg; u32 val; /* ILK FDI PLL is always enabled */ if (dev_priv->info->gen == 5) return; reg = FDI_TX_CTL(pipe); val = I915_READ(reg); WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); } static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg; u32 val; reg = FDI_RX_CTL(pipe); val = I915_READ(reg); WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); } static void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) { int pp_reg, lvds_reg; u32 val; enum pipe panel_pipe = PIPE_A; bool locked = locked; if (HAS_PCH_SPLIT(dev_priv->dev)) { pp_reg = PCH_PP_CONTROL; lvds_reg = PCH_LVDS; } else { pp_reg = PP_CONTROL; lvds_reg = LVDS; } val = I915_READ(pp_reg); if (!(val & PANEL_POWER_ON) || ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)) locked = false; if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT) panel_pipe = PIPE_B; WARN(panel_pipe == pipe && locked, "panel assertion failure, pipe %c regs locked\n", pipe_name(pipe)); } static void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { int reg; u32 val; 
bool cur_state; reg = PIPECONF(pipe); val = I915_READ(reg); cur_state = !!(val & PIPECONF_ENABLE); WARN(cur_state != state, "pipe %c assertion failure (expected %s, current %s)\n", pipe_name(pipe), state_string(state), state_string(cur_state)); } #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) static void assert_plane_enabled(struct drm_i915_private *dev_priv, enum plane plane) { int reg; u32 val; reg = DSPCNTR(plane); val = I915_READ(reg); WARN(!(val & DISPLAY_PLANE_ENABLE), "plane %c assertion failure, should be active but is disabled\n", plane_name(plane)); } static void assert_planes_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg, i; u32 val; int cur_pipe; /* Planes are fixed to pipes on ILK+ */ if (HAS_PCH_SPLIT(dev_priv->dev)) return; /* Need to check both planes against the pipe */ for (i = 0; i < 2; i++) { reg = DSPCNTR(i); val = I915_READ(reg); cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> DISPPLANE_SEL_PIPE_SHIFT; WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, "plane %c assertion failure, should be off on pipe %c but is still active\n", plane_name(i), pipe_name(pipe)); } } static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) { u32 val; bool enabled; val = I915_READ(PCH_DREF_CONTROL); enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | DREF_SUPERSPREAD_SOURCE_MASK)); WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); } static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg; u32 val; bool enabled; reg = TRANSCONF(pipe); val = I915_READ(reg); enabled = !!(val & TRANS_ENABLE); WARN(enabled, "transcoder assertion failed, should be off on pipe %c but is still active\n", pipe_name(pipe)); } static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); WARN(DP_PIPE_ENABLED(val, pipe), 
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); } static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); WARN(HDMI_PIPE_ENABLED(val, pipe), "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); } static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { int reg; u32 val; assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B); assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C); assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D); reg = PCH_ADPA; val = I915_READ(reg); WARN(ADPA_PIPE_ENABLED(val, pipe), "PCH VGA enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); reg = PCH_LVDS; val = I915_READ(reg); WARN(LVDS_PIPE_ENABLED(val, pipe), "PCH LVDS enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); } /** * intel_enable_pll - enable a PLL * @dev_priv: i915 private structure * @pipe: pipe PLL to enable * * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to * make sure the PLL reg is writable first though, since the panel write * protect mechanism may be enabled. * * Note! This is for pre-ILK only. 
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* No really, not for ILK+ */
	BUG_ON(dev_priv->info->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;

	/* We do this three times for luck */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * intel_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
}

/**
 * intel_enable_pch_pll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
				 enum pipe pipe)
{
	int reg;
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);
}

/* Disable the PCH PLL; the transcoder must already be off. */
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int reg;
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, pipe);

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);
}

/* Enable the PCH transcoder for @pipe; PCH PLL and FDI must be running. */
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	int reg;
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv, pipe);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	/*
	 * make the BPC in transcoder be consistent with
	 * that in pipeconf reg.
	 */
	val &= ~PIPE_BPC_MASK;
	val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}

/* Disable the PCH transcoder; FDI and all PCH ports must be off already. */
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	int reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder\n");
}

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to enable
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
 *
 * Enable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe is actually running (i.e. first vblank) before
 * returning.
 */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
			      bool pch_port)
{
	int reg;
	u32 val;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		assert_pll_enabled(dev_priv, pipe);
	else {
		if (pch_port) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	/* Already on: nothing to do. */
	if (val & PIPECONF_ENABLE)
		return;

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
}

/**
 * intel_enable_plane - enable a display plane on a given pipe
 * @dev_priv: i915 private structure
 * @plane: plane to enable
 * @pipe: pipe being fed
 *
 * Enable @plane on @pipe, making sure that @pipe is running first.
 */
static void intel_enable_plane(struct drm_i915_private *dev_priv,
			       enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, pipe);

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	if (val & DISPLAY_PLANE_ENABLE)
		return;

	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}

/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch.  The display address reg provides this.
 */
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
				      enum plane plane)
{
	u32 reg = DSPADDR(plane);
	/* Self-write acts purely as the latch trigger. */
	I915_WRITE(reg, I915_READ(reg));
}

/**
 * intel_disable_plane - disable a display plane
 * @dev_priv: i915 private structure
 * @plane: plane to disable
 * @pipe: pipe consuming the data
 *
 * Disable @plane; should be an independent operation.
 */
static void intel_disable_plane(struct drm_i915_private *dev_priv,
				enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}

/* Turn off a PCH DP port if it is currently driving @pipe. */
static void disable_pch_dp(struct drm_i915_private *dev_priv,
			   enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	if (DP_PIPE_ENABLED(val, pipe))
		I915_WRITE(reg, val & ~DP_PORT_EN);
}

/* Turn off a PCH HDMI port if it is currently driving @pipe. */
static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
			     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	if (HDMI_PIPE_ENABLED(val, pipe))
		I915_WRITE(reg, val & ~PORT_ENABLE);
}

/* Disable any ports connected to this transcoder */
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 reg, val;

	/* Unlock the panel-protected registers before touching them. */
	val = I915_READ(PCH_PP_CONTROL);
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);

	disable_pch_dp(dev_priv, pipe, PCH_DP_B);
	disable_pch_dp(dev_priv, pipe, PCH_DP_C);
	disable_pch_dp(dev_priv, pipe, PCH_DP_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	if (ADPA_PIPE_ENABLED(val, pipe))
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);

	reg = PCH_LVDS;
	val = I915_READ(reg);
	if (LVDS_PIPE_ENABLED(val, pipe)) {
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
		POSTING_READ(reg);
		udelay(100);
	}

	disable_pch_hdmi(dev_priv, pipe, HDMIB);
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
	disable_pch_hdmi(dev_priv, pipe, HDMID);
}

/* Enable FBC via the legacy i8xx FBC_CONTROL interface. */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	/* Already running with identical parameters?  Nothing to do. */
	if (fb->pitch == dev_priv->cfb_pitch &&
	    obj->fence_reg == dev_priv->cfb_fence &&
	    intel_crtc->plane == dev_priv->cfb_plane &&
	    I915_READ(FBC_CONTROL) & FBC_CTL_EN)
		return;

	i8xx_disable_fbc(dev);

	dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;

	if (fb->pitch < dev_priv->cfb_pitch)
		dev_priv->cfb_pitch = fb->pitch;

	/* FBC_CTL wants 64B units */
	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
	dev_priv->cfb_fence = obj->fence_reg;
	dev_priv->cfb_plane = intel_crtc->plane;
	plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
	if (obj->tiling_mode != I915_TILING_NONE)
		fbc_ctl2 |= FBC_CTL_CPU_FENCE;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it...
	 */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	/* NOTE(review): 0x2fff is an odd mask for what looks like a
	 * contiguous bitfield (0x3fff would cover 14 bits) — confirm
	 * against the FBC_CONTROL register spec. */
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	if (obj->tiling_mode != I915_TILING_NONE)
		fbc_ctl |= dev_priv->cfb_fence;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
		      dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
}

void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

/* Enable FBC via the G4x DPFC interface. */
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ?
		DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		/* NOTE(review): comparing cfb_pitch against cfb_pitch/64-1
		 * looks like it was meant to compare against the new
		 * fb->pitch — confirm against upstream. */
		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
		    dev_priv->cfb_fence == obj->fence_reg &&
		    dev_priv->cfb_plane == intel_crtc->plane &&
		    dev_priv->cfb_y == crtc->y)
			return;

		I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
	dev_priv->cfb_fence = obj->fence_reg;
	dev_priv->cfb_plane = intel_crtc->plane;
	dev_priv->cfb_y = crtc->y;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	if (obj->tiling_mode != I915_TILING_NONE) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
		I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
	} else {
		I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
	}

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it...
	 */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

/* Poke the blitter ECOSKPD register so the blitter notifies FBC of writes
 * (SNB requirement); needs forcewake held around the accesses. */
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}

/* Enable FBC via the Ironlake DPFC interface. */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ?
		DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		/* NOTE(review): cfb_pitch vs cfb_pitch/64-1 — same suspicious
		 * comparison as g4x_enable_fbc(); confirm against upstream. */
		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
		    dev_priv->cfb_fence == obj->fence_reg &&
		    dev_priv->cfb_plane == intel_crtc->plane &&
		    dev_priv->cfb_offset == obj->gtt_offset &&
		    dev_priv->cfb_y == crtc->y)
			return;

		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
	dev_priv->cfb_fence = obj->fence_reg;
	dev_priv->cfb_plane = intel_crtc->plane;
	dev_priv->cfb_offset = obj->gtt_offset;
	dev_priv->cfb_y = crtc->y;

	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	if (obj->tiling_mode != I915_TILING_NONE) {
		dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
		I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
	} else {
		I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
	}

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);

	/* enable it...
	 */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

/* Dispatch to the platform-specific fbc_enabled hook, if one is set. */
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}

/* Dispatch to the platform-specific enable_fbc hook, if one is set. */
void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	dev_priv->display.enable_fbc(crtc, interval);
}

/* Dispatch to the platform-specific disable_fbc hook, if one is set. */
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
}

/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
static void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;

	DRM_DEBUG_KMS("\n");

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	if (!i915_enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) ||
	     IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}
	if (obj->tiling_mode != I915_TILING_X) {
		DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}

/*
 * Pin @obj into the GTT with the alignment required for scan-out, move it
 * to the display plane domain, and install a fence for tiled buffers.
 * Returns 0 on success or a negative errno; on failure the object is left
 * unpinned.
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin(obj, alignment, true);
	if (ret)
		goto err_interruptible;

	ret = i915_gem_object_set_to_display_plane(obj, pipelined);
	if (ret)
		goto err_unpin;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (obj->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence(obj, pipelined);
		if (ret)
			goto err_unpin;
	}

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}

/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		DRM_ERROR("Unknown color depth\n");
		return -EINVAL;
	}
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	if (HAS_PCH_SPLIT(dev))
		/* must disable */
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	Start = obj->gtt_offset;
	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitch);
	I915_WRITE(DSPSTRIDE(plane), fb->pitch);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane), Start);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPADDR(plane),
Offset); } else I915_WRITE(DSPADDR(plane), Start + Offset); POSTING_READ(reg); intel_update_fbc(dev); intel_increase_pllclock(crtc); return 0; } static int intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct drm_i915_master_private *master_priv; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int ret; /* no fb bound */ if (!crtc->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } switch (intel_crtc->plane) { case 0: case 1: break; default: return -EINVAL; } mutex_lock(&dev->struct_mutex); ret = intel_pin_and_fence_fb_obj(dev, to_intel_framebuffer(crtc->fb)->obj, NULL); if (ret != 0) { mutex_unlock(&dev->struct_mutex); return ret; } if (old_fb) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; wait_event(dev_priv->pending_flip_queue, atomic_read(&dev_priv->mm.wedged) || atomic_read(&obj->pending_flip) == 0); /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the * current scanout is retired before unpinning the old * framebuffer. * * This should only fail upon a hung GPU, in which case we * can safely continue. 
	 */
		ret = i915_gem_object_flush_gpu(obj);
		/* Best-effort: a failure here means a hung GPU; carry on. */
		(void) ret;
	}

	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
					 LEAVE_ATOMIC_MODE_SET);
	if (ret) {
		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Old scanout must stay pinned until the flip to the new base lands. */
	if (old_fb) {
		intel_wait_for_vblank(dev, intel_crtc->pipe);
		i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
	}

	mutex_unlock(&dev->struct_mutex);

	/* Mirror the new pan offsets into the DRI1 SAREA, when one exists. */
	if (!dev->primary->master)
		return 0;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return 0;

	if (intel_crtc->pipe) {
		master_priv->sarea_priv->pipeB_x = x;
		master_priv->sarea_priv->pipeB_y = y;
	} else {
		master_priv->sarea_priv->pipeA_x = x;
		master_priv->sarea_priv->pipeA_y = y;
	}

	return 0;
}

/*
 * Program the CPU eDP PLL (DP_A) for the given link clock (in kHz):
 * clocks below 200000 use the 160MHz PLL setting, everything else 270MHz.
 */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		 *   1) program 0x4600c bits 15:0 = 0x8124
		 *   2) program 0x46010 bit 0 = 1
		 *   3) program 0x46034 bit 24 = 1
		 *   4) program 0x64000 bit 14 = 1
		 */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	/* Flush the write, then let the PLL settle before use. */
	POSTING_READ(DP_A);
	udelay(500);
}

/*
 * Switch the FDI TX/RX link out of a training pattern into normal
 * (pixel-carrying) mode after training has completed.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}

/*
 * The FDI link training function for ILK/Ibexpeak.
 *
 * Runs the two-phase FDI training handshake: pattern 1 until the RX
 * reports bit lock, then pattern 2 until it reports symbol lock, polling
 * FDI_RX_IIR up to 5 times per phase.  Failures are logged, not returned.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe & plane first */
	assert_pipe_enabled(dev_priv, pipe);
	assert_plane_enabled(dev_priv, plane);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);		/* read back to post the write before the delay */
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* lane-count field at bits 21:19, programmed as (lanes - 1) */
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
			   FDI_RX_PHASE_SYNC_POINTER_EN);
	}

	/* Poll for bit lock; writing the bit back acks/clears the status. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock to finish training. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");
}

/* TX voltage-swing / pre-emphasis levels tried in order during SNB-B
 * (and IVB) training; indexed by the retry loop counter. */
static const int snb_b_fdi_train_param [] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* lane-count field at bits 21:19, programmed as (lanes - 1) */
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Retry train 1 at each voltage/emphasis level until bit lock. */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_BIT_LOCK) {
			/* Writing the bit back acks/clears the status. */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Retry train 2 at each voltage/emphasis level until symbol lock. */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	/* Manual training: auto-train must be off on IVB A0. */
	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_AUTO;
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Retry train 1 at each voltage/emphasis level until bit lock. */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp =
I915_READ(reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_BIT_LOCK || (I915_READ(reg) & FDI_RX_BIT_LOCK)) { I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); DRM_DEBUG_KMS("FDI train 1 done.\n"); break; } } if (i == 4) DRM_ERROR("FDI train 1 fail!\n"); /* Train 2 */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE_IVB; temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; I915_WRITE(reg, temp); reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; I915_WRITE(reg, temp); POSTING_READ(reg); udelay(150); for (i = 0; i < 4; i++ ) { reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[i]; I915_WRITE(reg, temp); POSTING_READ(reg); udelay(500); reg = FDI_RX_IIR(pipe); temp = I915_READ(reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_SYMBOL_LOCK) { I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); DRM_DEBUG_KMS("FDI train 2 done.\n"); break; } } if (i == 4) DRM_ERROR("FDI train 2 fail!\n"); DRM_DEBUG_KMS("FDI train done.\n"); } static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 reg, temp; /* Write the TU size bits so error detection works */ I915_WRITE(FDI_RX_TUSIZE1(pipe), I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); temp &= ~((0x7 << 19) | (0x7 << 16)); temp |= (intel_crtc->fdi_lanes - 1) << 19; temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); POSTING_READ(reg); udelay(200); /* Switch from Rawclk to PCDclk */ temp = I915_READ(reg); I915_WRITE(reg, temp | FDI_PCDCLK); POSTING_READ(reg); 
udelay(200); /* Enable CPU FDI TX PLL, always on for Ironlake */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); if ((temp & FDI_TX_PLL_ENABLE) == 0) { I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); POSTING_READ(reg); udelay(100); } } static void ironlake_fdi_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 reg, temp; /* disable CPU FDI tx and PCH FDI rx */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); I915_WRITE(reg, temp & ~FDI_TX_ENABLE); POSTING_READ(reg); reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); temp &= ~(0x7 << 16); temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; I915_WRITE(reg, temp & ~FDI_RX_ENABLE); POSTING_READ(reg); udelay(100); /* Ironlake workaround, disable clock pointer after downing FDI */ if (HAS_PCH_IBX(dev)) { I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); I915_WRITE(FDI_RX_CHICKEN(pipe), I915_READ(FDI_RX_CHICKEN(pipe) & ~FDI_RX_PHASE_SYNC_POINTER_EN)); } /* still set train pattern 1 */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; I915_WRITE(reg, temp); reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); if (HAS_PCH_CPT(dev)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; } else { temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; } /* BPC in FDI rx is consistent with that in PIPECONF */ temp &= ~(0x07 << 16); temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; I915_WRITE(reg, temp); POSTING_READ(reg); udelay(100); } /* * When we disable a pipe, we need to clear any pending scanline wait events * to avoid hanging the ring, which we assume we are waiting on. 
*/ static void intel_clear_scanline_wait(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; u32 tmp; if (IS_GEN2(dev)) /* Can't break the hang on i8xx */ return; ring = LP_RING(dev_priv); tmp = I915_READ_CTL(ring); if (tmp & RING_WAIT) I915_WRITE_CTL(ring, tmp); } static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) { struct drm_i915_gem_object *obj; struct drm_i915_private *dev_priv; if (crtc->fb == NULL) return; obj = to_intel_framebuffer(crtc->fb)->obj; dev_priv = crtc->dev->dev_private; wait_event(dev_priv->pending_flip_queue, atomic_read(&obj->pending_flip) == 0); } static bool intel_crtc_driving_pch(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; /* * If there's a non-PCH eDP on this crtc, it must be DP_A, and that * must be driven by its own crtc; no sharing is possible. */ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { if (encoder->base.crtc != crtc) continue; switch (encoder->type) { case INTEL_OUTPUT_EDP: if (!intel_encoder_is_pch_edp(&encoder->base)) return false; continue; } } return true; } /* * Enable PCH resources required for PCH ports: * - PCH PLLs * - FDI training & RX/TX * - update transcoder timings * - DP transcoding bits * - transcoder */ static void ironlake_pch_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 reg, temp; /* For PCH output, training FDI link */ dev_priv->display.fdi_link_train(crtc); intel_enable_pch_pll(dev_priv, pipe); if (HAS_PCH_CPT(dev)) { /* Be sure PCH DPLL SEL is set */ temp = I915_READ(PCH_DPLL_SEL); if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0) temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0) temp |= 
(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); I915_WRITE(PCH_DPLL_SEL, temp); } /* set transcoder timing, panel must allow it */ assert_panel_unlocked(dev_priv, pipe); I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); intel_fdi_normal_train(crtc); /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { reg = TRANS_DP_CTL(pipe); temp = I915_READ(reg); temp &= ~(TRANS_DP_PORT_SEL_MASK | TRANS_DP_SYNC_MASK | TRANS_DP_BPC_MASK); temp |= (TRANS_DP_OUTPUT_ENABLE | TRANS_DP_ENH_FRAMING); temp |= TRANS_DP_8BPC; if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; switch (intel_trans_dp_port_sel(crtc)) { case PCH_DP_B: temp |= TRANS_DP_PORT_SEL_B; break; case PCH_DP_C: temp |= TRANS_DP_PORT_SEL_C; break; case PCH_DP_D: temp |= TRANS_DP_PORT_SEL_D; break; default: DRM_DEBUG_KMS("Wrong PCH DP port return. 
Guess port B\n"); temp |= TRANS_DP_PORT_SEL_B; break; } I915_WRITE(reg, temp); } intel_enable_transcoder(dev_priv, pipe); } static void ironlake_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; u32 temp; bool is_pch_port; if (intel_crtc->active) return; intel_crtc->active = true; intel_update_watermarks(dev); if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { temp = I915_READ(PCH_LVDS); if ((temp & LVDS_PORT_EN) == 0) I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); } is_pch_port = intel_crtc_driving_pch(crtc); if (is_pch_port) ironlake_fdi_pll_enable(crtc); else ironlake_fdi_disable(crtc); /* Enable panel fitting for LVDS */ if (dev_priv->pch_pf_size && (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { /* Force use of hard-coded filter coefficients * as some pre-programmed values are broken, * e.g. x201. 
*/ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); } /* * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ intel_crtc_load_lut(crtc); intel_enable_pipe(dev_priv, pipe, is_pch_port); intel_enable_plane(dev_priv, plane, pipe); if (is_pch_port) ironlake_pch_enable(crtc); mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); mutex_unlock(&dev->struct_mutex); intel_crtc_update_cursor(crtc, true); } static void ironlake_crtc_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; u32 reg, temp; if (!intel_crtc->active) return; intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); intel_crtc_update_cursor(crtc, false); intel_disable_plane(dev_priv, plane, pipe); if (dev_priv->cfb_plane == plane && dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); intel_disable_pipe(dev_priv, pipe); /* Disable PF */ I915_WRITE(PF_CTL(pipe), 0); I915_WRITE(PF_WIN_SZ(pipe), 0); ironlake_fdi_disable(crtc); /* This is a horrible layering violation; we should be doing this in * the connector/encoder ->prepare instead, but we don't always have * enough information there about the config to know whether it will * actually be necessary or just cause undesired flicker. 
*/ intel_disable_pch_ports(dev_priv, pipe); intel_disable_transcoder(dev_priv, pipe); if (HAS_PCH_CPT(dev)) { /* disable TRANS_DP_CTL */ reg = TRANS_DP_CTL(pipe); temp = I915_READ(reg); temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); temp |= TRANS_DP_PORT_SEL_NONE; I915_WRITE(reg, temp); /* disable DPLL_SEL */ temp = I915_READ(PCH_DPLL_SEL); switch (pipe) { case 0: temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); break; case 1: temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); break; case 2: /* FIXME: manage transcoder PLLs? */ temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); break; default: BUG(); /* wtf */ } I915_WRITE(PCH_DPLL_SEL, temp); } /* disable PCH DPLL */ intel_disable_pch_pll(dev_priv, pipe); /* Switch from PCDclk to Rawclk */ reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); I915_WRITE(reg, temp & ~FDI_PCDCLK); /* Disable CPU FDI TX PLL */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); POSTING_READ(reg); udelay(100); reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); /* Wait for the clocks to turn off. */ POSTING_READ(reg); udelay(100); intel_crtc->active = false; intel_update_watermarks(dev); mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); intel_clear_scanline_wait(dev); mutex_unlock(&dev->struct_mutex); } static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 
	 */
	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
		ironlake_crtc_enable(crtc);
		break;

	case DRM_MODE_DPMS_OFF:
		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
		ironlake_crtc_disable(crtc);
		break;
	}
}

/*
 * Switch the hardware overlay off when the owning CRTC goes down.
 * Enabling (@enable == true) is a no-op here: userspace re-enables the
 * overlay itself, since it has to recompute placement anyway.
 */
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	if (!enable && intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		/* Run the switch-off non-interruptibly; result is best-effort. */
		dev_priv->mm.interruptible = false;
		(void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

/*
 * Power-up path for pre-Ironlake (i9xx) CRTCs: PLL, then pipe, then
 * plane, followed by LUT/FBC/overlay/cursor bring-up.  Idempotent via
 * the ->active flag.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	intel_enable_pll(dev_priv, pipe);
	intel_enable_pipe(dev_priv, pipe, false);
	intel_enable_plane(dev_priv, plane, pipe);

	intel_crtc_load_lut(crtc);
	intel_update_fbc(dev);

	/* Give the overlay scaler a chance to enable if it's on this pipe */
	intel_crtc_dpms_overlay(intel_crtc, true);
	intel_crtc_update_cursor(crtc, true);
}

/*
 * Power-down path for i9xx CRTCs: mirror of i9xx_crtc_enable, waiting
 * for pending page flips before touching the hardware.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (!intel_crtc->active)
		return;

	/* Give the overlay scaler a chance to disable if it's on this pipe */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false); if (dev_priv->cfb_plane == plane && dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); intel_disable_plane(dev_priv, plane, pipe); intel_disable_pipe(dev_priv, pipe); intel_disable_pll(dev_priv, pipe); intel_crtc->active = false; intel_update_fbc(dev); intel_update_watermarks(dev); intel_clear_scanline_wait(dev); } static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) { /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. */ switch (mode) { case DRM_MODE_DPMS_ON: case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: i9xx_crtc_enable(crtc); break; case DRM_MODE_DPMS_OFF: i9xx_crtc_disable(crtc); break; } } /** * Sets the power management mode of the pipe and plane. */ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; bool enabled; if (intel_crtc->dpms_mode == mode) return; intel_crtc->dpms_mode = mode; dev_priv->display.dpms(crtc, mode); if (!dev->primary->master) return; master_priv = dev->primary->master->driver_priv; if (!master_priv->sarea_priv) return; enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; switch (pipe) { case 0: master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; break; case 1: master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; master_priv->sarea_priv->pipeB_h = enabled ? 
crtc->mode.vdisplay : 0; break; default: DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); break; } } static void intel_crtc_disable(struct drm_crtc *crtc) { struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; struct drm_device *dev = crtc->dev; crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); if (crtc->fb) { mutex_lock(&dev->struct_mutex); i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); mutex_unlock(&dev->struct_mutex); } } /* Prepare for a mode set. * * Note we could be a lot smarter here. We need to figure out which outputs * will be enabled, which disabled (in short, how the config will changes) * and perform the minimum necessary steps to accomplish that, e.g. updating * watermarks, FBC configuration, making sure PLLs are programmed correctly, * panel fitting is in the proper state, etc. */ static void i9xx_crtc_prepare(struct drm_crtc *crtc) { i9xx_crtc_disable(crtc); } static void i9xx_crtc_commit(struct drm_crtc *crtc) { i9xx_crtc_enable(crtc); } static void ironlake_crtc_prepare(struct drm_crtc *crtc) { ironlake_crtc_disable(crtc); } static void ironlake_crtc_commit(struct drm_crtc *crtc) { ironlake_crtc_enable(crtc); } void intel_encoder_prepare (struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; /* lvds has its own version of prepare see intel_lvds_prepare */ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); } void intel_encoder_commit (struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; /* lvds has its own version of commit see intel_lvds_commit */ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); } void intel_encoder_destroy(struct drm_encoder *encoder) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); drm_encoder_cleanup(encoder); kfree(intel_encoder); } static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device 
*dev = crtc->dev; if (HAS_PCH_SPLIT(dev)) { /* FDI link clock is fixed at 2.7G */ if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) return false; } /* XXX some encoders set the crtcinfo, others don't. * Obviously we need some form of conflict resolution here... */ if (adjusted_mode->crtc_htotal == 0) drm_mode_set_crtcinfo(adjusted_mode, 0); return true; } static int i945_get_display_clock_speed(struct drm_device *dev) { return 400000; } static int i915_get_display_clock_speed(struct drm_device *dev) { return 333000; } static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) { return 200000; } static int i915gm_get_display_clock_speed(struct drm_device *dev) { u16 gcfgc = 0; pci_read_config_word(dev->pdev, GCFGC, &gcfgc); if (gcfgc & GC_LOW_FREQUENCY_ENABLE) return 133000; else { switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { case GC_DISPLAY_CLOCK_333_MHZ: return 333000; default: case GC_DISPLAY_CLOCK_190_200_MHZ: return 190000; } } } static int i865_get_display_clock_speed(struct drm_device *dev) { return 266000; } static int i855_get_display_clock_speed(struct drm_device *dev) { u16 hpllcc = 0; /* Assume that the hardware is in the high speed state. This * should be the default. 
*/ switch (hpllcc & GC_CLOCK_CONTROL_MASK) { case GC_CLOCK_133_200: case GC_CLOCK_100_200: return 200000; case GC_CLOCK_166_250: return 250000; case GC_CLOCK_100_133: return 133000; } /* Shouldn't happen */ return 0; } static int i830_get_display_clock_speed(struct drm_device *dev) { return 133000; } struct fdi_m_n { u32 tu; u32 gmch_m; u32 gmch_n; u32 link_m; u32 link_n; }; static void fdi_reduce_ratio(u32 *num, u32 *den) { while (*num > 0xffffff || *den > 0xffffff) { *num >>= 1; *den >>= 1; } } static void ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, int link_clock, struct fdi_m_n *m_n) { m_n->tu = 64; /* default size */ /* BUG_ON(pixel_clock > INT_MAX / 36); */ m_n->gmch_m = bits_per_pixel * pixel_clock; m_n->gmch_n = link_clock * nlanes * 8; fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); m_n->link_m = pixel_clock; m_n->link_n = link_clock; fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); } struct intel_watermark_params { unsigned long fifo_size; unsigned long max_wm; unsigned long default_wm; unsigned long guard_size; unsigned long cacheline_size; }; /* Pineview has different values for various configs */ static const struct intel_watermark_params pineview_display_wm = { PINEVIEW_DISPLAY_FIFO, PINEVIEW_MAX_WM, PINEVIEW_DFT_WM, PINEVIEW_GUARD_WM, PINEVIEW_FIFO_LINE_SIZE }; static const struct intel_watermark_params pineview_display_hplloff_wm = { PINEVIEW_DISPLAY_FIFO, PINEVIEW_MAX_WM, PINEVIEW_DFT_HPLLOFF_WM, PINEVIEW_GUARD_WM, PINEVIEW_FIFO_LINE_SIZE }; static const struct intel_watermark_params pineview_cursor_wm = { PINEVIEW_CURSOR_FIFO, PINEVIEW_CURSOR_MAX_WM, PINEVIEW_CURSOR_DFT_WM, PINEVIEW_CURSOR_GUARD_WM, PINEVIEW_FIFO_LINE_SIZE, }; static const struct intel_watermark_params pineview_cursor_hplloff_wm = { PINEVIEW_CURSOR_FIFO, PINEVIEW_CURSOR_MAX_WM, PINEVIEW_CURSOR_DFT_WM, PINEVIEW_CURSOR_GUARD_WM, PINEVIEW_FIFO_LINE_SIZE }; static const struct intel_watermark_params g4x_wm_info = { G4X_FIFO_SIZE, G4X_MAX_WM, G4X_MAX_WM, 2, 
G4X_FIFO_LINE_SIZE, }; static const struct intel_watermark_params g4x_cursor_wm_info = { I965_CURSOR_FIFO, I965_CURSOR_MAX_WM, I965_CURSOR_DFT_WM, 2, G4X_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i965_cursor_wm_info = { I965_CURSOR_FIFO, I965_CURSOR_MAX_WM, I965_CURSOR_DFT_WM, 2, I915_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i945_wm_info = { I945_FIFO_SIZE, I915_MAX_WM, 1, 2, I915_FIFO_LINE_SIZE }; static const struct intel_watermark_params i915_wm_info = { I915_FIFO_SIZE, I915_MAX_WM, 1, 2, I915_FIFO_LINE_SIZE }; static const struct intel_watermark_params i855_wm_info = { I855GM_FIFO_SIZE, I915_MAX_WM, 1, 2, I830_FIFO_LINE_SIZE }; static const struct intel_watermark_params i830_wm_info = { I830_FIFO_SIZE, I915_MAX_WM, 1, 2, I830_FIFO_LINE_SIZE }; static const struct intel_watermark_params ironlake_display_wm_info = { ILK_DISPLAY_FIFO, ILK_DISPLAY_MAXWM, ILK_DISPLAY_DFTWM, 2, ILK_FIFO_LINE_SIZE }; static const struct intel_watermark_params ironlake_cursor_wm_info = { ILK_CURSOR_FIFO, ILK_CURSOR_MAXWM, ILK_CURSOR_DFTWM, 2, ILK_FIFO_LINE_SIZE }; static const struct intel_watermark_params ironlake_display_srwm_info = { ILK_DISPLAY_SR_FIFO, ILK_DISPLAY_MAX_SRWM, ILK_DISPLAY_DFT_SRWM, 2, ILK_FIFO_LINE_SIZE }; static const struct intel_watermark_params ironlake_cursor_srwm_info = { ILK_CURSOR_SR_FIFO, ILK_CURSOR_MAX_SRWM, ILK_CURSOR_DFT_SRWM, 2, ILK_FIFO_LINE_SIZE }; static const struct intel_watermark_params sandybridge_display_wm_info = { SNB_DISPLAY_FIFO, SNB_DISPLAY_MAXWM, SNB_DISPLAY_DFTWM, 2, SNB_FIFO_LINE_SIZE }; static const struct intel_watermark_params sandybridge_cursor_wm_info = { SNB_CURSOR_FIFO, SNB_CURSOR_MAXWM, SNB_CURSOR_DFTWM, 2, SNB_FIFO_LINE_SIZE }; static const struct intel_watermark_params sandybridge_display_srwm_info = { SNB_DISPLAY_SR_FIFO, SNB_DISPLAY_MAX_SRWM, SNB_DISPLAY_DFT_SRWM, 2, SNB_FIFO_LINE_SIZE }; static const struct intel_watermark_params sandybridge_cursor_srwm_info = { SNB_CURSOR_SR_FIFO, 
	SNB_CURSOR_MAX_SRWM,
	SNB_CURSOR_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO available to this plane, in cachelines
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized based chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned...
 */
	/* Clamp to the register limit; fall back to the chip default if the
	 * FIFO is too small for the required entries. */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}

/*
 * CxSR (self-refresh) latency table entry, keyed on desktop/mobile,
 * DDR3 vs DDR2, FSB frequency and memory frequency; the *_sr and
 * *_hpll_disable fields are the latencies looked up by
 * pineview_update_wm().
 */
struct cxsr_latency {
	int is_desktop;
	int is_ddr3;
	unsigned long fsb_freq;
	unsigned long mem_freq;
	unsigned long display_sr;
	unsigned long display_hpll_disable;
	unsigned long cursor_sr;
	unsigned long cursor_hpll_disable;
};

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},	/* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},	/* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},	/* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},	/* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},	/* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},	/* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},	/* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},	/* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},	/* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},	/* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},	/* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},	/* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},	/* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},	/* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},	/* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},	/* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},	/* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},	/* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},	/* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},	/* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},	/* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},	/* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},	/* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},	/* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},	/* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},	/*
 DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},	/* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},	/* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},	/* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},	/* DDR3-800 SC */
};

/*
 * Look up the CxSR latency entry matching the platform's memory
 * configuration; returns NULL (and logs) when no entry matches, in
 * which case the caller disables CxSR.
 */
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq &&
		    mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;

/*
 * The i9xx/i85x/i845/i830 helpers below decode the DSPARB FIFO split
 * register to find how many FIFO entries are assigned to the given
 * plane (plane A is the low field, plane B is derived from the split
 * point).
 */
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ?
		      "B" : "A", size);

	return size;
}

/*
 * Return the single enabled CRTC with a framebuffer, or NULL when none
 * or more than one is enabled — self-refresh watermarks only apply in
 * single-pipe configurations.
 */
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (crtc->enabled && crtc->fb) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

/*
 * Program the Pineview DSPFW1/DSPFW3 self-refresh watermarks from the
 * CxSR latency table and enable CxSR, or disable CxSR when no latency
 * entry matches or more than one pipe is active.
 */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}

/*
 * Compute the WM0 (normal, non-self-refresh) plane and cursor
 * watermarks for the given plane.  Returns false (with the guard sizes
 * as fallback values) when the plane's CRTC is disabled or has no fb.
 * Also used by the Ironlake/Sandybridge paths for their LP0 values.
 */
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

/*
 * Check the wm result.
 *
 * If any calculated watermark values is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

/*
 * Compute the G4x self-refresh (SR) plane and cursor watermarks for
 * the given plane; returns false when latency is zero or the result
 * fails g4x_check_srwm().
 */
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

/* True when exactly one plane bit is set in the enabled-planes mask. */
#define single_plane_enabled(mask) is_power_of_2(mask)

/*
 * Compute and program the G4x WM0 watermarks for both planes, and the
 * self-refresh watermarks when exactly one plane is enabled.
 */
static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x...
	   disable it */
	I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

/*
 * Compute and program the i965 (Broadwater/Crestline) self-refresh
 * watermarks; the non-SR plane watermarks are fixed at 8 entries.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* Same calculation for the 64-wide cursor plane. */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

/*
 * Compute and program watermarks for gen2/gen3 parts (i915/i945/i855
 * class) via the FW_BLC registers, including the optional memory
 * self-refresh watermark when exactly one plane is enabled.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		/* enabled ends up NULL unless exactly one pipe is active */
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks.
	 */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	/* Re-enable self-refresh now that the new watermarks are in place. */
	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}

/*
 * Program the single plane-A watermark for i830-class hardware; only
 * runs when exactly one CRTC is enabled.
 */
static void i830_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       crtc->fb->bits_per_pixel / 8,
				       latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

#define ILK_LP0_PLANE_LATENCY		700
#define ILK_LP0_CURSOR_LATENCY		1300

/*
 * Check the wm result.
 *
 * If any calculated watermark values is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool ironlake_check_srwm(struct drm_device *dev, int level,
				int fbc_wm, int display_wm, int cursor_wm,
				const struct intel_watermark_params *display,
				const struct intel_watermark_params *cursor)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);

	if (fbc_wm > SNB_FBC_MAX_SRWM) {
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
			      fbc_wm, SNB_FBC_MAX_SRWM, level);

		/* fbc has it's own way to disable FBC WM */
		I915_WRITE(DISP_ARB_CTL,
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
		return false;
	}

	/* NOTE(review): the comparisons use the passed-in display/cursor
	 * limits, but the debug messages below print the SNB_* constants
	 * even on Ironlake — the logged limit can differ from the one
	 * actually checked. */
	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
		return false;
	}

	if (!(fbc_wm || display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
		return false;
	}

	return true;
}

/*
 * Compute watermark values of WM[1-3],
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
				  int latency_ns,
				  const struct intel_watermark_params *display,
				  const struct intel_watermark_params *cursor,
				  int *fbc_wm, int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int hdisplay, htotal, pixel_size, clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*fbc_wm = *display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/*
	 * Spec says:
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
	 */
	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return ironlake_check_srwm(dev, level,
				   *fbc_wm, *display_wm, *cursor_wm,
				   display, cursor);
}

/*
 * Program the Ironlake LP0 (per-pipe) watermarks and, when exactly one
 * plane is enabled, the WM1/WM2 self-refresh watermarks.  WM3 is not
 * programmed on ILK (see comment at the end).
 */
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}

/*
 * Sandybridge variant: LP0 watermarks per pipe, then WM1/WM2/WM3
 * self-refresh levels when exactly one plane is enabled.
 */
static void sandybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB support 3 levels of watermark.
	 *
	 * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
	 * and disabled in the descending order
	 *
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e.
 non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
static void intel_update_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(dev);
}

/* True when LVDS spread-spectrum clocking should be used (enabled in
 * the VBT/module option and not disabled by a platform quirk). */
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	return dev_priv->lvds_use_ssc && i915_panel_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}

/*
 * Full mode set for gen2-gen4 (pre-Ironlake) CRTCs: pick a reference
 * clock, find PLL divisors, and program the FP/DPLL/LVDS/pipe/plane
 * registers for the adjusted mode.
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode,
			      int x, int y,
			      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	u32 temp;
	u32 lvds_sync = 0;

	/* Classify the encoders attached to this CRTC. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_DVO:
			is_dvo = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		}

		num_connectors++;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		refclk = dev_priv->lvds_ssc_freq * 1000;
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      refclk / 1000);
	} else if (!IS_GEN2(dev)) {
		refclk = 96000;
	} else {
		refclk = 48000;
	}

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &reduced_clock);
		if (has_reduced_clock && (clock.p != reduced_clock.p)) {
			/*
			 * If the different P is found, it means that we can't
			 * switch the display clock by using the FP0/FP1.
			 * In such case we will disable the LVDS downclock
			 * feature.
			 */
			DRM_DEBUG_KMS("Different P is found for "
				      "LVDS clock/downclock\n");
			has_reduced_clock = 0;
		}
	}
	/* SDVO TV has fixed PLL values depend on its clock range,
	   this mirrors vbios setting. */
	if (is_sdvo && is_tv) {
		if (adjusted_mode->clock >= 100000
		    && adjusted_mode->clock < 140500) {
			clock.p1 = 2;
			clock.p2 = 10;
			clock.n = 3;
			clock.m1 = 16;
			clock.m2 = 8;
		} else if (adjusted_mode->clock >= 140500
			   && adjusted_mode->clock <= 200000) {
			clock.p1 = 1;
			clock.p2 = 10;
			clock.n = 6;
			clock.m1 = 12;
			clock.m2 = 8;
		}
	}

	/* Pack the divisors into the FP0/FP1 register layout (Pineview
	 * encodes n differently). */
	if (IS_PINEVIEW(dev)) {
		fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
		if (has_reduced_clock)
			fp2 = (1 << reduced_clock.n) << 16 |
				reduced_clock.m1 << 8 | reduced_clock.m2;
	} else {
		fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
		if (has_reduced_clock)
			fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
				reduced_clock.m2;
	}

	dpll = DPLL_VGA_MODE_DIS;

	if (!IS_GEN2(dev)) {
		if (is_lvds)
			dpll |= DPLLB_MODE_LVDS;
		else
			dpll |= DPLLB_MODE_DAC_SERIAL;
		if (is_sdvo) {
			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (pixel_multiplier > 1) {
				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
			}
			dpll |= DPLL_DVO_HIGH_SPEED;
		}
		if (is_dp)
			dpll |= DPLL_DVO_HIGH_SPEED;

		/* compute bitmask from p1 value */
		if (IS_PINEVIEW(dev))
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
		else {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (IS_G4X(dev) && has_reduced_clock)
				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
		}
		switch (clock.p2) {
		case 5:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
			break;
		case 7:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
			break;
		case 10:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
			break;
		case 14:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
			break;
		}
		if (INTEL_INFO(dev)->gen >= 4)
			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
	} else {
		if (is_lvds) {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		} else {
			if (clock.p1 == 2)
				dpll |= PLL_P1_DIVIDE_BY_TWO;
			else
				dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (clock.p2 == 4)
				dpll |= PLL_P2_DIVIDE_BY_4;
		}
	}

	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	/* Ironlake's plane is forced to pipe, bit 24 is to
	   enable color space conversion */
	if (pipe == 0)
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
		 * core speed.
		 *
		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
		 * pipe == 0 check?
		 */
		if (mode->clock >
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
			pipeconf |= PIPECONF_DOUBLE_WIDE;
		else
			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
	}

	dpll |= DPLL_VCO_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	/* Write the divisors, then the DPLL with VCO still disabled. */
	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (pipe == 1) {
			temp |= LVDS_PIPEB_SELECT;
		} else {
			temp &= ~LVDS_PIPEB_SELECT;
		}
		/* set the corresponsding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		/* set the dithering flag on LVDS as needed */
		if (INTEL_INFO(dev)->gen >= 4) {
			if (dev_priv->lvds_dither)
				temp |= LVDS_ENABLE_DITHER;
			else
				temp &= ~LVDS_ENABLE_DITHER;
		}
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(LVDS, temp);
	}

	if (is_dp) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	}

	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		temp = 0;
		if (is_sdvo) {
			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (temp > 1)
				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
			else
				temp = 0;
		}
		I915_WRITE(DPLL_MD(pipe), temp);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
*/ I915_WRITE(DPLL(pipe), dpll); } intel_crtc->lowfreq_avail = false; if (is_lvds && has_reduced_clock && i915_powersave) { I915_WRITE(FP1(pipe), fp2); intel_crtc->lowfreq_avail = true; if (HAS_PIPE_CXSR(dev)) { DRM_DEBUG_KMS("enabling CxSR downclocking\n"); pipeconf |= PIPECONF_CXSR_DOWNCLOCK; } } else { I915_WRITE(FP1(pipe), fp); if (HAS_PIPE_CXSR(dev)) { DRM_DEBUG_KMS("disabling CxSR downclocking\n"); pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; } } if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; /* the chip adds 2 halflines automatically */ adjusted_mode->crtc_vdisplay -= 1; adjusted_mode->crtc_vtotal -= 1; adjusted_mode->crtc_vblank_start -= 1; adjusted_mode->crtc_vblank_end -= 1; adjusted_mode->crtc_vsync_end -= 1; adjusted_mode->crtc_vsync_start -= 1; } else pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ I915_WRITE(HTOTAL(pipe), (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); I915_WRITE(HBLANK(pipe), (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); I915_WRITE(HSYNC(pipe), (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); I915_WRITE(VTOTAL(pipe), (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); I915_WRITE(VBLANK(pipe), (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16)); I915_WRITE(VSYNC(pipe), (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); /* pipesrc and dspsize control the size that is scaled from, * which should always be the user's requested size. 
/*
 * Tail of i9xx_crtc_mode_set() (the function begins before this chunk):
 * program plane/pipe sizes, enable the pipe and plane, then flip to the
 * new framebuffer and recompute watermarks.
 */
I915_WRITE(DSPSIZE(plane),
	   ((mode->vdisplay - 1) << 16) |
	   (mode->hdisplay - 1));
I915_WRITE(DSPPOS(plane), 0);
I915_WRITE(PIPESRC(pipe),
	   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
intel_enable_pipe(dev_priv, pipe, false);

intel_wait_for_vblank(dev, pipe);

I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
intel_enable_plane(dev_priv, plane, pipe);

ret = intel_pipe_set_base(crtc, x, y, old_fb);

intel_update_watermarks(dev);

return ret;
}

/*
 * Mode-set for PCH-split (Ironlake+) platforms.
 *
 * Chooses a reference clock, finds PLL divisors for the adjusted mode,
 * computes the FDI link M/N values and lane count from the panel bpc,
 * then programs the reference-clock control, DPLL, LVDS port, dithering,
 * pipe timings and data/link M/N registers for @pipe/@plane.
 *
 * @crtc:          CRTC being configured
 * @mode:          user-requested mode (used for pipesrc scaling size)
 * @adjusted_mode: encoder-adjusted mode actually programmed
 * @x, @y:         framebuffer panning offsets
 * @old_fb:        previous framebuffer, handed to intel_pipe_set_base()
 *
 * Returns 0 on success, -EINVAL when no PLL divisors satisfy the target
 * clock, or the error from intel_pipe_set_base().
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y,
				  struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
	bool ok, has_reduced_clock = false, is_sdvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct intel_encoder *has_edp_encoder = NULL;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	struct fdi_m_n m_n = {0};
	u32 temp;
	u32 lvds_sync = 0;
	int target_clock, pixel_multiplier, lane, link_bw, bpp, factor;

	/* Classify every encoder driven by this crtc; the output types
	 * steer refclk choice, PLL mode and fixed divisor overrides below. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_edp_encoder = encoder;
			break;
		}

		num_connectors++;
	}

	/* Single LVDS panel with SSC enabled uses the panel's SSC clock;
	 * everything else gets the fixed 96/120 MHz reference. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		refclk = dev_priv->lvds_ssc_freq * 1000;
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      refclk / 1000);
	} else {
		refclk = 96000;
		if (!has_edp_encoder ||
		    intel_encoder_is_pch_edp(&has_edp_encoder->base))
			refclk = 120000; /* 120Mhz refclk */
	}

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &reduced_clock);
		if (has_reduced_clock && (clock.p != reduced_clock.p)) {
			/*
			 * If the different P is found, it means that we can't
			 * switch the display clock by using the FP0/FP1.
			 * In such case we will disable the LVDS downclock
			 * feature.
			 */
			DRM_DEBUG_KMS("Different P is found for "
				      "LVDS clock/downclock\n");
			has_reduced_clock = 0;
		}
	}
	/* SDVO TV has fixed PLL values depend on its clock range,
	   this mirrors vbios setting. */
	if (is_sdvo && is_tv) {
		if (adjusted_mode->clock >= 100000
		    && adjusted_mode->clock < 140500) {
			clock.p1 = 2;
			clock.p2 = 10;
			clock.n = 3;
			clock.m1 = 16;
			clock.m2 = 8;
		} else if (adjusted_mode->clock >= 140500
			   && adjusted_mode->clock <= 200000) {
			clock.p1 = 1;
			clock.p2 = 10;
			clock.n = 6;
			clock.m1 = 12;
			clock.m2 = 8;
		}
	}

	/* FDI link */
	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
	lane = 0;
	/* CPU eDP doesn't require FDI link, so just set DP M/N
	   according to current link config */
	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		target_clock = mode->clock;
		intel_edp_link_config(has_edp_encoder, &lane, &link_bw);
	} else {
		/* [e]DP over FDI requires target mode clock
		   instead of link clock */
		/* NOTE(review): has_edp_encoder may be NULL in this branch
		 * (when !is_dp the second operand is still evaluated with
		 * &NULL->base) — confirm against later upstream fixes. */
		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
			target_clock = mode->clock;
		else
			target_clock = adjusted_mode->clock;

		/* FDI is a binary signal running at ~2.7GHz, encoding
		 * each output octet as 10 bits. The actual frequency
		 * is stored as a divider into a 100MHz clock, and the
		 * mode pixel clock is stored in units of 1KHz.
		 * Hence the bw of each lane in terms of the mode signal
		 * is:
		 */
		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
	}

	/* determine panel color depth */
	temp = I915_READ(PIPECONF(pipe));
	temp &= ~PIPE_BPC_MASK;
	if (is_lvds) {
		/* the BPC will be 6 if it is 18-bit LVDS panel */
		if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
			temp |= PIPE_8BPC;
		else
			temp |= PIPE_6BPC;
	} else if (has_edp_encoder) {
		switch (dev_priv->edp.bpp/3) {
		case 8:
			temp |= PIPE_8BPC;
			break;
		case 10:
			temp |= PIPE_10BPC;
			break;
		case 6:
			temp |= PIPE_6BPC;
			break;
		case 12:
			temp |= PIPE_12BPC;
			break;
		}
	} else
		temp |= PIPE_8BPC;
	I915_WRITE(PIPECONF(pipe), temp);

	/* Translate the programmed pipe bpc back into bits-per-pixel for
	 * the FDI bandwidth computation below. */
	switch (temp & PIPE_BPC_MASK) {
	case PIPE_8BPC:
		bpp = 24;
		break;
	case PIPE_10BPC:
		bpp = 30;
		break;
	case PIPE_6BPC:
		bpp = 18;
		break;
	case PIPE_12BPC:
		bpp = 36;
		break;
	default:
		DRM_ERROR("unknown pipe bpc value\n");
		bpp = 24;
	}

	if (!lane) {
		/*
		 * Account for spread spectrum to avoid
		 * oversubscribing the link. Max center spread
		 * is 2.5%; use 5% for safety's sake.
		 */
		u32 bps = target_clock * bpp * 21 / 20;
		lane = bps / (link_bw * 8) + 1;
	}

	intel_crtc->fdi_lanes = lane;

	if (pixel_multiplier > 1)
		link_bw *= pixel_multiplier;
	ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;
	temp |= DREF_NONSPREAD_SOURCE_ENABLE;
	temp &= ~DREF_SSC_SOURCE_MASK;
	temp |= DREF_SSC_SOURCE_ENABLE;
	I915_WRITE(PCH_DREF_CONTROL, temp);

	POSTING_READ(PCH_DREF_CONTROL);
	udelay(200);

	if (has_edp_encoder) {
		if (intel_panel_use_ssc(dev_priv)) {
			temp |= DREF_SSC1_ENABLE;
			I915_WRITE(PCH_DREF_CONTROL, temp);

			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
			if (intel_panel_use_ssc(dev_priv))
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else {
			/* Enable SSC on PCH eDP if needed */
			if (intel_panel_use_ssc(dev_priv)) {
				DRM_ERROR("enabling SSC on PCH\n");
				temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
			}
		}
		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}

	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
	if (has_reduced_clock)
		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
			reduced_clock.m2;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->lvds_ssc_freq == 100) ||
		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
			factor = 25;
	} else if (is_sdvo && is_tv)
		factor = 20;

	if (clock.m1 < factor * clock.n)
		fp |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
		if (pixel_multiplier > 1) {
			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
		}
		dpll |= DPLL_DVO_HIGH_SPEED;
	}
	/* NOTE(review): same possible NULL has_edp_encoder as above. */
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (clock.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	/* PCH eDP needs FDI, but CPU eDP does not */
	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		I915_WRITE(PCH_FP0(pipe), fp);
		I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

		POSTING_READ(PCH_DPLL(pipe));
		udelay(150);
	}

	/* enable transcoder DPLL */
	if (HAS_PCH_CPT(dev)) {
		temp = I915_READ(PCH_DPLL_SEL);
		switch (pipe) {
		case 0:
			temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
			break;
		case 1:
			temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
			break;
		case 2:
			/* FIXME: manage transcoder PLLs? */
			temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
			break;
		default:
			BUG();
		}
		I915_WRITE(PCH_DPLL_SEL, temp);

		POSTING_READ(PCH_DPLL_SEL);
		udelay(150);
	}

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(PCH_LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (pipe == 1) {
			if (HAS_PCH_CPT(dev))
				temp |= PORT_TRANS_B_SEL_CPT;
			else
				temp |= LVDS_PIPEB_SELECT;
		} else {
			if (HAS_PCH_CPT(dev))
				temp &= ~PORT_TRANS_SEL_MASK;
			else
				temp &= ~LVDS_PIPEB_SELECT;
		}
		/* set the corresponding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(PCH_LVDS, temp);
	}

	/* set the dithering flag and clear for anything other than a panel. */
	pipeconf &= ~PIPECONF_DITHER_EN;
	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
	if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
		pipeconf |= PIPECONF_DITHER_EN;
		pipeconf |= PIPECONF_DITHER_TYPE_ST1;
	}

	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	} else {
		/* For non-DP output, clear any trans DP clock recovery setting.*/
		I915_WRITE(TRANSDATA_M1(pipe), 0);
		I915_WRITE(TRANSDATA_N1(pipe), 0);
		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
	}

	if (!has_edp_encoder ||
	    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		I915_WRITE(PCH_DPLL(pipe), dpll);

		/* Wait for the clocks to stabilize. */
		POSTING_READ(PCH_DPLL(pipe));
		udelay(150);

		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(PCH_DPLL(pipe), dpll);
	}

	intel_crtc->lowfreq_avail = false;
	if (is_lvds && has_reduced_clock && i915_powersave) {
		I915_WRITE(PCH_FP1(pipe), fp2);
		intel_crtc->lowfreq_avail = true;
		if (HAS_PIPE_CXSR(dev)) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		}
	} else {
		I915_WRITE(PCH_FP1(pipe), fp);
		if (HAS_PIPE_CXSR(dev)) {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
		}
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vdisplay -= 1;
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_start -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		adjusted_mode->crtc_vsync_end -= 1;
		adjusted_mode->crtc_vsync_start -= 1;
	} else
		pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */

	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);

	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
	}

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_vblank(dev, pipe);

	if (IS_GEN5(dev)) {
		/* enable address swizzle for tiling buffer */
		temp = I915_READ(DISP_ARB_CTL);
		I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
	}

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}

/*
 * Generic mode-set entry point: brackets the platform-specific
 * display.crtc_mode_set hook with drm_vblank pre/post-modeset
 * bookkeeping and propagates its return value.
 */
static int intel_crtc_mode_set(struct drm_crtc *crtc,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode,
			       int x, int y,
			       struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int ret;

	drm_vblank_pre_modeset(dev, pipe);

	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
					      x, y, old_fb);

	drm_vblank_post_modeset(dev, pipe);

	return ret;
}

/** Loads the palette/gamma unit for the CRTC with the prepared values */
void intel_crtc_load_lut(struct
drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int palreg = PALETTE(intel_crtc->pipe);
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	/* use legacy palette for Ironlake */
	if (HAS_PCH_SPLIT(dev))
		palreg = LGC_PALETTE(intel_crtc->pipe);

	/* Pack each 8-bit R/G/B LUT entry into one 32-bit palette register. */
	for (i = 0; i < 256; i++) {
		I915_WRITE(palreg + 4 * i,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}
}

/*
 * Show or hide the hardware cursor on 845G/865G-class hardware, where
 * the cursor base register may only be written while the cursor is
 * disabled.  @base == 0 means "hide".
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
}

/*
 * Show or hide the hardware cursor on 9xx-class hardware.  The base
 * register write at the end latches all changes on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));
		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
}

/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base, pos;
	bool visible;

	pos = 0;

	/* The cursor is hidden (base = 0) unless enabled, on an active
	 * crtc with a framebuffer, and at least partially on-screen. */
	if (on && crtc->enabled && crtc->fb) {
		base = intel_crtc->cursor_addr;
		if (x > (int) crtc->fb->width)
			base = 0;

		if (y > (int) crtc->fb->height)
			base = 0;
	} else
		base = 0;

	/* Negative coordinates are encoded as sign bit + magnitude. */
	if (x < 0) {
		if (x + intel_crtc->cursor_width < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	visible = base != 0;
	if (!visible && !intel_crtc->cursor_visible)
		return;

	I915_WRITE(CURPOS(pipe), pos);
	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base);
	else
		i9xx_update_cursor(crtc, base);

	if (visible)
		intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
}

/*
 * drm_crtc_funcs.cursor_set implementation: validates and pins (or
 * attaches as a phys object) the GEM buffer identified by @handle and
 * makes it the cursor image for @crtc.  @handle == 0 turns the cursor
 * off.  Returns 0 or a negative errno.
 */
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
				 struct drm_file *file,
				 uint32_t handle,
				 uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	uint32_t addr;
	int ret;

	DRM_DEBUG_KMS("\n");

	/* if we want to turn off the cursor ignore width and height */
	if (!handle) {
		DRM_DEBUG_KMS("cursor off\n");
		addr = 0;
		obj = NULL;
		mutex_lock(&dev->struct_mutex);
		goto finish;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		DRM_ERROR("we currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	/* NOTE(review): &obj->base == NULL only holds when the lookup
	 * returned NULL and base is the first member — fragile; confirm
	 * against the to_intel_bo()/container_of() layout. */
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if
(obj->base.size < width * height * 4) { DRM_ERROR("buffer is to small\n"); ret = -ENOMEM; goto fail; } /* we only need to pin inside GTT if cursor is non-phy */ mutex_lock(&dev->struct_mutex); if (!dev_priv->info->cursor_needs_physical) { if (obj->tiling_mode) { DRM_ERROR("cursor cannot be tiled\n"); ret = -EINVAL; goto fail_locked; } ret = i915_gem_object_pin(obj, PAGE_SIZE, true); if (ret) { DRM_ERROR("failed to pin cursor bo\n"); goto fail_locked; } ret = i915_gem_object_set_to_gtt_domain(obj, 0); if (ret) { DRM_ERROR("failed to move cursor bo into the GTT\n"); goto fail_unpin; } ret = i915_gem_object_put_fence(obj); if (ret) { DRM_ERROR("failed to move cursor bo into the GTT\n"); goto fail_unpin; } addr = obj->gtt_offset; } else { int align = IS_I830(dev) ? 16 * 1024 : 256; ret = i915_gem_attach_phys_object(dev, obj, (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, align); if (ret) { DRM_ERROR("failed to attach phys object\n"); goto fail_locked; } addr = obj->phys_obj->handle->busaddr; } if (IS_GEN2(dev)) I915_WRITE(CURSIZE, (height << 12) | width); finish: if (intel_crtc->cursor_bo) { if (dev_priv->info->cursor_needs_physical) { if (intel_crtc->cursor_bo != obj) i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); } else i915_gem_object_unpin(intel_crtc->cursor_bo); drm_gem_object_unreference(&intel_crtc->cursor_bo->base); } mutex_unlock(&dev->struct_mutex); intel_crtc->cursor_addr = addr; intel_crtc->cursor_bo = obj; intel_crtc->cursor_width = width; intel_crtc->cursor_height = height; intel_crtc_update_cursor(crtc, true); return 0; fail_unpin: i915_gem_object_unpin(obj); fail_locked: mutex_unlock(&dev->struct_mutex); fail: drm_gem_object_unreference_unlocked(&obj->base); return ret; } static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); intel_crtc->cursor_x = x; intel_crtc->cursor_y = y; intel_crtc_update_cursor(crtc, true); return 0; } /** Sets the color 
ramps on behalf of RandR */ void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); intel_crtc->lut_r[regno] = red >> 8; intel_crtc->lut_g[regno] = green >> 8; intel_crtc->lut_b[regno] = blue >> 8; } void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); *red = intel_crtc->lut_r[regno] << 8; *green = intel_crtc->lut_g[regno] << 8; *blue = intel_crtc->lut_b[regno] << 8; } static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size) { int end = (start + size > 256) ? 256 : start + size, i; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); for (i = start; i < end; i++) { intel_crtc->lut_r[i] = red[i] >> 8; intel_crtc->lut_g[i] = green[i] >> 8; intel_crtc->lut_b[i] = blue[i] >> 8; } intel_crtc_load_lut(crtc); } /** * Get a pipe with a simple mode set on it for doing load-based monitor * detection. * * It will be up to the load-detect code to adjust the pipe as appropriate for * its requirements. The pipe will be connected to no other encoders. * * Currently this code will only succeed if there is a pipe with no encoders * configured for it. In the future, it could choose to temporarily disable * some outputs to free up a pipe for its use. * * \return crtc, or NULL if no pipes are available. 
*/

/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0,
		 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

/*
 * Wraps a GEM object in an intel_framebuffer.  Takes ownership of the
 * @obj reference: it is dropped on any failure.  Returns the new
 * drm_framebuffer or an ERR_PTR.
 */
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return ERR_PTR(-ENOMEM);
	}

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret) {
		drm_gem_object_unreference_unlocked(&obj->base);
		kfree(intel_fb);
		return ERR_PTR(ret);
	}

	return &intel_fb->base;
}

/* Bytes per scanline for @width pixels at @bpp, rounded up to 64. */
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
	return ALIGN(pitch, 64);
}

/* Page-aligned buffer size needed to back @mode at @bpp. */
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
}

/*
 * Allocates a GEM object big enough for @mode and wraps it in a
 * framebuffer of the given @depth/@bpp.  Returns the framebuffer or
 * an ERR_PTR.
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd mode_cmd;

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.depth = depth;
	mode_cmd.bpp = bpp;
	mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);

	return intel_framebuffer_create(dev, &mode_cmd, obj);
}

/*
 * Returns the fbdev framebuffer if it is large enough (in pitch and
 * total size) to display @mode, otherwise NULL.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (dev_priv->fbdev == NULL)
		return NULL;

	obj = dev_priv->fbdev->ifb.obj;
	if (obj == NULL)
		return NULL;

	fb = &dev_priv->fbdev->ifb.base;
	if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
							  fb->bits_per_pixel))
		return NULL;

	if (obj->base.size < mode->vdisplay * fb->pitch)
		return NULL;

	return fb;
}

bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
*/ if (!crtc) { DRM_DEBUG_KMS("no pipe available for load-detect\n"); return false; } encoder->crtc = crtc; connector->encoder = encoder; intel_crtc = to_intel_crtc(crtc); old->dpms_mode = intel_crtc->dpms_mode; old->load_detect_temp = true; old->release_fb = NULL; if (!mode) mode = &load_detect_mode; old_fb = crtc->fb; /* We need a framebuffer large enough to accommodate all accesses * that the plane may generate whilst we perform load detection. * We can not rely on the fbcon either being present (we get called * during its initialisation to detect all boot displays, or it may * not even exist) or that it is large enough to satisfy the * requested mode. */ crtc->fb = mode_fits_in_fbdev(dev, mode); if (crtc->fb == NULL) { DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); old->release_fb = crtc->fb; } else DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); if (IS_ERR(crtc->fb)) { DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); crtc->fb = old_fb; return false; } if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) { DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); if (old->release_fb) old->release_fb->funcs->destroy(old->release_fb); crtc->fb = old_fb; return false; } /* let the connector get through one full cycle before testing */ intel_wait_for_vblank(dev, intel_crtc->pipe); return true; } void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, struct drm_connector *connector, struct intel_load_detect_pipe *old) { struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; struct drm_crtc *crtc = encoder->crtc; struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, drm_get_connector_name(connector), encoder->base.id, 
drm_get_encoder_name(encoder)); if (old->load_detect_temp) { connector->encoder = NULL; drm_helper_disable_unused_functions(dev); if (old->release_fb) old->release_fb->funcs->destroy(old->release_fb); return; } /* Switch crtc and encoder back off if necessary */ if (old->dpms_mode != DRM_MODE_DPMS_ON) { encoder_funcs->dpms(encoder, old->dpms_mode); crtc_funcs->dpms(crtc, old->dpms_mode); } } /* Returns the clock of the currently programmed mode of the given pipe. */ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 dpll = I915_READ(DPLL(pipe)); u32 fp; intel_clock_t clock; if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) fp = I915_READ(FP0(pipe)); else fp = I915_READ(FP1(pipe)); clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; if (IS_PINEVIEW(dev)) { clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; } else { clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; } if (!IS_GEN2(dev)) { if (IS_PINEVIEW(dev)) clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); else clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> DPLL_FPA01_P1_POST_DIV_SHIFT); switch (dpll & DPLL_MODE_MASK) { case DPLLB_MODE_DAC_SERIAL: clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 5 : 10; break; case DPLLB_MODE_LVDS: clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 
7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				      "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100MHz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */
	return clock.dot;
}

/** Returns the currently programmed mode of the given pipe.
 *
 * Allocates a drm_display_mode (caller frees) and fills it from the pipe's
 * timing registers; returns NULL on allocation failure.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_display_mode *mode;
	int htot = I915_READ(HTOTAL(pipe));
	int hsync = I915_READ(HSYNC(pipe));
	int vtot = I915_READ(VTOTAL(pipe));
	int vsync = I915_READ(VSYNC(pipe));

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/* Each register packs two fields: low 16 bits and high 16 bits,
	 * each off by one from the logical value (hence the +1 below). */
	mode->clock = intel_crtc_clock_get(dev, crtc);
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	return mode;
}

#define GPU_IDLE_TIMEOUT 500 /* ms */

/* When this timer fires, we've been idle for awhile */
static void intel_gpu_idle_timer(unsigned long arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!list_empty(&dev_priv->mm.active_list)) {
		/* Still processing requests, so just re-arm the timer. */
		mod_timer(&dev_priv->idle_timer, jiffies +
			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
		return;
	}

	dev_priv->busy = false;
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}

#define CRTC_IDLE_TIMEOUT 1000 /* ms */

/* Per-CRTC idle timer: clears the busy flag and kicks the idle worker once
 * the scanout buffer is no longer active on the GPU. */
static void intel_crtc_idle_timer(unsigned long arg)
{
	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
	struct drm_crtc *crtc = &intel_crtc->base;
	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
	struct intel_framebuffer *intel_fb;

	intel_fb = to_intel_framebuffer(crtc->fb);
	if (intel_fb && intel_fb->obj->active) {
		/* The framebuffer is still being accessed by the GPU.
*/
		mod_timer(&intel_crtc->idle_timer, jiffies +
			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
		return;
	}

	intel_crtc->busy = false;
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}

/* Switch LVDS back to the full-rate PLL divisor.  No-op on PCH-split
 * platforms or when no downclocked LVDS mode is available; always re-arms
 * the idle timer so a later downclock can be scheduled. */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* Unlock panel regs */
		I915_WRITE(PP_CONTROL,
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		/* Read back to confirm the rate-select bit actually cleared. */
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");

		/* ...and lock them again */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
	}

	/* Schedule downclock */
	mod_timer(&intel_crtc->idle_timer, jiffies +
		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}

/* Drop LVDS to the downclocked PLL divisor to save power while idle. */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll = I915_READ(dpll_reg);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		/* Unlock panel regs */
		I915_WRITE(PP_CONTROL,
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);

		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");

		/* ...and lock them again */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
	}
}

/**
 * intel_idle_update - adjust clocks for idleness
 * @work: work struct
 *
 * Either the GPU or display (or both) went idle. Check the busy status
 * here and adjust the CRTC and GPU clocks as necessary.
 */
static void intel_idle_update(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    idle_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;

	if (!i915_powersave)
		return;

	mutex_lock(&dev->struct_mutex);

	i915_update_gfx_val(dev_priv);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		if (!intel_crtc->busy)
			intel_decrease_pllclock(crtc);
	}

	mutex_unlock(&dev->struct_mutex);
}

/**
 * intel_mark_busy - mark the GPU and possibly the display busy
 * @dev: drm device
 * @obj: object we're operating on
 *
 * Callers can use this function to indicate that the GPU is busy processing
 * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
 * buffer), we'll also mark the display as busy, so we know to increase its
 * clock frequency.
*/
void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL;
	struct intel_framebuffer *intel_fb;
	struct intel_crtc *intel_crtc;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	if (!dev_priv->busy)
		dev_priv->busy = true;
	else
		mod_timer(&dev_priv->idle_timer, jiffies +
			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		intel_fb = to_intel_framebuffer(crtc->fb);
		if (intel_fb->obj == obj) {
			if (!intel_crtc->busy) {
				/* Non-busy -> busy, upclock */
				intel_increase_pllclock(crtc);
				intel_crtc->busy = true;
			} else {
				/* Busy -> busy, put off timer */
				mod_timer(&intel_crtc->idle_timer, jiffies +
					  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
			}
		}
	}
}

/* Tear down a CRTC: cancel any in-flight unpin work first so it cannot
 * run against freed state, then release the DRM core's bookkeeping. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

/* Deferred work after a completed flip: unpin the old scanout buffer and
 * drop the references taken when the flip was queued. */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);

	mutex_lock(&work->dev->struct_mutex);
	i915_gem_object_unpin(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);
	mutex_unlock(&work->dev->struct_mutex);
	kfree(work);
}

/* Completion half of a page flip, called from the vblank interrupt path:
 * deliver the flip event (with a corrected timestamp if needed), release
 * the vblank reference, and schedule the unpin work. */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	struct drm_i915_gem_object *obj;
	struct drm_pending_vblank_event *e;
	struct timeval tnow, tvbl;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	do_gettimeofday(&tnow);

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	if (work == NULL || !work->pending) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	intel_crtc->unpin_work = NULL;

	if (work->event) {
		e = work->event;
		e->event.sequence = drm_vblank_count_and_time(dev,
							      intel_crtc->pipe,
							      &tvbl);

		/* Called before vblank count and timestamps have
		 * been updated for the vblank interval of flip
		 * completion? Need to increment vblank count and
		 * add one videorefresh duration to returned timestamp
		 * to account for this. We assume this happened if we
		 * get called over 0.9 frame durations after the last
		 * timestamped vblank.
		 *
		 * This calculation can not be used with vrefresh rates
		 * below 5Hz (10Hz to be on the safe side) without
		 * promoting to 64 integers.
		 */
		if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
		    9 * crtc->framedur_ns) {
			e->event.sequence++;
			tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
					     crtc->framedur_ns);
		}

		e->event.tv_sec = tvbl.tv_sec;
		e->event.tv_usec = tvbl.tv_usec;

		list_add_tail(&e->base.link,
			      &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
	}

	drm_vblank_put(dev, intel_crtc->pipe);

	spin_unlock_irqrestore(&dev->event_lock, flags);

	obj = work->old_fb_obj;

	/* Clear this plane's pending-flip bit and wake anyone waiting for
	 * all flips on the old buffer to drain. */
	atomic_clear_mask(1 << intel_crtc->plane,
			  &obj->pending_flip.counter);
	if (atomic_read(&obj->pending_flip) == 0)
		wake_up(&dev_priv->pending_flip_queue);
	schedule_work(&work->work);

	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}

/* Thin wrapper: finish the flip on the CRTC mapped to @pipe. */
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}

/* Thin wrapper: finish the flip on the CRTC mapped to @plane. */
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc =
dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}

/* Mark the queued flip on @plane as pending; warns if the hardware signals
 * the same flip more than once or with no unpin work outstanding. */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work) {
		if ((++intel_crtc->unpin_work->pending) > 1)
			DRM_ERROR("Prepared flip multiple times\n");
	} else {
		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/* Queue a page flip on gen2 via the legacy ring (MI_DISPLAY_FLIP). */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long offset;
	u32 flip_mask;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	/* Offset into the new buffer for cases of shared fbs between CRTCs */
	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;

	ret = BEGIN_LP_RING(6);
	if (ret)
		goto out;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_DISPLAY_FLIP |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitch);
	OUT_RING(obj->gtt_offset + offset);
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();
out:
	return ret;
}

/* Gen3 variant: identical to gen2 apart from the MI_DISPLAY_FLIP_I915
 * opcode used in the ring command. */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long offset;
	u32 flip_mask;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	/* Offset into the new buffer for cases of shared fbs between CRTCs */
	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;

	ret = BEGIN_LP_RING(6);
	if (ret)
		goto out;

	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_DISPLAY_FLIP_I915 |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitch);
	OUT_RING(obj->gtt_offset + offset);
	OUT_RING(MI_NOOP);

	ADVANCE_LP_RING();
out:
	return ret;
}

static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	ret = BEGIN_LP_RING(4);
	if (ret)
		goto out;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	OUT_RING(MI_DISPLAY_FLIP |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitch);
	OUT_RING(obj->gtt_offset | obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	OUT_RING(pf | pipesrc);
	ADVANCE_LP_RING();
out:
	return ret;
}

/* Gen6 variant: pitch and tiling mode share one dword, and the real
 * panel-fitter state is read back and included in the command. */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	ret = BEGIN_LP_RING(4);
	if (ret)
		goto out;

	OUT_RING(MI_DISPLAY_FLIP |
		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitch | obj->tiling_mode);
	OUT_RING(obj->gtt_offset);

	pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	OUT_RING(pf | pipesrc);
	ADVANCE_LP_RING();
out:
	return ret;
}

/*
 * On gen7 we currently use the blit ring because (in early silicon at least)
 * the render ring doesn't give us interrupts for page flip completion, which
 * means clients will hang after the first flip is queued. Fortunately the
 * blit ring generates interrupts properly, so use it instead.
*/
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto out;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto out;

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
	intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
	intel_ring_emit(ring, (obj->gtt_offset));
	intel_ring_emit(ring, (MI_NOOP));
	intel_ring_advance(ring);
out:
	return ret;
}

/* Fallback for generations with no flip support: always -ENODEV. */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj)
{
	return -ENODEV;
}

/* drm_crtc_funcs.page_flip entry point: validate and book-keep the flip
 * request, then hand it to the per-generation queue_flip hook.  On any
 * failure all references, the vblank hold, and unpin_work are unwound. */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->dev = crtc->dev;
	intel_fb = to_intel_framebuffer(crtc->fb);
	work->old_fb_obj = intel_fb->obj;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		kfree(work);

		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		return -EBUSY;
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	mutex_lock(&dev->struct_mutex);

	/* Reference the objects for the scheduled work.
	 */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->fb = fb;

	ret = drm_vblank_get(dev, intel_crtc->pipe);
	if (ret)
		goto cleanup_objs;

	work->pending_flip_obj = obj;

	work->enable_stall_check = true;

	/* Block clients from rendering to the new back buffer until
	 * the flip occurs and the object is no longer visible.
	 */
	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);

	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
	if (ret)
		goto cleanup_pending;

	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_pending:
	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
cleanup_objs:
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	spin_lock_irqsave(&dev->event_lock, flags);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	kfree(work);

	return ret;
}

/* Fix up plane/pipe assignments left behind by the BIOS/bootloader so the
 * driver's teardown ordering assumptions hold (non-PCH platforms only). */
static void intel_sanitize_modesetting(struct drm_device *dev,
				       int pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	if (HAS_PCH_SPLIT(dev))
		return;

	/* Who knows what state these registers were left in by the BIOS or
	 * grub?
	 *
	 * If we leave the registers in a conflicting state (e.g. with the
	 * display plane reading from the other pipe than the one we intend
	 * to use) then when we attempt to teardown the active mode, we will
	 * not disable the pipes and planes in the correct order -- leaving
	 * a plane reading from a disabled pipe and possibly leading to
	 * undefined behaviour.
	 */

	reg = DSPCNTR(plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;
	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
		return;

	/* This display plane is active and attached to the other CPU pipe. */
	pipe = !pipe;

	/* Disable the plane and wait for it to stop reading from the pipe.
*/
	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
}

static void intel_crtc_reset(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Reset flags back to the 'unknown' status so that they
	 * will be correctly set on the initial modeset.
	 */
	intel_crtc->dpms_mode = -1;

	/* We need to fix up any BIOS configuration that conflicts with
	 * our expectations.
	 */
	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}

/* Helper vtable shared by all CRTCs; the prepare/commit slots are filled
 * in per-platform by intel_crtc_init() below. */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.disable = intel_crtc_disable,
};

static const struct drm_crtc_funcs intel_crtc_funcs = {
	.reset = intel_crtc_reset,
	.cursor_set = intel_crtc_cursor_set,
	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};

/* Allocate, initialise and register the CRTC for @pipe: identity gamma
 * LUT, pipe<->plane mapping, helper hooks and the per-CRTC idle timer. */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc_reset(&intel_crtc->base);
	intel_crtc->active = true; /* force the pipe off on setup_init_config */

	if (HAS_PCH_SPLIT(dev)) {
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
		intel_helper_funcs.commit = ironlake_crtc_commit;
	} else {
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
		intel_helper_funcs.commit = i9xx_crtc_commit;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_crtc->busy = false;

	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
		    (unsigned long)intel_crtc);
}

/* DRM_I915_GET_PIPE_FROM_CRTC_ID ioctl: translate a CRTC object id into
 * the hardware pipe number.  Returns -EINVAL on an unknown id. */
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_mode_object *drmmode_obj;
	struct intel_crtc *crtc;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
			DRM_MODE_OBJECT_CRTC);

	if (!drmmode_obj) {
		DRM_ERROR("no such CRTC id\n");
		return -EINVAL;
	}

	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

/* Build the possible_clones bitmask: one bit per registered encoder whose
 * clone_mask intersects @type_mask. */
static int intel_encoder_clones(struct drm_device *dev, int type_mask)
{
	struct intel_encoder *encoder;
	int index_mask = 0;
	int entry = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		if (type_mask & encoder->clone_mask)
			index_mask |= (1 << entry);
		entry++;
	}

	return index_mask;
}

/* Detect whether eDP port A is present and usable on this device. */
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	/* Gen5 can fuse eDP A off entirely. */
	if (IS_GEN5(dev) &&
	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/* Probe and register every output (LVDS, CRT, SDVO, HDMI, DP, TV, DVO)
 * present on this platform, then wire up clone masks and backlight. */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder; bool dpd_is_edp = false; bool has_lvds = false; if (IS_MOBILE(dev) && !IS_I830(dev)) has_lvds = intel_lvds_init(dev); if (!has_lvds && !HAS_PCH_SPLIT(dev)) { /* disable the panel fitter on everything but LVDS */ I915_WRITE(PFIT_CONTROL, 0); } if (HAS_PCH_SPLIT(dev)) { dpd_is_edp = intel_dpd_is_edp(dev); if (has_edp_a(dev)) intel_dp_init(dev, DP_A); if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) intel_dp_init(dev, PCH_DP_D); } intel_crt_init(dev); if (HAS_PCH_SPLIT(dev)) { int found; if (I915_READ(HDMIB) & PORT_DETECTED) { /* PCH SDVOB multiplex with HDMIB */ found = intel_sdvo_init(dev, PCH_SDVOB); if (!found) intel_hdmi_init(dev, HDMIB); if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) intel_dp_init(dev, PCH_DP_B); } if (I915_READ(HDMIC) & PORT_DETECTED) intel_hdmi_init(dev, HDMIC); if (I915_READ(HDMID) & PORT_DETECTED) intel_hdmi_init(dev, HDMID); if (I915_READ(PCH_DP_C) & DP_DETECTED) intel_dp_init(dev, PCH_DP_C); if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) intel_dp_init(dev, PCH_DP_D); } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { bool found = false; if (I915_READ(SDVOB) & SDVO_DETECTED) { DRM_DEBUG_KMS("probing SDVOB\n"); found = intel_sdvo_init(dev, SDVOB); if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); intel_hdmi_init(dev, SDVOB); } if (!found && SUPPORTS_INTEGRATED_DP(dev)) { DRM_DEBUG_KMS("probing DP_B\n"); intel_dp_init(dev, DP_B); } } /* Before G4X SDVOC doesn't have its own detect register */ if (I915_READ(SDVOB) & SDVO_DETECTED) { DRM_DEBUG_KMS("probing SDVOC\n"); found = intel_sdvo_init(dev, SDVOC); } if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { if (SUPPORTS_INTEGRATED_HDMI(dev)) { DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); intel_hdmi_init(dev, SDVOC); } if (SUPPORTS_INTEGRATED_DP(dev)) { DRM_DEBUG_KMS("probing DP_C\n"); intel_dp_init(dev, DP_C); } } if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) { DRM_DEBUG_KMS("probing DP_D\n"); 
intel_dp_init(dev, DP_D); } } else if (IS_GEN2(dev)) intel_dvo_init(dev); if (SUPPORTS_TV(dev)) intel_tv_init(dev); list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { encoder->base.possible_crtcs = encoder->crtc_mask; encoder->base.possible_clones = intel_encoder_clones(dev, encoder->clone_mask); } intel_panel_setup_backlight(dev); /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(dev); } static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); drm_framebuffer_cleanup(fb); drm_gem_object_unreference_unlocked(&intel_fb->obj->base); kfree(intel_fb); } static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, struct drm_file *file, unsigned int *handle) { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj = intel_fb->obj; return drm_gem_handle_create(file, &obj->base, handle); } static const struct drm_framebuffer_funcs intel_fb_funcs = { .destroy = intel_user_framebuffer_destroy, .create_handle = intel_user_framebuffer_create_handle, }; int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *intel_fb, struct drm_mode_fb_cmd *mode_cmd, struct drm_i915_gem_object *obj) { int ret; if (obj->tiling_mode == I915_TILING_Y) return -EINVAL; if (mode_cmd->pitch & 63) return -EINVAL; switch (mode_cmd->bpp) { case 8: case 16: case 24: case 32: break; default: return -EINVAL; } ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { DRM_ERROR("framebuffer init failed %d\n", ret); return ret; } drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); intel_fb->obj = obj; return 0; } static struct drm_framebuffer * intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, struct drm_mode_fb_cmd *mode_cmd) { struct drm_i915_gem_object *obj; obj = to_intel_bo(drm_gem_object_lookup(dev, filp, 
mode_cmd->handle)); if (&obj->base == NULL) return ERR_PTR(-ENOENT); return intel_framebuffer_create(dev, mode_cmd, obj); } static const struct drm_mode_config_funcs intel_mode_funcs = { .fb_create = intel_user_framebuffer_create, .output_poll_changed = intel_fb_output_poll_changed, }; static struct drm_i915_gem_object * intel_alloc_context_page(struct drm_device *dev) { struct drm_i915_gem_object *ctx; int ret; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); ctx = i915_gem_alloc_object(dev, 4096); if (!ctx) { DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); return NULL; } ret = i915_gem_object_pin(ctx, 4096, true); if (ret) { DRM_ERROR("failed to pin power context: %d\n", ret); goto err_unref; } ret = i915_gem_object_set_to_gtt_domain(ctx, 1); if (ret) { DRM_ERROR("failed to set-domain on power context: %d\n", ret); goto err_unpin; } return ctx; err_unpin: i915_gem_object_unpin(ctx); err_unref: drm_gem_object_unreference(&ctx->base); mutex_unlock(&dev->struct_mutex); return NULL; } bool ironlake_set_drps(struct drm_device *dev, u8 val) { struct drm_i915_private *dev_priv = dev->dev_private; u16 rgvswctl; rgvswctl = I915_READ16(MEMSWCTL); if (rgvswctl & MEMCTL_CMD_STS) { DRM_DEBUG("gpu busy, RCS change rejected\n"); return false; /* still busy with another command */ } rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; I915_WRITE16(MEMSWCTL, rgvswctl); POSTING_READ16(MEMSWCTL); rgvswctl |= MEMCTL_CMD_STS; I915_WRITE16(MEMSWCTL, rgvswctl); return true; } void ironlake_enable_drps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 rgvmodectl = I915_READ(MEMMODECTL); u8 fmax, fmin, fstart, vstart; /* Enable temp reporting */ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); /* 100ms RC evaluation intervals */ I915_WRITE(RCUPEI, 100000); I915_WRITE(RCDNEI, 100000); /* Set max/min thresholds to 90ms and 80ms respectively */ 
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	/* Hand frequency control over to the software mode. */
	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	/* Wait for any previous frequency command to drain before we
	 * issue the first one of our own via ironlake_set_drps(). */
	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	/* Snapshot the busyness counters and timestamps that the IPS
	 * code compares against later.  NOTE(review): 0x112e0..0x112f4
	 * are raw register offsets with no symbolic names here —
	 * presumably chip activity counters; confirm against the PRM. */
	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
	getrawmonotonic(&dev_priv->last_time2);
}

/*
 * ironlake_disable_drps - undo ironlake_enable_drps(): mask/ack the
 * frequency-change interrupts and return to the start frequency.
 */
void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->fstart);
	msleep(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	msleep(1);
}

/*
 * gen6_set_rps - request render P-state 'val' on Gen6 by writing the
 * RPNSWREQ software-request register.
 */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 swreq;
	/* Shift the requested frequency into the RPNSWREQ request field. */
	swreq = (val & 0x3ff) << 25;
	I915_WRITE(GEN6_RPNSWREQ, swreq);
}

/*
 * gen6_disable_rps - mask and quiesce the Gen6 RPS (render P-state)
 * interrupt sources enabled by gen6_enable_rps().
 */
void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0);

	/* dev_priv->pm_iir is shared with the interrupt handler, so
	 * clear it under rps_lock. */
	spin_lock_irq(&dev_priv->rps_lock);
	dev_priv->pm_iir = 0;
	spin_unlock_irq(&dev_priv->rps_lock);

	/* Ack any pending PM interrupts by writing the bits back
	 * (presumably write-1-to-clear semantics — standard for IIR
	 * registers in this driver). */
	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}

/*
 * intel_pxfreq - decode a PXVFREQ register value into a frequency.
 *
 * Returns 0 when the pre-divider field is unprogrammed (avoids a
 * divide by zero); otherwise div * 133333 / ((1 << post) * pre).
 */
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;	/* divider: bits 21:16 */
	int post = (vidfreq & 0x3000) >> 12;	/* post-divider exponent: bits 13:12 */
	int pre = (vidfreq & 0x7);		/* pre-divider: bits 2:0 */

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}

/*
 * intel_init_emon - program the energy monitor (EMON) with event and
 * P-state weights, then enable it; the fused correction factor is
 * cached in dev_priv->corr for the IPS code.
 */
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		/* Weight scales with vid^2 * freq, normalized to a byte. */
		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack the sixteen byte weights into four 32-bit registers. */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
I915_WRITE(EG3, 0x68000300); I915_WRITE(EG4, 0x42000000); I915_WRITE(EG5, 0x00140031); I915_WRITE(EG6, 0); I915_WRITE(EG7, 0); for (i = 0; i < 8; i++) I915_WRITE(PXWL + (i * 4), 0); /* Enable PMON + select events */ I915_WRITE(ECR, 0x80000019); lcfuse = I915_READ(LCFUSE02); dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); } void gen6_enable_rps(struct drm_i915_private *dev_priv) { u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); u32 pcu_mbox, rc6_mask = 0; int cur_freq, min_freq, max_freq; int i; /* Here begins a magic sequence of register writes to enable * auto-downclocking. * * Perhaps there might be some value in exposing these to * userspace... */ I915_WRITE(GEN6_RC_STATE, 0); mutex_lock(&dev_priv->dev->struct_mutex); gen6_gt_force_wake_get(dev_priv); /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); for (i = 0; i < I915_NUM_RINGS; i++) I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); I915_WRITE(GEN6_RC6_THRESHOLD, 50000); I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ if (i915_enable_rc6) rc6_mask = GEN6_RC_CTL_RC6p_ENABLE | GEN6_RC_CTL_RC6_ENABLE; I915_WRITE(GEN6_RC_CONTROL, rc6_mask | GEN6_RC_CTL_EI_MODE(1) | GEN6_RC_CTL_HW_ENABLE); I915_WRITE(GEN6_RPNSWREQ, GEN6_FREQUENCY(10) | GEN6_OFFSET(0) | GEN6_AGGRESSIVE_TURBO); I915_WRITE(GEN6_RC_VIDEO_FREQ, GEN6_FREQUENCY(12)); I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 18 << 24 | 6 << 16); I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); I915_WRITE(GEN6_RP_UP_EI, 100000); 
I915_WRITE(GEN6_RP_DOWN_EI, 5000000); I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO | GEN6_RP_USE_NORMAL_FREQ | GEN6_RP_MEDIA_IS_GFX | GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG | GEN6_RP_DOWN_IDLE_CONT); if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500)) DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); I915_WRITE(GEN6_PCODE_DATA, 0); I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | GEN6_PCODE_WRITE_MIN_FREQ_TABLE); if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500)) DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); min_freq = (rp_state_cap & 0xff0000) >> 16; max_freq = rp_state_cap & 0xff; cur_freq = (gt_perf_status & 0xff00) >> 8; /* Check for overclock support */ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500)) DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); pcu_mbox = I915_READ(GEN6_PCODE_DATA); if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500)) DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); if (pcu_mbox & (1<<31)) { /* OC supported */ max_freq = pcu_mbox & 0xff; DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); } /* In units of 100MHz */ dev_priv->max_delay = max_freq; dev_priv->min_delay = min_freq; dev_priv->cur_delay = cur_freq; /* requires MSI enabled */ I915_WRITE(GEN6_PMIER, GEN6_PM_MBOX_EVENT | GEN6_PM_THERMAL_EVENT | GEN6_PM_RP_DOWN_TIMEOUT | GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_EI_EXPIRED); spin_lock_irq(&dev_priv->rps_lock); WARN_ON(dev_priv->pm_iir != 0); I915_WRITE(GEN6_PMIMR, 0); spin_unlock_irq(&dev_priv->rps_lock); /* enable all PM interrupts */ I915_WRITE(GEN6_PMINTRMSK, 0); gen6_gt_force_wake_put(dev_priv); mutex_unlock(&dev_priv->dev->struct_mutex); } static void ironlake_init_clock_gating(struct 
drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; /* Required for FBC */ dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | DPFCRUNIT_CLOCK_GATE_DISABLE | DPFDUNIT_CLOCK_GATE_DISABLE; /* Required for CxSR */ dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; I915_WRITE(PCH_3DCGDIS0, MARIUNIT_CLOCK_GATE_DISABLE | SVSMUNIT_CLOCK_GATE_DISABLE); I915_WRITE(PCH_3DCGDIS1, VFMUNIT_CLOCK_GATE_DISABLE); I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); /* * According to the spec the following bits should be set in * order to enable memory self-refresh * The bit 22/21 of 0x42004 * The bit 5 of 0x42020 * The bit 15 of 0x45000 */ I915_WRITE(ILK_DISPLAY_CHICKEN2, (I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); I915_WRITE(ILK_DSPCLK_GATE, (I915_READ(ILK_DSPCLK_GATE) | ILK_DPARB_CLK_GATE)); I915_WRITE(DISP_ARB_CTL, (I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS)); I915_WRITE(WM3_LP_ILK, 0); I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); /* * Based on the document from hardware guys the following bits * should be set unconditionally in order to enable FBC. * The bit 22 of 0x42000 * The bit 22 of 0x42004 * The bit 7,8,9 of 0x42020. 
*/ if (IS_IRONLAKE_M(dev)) { I915_WRITE(ILK_DISPLAY_CHICKEN1, I915_READ(ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS); I915_WRITE(ILK_DISPLAY_CHICKEN2, I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE); I915_WRITE(ILK_DSPCLK_GATE, I915_READ(ILK_DSPCLK_GATE) | ILK_DPFC_DIS1 | ILK_DPFC_DIS2 | ILK_CLK_FBC); } I915_WRITE(ILK_DISPLAY_CHICKEN2, I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_ELPIN_409_SELECT); I915_WRITE(_3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED << 16 | _3D_CHICKEN2_WM_READ_PIPELINED); } static void gen6_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); I915_WRITE(ILK_DISPLAY_CHICKEN2, I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_ELPIN_409_SELECT); I915_WRITE(WM3_LP_ILK, 0); I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); /* * According to the spec the following bits should be * set in order to enable memory self-refresh and fbc: * The bit21 and bit22 of 0x42000 * The bit21 and bit22 of 0x42004 * The bit5 and bit7 of 0x42020 * The bit14 of 0x70180 * The bit14 of 0x71180 */ I915_WRITE(ILK_DISPLAY_CHICKEN1, I915_READ(ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); I915_WRITE(ILK_DISPLAY_CHICKEN2, I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE | ILK_VSDPFD_FULL); I915_WRITE(ILK_DSPCLK_GATE, I915_READ(ILK_DSPCLK_GATE) | ILK_DPARB_CLK_GATE | ILK_DPFD_CLK_GATE); for_each_pipe(pipe) I915_WRITE(DSPCNTR(pipe), I915_READ(DSPCNTR(pipe)) | DISPPLANE_TRICKLE_FEED_DISABLE); } static void ivybridge_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); I915_WRITE(WM3_LP_ILK, 0); I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); for_each_pipe(pipe) I915_WRITE(DSPCNTR(pipe), I915_READ(DSPCNTR(pipe)) | 
			   DISPPLANE_TRICKLE_FEED_DISABLE);
}

/* Clock gating setup for G4x-class chips; GM45 additionally needs the
 * DSS unit's gating disabled. */
static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}

/* Clock gating setup for Crestline. */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
}

/* Clock gating setup for Broadwater: disable gating on the gen4
 * render units listed below. */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}

/* Gen3: enable PLL D3 power-down plus GFX and dot clock gating via
 * the D_STATE register. */
static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;

	I915_WRITE(D_STATE, dstate);
}

/* 85x: disable SV unit clock gating. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}

/* 830: disable overlay unit clock gating. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}

/* PCH (Ibex Peak) clock gating setup. */
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will
fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

/* PCH (Cougar Point) clock gating setup. */
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
}

/*
 * ironlake_teardown_rc6 - unpin and release the RC6 render/power
 * context pages, if allocated.  All callers in this file invoke this
 * with dev->struct_mutex held.
 */
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
		i915_gem_object_unpin(dev_priv->renderctx);
		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
		i915_gem_object_unpin(dev_priv->pwrctx);
		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}

/*
 * ironlake_disable_rc6 - turn RC6 off in hardware (if it was on) and
 * free the context pages.
 */
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Non-zero PWRCTXA means ironlake_enable_rc6 programmed it. */
	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}

/*
 * ironlake_setup_rc6 - lazily allocate the render and power context
 * pages needed for RC6.  Called with dev->struct_mutex held (see
 * ironlake_enable_rc6).  Returns 0 or -ENOMEM; on partial failure the
 * already-allocated page is released again.
 */
static int ironlake_setup_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx == NULL)
		dev_priv->renderctx = intel_alloc_context_page(dev);
	if (!dev_priv->renderctx)
		return -ENOMEM;

	if (dev_priv->pwrctx == NULL)
		dev_priv->pwrctx = intel_alloc_context_page(dev);
	if (!dev_priv->pwrctx) {
		ironlake_teardown_rc6(dev);
		return -ENOMEM;
	}

	return 0;
}

/*
 * ironlake_enable_rc6 - allocate the context pages and emit the ring
 * commands that let the GPU power down the render unit (RC6).
 * Gated on the i915_enable_rc6 module parameter.
 */
void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* rc6 disabled by default due to
repeated reports of hanging during * boot and resume. */ if (!i915_enable_rc6) return; mutex_lock(&dev->struct_mutex); ret = ironlake_setup_rc6(dev); if (ret) { mutex_unlock(&dev->struct_mutex); return; } /* * GPU can automatically power down the render unit if given a page * to save state. */ ret = BEGIN_LP_RING(6); if (ret) { ironlake_teardown_rc6(dev); mutex_unlock(&dev->struct_mutex); return; } OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); OUT_RING(MI_SET_CONTEXT); OUT_RING(dev_priv->renderctx->gtt_offset | MI_MM_SPACE_GTT | MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN | MI_RESTORE_INHIBIT); OUT_RING(MI_SUSPEND_FLUSH); OUT_RING(MI_NOOP); OUT_RING(MI_FLUSH); ADVANCE_LP_RING(); /* * Wait for the command parser to advance past MI_SET_CONTEXT. The HW * does an implicit flush, combined with MI_FLUSH above, it should be * safe to assume that renderctx is valid */ ret = intel_wait_ring_idle(LP_RING(dev_priv)); if (ret) { DRM_ERROR("failed to enable ironlake power power savings\n"); ironlake_teardown_rc6(dev); mutex_unlock(&dev->struct_mutex); return; } I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); mutex_unlock(&dev->struct_mutex); } void intel_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->display.init_clock_gating(dev); if (dev_priv->display.init_pch_clock_gating) dev_priv->display.init_pch_clock_gating(dev); } /* Set up chip specific display functions */ static void intel_init_display(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; /* We always want a DPMS function */ if (HAS_PCH_SPLIT(dev)) { dev_priv->display.dpms = ironlake_crtc_dpms; dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; } else { dev_priv->display.dpms = i9xx_crtc_dpms; dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; } if (I915_HAS_FBC(dev)) { if (HAS_PCH_SPLIT(dev)) { dev_priv->display.fbc_enabled = 
ironlake_fbc_enabled; dev_priv->display.enable_fbc = ironlake_enable_fbc; dev_priv->display.disable_fbc = ironlake_disable_fbc; } else if (IS_GM45(dev)) { dev_priv->display.fbc_enabled = g4x_fbc_enabled; dev_priv->display.enable_fbc = g4x_enable_fbc; dev_priv->display.disable_fbc = g4x_disable_fbc; } else if (IS_CRESTLINE(dev)) { dev_priv->display.fbc_enabled = i8xx_fbc_enabled; dev_priv->display.enable_fbc = i8xx_enable_fbc; dev_priv->display.disable_fbc = i8xx_disable_fbc; } /* 855GM needs testing */ } /* Returns the core display clock speed */ if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev))) dev_priv->display.get_display_clock_speed = i945_get_display_clock_speed; else if (IS_I915G(dev)) dev_priv->display.get_display_clock_speed = i915_get_display_clock_speed; else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) dev_priv->display.get_display_clock_speed = i9xx_misc_get_display_clock_speed; else if (IS_I915GM(dev)) dev_priv->display.get_display_clock_speed = i915gm_get_display_clock_speed; else if (IS_I865G(dev)) dev_priv->display.get_display_clock_speed = i865_get_display_clock_speed; else if (IS_I85X(dev)) dev_priv->display.get_display_clock_speed = i855_get_display_clock_speed; else /* 852, 830 */ dev_priv->display.get_display_clock_speed = i830_get_display_clock_speed; /* For FIFO watermark updates */ if (HAS_PCH_SPLIT(dev)) { if (HAS_PCH_IBX(dev)) dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; else if (HAS_PCH_CPT(dev)) dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; if (IS_GEN5(dev)) { if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) dev_priv->display.update_wm = ironlake_update_wm; else { DRM_DEBUG_KMS("Failed to get proper latency. 
" "Disable CxSR\n"); dev_priv->display.update_wm = NULL; } dev_priv->display.fdi_link_train = ironlake_fdi_link_train; dev_priv->display.init_clock_gating = ironlake_init_clock_gating; } else if (IS_GEN6(dev)) { if (SNB_READ_WM0_LATENCY()) { dev_priv->display.update_wm = sandybridge_update_wm; } else { DRM_DEBUG_KMS("Failed to read display plane latency. " "Disable CxSR\n"); dev_priv->display.update_wm = NULL; } dev_priv->display.fdi_link_train = gen6_fdi_link_train; dev_priv->display.init_clock_gating = gen6_init_clock_gating; } else if (IS_IVYBRIDGE(dev)) { /* FIXME: detect B0+ stepping and use auto training */ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; if (SNB_READ_WM0_LATENCY()) { dev_priv->display.update_wm = sandybridge_update_wm; } else { DRM_DEBUG_KMS("Failed to read display plane latency. " "Disable CxSR\n"); dev_priv->display.update_wm = NULL; } dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; } else dev_priv->display.update_wm = NULL; } else if (IS_PINEVIEW(dev)) { if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, dev_priv->fsb_freq, dev_priv->mem_freq)) { DRM_INFO("failed to find known CxSR latency " "(found ddr%s fsb freq %d, mem freq %d), " "disabling CxSR\n", (dev_priv->is_ddr3 == 1) ? 
"3": "2", dev_priv->fsb_freq, dev_priv->mem_freq); /* Disable CxSR and never update its watermark again */ pineview_disable_cxsr(dev); dev_priv->display.update_wm = NULL; } else dev_priv->display.update_wm = pineview_update_wm; dev_priv->display.init_clock_gating = gen3_init_clock_gating; } else if (IS_G4X(dev)) { dev_priv->display.update_wm = g4x_update_wm; dev_priv->display.init_clock_gating = g4x_init_clock_gating; } else if (IS_GEN4(dev)) { dev_priv->display.update_wm = i965_update_wm; if (IS_CRESTLINE(dev)) dev_priv->display.init_clock_gating = crestline_init_clock_gating; else if (IS_BROADWATER(dev)) dev_priv->display.init_clock_gating = broadwater_init_clock_gating; } else if (IS_GEN3(dev)) { dev_priv->display.update_wm = i9xx_update_wm; dev_priv->display.get_fifo_size = i9xx_get_fifo_size; dev_priv->display.init_clock_gating = gen3_init_clock_gating; } else if (IS_I865G(dev)) { dev_priv->display.update_wm = i830_update_wm; dev_priv->display.init_clock_gating = i85x_init_clock_gating; dev_priv->display.get_fifo_size = i830_get_fifo_size; } else if (IS_I85X(dev)) { dev_priv->display.update_wm = i9xx_update_wm; dev_priv->display.get_fifo_size = i85x_get_fifo_size; dev_priv->display.init_clock_gating = i85x_init_clock_gating; } else { dev_priv->display.update_wm = i830_update_wm; dev_priv->display.init_clock_gating = i830_init_clock_gating; if (IS_845G(dev)) dev_priv->display.get_fifo_size = i845_get_fifo_size; else dev_priv->display.get_fifo_size = i830_get_fifo_size; } /* Default just returns -ENODEV to indicate unsupported */ dev_priv->display.queue_flip = intel_default_queue_flip; switch (INTEL_INFO(dev)->gen) { case 2: dev_priv->display.queue_flip = intel_gen2_queue_flip; break; case 3: dev_priv->display.queue_flip = intel_gen3_queue_flip; break; case 4: case 5: dev_priv->display.queue_flip = intel_gen4_queue_flip; break; case 6: dev_priv->display.queue_flip = intel_gen6_queue_flip; break; case 7: dev_priv->display.queue_flip = intel_gen7_queue_flip; 
		break;
	}
}

/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times.  This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force (struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
}

/* One quirk-table entry: PCI device id plus subsystem vendor/device
 * (PCI_ANY_ID wildcards the subsystem fields — see intel_init_quirks),
 * and the hook to run on a match. */
struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

struct intel_quirk intel_quirks[] = {
	/* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
	{ 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Thinkpad R31 needs pipe A force quirk */
	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
	{ 0x3577, 0x1014, 0x0513, quirk_pipea_force },

	/* ThinkPad X40 needs pipe A force quirk */

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 855 & before need to leave pipe A & dpll A up */
	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
};

/* Walk the quirk table and run the hook of every entry that matches
 * this device's PCI ids. */
static void intel_init_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
(d->subsystem_device == q->subsystem_device || q->subsystem_device == PCI_ANY_ID)) q->hook(dev); } } /* Disable the VGA plane that we never use */ static void i915_disable_vga(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u8 sr1; u32 vga_reg; if (HAS_PCH_SPLIT(dev)) vga_reg = CPU_VGACNTRL; else vga_reg = VGACNTRL; vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); outb(1, VGA_SR_INDEX); sr1 = inb(VGA_SR_DATA); outb(sr1 | 1<<5, VGA_SR_DATA); vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); udelay(300); I915_WRITE(vga_reg, VGA_DISP_DISABLE); POSTING_READ(vga_reg); } void intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; drm_mode_config_init(dev); dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; dev->mode_config.funcs = (void *)&intel_mode_funcs; intel_init_quirks(dev); intel_init_display(dev); if (IS_GEN2(dev)) { dev->mode_config.max_width = 2048; dev->mode_config.max_height = 2048; } else if (IS_GEN3(dev)) { dev->mode_config.max_width = 4096; dev->mode_config.max_height = 4096; } else { dev->mode_config.max_width = 8192; dev->mode_config.max_height = 8192; } dev->mode_config.fb_base = dev->agp->base; DRM_DEBUG_KMS("%d display pipe%s available.\n", dev_priv->num_pipe, dev_priv->num_pipe > 1 ? 
"s" : ""); for (i = 0; i < dev_priv->num_pipe; i++) { intel_crtc_init(dev, i); } /* Just disable it once at startup */ i915_disable_vga(dev); intel_setup_outputs(dev); intel_init_clock_gating(dev); if (IS_IRONLAKE_M(dev)) { ironlake_enable_drps(dev); intel_init_emon(dev); } if (IS_GEN6(dev)) gen6_enable_rps(dev_priv); INIT_WORK(&dev_priv->idle_work, intel_idle_update); setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, (unsigned long)dev); } void intel_modeset_gem_init(struct drm_device *dev) { if (IS_IRONLAKE_M(dev)) ironlake_enable_rc6(dev); intel_setup_overlay(dev); } void intel_modeset_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; struct intel_crtc *intel_crtc; drm_kms_helper_poll_fini(dev); mutex_lock(&dev->struct_mutex); intel_unregister_dsm_handler(); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { /* Skip inactive CRTCs */ if (!crtc->fb) continue; intel_crtc = to_intel_crtc(crtc); intel_increase_pllclock(crtc); } if (dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); if (IS_GEN6(dev)) gen6_disable_rps(dev); if (IS_IRONLAKE_M(dev)) ironlake_disable_rc6(dev); mutex_unlock(&dev->struct_mutex); /* Disable the irq before mode object teardown, for the irq might * enqueue unpin/hotplug work. */ drm_irq_uninstall(dev); cancel_work_sync(&dev_priv->hotplug_work); /* Shut off idle work before the crtcs get freed. */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { intel_crtc = to_intel_crtc(crtc); del_timer_sync(&intel_crtc->idle_timer); } del_timer_sync(&dev_priv->idle_timer); cancel_work_sync(&dev_priv->idle_work); drm_mode_config_cleanup(dev); } /* * Return which encoder is currently attached for connector. 
*/ struct drm_encoder *intel_best_encoder(struct drm_connector *connector) { return &intel_attached_encoder(connector)->base; } void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder) { connector->encoder = encoder; drm_mode_connector_attach_encoder(&connector->base, &encoder->base); } /* * set vga decode state - true == enable VGA decode */ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) { struct drm_i915_private *dev_priv = dev->dev_private; u16 gmch_ctrl; pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); if (state) gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; else gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); return 0; } #ifdef CONFIG_DEBUG_FS #include <linux/seq_file.h> struct intel_display_error_state { struct intel_cursor_error_state { u32 control; u32 position; u32 base; u32 size; } cursor[2]; struct intel_pipe_error_state { u32 conf; u32 source; u32 htotal; u32 hblank; u32 hsync; u32 vtotal; u32 vblank; u32 vsync; } pipe[2]; struct intel_plane_error_state { u32 control; u32 stride; u32 size; u32 pos; u32 addr; u32 surface; u32 tile_offset; } plane[2]; }; struct intel_display_error_state * intel_display_capture_error_state(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_display_error_state *error; int i; error = kmalloc(sizeof(*error), GFP_ATOMIC); if (error == NULL) return NULL; for (i = 0; i < 2; i++) { error->cursor[i].control = I915_READ(CURCNTR(i)); error->cursor[i].position = I915_READ(CURPOS(i)); error->cursor[i].base = I915_READ(CURBASE(i)); error->plane[i].control = I915_READ(DSPCNTR(i)); error->plane[i].stride = I915_READ(DSPSTRIDE(i)); error->plane[i].size = I915_READ(DSPSIZE(i)); error->plane[i].pos= I915_READ(DSPPOS(i)); error->plane[i].addr = I915_READ(DSPADDR(i)); if (INTEL_INFO(dev)->gen >= 4) { error->plane[i].surface = I915_READ(DSPSURF(i)); 
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); } error->pipe[i].conf = I915_READ(PIPECONF(i)); error->pipe[i].source = I915_READ(PIPESRC(i)); error->pipe[i].htotal = I915_READ(HTOTAL(i)); error->pipe[i].hblank = I915_READ(HBLANK(i)); error->pipe[i].hsync = I915_READ(HSYNC(i)); error->pipe[i].vtotal = I915_READ(VTOTAL(i)); error->pipe[i].vblank = I915_READ(VBLANK(i)); error->pipe[i].vsync = I915_READ(VSYNC(i)); } return error; } void intel_display_print_error_state(struct seq_file *m, struct drm_device *dev, struct intel_display_error_state *error) { int i; for (i = 0; i < 2; i++) { seq_printf(m, "Pipe [%d]:\n", i); seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); seq_printf(m, " SRC: %08x\n", error->pipe[i].source); seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); seq_printf(m, "Plane [%d]:\n", i); seq_printf(m, " CNTR: %08x\n", error->plane[i].control); seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); seq_printf(m, " SIZE: %08x\n", error->plane[i].size); seq_printf(m, " POS: %08x\n", error->plane[i].pos); seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); if (INTEL_INFO(dev)->gen >= 4) { seq_printf(m, " SURF: %08x\n", error->plane[i].surface); seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); } seq_printf(m, "Cursor [%d]:\n", i); seq_printf(m, " CNTR: %08x\n", error->cursor[i].control); seq_printf(m, " POS: %08x\n", error->cursor[i].position); seq_printf(m, " BASE: %08x\n", error->cursor[i].base); } } #endif
gpl-2.0
Fusion-Devices/android_kernel_mediatek_sprout
drivers/iio/adc/ad7793.c
2048
25794
/* * AD7785/AD7792/AD7793/AD7794/AD7795 SPI ADC driver * * Copyright 2011-2012 Analog Devices Inc. * * Licensed under the GPL-2. */ #include <linux/interrupt.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/spi/spi.h> #include <linux/regulator/consumer.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/iio/buffer.h> #include <linux/iio/trigger.h> #include <linux/iio/trigger_consumer.h> #include <linux/iio/triggered_buffer.h> #include <linux/iio/adc/ad_sigma_delta.h> #include <linux/platform_data/ad7793.h> /* Registers */ #define AD7793_REG_COMM 0 /* Communications Register (WO, 8-bit) */ #define AD7793_REG_STAT 0 /* Status Register (RO, 8-bit) */ #define AD7793_REG_MODE 1 /* Mode Register (RW, 16-bit */ #define AD7793_REG_CONF 2 /* Configuration Register (RW, 16-bit) */ #define AD7793_REG_DATA 3 /* Data Register (RO, 16-/24-bit) */ #define AD7793_REG_ID 4 /* ID Register (RO, 8-bit) */ #define AD7793_REG_IO 5 /* IO Register (RO, 8-bit) */ #define AD7793_REG_OFFSET 6 /* Offset Register (RW, 16-bit * (AD7792)/24-bit (AD7793)) */ #define AD7793_REG_FULLSALE 7 /* Full-Scale Register * (RW, 16-bit (AD7792)/24-bit (AD7793)) */ /* Communications Register Bit Designations (AD7793_REG_COMM) */ #define AD7793_COMM_WEN (1 << 7) /* Write Enable */ #define AD7793_COMM_WRITE (0 << 6) /* Write Operation */ #define AD7793_COMM_READ (1 << 6) /* Read Operation */ #define AD7793_COMM_ADDR(x) (((x) & 0x7) << 3) /* Register Address */ #define AD7793_COMM_CREAD (1 << 2) /* Continuous Read of Data Register */ /* Status Register Bit Designations (AD7793_REG_STAT) */ #define AD7793_STAT_RDY (1 << 7) /* Ready */ #define AD7793_STAT_ERR (1 << 6) /* Error (Overrange, Underrange) */ #define AD7793_STAT_CH3 (1 << 2) /* Channel 3 */ #define AD7793_STAT_CH2 (1 << 1) /* Channel 2 */ #define AD7793_STAT_CH1 
(1 << 0) /* Channel 1 */ /* Mode Register Bit Designations (AD7793_REG_MODE) */ #define AD7793_MODE_SEL(x) (((x) & 0x7) << 13) /* Operation Mode Select */ #define AD7793_MODE_SEL_MASK (0x7 << 13) /* Operation Mode Select mask */ #define AD7793_MODE_CLKSRC(x) (((x) & 0x3) << 6) /* ADC Clock Source Select */ #define AD7793_MODE_RATE(x) ((x) & 0xF) /* Filter Update Rate Select */ #define AD7793_MODE_CONT 0 /* Continuous Conversion Mode */ #define AD7793_MODE_SINGLE 1 /* Single Conversion Mode */ #define AD7793_MODE_IDLE 2 /* Idle Mode */ #define AD7793_MODE_PWRDN 3 /* Power-Down Mode */ #define AD7793_MODE_CAL_INT_ZERO 4 /* Internal Zero-Scale Calibration */ #define AD7793_MODE_CAL_INT_FULL 5 /* Internal Full-Scale Calibration */ #define AD7793_MODE_CAL_SYS_ZERO 6 /* System Zero-Scale Calibration */ #define AD7793_MODE_CAL_SYS_FULL 7 /* System Full-Scale Calibration */ #define AD7793_CLK_INT 0 /* Internal 64 kHz Clock not * available at the CLK pin */ #define AD7793_CLK_INT_CO 1 /* Internal 64 kHz Clock available * at the CLK pin */ #define AD7793_CLK_EXT 2 /* External 64 kHz Clock */ #define AD7793_CLK_EXT_DIV2 3 /* External Clock divided by 2 */ /* Configuration Register Bit Designations (AD7793_REG_CONF) */ #define AD7793_CONF_VBIAS(x) (((x) & 0x3) << 14) /* Bias Voltage * Generator Enable */ #define AD7793_CONF_BO_EN (1 << 13) /* Burnout Current Enable */ #define AD7793_CONF_UNIPOLAR (1 << 12) /* Unipolar/Bipolar Enable */ #define AD7793_CONF_BOOST (1 << 11) /* Boost Enable */ #define AD7793_CONF_GAIN(x) (((x) & 0x7) << 8) /* Gain Select */ #define AD7793_CONF_REFSEL(x) ((x) << 6) /* INT/EXT Reference Select */ #define AD7793_CONF_BUF (1 << 4) /* Buffered Mode Enable */ #define AD7793_CONF_CHAN(x) ((x) & 0xf) /* Channel select */ #define AD7793_CONF_CHAN_MASK 0xf /* Channel select mask */ #define AD7793_CH_AIN1P_AIN1M 0 /* AIN1(+) - AIN1(-) */ #define AD7793_CH_AIN2P_AIN2M 1 /* AIN2(+) - AIN2(-) */ #define AD7793_CH_AIN3P_AIN3M 2 /* AIN3(+) - AIN3(-) */ #define 
AD7793_CH_AIN1M_AIN1M 3 /* AIN1(-) - AIN1(-) */ #define AD7793_CH_TEMP 6 /* Temp Sensor */ #define AD7793_CH_AVDD_MONITOR 7 /* AVDD Monitor */ #define AD7795_CH_AIN4P_AIN4M 4 /* AIN4(+) - AIN4(-) */ #define AD7795_CH_AIN5P_AIN5M 5 /* AIN5(+) - AIN5(-) */ #define AD7795_CH_AIN6P_AIN6M 6 /* AIN6(+) - AIN6(-) */ #define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */ /* ID Register Bit Designations (AD7793_REG_ID) */ #define AD7785_ID 0xB #define AD7792_ID 0xA #define AD7793_ID 0xB #define AD7794_ID 0xF #define AD7795_ID 0xF #define AD7796_ID 0xA #define AD7797_ID 0xB #define AD7798_ID 0x8 #define AD7799_ID 0x9 #define AD7793_ID_MASK 0xF /* IO (Excitation Current Sources) Register Bit Designations (AD7793_REG_IO) */ #define AD7793_IO_IEXC1_IOUT1_IEXC2_IOUT2 0 /* IEXC1 connect to IOUT1, * IEXC2 connect to IOUT2 */ #define AD7793_IO_IEXC1_IOUT2_IEXC2_IOUT1 1 /* IEXC1 connect to IOUT2, * IEXC2 connect to IOUT1 */ #define AD7793_IO_IEXC1_IEXC2_IOUT1 2 /* Both current sources * IEXC1,2 connect to IOUT1 */ #define AD7793_IO_IEXC1_IEXC2_IOUT2 3 /* Both current sources * IEXC1,2 connect to IOUT2 */ #define AD7793_IO_IXCEN_10uA (1 << 0) /* Excitation Current 10uA */ #define AD7793_IO_IXCEN_210uA (2 << 0) /* Excitation Current 210uA */ #define AD7793_IO_IXCEN_1mA (3 << 0) /* Excitation Current 1mA */ /* NOTE: * The AD7792/AD7793 features a dual use data out ready DOUT/RDY output. * In order to avoid contentions on the SPI bus, it's therefore necessary * to use spi bus locking. * * The DOUT/RDY output must also be wired to an interrupt capable GPIO. 
*/ #define AD7793_FLAG_HAS_CLKSEL BIT(0) #define AD7793_FLAG_HAS_REFSEL BIT(1) #define AD7793_FLAG_HAS_VBIAS BIT(2) #define AD7793_HAS_EXITATION_CURRENT BIT(3) #define AD7793_FLAG_HAS_GAIN BIT(4) #define AD7793_FLAG_HAS_BUFFER BIT(5) struct ad7793_chip_info { unsigned int id; const struct iio_chan_spec *channels; unsigned int num_channels; unsigned int flags; const struct iio_info *iio_info; const u16 *sample_freq_avail; }; struct ad7793_state { const struct ad7793_chip_info *chip_info; struct regulator *reg; u16 int_vref_mv; u16 mode; u16 conf; u32 scale_avail[8][2]; struct ad_sigma_delta sd; }; enum ad7793_supported_device_ids { ID_AD7785, ID_AD7792, ID_AD7793, ID_AD7794, ID_AD7795, ID_AD7796, ID_AD7797, ID_AD7798, ID_AD7799, }; static struct ad7793_state *ad_sigma_delta_to_ad7793(struct ad_sigma_delta *sd) { return container_of(sd, struct ad7793_state, sd); } static int ad7793_set_channel(struct ad_sigma_delta *sd, unsigned int channel) { struct ad7793_state *st = ad_sigma_delta_to_ad7793(sd); st->conf &= ~AD7793_CONF_CHAN_MASK; st->conf |= AD7793_CONF_CHAN(channel); return ad_sd_write_reg(&st->sd, AD7793_REG_CONF, 2, st->conf); } static int ad7793_set_mode(struct ad_sigma_delta *sd, enum ad_sigma_delta_mode mode) { struct ad7793_state *st = ad_sigma_delta_to_ad7793(sd); st->mode &= ~AD7793_MODE_SEL_MASK; st->mode |= AD7793_MODE_SEL(mode); return ad_sd_write_reg(&st->sd, AD7793_REG_MODE, 2, st->mode); } static const struct ad_sigma_delta_info ad7793_sigma_delta_info = { .set_channel = ad7793_set_channel, .set_mode = ad7793_set_mode, .has_registers = true, .addr_shift = 3, .read_mask = BIT(6), }; static const struct ad_sd_calib_data ad7793_calib_arr[6] = { {AD7793_MODE_CAL_INT_ZERO, AD7793_CH_AIN1P_AIN1M}, {AD7793_MODE_CAL_INT_FULL, AD7793_CH_AIN1P_AIN1M}, {AD7793_MODE_CAL_INT_ZERO, AD7793_CH_AIN2P_AIN2M}, {AD7793_MODE_CAL_INT_FULL, AD7793_CH_AIN2P_AIN2M}, {AD7793_MODE_CAL_INT_ZERO, AD7793_CH_AIN3P_AIN3M}, {AD7793_MODE_CAL_INT_FULL, AD7793_CH_AIN3P_AIN3M} }; 
static int ad7793_calibrate_all(struct ad7793_state *st) { return ad_sd_calibrate_all(&st->sd, ad7793_calib_arr, ARRAY_SIZE(ad7793_calib_arr)); } static int ad7793_check_platform_data(struct ad7793_state *st, const struct ad7793_platform_data *pdata) { if ((pdata->current_source_direction == AD7793_IEXEC1_IEXEC2_IOUT1 || pdata->current_source_direction == AD7793_IEXEC1_IEXEC2_IOUT2) && ((pdata->exitation_current != AD7793_IX_10uA) && (pdata->exitation_current != AD7793_IX_210uA))) return -EINVAL; if (!(st->chip_info->flags & AD7793_FLAG_HAS_CLKSEL) && pdata->clock_src != AD7793_CLK_SRC_INT) return -EINVAL; if (!(st->chip_info->flags & AD7793_FLAG_HAS_REFSEL) && pdata->refsel != AD7793_REFSEL_REFIN1) return -EINVAL; if (!(st->chip_info->flags & AD7793_FLAG_HAS_VBIAS) && pdata->bias_voltage != AD7793_BIAS_VOLTAGE_DISABLED) return -EINVAL; if (!(st->chip_info->flags & AD7793_HAS_EXITATION_CURRENT) && pdata->exitation_current != AD7793_IX_DISABLED) return -EINVAL; return 0; } static int ad7793_setup(struct iio_dev *indio_dev, const struct ad7793_platform_data *pdata, unsigned int vref_mv) { struct ad7793_state *st = iio_priv(indio_dev); int i, ret = -1; unsigned long long scale_uv; u32 id; ret = ad7793_check_platform_data(st, pdata); if (ret) return ret; /* reset the serial interface */ ret = spi_write(st->sd.spi, (u8 *)&ret, sizeof(ret)); if (ret < 0) goto out; usleep_range(500, 2000); /* Wait for at least 500us */ /* write/read test for device presence */ ret = ad_sd_read_reg(&st->sd, AD7793_REG_ID, 1, &id); if (ret) goto out; id &= AD7793_ID_MASK; if (id != st->chip_info->id) { dev_err(&st->sd.spi->dev, "device ID query failed\n"); goto out; } st->mode = AD7793_MODE_RATE(1); st->conf = 0; if (st->chip_info->flags & AD7793_FLAG_HAS_CLKSEL) st->mode |= AD7793_MODE_CLKSRC(pdata->clock_src); if (st->chip_info->flags & AD7793_FLAG_HAS_REFSEL) st->conf |= AD7793_CONF_REFSEL(pdata->refsel); if (st->chip_info->flags & AD7793_FLAG_HAS_VBIAS) st->conf |= 
AD7793_CONF_VBIAS(pdata->bias_voltage); if (pdata->buffered || !(st->chip_info->flags & AD7793_FLAG_HAS_BUFFER)) st->conf |= AD7793_CONF_BUF; if (pdata->boost_enable && (st->chip_info->flags & AD7793_FLAG_HAS_VBIAS)) st->conf |= AD7793_CONF_BOOST; if (pdata->burnout_current) st->conf |= AD7793_CONF_BO_EN; if (pdata->unipolar) st->conf |= AD7793_CONF_UNIPOLAR; if (!(st->chip_info->flags & AD7793_FLAG_HAS_GAIN)) st->conf |= AD7793_CONF_GAIN(7); ret = ad7793_set_mode(&st->sd, AD_SD_MODE_IDLE); if (ret) goto out; ret = ad7793_set_channel(&st->sd, 0); if (ret) goto out; if (st->chip_info->flags & AD7793_HAS_EXITATION_CURRENT) { ret = ad_sd_write_reg(&st->sd, AD7793_REG_IO, 1, pdata->exitation_current | (pdata->current_source_direction << 2)); if (ret) goto out; } ret = ad7793_calibrate_all(st); if (ret) goto out; /* Populate available ADC input ranges */ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) { scale_uv = ((u64)vref_mv * 100000000) >> (st->chip_info->channels[0].scan_type.realbits - (!!(st->conf & AD7793_CONF_UNIPOLAR) ? 
0 : 1)); scale_uv >>= i; st->scale_avail[i][1] = do_div(scale_uv, 100000000) * 10; st->scale_avail[i][0] = scale_uv; } return 0; out: dev_err(&st->sd.spi->dev, "setup failed\n"); return ret; } static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39, 33, 19, 17, 16, 12, 10, 8, 6, 4}; static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0, 33, 0, 17, 16, 12, 10, 8, 6, 4}; static ssize_t ad7793_read_frequency(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ad7793_state *st = iio_priv(indio_dev); return sprintf(buf, "%d\n", st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]); } static ssize_t ad7793_write_frequency(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ad7793_state *st = iio_priv(indio_dev); long lval; int i, ret; mutex_lock(&indio_dev->mlock); if (iio_buffer_enabled(indio_dev)) { mutex_unlock(&indio_dev->mlock); return -EBUSY; } mutex_unlock(&indio_dev->mlock); ret = kstrtol(buf, 10, &lval); if (ret) return ret; if (lval == 0) return -EINVAL; ret = -EINVAL; for (i = 0; i < 16; i++) if (lval == st->chip_info->sample_freq_avail[i]) { mutex_lock(&indio_dev->mlock); st->mode &= ~AD7793_MODE_RATE(-1); st->mode |= AD7793_MODE_RATE(i); ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode); mutex_unlock(&indio_dev->mlock); ret = 0; } return ret ? 
ret : len; } static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, ad7793_read_frequency, ad7793_write_frequency); static IIO_CONST_ATTR_SAMP_FREQ_AVAIL( "470 242 123 62 50 39 33 19 17 16 12 10 8 6 4"); static IIO_CONST_ATTR_NAMED(sampling_frequency_available_ad7797, sampling_frequency_available, "123 62 50 33 17 16 12 10 8 6 4"); static ssize_t ad7793_show_scale_available(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ad7793_state *st = iio_priv(indio_dev); int i, len = 0; for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) len += sprintf(buf + len, "%d.%09u ", st->scale_avail[i][0], st->scale_avail[i][1]); len += sprintf(buf + len, "\n"); return len; } static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, in_voltage-voltage_scale_available, S_IRUGO, ad7793_show_scale_available, NULL, 0); static struct attribute *ad7793_attributes[] = { &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_const_attr_sampling_frequency_available.dev_attr.attr, &iio_dev_attr_in_m_in_scale_available.dev_attr.attr, NULL }; static const struct attribute_group ad7793_attribute_group = { .attrs = ad7793_attributes, }; static struct attribute *ad7797_attributes[] = { &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr, NULL }; static const struct attribute_group ad7797_attribute_group = { .attrs = ad7797_attributes, }; static int ad7793_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long m) { struct ad7793_state *st = iio_priv(indio_dev); int ret; unsigned long long scale_uv; bool unipolar = !!(st->conf & AD7793_CONF_UNIPOLAR); switch (m) { case IIO_CHAN_INFO_RAW: ret = ad_sigma_delta_single_conversion(indio_dev, chan, val); if (ret < 0) return ret; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: switch (chan->type) { case IIO_VOLTAGE: if (chan->differential) { *val = st-> scale_avail[(st->conf >> 8) & 0x7][0]; *val2 = 
st-> scale_avail[(st->conf >> 8) & 0x7][1]; return IIO_VAL_INT_PLUS_NANO; } else { /* 1170mV / 2^23 * 6 */ scale_uv = (1170ULL * 1000000000ULL * 6ULL); } break; case IIO_TEMP: /* 1170mV / 0.81 mV/C / 2^23 */ scale_uv = 1444444444444444ULL; break; default: return -EINVAL; } scale_uv >>= (chan->scan_type.realbits - (unipolar ? 0 : 1)); *val = 0; *val2 = scale_uv; return IIO_VAL_INT_PLUS_NANO; case IIO_CHAN_INFO_OFFSET: if (!unipolar) *val = -(1 << (chan->scan_type.realbits - 1)); else *val = 0; /* Kelvin to Celsius */ if (chan->type == IIO_TEMP) { unsigned long long offset; unsigned int shift; shift = chan->scan_type.realbits - (unipolar ? 0 : 1); offset = 273ULL << shift; do_div(offset, 1444); *val -= offset; } return IIO_VAL_INT; } return -EINVAL; } static int ad7793_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { struct ad7793_state *st = iio_priv(indio_dev); int ret, i; unsigned int tmp; mutex_lock(&indio_dev->mlock); if (iio_buffer_enabled(indio_dev)) { mutex_unlock(&indio_dev->mlock); return -EBUSY; } switch (mask) { case IIO_CHAN_INFO_SCALE: ret = -EINVAL; for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) if (val2 == st->scale_avail[i][1]) { ret = 0; tmp = st->conf; st->conf &= ~AD7793_CONF_GAIN(-1); st->conf |= AD7793_CONF_GAIN(i); if (tmp == st->conf) break; ad_sd_write_reg(&st->sd, AD7793_REG_CONF, sizeof(st->conf), st->conf); ad7793_calibrate_all(st); break; } break; default: ret = -EINVAL; } mutex_unlock(&indio_dev->mlock); return ret; } static int ad7793_write_raw_get_fmt(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, long mask) { return IIO_VAL_INT_PLUS_NANO; } static const struct iio_info ad7793_info = { .read_raw = &ad7793_read_raw, .write_raw = &ad7793_write_raw, .write_raw_get_fmt = &ad7793_write_raw_get_fmt, .attrs = &ad7793_attribute_group, .validate_trigger = ad_sd_validate_trigger, .driver_module = THIS_MODULE, }; static const struct iio_info ad7797_info = { .read_raw = 
&ad7793_read_raw, .write_raw = &ad7793_write_raw, .write_raw_get_fmt = &ad7793_write_raw_get_fmt, .attrs = &ad7793_attribute_group, .validate_trigger = ad_sd_validate_trigger, .driver_module = THIS_MODULE, }; #define DECLARE_AD7793_CHANNELS(_name, _b, _sb, _s) \ const struct iio_chan_spec _name##_channels[] = { \ AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), (_s)), \ AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), (_s)), \ AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), (_s)), \ AD_SD_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), (_s)), \ AD_SD_TEMP_CHANNEL(4, AD7793_CH_TEMP, (_b), (_sb), (_s)), \ AD_SD_SUPPLY_CHANNEL(5, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), (_s)), \ IIO_CHAN_SOFT_TIMESTAMP(6), \ } #define DECLARE_AD7795_CHANNELS(_name, _b, _sb) \ const struct iio_chan_spec _name##_channels[] = { \ AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \ AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \ AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \ AD_SD_DIFF_CHANNEL(3, 3, 3, AD7795_CH_AIN4P_AIN4M, (_b), (_sb), 0), \ AD_SD_DIFF_CHANNEL(4, 4, 4, AD7795_CH_AIN5P_AIN5M, (_b), (_sb), 0), \ AD_SD_DIFF_CHANNEL(5, 5, 5, AD7795_CH_AIN6P_AIN6M, (_b), (_sb), 0), \ AD_SD_SHORTED_CHANNEL(6, 0, AD7795_CH_AIN1M_AIN1M, (_b), (_sb), 0), \ AD_SD_TEMP_CHANNEL(7, AD7793_CH_TEMP, (_b), (_sb), 0), \ AD_SD_SUPPLY_CHANNEL(8, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \ IIO_CHAN_SOFT_TIMESTAMP(9), \ } #define DECLARE_AD7797_CHANNELS(_name, _b, _sb) \ const struct iio_chan_spec _name##_channels[] = { \ AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \ AD_SD_SHORTED_CHANNEL(1, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \ AD_SD_TEMP_CHANNEL(2, AD7793_CH_TEMP, (_b), (_sb), 0), \ AD_SD_SUPPLY_CHANNEL(3, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \ IIO_CHAN_SOFT_TIMESTAMP(4), \ } #define DECLARE_AD7799_CHANNELS(_name, _b, _sb) \ const struct 
iio_chan_spec _name##_channels[] = { \ AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \ AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \ AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \ AD_SD_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \ AD_SD_SUPPLY_CHANNEL(4, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \ IIO_CHAN_SOFT_TIMESTAMP(5), \ } static DECLARE_AD7793_CHANNELS(ad7785, 20, 32, 4); static DECLARE_AD7793_CHANNELS(ad7792, 16, 32, 0); static DECLARE_AD7793_CHANNELS(ad7793, 24, 32, 0); static DECLARE_AD7795_CHANNELS(ad7794, 16, 32); static DECLARE_AD7795_CHANNELS(ad7795, 24, 32); static DECLARE_AD7797_CHANNELS(ad7796, 16, 16); static DECLARE_AD7797_CHANNELS(ad7797, 24, 32); static DECLARE_AD7799_CHANNELS(ad7798, 16, 16); static DECLARE_AD7799_CHANNELS(ad7799, 24, 32); static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { [ID_AD7785] = { .id = AD7785_ID, .channels = ad7785_channels, .num_channels = ARRAY_SIZE(ad7785_channels), .iio_info = &ad7793_info, .sample_freq_avail = ad7793_sample_freq_avail, .flags = AD7793_FLAG_HAS_CLKSEL | AD7793_FLAG_HAS_REFSEL | AD7793_FLAG_HAS_VBIAS | AD7793_HAS_EXITATION_CURRENT | AD7793_FLAG_HAS_GAIN | AD7793_FLAG_HAS_BUFFER, }, [ID_AD7792] = { .id = AD7792_ID, .channels = ad7792_channels, .num_channels = ARRAY_SIZE(ad7792_channels), .iio_info = &ad7793_info, .sample_freq_avail = ad7793_sample_freq_avail, .flags = AD7793_FLAG_HAS_CLKSEL | AD7793_FLAG_HAS_REFSEL | AD7793_FLAG_HAS_VBIAS | AD7793_HAS_EXITATION_CURRENT | AD7793_FLAG_HAS_GAIN | AD7793_FLAG_HAS_BUFFER, }, [ID_AD7793] = { .id = AD7793_ID, .channels = ad7793_channels, .num_channels = ARRAY_SIZE(ad7793_channels), .iio_info = &ad7793_info, .sample_freq_avail = ad7793_sample_freq_avail, .flags = AD7793_FLAG_HAS_CLKSEL | AD7793_FLAG_HAS_REFSEL | AD7793_FLAG_HAS_VBIAS | AD7793_HAS_EXITATION_CURRENT | AD7793_FLAG_HAS_GAIN | AD7793_FLAG_HAS_BUFFER, }, [ID_AD7794] = { .id = AD7794_ID, 
.channels = ad7794_channels, .num_channels = ARRAY_SIZE(ad7794_channels), .iio_info = &ad7793_info, .sample_freq_avail = ad7793_sample_freq_avail, .flags = AD7793_FLAG_HAS_CLKSEL | AD7793_FLAG_HAS_REFSEL | AD7793_FLAG_HAS_VBIAS | AD7793_HAS_EXITATION_CURRENT | AD7793_FLAG_HAS_GAIN | AD7793_FLAG_HAS_BUFFER, }, [ID_AD7795] = { .id = AD7795_ID, .channels = ad7795_channels, .num_channels = ARRAY_SIZE(ad7795_channels), .iio_info = &ad7793_info, .sample_freq_avail = ad7793_sample_freq_avail, .flags = AD7793_FLAG_HAS_CLKSEL | AD7793_FLAG_HAS_REFSEL | AD7793_FLAG_HAS_VBIAS | AD7793_HAS_EXITATION_CURRENT | AD7793_FLAG_HAS_GAIN | AD7793_FLAG_HAS_BUFFER, }, [ID_AD7796] = { .id = AD7796_ID, .channels = ad7796_channels, .num_channels = ARRAY_SIZE(ad7796_channels), .iio_info = &ad7797_info, .sample_freq_avail = ad7797_sample_freq_avail, .flags = AD7793_FLAG_HAS_CLKSEL, }, [ID_AD7797] = { .id = AD7797_ID, .channels = ad7797_channels, .num_channels = ARRAY_SIZE(ad7797_channels), .iio_info = &ad7797_info, .sample_freq_avail = ad7797_sample_freq_avail, .flags = AD7793_FLAG_HAS_CLKSEL, }, [ID_AD7798] = { .id = AD7798_ID, .channels = ad7798_channels, .num_channels = ARRAY_SIZE(ad7798_channels), .iio_info = &ad7793_info, .sample_freq_avail = ad7793_sample_freq_avail, .flags = AD7793_FLAG_HAS_GAIN | AD7793_FLAG_HAS_BUFFER, }, [ID_AD7799] = { .id = AD7799_ID, .channels = ad7799_channels, .num_channels = ARRAY_SIZE(ad7799_channels), .iio_info = &ad7793_info, .sample_freq_avail = ad7793_sample_freq_avail, .flags = AD7793_FLAG_HAS_GAIN | AD7793_FLAG_HAS_BUFFER, }, }; static int ad7793_probe(struct spi_device *spi) { const struct ad7793_platform_data *pdata = spi->dev.platform_data; struct ad7793_state *st; struct iio_dev *indio_dev; int ret, vref_mv = 0; if (!pdata) { dev_err(&spi->dev, "no platform data?\n"); return -ENODEV; } if (!spi->irq) { dev_err(&spi->dev, "no IRQ?\n"); return -ENODEV; } indio_dev = iio_device_alloc(sizeof(*st)); if (indio_dev == NULL) return -ENOMEM; st = 
iio_priv(indio_dev); ad_sd_init(&st->sd, indio_dev, spi, &ad7793_sigma_delta_info); if (pdata->refsel != AD7793_REFSEL_INTERNAL) { st->reg = regulator_get(&spi->dev, "refin"); if (IS_ERR(st->reg)) { ret = PTR_ERR(st->reg); goto error_device_free; } ret = regulator_enable(st->reg); if (ret) goto error_put_reg; vref_mv = regulator_get_voltage(st->reg); if (vref_mv < 0) { ret = vref_mv; goto error_disable_reg; } vref_mv /= 1000; } else { vref_mv = 1170; /* Build-in ref */ } st->chip_info = &ad7793_chip_info_tbl[spi_get_device_id(spi)->driver_data]; spi_set_drvdata(spi, indio_dev); indio_dev->dev.parent = &spi->dev; indio_dev->name = spi_get_device_id(spi)->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = st->chip_info->channels; indio_dev->num_channels = st->chip_info->num_channels; indio_dev->info = st->chip_info->iio_info; ret = ad_sd_setup_buffer_and_trigger(indio_dev); if (ret) goto error_disable_reg; ret = ad7793_setup(indio_dev, pdata, vref_mv); if (ret) goto error_remove_trigger; ret = iio_device_register(indio_dev); if (ret) goto error_remove_trigger; return 0; error_remove_trigger: ad_sd_cleanup_buffer_and_trigger(indio_dev); error_disable_reg: if (pdata->refsel != AD7793_REFSEL_INTERNAL) regulator_disable(st->reg); error_put_reg: if (pdata->refsel != AD7793_REFSEL_INTERNAL) regulator_put(st->reg); error_device_free: iio_device_free(indio_dev); return ret; } static int ad7793_remove(struct spi_device *spi) { const struct ad7793_platform_data *pdata = spi->dev.platform_data; struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad7793_state *st = iio_priv(indio_dev); iio_device_unregister(indio_dev); ad_sd_cleanup_buffer_and_trigger(indio_dev); if (pdata->refsel != AD7793_REFSEL_INTERNAL) { regulator_disable(st->reg); regulator_put(st->reg); } iio_device_free(indio_dev); return 0; } static const struct spi_device_id ad7793_id[] = { {"ad7785", ID_AD7785}, {"ad7792", ID_AD7792}, {"ad7793", ID_AD7793}, {"ad7794", ID_AD7794}, {"ad7795", 
ID_AD7795}, {"ad7796", ID_AD7796}, {"ad7797", ID_AD7797}, {"ad7798", ID_AD7798}, {"ad7799", ID_AD7799}, {} }; MODULE_DEVICE_TABLE(spi, ad7793_id); static struct spi_driver ad7793_driver = { .driver = { .name = "ad7793", .owner = THIS_MODULE, }, .probe = ad7793_probe, .remove = ad7793_remove, .id_table = ad7793_id, }; module_spi_driver(ad7793_driver); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("Analog Devices AD7793 and simialr ADCs"); MODULE_LICENSE("GPL v2");
gpl-2.0
Chibaibuki/TCP-IP-Timer-For-Linux-Kernel
drivers/scsi/qla2xxx/qla_mbx.c
2048
127683
/* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" #include "qla_target.h" #include <linux/delay.h> #include <linux/gfp.h> /* * qla2x00_mailbox_command * Issue mailbox command and waits for completion. * * Input: * ha = adapter block pointer. * mcp = driver internal mbx struct pointer. * * Output: * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data. * * Returns: * 0 : QLA_SUCCESS = cmd performed success * 1 : QLA_FUNCTION_FAILED (error encountered) * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered) * * Context: * Kernel context. */ static int qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) { int rval; unsigned long flags = 0; device_reg_t __iomem *reg; uint8_t abort_active; uint8_t io_lock_on; uint16_t command = 0; uint16_t *iptr; uint16_t __iomem *optr; uint32_t cnt; uint32_t mboxes; unsigned long wait_time; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__); if (ha->pdev->error_state > pci_channel_io_frozen) { ql_log(ql_log_warn, vha, 0x1001, "error_state is greater than pci_channel_io_frozen, " "exiting.\n"); return QLA_FUNCTION_TIMEOUT; } if (vha->device_flags & DFLG_DEV_FAILED) { ql_log(ql_log_warn, vha, 0x1002, "Device in failed state, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } reg = ha->iobase; io_lock_on = base_vha->flags.init_done; rval = QLA_SUCCESS; abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); if (ha->flags.pci_channel_io_perm_failure) { ql_log(ql_log_warn, vha, 0x1003, "Perm failure on EEH timeout MBX, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) { /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; ql_log(ql_log_warn, vha, 0x1004, "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); return QLA_FUNCTION_TIMEOUT; } /* * Wait for active mailbox 
commands to finish by waiting at most tov * seconds. This is to serialize actual issuing of mailbox cmds during * non ISP abort time. */ if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { /* Timeout occurred. Return error. */ ql_log(ql_log_warn, vha, 0x1005, "Cmd access timeout, cmd=0x%x, Exiting.\n", mcp->mb[0]); return QLA_FUNCTION_TIMEOUT; } ha->flags.mbox_busy = 1; /* Save mailbox command for debug */ ha->mcp = mcp; ql_dbg(ql_dbg_mbx, vha, 0x1006, "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); spin_lock_irqsave(&ha->hardware_lock, flags); /* Load mailbox registers. */ if (IS_QLA82XX(ha)) optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0]; else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha)) optr = (uint16_t __iomem *)&reg->isp24.mailbox0; else optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0); iptr = mcp->mb; command = mcp->mb[0]; mboxes = mcp->out_mb; for (cnt = 0; cnt < ha->mbx_count; cnt++) { if (IS_QLA2200(ha) && cnt == 8) optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8); if (mboxes & BIT_0) WRT_REG_WORD(optr, *iptr); mboxes >>= 1; optr++; iptr++; } ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111, "Loaded MBX registers (displayed in bytes) =.\n"); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112, (uint8_t *)mcp->mb, 16); ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113, ".\n"); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114, ((uint8_t *)mcp->mb + 0x10), 16); ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115, ".\n"); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116, ((uint8_t *)mcp->mb + 0x20), 8); ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117, "I/O Address = %p.\n", optr); ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e); /* Issue set host interrupt command to send cmd out. */ ha->flags.mbox_int = 0; clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); /* Unlock mbx registers and wait for interrupt */ ql_dbg(ql_dbg_mbx, vha, 0x100f, "Going to unlock irq & waiting for interrupts. 
" "jiffies=%lx.\n", jiffies); /* Wait for mbx cmd completion until timeout */ if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); if (IS_QLA82XX(ha)) { if (RD_REG_DWORD(&reg->isp82.hint) & HINT_MBX_INT_PENDING) { spin_unlock_irqrestore(&ha->hardware_lock, flags); ha->flags.mbox_busy = 0; ql_dbg(ql_dbg_mbx, vha, 0x1010, "Pending mailbox timeout, exiting.\n"); rval = QLA_FUNCTION_TIMEOUT; goto premature_exit; } WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING); } else if (IS_FWI2_CAPABLE(ha)) WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); else WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ); } else { ql_dbg(ql_dbg_mbx, vha, 0x1011, "Cmd=%x Polling Mode.\n", command); if (IS_QLA82XX(ha)) { if (RD_REG_DWORD(&reg->isp82.hint) & HINT_MBX_INT_PENDING) { spin_unlock_irqrestore(&ha->hardware_lock, flags); ha->flags.mbox_busy = 0; ql_dbg(ql_dbg_mbx, vha, 0x1012, "Pending mailbox timeout, exiting.\n"); rval = QLA_FUNCTION_TIMEOUT; goto premature_exit; } WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING); } else if (IS_FWI2_CAPABLE(ha)) WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); else WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ while (!ha->flags.mbox_int) { if (time_after(jiffies, wait_time)) break; /* Check for pending interrupts. */ qla2x00_poll(ha->rsp_q_map[0]); if (!ha->flags.mbox_int && !(IS_QLA2200(ha) && command == MBC_LOAD_RISC_RAM_EXTENDED)) msleep(10); } /* while */ ql_dbg(ql_dbg_mbx, vha, 0x1013, "Waited %d sec.\n", (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); } /* Check whether we timed out */ if (ha->flags.mbox_int) { uint16_t *iptr2; ql_dbg(ql_dbg_mbx, vha, 0x1014, "Cmd=%x completed.\n", command); /* Got interrupt. Clear the flag. 
*/ ha->flags.mbox_int = 0; clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); if ((IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)) { ha->flags.mbox_busy = 0; /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; ha->mcp = NULL; rval = QLA_FUNCTION_FAILED; ql_log(ql_log_warn, vha, 0x1015, "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); goto premature_exit; } if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) rval = QLA_FUNCTION_FAILED; /* Load return mailbox registers. */ iptr2 = mcp->mb; iptr = (uint16_t *)&ha->mailbox_out[0]; mboxes = mcp->in_mb; for (cnt = 0; cnt < ha->mbx_count; cnt++) { if (mboxes & BIT_0) *iptr2 = *iptr; mboxes >>= 1; iptr2++; iptr++; } } else { uint16_t mb0; uint32_t ictrl; if (IS_FWI2_CAPABLE(ha)) { mb0 = RD_REG_WORD(&reg->isp24.mailbox0); ictrl = RD_REG_DWORD(&reg->isp24.ictrl); } else { mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0); ictrl = RD_REG_WORD(&reg->isp.ictrl); } ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " "mb[0]=0x%x\n", command, ictrl, jiffies, mb0); ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); /* * Attempt to capture a firmware dump for further analysis * of the current firmware state */ ha->isp_ops->fw_dump(vha, 0); rval = QLA_FUNCTION_TIMEOUT; } ha->flags.mbox_busy = 0; /* Clean up */ ha->mcp = NULL; if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { ql_dbg(ql_dbg_mbx, vha, 0x101a, "Checking for additional resp interrupt.\n"); /* polling mode for non isp_abort commands. */ qla2x00_poll(ha->rsp_q_map[0]); } if (rval == QLA_FUNCTION_TIMEOUT && mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { if (!io_lock_on || (mcp->flags & IOCTL_CMD) || ha->flags.eeh_busy) { /* not in dpc. schedule it for dpc to take over. 
*/ ql_dbg(ql_dbg_mbx, vha, 0x101b, "Timeout, schedule isp_abort_needed.\n"); if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { if (IS_QLA82XX(ha)) { ql_dbg(ql_dbg_mbx, vha, 0x112a, "disabling pause transmit on port " "0 & 1.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, CRB_NIU_XG_PAUSE_CTL_P0| CRB_NIU_XG_PAUSE_CTL_P1); } ql_log(ql_log_info, base_vha, 0x101c, "Mailbox cmd timeout occurred, cmd=0x%x, " "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP " "abort.\n", command, mcp->mb[0], ha->flags.eeh_busy); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } else if (!abort_active) { /* call abort directly since we are in the DPC thread */ ql_dbg(ql_dbg_mbx, vha, 0x101d, "Timeout, calling abort_isp.\n"); if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { if (IS_QLA82XX(ha)) { ql_dbg(ql_dbg_mbx, vha, 0x112b, "disabling pause transmit on port " "0 & 1.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, CRB_NIU_XG_PAUSE_CTL_P0| CRB_NIU_XG_PAUSE_CTL_P1); } ql_log(ql_log_info, base_vha, 0x101e, "Mailbox cmd timeout occurred, cmd=0x%x, " "mb[0]=0x%x. Scheduling ISP abort ", command, mcp->mb[0]); set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); /* Allow next mbx cmd to come in. */ complete(&ha->mbx_cmd_comp); if (ha->isp_ops->abort_isp(vha)) { /* Failed. retry later. */ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); ql_dbg(ql_dbg_mbx, vha, 0x101f, "Finished abort_isp.\n"); goto mbx_done; } } } premature_exit: /* Allow next mbx cmd to come in. 
	 */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval) {
		ql_log(ql_log_warn, base_vha, 0x1020,
		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_load_ram
 *	Load RISC RAM via a mailbox command.  The extended variant is used
 *	when the load address does not fit in 16 bits or the chip is
 *	FWI2-capable.
 *
 * Input:
 *	vha = adapter block pointer.
 *	req_dma = DMA address of the firmware segment to load.
 *	risc_addr = RISC load address.
 *	risc_code_size = segment size (16-bit words on legacy chips;
 *	    see the FWI2 branch below for the 32-bit split).
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
	    "Entered %s.\n", __func__);

	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_LOAD_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1023,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
		    "Done %s.\n", __func__);
	}

	return rval;
}

#define	EXTENDED_BB_CREDITS	BIT_0
/*
 * qla2x00_execute_fw
 *     Start adapter firmware.
 *
 * Input:
 *     ha = adapter block pointer.
 *     TARGET_QUEUE_LOCK must be released.
 *     ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *     qla2x00 local function return status code.
 *
 * Context:
 *     Kernel context.
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) {
			/* Pass through the extended-BB-credits NVRAM bit. */
			struct nvram_81xx *nv = ha->nvram;
			mcp->mb[4] = (nv->enhanced_features &
			    EXTENDED_BB_CREDITS);
		} else
			mcp->mb[4] = 0;
		mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
		mcp->in_mb |= MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
			    "Done exchanges=%x.\n", mcp->mb[1]);
		} else {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
			    "Done %s.\n", __func__);
		}
	}

	return rval;
}

/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	major:		pointer for major number.
 *	minor:		pointer for minor number.
 *	subminor:	pointer for subminor number.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	/* 81xx/8031 additionally return MPI/PHY info in mb8-mb13. */
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	if (IS_FWI2_CAPABLE(ha))
		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
	mcp->flags = 0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto failed;

	/* Return mailbox data: results are stored directly into ha. */
	ha->fw_major_version = mcp->mb[1];
	ha->fw_minor_version = mcp->mb[2];
	ha->fw_subminor_version = mcp->mb[3];
	ha->fw_attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
	else
		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
		ha->phy_version[0] = mcp->mb[8] & 0xff;
		ha->phy_version[1] = mcp->mb[9] >> 8;
		ha->phy_version[2] = mcp->mb[9] & 0xff;
	}
	if (IS_FWI2_CAPABLE(ha)) {
		ha->fw_attributes_h = mcp->mb[15];
		ha->fw_attributes_ext[0] = mcp->mb[16];
		ha->fw_attributes_ext[1] = mcp->mb[17];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[15], mcp->mb[6]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[17], mcp->mb[16]);
	}

failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qla2x00_get_fw_options
 *	Get firmware options (NOTE(review): original header said "Set";
 *	the code below issues MBC_GET_FIRMWARE_OPTION and reads mb0-mb3).
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
	} else {
		fwopts[0] = mcp->mb[0];
		fwopts[1] = mcp->mb[1];
		fwopts[2] = mcp->mb[2];
		fwopts[3] = mcp->mb[3];

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
		    "Done %s.\n", __func__);
	}

	return rval;
}


/*
 * qla2x00_set_fw_options
 *	Set firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
	mcp->mb[1] = fwopts[1];
	mcp->mb[2] = fwopts[2];
	mcp->mb[3] = fwopts[3];
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->in_mb |= MBX_1;
	} else {
		/* Legacy chips take additional options in mb10-mb12. */
		mcp->mb[10] = fwopts[10];
		mcp->mb[11] = fwopts[11];
		mcp->mb[12] = 0;	/* Undocumented, but used */
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Completion status is passed back through fwopts[0]. */
	fwopts[0] = mcp->mb[0];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1030,
		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_mbx_reg_test
 *	Mailbox register wrap test.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
	    "Entered %s.\n", __func__);

	/* Known test patterns; firmware must echo them back unchanged. */
	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
		    mcp->mb[7] != 0x2525)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_verify_checksum
 *	Verify firmware checksum.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		/* 32-bit load address split across mb1/mb2. */
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->out_mb |= MBX_2|MBX_1;
		mcp->in_mb |= MBX_2|MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		mcp->in_mb |= MBX_1;
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1036,
		    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_issue_iocb
 *	Issue IOCB using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	buffer = buffer pointer.
 *	phys_addr = physical address of buffer.
 *	size = size of buffer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
    dma_addr_t phys_addr, size_t size, uint32_t tov)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_0;
	mcp->tov = tov;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
	} else {
		sts_entry_t *sts_entry = (sts_entry_t *) buffer;

		/* Mask reserved bits. */
		sts_entry->entry_status &=
		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/* Convenience wrapper using the default mailbox timeout. */
int
qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
    size_t size)
{
	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
	    MBX_TOV_SECONDS);
}

/*
 * qla2x00_abort_command
 *	Abort command aborts a specified IOCB.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sp = SB structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_abort_command(srb_t *sp)
{
	unsigned long flags = 0;
	int rval;
	uint32_t handle = 0;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	fc_port_t *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
	    "Entered %s.\n", __func__);

	/* Locate the outstanding-command handle for this srb. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (handle == req->num_outstanding_cmds) {
		/* command not found */
		return QLA_FUNCTION_FAILED;
	}

	mcp->mb[0] = MBC_ABORT_COMMAND;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = (uint16_t)handle;
	mcp->mb[3] = (uint16_t)(handle >> 16);
	mcp->mb[6] = (uint16_t)cmd->device->lun;
	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_abort_target
 *	Issue an abort-target mailbox command for the given remote port,
 *	then post a marker IOCB to resynchronize the firmware.
 */
int
qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* 'l' (lun) is unused for a target-level abort. */
	l = l;
	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
	    "Entered %s.\n", __func__);

	req = vha->hw->req_q_map[0];
	rsp = req->rsp;
	mcp->mb[0] = MBC_ABORT_TARGET;
	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = fcport->loop_id << 8;
	}
	mcp->mb[2] = vha->hw->loop_reset_delay;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
		    "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
	    MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1040,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_lun_reset
 *	Issue a LUN-reset mailbox command for lun 'l' of the given remote
 *	port, then post a marker IOCB to resynchronize the firmware.
 */
int
qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
{
	int rval, rval2;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	scsi_qla_host_t *vha;
	struct req_que *req;
	struct rsp_que *rsp;

	vha = fcport->vha;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
	    "Entered %s.\n", __func__);

	req = vha->hw->req_q_map[0];
	rsp = req->rsp;
	mcp->mb[0] = MBC_LUN_RESET;
	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = l;
	mcp->mb[3] = 0;
	mcp->mb[9] = vha->vp_idx;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
	    MK_SYNC_ID_LUN);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1044,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_adapter_id
 *	Get adapter ID and topology.
 *
 * Input:
 *	ha = adapter block pointer.
 *	id = pointer for loop ID.
 *	al_pa = pointer for AL_PA.
 *	area = pointer for area.
 *	domain = pointer for domain.
 *	top = pointer for topology.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_0;
	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* CNA parts return FCoE VLAN/FCF/VN-port MAC data in mb10-mb13. */
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (mcp->mb[0] == MBS_COMMAND_ERROR)
		rval = QLA_COMMAND_ERROR;
	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
		rval = QLA_INVALID_COMMAND;

	/* Return data.
	 * The out-parameters are filled unconditionally, even on error.
	 */
	*id = mcp->mb[1];
	*al_pa = LSB(mcp->mb[2]);
	*area = MSB(mcp->mb[2]);
	*domain	= LSB(mcp->mb[3]);
	*top = mcp->mb[6];
	*sw_cap = mcp->mb[7];

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
		    "Done %s.\n", __func__);

		if (IS_CNA_CAPABLE(vha->hw)) {
			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
			vha->fcoe_fcf_idx = mcp->mb[10];
			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
		}
	}

	return rval;
}

/*
 * qla2x00_get_retry_cnt
 *	Get current firmware login retry count and delay.
 *
 * Input:
 *	ha = adapter block pointer.
 *	retry_cnt = pointer to login retry count.
 *	tov = pointer to login timeout value.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
    uint16_t *r_a_tov)
{
	int rval;
	uint16_t ratov;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RETRY_COUNT;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Convert returned data and check our values. */
		*r_a_tov = mcp->mb[3] / 2;
		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
			/* Update to the larger values */
			*retry_cnt = (uint8_t)mcp->mb[1];
			*tov = ratov;
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
	}

	return rval;
}

/*
 * qla2x00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
	    "Entered %s.\n", __func__);

	if (IS_QLA82XX(ha) && ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
			(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));

	if (ha->flags.npiv_supported)
		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
	else
		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(ha->init_cb_dma);
	mcp->mb[3] = LSW(ha->init_cb_dma);
	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* Supply the extended init control block when available. */
	if ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) && ha->ex_init_cb->ex_version) {
		mcp->mb[1] = BIT_0;
		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
		mcp->mb[14] = sizeof(*ha->ex_init_cb);
		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
	}
	/* 1 and 2 should normally be captured. */
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha))
		/* mb3 is additional info about the installed SFP.
		 */
		mcp->in_mb |= MBX_3;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x104d,
		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_node_name_list
 *      Issue get node name list mailbox command, kmalloc()
 *      and return the resulting list. Caller must kfree() it!
 *
 *      The firmware reports the required entry count via mb[2] when the
 *      supplied DMA buffer is too small (status MBS_COMMAND_ERROR /
 *      mb[1]==0xA); the loop below then reallocates and retries.
 *
 * Input:
 *      ha = adapter state pointer.
 *      out_data = resulting list
 *      out_len = length of the resulting list
 *
 * Returns:
 *      qla2x00 local function return status code.
 *
 * Context:
 *      Kernel context.
 */
int
qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_port_24xx_data *list = NULL;
	void *pmap;
	mbx_cmd_t mc;
	dma_addr_t pmap_dma;
	ulong dma_size;
	int rval, left;

	left = 1;
	while (left > 0) {
		dma_size = left * sizeof(*list);
		pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
		    &pmap_dma, GFP_KERNEL);
		if (!pmap) {
			ql_log(ql_log_warn, vha, 0x113f,
			    "%s(%ld): DMA Alloc failed of %ld\n",
			    __func__, vha->host_no, dma_size);
			rval = QLA_MEMORY_ALLOC_FAILED;
			goto out;
		}

		mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
		mc.mb[1] = BIT_1 | BIT_3;
		mc.mb[2] = MSW(pmap_dma);
		mc.mb[3] = LSW(pmap_dma);
		mc.mb[6] = MSW(MSD(pmap_dma));
		mc.mb[7] = LSW(MSD(pmap_dma));
		mc.mb[8] = dma_size;
		mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
		mc.in_mb = MBX_0|MBX_1;
		mc.tov = 30;
		mc.flags = MBX_DMA_IN;

		rval = qla2x00_mailbox_command(vha, &mc);
		if (rval != QLA_SUCCESS) {
			if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
			    (mc.mb[1] == 0xA)) {
				/* Buffer too small: grow and retry. */
				left += le16_to_cpu(mc.mb[2]) /
				    sizeof(struct qla_port_24xx_data);
				goto restart;
			}
			goto out_free;
		}

		left = 0;

		list = kzalloc(dma_size, GFP_KERNEL);
		if (!list) {
			ql_log(ql_log_warn, vha, 0x1140,
			    "%s(%ld): failed to allocate node names list "
			    "structure.\n", __func__, vha->host_no);
			rval = QLA_MEMORY_ALLOC_FAILED;
			goto out_free;
		}

		memcpy(list, pmap, dma_size);
restart:
		dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
	}

	*out_data = list;
	*out_len = dma_size;

out:
	return rval;

out_free:
	dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
	return rval;
}

/*
 * qla2x00_get_port_database
 *	Issue normal/enhanced get port database mailbox command
 *	and copy device name as necessary.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dev = structure pointer.
 *	opt = enhanced cmd option byte.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	port_database_t *pd;
	struct port_database_24xx *pd24;
	dma_addr_t pd_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
	    "Entered %s.\n", __func__);

	pd24 = NULL;
	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0x1050,
		    "Failed to allocate port database structure.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}
	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));

	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
	mcp->mb[2] = MSW(pd_dma);
	mcp->mb[3] = LSW(pd_dma);
	mcp->mb[6] = MSW(MSD(pd_dma));
	mcp->mb[7] = LSW(MSD(pd_dma));
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = fcport->loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = fcport->loop_id << 8 | opt;
		mcp->out_mb |= MBX_1;
	}
	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto gpd_error_out;

	if (IS_FWI2_CAPABLE(ha)) {
		uint64_t zero = 0;
		pd24 = (struct port_database_24xx *) pd;

		/* Check for logged in state. */
		if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
		    pd24->last_login_state != PDS_PRLI_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x1051,
			    "Unable to verify login-state (%x/%x) for "
			    "loop_id %x.\n", pd24->current_login_state,
			    pd24->last_login_state, fcport->loop_id);
			rval = QLA_FUNCTION_FAILED;
			goto gpd_error_out;
		}

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd24->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);

		/* Get port_id of device. */
		fcport->d_id.b.domain = pd24->port_id[0];
		fcport->d_id.b.area = pd24->port_id[1];
		fcport->d_id.b.al_pa = pd24->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
		    FC_COS_CLASS2 : FC_COS_CLASS3;

		if (pd24->prli_svc_param_word_3[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
	} else {
		uint64_t zero = 0;

		/* Check for logged in state. */
		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
			ql_dbg(ql_dbg_mbx, vha, 0x100a,
			    "Unable to verify login-state (%x/%x) - "
			    "portid=%02x%02x%02x.\n", pd->master_state,
			    pd->slave_state, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
			rval = QLA_FUNCTION_FAILED;
			goto gpd_error_out;
		}

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
		     memcmp(fcport->port_name, pd->port_name, 8))) {
			/* We lost the device mid way. */
			rval = QLA_NOT_LOGGED_IN;
			goto gpd_error_out;
		}

		/* Names are little-endian. */
		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

		/* Get port_id of device (legacy layout: area at index 3). */
		fcport->d_id.b.domain = pd->port_id[0];
		fcport->d_id.b.area = pd->port_id[3];
		fcport->d_id.b.al_pa = pd->port_id[2];
		fcport->d_id.b.rsvd_1 = 0;

		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;

		/* Passback COS information. */
		fcport->supported_classes = (pd->options & BIT_4) ?
		    FC_COS_CLASS2: FC_COS_CLASS3;
	}

gpd_error_out:
	dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1052,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
		    mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_firmware_state
 *	Get adapter firmware state.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = pointer for firmware state.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
	mcp->out_mb = MBX_0;
	/* ISP24xx-family firmware reports extra state words in mb[2..5]. */
	if (IS_FWI2_CAPABLE(vha->hw))
		mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	else
		mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/*
	 * Return firmware states.  states[0] is copied back even on
	 * failure; callers must size the array for 5 entries on
	 * FWI2-capable adapters.
	 */
	states[0] = mcp->mb[1];
	if (IS_FWI2_CAPABLE(vha->hw)) {
		states[1] = mcp->mb[2];
		states[2] = mcp->mb[3];
		states[3] = mcp->mb[4];
		states[4] = mcp->mb[5];
	}

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_port_name
 *	Issue get port name mailbox command.
 *	Returned name is in big endian format.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = loop ID of device.
 *	name = pointer for name.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
    uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_PORT_NAME;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_1|MBX_0;
	/* Extended-ID adapters carry loop_id and opt in separate mailboxes. */
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = loop_id << 8 | opt;
	}

	mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
	} else {
		if (name != NULL) {
			/* This function returns name in big endian. */
			name[0] = MSB(mcp->mb[2]);
			name[1] = LSB(mcp->mb[2]);
			name[2] = MSB(mcp->mb[3]);
			name[3] = LSB(mcp->mb[3]);
			name[4] = MSB(mcp->mb[6]);
			name[5] = LSB(mcp->mb[6]);
			name[6] = MSB(mcp->mb[7]);
			name[7] = LSB(mcp->mb[7]);
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla24xx_link_initialization
 *	Issue link initialization mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla24xx_link_initialize(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
	    "Entered %s.\n", __func__);

	/* Only supported on FWI2-capable, non-CNA adapters. */
	if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_LINK_INITIALIZATION;
	mcp->mb[1] = BIT_6|BIT_4;
	mcp->mb[2] = 0;
	mcp->mb[3] = 0;
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_lip_reset
 *	Issue LIP reset mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_lip_reset(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
	    "Entered %s.\n", __func__);

	if (IS_CNA_CAPABLE(vha->hw)) {
		/* Logout across all FCFs. */
		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
		mcp->mb[1] = BIT_1;
		mcp->mb[2] = 0;
		mcp->out_mb = MBX_2|MBX_1|MBX_0;
	} else if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
		mcp->mb[1] = BIT_6;
		mcp->mb[2] = 0;
		mcp->mb[3] = vha->hw->loop_reset_delay;
		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	} else {
		mcp->mb[0] = MBC_LIP_RESET;
		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
		if (HAS_EXTENDED_IDS(vha->hw)) {
			mcp->mb[1] = 0x00ff;
			mcp->mb[10] = 0;
			mcp->out_mb |= MBX_10;
		} else {
			mcp->mb[1] = 0xff00;
		}
		mcp->mb[2] = vha->hw->loop_reset_delay;
		mcp->mb[3] = 0;
	}
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_send_sns
 *	Send SNS command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sns = pointer for command.
 *	cmd_size = command size.
 *	buf_size = response/command size.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
    uint16_t cmd_size, size_t buf_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
	    "Entered %s.\n", __func__);

	/* NOTE(review): mcp->tov is logged here before it is assigned below. */
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
	    "Retry cnt=%d ratov=%d total tov=%d.\n",
	    vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);

	mcp->mb[0] = MBC_SEND_SNS_COMMAND;
	mcp->mb[1] = cmd_size;
	mcp->mb[2] = MSW(sns_phys_address);
	mcp->mb[3] = LSW(sns_phys_address);
	mcp->mb[6] = MSW(MSD(sns_phys_address));
	mcp->mb[7] = LSW(MSD(sns_phys_address));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0|MBX_1;
	mcp->buf_size = buf_size;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
	mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x105f,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla24xx_login_fabric
 *	Issue a fabric PLOGI via a login IOCB on ISP24xx-family adapters and
 *	translate the IOCB completion into mailbox-style status codes in mb[],
 *	mirroring the qla2x00_login_fabric() contract.
 */
int
qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
{
	int rval;
	struct logio_entry_24xx *lg;
	dma_addr_t lg_dma;
	uint32_t iop[2];
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
	    "Entered %s.\n", __func__);

	if (ha->flags.cpu_affinity_enabled)
		req = ha->req_q_map[0];
	else
		req = vha->req;
	/* NOTE(review): rsp is assigned but not used below -- confirm intent. */
	rsp = req->rsp;
	lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
	if (lg == NULL) {
		ql_log(ql_log_warn, vha, 0x1062,
		    "Failed to allocate login IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}
	memset(lg, 0, sizeof(struct logio_entry_24xx));

	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	lg->entry_count = 1;
	lg->handle = MAKE_HANDLE(req->id, lg->handle);
	lg->nport_handle = cpu_to_le16(loop_id);
	lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
	if (opt & BIT_0)
		lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI);
	if (opt & BIT_1)
		lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI);
	lg->port_id[0] = al_pa;
	lg->port_id[1] = area;
	lg->port_id[2] = domain;
	lg->vp_index = vha->vp_idx;
	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
	    (ha->r_a_tov / 10 * 2) + 2);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1063,
		    "Failed to issue login IOCB (%x).\n", rval);
	} else if (lg->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x1064,
		    "Failed to complete IOCB -- error status (%x).\n",
		    lg->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		iop[0] = le32_to_cpu(lg->io_parameter[0]);
		iop[1] = le32_to_cpu(lg->io_parameter[1]);

		ql_dbg(ql_dbg_mbx, vha, 0x1065,
		    "Failed to complete IOCB -- completion  status (%x) "
		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
		    iop[0], iop[1]);

		/* Map IOCB sub-codes onto legacy mailbox status values. */
		switch (iop[0]) {
		case LSC_SCODE_PORTID_USED:
			mb[0] = MBS_PORT_ID_USED;
			mb[1] = LSW(iop[1]);
			break;
		case LSC_SCODE_NPORT_USED:
			mb[0] = MBS_LOOP_ID_USED;
			break;
		case LSC_SCODE_NOLINK:
		case LSC_SCODE_NOIOCB:
		case LSC_SCODE_NOXCB:
		case LSC_SCODE_CMD_FAILED:
		case LSC_SCODE_NOFABRIC:
		case LSC_SCODE_FW_NOT_READY:
		case LSC_SCODE_NOT_LOGGED_IN:
		case LSC_SCODE_NOPCB:
		case LSC_SCODE_ELS_REJECT:
		case LSC_SCODE_CMD_PARAM_ERR:
		case LSC_SCODE_NONPORT:
		case LSC_SCODE_LOGGED_IN:
		case LSC_SCODE_NOFLOGI_ACC:
		default:
			mb[0] = MBS_COMMAND_ERROR;
			break;
		}
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
		    "Done %s.\n", __func__);

		iop[0] = le32_to_cpu(lg->io_parameter[0]);

		mb[0] = MBS_COMMAND_COMPLETE;
		mb[1] = 0;
		if (iop[0] & BIT_4) {
			if (iop[0] & BIT_8)
				mb[1] |= BIT_1;
		} else
			mb[1] = BIT_0;

		/* Passback COS information. */
		mb[10] = 0;
		if (lg->io_parameter[7] || lg->io_parameter[8])
			mb[10] |= BIT_0;	/* Class 2. */
		if (lg->io_parameter[9] || lg->io_parameter[10])
			mb[10] |= BIT_1;	/* Class 3. */
		if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
			mb[10] |= BIT_7;	/* Confirmed Completion
						 * Allowed
						 */
	}

	dma_pool_free(ha->s_dma_pool, lg, lg_dma);

	return rval;
}

/*
 * qla2x00_login_fabric
 *	Issue login fabric port mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = device loop ID.
 *	domain = device domain.
 *	area = device area.
 *	al_pa = device AL_PA.
 *	status = pointer for return status.
 *	opt = command options.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = loop_id;
		mcp->mb[10] = opt;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = (loop_id << 8) | opt;
	}
	mcp->mb[2] = domain;
	mcp->mb[3] = area << 8 | al_pa;

	mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Return mailbox statuses. */
	if (mb != NULL) {
		mb[0] = mcp->mb[0];
		mb[1] = mcp->mb[1];
		mb[2] = mcp->mb[2];
		mb[6] = mcp->mb[6];
		mb[7] = mcp->mb[7];
		/* COS retrieved from Get-Port-Database mailbox command. */
		mb[10] = 0;
	}

	if (rval != QLA_SUCCESS) {
		/* RLU tmp code: need to change main mailbox_command function to
		 * return ok even when the mailbox completion value is not
		 * SUCCESS. The caller needs to be responsible to interpret
		 * the return values of this mailbox command if we're not
		 * to change too much of the existing code.
		 */
		if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
		    mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
		    mcp->mb[0] == 0x4006)
			rval = QLA_SUCCESS;

		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1068,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_login_local_device
 *	Issue login loop port mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = device loop ID.
 *	opt = command options.
 *
 * Returns:
 *	Return status code.
 *
 * Context:
 *	Kernel context.
 *
 */
int
qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *mb_ret, uint8_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
	    "Entered %s.\n", __func__);

	/* FWI2-capable adapters use the IOCB-based login path instead. */
	if (IS_FWI2_CAPABLE(ha))
		return qla24xx_login_fabric(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb_ret, opt);

	mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
	if (HAS_EXTENDED_IDS(ha))
		mcp->mb[1] = fcport->loop_id;
	else
		mcp->mb[1] = fcport->loop_id << 8;
	mcp->mb[2] = opt;
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Return mailbox statuses. */
	if (mb_ret != NULL) {
		mb_ret[0] = mcp->mb[0];
		mb_ret[1] = mcp->mb[1];
		mb_ret[6] = mcp->mb[6];
		mb_ret[7] = mcp->mb[7];
	}

	if (rval != QLA_SUCCESS) {
		/* AV tmp code: need to change main mailbox_command function to
		 * return ok even when the mailbox completion value is not
		 * SUCCESS. The caller needs to be responsible to interpret
		 * the return values of this mailbox command if we're not
		 * to change too much of the existing code.
		 */
		if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
			rval = QLA_SUCCESS;

		ql_dbg(ql_dbg_mbx, vha, 0x106b,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
		    "Done %s.\n", __func__);
	}

	return (rval);
}

/*
 * qla24xx_fabric_logout
 *	Issue an explicit fabric logout (LOGO) IOCB on ISP24xx-family
 *	adapters, freeing the N_Port handle on completion.
 */
int
qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa)
{
	int rval;
	struct logio_entry_24xx *lg;
	dma_addr_t lg_dma;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
	    "Entered %s.\n", __func__);

	lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
	if (lg == NULL) {
		ql_log(ql_log_warn, vha, 0x106e,
		    "Failed to allocate logout IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}
	memset(lg, 0, sizeof(struct logio_entry_24xx));

	if (ql2xmaxqueues > 1)
		req = ha->req_q_map[0];
	else
		req = vha->req;
	/* NOTE(review): rsp is assigned but not used below -- confirm intent. */
	rsp = req->rsp;
	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	lg->entry_count = 1;
	lg->handle = MAKE_HANDLE(req->id, lg->handle);
	lg->nport_handle = cpu_to_le16(loop_id);
	lg->control_flags =
	    __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
		LCF_FREE_NPORT);
	lg->port_id[0] = al_pa;
	lg->port_id[1] = area;
	lg->port_id[2] = domain;
	lg->vp_index = vha->vp_idx;
	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
	    (ha->r_a_tov / 10 * 2) + 2);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x106f,
		    "Failed to issue logout IOCB (%x).\n", rval);
	} else if (lg->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x1070,
		    "Failed to complete IOCB -- error status (%x).\n",
		    lg->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_dbg(ql_dbg_mbx, vha, 0x1071,
		    "Failed to complete IOCB -- completion status (%x) "
		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
		    le32_to_cpu(lg->io_parameter[0]),
		    le32_to_cpu(lg->io_parameter[1]));
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
		    "Done %s.\n", __func__);
	}

	dma_pool_free(ha->s_dma_pool, lg, lg_dma);

	return rval;
}

/*
 * qla2x00_fabric_logout
 *	Issue logout fabric port mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = device loop ID.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
	mcp->out_mb = MBX_1|MBX_0;
	if (HAS_EXTENDED_IDS(vha->hw)) {
		mcp->mb[1] = loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10;
	} else {
		mcp->mb[1] = loop_id << 8;
	}

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1074,
		    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_full_login_lip
 *	Issue full login LIP mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_full_login_lip(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
	mcp->mb[2] = 0;
	mcp->mb[3] = 0;
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_id_list
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
    uint16_t *entries)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
	    "Entered %s.\n", __func__);

	if (id_list == NULL)
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_GET_ID_LIST;
	mcp->out_mb = MBX_0;
	/* DMA address mailbox layout differs between FWI2 and legacy parts. */
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[2] = MSW(id_list_dma);
		mcp->mb[3] = LSW(id_list_dma);
		mcp->mb[6] = MSW(MSD(id_list_dma));
		mcp->mb[7] = LSW(MSD(id_list_dma));
		mcp->mb[8] = 0;
		mcp->mb[9] = vha->vp_idx;
		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
	} else {
		mcp->mb[1] = MSW(id_list_dma);
		mcp->mb[2] = LSW(id_list_dma);
		mcp->mb[3] = MSW(MSD(id_list_dma));
		mcp->mb[6] = LSW(MSD(id_list_dma));
		mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
	}
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
	} else {
		*entries = mcp->mb[1];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_resource_cnts
 *	Get current firmware resource counts.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
    uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
    uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* mb[12] (max FCFs) is only returned on 81xx/83xx parts. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		mcp->in_mb |= MBX_12;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x107d,
		    "Failed mb[0]=%x.\n", mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
		    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
		    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
		    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
		    mcp->mb[11], mcp->mb[12]);

		/* Every output pointer is optional; copy back only if given. */
		if (cur_xchg_cnt)
			*cur_xchg_cnt = mcp->mb[3];
		if (orig_xchg_cnt)
			*orig_xchg_cnt = mcp->mb[6];
		if (cur_iocb_cnt)
			*cur_iocb_cnt = mcp->mb[7];
		if (orig_iocb_cnt)
			*orig_iocb_cnt = mcp->mb[10];
		if (vha->hw->flags.npiv_supported && max_npiv_vports)
			*max_npiv_vports = mcp->mb[11];
		if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs)
			*max_fcfs = mcp->mb[12];
	}

	return (rval);
}

/*
 * qla2x00_get_fcal_position_map
 *	Get FCAL (LILP) position map using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	pos_map = buffer pointer (can be NULL).
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	char *pmap;
	dma_addr_t pmap_dma;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
	    "Entered %s.\n", __func__);

	pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
	if (pmap  == NULL) {
		ql_log(ql_log_warn, vha, 0x1080,
		    "Memory alloc failed.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}
	memset(pmap, 0, FCAL_MAP_SIZE);

	mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
	mcp->mb[2] = MSW(pmap_dma);
	mcp->mb[3] = LSW(pmap_dma);
	mcp->mb[6] = MSW(MSD(pmap_dma));
	mcp->mb[7] = LSW(MSD(pmap_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->buf_size = FCAL_MAP_SIZE;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
		    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
		    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
		/* pmap[0] holds the map length; dump length + the map bytes. */
		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
		    pmap, pmap[0] + 1);

		if (pos_map)
			memcpy(pos_map, pmap, FCAL_MAP_SIZE);
	}
	dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_link_status
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = device loop ID.
 *	ret_buf = pointer to link status return buffer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = mem alloc error.
 *	BIT_1 = mailbox error.
 */
int
qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
    struct link_statistics *stats, dma_addr_t stats_dma)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint32_t *siter, *diter, dwords;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_LINK_STATUS;
	mcp->mb[2] = MSW(stats_dma);
	mcp->mb[3] = LSW(stats_dma);
	mcp->mb[6] = MSW(MSD(stats_dma));
	mcp->mb[7] = LSW(MSD(stats_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_0;
	/* loop_id placement depends on the adapter generation. */
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = loop_id;
		mcp->mb[4] = 0;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10|MBX_4|MBX_1;
		mcp->in_mb |= MBX_1;
	} else if (HAS_EXTENDED_IDS(ha)) {
		mcp->mb[1] = loop_id;
		mcp->mb[10] = 0;
		mcp->out_mb |= MBX_10|MBX_1;
	} else {
		mcp->mb[1] = loop_id << 8;
		mcp->out_mb |= MBX_1;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = IOCTL_CMD;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x1085,
			    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
			rval = QLA_FUNCTION_FAILED;
		} else {
			/* Copy over data -- firmware data is LE. */
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
			    "Done %s.\n", __func__);
			dwords = offsetof(struct link_statistics, unused1) / 4;
			siter = diter = &stats->link_fail_cnt;
			while (dwords--)
				*diter++ = le32_to_cpu(*siter++);
		}
	} else {
		/* Failed. */
		ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
	}

	return rval;
}

/*
 * qla24xx_get_isp_stats
 *	Retrieve private link statistics from ISP24xx-family firmware into
 *	the caller's DMA buffer and byte-swap them to CPU order.
 */
int
qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
    dma_addr_t stats_dma)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint32_t *siter, *diter, dwords;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
	mcp->mb[2] = MSW(stats_dma);
	mcp->mb[3] = LSW(stats_dma);
	mcp->mb[6] = MSW(MSD(stats_dma));
	mcp->mb[7] = LSW(MSD(stats_dma));
	mcp->mb[8] = sizeof(struct link_statistics) / 4;
	mcp->mb[9] = vha->vp_idx;
	mcp->mb[10] = 0;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = IOCTL_CMD;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_mbx, vha, 0x1089,
			    "Failed mb[0]=%x.\n", mcp->mb[0]);
			rval = QLA_FUNCTION_FAILED;
		} else {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
			    "Done %s.\n", __func__);
			/* Copy over data -- firmware data is LE. */
			dwords = sizeof(struct link_statistics) / 4;
			siter = diter = &stats->link_fail_cnt;
			while (dwords--)
				*diter++ = le32_to_cpu(*siter++);
		}
	} else {
		/* Failed. */
		ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
	}

	return rval;
}

/*
 * qla24xx_abort_command
 *	Locate the outstanding-command handle for @sp under the hardware
 *	lock, then issue an abort IOCB for it.
 */
int
qla24xx_abort_command(srb_t *sp)
{
	int rval;
	unsigned long flags = 0;

	struct abort_entry_24xx *abt;
	dma_addr_t abt_dma;
	uint32_t handle;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
	    "Entered %s.\n", __func__);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}

	abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
	if (abt == NULL) {
		ql_log(ql_log_warn, vha, 0x108d,
		    "Failed to allocate abort IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}
	memset(abt, 0, sizeof(struct abort_entry_24xx));

	abt->entry_type = ABORT_IOCB_TYPE;
	abt->entry_count = 1;
	abt->handle = MAKE_HANDLE(req->id, abt->handle);
	abt->nport_handle = cpu_to_le16(fcport->loop_id);
	abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
	abt->port_id[0] = fcport->d_id.b.al_pa;
	abt->port_id[1] = fcport->d_id.b.area;
	abt->port_id[2] = fcport->d_id.b.domain;
	abt->vp_index = fcport->vha->vp_idx;

	abt->req_que_no = cpu_to_le16(req->id);

	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x108e,
		    "Failed to issue IOCB (%x).\n", rval);
	} else if (abt->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x108f,
		    "Failed to complete IOCB -- error status (%x).\n",
		    abt->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
		/* Firmware reuses nport_handle as the completion status. */
		ql_dbg(ql_dbg_mbx, vha, 0x1090,
		    "Failed to complete IOCB -- completion status (%x).\n",
		    le16_to_cpu(abt->nport_handle));
		rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
		    "Done %s.\n", __func__);
	}

	dma_pool_free(ha->s_dma_pool, abt, abt_dma);

	return rval;
}

/*
 * Overlay for a task-management request: the firmware writes its status
 * IOCB back over the same buffer the request was issued from.
 */
struct tsk_mgmt_cmd {
	union {
		struct tsk_mgmt_entry tsk;
		struct sts_entry_24xx sts;
	} p;
};

/*
 * __qla24xx_issue_tmf
 *	Issue a task-management IOCB (@type, e.g. target or LUN reset) for
 *	@fcport, validate the returned status IOCB, then post a marker.
 *	@name is used only for log messages.
 */
static int
__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
    unsigned int l, int tag)
{
	int		rval, rval2;
	struct tsk_mgmt_cmd *tsk;
	struct sts_entry_24xx *sts;
	dma_addr_t	tsk_dma;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	vha = fcport->vha;
	ha = vha->hw;
	req = vha->req;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
	    "Entered %s.\n", __func__);

	if (ha->flags.cpu_affinity_enabled)
		rsp = ha->rsp_q_map[tag + 1];
	else
		rsp = req->rsp;
	tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
	if (tsk == NULL) {
		ql_log(ql_log_warn, vha, 0x1093,
		    "Failed to allocate task management IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}
	memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));

	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->p.tsk.entry_count = 1;
	tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->p.tsk.control_flags = cpu_to_le32(type);
	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
	if (type == TCF_LUN_RESET) {
		int_to_scsilun(l, &tsk->p.tsk.lun);
		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
		    sizeof(tsk->p.tsk.lun));
	}

	sts = &tsk->p.sts;
	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1094,
		    "Failed to issue %s reset IOCB (%x).\n", name, rval);
	} else if (sts->entry_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x1095,
		    "Failed to complete IOCB -- error status (%x).\n",
		    sts->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status !=
	    __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_dbg(ql_dbg_mbx, vha, 0x1096,
		    "Failed to complete IOCB -- completion status (%x).\n",
		    le16_to_cpu(sts->comp_status));
		rval = QLA_FUNCTION_FAILED;
	} else if (le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID) {
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
			    "Ignoring inconsistent data length -- not enough "
			    "response info (%d).\n",
			    le32_to_cpu(sts->rsp_data_len));
		} else if (sts->data[3]) {
			ql_dbg(ql_dbg_mbx, vha, 0x1098,
			    "Failed to complete IOCB -- response (%x).\n",
			    sts->data[3]);
			rval = QLA_FUNCTION_FAILED;
		}
	}

	/* Issue marker IOCB. */
	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
	if (rval2 != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1099,
		    "Failed to issue marker IOCB (%x).\n", rval2);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
		    "Done %s.\n", __func__);
	}

	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);

	return rval;
}

/* Target reset: prefer the async TM path when enabled, else the sync IOCB. */
int
qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
		return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);

	return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
}

/* LUN reset: prefer the async TM path when enabled, else the sync IOCB. */
int
qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
		return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);

	return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
}

/* Ask the firmware to generate a system error (for diagnostics/testing). */
int
qla2x00_system_error(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 5;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/**
 * qla2x00_set_serdes_params() -
 * @ha: HA context
 *
 * Returns
 */
int
qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
    uint16_t sw_em_2g, uint16_t sw_em_4g)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SERDES_PARAMS;
	mcp->mb[1] = BIT_0;
	/* BIT_15 marks each per-rate emphasis value as valid. */
	mcp->mb[2] = sw_em_1g | BIT_15;
	mcp->mb[3] = sw_em_2g | BIT_15;
	mcp->mb[4] = sw_em_4g | BIT_15;
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x109f,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/* Stop the ISP firmware (FWI2-capable adapters only). */
int
qla2x00_stop_firmware(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_STOP_FIRMWARE;
	mcp->mb[1] = 0;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 5;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
		if (mcp->mb[0] == MBS_INVALID_COMMAND)
			rval = QLA_INVALID_COMMAND;
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/* Enable extended firmware tracing (EFT) into the given DMA buffer. */
int
qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
    uint16_t buffers)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	if (unlikely(pci_channel_offline(vha->hw->pdev)))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_TRACE_CONTROL;
	mcp->mb[1] = TC_EFT_ENABLE;
	mcp->mb[2] = LSW(eft_dma);
	mcp->mb[3] = MSW(eft_dma);
	mcp->mb[4] = LSW(MSD(eft_dma));
	mcp->mb[5] = MSW(MSD(eft_dma));
	mcp->mb[6] = buffers;
	mcp->mb[7] = TC_AEN_DISABLE;
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10a5,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/* Disable extended firmware tracing (EFT). */
int
qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	if (unlikely(pci_channel_offline(vha->hw->pdev)))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_TRACE_CONTROL;
	mcp->mb[1] = TC_EFT_DISABLE;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10a8,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/* Enable fibre channel event (FCE) tracing on 25xx/81xx/83xx adapters. */
int
qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
    uint16_t buffers, uint16_t *mb, uint32_t *dwords)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
	    "Entered %s.\n", __func__);

	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
	    !IS_QLA83XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	if (unlikely(pci_channel_offline(vha->hw->pdev)))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_TRACE_CONTROL;
	mcp->mb[1] = TC_FCE_ENABLE;
	mcp->mb[2] = LSW(fce_dma);
	mcp->mb[3] = MSW(fce_dma);
	mcp->mb[4] = LSW(MSD(fce_dma));
	mcp->mb[5] = MSW(MSD(fce_dma));
	mcp->mb[6] = buffers;
	mcp->mb[7] = TC_AEN_DISABLE;
	mcp->mb[8] = 0;
	mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
	mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
	    MBX_1|MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10ab,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
		    "Done %s.\n", __func__);

		if (mb)
			memcpy(mb, mcp->mb, 8 * sizeof(*mb));
		if (dwords)
			*dwords = buffers;
	}

	return rval;
}

int
qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(vha->hw->pdev))) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_TRACE_CONTROL; mcp->mb[1] = TC_FCE_DISABLE; mcp->mb[2] = TC_FCE_DISABLE_TRACE; mcp->out_mb = MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10ae, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, "Done %s.\n", __func__); if (wr) *wr = (uint64_t) mcp->mb[5] << 48 | (uint64_t) mcp->mb[4] << 32 | (uint64_t) mcp->mb[3] << 16 | (uint64_t) mcp->mb[2]; if (rd) *rd = (uint64_t) mcp->mb[9] << 48 | (uint64_t) mcp->mb[8] << 32 | (uint64_t) mcp->mb[7] << 16 | (uint64_t) mcp->mb[6]; } return rval; } int qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t *port_speed, uint16_t *mb) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0, "Entered %s.\n", __func__); if (!IS_IIDMA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_PORT_PARAMS; mcp->mb[1] = loop_id; mcp->mb[2] = mcp->mb[3] = 0; mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); /* Return mailbox statuses. 
*/ if (mb != NULL) { mb[0] = mcp->mb[0]; mb[1] = mcp->mb[1]; mb[3] = mcp->mb[3]; } if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, "Done %s.\n", __func__); if (port_speed) *port_speed = mcp->mb[3]; } return rval; } int qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t port_speed, uint16_t *mb) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, "Entered %s.\n", __func__); if (!IS_IIDMA_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_PORT_PARAMS; mcp->mb[1] = loop_id; mcp->mb[2] = BIT_0; if (IS_CNA_CAPABLE(vha->hw)) mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); else mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); /* Return mailbox statuses. 
*/ if (mb != NULL) { mb[0] = mcp->mb[0]; mb[1] = mcp->mb[1]; mb[3] = mcp->mb[3]; } if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, "Done %s.\n", __func__); } return rval; } void qla24xx_report_id_acquisition(scsi_qla_host_t *vha, struct vp_rpt_id_entry_24xx *rptid_entry) { uint8_t vp_idx; uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *vp; unsigned long flags; int found; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, "Entered %s.\n", __func__); if (rptid_entry->entry_status != 0) return; if (rptid_entry->format == 0) { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7, "Format 0 : Number of VPs setup %d, number of " "VPs acquired %d.\n", MSB(le16_to_cpu(rptid_entry->vp_count)), LSB(le16_to_cpu(rptid_entry->vp_count))); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8, "Primary port id %02x%02x%02x.\n", rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); } else if (rptid_entry->format == 1) { vp_idx = LSB(stat); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9, "Format 1: VP[%d] enabled - status %d - with " "port id %02x%02x%02x.\n", vp_idx, MSB(stat), rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); vp = vha; if (vp_idx == 0 && (MSB(stat) != 1)) goto reg_needed; if (MSB(stat) != 0 && MSB(stat) != 2) { ql_dbg(ql_dbg_mbx, vha, 0x10ba, "Could not acquire ID for VP[%d].\n", vp_idx); return; } found = 0; spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { if (vp_idx == vp->vp_idx) { found = 1; break; } } spin_unlock_irqrestore(&ha->vport_slock, flags); if (!found) return; vp->d_id.b.domain = rptid_entry->port_id[2]; vp->d_id.b.area = rptid_entry->port_id[1]; vp->d_id.b.al_pa = rptid_entry->port_id[0]; /* * Cannot configure here as we are still sitting on the * response queue. Handle it in dpc context. 
*/ set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); reg_needed: set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); set_bit(VP_DPC_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } /* * qla24xx_modify_vp_config * Change VP configuration for vha * * Input: * vha = adapter block pointer. * * Returns: * qla2xxx local function return status code. * * Context: * Kernel context. */ int qla24xx_modify_vp_config(scsi_qla_host_t *vha) { int rval; struct vp_config_entry_24xx *vpmod; dma_addr_t vpmod_dma; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); /* This can be called by the parent */ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, "Entered %s.\n", __func__); vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); if (!vpmod) { ql_log(ql_log_warn, vha, 0x10bc, "Failed to allocate modify VP IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(vpmod, 0, sizeof(struct vp_config_entry_24xx)); vpmod->entry_type = VP_CONFIG_IOCB_TYPE; vpmod->entry_count = 1; vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; vpmod->vp_count = 1; vpmod->vp_index1 = vha->vp_idx; vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; qlt_modify_vp_config(vha, vpmod); memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); vpmod->entry_count = 1; rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10bd, "Failed to issue VP config IOCB (%x).\n", rval); } else if (vpmod->comp_status != 0) { ql_dbg(ql_dbg_mbx, vha, 0x10be, "Failed to complete IOCB -- error status (%x).\n", vpmod->comp_status); rval = QLA_FUNCTION_FAILED; } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { ql_dbg(ql_dbg_mbx, vha, 0x10bf, "Failed to complete IOCB -- completion status (%x).\n", le16_to_cpu(vpmod->comp_status)); rval = QLA_FUNCTION_FAILED; } else { /* EMPTY */ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0, "Done 
%s.\n", __func__); fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); } dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); return rval; } /* * qla24xx_control_vp * Enable a virtual port for given host * * Input: * ha = adapter block pointer. * vhba = virtual adapter (unused) * index = index number for enabled VP * * Returns: * qla2xxx local function return status code. * * Context: * Kernel context. */ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) { int rval; int map, pos; struct vp_ctrl_entry_24xx *vce; dma_addr_t vce_dma; struct qla_hw_data *ha = vha->hw; int vp_index = vha->vp_idx; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1, "Entered %s enabling index %d.\n", __func__, vp_index); if (vp_index == 0 || vp_index >= ha->max_npiv_vports) return QLA_PARAMETER_ERROR; vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma); if (!vce) { ql_log(ql_log_warn, vha, 0x10c2, "Failed to allocate VP control IOCB.\n"); return QLA_MEMORY_ALLOC_FAILED; } memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx)); vce->entry_type = VP_CTRL_IOCB_TYPE; vce->entry_count = 1; vce->command = cpu_to_le16(cmd); vce->vp_count = __constant_cpu_to_le16(1); /* index map in firmware starts with 1; decrement index * this is ok as we never use index 0 */ map = (vp_index - 1) / 8; pos = (vp_index - 1) & 7; mutex_lock(&ha->vport_lock); vce->vp_idx_map[map] |= 1 << pos; mutex_unlock(&ha->vport_lock); rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10c3, "Failed to issue VP control IOCB (%x).\n", rval); } else if (vce->entry_status != 0) { ql_dbg(ql_dbg_mbx, vha, 0x10c4, "Failed to complete IOCB -- error status (%x).\n", vce->entry_status); rval = QLA_FUNCTION_FAILED; } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { ql_dbg(ql_dbg_mbx, vha, 0x10c5, "Failed to complet IOCB -- completion status (%x).\n", le16_to_cpu(vce->comp_status)); rval = 
QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6, "Done %s.\n", __func__); } dma_pool_free(ha->s_dma_pool, vce, vce_dma); return rval; } /* * qla2x00_send_change_request * Receive or disable RSCN request from fabric controller * * Input: * ha = adapter block pointer * format = registration format: * 0 - Reserved * 1 - Fabric detected registration * 2 - N_port detected registration * 3 - Full registration * FF - clear registration * vp_idx = Virtual port index * * Returns: * qla2x00 local function return status code. * * Context: * Kernel Context */ int qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, uint16_t vp_idx) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; mcp->mb[1] = format; mcp->mb[9] = vp_idx; mcp->out_mb = MBX_9|MBX_1|MBX_0; mcp->in_mb = MBX_0|MBX_1; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval == QLA_SUCCESS) { if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { rval = BIT_1; } } else rval = BIT_1; return rval; } int qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, uint32_t size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, "Entered %s.\n", __func__); if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; mcp->mb[8] = MSW(addr); mcp->out_mb = MBX_8|MBX_0; } else { mcp->mb[0] = MBC_DUMP_RISC_RAM; mcp->out_mb = MBX_0; } mcp->mb[1] = LSW(addr); mcp->mb[2] = MSW(req_dma); mcp->mb[3] = LSW(req_dma); mcp->mb[6] = MSW(MSD(req_dma)); mcp->mb[7] = LSW(MSD(req_dma)); mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; if (IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[4] = MSW(size); mcp->mb[5] = LSW(size); mcp->out_mb |= MBX_5|MBX_4; } else { mcp->mb[4] = LSW(size); mcp->out_mb |= MBX_4; } mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = 
qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1008,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
		    "Done %s.\n", __func__);
	}

	return rval;
}
/* 84XX Support **************************************************************/

/* Request/response overlay for the ISP84xx Verify Chip IOCB; the same
 * DMA buffer is reused for the firmware's reply. */
struct cs84xx_mgmt_cmd {
	union {
		struct verify_chip_entry_84xx req;
		struct verify_chip_rsp_84xx rsp;
	} p;
};

/*
 * qla84xx_verify_chip() - issue a Verify Chip IOCB to an ISP84xx
 * (Menlo) device, optionally updating its operational firmware. On a
 * firmware-update failure the command is retried once without the
 * update. status[0]/status[1] receive completion/failure codes.
 */
int
qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
{
	int rval, retry;
	struct cs84xx_mgmt_cmd *mn;
	dma_addr_t mn_dma;
	uint16_t options;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
	    "Entered %s.\n", __func__);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (mn == NULL) {
		return QLA_MEMORY_ALLOC_FAILED;
	}

	/* Force Update? */
	options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
	/* Diagnostic firmware? */
	/* options |= MENLO_DIAG_FW; */
	/* We update the firmware with only one data sequence. */
	options |= VCO_END_OF_DATA;

	do {
		retry = 0;
		memset(mn, 0, sizeof(*mn));
		mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
		mn->p.req.entry_count = 1;
		mn->p.req.options = cpu_to_le16(options);

		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
		    "Dump of Verify Request.\n");
		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
		    (uint8_t *)mn, sizeof(*mn));

		/* Long 120 second timeout for the firmware verify/update. */
		rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_mbx, vha, 0x10cb,
			    "Failed to issue verify IOCB (%x).\n", rval);
			goto verify_done;
		}

		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
		    "Dump of Verify Response.\n");
		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
		    (uint8_t *)mn, sizeof(*mn));

		status[0] = le16_to_cpu(mn->p.rsp.comp_status);
		status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
		    le16_to_cpu(mn->p.rsp.failure_code) : 0;
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
		    "cs=%x fc=%x.\n", status[0], status[1]);

		if (status[0] != CS_COMPLETE) {
			rval = QLA_FUNCTION_FAILED;
			if (!(options & VCO_DONT_UPDATE_FW)) {
				/* Retry once without the firmware update. */
				ql_dbg(ql_dbg_mbx, vha, 0x10cf,
				    "Firmware update failed. Retrying "
				    "without update firmware.\n");
				options |= VCO_DONT_UPDATE_FW;
				options &= ~VCO_FORCE_UPDATE;
				retry = 1;
			}
		} else {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
			    "Firmware updated to %x.\n",
			    le32_to_cpu(mn->p.rsp.fw_ver));

			/* NOTE: we only update OP firmware. */
			spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
			ha->cs84xx->op_fw_version =
			    le32_to_cpu(mn->p.rsp.fw_ver);
			spin_unlock_irqrestore(&ha->cs84xx->access_lock,
			    flags);
		}
	} while (retry);

verify_done:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla25xx_init_req_que() - initialize a multi-queue request queue via
 * MBC_INITIALIZE_MULTIQ, mapping its doorbell registers and (unless
 * BIT_0 "re-init" option is set) zeroing the in/out pointers.
 */
int
qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int rval;
	unsigned long flags;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct device_reg_25xxmq __iomem *reg;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
	mcp->mb[1] = req->options;
	mcp->mb[2] = MSW(LSD(req->dma));
	mcp->mb[3] = LSW(LSD(req->dma));
	mcp->mb[6] = MSW(MSD(req->dma));
	mcp->mb[7] = LSW(MSD(req->dma));
	mcp->mb[5] = req->length;
	if (req->rsp)
		mcp->mb[10] = req->rsp->id;
	mcp->mb[12] = req->qos;
	mcp->mb[11] = req->vp_idx;
	mcp->mb[13] = req->rid;
	if (IS_QLA83XX(ha))
		mcp->mb[15] = 0;

	/* Per-queue register window within the MQ I/O BAR. */
	reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
		QLA_QUE_PAGE * req->id);

	mcp->mb[4] = req->id;
	/* que in ptr index */
	mcp->mb[8] = 0;
	/* que out ptr index */
	mcp->mb[9] = 0;
	mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
			MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS * 2;

	if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
		mcp->in_mb |= MBX_1;
	if (IS_QLA83XX(ha)) {
		mcp->out_mb |= MBX_15;
		/* debug q create issue in SR-IOV */
		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (!(req->options & BIT_0)) {
		WRT_REG_DWORD(&reg->req_q_in, 0);
		if (!IS_QLA83XX(ha))
			WRT_REG_DWORD(&reg->req_q_out, 0);
	}
	req->req_q_in = &reg->req_q_in;
	req->req_q_out = &reg->req_q_out;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10d4,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla25xx_init_rsp_que() - initialize a multi-queue response queue;
 * mirror image of qla25xx_init_req_que() for the response side,
 * including the queue's MSI-X vector in mb[14].
 */
int
qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int rval;
	unsigned long flags;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct device_reg_25xxmq __iomem *reg;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
	mcp->mb[1] = rsp->options;
	mcp->mb[2] = MSW(LSD(rsp->dma));
	mcp->mb[3] = LSW(LSD(rsp->dma));
	mcp->mb[6] = MSW(MSD(rsp->dma));
	mcp->mb[7] = LSW(MSD(rsp->dma));
	mcp->mb[5] = rsp->length;
	mcp->mb[14] = rsp->msix->entry;
	mcp->mb[13] = rsp->rid;
	if (IS_QLA83XX(ha))
		mcp->mb[15] = 0;

	reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
		QLA_QUE_PAGE * rsp->id);

	mcp->mb[4] = rsp->id;
	/* que in ptr index */
	mcp->mb[8] = 0;
	/* que out ptr index */
	mcp->mb[9] = 0;
	mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
			|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS * 2;

	if (IS_QLA81XX(ha)) {
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
		mcp->in_mb |= MBX_1;
	} else if (IS_QLA83XX(ha)) {
		mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
		mcp->in_mb |= MBX_1;
		/* debug q create issue in SR-IOV */
		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (!(rsp->options & BIT_0)) {
		WRT_REG_DWORD(&reg->rsp_q_out, 0);
		if (!IS_QLA83XX(ha))
			WRT_REG_DWORD(&reg->rsp_q_in, 0);
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10d7,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla81xx_idc_ack() - acknowledge an Inter-Driver Communication (IDC)
 * event, echoing the QLA_IDC_ACK_REGS registers supplied in @mb.
 */
int
qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_IDC_ACK;
	memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10da,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla81xx_fac_get_sector_size() - query the flash sector size through
 * the Flash Access Control (FAC) mailbox interface (ISP81xx/83xx only).
 */
int
qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
	    "Entered %s.\n", __func__);

	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
	mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10dd,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
		    "Done %s.\n", __func__);
		*sector_size = mcp->mb[1];
	}

	return rval;
}

/*
 * qla81xx_fac_do_write_enable() - enable (@enable != 0) or protect
 * flash writes through the FAC interface (ISP81xx/83xx only).
 */
int
qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
	mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
	    FAC_OPT_CMD_WRITE_PROTECT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e0,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla81xx_fac_erase_sector() - erase flash sectors from @start to
 * @finish through the FAC interface (ISP81xx/83xx only).
 */
int
qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
	mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
	mcp->mb[2] = LSW(start);
	mcp->mb[3] = MSW(start);
	mcp->mb[4] = LSW(finish);
	mcp->mb[5] = MSW(finish);
	mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e3,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla81xx_restart_mpi_firmware() - ask the ISP81xx to restart its
 * Management Processor Interface (MPI) firmware.
 */
int
qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
{
	int rval = 0;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_RESTART_MPI_FW;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0|MBX_1;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e6,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
		    "Done %s.\n", __func__);
	}

	return
rval;
}

/*
 * qla2x00_read_asic_temperature() - read the ASIC temperature via the
 * RNID parameters mailbox command. Note: *temp is written from mb[1]
 * even on failure (callers check the return value first).
 */
static int
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RNID_PARAMS;
	/* Data-type selector lives in the high byte of mb[1]. */
	mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	*temp = mcp->mb[1];

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x115a,
		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_read_sfp() - read @len bytes from SFP transceiver device
 * @dev at offset @off into the DMA buffer @sfp_dma. For a single-byte
 * read (len == 1 or BIT_0 in @opt) the byte is returned in *@sfp
 * instead of via DMA.
 */
int
qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	if (len == 1)
		opt |= BIT_0;

	mcp->mb[0] = MBC_READ_SFP;
	mcp->mb[1] = dev;
	mcp->mb[2] = MSW(sfp_dma);
	mcp->mb[3] = LSW(sfp_dma);
	mcp->mb[6] = MSW(MSD(sfp_dma));
	mcp->mb[7] = LSW(MSD(sfp_dma));
	mcp->mb[8] = len;
	mcp->mb[9] = off;
	mcp->mb[10] = opt;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (opt & BIT_0)
		*sfp = mcp->mb[1];

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_write_sfp() - write @len bytes to SFP device @dev at offset
 * @off from the DMA buffer @sfp_dma. For a single-byte write the byte
 * is taken from *@sfp and passed in the length register.
 */
int
qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	if (len == 1)
		opt |= BIT_0;

	if (opt & BIT_0)
		len = *sfp;

	mcp->mb[0] = MBC_WRITE_SFP;
	mcp->mb[1] = dev;
	mcp->mb[2] = MSW(sfp_dma);
	mcp->mb[3] = LSW(sfp_dma);
	mcp->mb[6] = MSW(MSD(sfp_dma));
	mcp->mb[7] = LSW(MSD(sfp_dma));
	mcp->mb[8] = len;
	mcp->mb[9] = off;
	mcp->mb[10] = opt;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10ec,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_xgmac_stats() - fetch 10GbE MAC statistics into the DMA
 * buffer at @stats_dma (CNA parts only). Sizes are exchanged with the
 * firmware in dwords, hence the >> 2 / << 2 conversions.
 */
int
qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
    uint16_t size_in_bytes, uint16_t *actual_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
	    "Entered %s.\n", __func__);

	if (!IS_CNA_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_GET_XGMAC_STATS;
	mcp->mb[2] = MSW(stats_dma);
	mcp->mb[3] = LSW(stats_dma);
	mcp->mb[6] = MSW(MSD(stats_dma));
	mcp->mb[7] = LSW(MSD(stats_dma));
	mcp->mb[8] = size_in_bytes >> 2;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10ef,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
		    "Done %s.\n", __func__);

		*actual_size = mcp->mb[2] << 2;
	}

	return rval;
}

/*
 * qla2x00_get_dcbx_params() - fetch the DCBX TLV parameter block into
 * the DMA buffer at @tlv_dma (CNA parts only).
 */
int
qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
    uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
	    "Entered %s.\n", __func__);

	if (!IS_CNA_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_GET_DCBX_PARAMS;
	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(tlv_dma);
	mcp->mb[3] = LSW(tlv_dma);
	mcp->mb[6] = MSW(MSD(tlv_dma));
	mcp->mb[7] = LSW(MSD(tlv_dma));
	mcp->mb[8] = size;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10f2,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_read_ram_word() - read one 32-bit word of RISC RAM at
 * @risc_addr; the result is assembled from mb[3]:mb[2] into *@data.
 */
int
qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr,
    uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_READ_RAM_EXTENDED;
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[8] = MSW(risc_addr);
	mcp->out_mb = MBX_8|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10f5,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
		    "Done %s.\n", __func__);
		*data = mcp->mb[3] << 16 | mcp->mb[2];
	}

	return rval;
}

/*
 * qla2x00_loopback_test() - run a diagnostic loopback described by
 * @mreq; the raw reply mailboxes (64 bytes) are copied to @mresp for
 * the BSG/diagnostic caller to interpret.
 */
int
qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
    uint16_t *mresp)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
	mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64 bit addressing */

	/* transfer count */
	mcp->mb[10] = LSW(mreq->transfer_size);
	mcp->mb[11] = MSW(mreq->transfer_size);

	/* send data address */
	mcp->mb[14] = LSW(mreq->send_dma);
	mcp->mb[15] = MSW(mreq->send_dma);
	mcp->mb[20] = LSW(MSD(mreq->send_dma));
	mcp->mb[21] = MSW(MSD(mreq->send_dma));

	/* receive data address */
	mcp->mb[16] = LSW(mreq->rcv_dma);
	mcp->mb[17] = MSW(mreq->rcv_dma);
	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));

	/* Iteration count */
	mcp->mb[18] = LSW(mreq->iteration_count);
	mcp->mb[19] = MSW(mreq->iteration_count);

	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->out_mb |= MBX_2;
	mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->buf_size = mreq->transfer_size;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10f8,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
		    "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
		    mcp->mb[3], mcp->mb[18], mcp->mb[19]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
		    "Done %s.\n", __func__);
	}

	/* Copy mailbox information */
	memcpy( mresp, mcp->mb, 64);
	return rval;
}

/*
 * qla2x00_echo_test() - run a diagnostic ECHO ELS described by @mreq;
 * reply mailboxes are copied to @mresp. CNA parts additionally select
 * the FCoE FCF index.
 */
int
qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
    uint16_t *mresp)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
	mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64bit address */
	if (IS_CNA_CAPABLE(ha)) {
		mcp->mb[1] |= BIT_15;
		mcp->mb[2] = vha->fcoe_fcf_idx;
	}
	mcp->mb[16] = LSW(mreq->rcv_dma);
	mcp->mb[17] = MSW(mreq->rcv_dma);
	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));

	mcp->mb[10] = LSW(mreq->transfer_size);

	mcp->mb[14] = LSW(mreq->send_dma);
	mcp->mb[15] = MSW(mreq->send_dma);
	mcp->mb[20] = LSW(MSD(mreq->send_dma));
	mcp->mb[21] = MSW(MSD(mreq->send_dma));

	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(ha))
		mcp->out_mb |= MBX_2;

	mcp->in_mb = MBX_0;
	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
	    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
		mcp->in_mb |= MBX_1;
	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
		mcp->in_mb |= MBX_3;

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	mcp->buf_size = mreq->transfer_size;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10fb,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
		    "Done %s.\n", __func__);
	}

	/* Copy mailbox information */
	memcpy(mresp, mcp->mb, 64);
	return rval;
}

/*
 * qla84xx_reset_chip() - reset the ISP84xx (Menlo), optionally entering
 * diagnostic mode when @enable_diagnostic is non-zero.
 */
int
qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);

	mcp->mb[0] = MBC_ISP84XX_RESET;
	mcp->mb[1] = enable_diagnostic;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
	else
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
		    "Done %s.\n", __func__);

	return rval;
}

/*
 * qla2x00_write_ram_word() - write one 32-bit word @data to RISC RAM
 * at @risc_addr.
 */
int
qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = LSW(data);
	mcp->mb[3] = MSW(data);
	mcp->mb[8] = MSW(risc_addr);
	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1101,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla81xx_write_mpi_register() - write four MPI register values from
 * @mb by driving the mailbox registers directly and polling the host
 * status for the completion interrupt (bypasses the normal mailbox
 * path; used during MPI recovery).
 */
int
qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	uint32_t stat, timer;
	uint16_t mb0 = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	rval = QLA_SUCCESS;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
	    "Entered %s.\n", __func__);

	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Write the MBC data to the registers */
	WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
	WRT_REG_WORD(&reg->mailbox1, mb[0]);
	WRT_REG_WORD(&reg->mailbox2, mb[1]);
	WRT_REG_WORD(&reg->mailbox3, mb[2]);
	WRT_REG_WORD(&reg->mailbox4, mb[3]);

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);

	/* Poll for MBC interrupt */
	for (timer = 6000000; timer; timer--) {
		/* Check for pending interrupts. */
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_INT) {
			stat &= 0xff;

			if (stat == 0x1 || stat == 0x2 ||
			    stat == 0x10 || stat == 0x11) {
				set_bit(MBX_INTERRUPT,
				    &ha->mbx_cmd_flags);
				mb0 = RD_REG_WORD(&reg->mailbox0);
				WRT_REG_DWORD(&reg->hccr,
				    HCCRX_CLR_RISC_INT);
				RD_REG_DWORD(&reg->hccr);
				break;
			}
		}
		udelay(5);
	}

	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
		rval = mb0 & MBS_MASK;
	else
		rval = QLA_FUNCTION_FAILED;

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1104,
		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_data_rate() - query the current link data rate; the
 * result is cached in ha->link_data_rate unless the firmware reports
 * the 0x7 "unknown" value.
 */
int
qla2x00_get_data_rate(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_DATA_RATE;
	mcp->mb[1] = 0;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha))
		mcp->in_mb |= MBX_3;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1107,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
		    "Done %s.\n", __func__);
		if (mcp->mb[1] != 0x7)
			ha->link_data_rate = mcp->mb[1];
	}

	return rval;
}

/*
 * qla81xx_get_port_config() - read the four port-configuration words
 * (mb[1..4]) into @mb (ISP81xx/83xx only).
 */
int
qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
	    "Entered %s.\n", __func__);

	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
		return QLA_FUNCTION_FAILED;
	mcp->mb[0] = MBC_GET_PORT_CONFIG;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x110a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Copy all bits to preserve original value */
		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qla81xx_set_port_config() - write the four port-configuration words
 * from @mb back to the firmware.
 */
int
qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_PORT_CONFIG;
	/* Copy all bits to preserve original setting */
	memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x110d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
		    "Done %s.\n", __func__);

	return rval;
}

/*
 * qla24xx_set_fcp_prio() - set the FCP priority for @loop_id
 * (definition continues past this chunk).
 */
int
qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
		uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
	    "Entered %s.\n", __func__);

	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_PORT_PARAMS;
	mcp->mb[1] = loop_id;
	if
(ha->flags.fcp_prio_enabled) mcp->mb[2] = BIT_1; else mcp->mb[2] = BIT_2; mcp->mb[4] = priority & 0xf; mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (mb != NULL) { mb[0] = mcp->mb[0]; mb[1] = mcp->mb[1]; mb[3] = mcp->mb[3]; mb[4] = mcp->mb[4]; } if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, "Done %s.\n", __func__); } return rval; } int qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) { int rval = QLA_FUNCTION_FAILED; struct qla_hw_data *ha = vha->hw; uint8_t byte; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca, "Entered %s.\n", __func__); if (ha->thermal_support & THERMAL_SUPPORT_I2C) { rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x1, 1, BIT_13|BIT_12|BIT_0); *temp = byte; if (rval == QLA_SUCCESS) goto done; ql_log(ql_log_warn, vha, 0x10c9, "Thermal not supported through I2C bus, trying alternate " "method (ISP access).\n"); ha->thermal_support &= ~THERMAL_SUPPORT_I2C; } if (ha->thermal_support & THERMAL_SUPPORT_ISP) { rval = qla2x00_read_asic_temperature(vha, temp); if (rval == QLA_SUCCESS) goto done; ql_log(ql_log_warn, vha, 0x1019, "Thermal not supported through ISP.\n"); ha->thermal_support &= ~THERMAL_SUPPORT_ISP; } ql_log(ql_log_warn, vha, 0x1150, "Thermal not supported by this card " "(ignoring further requests).\n"); return rval; done: ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018, "Done %s.\n", __func__); return rval; } int qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, "Entered %s.\n", __func__); if (!IS_FWI2_CAPABLE(ha)) return QLA_FUNCTION_FAILED; memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_TOGGLE_INTERRUPT; mcp->mb[1] = 1; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = 
MBX_0; mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1016, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, "Done %s.\n", __func__); } return rval; } int qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, "Entered %s.\n", __func__); if (!IS_QLA82XX(ha)) return QLA_FUNCTION_FAILED; memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_TOGGLE_INTERRUPT; mcp->mb[1] = 0; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x100c, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, "Done %s.\n", __func__); } return rval; } int qla82xx_md_get_template_size(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; int rval = QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, "Entered %s.\n", __func__); memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); mcp->mb[2] = LSW(RQST_TMPLT_SIZE); mcp->mb[3] = MSW(RQST_TMPLT_SIZE); mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; mcp->tov = MBX_TOV_SECONDS; rval = qla2x00_mailbox_command(vha, mcp); /* Always copy back return mailbox values. 
*/ if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1120, "mailbox command FAILED=0x%x, subcode=%x.\n", (mcp->mb[1] << 16) | mcp->mb[0], (mcp->mb[3] << 16) | mcp->mb[2]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, "Done %s.\n", __func__); ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); if (!ha->md_template_size) { ql_dbg(ql_dbg_mbx, vha, 0x1122, "Null template size obtained.\n"); rval = QLA_FUNCTION_FAILED; } } return rval; } int qla82xx_md_get_template(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; int rval = QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, "Entered %s.\n", __func__); ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); if (!ha->md_tmplt_hdr) { ql_log(ql_log_warn, vha, 0x1124, "Unable to allocate memory for Minidump template.\n"); return rval; } memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); mcp->mb[2] = LSW(RQST_TMPLT); mcp->mb[3] = MSW(RQST_TMPLT); mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); mcp->mb[8] = LSW(ha->md_template_size); mcp->mb[9] = MSW(ha->md_template_size); mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; mcp->tov = MBX_TOV_SECONDS; mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1125, "mailbox command FAILED=0x%x, subcode=%x.\n", ((mcp->mb[1] << 16) | mcp->mb[0]), ((mcp->mb[3] << 16) | mcp->mb[2])); } else ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, "Done %s.\n", __func__); return rval; } int qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) { int rval; struct 
qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, "Entered %s.\n", __func__); memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_SET_LED_CONFIG; mcp->mb[1] = led_cfg[0]; mcp->mb[2] = led_cfg[1]; if (IS_QLA8031(ha)) { mcp->mb[3] = led_cfg[2]; mcp->mb[4] = led_cfg[3]; mcp->mb[5] = led_cfg[4]; mcp->mb[6] = led_cfg[5]; } mcp->out_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA8031(ha)) mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; mcp->in_mb = MBX_0; mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1134, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, "Done %s.\n", __func__); } return rval; } int qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, "Entered %s.\n", __func__); memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_GET_LED_CONFIG; mcp->out_mb = MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA8031(ha)) mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1137, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { led_cfg[0] = mcp->mb[1]; led_cfg[1] = mcp->mb[2]; if (IS_QLA8031(ha)) { led_cfg[2] = mcp->mb[3]; led_cfg[3] = mcp->mb[4]; led_cfg[4] = mcp->mb[5]; led_cfg[5] = mcp->mb[6]; } ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, "Done %s.\n", __func__); } return rval; } int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA82XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 
0x1127, "Entered %s.\n", __func__); memset(mcp, 0, sizeof(mbx_cmd_t)); mcp->mb[0] = MBC_SET_LED_CONFIG; if (enable) mcp->mb[7] = 0xE; else mcp->mb[7] = 0xD; mcp->out_mb = MBX_7|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1128, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, "Done %s.\n", __func__); } return rval; } int qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_QLA83XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, "Entered %s.\n", __func__); mcp->mb[0] = MBC_WRITE_REMOTE_REG; mcp->mb[1] = LSW(reg); mcp->mb[2] = MSW(reg); mcp->mb[3] = LSW(data); mcp->mb[4] = MSW(data); mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1131, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, "Done %s.\n", __func__); } return rval; } int qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) { int rval; struct qla_hw_data *ha = vha->hw; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, "Implicit LOGO Unsupported.\n"); return QLA_FUNCTION_FAILED; } ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, "Entering %s.\n", __func__); /* Perform Implicit LOGO. 
*/ mcp->mb[0] = MBC_PORT_LOGOUT; mcp->mb[1] = fcport->loop_id; mcp->mb[10] = BIT_15; mcp->out_mb = MBX_10|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) ql_dbg(ql_dbg_mbx, vha, 0x113d, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); else ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, "Done %s.\n", __func__); return rval; } int qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; unsigned long retry_max_time = jiffies + (2 * HZ); if (!IS_QLA83XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); retry_rd_reg: mcp->mb[0] = MBC_READ_REMOTE_REG; mcp->mb[1] = LSW(reg); mcp->mb[2] = MSW(reg); mcp->out_mb = MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x114c, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { *data = (mcp->mb[3] | (mcp->mb[4] << 16)); if (*data == QLA8XXX_BAD_VALUE) { /* * During soft-reset CAMRAM register reads might * return 0xbad0bad0. So retry for MAX of 2 sec * while reading camram registers. */ if (time_after(jiffies, retry_max_time)) { ql_dbg(ql_dbg_mbx, vha, 0x1141, "Failure to read CAMRAM register. 
" "data=0x%x.\n", *data); return QLA_FUNCTION_FAILED; } msleep(100); goto retry_rd_reg; } ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__); } return rval; } int qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; if (!IS_QLA83XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE; mcp->out_mb = MBX_0; mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1144, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); ha->isp_ops->fw_dump(vha, 0); } else { ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); } return rval; } int qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; uint8_t subcode = (uint8_t)options; struct qla_hw_data *ha = vha->hw; if (!IS_QLA8031(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__); mcp->mb[0] = MBC_SET_ACCESS_CONTROL; mcp->mb[1] = options; mcp->out_mb = MBX_1|MBX_0; if (subcode & BIT_2) { mcp->mb[2] = LSW(start_addr); mcp->mb[3] = MSW(start_addr); mcp->mb[4] = LSW(end_addr); mcp->mb[5] = MSW(end_addr); mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2; } mcp->in_mb = MBX_2|MBX_1|MBX_0; if (!(subcode & (BIT_2 | BIT_5))) mcp->in_mb |= MBX_4|MBX_3; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1147, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[4]); ha->isp_ops->fw_dump(vha, 0); } else { if (subcode & BIT_5) *sector_size = mcp->mb[1]; else if (subcode & (BIT_6 | BIT_7)) { ql_dbg(ql_dbg_mbx, vha, 0x1148, "Driver-lock id=%x%x", 
mcp->mb[4], mcp->mb[3]); } else if (subcode & (BIT_3 | BIT_4)) { ql_dbg(ql_dbg_mbx, vha, 0x1149, "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]); } ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__); } return rval; } int qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, uint32_t size) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; if (!IS_MCTP_CAPABLE(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f, "Entered %s.\n", __func__); mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; mcp->mb[1] = LSW(addr); mcp->mb[2] = MSW(req_dma); mcp->mb[3] = LSW(req_dma); mcp->mb[4] = MSW(size); mcp->mb[5] = LSW(size); mcp->mb[6] = MSW(MSD(req_dma)); mcp->mb[7] = LSW(MSD(req_dma)); mcp->mb[8] = MSW(addr); /* Setting RAM ID to valid */ mcp->mb[10] |= BIT_7; /* For MCTP RAM ID is 0x40 */ mcp->mb[10] |= 0x40; mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| MBX_0; mcp->in_mb = MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x114e, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d, "Done %s.\n", __func__); } return rval; }
gpl-2.0
CaptainThrowback/android_kernel_htc_a32e
arch/um/drivers/slip_user.c
3584
5060
/* * Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL. */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <fcntl.h> #include <string.h> #include <sys/termios.h> #include <sys/wait.h> #include <net_user.h> #include <os.h> #include "slip.h" #include <um_malloc.h> static int slip_user_init(void *data, void *dev) { struct slip_data *pri = data; pri->dev = dev; return 0; } static int set_up_tty(int fd) { int i; struct termios tios; if (tcgetattr(fd, &tios) < 0) { printk(UM_KERN_ERR "could not get initial terminal " "attributes\n"); return -1; } tios.c_cflag = CS8 | CREAD | HUPCL | CLOCAL; tios.c_iflag = IGNBRK | IGNPAR; tios.c_oflag = 0; tios.c_lflag = 0; for (i = 0; i < NCCS; i++) tios.c_cc[i] = 0; tios.c_cc[VMIN] = 1; tios.c_cc[VTIME] = 0; cfsetospeed(&tios, B38400); cfsetispeed(&tios, B38400); if (tcsetattr(fd, TCSAFLUSH, &tios) < 0) { printk(UM_KERN_ERR "failed to set terminal attributes\n"); return -1; } return 0; } struct slip_pre_exec_data { int stdin; int stdout; int close_me; }; static void slip_pre_exec(void *arg) { struct slip_pre_exec_data *data = arg; if (data->stdin >= 0) dup2(data->stdin, 0); dup2(data->stdout, 1); if (data->close_me >= 0) close(data->close_me); } static int slip_tramp(char **argv, int fd) { struct slip_pre_exec_data pe_data; char *output; int pid, fds[2], err, output_len; err = os_pipe(fds, 1, 0); if (err < 0) { printk(UM_KERN_ERR "slip_tramp : pipe failed, err = %d\n", -err); goto out; } err = 0; pe_data.stdin = fd; pe_data.stdout = fds[1]; pe_data.close_me = fds[0]; err = run_helper(slip_pre_exec, &pe_data, argv); if (err < 0) goto out_close; pid = err; output_len = UM_KERN_PAGE_SIZE; output = uml_kmalloc(output_len, UM_GFP_KERNEL); if (output == NULL) { printk(UM_KERN_ERR "slip_tramp : failed to allocate output " "buffer\n"); os_kill_process(pid, 1); err = -ENOMEM; goto out_close; } close(fds[1]); read_output(fds[0], output, output_len); printk("%s", 
output); err = helper_wait(pid); close(fds[0]); kfree(output); return err; out_close: close(fds[0]); close(fds[1]); out: return err; } static int slip_open(void *data) { struct slip_data *pri = data; char version_buf[sizeof("nnnnn\0")]; char gate_buf[sizeof("nnn.nnn.nnn.nnn\0")]; char *argv[] = { "uml_net", version_buf, "slip", "up", gate_buf, NULL }; int sfd, mfd, err; err = get_pty(); if (err < 0) { printk(UM_KERN_ERR "slip-open : Failed to open pty, err = %d\n", -err); goto out; } mfd = err; err = open(ptsname(mfd), O_RDWR, 0); if (err < 0) { printk(UM_KERN_ERR "Couldn't open tty for slip line, " "err = %d\n", -err); goto out_close; } sfd = err; if (set_up_tty(sfd)) goto out_close2; pri->slave = sfd; pri->slip.pos = 0; pri->slip.esc = 0; if (pri->gate_addr != NULL) { sprintf(version_buf, "%d", UML_NET_VERSION); strcpy(gate_buf, pri->gate_addr); err = slip_tramp(argv, sfd); if (err < 0) { printk(UM_KERN_ERR "slip_tramp failed - err = %d\n", -err); goto out_close2; } err = os_get_ifname(pri->slave, pri->name); if (err < 0) { printk(UM_KERN_ERR "get_ifname failed, err = %d\n", -err); goto out_close2; } iter_addresses(pri->dev, open_addr, pri->name); } else { err = os_set_slip(sfd); if (err < 0) { printk(UM_KERN_ERR "Failed to set slip discipline " "encapsulation - err = %d\n", -err); goto out_close2; } } return mfd; out_close2: close(sfd); out_close: close(mfd); out: return err; } static void slip_close(int fd, void *data) { struct slip_data *pri = data; char version_buf[sizeof("nnnnn\0")]; char *argv[] = { "uml_net", version_buf, "slip", "down", pri->name, NULL }; int err; if (pri->gate_addr != NULL) iter_addresses(pri->dev, close_addr, pri->name); sprintf(version_buf, "%d", UML_NET_VERSION); err = slip_tramp(argv, pri->slave); if (err != 0) printk(UM_KERN_ERR "slip_tramp failed - errno = %d\n", -err); close(fd); close(pri->slave); pri->slave = -1; } int slip_user_read(int fd, void *buf, int len, struct slip_data *pri) { return slip_proto_read(fd, buf, len, 
&pri->slip); } int slip_user_write(int fd, void *buf, int len, struct slip_data *pri) { return slip_proto_write(fd, buf, len, &pri->slip); } static void slip_add_addr(unsigned char *addr, unsigned char *netmask, void *data) { struct slip_data *pri = data; if (pri->slave < 0) return; open_addr(addr, netmask, pri->name); } static void slip_del_addr(unsigned char *addr, unsigned char *netmask, void *data) { struct slip_data *pri = data; if (pri->slave < 0) return; close_addr(addr, netmask, pri->name); } const struct net_user_info slip_user_info = { .init = slip_user_init, .open = slip_open, .close = slip_close, .remove = NULL, .add_address = slip_add_addr, .delete_address = slip_del_addr, .mtu = BUF_SIZE, .max_packet = BUF_SIZE, };
gpl-2.0
sorenb-xlnx/linux-xlnx
arch/um/drivers/slip_user.c
3584
5060
/* * Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL. */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <fcntl.h> #include <string.h> #include <sys/termios.h> #include <sys/wait.h> #include <net_user.h> #include <os.h> #include "slip.h" #include <um_malloc.h> static int slip_user_init(void *data, void *dev) { struct slip_data *pri = data; pri->dev = dev; return 0; } static int set_up_tty(int fd) { int i; struct termios tios; if (tcgetattr(fd, &tios) < 0) { printk(UM_KERN_ERR "could not get initial terminal " "attributes\n"); return -1; } tios.c_cflag = CS8 | CREAD | HUPCL | CLOCAL; tios.c_iflag = IGNBRK | IGNPAR; tios.c_oflag = 0; tios.c_lflag = 0; for (i = 0; i < NCCS; i++) tios.c_cc[i] = 0; tios.c_cc[VMIN] = 1; tios.c_cc[VTIME] = 0; cfsetospeed(&tios, B38400); cfsetispeed(&tios, B38400); if (tcsetattr(fd, TCSAFLUSH, &tios) < 0) { printk(UM_KERN_ERR "failed to set terminal attributes\n"); return -1; } return 0; } struct slip_pre_exec_data { int stdin; int stdout; int close_me; }; static void slip_pre_exec(void *arg) { struct slip_pre_exec_data *data = arg; if (data->stdin >= 0) dup2(data->stdin, 0); dup2(data->stdout, 1); if (data->close_me >= 0) close(data->close_me); } static int slip_tramp(char **argv, int fd) { struct slip_pre_exec_data pe_data; char *output; int pid, fds[2], err, output_len; err = os_pipe(fds, 1, 0); if (err < 0) { printk(UM_KERN_ERR "slip_tramp : pipe failed, err = %d\n", -err); goto out; } err = 0; pe_data.stdin = fd; pe_data.stdout = fds[1]; pe_data.close_me = fds[0]; err = run_helper(slip_pre_exec, &pe_data, argv); if (err < 0) goto out_close; pid = err; output_len = UM_KERN_PAGE_SIZE; output = uml_kmalloc(output_len, UM_GFP_KERNEL); if (output == NULL) { printk(UM_KERN_ERR "slip_tramp : failed to allocate output " "buffer\n"); os_kill_process(pid, 1); err = -ENOMEM; goto out_close; } close(fds[1]); read_output(fds[0], output, output_len); printk("%s", 
output); err = helper_wait(pid); close(fds[0]); kfree(output); return err; out_close: close(fds[0]); close(fds[1]); out: return err; } static int slip_open(void *data) { struct slip_data *pri = data; char version_buf[sizeof("nnnnn\0")]; char gate_buf[sizeof("nnn.nnn.nnn.nnn\0")]; char *argv[] = { "uml_net", version_buf, "slip", "up", gate_buf, NULL }; int sfd, mfd, err; err = get_pty(); if (err < 0) { printk(UM_KERN_ERR "slip-open : Failed to open pty, err = %d\n", -err); goto out; } mfd = err; err = open(ptsname(mfd), O_RDWR, 0); if (err < 0) { printk(UM_KERN_ERR "Couldn't open tty for slip line, " "err = %d\n", -err); goto out_close; } sfd = err; if (set_up_tty(sfd)) goto out_close2; pri->slave = sfd; pri->slip.pos = 0; pri->slip.esc = 0; if (pri->gate_addr != NULL) { sprintf(version_buf, "%d", UML_NET_VERSION); strcpy(gate_buf, pri->gate_addr); err = slip_tramp(argv, sfd); if (err < 0) { printk(UM_KERN_ERR "slip_tramp failed - err = %d\n", -err); goto out_close2; } err = os_get_ifname(pri->slave, pri->name); if (err < 0) { printk(UM_KERN_ERR "get_ifname failed, err = %d\n", -err); goto out_close2; } iter_addresses(pri->dev, open_addr, pri->name); } else { err = os_set_slip(sfd); if (err < 0) { printk(UM_KERN_ERR "Failed to set slip discipline " "encapsulation - err = %d\n", -err); goto out_close2; } } return mfd; out_close2: close(sfd); out_close: close(mfd); out: return err; } static void slip_close(int fd, void *data) { struct slip_data *pri = data; char version_buf[sizeof("nnnnn\0")]; char *argv[] = { "uml_net", version_buf, "slip", "down", pri->name, NULL }; int err; if (pri->gate_addr != NULL) iter_addresses(pri->dev, close_addr, pri->name); sprintf(version_buf, "%d", UML_NET_VERSION); err = slip_tramp(argv, pri->slave); if (err != 0) printk(UM_KERN_ERR "slip_tramp failed - errno = %d\n", -err); close(fd); close(pri->slave); pri->slave = -1; } int slip_user_read(int fd, void *buf, int len, struct slip_data *pri) { return slip_proto_read(fd, buf, len, 
&pri->slip); } int slip_user_write(int fd, void *buf, int len, struct slip_data *pri) { return slip_proto_write(fd, buf, len, &pri->slip); } static void slip_add_addr(unsigned char *addr, unsigned char *netmask, void *data) { struct slip_data *pri = data; if (pri->slave < 0) return; open_addr(addr, netmask, pri->name); } static void slip_del_addr(unsigned char *addr, unsigned char *netmask, void *data) { struct slip_data *pri = data; if (pri->slave < 0) return; close_addr(addr, netmask, pri->name); } const struct net_user_info slip_user_info = { .init = slip_user_init, .open = slip_open, .close = slip_close, .remove = NULL, .add_address = slip_add_addr, .delete_address = slip_del_addr, .mtu = BUF_SIZE, .max_packet = BUF_SIZE, };
gpl-2.0
AdiPat/android_kernel_htc_pico
arch/arm/mach-u300/i2c.c
3840
8242
/* * arch/arm/mach-u300/i2c.c * * Copyright (C) 2009 ST-Ericsson AB * License terms: GNU General Public License (GPL) version 2 * * Register board i2c devices * Author: Linus Walleij <linus.walleij@stericsson.com> */ #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/mfd/abx500.h> #include <linux/regulator/machine.h> #include <linux/amba/bus.h> #include <mach/irqs.h> /* * Initial settings of ab3100 registers. * Common for below LDO regulator settings are that * bit 7-5 controls voltage. Bit 4 turns regulator ON(1) or OFF(0). * Bit 3-2 controls sleep enable and bit 1-0 controls sleep mode. */ /* LDO_A 0x16: 2.75V, ON, SLEEP_A, SLEEP OFF GND */ #define LDO_A_SETTING 0x16 /* LDO_C 0x10: 2.65V, ON, SLEEP_A or B, SLEEP full power */ #define LDO_C_SETTING 0x10 /* LDO_D 0x10: 2.65V, ON, sleep mode not used */ #define LDO_D_SETTING 0x10 /* LDO_E 0x10: 1.8V, ON, SLEEP_A or B, SLEEP full power */ #define LDO_E_SETTING 0x10 /* LDO_E SLEEP 0x00: 1.8V, not used, SLEEP_A or B, not used */ #define LDO_E_SLEEP_SETTING 0x00 /* LDO_F 0xD0: 2.5V, ON, SLEEP_A or B, SLEEP full power */ #define LDO_F_SETTING 0xD0 /* LDO_G 0x00: 2.85V, OFF, SLEEP_A or B, SLEEP full power */ #define LDO_G_SETTING 0x00 /* LDO_H 0x18: 2.75V, ON, SLEEP_B, SLEEP full power */ #define LDO_H_SETTING 0x18 /* LDO_K 0x00: 2.75V, OFF, SLEEP_A or B, SLEEP full power */ #define LDO_K_SETTING 0x00 /* LDO_EXT 0x00: Voltage not set, OFF, not used, not used */ #define LDO_EXT_SETTING 0x00 /* BUCK 0x7D: 1.2V, ON, SLEEP_A and B, SLEEP low power */ #define BUCK_SETTING 0x7D /* BUCK SLEEP 0xAC: 1.05V, Not used, SLEEP_A and B, Not used */ #define BUCK_SLEEP_SETTING 0xAC #ifdef CONFIG_AB3100_CORE static struct regulator_consumer_supply supply_ldo_c[] = { { .dev_name = "ab3100-codec", .supply = "vaudio", /* Powers the codec */ }, }; /* * This one needs to be a supply so we can turn it off * in order to shut down the system. 
*/
/*
 * Consumer (supply) mappings for the AB3100 regulators on this board:
 * each array ties a named supply rail to the device that consumes it.
 */
static struct regulator_consumer_supply supply_ldo_d[] = {
	{
		.dev = NULL,
		.supply = "vana15", /* Powers the SoC (CPU etc) */
	},
};

static struct regulator_consumer_supply supply_ldo_g[] = {
	{
		.dev_name = "mmci",
		.supply = "vmmc", /* Powers MMC/SD card */
	},
};

static struct regulator_consumer_supply supply_ldo_h[] = {
	{
		.dev_name = "xgam_pdi",
		.supply = "vdisp", /* Powers camera, display etc */
	},
};

static struct regulator_consumer_supply supply_ldo_k[] = {
	{
		.dev_name = "irda",
		.supply = "vir", /* Power IrDA */
	},
};

/*
 * This is a placeholder for whoever wish to use the
 * external power.
 */
static struct regulator_consumer_supply supply_ldo_ext[] = {
	{
		.dev = NULL,
		.supply = "vext", /* External power */
	},
};

/* Preset (hardware defined) voltages for these regulators */
#define LDO_A_VOLTAGE 2750000
#define LDO_C_VOLTAGE 2650000
#define LDO_D_VOLTAGE 2650000

/*
 * AB3100 platform data: per-regulator constraints (in order A, C, D, E,
 * F, G, H, external, buck) plus the initial register values written to
 * the chip at setup (reg_initvals).
 * NOTE(review): supply_ldo_c is defined earlier in this file, outside
 * this hunk.
 */
static struct ab3100_platform_data ab3100_plf_data = {
	.reg_constraints = {
		/* LDO A routing and constraints */
		{
			.constraints = {
				.name = "vrad",
				.min_uV = LDO_A_VOLTAGE,
				.max_uV = LDO_A_VOLTAGE,
				.valid_modes_mask = REGULATOR_MODE_NORMAL,
				.always_on = 1,
				.boot_on = 1,
			},
		},
		/* LDO C routing and constraints */
		{
			.constraints = {
				.min_uV = LDO_C_VOLTAGE,
				.max_uV = LDO_C_VOLTAGE,
				.valid_modes_mask = REGULATOR_MODE_NORMAL,
			},
			.num_consumer_supplies = ARRAY_SIZE(supply_ldo_c),
			.consumer_supplies = supply_ldo_c,
		},
		/* LDO D routing and constraints */
		{
			.constraints = {
				.min_uV = LDO_D_VOLTAGE,
				.max_uV = LDO_D_VOLTAGE,
				.valid_modes_mask = REGULATOR_MODE_NORMAL,
				.valid_ops_mask = REGULATOR_CHANGE_STATUS,
				/*
				 * Actually this is boot_on but we need
				 * to reference count it externally to
				 * be able to shut down the system.
				 */
			},
			.num_consumer_supplies = ARRAY_SIZE(supply_ldo_d),
			.consumer_supplies = supply_ldo_d,
		},
		/* LDO E routing and constraints */
		{
			.constraints = {
				.name = "vio",
				.min_uV = 1800000,
				.max_uV = 1800000,
				.valid_modes_mask = REGULATOR_MODE_NORMAL,
				.valid_ops_mask =
					REGULATOR_CHANGE_VOLTAGE |
					REGULATOR_CHANGE_STATUS,
				.always_on = 1,
				.boot_on = 1,
			},
		},
		/* LDO F routing and constraints */
		{
			.constraints = {
				.name = "vana25",
				.min_uV = 2500000,
				.max_uV = 2500000,
				.valid_modes_mask = REGULATOR_MODE_NORMAL,
				.valid_ops_mask =
					REGULATOR_CHANGE_VOLTAGE |
					REGULATOR_CHANGE_STATUS,
				.always_on = 1,
				.boot_on = 1,
			},
		},
		/* LDO G routing and constraints */
		{
			.constraints = {
				.min_uV = 1500000,
				.max_uV = 2850000,
				.valid_modes_mask = REGULATOR_MODE_NORMAL,
				.valid_ops_mask =
					REGULATOR_CHANGE_VOLTAGE |
					REGULATOR_CHANGE_STATUS,
			},
			.num_consumer_supplies = ARRAY_SIZE(supply_ldo_g),
			.consumer_supplies = supply_ldo_g,
		},
		/* LDO H routing and constraints */
		{
			.constraints = {
				.min_uV = 1200000,
				.max_uV = 2750000,
				.valid_modes_mask = REGULATOR_MODE_NORMAL,
				.valid_ops_mask =
					REGULATOR_CHANGE_VOLTAGE |
					REGULATOR_CHANGE_STATUS,
			},
			.num_consumer_supplies = ARRAY_SIZE(supply_ldo_h),
			.consumer_supplies = supply_ldo_h,
		},
		/* LDO K routing and constraints */
		{
			.constraints = {
				.min_uV = 1800000,
				.max_uV = 2750000,
				.valid_modes_mask = REGULATOR_MODE_NORMAL,
				.valid_ops_mask =
					REGULATOR_CHANGE_VOLTAGE |
					REGULATOR_CHANGE_STATUS,
			},
			.num_consumer_supplies = ARRAY_SIZE(supply_ldo_k),
			.consumer_supplies = supply_ldo_k,
		},
		/* External regulator interface. No fixed voltage specified.
		 * If we knew the voltage of the external regulator and it
		 * was connected on the board, we could add the (fixed)
		 * voltage for it here.
		 */
		{
			.constraints = {
				.min_uV = 0,
				.max_uV = 0,
				.valid_modes_mask = REGULATOR_MODE_NORMAL,
				.valid_ops_mask = REGULATOR_CHANGE_STATUS,
			},
			.num_consumer_supplies = ARRAY_SIZE(supply_ldo_ext),
			.consumer_supplies = supply_ldo_ext,
		},
		/* Buck converter routing and constraints */
		{
			.constraints = {
				.name = "vcore",
				.min_uV = 1200000,
				.max_uV = 1800000,
				.valid_modes_mask = REGULATOR_MODE_NORMAL,
				.valid_ops_mask =
					REGULATOR_CHANGE_VOLTAGE |
					REGULATOR_CHANGE_STATUS,
				.always_on = 1,
				.boot_on = 1,
			},
		},
	},
	.reg_initvals = {
		LDO_A_SETTING,
		LDO_C_SETTING,
		LDO_E_SETTING,
		LDO_E_SLEEP_SETTING,
		LDO_F_SETTING,
		LDO_G_SETTING,
		LDO_H_SETTING,
		LDO_K_SETTING,
		LDO_EXT_SETTING,
		BUCK_SETTING,
		BUCK_SLEEP_SETTING,
		LDO_D_SETTING,
	},
};
#endif

#ifdef CONFIG_AB3550_CORE
/*
 * Initial AB3550 register settings: mask all interrupts (IMR1..IMR5)
 * at probe time; individual IRQs are unmasked on demand later.
 */
static struct abx500_init_settings ab3550_init_settings[] = {
	{
		.bank = 0,
		.reg = AB3550_IMR1,
		.setting = 0xff
	},
	{
		.bank = 0,
		.reg = AB3550_IMR2,
		.setting = 0xff
	},
	{
		.bank = 0,
		.reg = AB3550_IMR3,
		.setting = 0xff
	},
	{
		.bank = 0,
		.reg = AB3550_IMR4,
		.setting = 0xff
	},
	{
		.bank = 0,
		.reg = AB3550_IMR5,
		/* The two most significant bits are not used */
		.setting = 0x3f
	},
};

/* AB3550 platform data: IRQ window plus the init settings above. */
static struct ab3550_platform_data ab3550_plf_data = {
	.irq = {
		.base = IRQ_AB3550_BASE,
		.count = (IRQ_AB3550_END - IRQ_AB3550_BASE + 1),
	},
	.dev_data = {
	},
	.init_settings = ab3550_init_settings,
	.init_settings_sz = ARRAY_SIZE(ab3550_init_settings),
};
#endif

/*
 * I2C bus 0: the mixed-signal companion chip (AB3550 or AB3100
 * depending on kernel config); both variants use the same external
 * IRQ line but different slave addresses.
 */
static struct i2c_board_info __initdata bus0_i2c_board_info[] = {
#if defined(CONFIG_AB3550_CORE)
	{
		.type = "ab3550",
		.addr = 0x4A,
		.irq = IRQ_U300_IRQ0_EXT,
		.platform_data = &ab3550_plf_data,
	},
#elif defined(CONFIG_AB3100_CORE)
	{
		.type = "ab3100",
		.addr = 0x48,
		.irq = IRQ_U300_IRQ0_EXT,
		.platform_data = &ab3100_plf_data,
	},
#else
	{ },
#endif
};

/* I2C bus 1: camera devices, present on the BS335 board variant only. */
static struct i2c_board_info __initdata bus1_i2c_board_info[] = {
#ifdef CONFIG_MACH_U300_BS335
	{
		.type = "fwcam",
		.addr = 0x10,
	},
	{
		.type = "fwcam",
		.addr = 0x5d,
	},
#else
	{ },
#endif
};

/*
 * Register this board's static I2C devices with the I2C core.
 * Called once during machine init.
 */
void __init u300_i2c_register_board_devices(void)
{
	i2c_register_board_info(0, bus0_i2c_board_info,
				ARRAY_SIZE(bus0_i2c_board_info));
	/*
	 * This makes the core shut down all unused regulators
	 * after all the initcalls have completed.
	 */
	regulator_has_full_constraints();
	i2c_register_board_info(1, bus1_i2c_board_info,
				ARRAY_SIZE(bus1_i2c_board_info));
}
gpl-2.0
HackerOO7/android_kernel_huawei_u8951
drivers/media/dvb/frontends/cxd2820r_core.c
4864
14358
/*
 * Sony CXD2820R demodulator driver
 *
 * Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "cxd2820r_priv.h"

int cxd2820r_debug;
module_param_named(debug, cxd2820r_debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");

/*
 * Largest I2C transfer handled by the on-stack buffers below.
 * A fixed-size buffer replaces the previous variable-length arrays
 * (u8 buf[len+1]) so the stack footprint is bounded and known at
 * compile time; VLAs are deprecated in kernel code.
 */
#define MAX_XFER_SIZE 64

/*
 * Write multiple registers.
 * @priv: driver state
 * @i2c: I2C slave address to use
 * @reg: first register address
 * @val: bytes to write
 * @len: number of bytes in @val
 *
 * Returns 0 on success, -EINVAL when @len does not fit the transfer
 * buffer, -EREMOTEIO on I2C failure.
 */
static int cxd2820r_wr_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
	u8 *val, int len)
{
	int ret;
	u8 buf[MAX_XFER_SIZE];
	struct i2c_msg msg[1] = {
		{
			.addr = i2c,
			.flags = 0,
			.len = len + 1, /* register byte + payload */
			.buf = buf,
		}
	};

	if (1 + len > sizeof(buf)) {
		warn("i2c wr reg=%02x: len=%d is too big!", reg, len);
		return -EINVAL;
	}

	buf[0] = reg;
	memcpy(&buf[1], val, len);

	ret = i2c_transfer(priv->i2c, msg, 1);
	if (ret == 1) {
		ret = 0;
	} else {
		warn("i2c wr failed ret:%d reg:%02x len:%d", ret, reg, len);
		ret = -EREMOTEIO;
	}
	return ret;
}

/*
 * Read multiple registers.
 * Same contract as the write helper above: register address is written
 * first, then @len bytes are read back into @val.
 */
static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
	u8 *val, int len)
{
	int ret;
	u8 buf[MAX_XFER_SIZE];
	struct i2c_msg msg[2] = {
		{
			.addr = i2c,
			.flags = 0,
			.len = 1,
			.buf = &reg,
		}, {
			.addr = i2c,
			.flags = I2C_M_RD,
			.len = len,
			.buf = buf,
		}
	};

	if (len > sizeof(buf)) {
		warn("i2c rd reg=%02x: len=%d is too big!", reg, len);
		return -EINVAL;
	}

	ret = i2c_transfer(priv->i2c, msg, 2);
	if (ret == 2) {
		memcpy(val, buf, len);
		ret = 0;
	} else {
		warn("i2c rd failed ret:%d reg:%02x len:%d", ret, reg, len);
		ret = -EREMOTEIO;
	}
	return ret;
}

/* write multiple registers */
int cxd2820r_wr_regs(struct cxd2820r_priv *priv, u32
reginfo, u8 *val, int len)
{
	int ret;
	u8 i2c_addr;
	/* reginfo packs: bits [7:0] register, [15:8] bank, [16] I2C select */
	u8 reg = (reginfo >> 0) & 0xff;
	u8 bank = (reginfo >> 8) & 0xff;
	u8 i2c = (reginfo >> 16) & 0x01;

	/* select I2C */
	if (i2c)
		i2c_addr = priv->cfg.i2c_address | (1 << 1); /* DVB-C */
	else
		i2c_addr = priv->cfg.i2c_address; /* DVB-T/T2 */

	/* switch bank if needed */
	if (bank != priv->bank[i2c]) {
		ret = cxd2820r_wr_regs_i2c(priv, i2c_addr, 0x00, &bank, 1);
		if (ret)
			return ret;
		priv->bank[i2c] = bank;
	}
	return cxd2820r_wr_regs_i2c(priv, i2c_addr, reg, val, len);
}

/* read multiple registers (same reginfo encoding as the write path) */
int cxd2820r_rd_regs(struct cxd2820r_priv *priv, u32 reginfo, u8 *val,
	int len)
{
	int ret;
	u8 i2c_addr;
	u8 reg = (reginfo >> 0) & 0xff;
	u8 bank = (reginfo >> 8) & 0xff;
	u8 i2c = (reginfo >> 16) & 0x01;

	/* select I2C */
	if (i2c)
		i2c_addr = priv->cfg.i2c_address | (1 << 1); /* DVB-C */
	else
		i2c_addr = priv->cfg.i2c_address; /* DVB-T/T2 */

	/* switch bank if needed */
	if (bank != priv->bank[i2c]) {
		ret = cxd2820r_wr_regs_i2c(priv, i2c_addr, 0x00, &bank, 1);
		if (ret)
			return ret;
		priv->bank[i2c] = bank;
	}
	return cxd2820r_rd_regs_i2c(priv, i2c_addr, reg, val, len);
}

/* write single register */
int cxd2820r_wr_reg(struct cxd2820r_priv *priv, u32 reg, u8 val)
{
	return cxd2820r_wr_regs(priv, reg, &val, 1);
}

/* read single register */
int cxd2820r_rd_reg(struct cxd2820r_priv *priv, u32 reg, u8 *val)
{
	return cxd2820r_rd_regs(priv, reg, val, 1);
}

/* write single register with mask (read-modify-write of unmasked bits) */
int cxd2820r_wr_reg_mask(struct cxd2820r_priv *priv, u32 reg, u8 val,
	u8 mask)
{
	int ret;
	u8 tmp;

	/* no need for read if whole reg is written */
	if (mask != 0xff) {
		ret = cxd2820r_rd_reg(priv, reg, &tmp);
		if (ret)
			return ret;

		val &= mask;
		tmp &= ~mask;
		val |= tmp;
	}
	return cxd2820r_wr_reg(priv, reg, val);
}

/*
 * Program the chip GPIO pins for the currently selected delivery
 * system. The per-system configs come from priv->cfg; writes are
 * skipped when the cached GPIO state already matches.
 */
int cxd2820r_gpio(struct dvb_frontend *fe)
{
	struct cxd2820r_priv *priv = fe->demodulator_priv;
	int ret, i;
	u8 *gpio, tmp0, tmp1;
	dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);

	switch (fe->dtv_property_cache.delivery_system) {
	case SYS_DVBT:
		gpio = priv->cfg.gpio_dvbt;
		break;
	case SYS_DVBT2:
		gpio = priv->cfg.gpio_dvbt2;
		break;
	case SYS_DVBC_ANNEX_AC:
		gpio = priv->cfg.gpio_dvbc;
		break;
	default:
		ret = -EINVAL;
		goto error;
	}

	/* update GPIOs only when needed */
	if (!memcmp(gpio, priv->gpio, sizeof(priv->gpio)))
		return 0;

	tmp0 = 0x00;
	tmp1 = 0x00;
	for (i = 0; i < sizeof(priv->gpio); i++) {
		/* enable / disable */
		if (gpio[i] & CXD2820R_GPIO_E)
			tmp0 |= (2 << 6) >> (2 * i);
		else
			tmp0 |= (1 << 6) >> (2 * i);

		/* input / output */
		if (gpio[i] & CXD2820R_GPIO_I)
			tmp1 |= (1 << (3 + i));
		else
			tmp1 |= (0 << (3 + i));

		/* high / low */
		if (gpio[i] & CXD2820R_GPIO_H)
			tmp1 |= (1 << (0 + i));
		else
			tmp1 |= (0 << (0 + i));

		dbg("%s: GPIO i=%d %02x %02x", __func__, i, tmp0, tmp1);
	}

	dbg("%s: wr gpio=%02x %02x", __func__, tmp0, tmp1);

	/* write bits [7:2] */
	ret = cxd2820r_wr_reg_mask(priv, 0x00089, tmp0, 0xfc);
	if (ret)
		goto error;

	/* write bits [5:0] */
	ret = cxd2820r_wr_reg_mask(priv, 0x0008e, tmp1, 0x3f);
	if (ret)
		goto error;

	memcpy(priv->gpio, gpio, sizeof(priv->gpio));
	return ret;
error:
	dbg("%s: failed:%d", __func__, ret);
	return ret;
}

/* 64 bit div with round closest, like DIV_ROUND_CLOSEST but 64 bit */
u32 cxd2820r_div_u64_round_closest(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}

/*
 * Initialize and tune the demod for the delivery system currently
 * selected in the property cache (DVB-T, DVB-T2 or DVB-C annex A).
 */
static int cxd2820r_set_frontend(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	int ret;

	dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
	switch (c->delivery_system) {
	case SYS_DVBT:
		ret = cxd2820r_init_t(fe);
		if (ret < 0)
			goto err;
		ret = cxd2820r_set_frontend_t(fe);
		if (ret < 0)
			goto err;
		break;
	case SYS_DVBT2:
		/* T2 shares the DVB-T init path */
		ret = cxd2820r_init_t(fe);
		if (ret < 0)
			goto err;
		ret = cxd2820r_set_frontend_t2(fe);
		if (ret < 0)
			goto err;
		break;
	case SYS_DVBC_ANNEX_A:
		ret = cxd2820r_init_c(fe);
		if (ret < 0)
			goto err;
		ret = cxd2820r_set_frontend_c(fe);
		if (ret < 0)
			goto err;
		break;
	default:
		dbg("%s: error state=%d", __func__,
			fe->dtv_property_cache.delivery_system);
		ret = -EINVAL;
		break;
	}
err:
	return ret;
}

/* dispatch read_status to the per-delivery-system implementation */
static int cxd2820r_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
	int ret;

	dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
	switch (fe->dtv_property_cache.delivery_system) {
	case SYS_DVBT:
		ret = cxd2820r_read_status_t(fe, status);
		break;
	case SYS_DVBT2:
		ret = cxd2820r_read_status_t2(fe, status);
		break;
	case SYS_DVBC_ANNEX_A:
		ret = cxd2820r_read_status_c(fe, status);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

/* dispatch get_frontend; no-op until a delivery system was selected */
static int cxd2820r_get_frontend(struct dvb_frontend *fe)
{
	struct cxd2820r_priv *priv = fe->demodulator_priv;
	int ret;

	dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
	if (priv->delivery_system == SYS_UNDEFINED)
		return 0;

	switch (fe->dtv_property_cache.delivery_system) {
	case SYS_DVBT:
		ret = cxd2820r_get_frontend_t(fe);
		break;
	case SYS_DVBT2:
		ret = cxd2820r_get_frontend_t2(fe);
		break;
	case SYS_DVBC_ANNEX_A:
		ret = cxd2820r_get_frontend_c(fe);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

/* dispatch BER readout */
static int cxd2820r_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	int ret;

	dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
	switch (fe->dtv_property_cache.delivery_system) {
	case SYS_DVBT:
		ret = cxd2820r_read_ber_t(fe, ber);
		break;
	case SYS_DVBT2:
		ret = cxd2820r_read_ber_t2(fe, ber);
		break;
	case SYS_DVBC_ANNEX_A:
		ret = cxd2820r_read_ber_c(fe, ber);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

/* dispatch signal-strength readout */
static int cxd2820r_read_signal_strength(struct dvb_frontend *fe,
	u16 *strength)
{
	int ret;

	dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
	switch (fe->dtv_property_cache.delivery_system) {
	case SYS_DVBT:
		ret = cxd2820r_read_signal_strength_t(fe, strength);
		break;
	case SYS_DVBT2:
		ret = cxd2820r_read_signal_strength_t2(fe, strength);
		break;
	case SYS_DVBC_ANNEX_A:
		ret = cxd2820r_read_signal_strength_c(fe, strength);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

/* dispatch SNR readout */
static int cxd2820r_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	int ret;

	dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
	switch (fe->dtv_property_cache.delivery_system) {
	case SYS_DVBT:
		ret = cxd2820r_read_snr_t(fe, snr);
		break;
	case SYS_DVBT2:
		ret = cxd2820r_read_snr_t2(fe, snr);
		break;
	case SYS_DVBC_ANNEX_A:
		ret = cxd2820r_read_snr_c(fe, snr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

/* dispatch uncorrected-blocks readout */
static int cxd2820r_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
	int ret;

	dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
	switch (fe->dtv_property_cache.delivery_system) {
	case SYS_DVBT:
		ret = cxd2820r_read_ucblocks_t(fe, ucblocks);
		break;
	case SYS_DVBT2:
		ret = cxd2820r_read_ucblocks_t2(fe, ucblocks);
		break;
	case SYS_DVBC_ANNEX_A:
		ret = cxd2820r_read_ucblocks_c(fe, ucblocks);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

/* nothing to do here; real init happens in set_frontend */
static int cxd2820r_init(struct dvb_frontend *fe)
{
	return 0;
}

/* dispatch sleep to the active delivery system's implementation */
static int cxd2820r_sleep(struct dvb_frontend *fe)
{
	int ret;

	dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
	switch (fe->dtv_property_cache.delivery_system) {
	case SYS_DVBT:
		ret = cxd2820r_sleep_t(fe);
		break;
	case SYS_DVBT2:
		ret = cxd2820r_sleep_t2(fe);
		break;
	case SYS_DVBC_ANNEX_A:
		ret = cxd2820r_sleep_c(fe);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

/* dispatch tune-settings query */
static int cxd2820r_get_tune_settings(struct dvb_frontend *fe,
	struct dvb_frontend_tune_settings *s)
{
	int ret;

	dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
	switch (fe->dtv_property_cache.delivery_system) {
	case SYS_DVBT:
		ret = cxd2820r_get_tune_settings_t(fe, s);
		break;
	case SYS_DVBT2:
		ret = cxd2820r_get_tune_settings_t2(fe, s);
		break;
	case SYS_DVBC_ANNEX_A:
		ret = cxd2820r_get_tune_settings_c(fe, s);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

/*
 * Custom search algorithm: tune, poll for lock, and alternate between
 * DVB-T and DVB-T2 after a failed attempt (the chip cannot autodetect
 * between them in one shot).
 */
static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
{
	struct cxd2820r_priv *priv = fe->demodulator_priv;
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	int ret, i;
	fe_status_t status = 0;
	dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);

	/* switch between DVB-T and DVB-T2 when tune fails */
	if (priv->last_tune_failed) {
		if (priv->delivery_system == SYS_DVBT) {
			ret = cxd2820r_sleep_t(fe);
			if (ret)
				goto error;

			c->delivery_system = SYS_DVBT2;
		} else if (priv->delivery_system == SYS_DVBT2) {
			ret = cxd2820r_sleep_t2(fe);
			if (ret)
				goto error;

			c->delivery_system = SYS_DVBT;
		}
	}

	/* set frontend */
	ret = cxd2820r_set_frontend(fe);
	if (ret)
		goto error;

	/* frontend lock wait loop count (50 ms per iteration below) */
	switch (priv->delivery_system) {
	case SYS_DVBT:
	case SYS_DVBC_ANNEX_A:
		i = 20;
		break;
	case SYS_DVBT2:
		i = 40;
		break;
	case SYS_UNDEFINED:
	default:
		i = 0;
		break;
	}

	/* wait frontend lock */
	for (; i > 0; i--) {
		dbg("%s: LOOP=%d", __func__, i);
		msleep(50);
		ret = cxd2820r_read_status(fe, &status);
		if (ret)
			goto error;

		if (status & FE_HAS_SIGNAL)
			break;
	}

	/* check if we have a valid signal */
	if (status) {
		priv->last_tune_failed = 0;
		return DVBFE_ALGO_SEARCH_SUCCESS;
	} else {
		priv->last_tune_failed = 1;
		return DVBFE_ALGO_SEARCH_AGAIN;
	}

error:
	dbg("%s: failed:%d", __func__, ret);
	return DVBFE_ALGO_SEARCH_ERROR;
}

/* tell the DVB core to use our custom search algorithm */
static int cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
{
	return DVBFE_ALGO_CUSTOM;
}

/* free driver state on frontend detach */
static void cxd2820r_release(struct dvb_frontend *fe)
{
	struct cxd2820r_priv *priv = fe->demodulator_priv;
	dbg("%s", __func__);

	kfree(priv);
	return;
}

static int cxd2820r_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
	struct cxd2820r_priv *priv = fe->demodulator_priv;
	dbg("%s: %d", __func__, enable);

	/* Bit 0 of reg 0xdb in bank 0x00 controls I2C repeater */
	return cxd2820r_wr_reg_mask(priv, 0xdb, enable ? 1 : 0, 0x1);
}

static const struct dvb_frontend_ops cxd2820r_ops = {
	.delsys = { SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A },
	/* default: DVB-T/T2 */
	.info = {
		.name = "Sony CXD2820R",

		.caps = FE_CAN_FEC_1_2 |
			FE_CAN_FEC_2_3 |
			FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 |
			FE_CAN_FEC_7_8 |
			FE_CAN_FEC_AUTO |
			FE_CAN_QPSK |
			FE_CAN_QAM_16 |
			FE_CAN_QAM_32 |
			FE_CAN_QAM_64 |
			FE_CAN_QAM_128 |
			FE_CAN_QAM_256 |
			FE_CAN_QAM_AUTO |
			FE_CAN_TRANSMISSION_MODE_AUTO |
			FE_CAN_GUARD_INTERVAL_AUTO |
			FE_CAN_HIERARCHY_AUTO |
			FE_CAN_MUTE_TS |
			FE_CAN_2G_MODULATION
	},

	.release = cxd2820r_release,
	.init = cxd2820r_init,
	.sleep = cxd2820r_sleep,

	.get_tune_settings = cxd2820r_get_tune_settings,
	.i2c_gate_ctrl = cxd2820r_i2c_gate_ctrl,

	.get_frontend = cxd2820r_get_frontend,

	.get_frontend_algo = cxd2820r_get_frontend_algo,
	.search = cxd2820r_search,

	.read_status = cxd2820r_read_status,
	.read_snr = cxd2820r_read_snr,
	.read_ber = cxd2820r_read_ber,
	.read_ucblocks = cxd2820r_read_ucblocks,
	.read_signal_strength = cxd2820r_read_signal_strength,
};

/*
 * Allocate driver state, verify the chip ID and hand back an
 * initialized dvb_frontend; returns NULL on any failure.
 */
struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg,
	struct i2c_adapter *i2c)
{
	struct cxd2820r_priv *priv = NULL;
	int ret;
	u8 tmp;

	priv = kzalloc(sizeof (struct cxd2820r_priv), GFP_KERNEL);
	if (!priv)
		goto error;

	priv->i2c = i2c;
	memcpy(&priv->cfg, cfg, sizeof (struct cxd2820r_config));

	/* 0xff = no bank selected yet; forces a bank write on first access */
	priv->bank[0] = priv->bank[1] = 0xff;
	ret = cxd2820r_rd_reg(priv, 0x000fd, &tmp);
	dbg("%s: chip id=%02x", __func__, tmp);
	if (ret || tmp != 0xe1)
		goto error;

	memcpy(&priv->fe.ops, &cxd2820r_ops, sizeof (struct dvb_frontend_ops));
	priv->fe.demodulator_priv = priv;
	return &priv->fe;
error:
	kfree(priv);
	return NULL;
}
EXPORT_SYMBOL(cxd2820r_attach);

MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Sony CXD2820R demodulator driver");
MODULE_LICENSE("GPL");
gpl-2.0
bingfengxiaokai/kernel3.4.2-transplant
arch/arm/mach-mxs/pm.c
5120
1045
/* * Copyright (C) 2010 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/suspend.h> #include <linux/io.h> static int mxs_suspend_enter(suspend_state_t state) { switch (state) { case PM_SUSPEND_MEM: cpu_do_idle(); break; default: return -EINVAL; } return 0; } static struct platform_suspend_ops mxs_suspend_ops = { .enter = mxs_suspend_enter, .valid = suspend_valid_only_mem, }; static int __init mxs_pm_init(void) { suspend_set_ops(&mxs_suspend_ops); return 0; } device_initcall(mxs_pm_init);
gpl-2.0
zeroblade1984/htc_msm8930_kernel
net/can/proc.c
5120
15199
/* * proc.c - procfs support for Protocol family CAN core module * * Copyright (c) 2002-2007 Volkswagen Group Electronic Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Volkswagen nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * The provided data structures and external interfaces from this code * are not restricted to be used by modules with a GPL compatible license. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/can/core.h>

#include "af_can.h"

/*
 * proc filenames for the PF_CAN core
 */
#define CAN_PROC_VERSION "version"
#define CAN_PROC_STATS "stats"
#define CAN_PROC_RESET_STATS "reset_stats"
#define CAN_PROC_RCVLIST_ALL "rcvlist_all"
#define CAN_PROC_RCVLIST_FIL "rcvlist_fil"
#define CAN_PROC_RCVLIST_INV "rcvlist_inv"
#define CAN_PROC_RCVLIST_SFF "rcvlist_sff"
#define CAN_PROC_RCVLIST_EFF "rcvlist_eff"
#define CAN_PROC_RCVLIST_ERR "rcvlist_err"

/* entries under /proc/net/can/, created in can_init_proc() */
static struct proc_dir_entry *can_dir;
static struct proc_dir_entry *pde_version;
static struct proc_dir_entry *pde_stats;
static struct proc_dir_entry *pde_reset_stats;
static struct proc_dir_entry *pde_rcvlist_all;
static struct proc_dir_entry *pde_rcvlist_fil;
static struct proc_dir_entry *pde_rcvlist_inv;
static struct proc_dir_entry *pde_rcvlist_sff;
static struct proc_dir_entry *pde_rcvlist_eff;
static struct proc_dir_entry *pde_rcvlist_err;

/* set when userspace requested a statistics reset via reset_stats */
static int user_reset;

static const char rx_list_name[][8] = {
	[RX_ERR] = "rx_err",
	[RX_ALL] = "rx_all",
	[RX_FIL] = "rx_fil",
	[RX_INV] = "rx_inv",
	[RX_EFF] = "rx_eff",
};

/* receive filters subscribed for 'all' CAN devices */
extern struct dev_rcv_lists can_rx_alldev_list;

/*
 * af_can statistics stuff
 */

/* Zero all statistics counters and account for the reset itself. */
static void can_init_stats(void)
{
	/*
	 * This memset function is called from a timer context (when
	 * can_stattimer is active which is the default) OR in a process
	 * context (reading the proc_fs when can_stattimer is disabled).
	 */
	memset(&can_stats, 0, sizeof(can_stats));
	can_stats.jiffies_init = jiffies;

	can_pstats.stats_reset++;

	if (user_reset) {
		user_reset = 0;
		can_pstats.user_reset++;
	}
}

/* frames-per-second between two jiffies snapshots; 0 when no time passed */
static unsigned long calc_rate(unsigned long oldjif, unsigned long newjif,
			       unsigned long count)
{
	unsigned long rate;

	if (oldjif == newjif)
		return 0;

	/* see can_stat_update() - this should NEVER happen! */
	if (count > (ULONG_MAX / HZ)) {
		printk(KERN_ERR "can: calc_rate: count exceeded! %ld\n",
		       count);
		return 99999999;
	}

	rate = (count * HZ) / (newjif - oldjif);

	return rate;
}

/*
 * Periodic (1 s) statistics update, run from can_stattimer. Recomputes
 * total/current/max rates and match ratios, resetting the counters
 * whenever an overflow in calc_rate() would become possible.
 */
void can_stat_update(unsigned long data)
{
	unsigned long j = jiffies; /* snapshot */

	/* restart counting in timer context on user request */
	if (user_reset)
		can_init_stats();

	/* restart counting on jiffies overflow */
	if (j < can_stats.jiffies_init)
		can_init_stats();

	/* prevent overflow in calc_rate() */
	if (can_stats.rx_frames > (ULONG_MAX / HZ))
		can_init_stats();

	/* prevent overflow in calc_rate() */
	if (can_stats.tx_frames > (ULONG_MAX / HZ))
		can_init_stats();

	/* matches overflow - very improbable */
	if (can_stats.matches > (ULONG_MAX / 100))
		can_init_stats();

	/* calc total values */
	if (can_stats.rx_frames)
		can_stats.total_rx_match_ratio = (can_stats.matches * 100) /
			can_stats.rx_frames;

	can_stats.total_tx_rate = calc_rate(can_stats.jiffies_init, j,
					    can_stats.tx_frames);
	can_stats.total_rx_rate = calc_rate(can_stats.jiffies_init, j,
					    can_stats.rx_frames);

	/* calc current values */
	if (can_stats.rx_frames_delta)
		can_stats.current_rx_match_ratio =
			(can_stats.matches_delta * 100) /
			can_stats.rx_frames_delta;

	can_stats.current_tx_rate = calc_rate(0, HZ, can_stats.tx_frames_delta);
	can_stats.current_rx_rate = calc_rate(0, HZ, can_stats.rx_frames_delta);

	/* check / update maximum values */
	if (can_stats.max_tx_rate < can_stats.current_tx_rate)
		can_stats.max_tx_rate = can_stats.current_tx_rate;

	if (can_stats.max_rx_rate < can_stats.current_rx_rate)
		can_stats.max_rx_rate = can_stats.current_rx_rate;

	if (can_stats.max_rx_match_ratio < can_stats.current_rx_match_ratio)
		can_stats.max_rx_match_ratio = can_stats.current_rx_match_ratio;

	/* clear values for 'current rate' calculation */
	can_stats.tx_frames_delta = 0;
	can_stats.rx_frames_delta = 0;
	can_stats.matches_delta = 0;

	/* restart timer (one second) */
	mod_timer(&can_stattimer, round_jiffies(jiffies + HZ));
}

/*
 * proc read functions
 */

/* Dump one receiver list; callers hold the RCU read lock. */
static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
			      struct net_device *dev)
{
	struct receiver *r;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(r, n, rx_list, list) {
		/* EFF ids are 29 bit (8 hex digits), SFF ids 11 bit (3) */
		char *fmt = (r->can_id & CAN_EFF_FLAG)?
			" %-5s %08x %08x %pK %pK %8ld %s\n" :
			" %-5s %03x %08x %pK %pK %8ld %s\n";

		seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask,
			   r->func, r->data, r->matches, r->ident);
	}
}

static void can_print_recv_banner(struct seq_file *m)
{
	/*
	 * can1. 00000000 00000000 00000000
	 * ....... 0 tp20
	 */
	seq_puts(m, " device can_id can_mask function"
		 " userdata matches ident\n");
}

/* seq_file show callback for /proc/net/can/stats */
static int can_stats_proc_show(struct seq_file *m, void *v)
{
	seq_putc(m, '\n');
	seq_printf(m, " %8ld transmitted frames (TXF)\n", can_stats.tx_frames);
	seq_printf(m, " %8ld received frames (RXF)\n", can_stats.rx_frames);
	seq_printf(m, " %8ld matched frames (RXMF)\n", can_stats.matches);

	seq_putc(m, '\n');

	/* rate values are only meaningful while the stat timer runs */
	if (can_stattimer.function == can_stat_update) {
		seq_printf(m, " %8ld %% total match ratio (RXMR)\n",
			   can_stats.total_rx_match_ratio);

		seq_printf(m, " %8ld frames/s total tx rate (TXR)\n",
			   can_stats.total_tx_rate);
		seq_printf(m, " %8ld frames/s total rx rate (RXR)\n",
			   can_stats.total_rx_rate);

		seq_putc(m, '\n');

		seq_printf(m, " %8ld %% current match ratio (CRXMR)\n",
			   can_stats.current_rx_match_ratio);

		seq_printf(m, " %8ld frames/s current tx rate (CTXR)\n",
			   can_stats.current_tx_rate);
		seq_printf(m, " %8ld frames/s current rx rate (CRXR)\n",
			   can_stats.current_rx_rate);

		seq_putc(m, '\n');

		seq_printf(m, " %8ld %% max match ratio (MRXMR)\n",
			   can_stats.max_rx_match_ratio);

		seq_printf(m, " %8ld frames/s max tx rate (MTXR)\n",
			   can_stats.max_tx_rate);
		seq_printf(m, " %8ld frames/s max rx rate (MRXR)\n",
			   can_stats.max_rx_rate);

		seq_putc(m, '\n');
	}

	seq_printf(m, " %8ld current receive list entries (CRCV)\n",
		   can_pstats.rcv_entries);
	seq_printf(m, " %8ld maximum receive list entries (MRCV)\n",
		   can_pstats.rcv_entries_max);

	if (can_pstats.stats_reset)
		seq_printf(m, "\n %8ld statistic resets (STR)\n",
			   can_pstats.stats_reset);

	if (can_pstats.user_reset)
		seq_printf(m, " %8ld user statistic resets (USTR)\n",
			   can_pstats.user_reset);

	seq_putc(m, '\n');
	return 0;
}

static int can_stats_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, can_stats_proc_show, NULL);
}

static const struct file_operations can_stats_proc_fops = {
	.owner = THIS_MODULE,
	.open = can_stats_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * Show callback for reset_stats: schedules a reset (performed from the
 * timer when it is running, or immediately otherwise).
 */
static int can_reset_stats_proc_show(struct seq_file *m, void *v)
{
	user_reset = 1;

	if (can_stattimer.function == can_stat_update) {
		seq_printf(m, "Scheduled statistic reset #%ld.\n",
			   can_pstats.stats_reset + 1);
	} else {
		if (can_stats.jiffies_init != jiffies)
			can_init_stats();

		seq_printf(m, "Performed statistic reset #%ld.\n",
			   can_pstats.stats_reset);
	}
	return 0;
}

static int can_reset_stats_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, can_reset_stats_proc_show, NULL);
}

static const struct file_operations can_reset_stats_proc_fops = {
	.owner = THIS_MODULE,
	.open = can_reset_stats_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int can_version_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", CAN_VERSION_STRING);
	return 0;
}

static int can_version_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, can_version_proc_show, NULL);
}

static const struct file_operations can_version_proc_fops = {
	.owner = THIS_MODULE,
	.open = can_version_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* print one device's receive list for index idx, or a no-entry marker */
static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx,
					     struct net_device *dev,
					     struct dev_rcv_lists *d)
{
	if (!hlist_empty(&d->rx[idx])) {
		can_print_recv_banner(m);
		can_print_rcvlist(m, &d->rx[idx], dev);
	} else
		seq_printf(m, " (%s: no entry)\n", DNAME(dev));
}

/* show callback shared by the rcvlist_{err,all,fil,inv,eff} files */
static int can_rcvlist_proc_show(struct seq_file *m, void *v)
{
	/* double cast to prevent GCC warning */
	int idx = (int)(long)m->private;
	struct net_device *dev;
	struct dev_rcv_lists *d;

	seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);

	rcu_read_lock();

	/* receive list for 'all' CAN devices (dev == NULL) */
	d = &can_rx_alldev_list;
	can_rcvlist_proc_show_one(m, idx, NULL, d);

	/* receive list for registered CAN devices */
	for_each_netdev_rcu(&init_net, dev) {
		if (dev->type == ARPHRD_CAN && dev->ml_priv)
			can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv);
	}

	rcu_read_unlock();

	seq_putc(m, '\n');
	return 0;
}

static int can_rcvlist_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, can_rcvlist_proc_show, PDE(inode)->data);
}

static const struct file_operations can_rcvlist_proc_fops = {
	.owner = THIS_MODULE,
	.open = can_rcvlist_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* the SFF lists are a 0x800-entry hash, so scan all buckets */
static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m,
						 struct net_device *dev,
						 struct dev_rcv_lists *d)
{
	int i;
	int all_empty = 1;

	/* check wether at least one list is non-empty */
	for (i = 0; i < 0x800; i++)
		if (!hlist_empty(&d->rx_sff[i])) {
			all_empty = 0;
			break;
		}

	if (!all_empty) {
		can_print_recv_banner(m);
		for (i = 0; i < 0x800; i++) {
			if (!hlist_empty(&d->rx_sff[i]))
				can_print_rcvlist(m, &d->rx_sff[i], dev);
		}
	} else
		seq_printf(m, " (%s: no entry)\n", DNAME(dev));
}

static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
{
	struct net_device *dev;
	struct dev_rcv_lists *d;

	/* RX_SFF */
	seq_puts(m, "\nreceive list 'rx_sff':\n");

	rcu_read_lock();

	/* sff receive list for 'all' CAN devices (dev == NULL) */
	d = &can_rx_alldev_list;
	can_rcvlist_sff_proc_show_one(m, NULL, d);

	/* sff receive list for registered CAN devices */
	for_each_netdev_rcu(&init_net, dev) {
		if (dev->type == ARPHRD_CAN && dev->ml_priv)
			can_rcvlist_sff_proc_show_one(m, dev, dev->ml_priv);
	}

	rcu_read_unlock();

	seq_putc(m, '\n');
	return 0;
}

static int can_rcvlist_sff_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, can_rcvlist_sff_proc_show, NULL);
}

static const struct file_operations can_rcvlist_sff_proc_fops = {
	.owner = THIS_MODULE,
	.open = can_rcvlist_sff_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * proc utility functions
 */

static void can_remove_proc_readentry(const char *name)
{
	if (can_dir)
		remove_proc_entry(name, can_dir);
}

/*
 * can_init_proc - create main CAN proc directory and procfs entries
 */
void can_init_proc(void)
{
	/* create /proc/net/can directory */
	can_dir = proc_mkdir("can", init_net.proc_net);

	if (!can_dir) {
		printk(KERN_INFO "can: failed to create /proc/net/can . "
		       "CONFIG_PROC_FS missing?\n");
		return;
	}

	/* own procfs entries from the AF_CAN core */
	pde_version = proc_create(CAN_PROC_VERSION, 0644, can_dir,
				  &can_version_proc_fops);
	pde_stats = proc_create(CAN_PROC_STATS, 0644, can_dir,
				&can_stats_proc_fops);
	pde_reset_stats = proc_create(CAN_PROC_RESET_STATS, 0644, can_dir,
				      &can_reset_stats_proc_fops);
	pde_rcvlist_err = proc_create_data(CAN_PROC_RCVLIST_ERR, 0644, can_dir,
					   &can_rcvlist_proc_fops, (void *)RX_ERR);
	pde_rcvlist_all = proc_create_data(CAN_PROC_RCVLIST_ALL, 0644, can_dir,
					   &can_rcvlist_proc_fops, (void *)RX_ALL);
	pde_rcvlist_fil = proc_create_data(CAN_PROC_RCVLIST_FIL, 0644, can_dir,
					   &can_rcvlist_proc_fops, (void *)RX_FIL);
	pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, can_dir,
					   &can_rcvlist_proc_fops, (void *)RX_INV);
	pde_rcvlist_eff = proc_create_data(CAN_PROC_RCVLIST_EFF, 0644, can_dir,
					   &can_rcvlist_proc_fops, (void *)RX_EFF);
	pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, can_dir,
				      &can_rcvlist_sff_proc_fops);
}

/*
 * can_remove_proc - remove procfs entries and main CAN proc directory
 */
void can_remove_proc(void)
{
	if (pde_version)
		can_remove_proc_readentry(CAN_PROC_VERSION);

	if (pde_stats)
		can_remove_proc_readentry(CAN_PROC_STATS);

	if (pde_reset_stats)
		can_remove_proc_readentry(CAN_PROC_RESET_STATS);

	if (pde_rcvlist_err)
		can_remove_proc_readentry(CAN_PROC_RCVLIST_ERR);

	if (pde_rcvlist_all)
		can_remove_proc_readentry(CAN_PROC_RCVLIST_ALL);

	if (pde_rcvlist_fil)
		can_remove_proc_readentry(CAN_PROC_RCVLIST_FIL);

	if (pde_rcvlist_inv)
		can_remove_proc_readentry(CAN_PROC_RCVLIST_INV);

	if (pde_rcvlist_eff)
		can_remove_proc_readentry(CAN_PROC_RCVLIST_EFF);

	if (pde_rcvlist_sff)
		can_remove_proc_readentry(CAN_PROC_RCVLIST_SFF);

	if (can_dir)
		proc_net_remove(&init_net, "can");
}
gpl-2.0
jeboo/kernel_JB_ZSLS6_i777
tools/perf/builtin-list.c
8192
1387
/*
 * builtin-list.c
 *
 * Builtin list command: list all event types
 *
 * Copyright (C) 2009, Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 */
#include "builtin.h"

#include "perf.h"

#include "util/parse-events.h"
#include "util/cache.h"

/*
 * 'perf list' entry point. Without arguments every known event is
 * printed; otherwise each argument selects an event class (tracepoint,
 * hw, sw, cache), a "subsys:event" tracepoint filter, or is handed to
 * the generic event printer.
 */
int cmd_list(int argc, const char **argv, const char *prefix __used)
{
	int i;

	setup_pager();

	if (argc == 1) {
		print_events(NULL);
		return 0;
	}

	for (i = 1; i < argc; ++i) {
		const char *arg = argv[i];
		char *colon;

		/* blank line between the output of consecutive arguments */
		if (i > 1)
			putchar('\n');

		if (strncmp(arg, "tracepoint", 10) == 0) {
			print_tracepoint_events(NULL, NULL);
			continue;
		}
		if (strcmp(arg, "hw") == 0 || strcmp(arg, "hardware") == 0) {
			print_events_type(PERF_TYPE_HARDWARE);
			continue;
		}
		if (strcmp(arg, "sw") == 0 || strcmp(arg, "software") == 0) {
			print_events_type(PERF_TYPE_SOFTWARE);
			continue;
		}
		if (strcmp(arg, "cache") == 0 || strcmp(arg, "hwcache") == 0) {
			print_hwcache_events(NULL);
			continue;
		}

		colon = strchr(arg, ':');
		if (colon == NULL) {
			/* no "subsys:event" separator - generic printer */
			print_events(arg);
		} else {
			/* split "subsys:event" on a private copy */
			char *copy = strdup(arg);

			if (copy == NULL)
				return -1;
			copy[colon - arg] = '\0';
			print_tracepoint_events(copy, copy + (colon - arg) + 1);
			free(copy);
		}
	}
	return 0;
}
gpl-2.0
k2wlxda/3.10
net/ceph/crush/hash.c
12032
3181
#include <linux/types.h> #include <linux/crush/hash.h> /* * Robert Jenkins' function for mixing 32-bit values * http://burtleburtle.net/bob/hash/evahash.html * a, b = random bits, c = input and output */ #define crush_hashmix(a, b, c) do { \ a = a-b; a = a-c; a = a^(c>>13); \ b = b-c; b = b-a; b = b^(a<<8); \ c = c-a; c = c-b; c = c^(b>>13); \ a = a-b; a = a-c; a = a^(c>>12); \ b = b-c; b = b-a; b = b^(a<<16); \ c = c-a; c = c-b; c = c^(b>>5); \ a = a-b; a = a-c; a = a^(c>>3); \ b = b-c; b = b-a; b = b^(a<<10); \ c = c-a; c = c-b; c = c^(b>>15); \ } while (0) #define crush_hash_seed 1315423911 static __u32 crush_hash32_rjenkins1(__u32 a) { __u32 hash = crush_hash_seed ^ a; __u32 b = a; __u32 x = 231232; __u32 y = 1232; crush_hashmix(b, x, hash); crush_hashmix(y, a, hash); return hash; } static __u32 crush_hash32_rjenkins1_2(__u32 a, __u32 b) { __u32 hash = crush_hash_seed ^ a ^ b; __u32 x = 231232; __u32 y = 1232; crush_hashmix(a, b, hash); crush_hashmix(x, a, hash); crush_hashmix(b, y, hash); return hash; } static __u32 crush_hash32_rjenkins1_3(__u32 a, __u32 b, __u32 c) { __u32 hash = crush_hash_seed ^ a ^ b ^ c; __u32 x = 231232; __u32 y = 1232; crush_hashmix(a, b, hash); crush_hashmix(c, x, hash); crush_hashmix(y, a, hash); crush_hashmix(b, x, hash); crush_hashmix(y, c, hash); return hash; } static __u32 crush_hash32_rjenkins1_4(__u32 a, __u32 b, __u32 c, __u32 d) { __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d; __u32 x = 231232; __u32 y = 1232; crush_hashmix(a, b, hash); crush_hashmix(c, d, hash); crush_hashmix(a, x, hash); crush_hashmix(y, b, hash); crush_hashmix(c, x, hash); crush_hashmix(y, d, hash); return hash; } static __u32 crush_hash32_rjenkins1_5(__u32 a, __u32 b, __u32 c, __u32 d, __u32 e) { __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d ^ e; __u32 x = 231232; __u32 y = 1232; crush_hashmix(a, b, hash); crush_hashmix(c, d, hash); crush_hashmix(e, x, hash); crush_hashmix(y, a, hash); crush_hashmix(b, x, hash); crush_hashmix(y, c, hash); crush_hashmix(d, 
x, hash); crush_hashmix(y, e, hash); return hash; } __u32 crush_hash32(int type, __u32 a) { switch (type) { case CRUSH_HASH_RJENKINS1: return crush_hash32_rjenkins1(a); default: return 0; } } __u32 crush_hash32_2(int type, __u32 a, __u32 b) { switch (type) { case CRUSH_HASH_RJENKINS1: return crush_hash32_rjenkins1_2(a, b); default: return 0; } } __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c) { switch (type) { case CRUSH_HASH_RJENKINS1: return crush_hash32_rjenkins1_3(a, b, c); default: return 0; } } __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d) { switch (type) { case CRUSH_HASH_RJENKINS1: return crush_hash32_rjenkins1_4(a, b, c, d); default: return 0; } } __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, __u32 e) { switch (type) { case CRUSH_HASH_RJENKINS1: return crush_hash32_rjenkins1_5(a, b, c, d, e); default: return 0; } } const char *crush_hash_name(int type) { switch (type) { case CRUSH_HASH_RJENKINS1: return "rjenkins1"; default: return "unknown"; } }
gpl-2.0
YoungjaeLee/linux-4.3-cxlbdev
arch/mips/txx9/jmr3927/irq.c
12544
4180
/* * Copyright 2001 MontaVista Software Inc. * Author: MontaVista Software, Inc. * ahennessy@mvista.com * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000-2001 Toshiba Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <asm/io.h> #include <asm/mipsregs.h> #include <asm/txx9/generic.h> #include <asm/txx9/jmr3927.h> #if JMR3927_IRQ_END > NR_IRQS #error JMR3927_IRQ_END > NR_IRQS #endif /* * CP0_STATUS is a thread's resource (saved/restored on context switch). * So disable_irq/enable_irq MUST handle IOC/IRC registers. 
*/ static void mask_irq_ioc(struct irq_data *d) { /* 0: mask */ unsigned int irq_nr = d->irq - JMR3927_IRQ_IOC; unsigned char imask = jmr3927_ioc_reg_in(JMR3927_IOC_INTM_ADDR); unsigned int bit = 1 << irq_nr; jmr3927_ioc_reg_out(imask & ~bit, JMR3927_IOC_INTM_ADDR); /* flush write buffer */ (void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR); } static void unmask_irq_ioc(struct irq_data *d) { /* 0: mask */ unsigned int irq_nr = d->irq - JMR3927_IRQ_IOC; unsigned char imask = jmr3927_ioc_reg_in(JMR3927_IOC_INTM_ADDR); unsigned int bit = 1 << irq_nr; jmr3927_ioc_reg_out(imask | bit, JMR3927_IOC_INTM_ADDR); /* flush write buffer */ (void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR); } static int jmr3927_ioc_irqroute(void) { unsigned char istat = jmr3927_ioc_reg_in(JMR3927_IOC_INTS2_ADDR); int i; for (i = 0; i < JMR3927_NR_IRQ_IOC; i++) { if (istat & (1 << i)) return JMR3927_IRQ_IOC + i; } return -1; } static int jmr3927_irq_dispatch(int pending) { int irq; if ((pending & CAUSEF_IP7) == 0) return -1; irq = (pending >> CAUSEB_IP2) & 0x0f; irq += JMR3927_IRQ_IRC; if (irq == JMR3927_IRQ_IOCINT) irq = jmr3927_ioc_irqroute(); return irq; } static struct irq_chip jmr3927_irq_ioc = { .name = "jmr3927_ioc", .irq_mask = mask_irq_ioc, .irq_unmask = unmask_irq_ioc, }; void __init jmr3927_irq_setup(void) { int i; txx9_irq_dispatch = jmr3927_irq_dispatch; /* Now, interrupt control disabled, */ /* all IRC interrupts are masked, */ /* all IRC interrupt mode are Low Active. 
*/ /* mask all IOC interrupts */ jmr3927_ioc_reg_out(0, JMR3927_IOC_INTM_ADDR); /* setup IOC interrupt mode (SOFT:High Active, Others:Low Active) */ jmr3927_ioc_reg_out(JMR3927_IOC_INTF_SOFT, JMR3927_IOC_INTP_ADDR); /* clear PCI Soft interrupts */ jmr3927_ioc_reg_out(0, JMR3927_IOC_INTS1_ADDR); /* clear PCI Reset interrupts */ jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR); tx3927_irq_init(); for (i = JMR3927_IRQ_IOC; i < JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC; i++) irq_set_chip_and_handler(i, &jmr3927_irq_ioc, handle_level_irq); /* setup IOC interrupt 1 (PCI, MODEM) */ irq_set_chained_handler(JMR3927_IRQ_IOCINT, handle_simple_irq); }
gpl-2.0
u-ra/android_kernel_htc_memul
arch/mips/txx9/jmr3927/irq.c
12544
4180
/* * Copyright 2001 MontaVista Software Inc. * Author: MontaVista Software, Inc. * ahennessy@mvista.com * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000-2001 Toshiba Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <asm/io.h> #include <asm/mipsregs.h> #include <asm/txx9/generic.h> #include <asm/txx9/jmr3927.h> #if JMR3927_IRQ_END > NR_IRQS #error JMR3927_IRQ_END > NR_IRQS #endif /* * CP0_STATUS is a thread's resource (saved/restored on context switch). * So disable_irq/enable_irq MUST handle IOC/IRC registers. 
*/ static void mask_irq_ioc(struct irq_data *d) { /* 0: mask */ unsigned int irq_nr = d->irq - JMR3927_IRQ_IOC; unsigned char imask = jmr3927_ioc_reg_in(JMR3927_IOC_INTM_ADDR); unsigned int bit = 1 << irq_nr; jmr3927_ioc_reg_out(imask & ~bit, JMR3927_IOC_INTM_ADDR); /* flush write buffer */ (void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR); } static void unmask_irq_ioc(struct irq_data *d) { /* 0: mask */ unsigned int irq_nr = d->irq - JMR3927_IRQ_IOC; unsigned char imask = jmr3927_ioc_reg_in(JMR3927_IOC_INTM_ADDR); unsigned int bit = 1 << irq_nr; jmr3927_ioc_reg_out(imask | bit, JMR3927_IOC_INTM_ADDR); /* flush write buffer */ (void)jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR); } static int jmr3927_ioc_irqroute(void) { unsigned char istat = jmr3927_ioc_reg_in(JMR3927_IOC_INTS2_ADDR); int i; for (i = 0; i < JMR3927_NR_IRQ_IOC; i++) { if (istat & (1 << i)) return JMR3927_IRQ_IOC + i; } return -1; } static int jmr3927_irq_dispatch(int pending) { int irq; if ((pending & CAUSEF_IP7) == 0) return -1; irq = (pending >> CAUSEB_IP2) & 0x0f; irq += JMR3927_IRQ_IRC; if (irq == JMR3927_IRQ_IOCINT) irq = jmr3927_ioc_irqroute(); return irq; } static struct irq_chip jmr3927_irq_ioc = { .name = "jmr3927_ioc", .irq_mask = mask_irq_ioc, .irq_unmask = unmask_irq_ioc, }; void __init jmr3927_irq_setup(void) { int i; txx9_irq_dispatch = jmr3927_irq_dispatch; /* Now, interrupt control disabled, */ /* all IRC interrupts are masked, */ /* all IRC interrupt mode are Low Active. 
*/ /* mask all IOC interrupts */ jmr3927_ioc_reg_out(0, JMR3927_IOC_INTM_ADDR); /* setup IOC interrupt mode (SOFT:High Active, Others:Low Active) */ jmr3927_ioc_reg_out(JMR3927_IOC_INTF_SOFT, JMR3927_IOC_INTP_ADDR); /* clear PCI Soft interrupts */ jmr3927_ioc_reg_out(0, JMR3927_IOC_INTS1_ADDR); /* clear PCI Reset interrupts */ jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR); tx3927_irq_init(); for (i = JMR3927_IRQ_IOC; i < JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC; i++) irq_set_chip_and_handler(i, &jmr3927_irq_ioc, handle_level_irq); /* setup IOC interrupt 1 (PCI, MODEM) */ irq_set_chained_handler(JMR3927_IRQ_IOCINT, handle_simple_irq); }
gpl-2.0
chyyuu/ucore_os_lab
labcodes_answer/lab6_result/kern/trap/trap.c
1
9443
#include <defs.h> #include <mmu.h> #include <memlayout.h> #include <clock.h> #include <trap.h> #include <x86.h> #include <stdio.h> #include <assert.h> #include <console.h> #include <vmm.h> #include <swap.h> #include <kdebug.h> #include <unistd.h> #include <syscall.h> #include <error.h> #include <sched.h> #include <sync.h> #include <proc.h> #define TICK_NUM 100 static void print_ticks() { cprintf("%d ticks\n",TICK_NUM); #ifdef DEBUG_GRADE cprintf("End of Test.\n"); panic("EOT: kernel seems ok."); #endif } /* * * Interrupt descriptor table: * * Must be built at run time because shifted function addresses can't * be represented in relocation records. * */ static struct gatedesc idt[256] = {{0}}; static struct pseudodesc idt_pd = { sizeof(idt) - 1, (uintptr_t)idt }; /* idt_init - initialize IDT to each of the entry points in kern/trap/vectors.S */ void idt_init(void) { /* LAB1 YOUR CODE : STEP 2 */ /* (1) Where are the entry addrs of each Interrupt Service Routine (ISR)? * All ISR's entry addrs are stored in __vectors. where is uintptr_t __vectors[] ? * __vectors[] is in kern/trap/vector.S which is produced by tools/vector.c * (try "make" command in lab1, then you will find vector.S in kern/trap DIR) * You can use "extern uintptr_t __vectors[];" to define this extern variable which will be used later. * (2) Now you should setup the entries of ISR in Interrupt Description Table (IDT). * Can you see idt[256] in this file? Yes, it's IDT! you can use SETGATE macro to setup each item of IDT * (3) After setup the contents of IDT, you will let CPU know where is the IDT by using 'lidt' instruction. * You don't know the meaning of this instruction? just google it! and check the libs/x86.h to know more. * Notice: the argument of lidt is idt_pd. try to find it! 
*/ /* LAB5 YOUR CODE */ //you should update your lab1 code (just add ONE or TWO lines of code), let user app to use syscall to get the service of ucore //so you should setup the syscall interrupt gate in here extern uintptr_t __vectors[]; int i; for (i = 0; i < sizeof(idt) / sizeof(struct gatedesc); i ++) { SETGATE(idt[i], 0, GD_KTEXT, __vectors[i], DPL_KERNEL); } SETGATE(idt[T_SYSCALL], 1, GD_KTEXT, __vectors[T_SYSCALL], DPL_USER); lidt(&idt_pd); } static const char * trapname(int trapno) { static const char * const excnames[] = { "Divide error", "Debug", "Non-Maskable Interrupt", "Breakpoint", "Overflow", "BOUND Range Exceeded", "Invalid Opcode", "Device Not Available", "Double Fault", "Coprocessor Segment Overrun", "Invalid TSS", "Segment Not Present", "Stack Fault", "General Protection", "Page Fault", "(unknown trap)", "x87 FPU Floating-Point Error", "Alignment Check", "Machine-Check", "SIMD Floating-Point Exception" }; if (trapno < sizeof(excnames)/sizeof(const char * const)) { return excnames[trapno]; } if (trapno >= IRQ_OFFSET && trapno < IRQ_OFFSET + 16) { return "Hardware Interrupt"; } return "(unknown trap)"; } /* trap_in_kernel - test if trap happened in kernel */ bool trap_in_kernel(struct trapframe *tf) { return (tf->tf_cs == (uint16_t)KERNEL_CS); } static const char *IA32flags[] = { "CF", NULL, "PF", NULL, "AF", NULL, "ZF", "SF", "TF", "IF", "DF", "OF", NULL, NULL, "NT", NULL, "RF", "VM", "AC", "VIF", "VIP", "ID", NULL, NULL, }; void print_trapframe(struct trapframe *tf) { cprintf("trapframe at %p\n", tf); print_regs(&tf->tf_regs); cprintf(" ds 0x----%04x\n", tf->tf_ds); cprintf(" es 0x----%04x\n", tf->tf_es); cprintf(" fs 0x----%04x\n", tf->tf_fs); cprintf(" gs 0x----%04x\n", tf->tf_gs); cprintf(" trap 0x%08x %s\n", tf->tf_trapno, trapname(tf->tf_trapno)); cprintf(" err 0x%08x\n", tf->tf_err); cprintf(" eip 0x%08x\n", tf->tf_eip); cprintf(" cs 0x----%04x\n", tf->tf_cs); cprintf(" flag 0x%08x ", tf->tf_eflags); int i, j; for (i = 0, j = 1; i < 
sizeof(IA32flags) / sizeof(IA32flags[0]); i ++, j <<= 1) { if ((tf->tf_eflags & j) && IA32flags[i] != NULL) { cprintf("%s,", IA32flags[i]); } } cprintf("IOPL=%d\n", (tf->tf_eflags & FL_IOPL_MASK) >> 12); if (!trap_in_kernel(tf)) { cprintf(" esp 0x%08x\n", tf->tf_esp); cprintf(" ss 0x----%04x\n", tf->tf_ss); } } void print_regs(struct pushregs *regs) { cprintf(" edi 0x%08x\n", regs->reg_edi); cprintf(" esi 0x%08x\n", regs->reg_esi); cprintf(" ebp 0x%08x\n", regs->reg_ebp); cprintf(" oesp 0x%08x\n", regs->reg_oesp); cprintf(" ebx 0x%08x\n", regs->reg_ebx); cprintf(" edx 0x%08x\n", regs->reg_edx); cprintf(" ecx 0x%08x\n", regs->reg_ecx); cprintf(" eax 0x%08x\n", regs->reg_eax); } static inline void print_pgfault(struct trapframe *tf) { /* error_code: * bit 0 == 0 means no page found, 1 means protection fault * bit 1 == 0 means read, 1 means write * bit 2 == 0 means kernel, 1 means user * */ cprintf("page fault at 0x%08x: %c/%c [%s].\n", rcr2(), (tf->tf_err & 4) ? 'U' : 'K', (tf->tf_err & 2) ? 'W' : 'R', (tf->tf_err & 1) ? "protection fault" : "no page found"); } static int pgfault_handler(struct trapframe *tf) { extern struct mm_struct *check_mm_struct; if(check_mm_struct !=NULL) { //used for test check_swap print_pgfault(tf); } struct mm_struct *mm; if (check_mm_struct != NULL) { assert(current == idleproc); mm = check_mm_struct; } else { if (current == NULL) { print_trapframe(tf); print_pgfault(tf); panic("unhandled page fault.\n"); } mm = current->mm; } return do_pgfault(mm, tf->tf_err, rcr2()); } static volatile int in_swap_tick_event = 0; extern struct mm_struct *check_mm_struct; static void trap_dispatch(struct trapframe *tf) { char c; int ret=0; switch (tf->tf_trapno) { case T_PGFLT: //page fault if ((ret = pgfault_handler(tf)) != 0) { print_trapframe(tf); if (current == NULL) { panic("handle pgfault failed. ret=%d\n", ret); } else { if (trap_in_kernel(tf)) { panic("handle pgfault failed in kernel mode. 
ret=%d\n", ret); } cprintf("killed by kernel.\n"); panic("handle user mode pgfault failed. ret=%d\n", ret); do_exit(-E_KILLED); } } break; case T_SYSCALL: syscall(); break; case IRQ_OFFSET + IRQ_TIMER: #if 0 LAB3 : If some page replacement algorithm(such as CLOCK PRA) need tick to change the priority of pages, then you can add code here. #endif /* LAB1 YOUR CODE : STEP 3 */ /* handle the timer interrupt */ /* (1) After a timer interrupt, you should record this event using a global variable (increase it), such as ticks in kern/driver/clock.c * (2) Every TICK_NUM cycle, you can print some info using a funciton, such as print_ticks(). * (3) Too Simple? Yes, I think so! */ /* LAB5 YOUR CODE */ /* you should upate you lab1 code (just add ONE or TWO lines of code): * Every TICK_NUM cycle, you should set current process's current->need_resched = 1 */ ticks ++; assert(current != NULL); sched_class_proc_tick(current); break; case IRQ_OFFSET + IRQ_COM1: c = cons_getc(); cprintf("serial [%03d] %c\n", c, c); break; case IRQ_OFFSET + IRQ_KBD: c = cons_getc(); cprintf("kbd [%03d] %c\n", c, c); break; //LAB1 CHALLENGE 1 : YOUR CODE you should modify below codes. case T_SWITCH_TOU: case T_SWITCH_TOK: panic("T_SWITCH_** ??\n"); break; case IRQ_OFFSET + IRQ_IDE1: case IRQ_OFFSET + IRQ_IDE2: /* do nothing */ break; default: print_trapframe(tf); if (current != NULL) { cprintf("unhandled trap.\n"); do_exit(-E_KILLED); } // in kernel, it must be a mistake panic("unexpected trap in kernel.\n"); } } /* * * trap - handles or dispatches an exception/interrupt. if and when trap() returns, * the code in kern/trap/trapentry.S restores the old CPU state saved in the * trapframe and then uses the iret instruction to return from the exception. 
* */ void trap(struct trapframe *tf) { // dispatch based on what type of trap occurred // used for previous projects if (current == NULL) { trap_dispatch(tf); } else { // keep a trapframe chain in stack struct trapframe *otf = current->tf; current->tf = tf; bool in_kernel = trap_in_kernel(tf); trap_dispatch(tf); current->tf = otf; if (!in_kernel) { if (current->flags & PF_EXITING) { do_exit(-E_KILLED); } if (current->need_resched) { schedule(); } } } }
gpl-2.0
ghisvail/nfft
kernel/nsfft/nsfft.c
1
53632
/* * Copyright (c) 2002, 2015 Jens Keiner, Stefan Kunis, Daniel Potts * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* $Id$ */ #include "config.h" #include <stdio.h> #include <math.h> #include <string.h> #include <stdlib.h> #ifdef HAVE_COMPLEX_H #include <complex.h> #endif #include "nfft3.h" #include "infft.h" #define NSFTT_DISABLE_TEST /* computes a 2d ndft by 1d nfft along the dimension 1 times 1d ndft along dimension 0 */ static void short_nfft_trafo_2d(nfft_plan* ths, nfft_plan* plan_1d) { int j,k0; double omega; for(j=0;j<ths->M_total;j++) { ths->f[j]= 0; plan_1d->x[j] = ths->x[ths->d * j + 1]; } for(k0=0;k0<ths->N[0];k0++) /* for shorties */ { plan_1d->f_hat = ths->f_hat + k0*ths->N[1]; nfft_trafo(plan_1d); for(j=0;j<ths->M_total;j++) { omega = ((double)(k0 - ths->N[0]/2)) * ths->x[ths->d * j + 0]; ths->f[j] += plan_1d->f[j] * cexp( - I*2*KPI*omega); } } } static void short_nfft_adjoint_2d(nfft_plan* ths, nfft_plan* plan_1d) { int j,k0; double omega; for(j=0;j<ths->M_total;j++) plan_1d->x[j] = ths->x[ths->d * j + 1]; for(k0=0;k0<ths->N[0];k0++) /* for shorties */ { for(j=0;j<ths->M_total;j++) { omega = ((double)(k0 - ths->N[0]/2)) * ths->x[ths->d * j + 0]; plan_1d->f[j] = ths->f[j] * cexp( + _Complex_I*2*KPI*omega); } plan_1d->f_hat = ths->f_hat + k0*ths->N[1]; nfft_adjoint(plan_1d); } } /* computes a 3d ndft 
by 1d nfft along the dimension 2 times 2d ndft along dimension 0,1 */ static void short_nfft_trafo_3d_1(nfft_plan* ths, nfft_plan* plan_1d) { int j,k0,k1; double omega; for(j=0;j<ths->M_total;j++) { ths->f[j] = 0; plan_1d->x[j] = ths->x[ths->d * j + 2]; } for(k0=0;k0<ths->N[0];k0++) /* for shorties */ for(k1=0;k1<ths->N[1];k1++) { plan_1d->f_hat = ths->f_hat + (k0*ths->N[1]+k1)*ths->N[2]; nfft_trafo(plan_1d); for(j=0;j<ths->M_total;j++) { omega = ((double)(k0 - ths->N[0]/2)) * ths->x[ths->d * j + 0] + ((double)(k1 - ths->N[1]/2)) * ths->x[ths->d * j + 1]; ths->f[j] += plan_1d->f[j] * cexp( - I*2*KPI*omega); } } } static void short_nfft_adjoint_3d_1(nfft_plan* ths, nfft_plan* plan_1d) { int j,k0,k1; double omega; for(j=0;j<ths->M_total;j++) plan_1d->x[j] = ths->x[ths->d * j + 2]; for(k0=0;k0<ths->N[0];k0++) /* for shorties */ for(k1=0;k1<ths->N[1];k1++) { for(j=0;j<ths->M_total;j++) { omega = ((double)(k0 - ths->N[0]/2)) * ths->x[ths->d * j + 0] + ((double)(k1 - ths->N[1]/2)) * ths->x[ths->d * j + 1]; plan_1d->f[j] = ths->f[j] * cexp( + _Complex_I*2*KPI*omega); } plan_1d->f_hat = ths->f_hat + (k0*ths->N[1]+k1)*ths->N[2]; nfft_adjoint(plan_1d); } } /* computes a 3d ndft by 2d nfft along the dimension 1,2 times 1d ndft along dimension 0 */ static void short_nfft_trafo_3d_2(nfft_plan* ths, nfft_plan* plan_2d) { int j,k0; double omega; for(j=0;j<ths->M_total;j++) { ths->f[j] = 0; plan_2d->x[2*j+0] = ths->x[ths->d * j + 1]; plan_2d->x[2*j+1] = ths->x[ths->d * j + 2]; } for(k0=0;k0<ths->N[0];k0++) /* for shorties */ { plan_2d->f_hat = ths->f_hat + k0*ths->N[1]*ths->N[2]; nfft_trafo(plan_2d); for(j=0;j<ths->M_total;j++) { omega = ((double)(k0 - ths->N[0]/2)) * ths->x[ths->d * j + 0]; ths->f[j] += plan_2d->f[j] * cexp( - I*2*KPI*omega); } } } static void short_nfft_adjoint_3d_2(nfft_plan* ths, nfft_plan* plan_2d) { int j,k0; double omega; for(j=0;j<ths->M_total;j++) { plan_2d->x[2*j+0] = ths->x[ths->d * j + 1]; plan_2d->x[2*j+1] = ths->x[ths->d * j + 2]; } 
for(k0=0;k0<ths->N[0];k0++) /* for shorties */ { for(j=0;j<ths->M_total;j++) { omega = ((double)(k0 - ths->N[0]/2)) * ths->x[ths->d * j + 0]; plan_2d->f[j] = ths->f[j] * cexp( + _Complex_I*2*KPI*omega); } plan_2d->f_hat = ths->f_hat + k0*ths->N[1]*ths->N[2]; nfft_adjoint(plan_2d); } } /*---------------------------------------------------------------------------*/ #ifdef GAUSSIAN static int index_sparse_to_full_direct_2d(int J, int k) { int N=X(exp2i)(J+2); /* number of full coeffs */ int N_B=X(exp2i)(J); /* number in each sparse block */ int j=k/N_B; /* consecutive number of Block */ int r=j/4; /* level of block */ int i, o, a, b,s,l,m1,m2; int k1,k2; if (k>=(J+4)*X(exp2i)(J+1)) { printf("Fehler!\n"); return(-1); } else { if (r>(J+1)/2) /* center block */ { i=k-4*((J+1)/2+1)*N_B; a=X(exp2i)(J/2+1); m1=i/a; m2=i%a; k1=N/2-a/2+m1; k2=N/2-a/2+m2; } else /* no center block */ { i=k-j*N_B; /* index in specific block */ o=j%4; /* kind of specific block */ a=X(exp2i)(r); b=X(exp2i)(J-r); l=MAX(a,b); /* long dimension of block */ s=MIN(a,b); /* short dimension of block */ m1=i/l; m2=i%l; switch(o) { case 0: { k1=N/2-a/2 ; k2=N/2+ b ; if (b>=a) { k1+=m1; k2+=m2; } else { k1+=m2; k2+=m1; } break; } case 1: { k1=N/2+ b ; k2=N/2-a/2 ; if (b>a) { k1+=m2; k2+=m1; } else { k1+=m1; k2+=m2; } break; } case 2: { k1=N/2-a/2 ; k2=N/2-2*b ; if (b>=a) { k1+=m1; k2+=m2; } else { k1+=m2; k2+=m1; } break; } case 3: { k1=N/2-2*b ; k2=N/2-a/2 ; if (b>a) { k1+=m2; k2+=m1; } else { k1+=m1; k2+=m2; } break; } default: { k1=-1; k2=-1; } } } //printf("m1=%d, m2=%d\n",m1,m2); return(k1*N+k2); } } #endif static inline int index_sparse_to_full_2d(nsfft_plan *ths, int k) { /* only by lookup table */ if( k < ths->N_total) return ths->index_sparse_to_full[k]; else return -1; } #ifndef NSFTT_DISABLE_TEST static int index_full_to_sparse_2d(int J, int k) { int N=X(exp2i)(J+2); /* number of full coeffs */ int N_B=X(exp2i)(J); /* number in each sparse block */ int k1=k/N-N/2; /* coordinates in the full grid 
*/ int k2=k%N-N/2; /* k1: row, k2: column */ int r,a,b; a=X(exp2i)(J/2+1); if ( (k1>=-(a/2)) && (k1<a/2) && (k2>=(-a/2)) && (k2<a/2) ) { return(4*((J+1)/2+1)*N_B+(k1+a/2)*a+(k2+a/2)); } for (r=0; r<=(J+1)/2; r++) { b=X(exp2i)(r); a=X(exp2i)(J-r); if ( (k1>=-(b/2)) && (k1<(b+1)/2) && (k2>=a) && (k2<2*a) ) { if (a>=b) return((4*r+0)*N_B+(k1+b/2)*a+(k2-a)); else return((4*r+0)*N_B+(k2-a)*b+(k1+b/2)); } else if ( (k1>=a) && (k1<2*a) && (k2>=-(b/2)) && (k2<(b+1)/2) ) { if (a>b) return((4*r+1)*N_B+(k2+b/2)*a+(k1-a)); else return((4*r+1)*N_B+(k1-a)*b+(k2+b/2)); } else if ( (k1>=-(b/2)) && (k1<(b+1)/2) && (k2>=-2*a) && (k2<-a) ) { if (a>=b) return((4*r+2)*N_B+(k1+b/2)*a+(k2+2*a)); else return((4*r+2)*N_B+(k2+2*a)*b+(k1+b/2)); } else if ( (k1>=-2*a) && (k1<-a) && (k2>=-(b/2)) && (k2<(b+1)/2) ) { if (a>b) return((4*r+3)*N_B+(k2+b/2)*a+(k1+2*a)); else return((4*r+3)*N_B+(k1+2*a)*b+(k2+b/2)); } } return(-1); } #endif #ifdef GAUSSIAN static void init_index_sparse_to_full_2d(nsfft_plan *ths) { int k_S; for (k_S=0; k_S<ths->N_total; k_S++) ths->index_sparse_to_full[k_S]=index_sparse_to_full_direct_2d(ths->J, k_S); } #endif #ifdef GAUSSIAN static inline int index_sparse_to_full_3d(nsfft_plan *ths, int k) { /* only by lookup table */ if( k < ths->N_total) return ths->index_sparse_to_full[k]; else return -1; } #endif #ifndef NSFTT_DISABLE_TEST static int index_full_to_sparse_3d(int J, int k) { int N=X(exp2i)(J+2); /* length of the full grid */ int N_B_r; /* size of a sparse block in level r */ int sum_N_B_less_r; /* sum N_B_r */ int r,a,b; int k3=(k%N)-N/2; /* coordinates in the full grid */ int k2=((k/N)%N)-N/2; int k1=k/(N*N)-N/2; a=X(exp2i)(J/2+1); /* length of center block */ if((k1>=-(a/2)) && (k1<a/2) && (k2>=(-a/2)) && (k2<a/2) && (k3>=(-a/2)) && (k3<a/2)) { return(6*X(exp2i)(J)*(X(exp2i)((J+1)/2+1)-1)+((k1+a/2)*a+(k2+a/2))*a+ (k3+a/2)); } sum_N_B_less_r=0; for (r=0; r<=(J+1)/2; r++) { a=X(exp2i)(J-r); b=X(exp2i)(r); N_B_r=a*b*b; /* right - rear - top - left - front - bottom 
*/ if ((k1>=a) && (k1<2*a) && (k2>=-(b/2)) && (k2<(b+1)/2) && (k3>=-(b/2)) && (k3<(b+1)/2)) /* right */ { if(a>b) return sum_N_B_less_r+N_B_r*0 + ((k2+b/2)*b+k3+b/2)*a + (k1-a); else return sum_N_B_less_r+N_B_r*0 + ((k1-a)*b+(k2+b/2))*b + (k3+b/2); } else if ((k2>=a) && (k2<2*a) && (k1>=-(b/2)) && (k1<(b+1)/2) && (k3>=-(b/2)) && (k3<(b+1)/2)) /* rear */ { if(a>b) return sum_N_B_less_r+N_B_r*1 + ((k1+b/2)*b+k3+b/2)*a + (k2-a); else if (a==b) return sum_N_B_less_r+N_B_r*1 + ((k1+b/2)*b+(k2-a))*a + (k3+b/2); else return sum_N_B_less_r+N_B_r*1 + ((k2-a)*b+(k1+b/2))*b + (k3+b/2); } else if ((k3>=a) && (k3<2*a) && (k1>=-(b/2)) && (k1<(b+1)/2) && (k2>=-(b/2)) && (k2<(b+1)/2)) /* top */ { if(a>=b) return sum_N_B_less_r+N_B_r*2 + ((k1+b/2)*b+k2+b/2)*a + (k3-a); else return sum_N_B_less_r+N_B_r*2 + ((k3-a)*b+(k1+b/2))*b + (k2+b/2); } else if ((k1>=-2*a) && (k1<-a) && (k2>=-(b/2)) && (k2<(b+1)/2) && (k3>=-(b/2)) && (k3<(b+1)/2)) /* left */ { if(a>b) return sum_N_B_less_r+N_B_r*3 + ((k2+b/2)*b+k3+b/2)*a + (k1+2*a); else return sum_N_B_less_r+N_B_r*3 + ((k1+2*a)*b+(k2+b/2))*b + (k3+b/2); } else if ((k2>=-2*a) && (k2<-a) && (k1>=-(b/2)) && (k1<(b+1)/2) && (k3>=-(b/2)) && (k3<(b+1)/2)) /* front */ { if(a>b) return sum_N_B_less_r+N_B_r*4 + ((k1+b/2)*b+k3+b/2)*a + (k2+2*a); else if (a==b) return sum_N_B_less_r+N_B_r*4 + ((k1+b/2)*b+(k2+2*a))*a + (k3+b/2); else return sum_N_B_less_r+N_B_r*4 + ((k2+2*a)*b+(k1+b/2))*b + (k3+b/2); } else if ((k3>=-2*a) && (k3<-a) && (k1>=-(b/2)) && (k1<(b+1)/2) && (k2>=-(b/2)) && (k2<(b+1)/2)) /* bottom */ { if(a>=b) return sum_N_B_less_r+N_B_r*5 + ((k1+b/2)*b+k2+b/2)*a + (k3+2*a); else return sum_N_B_less_r+N_B_r*5 + ((k3+2*a)*b+(k1+b/2))*b + (k2+b/2); } sum_N_B_less_r+=6*N_B_r; } /* for(r) */ return(-1); } #endif #ifdef GAUSSIAN static void init_index_sparse_to_full_3d(nsfft_plan *ths) { int k1,k2,k3,k_s,r; int a,b; int N=X(exp2i)(ths->J+2); /* length of the full grid */ int Nc=ths->center_nfft_plan->N[0]; /* length of the center block */ for (k_s=0, 
r=0; r<=(ths->J+1)/2; r++) { a=X(exp2i)(ths->J-r); b=X(exp2i)(r); /* right - rear - top - left - front - bottom */ /* right */ if(a>b) for(k2=-b/2;k2<(b+1)/2;k2++) for(k3=-b/2;k3<(b+1)/2;k3++) for(k1=a; k1<2*a; k1++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; else for(k1=a; k1<2*a; k1++) for(k2=-b/2;k2<(b+1)/2;k2++) for(k3=-b/2;k3<(b+1)/2;k3++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; /* rear */ if(a>b) for(k1=-b/2;k1<(b+1)/2;k1++) for(k3=-b/2;k3<(b+1)/2;k3++) for(k2=a; k2<2*a; k2++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; else if(a==b) for(k1=-b/2;k1<(b+1)/2;k1++) for(k2=a; k2<2*a; k2++) for(k3=-b/2;k3<(b+1)/2;k3++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; else for(k2=a; k2<2*a; k2++) for(k1=-b/2;k1<(b+1)/2;k1++) for(k3=-b/2;k3<(b+1)/2;k3++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; /* top */ if(a>=b) for(k1=-b/2;k1<(b+1)/2;k1++) for(k2=-b/2;k2<(b+1)/2;k2++) for(k3=a; k3<2*a; k3++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; else for(k3=a; k3<2*a; k3++) for(k1=-b/2;k1<(b+1)/2;k1++) for(k2=-b/2;k2<(b+1)/2;k2++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; /* left */ if(a>b) for(k2=-b/2;k2<(b+1)/2;k2++) for(k3=-b/2;k3<(b+1)/2;k3++) for(k1=-2*a; k1<-a; k1++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; else for(k1=-2*a; k1<-a; k1++) for(k2=-b/2;k2<(b+1)/2;k2++) for(k3=-b/2;k3<(b+1)/2;k3++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; /* front */ if(a>b) for(k1=-b/2;k1<(b+1)/2;k1++) for(k3=-b/2;k3<(b+1)/2;k3++) for(k2=-2*a; k2<-a; k2++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; else if(a==b) for(k1=-b/2;k1<(b+1)/2;k1++) for(k2=-2*a; k2<-a; k2++) for(k3=-b/2;k3<(b+1)/2;k3++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; else for(k2=-2*a; k2<-a; k2++) for(k1=-b/2;k1<(b+1)/2;k1++) 
for(k3=-b/2;k3<(b+1)/2;k3++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; /* top */ if(a>=b) for(k1=-b/2;k1<(b+1)/2;k1++) for(k2=-b/2;k2<(b+1)/2;k2++) for(k3=-2*a; k3<-a; k3++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; else for(k3=-2*a; k3<-a; k3++) for(k1=-b/2;k1<(b+1)/2;k1++) for(k2=-b/2;k2<(b+1)/2;k2++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; } /* center */ for(k1=-Nc/2;k1<Nc/2;k1++) for(k2=-Nc/2;k2<Nc/2;k2++) for(k3=-Nc/2; k3<Nc/2; k3++,k_s++) ths->index_sparse_to_full[k_s]=((k1+N/2)*N+k2+N/2)*N+k3+N/2; } #endif /* copies ths->f_hat to ths_plan->f_hat */ void nsfft_cp(nsfft_plan *ths, nfft_plan *ths_full_plan) { int k; /* initialize f_hat with zero values */ memset(ths_full_plan->f_hat, 0, ths_full_plan->N_total*sizeof(double _Complex)); /* copy values at hyperbolic grid points */ for(k=0;k<ths->N_total;k++) ths_full_plan->f_hat[ths->index_sparse_to_full[k]]=ths->f_hat[k]; /* copy nodes */ memcpy(ths_full_plan->x,ths->act_nfft_plan->x,ths->M_total*ths->d*sizeof(double)); } #ifndef NSFTT_DISABLE_TEST /* test copy_sparse_to_full */ static void test_copy_sparse_to_full_2d(nsfft_plan *ths, nfft_plan *ths_full_plan) { int r; int k1, k2; int a,b; const int J=ths->J; /* N=2^J */ const int N=ths_full_plan->N[0]; /* size of full NFFT */ const int N_B=X(exp2i)(J); /* size of small blocks */ /* copy sparse plan to full plan */ nsfft_cp(ths, ths_full_plan); /* show blockwise f_hat */ printf("f_hat blockwise\n"); for (r=0; r<=(J+1)/2; r++){ a=X(exp2i)(J-r); b=X(exp2i)(r); printf("top\n"); for (k1=0; k1<a; k1++){ for (k2=0; k2<b; k2++){ printf("(%1.1f,%1.1f) ", creal(ths->f_hat[(4*r+1)*N_B+ k1*b+k2]), cimag(ths->f_hat[(4*r+1)*N_B+ k1*b+k2])); } printf("\n"); } printf("bottom\n"); for (k1=0; k1<a; k1++){ for (k2=0; k2<b; k2++){ printf("(%1.1f,%1.1f) ", creal(ths->f_hat[(4*r+3)*N_B+ k1*b+k2]), cimag(ths->f_hat[(4*r+3)*N_B+ k1*b+k2])); } printf("\n"); } printf("right\n"); for (k2=0; k2<b; k2++){ for 
(k1=0; k1<a; k1++){ printf("(%1.1f,%1.1f) ", creal(ths->f_hat[(4*r+0)*N_B+ k2*a+k1]), cimag(ths->f_hat[(4*r+0)*N_B+ k2*a+k1])); } printf("\n"); } printf("left\n"); for (k2=0; k2<b; k2++){ for (k1=0; k1<a; k1++){ printf("(%1.1f,%1.1f) ", creal(ths->f_hat[(4*r+2)*N_B+ k2*a+k1]), cimag(ths->f_hat[(4*r+2)*N_B+ k2*a+k1])); } printf("\n"); } } return; /* show full f_hat */ printf("full f_hat\n"); for (k1=0;k1<N;k1++){ for (k2=0;k2<N;k2++){ printf("(%1.1f,%1.1f) ", creal(ths_full_plan->f_hat[k1*N+k2]), cimag(ths_full_plan->f_hat[k1*N+k2])); } printf("\n"); } } #endif #ifndef NSFTT_DISABLE_TEST static void test_sparse_to_full_2d(nsfft_plan* ths) { int k_S,k1,k2; int N=X(exp2i)(ths->J+2); printf("N=%d\n\n",N); for(k1=0;k1<N;k1++) for(k2=0;k2<N;k2++) { k_S=index_full_to_sparse_2d(ths->J, k1*N+k2); if(k_S!=-1) printf("(%+d, %+d)\t= %+d \t= %+d = %+d \n",k1-N/2,k2-N/2, k1*N+k2, k_S, ths->index_sparse_to_full[k_S]); } } #endif #ifndef NSFTT_DISABLE_TEST static void test_sparse_to_full_3d(nsfft_plan* ths) { int k_S,k1,k2,k3; int N=X(exp2i)(ths->J+2); printf("N=%d\n\n",N); for(k1=0;k1<N;k1++) for(k2=0;k2<N;k2++) for(k3=0;k3<N;k3++) { k_S=index_full_to_sparse_3d(ths->J, (k1*N+k2)*N+k3); if(k_S!=-1) printf("(%d, %d, %d)\t= %d \t= %d = %d \n",k1-N/2,k2-N/2,k3-N/2, (k1*N+k2)*N+k3,k_S, ths->index_sparse_to_full[k_S]); } } #endif void nsfft_init_random_nodes_coeffs(nsfft_plan *ths) { int j; /* init frequencies */ nfft_vrand_unit_complex(ths->f_hat, ths->N_total); /* init nodes */ nfft_vrand_shifted_unit_double(ths->act_nfft_plan->x, ths->d * ths->M_total); if(ths->d==2) for(j=0;j<ths->M_total;j++) { ths->x_transposed[2*j+0]=ths->act_nfft_plan->x[2*j+1]; ths->x_transposed[2*j+1]=ths->act_nfft_plan->x[2*j+0]; } else /* this->d==3 */ for(j=0;j<ths->M_total;j++) { ths->x_102[3*j+0]=ths->act_nfft_plan->x[3*j+1]; ths->x_102[3*j+1]=ths->act_nfft_plan->x[3*j+0]; ths->x_102[3*j+2]=ths->act_nfft_plan->x[3*j+2]; ths->x_201[3*j+0]=ths->act_nfft_plan->x[3*j+2]; 
ths->x_201[3*j+1]=ths->act_nfft_plan->x[3*j+0]; ths->x_201[3*j+2]=ths->act_nfft_plan->x[3*j+1]; ths->x_120[3*j+0]=ths->act_nfft_plan->x[3*j+1]; ths->x_120[3*j+1]=ths->act_nfft_plan->x[3*j+2]; ths->x_120[3*j+2]=ths->act_nfft_plan->x[3*j+0]; ths->x_021[3*j+0]=ths->act_nfft_plan->x[3*j+0]; ths->x_021[3*j+1]=ths->act_nfft_plan->x[3*j+2]; ths->x_021[3*j+2]=ths->act_nfft_plan->x[3*j+1]; } } static void nsdft_trafo_2d(nsfft_plan *ths) { int j,k_S,k_L,k0,k1; double omega; int N=X(exp2i)(ths->J+2); memset(ths->f,0,ths->M_total*sizeof(double _Complex)); for(k_S=0;k_S<ths->N_total;k_S++) { k_L=ths->index_sparse_to_full[k_S]; k0=k_L / N; k1=k_L % N; for(j=0;j<ths->M_total;j++) { omega = ((double)(k0 - N/2)) * ths->act_nfft_plan->x[2 * j + 0] + ((double)(k1 - N/2)) * ths->act_nfft_plan->x[2 * j + 1]; ths->f[j] += ths->f_hat[k_S] * cexp( - I*2*KPI*omega); } } } /* void nsdft_trafo_2d */ static void nsdft_trafo_3d(nsfft_plan *ths) { int j,k_S,k0,k1,k2; double omega; int N=X(exp2i)(ths->J+2); int k_L; memset(ths->f,0,ths->M_total*sizeof(double _Complex)); for(k_S=0;k_S<ths->N_total;k_S++) { k_L=ths->index_sparse_to_full[k_S]; k0=k_L/(N*N); k1=(k_L/N)%N; k2=k_L%N; for(j=0;j<ths->M_total;j++) { omega = ((double)(k0 - N/2)) * ths->act_nfft_plan->x[3 * j + 0] + ((double)(k1 - N/2)) * ths->act_nfft_plan->x[3 * j + 1] + ((double)(k2 - N/2)) * ths->act_nfft_plan->x[3 * j + 2]; ths->f[j] += ths->f_hat[k_S] * cexp( - I*2*KPI*omega); } } } /* void nsdft_trafo_3d */ void nsfft_trafo_direct(nsfft_plan *ths) { if(ths->d==2) nsdft_trafo_2d(ths); else nsdft_trafo_3d(ths); } static void nsdft_adjoint_2d(nsfft_plan *ths) { int j,k_S,k_L,k0,k1; double omega; int N=X(exp2i)(ths->J+2); memset(ths->f_hat,0,ths->N_total*sizeof(double _Complex)); for(k_S=0;k_S<ths->N_total;k_S++) { k_L=ths->index_sparse_to_full[k_S]; k0=k_L / N; k1=k_L % N; for(j=0;j<ths->M_total;j++) { omega = ((double)(k0 - N/2)) * ths->act_nfft_plan->x[2 * j + 0] + ((double)(k1 - N/2)) * ths->act_nfft_plan->x[2 * j + 1]; 
ths->f_hat[k_S] += ths->f[j] * cexp( + _Complex_I*2*KPI*omega); } } } /* void nsdft_adjoint_2d */ static void nsdft_adjoint_3d(nsfft_plan *ths) { int j,k_S,k0,k1,k2; double omega; int N=X(exp2i)(ths->J+2); int k_L; memset(ths->f_hat,0,ths->N_total*sizeof(double _Complex)); for(k_S=0;k_S<ths->N_total;k_S++) { k_L=ths->index_sparse_to_full[k_S]; k0=k_L/(N*N); k1=(k_L/N)%N; k2=k_L%N; for(j=0;j<ths->M_total;j++) { omega = ((double)(k0 - N/2)) * ths->act_nfft_plan->x[3 * j + 0] + ((double)(k1 - N/2)) * ths->act_nfft_plan->x[3 * j + 1] + ((double)(k2 - N/2)) * ths->act_nfft_plan->x[3 * j + 2]; ths->f_hat[k_S] += ths->f[j] * cexp( + _Complex_I*2*KPI*omega); } } } /* void nsdft_adjoint_3d */ void nsfft_adjoint_direct(nsfft_plan *ths) { if(ths->d==2) nsdft_adjoint_2d(ths); else nsdft_adjoint_3d(ths); } static void nsfft_trafo_2d(nsfft_plan *ths) { int r,rr,j; double temp; int M=ths->M_total; int J=ths->J; /* center */ ths->center_nfft_plan->f_hat=ths->f_hat+4*((J+1)/2+1)*X(exp2i)(J); if (ths->center_nfft_plan->N[0]<=ths->center_nfft_plan->m) nfft_trafo_direct(ths->center_nfft_plan); else nfft_trafo(ths->center_nfft_plan); for (j=0; j<M; j++) ths->f[j] = ths->center_nfft_plan->f[j]; for(rr=0;rr<=(J+1)/2;rr++) { r=MIN(rr,J-rr); ths->act_nfft_plan->my_fftw_plan1 = ths->set_fftw_plan1[r]; ths->act_nfft_plan->N[0]=X(exp2i)(r); ths->act_nfft_plan->n[0]=ths->sigma*ths->act_nfft_plan->N[0]; ths->act_nfft_plan->N[1]=X(exp2i)(J-r); ths->act_nfft_plan->n[1]=ths->sigma*ths->act_nfft_plan->N[1]; /*printf("%d x %d\n",ths->act_nfft_plan->N[0],ths->act_nfft_plan->N[1]);*/ temp=-3.0*KPI*X(exp2i)(J-rr); /* right */ ths->act_nfft_plan->f_hat=ths->f_hat+(4*rr+0)*X(exp2i)(J); if(r<rr) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) nfft_trafo_direct(ths->act_nfft_plan); else short_nfft_trafo_2d(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else nfft_trafo(ths->act_nfft_plan); if(r<rr) 
RSWAP(ths->act_nfft_plan->x,ths->x_transposed); for (j=0; j<M; j++) ths->f[j] += ths->act_nfft_plan->f[j] * cexp( + _Complex_I*temp*ths->act_nfft_plan->x[2*j+1]); /* top */ ths->act_nfft_plan->f_hat=ths->f_hat+(4*rr+1)*X(exp2i)(J); if((r==rr)&&(J-rr!=rr)) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) nfft_trafo_direct(ths->act_nfft_plan); else short_nfft_trafo_2d(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else nfft_trafo(ths->act_nfft_plan); if((r==rr)&&(J-rr!=rr)) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); for (j=0; j<M; j++) ths->f[j] += ths->act_nfft_plan->f[j] * cexp( + _Complex_I*temp*ths->act_nfft_plan->x[2*j+0]); /* left */ ths->act_nfft_plan->f_hat=ths->f_hat+(4*rr+2)*X(exp2i)(J); if(r<rr) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) nfft_trafo_direct(ths->act_nfft_plan); else short_nfft_trafo_2d(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else nfft_trafo(ths->act_nfft_plan); if(r<rr) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); for (j=0; j<M; j++) ths->f[j] += ths->act_nfft_plan->f[j] * cexp( - I*temp*ths->act_nfft_plan->x[2*j+1]); /* bottom */ ths->act_nfft_plan->f_hat=ths->f_hat+(4*rr+3)*X(exp2i)(J); if((r==rr)&&(J-rr!=rr)) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) nfft_trafo_direct(ths->act_nfft_plan); else short_nfft_trafo_2d(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else nfft_trafo(ths->act_nfft_plan); if((r==rr)&&(J-rr!=rr)) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); for (j=0; j<M; j++) ths->f[j] += ths->act_nfft_plan->f[j] * cexp( - I*temp*ths->act_nfft_plan->x[2*j+0]); } /* for(rr) */ } /* void nsfft_trafo_2d */ static void nsfft_adjoint_2d(nsfft_plan *ths) { int r,rr,j; double temp; int 
M=ths->M_total; int J=ths->J; /* center */ for (j=0; j<M; j++) ths->center_nfft_plan->f[j] = ths->f[j]; ths->center_nfft_plan->f_hat=ths->f_hat+4*((J+1)/2+1)*X(exp2i)(J); if (ths->center_nfft_plan->N[0]<=ths->center_nfft_plan->m) nfft_adjoint_direct(ths->center_nfft_plan); else nfft_adjoint(ths->center_nfft_plan); for(rr=0;rr<=(J+1)/2;rr++) { r=MIN(rr,J-rr); ths->act_nfft_plan->my_fftw_plan2 = ths->set_fftw_plan2[r]; ths->act_nfft_plan->N[0]=X(exp2i)(r); ths->act_nfft_plan->n[0]=ths->sigma*ths->act_nfft_plan->N[0]; ths->act_nfft_plan->N[1]=X(exp2i)(J-r); ths->act_nfft_plan->n[1]=ths->sigma*ths->act_nfft_plan->N[1]; /*printf("%d x %d\n",ths->act_nfft_plan->N[0],ths->act_nfft_plan->N[1]);*/ temp=-3.0*KPI*X(exp2i)(J-rr); /* right */ ths->act_nfft_plan->f_hat=ths->f_hat+(4*rr+0)*X(exp2i)(J); for (j=0; j<M; j++) ths->act_nfft_plan->f[j]= ths->f[j] * cexp( - I*temp*ths->act_nfft_plan->x[2*j+1]); if(r<rr) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) nfft_adjoint_direct(ths->act_nfft_plan); else short_nfft_adjoint_2d(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else nfft_adjoint(ths->act_nfft_plan); if(r<rr) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); /* top */ ths->act_nfft_plan->f_hat=ths->f_hat+(4*rr+1)*X(exp2i)(J); for (j=0; j<M; j++) ths->act_nfft_plan->f[j]= ths->f[j] * cexp( - I*temp*ths->act_nfft_plan->x[2*j+0]); if((r==rr)&&(J-rr!=rr)) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) nfft_adjoint_direct(ths->act_nfft_plan); else short_nfft_adjoint_2d(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else nfft_adjoint(ths->act_nfft_plan); if((r==rr)&&(J-rr!=rr)) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); /* left */ ths->act_nfft_plan->f_hat=ths->f_hat+(4*rr+2)*X(exp2i)(J); for (j=0; j<M; j++) ths->act_nfft_plan->f[j]= ths->f[j] * cexp( + 
_Complex_I*temp*ths->act_nfft_plan->x[2*j+1]); if(r<rr) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) nfft_adjoint_direct(ths->act_nfft_plan); else short_nfft_adjoint_2d(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else nfft_adjoint(ths->act_nfft_plan); if(r<rr) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); /* bottom */ ths->act_nfft_plan->f_hat=ths->f_hat+(4*rr+3)*X(exp2i)(J); for (j=0; j<M; j++) ths->act_nfft_plan->f[j]= ths->f[j] * cexp( + _Complex_I*temp*ths->act_nfft_plan->x[2*j+0]); if((r==rr)&&(J-rr!=rr)) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) nfft_adjoint_direct(ths->act_nfft_plan); else short_nfft_adjoint_2d(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else nfft_adjoint(ths->act_nfft_plan); if((r==rr)&&(J-rr!=rr)) RSWAP(ths->act_nfft_plan->x,ths->x_transposed); } /* for(rr) */ } /* void nsfft_adjoint_2d */ static void nsfft_trafo_3d(nsfft_plan *ths) { int r,rr,j; double temp; int sum_N_B_less_r,N_B_r,a,b; int M=ths->M_total; int J=ths->J; /* center */ ths->center_nfft_plan->f_hat=ths->f_hat+6*X(exp2i)(J)*(X(exp2i)((J+1)/2+1)-1); if (ths->center_nfft_plan->N[0]<=ths->center_nfft_plan->m) nfft_trafo_direct(ths->center_nfft_plan); else nfft_trafo(ths->center_nfft_plan); for (j=0; j<M; j++) ths->f[j] = ths->center_nfft_plan->f[j]; sum_N_B_less_r=0; for(rr=0;rr<=(J+1)/2;rr++) { a=X(exp2i)(J-rr); b=X(exp2i)(rr); N_B_r=a*b*b; r=MIN(rr,J-rr); ths->act_nfft_plan->my_fftw_plan1 = ths->set_fftw_plan1[rr]; ths->act_nfft_plan->N[0]=X(exp2i)(r); if(a<b) ths->act_nfft_plan->N[1]=X(exp2i)(J-r); else ths->act_nfft_plan->N[1]=X(exp2i)(r); ths->act_nfft_plan->N[2]=X(exp2i)(J-r); /*printf("\n\n%d x %d x %d:\t",ths->act_nfft_plan->N[0],ths->act_nfft_plan->N[1],ths->act_nfft_plan->N[2]); fflush(stdout);*/ 
ths->act_nfft_plan->N_total=ths->act_nfft_plan->N[0]*ths->act_nfft_plan->N[1]*ths->act_nfft_plan->N[2]; ths->act_nfft_plan->n[0]=ths->sigma*ths->act_nfft_plan->N[0]; ths->act_nfft_plan->n[1]=ths->sigma*ths->act_nfft_plan->N[1]; ths->act_nfft_plan->n[2]=ths->sigma*ths->act_nfft_plan->N[2]; ths->act_nfft_plan->n_total=ths->act_nfft_plan->n[0]*ths->act_nfft_plan->n[1]*ths->act_nfft_plan->n[2]; /* only for right - rear - top */ if((J==0)||((J==1)&&(rr==1))) temp=-2.0*KPI; else temp=-3.0*KPI*X(exp2i)(J-rr); /* right */ ths->act_nfft_plan->f_hat=ths->f_hat + sum_N_B_less_r + N_B_r*0; if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_120); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[2]<=ths->act_nfft_plan->m) nfft_trafo_direct(ths->act_nfft_plan); else short_nfft_trafo_3d_1(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else short_nfft_trafo_3d_2(ths->act_nfft_plan,&(ths->set_nfft_plan_2d[r])); else nfft_trafo(ths->act_nfft_plan); if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_120); for (j=0; j<M; j++) ths->f[j] += ths->act_nfft_plan->f[j] * cexp( + _Complex_I*temp*ths->act_nfft_plan->x[3*j+0]); /* rear */ ths->act_nfft_plan->f_hat=ths->f_hat + sum_N_B_less_r + N_B_r*1; if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_021); if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_102); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[2]<=ths->act_nfft_plan->m) nfft_trafo_direct(ths->act_nfft_plan); else short_nfft_trafo_3d_1(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else short_nfft_trafo_3d_2(ths->act_nfft_plan,&(ths->set_nfft_plan_2d[r])); else nfft_trafo(ths->act_nfft_plan); if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_021); if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_102); for (j=0; j<M; j++) ths->f[j] += ths->act_nfft_plan->f[j] * cexp( + _Complex_I*temp*ths->act_nfft_plan->x[3*j+1]); /* top */ ths->act_nfft_plan->f_hat=ths->f_hat 
+ sum_N_B_less_r + N_B_r*2; if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_201); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[2]<=ths->act_nfft_plan->m) nfft_trafo_direct(ths->act_nfft_plan); else short_nfft_trafo_3d_1(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else short_nfft_trafo_3d_2(ths->act_nfft_plan,&(ths->set_nfft_plan_2d[r])); else nfft_trafo(ths->act_nfft_plan); if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_201); for (j=0; j<M; j++) ths->f[j] += ths->act_nfft_plan->f[j] * cexp( + _Complex_I*temp*ths->act_nfft_plan->x[3*j+2]); /* only for left - front - bottom */ if((J==0)||((J==1)&&(rr==1))) temp=-4.0*KPI; else temp=-3.0*KPI*X(exp2i)(J-rr); /* left */ ths->act_nfft_plan->f_hat=ths->f_hat + sum_N_B_less_r + N_B_r*3; if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_120); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[2]<=ths->act_nfft_plan->m) nfft_trafo_direct(ths->act_nfft_plan); else short_nfft_trafo_3d_1(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else short_nfft_trafo_3d_2(ths->act_nfft_plan,&(ths->set_nfft_plan_2d[r])); else nfft_trafo(ths->act_nfft_plan); if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_120); for (j=0; j<M; j++) ths->f[j] += ths->act_nfft_plan->f[j] * cexp( - I*temp*ths->act_nfft_plan->x[3*j+0]); /* front */ ths->act_nfft_plan->f_hat=ths->f_hat + sum_N_B_less_r + N_B_r*4; if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_021); if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_102); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[2]<=ths->act_nfft_plan->m) nfft_trafo_direct(ths->act_nfft_plan); else short_nfft_trafo_3d_1(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else short_nfft_trafo_3d_2(ths->act_nfft_plan,&(ths->set_nfft_plan_2d[r])); else nfft_trafo(ths->act_nfft_plan); if(a>b) 
RSWAP(ths->act_nfft_plan->x,ths->x_021); if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_102); for (j=0; j<M; j++) ths->f[j] += ths->act_nfft_plan->f[j] * cexp( - I*temp*ths->act_nfft_plan->x[3*j+1]); /* bottom */ ths->act_nfft_plan->f_hat=ths->f_hat + sum_N_B_less_r + N_B_r*5; if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_201); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[2]<=ths->act_nfft_plan->m) nfft_trafo_direct(ths->act_nfft_plan); else short_nfft_trafo_3d_1(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else short_nfft_trafo_3d_2(ths->act_nfft_plan,&(ths->set_nfft_plan_2d[r])); else nfft_trafo(ths->act_nfft_plan); if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_201); for (j=0; j<M; j++) ths->f[j] += ths->act_nfft_plan->f[j] * cexp( - I*temp*ths->act_nfft_plan->x[3*j+2]); sum_N_B_less_r+=6*N_B_r; } /* for(rr) */ } /* void nsfft_trafo_3d */ static void nsfft_adjoint_3d(nsfft_plan *ths) { int r,rr,j; double temp; int sum_N_B_less_r,N_B_r,a,b; int M=ths->M_total; int J=ths->J; /* center */ for (j=0; j<M; j++) ths->center_nfft_plan->f[j] = ths->f[j]; ths->center_nfft_plan->f_hat=ths->f_hat+6*X(exp2i)(J)*(X(exp2i)((J+1)/2+1)-1); if (ths->center_nfft_plan->N[0]<=ths->center_nfft_plan->m) nfft_adjoint_direct(ths->center_nfft_plan); else nfft_adjoint(ths->center_nfft_plan); sum_N_B_less_r=0; for(rr=0;rr<=(J+1)/2;rr++) { a=X(exp2i)(J-rr); b=X(exp2i)(rr); N_B_r=a*b*b; r=MIN(rr,J-rr); ths->act_nfft_plan->my_fftw_plan1 = ths->set_fftw_plan1[rr]; ths->act_nfft_plan->my_fftw_plan2 = ths->set_fftw_plan2[rr]; ths->act_nfft_plan->N[0]=X(exp2i)(r); if(a<b) ths->act_nfft_plan->N[1]=X(exp2i)(J-r); else ths->act_nfft_plan->N[1]=X(exp2i)(r); ths->act_nfft_plan->N[2]=X(exp2i)(J-r); /*printf("\n\n%d x %d x %d:\t",ths->act_nfft_plan->N[0],ths->act_nfft_plan->N[1],ths->act_nfft_plan->N[2]); fflush(stdout);*/ ths->act_nfft_plan->N_total=ths->act_nfft_plan->N[0]*ths->act_nfft_plan->N[1]*ths->act_nfft_plan->N[2]; 
ths->act_nfft_plan->n[0]=ths->sigma*ths->act_nfft_plan->N[0]; ths->act_nfft_plan->n[1]=ths->sigma*ths->act_nfft_plan->N[1]; ths->act_nfft_plan->n[2]=ths->sigma*ths->act_nfft_plan->N[2]; ths->act_nfft_plan->n_total=ths->act_nfft_plan->n[0]*ths->act_nfft_plan->n[1]*ths->act_nfft_plan->n[2]; /* only for right - rear - top */ if((J==0)||((J==1)&&(rr==1))) temp=-2.0*KPI; else temp=-3.0*KPI*X(exp2i)(J-rr); /* right */ ths->act_nfft_plan->f_hat=ths->f_hat + sum_N_B_less_r + N_B_r*0; for (j=0; j<M; j++) ths->act_nfft_plan->f[j]= ths->f[j] * cexp( - I*temp*ths->act_nfft_plan->x[3*j+0]); if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_120); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[2]<=ths->act_nfft_plan->m) nfft_adjoint_direct(ths->act_nfft_plan); else short_nfft_adjoint_3d_1(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else short_nfft_adjoint_3d_2(ths->act_nfft_plan,&(ths->set_nfft_plan_2d[r])); else nfft_adjoint(ths->act_nfft_plan); if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_120); /* rear */ ths->act_nfft_plan->f_hat=ths->f_hat + sum_N_B_less_r + N_B_r*1; for (j=0; j<M; j++) ths->act_nfft_plan->f[j]= ths->f[j] * cexp( - I*temp*ths->act_nfft_plan->x[3*j+1]); if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_021); if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_102); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[2]<=ths->act_nfft_plan->m) nfft_adjoint_direct(ths->act_nfft_plan); else short_nfft_adjoint_3d_1(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else short_nfft_adjoint_3d_2(ths->act_nfft_plan,&(ths->set_nfft_plan_2d[r])); else nfft_adjoint(ths->act_nfft_plan); if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_021); if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_102); /* top */ ths->act_nfft_plan->f_hat=ths->f_hat + sum_N_B_less_r + N_B_r*2; for (j=0; j<M; j++) ths->act_nfft_plan->f[j]= ths->f[j] * cexp( - 
I*temp*ths->act_nfft_plan->x[3*j+2]); if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_201); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[2]<=ths->act_nfft_plan->m) nfft_adjoint_direct(ths->act_nfft_plan); else short_nfft_adjoint_3d_1(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else short_nfft_adjoint_3d_2(ths->act_nfft_plan,&(ths->set_nfft_plan_2d[r])); else nfft_adjoint(ths->act_nfft_plan); if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_201); /* only for left - front - bottom */ if((J==0)||((J==1)&&(rr==1))) temp=-4.0*KPI; else temp=-3.0*KPI*X(exp2i)(J-rr); /* left */ ths->act_nfft_plan->f_hat=ths->f_hat + sum_N_B_less_r + N_B_r*3; for (j=0; j<M; j++) ths->act_nfft_plan->f[j]= ths->f[j] * cexp( + _Complex_I*temp*ths->act_nfft_plan->x[3*j+0]); if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_120); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[2]<=ths->act_nfft_plan->m) nfft_adjoint_direct(ths->act_nfft_plan); else short_nfft_adjoint_3d_1(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else short_nfft_adjoint_3d_2(ths->act_nfft_plan,&(ths->set_nfft_plan_2d[r])); else nfft_adjoint(ths->act_nfft_plan); if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_120); /* front */ ths->act_nfft_plan->f_hat=ths->f_hat + sum_N_B_less_r + N_B_r*4; for (j=0; j<M; j++) ths->act_nfft_plan->f[j]= ths->f[j] * cexp( + _Complex_I*temp*ths->act_nfft_plan->x[3*j+1]); if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_021); if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_102); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[2]<=ths->act_nfft_plan->m) nfft_adjoint_direct(ths->act_nfft_plan); else short_nfft_adjoint_3d_1(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else short_nfft_adjoint_3d_2(ths->act_nfft_plan,&(ths->set_nfft_plan_2d[r])); else 
nfft_adjoint(ths->act_nfft_plan); if(a>b) RSWAP(ths->act_nfft_plan->x,ths->x_021); if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_102); /* bottom */ ths->act_nfft_plan->f_hat=ths->f_hat + sum_N_B_less_r + N_B_r*5; for (j=0; j<M; j++) ths->act_nfft_plan->f[j]= ths->f[j] * cexp( + _Complex_I*temp*ths->act_nfft_plan->x[3*j+2]); if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_201); if(ths->act_nfft_plan->N[0]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[1]<=ths->act_nfft_plan->m) if(ths->act_nfft_plan->N[2]<=ths->act_nfft_plan->m) nfft_adjoint_direct(ths->act_nfft_plan); else short_nfft_adjoint_3d_1(ths->act_nfft_plan,&(ths->set_nfft_plan_1d[r])); else short_nfft_adjoint_3d_2(ths->act_nfft_plan,&(ths->set_nfft_plan_2d[r])); else nfft_adjoint(ths->act_nfft_plan); if(a<b) RSWAP(ths->act_nfft_plan->x,ths->x_201); sum_N_B_less_r+=6*N_B_r; } /* for(rr) */ } /* void nsfft_adjoint_3d */ void nsfft_trafo(nsfft_plan *ths) { if(ths->d==2) nsfft_trafo_2d(ths); else nsfft_trafo_3d(ths); } void nsfft_adjoint(nsfft_plan *ths) { if(ths->d==2) nsfft_adjoint_2d(ths); else nsfft_adjoint_3d(ths); } /*========================================================*/ /* J >1, no precomputation at all!! 
*/ #ifdef GAUSSIAN static void nsfft_init_2d(nsfft_plan *ths, int J, int M, int m, unsigned snfft_flags) { int r; int N[2]; int n[2]; ths->flags=snfft_flags; ths->sigma=2; ths->J=J; ths->M_total=M; ths->N_total=(J+4)*X(exp2i)(J+1); /* memory allocation */ ths->f = (double _Complex *)nfft_malloc(M*sizeof(double _Complex)); ths->f_hat = (double _Complex *)nfft_malloc(ths->N_total*sizeof(double _Complex)); ths->x_transposed= (double*)nfft_malloc(2*M*sizeof(double)); ths->act_nfft_plan = (nfft_plan*)nfft_malloc(sizeof(nfft_plan)); ths->center_nfft_plan = (nfft_plan*)nfft_malloc(sizeof(nfft_plan)); ths->set_fftw_plan1=(fftw_plan*) nfft_malloc((J/2+1)*sizeof(fftw_plan)); ths->set_fftw_plan2=(fftw_plan*) nfft_malloc((J/2+1)*sizeof(fftw_plan)); ths->set_nfft_plan_1d = (nfft_plan*) nfft_malloc((X(log2i)(m)+1)*(sizeof(nfft_plan))); /* planning the small nffts */ /* r=0 */ N[0]=1; n[0]=ths->sigma*N[0]; N[1]=X(exp2i)(J); n[1]=ths->sigma*N[1]; nfft_init_guru(ths->act_nfft_plan,2,N,M,n,m, FG_PSI| MALLOC_X| MALLOC_F| FFTW_INIT, FFTW_MEASURE); if(ths->act_nfft_plan->flags & PRE_ONE_PSI) nfft_precompute_one_psi(ths->act_nfft_plan); ths->set_fftw_plan1[0]=ths->act_nfft_plan->my_fftw_plan1; ths->set_fftw_plan2[0]=ths->act_nfft_plan->my_fftw_plan2; for(r=1;r<=J/2;r++) { N[0]=X(exp2i)(r); n[0]=ths->sigma*N[0]; N[1]=X(exp2i)(J-r); n[1]=ths->sigma*N[1]; ths->set_fftw_plan1[r] = fftw_plan_dft(2, n, ths->act_nfft_plan->g1, ths->act_nfft_plan->g2, FFTW_FORWARD, ths->act_nfft_plan->fftw_flags); ths->set_fftw_plan2[r] = fftw_plan_dft(2, n, ths->act_nfft_plan->g2, ths->act_nfft_plan->g1, FFTW_BACKWARD, ths->act_nfft_plan->fftw_flags); } /* planning the 1d nffts */ for(r=0;r<=X(log2i)(m);r++) { N[0]=X(exp2i)(J-r); n[0]=ths->sigma*N[0]; /* ==N[1] of the 2 dimensional plan */ nfft_init_guru(&(ths->set_nfft_plan_1d[r]),1,N,M,n,m, MALLOC_X| MALLOC_F| FFTW_INIT, FFTW_MEASURE); ths->set_nfft_plan_1d[r].flags = ths->set_nfft_plan_1d[r].flags | FG_PSI; ths->set_nfft_plan_1d[r].K=ths->act_nfft_plan->K; 
ths->set_nfft_plan_1d[r].psi=ths->act_nfft_plan->psi; } /* center plan */ /* J/2 == floor(((double)J) / 2.0) */ N[0]=X(exp2i)(J/2+1); n[0]=ths->sigma*N[0]; N[1]=X(exp2i)(J/2+1); n[1]=ths->sigma*N[1]; nfft_init_guru(ths->center_nfft_plan,2,N,M,n, m, MALLOC_F| FFTW_INIT, FFTW_MEASURE); ths->center_nfft_plan->x= ths->act_nfft_plan->x; ths->center_nfft_plan->flags = ths->center_nfft_plan->flags| FG_PSI; ths->center_nfft_plan->K=ths->act_nfft_plan->K; ths->center_nfft_plan->psi=ths->act_nfft_plan->psi; if(ths->flags & NSDFT) { ths->index_sparse_to_full=(int*)nfft_malloc(ths->N_total*sizeof(int)); init_index_sparse_to_full_2d(ths); } } #endif /*========================================================*/ /* J >1, no precomputation at all!! */ #ifdef GAUSSIAN static void nsfft_init_3d(nsfft_plan *ths, int J, int M, int m, unsigned snfft_flags) { int r,rr,a,b; int N[3]; int n[3]; ths->flags=snfft_flags; ths->sigma=2; ths->J=J; ths->M_total=M; ths->N_total=6*X(exp2i)(J)*(X(exp2i)((J+1)/2+1)-1)+X(exp2i)(3*(J/2+1)); /* memory allocation */ ths->f = (double _Complex *)nfft_malloc(M*sizeof(double _Complex)); ths->f_hat = (double _Complex *)nfft_malloc(ths->N_total*sizeof(double _Complex)); ths->x_102= (double*)nfft_malloc(3*M*sizeof(double)); ths->x_201= (double*)nfft_malloc(3*M*sizeof(double)); ths->x_120= (double*)nfft_malloc(3*M*sizeof(double)); ths->x_021= (double*)nfft_malloc(3*M*sizeof(double)); ths->act_nfft_plan = (nfft_plan*)nfft_malloc(sizeof(nfft_plan)); ths->center_nfft_plan = (nfft_plan*)nfft_malloc(sizeof(nfft_plan)); ths->set_fftw_plan1=(fftw_plan*) nfft_malloc(((J+1)/2+1)*sizeof(fftw_plan)); ths->set_fftw_plan2=(fftw_plan*) nfft_malloc(((J+1)/2+1)*sizeof(fftw_plan)); ths->set_nfft_plan_1d = (nfft_plan*) nfft_malloc((X(log2i)(m)+1)*(sizeof(nfft_plan))); ths->set_nfft_plan_2d = (nfft_plan*) nfft_malloc((X(log2i)(m)+1)*(sizeof(nfft_plan))); /* planning the small nffts */ /* r=0 */ N[0]=1; n[0]=ths->sigma*N[0]; N[1]=1; n[1]=ths->sigma*N[1]; N[2]=X(exp2i)(J); 
n[2]=ths->sigma*N[2]; nfft_init_guru(ths->act_nfft_plan,3,N,M,n,m, FG_PSI| MALLOC_X| MALLOC_F, FFTW_MEASURE); if(ths->act_nfft_plan->flags & PRE_ONE_PSI) nfft_precompute_one_psi(ths->act_nfft_plan); /* malloc g1, g2 for maximal size */ ths->act_nfft_plan->g1 = nfft_malloc(ths->sigma*ths->sigma*ths->sigma*X(exp2i)(J+(J+1)/2)*sizeof(double _Complex)); ths->act_nfft_plan->g2 = nfft_malloc(ths->sigma*ths->sigma*ths->sigma*X(exp2i)(J+(J+1)/2)*sizeof(double _Complex)); ths->act_nfft_plan->my_fftw_plan1 = fftw_plan_dft(3, n, ths->act_nfft_plan->g1, ths->act_nfft_plan->g2, FFTW_FORWARD, ths->act_nfft_plan->fftw_flags); ths->act_nfft_plan->my_fftw_plan2 = fftw_plan_dft(3, n, ths->act_nfft_plan->g2, ths->act_nfft_plan->g1, FFTW_BACKWARD, ths->act_nfft_plan->fftw_flags); ths->set_fftw_plan1[0]=ths->act_nfft_plan->my_fftw_plan1; ths->set_fftw_plan2[0]=ths->act_nfft_plan->my_fftw_plan2; for(rr=1;rr<=(J+1)/2;rr++) { a=X(exp2i)(J-rr); b=X(exp2i)(rr); r=MIN(rr,J-rr); n[0]=ths->sigma*X(exp2i)(r); if(a<b) n[1]=ths->sigma*X(exp2i)(J-r); else n[1]=ths->sigma*X(exp2i)(r); n[2]=ths->sigma*X(exp2i)(J-r); ths->set_fftw_plan1[rr] = fftw_plan_dft(3, n, ths->act_nfft_plan->g1, ths->act_nfft_plan->g2, FFTW_FORWARD, ths->act_nfft_plan->fftw_flags); ths->set_fftw_plan2[rr] = fftw_plan_dft(3, n, ths->act_nfft_plan->g2, ths->act_nfft_plan->g1, FFTW_BACKWARD, ths->act_nfft_plan->fftw_flags); } /* planning the 1d nffts */ for(r=0;r<=X(log2i)(m);r++) { N[0]=X(exp2i)(J-r); n[0]=ths->sigma*N[0]; N[1]=X(exp2i)(J-r); n[1]=ths->sigma*N[1]; if(N[0]>m) { nfft_init_guru(&(ths->set_nfft_plan_1d[r]),1,N,M,n,m, MALLOC_X| MALLOC_F| FFTW_INIT, FFTW_MEASURE); ths->set_nfft_plan_1d[r].flags = ths->set_nfft_plan_1d[r].flags | FG_PSI; ths->set_nfft_plan_1d[r].K=ths->act_nfft_plan->K; ths->set_nfft_plan_1d[r].psi=ths->act_nfft_plan->psi; nfft_init_guru(&(ths->set_nfft_plan_2d[r]),2,N,M,n,m, MALLOC_X| MALLOC_F| FFTW_INIT, FFTW_MEASURE); ths->set_nfft_plan_2d[r].flags = ths->set_nfft_plan_2d[r].flags | FG_PSI; 
ths->set_nfft_plan_2d[r].K=ths->act_nfft_plan->K; ths->set_nfft_plan_2d[r].psi=ths->act_nfft_plan->psi; } } /* center plan */ /* J/2 == floor(((double)J) / 2.0) */ N[0]=X(exp2i)(J/2+1); n[0]=ths->sigma*N[0]; N[1]=X(exp2i)(J/2+1); n[1]=ths->sigma*N[1]; N[2]=X(exp2i)(J/2+1); n[2]=ths->sigma*N[2]; nfft_init_guru(ths->center_nfft_plan,3,N,M,n, m, MALLOC_F| FFTW_INIT, FFTW_MEASURE); ths->center_nfft_plan->x= ths->act_nfft_plan->x; ths->center_nfft_plan->flags = ths->center_nfft_plan->flags| FG_PSI; ths->center_nfft_plan->K=ths->act_nfft_plan->K; ths->center_nfft_plan->psi=ths->act_nfft_plan->psi; if(ths->flags & NSDFT) { ths->index_sparse_to_full=(int*)nfft_malloc(ths->N_total*sizeof(int)); init_index_sparse_to_full_3d(ths); } } #endif #ifdef GAUSSIAN void nsfft_init(nsfft_plan *ths, int d, int J, int M, int m, unsigned flags) { ths->d=d; if(ths->d==2) nsfft_init_2d(ths, J, M, m, flags); else nsfft_init_3d(ths, J, M, m, flags); ths->mv_trafo = (void (*) (void* ))nsfft_trafo; ths->mv_adjoint = (void (*) (void* ))nsfft_adjoint; } #else void nsfft_init(nsfft_plan *ths, int d, int J, int M, int m, unsigned flags) { UNUSED(ths); UNUSED(d); UNUSED(J); UNUSED(M); UNUSED(m); UNUSED(flags); fprintf(stderr, "\nError in kernel/nsfft_init: require GAUSSIAN window function\n"); } #endif static void nsfft_finalize_2d(nsfft_plan *ths) { int r; if(ths->flags & NSDFT) nfft_free(ths->index_sparse_to_full); /* center plan */ ths->center_nfft_plan->flags = ths->center_nfft_plan->flags ^ FG_PSI; nfft_finalize(ths->center_nfft_plan); /* the 1d nffts */ for(r=0;r<=X(log2i)(ths->act_nfft_plan->m);r++) { ths->set_nfft_plan_1d[r].flags = ths->set_nfft_plan_1d[r].flags ^ FG_PSI; nfft_finalize(&(ths->set_nfft_plan_1d[r])); } /* finalize the small nffts */ ths->act_nfft_plan->my_fftw_plan2=ths->set_fftw_plan2[0]; ths->act_nfft_plan->my_fftw_plan1=ths->set_fftw_plan1[0]; for(r=1;r<=ths->J/2;r++) { fftw_destroy_plan(ths->set_fftw_plan2[r]); fftw_destroy_plan(ths->set_fftw_plan1[r]); } /* r=0 */ 
nfft_finalize(ths->act_nfft_plan); nfft_free(ths->set_nfft_plan_1d); nfft_free(ths->set_fftw_plan2); nfft_free(ths->set_fftw_plan1); nfft_free(ths->x_transposed); nfft_free(ths->f_hat); nfft_free(ths->f); } static void nsfft_finalize_3d(nsfft_plan *ths) { int r; if(ths->flags & NSDFT) nfft_free(ths->index_sparse_to_full); /* center plan */ ths->center_nfft_plan->flags = ths->center_nfft_plan->flags ^ FG_PSI; nfft_finalize(ths->center_nfft_plan); /* the 1d and 2d nffts */ for(r=0;r<=X(log2i)(ths->act_nfft_plan->m);r++) { if(X(exp2i)(ths->J-r)>ths->act_nfft_plan->m) { ths->set_nfft_plan_2d[r].flags = ths->set_nfft_plan_2d[r].flags ^ FG_PSI; nfft_finalize(&(ths->set_nfft_plan_2d[r])); ths->set_nfft_plan_1d[r].flags = ths->set_nfft_plan_1d[r].flags ^ FG_PSI; nfft_finalize(&(ths->set_nfft_plan_1d[r])); } } /* finalize the small nffts */ ths->act_nfft_plan->my_fftw_plan2=ths->set_fftw_plan2[0]; ths->act_nfft_plan->my_fftw_plan1=ths->set_fftw_plan1[0]; for(r=1;r<=(ths->J+1)/2;r++) { fftw_destroy_plan(ths->set_fftw_plan2[r]); fftw_destroy_plan(ths->set_fftw_plan1[r]); } /* r=0 */ nfft_finalize(ths->act_nfft_plan); nfft_free(ths->set_nfft_plan_1d); nfft_free(ths->set_nfft_plan_2d); nfft_free(ths->set_fftw_plan2); nfft_free(ths->set_fftw_plan1); nfft_free(ths->x_102); nfft_free(ths->x_201); nfft_free(ths->x_120); nfft_free(ths->x_021); nfft_free(ths->f_hat); nfft_free(ths->f); } void nsfft_finalize(nsfft_plan *ths) { if(ths->d==2) nsfft_finalize_2d(ths); else nsfft_finalize_3d(ths); }
gpl-2.0
WildfireTeamPRJ/wildfire_stm32_iso_mini
02-野火ISO-MINI 程序/9、DMA-为CPU减负/User/usart/bsp_usart1.c
1
3991
/**
  ******************************************************************************
  * @file    bsp_usart1.c
  * @author  fire
  * @version V1.0
  * @date    2013-xx-xx
  * @brief   USART application board-support routines (comments translated
  *          from the original GBK-encoded Chinese).
  ******************************************************************************
  * @attention
  *
  * Platform : WildFire iSO-MINI STM32 development board
  * Forum    : http://www.chuxue123.com
  * Shop     : http://firestm32.taobao.com
  *
  ******************************************************************************
  */

#include "bsp_usart1.h"

/* DMA source buffer for USART1 TX; the application fills this buffer and the
 * DMA controller streams it to the USART data register. */
uint8_t SendBuff[SENDBUFF_SIZE];

/**
  * @brief  USART1 GPIO and mode configuration: 115200 baud, 8-N-1.
  * @param  None
  * @retval None
  */
void USART1_Config(void)
{
	GPIO_InitTypeDef GPIO_InitStructure;
	USART_InitTypeDef USART_InitStructure;

	/* config USART1 clock */
	RCC_APB2PeriphClockCmd(RCC_APB2Periph_USART1 | RCC_APB2Periph_GPIOA, ENABLE);

	/* USART1 GPIO config */
	/* Configure USART1 Tx (PA.09) as alternate function push-pull */
	GPIO_InitStructure.GPIO_Pin = GPIO_Pin_9;
	GPIO_InitStructure.GPIO_Mode = GPIO_Mode_AF_PP;
	GPIO_InitStructure.GPIO_Speed = GPIO_Speed_50MHz;
	GPIO_Init(GPIOA, &GPIO_InitStructure);

	/* Configure USART1 Rx (PA.10) as input floating */
	GPIO_InitStructure.GPIO_Pin = GPIO_Pin_10;
	GPIO_InitStructure.GPIO_Mode = GPIO_Mode_IN_FLOATING;
	GPIO_Init(GPIOA, &GPIO_InitStructure);

	/* USART1 mode config: 115200 baud, 8 data bits, 1 stop bit, no parity,
	 * no hardware flow control, TX and RX both enabled. */
	USART_InitStructure.USART_BaudRate = 115200;
	USART_InitStructure.USART_WordLength = USART_WordLength_8b;
	USART_InitStructure.USART_StopBits = USART_StopBits_1;
	USART_InitStructure.USART_Parity = USART_Parity_No ;
	USART_InitStructure.USART_HardwareFlowControl = USART_HardwareFlowControl_None;
	USART_InitStructure.USART_Mode = USART_Mode_Rx | USART_Mode_Tx;
	USART_Init(USART1, &USART_InitStructure);
	USART_Cmd(USART1, ENABLE);
}

/**
  * @brief  USART1 TX DMA configuration: memory to peripheral (USART1->DR).
  * @param  None
  * @retval None
  */
void USART1_DMA_Config(void)
{
	DMA_InitTypeDef DMA_InitStructure;

	/* Enable the DMA clock */
	RCC_AHBPeriphClockCmd(RCC_AHBPeriph_DMA1, ENABLE);

	//NVIC_Config();		// configure the DMA interrupt (disabled here)

	/* Peripheral address: USART data register */
	DMA_InitStructure.DMA_PeripheralBaseAddr = USART1_DR_Base;
	/* Memory address (pointer to the data to transfer) */
	DMA_InitStructure.DMA_MemoryBaseAddr = (u32)SendBuff;
	/* Direction: memory to peripheral */
	DMA_InitStructure.DMA_DIR = DMA_DIR_PeripheralDST;
	/* Transfer size: DMA_BufferSize=SENDBUFF_SIZE */
	DMA_InitStructure.DMA_BufferSize = SENDBUFF_SIZE;
	/* Peripheral address is not incremented */
	DMA_InitStructure.DMA_PeripheralInc = DMA_PeripheralInc_Disable;
	/* Memory address auto-increments */
	DMA_InitStructure.DMA_MemoryInc = DMA_MemoryInc_Enable;
	/* Peripheral data unit: byte */
	DMA_InitStructure.DMA_PeripheralDataSize = DMA_PeripheralDataSize_Byte;
	/* Memory data unit: 8 bit */
	DMA_InitStructure.DMA_MemoryDataSize = DMA_MemoryDataSize_Byte;
	/* DMA mode: circular (the transfer restarts automatically) */
	//DMA_InitStructure.DMA_Mode = DMA_Mode_Normal ;
	DMA_InitStructure.DMA_Mode = DMA_Mode_Circular;
	/* Priority: medium */
	DMA_InitStructure.DMA_Priority = DMA_Priority_Medium;
	/* Disable memory-to-memory transfer */
	DMA_InitStructure.DMA_M2M = DMA_M2M_Disable;
	/* Configure DMA1 channel 4 (the USART1_TX request line) */
	DMA_Init(DMA1_Channel4, &DMA_InitStructure);
	/* Enable the DMA channel */
	DMA_Cmd (DMA1_Channel4,ENABLE);

	//DMA_ITConfig(DMA1_Channel4,DMA_IT_TC,ENABLE);	// interrupt on DMA transfer complete (disabled here)
}

/// Retarget the C library printf to USART1.
int fputc(int ch, FILE *f)
{
	/* Send one byte of data to USART1 */
	USART_SendData(USART1, (uint8_t) ch);

	/* Wait until transmission completes */
	while (USART_GetFlagStatus(USART1, USART_FLAG_TC) == RESET);

	return (ch);
}

/// Retarget the C library scanf to USART1.
int fgetc(FILE *f)
{
	/* Wait for input data on USART1 */
	while (USART_GetFlagStatus(USART1, USART_FLAG_RXNE) == RESET);

	return (int)USART_ReceiveData(USART1);
}
/*********************************************END OF FILE**********************/
gpl-2.0
chronoxor/Depth
tests/containers/test-containers-CArray.cpp
1
7186
/*!
 *  \file test-containers-CArray.cpp Testing of the CArray class.
 *  \brief Testing of the CArray class.
 *  \author Ivan Shynkarenka aka 4ekucT
 *  \version 1.0
 *  \date 14.02.2007
 */
/*==========================================================================*/
/*
    FILE DESCRIPTION: Testing of the CArray class.

    AUTHOR: Ivan Shynkarenka aka 4ekucT
    GROUP: The NULL workgroup
    PROJECT: The Depth
    PART: Depth containers tests
    VERSION: 1.0
    CREATED: 14.02.2007 21:01:52

    EMAIL: chronoxor@gmail.com
    WWW: http://code.google.com/p/depth

    COPYRIGHT: (C) 2005-2010 The NULL workgroup. All Rights Reserved.
*/
/*--------------------------------------------------------------------------*/
/*
    Copyright (C) 2005-2010 The NULL workgroup.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*--------------------------------------------------------------------------*/
/*
    FILE ID: $Id$

    CHANGE LOG:

    $Log$
*/
/*==========================================================================*/
// Temporary headers.
#include <Depth/todo/serialize/serialize.hpp>
/*--------------------------------------------------------------------------*/
// Common headers.
#include "unit-test.hpp"
/*--------------------------------------------------------------------------*/
// Specific headers.
#include <Depth/include/containers/CArray.hpp>
/*==========================================================================*/
// Common namespaces.
using namespace NDepth;
using namespace NDepth::NUnitTest;
/*--------------------------------------------------------------------------*/
// Specific namespaces.
using namespace NDepth::NContainers;
using namespace NDepth::NSerialization;
/*==========================================================================*/
// Unit test for CArray<Tsint>: exercises fill (front/back insertion),
// iteration in both directions, reversal, removal, clearing, and
// serialization round-tripping.
class CTest :
  public CUnitTest
{
  // Launch unit test.
  EUnitTestResult test()
  { CALL
    CArray<Tsint> a;
    CArray<Tsint> b;

    UT_ASSERT_ZERO(a.getSize());
    UT_ASSERT_ZERO(b.getSize());

    // Fill the first array (direct order: insertFirst 6..1 yields 1..6).
    UT_ASSERT_CHECK_FILL(a, true);
    UT_ASSERT_EQUAL(a.getSize(), 6);

    // Fill the second array (reverse order: insertLast 1..6 yields 1..6 too).
    UT_ASSERT_CHECK_FILL(b, false);
    UT_ASSERT_EQUAL(b.getSize(), 6);

    // Show the first array.
    UT_ASSERT_CHECK_SHOW(a, true);

    // Show the second array.
    UT_ASSERT_CHECK_SHOW(b, false);

    // Copy arrays.
    CArray<Tsint> c(a);
    CArray<Tsint> d(b);

    UT_ASSERT_EQUAL(c.getSize(), 6);
    UT_ASSERT_EQUAL(d.getSize(), 6);

    // Reverse arrays.
    c.reverse();
    d.reverse();

    // Show the first reversed array.
    UT_ASSERT_CHECK_SHOW_REVERSED(c, true);

    // Show the last reversed array.
    UT_ASSERT_CHECK_SHOW_REVERSED(d, false);

    // Remove items from the first array.
    UT_ASSERT_CHECK_REMOVE(a, true);
    UT_ASSERT_ZERO(a.getSize());

    // Remove items from the second array.
    UT_ASSERT_CHECK_REMOVE(b, false);
    UT_ASSERT_ZERO(b.getSize());

    // Clear the first reversed array.
    UT_ASSERT(c.clear());
    UT_ASSERT_ZERO(c.getSize());

    // Clear the second reversed array.
    UT_ASSERT(d.clear());
    UT_ASSERT_ZERO(d.getSize());

    // Check serialization functionality.
    UT_ASSERT_CHECK_SERIALIZATION();

    UT_ACCEPT;
  }

  // Check the fill functionality of the CArray<Tsint>.
  // Direct order prepends 6,5,4,3,2,1 (result: 1..6); otherwise appends
  // 1,2,3,4,5,6 (same result) -- both paths leave the array as [1..6].
  void UT_ASSERT_CHECK_FILL(CArray<Tsint>& a_rArray, const Tbool a_cDirectOrder)
  { CALL
    UT_ASSERT(a_cDirectOrder ? a_rArray.insertFirst(6) : a_rArray.insertLast(1));
    UT_ASSERT(a_cDirectOrder ? a_rArray.insertFirst(5) : a_rArray.insertLast(2));
    UT_ASSERT(a_cDirectOrder ? a_rArray.insertFirst(4) : a_rArray.insertLast(3));
    UT_ASSERT(a_cDirectOrder ? a_rArray.insertFirst(3) : a_rArray.insertLast(4));
    UT_ASSERT(a_cDirectOrder ? a_rArray.insertFirst(2) : a_rArray.insertLast(5));
    UT_ASSERT(a_cDirectOrder ? a_rArray.insertFirst(1) : a_rArray.insertLast(6));
  }

  // Check the show functionality of the CArray<Tsint>.
  // Walks the array [1..6] forward (expecting 1,2,...,6) or backward
  // (expecting 6,5,...,1) and verifies every visited value.
  void UT_ASSERT_CHECK_SHOW(const CArray<Tsint>& a_crArray, const Tbool a_cDirectOrder)
  { CALL
    Tsint value = (a_cDirectOrder ? 1 : 6);
    CArray<Tsint>::TIteratorConst it = (a_cDirectOrder ? a_crArray.getItFirst() : a_crArray.getItLast());
    if (it.isValid())
    {
      do
      {
        UT_ASSERT_EQUAL(*it, value);
        (a_cDirectOrder) ? ++value : --value;
      } while ((a_cDirectOrder ? it.stepForward() : it.stepBackward()) == 1);
    }
    // After the full walk the counter must have stepped past the last item.
    UT_ASSERT_EQUAL(value, (a_cDirectOrder ? 7 : 0));
  }

  // Check the show reversed functionality of the CArray<Tsint>.
  // Same walk as UT_ASSERT_CHECK_SHOW, but on a reversed array [6..1].
  void UT_ASSERT_CHECK_SHOW_REVERSED(const CArray<Tsint>& a_crArray, const Tbool a_cDirectOrder)
  { CALL
    Tsint value = (a_cDirectOrder ? 6 : 1);
    CArray<Tsint>::TIteratorConst it = (a_cDirectOrder ? a_crArray.getItFirst() : a_crArray.getItLast());
    if (it.isValid())
    {
      do
      {
        UT_ASSERT_EQUAL(*it, value);
        (a_cDirectOrder) ? --value : ++value;
      } while ((a_cDirectOrder ? it.stepForward() : it.stepBackward()) == 1);
    }
    // After the full walk the counter must have stepped past the last item.
    UT_ASSERT_EQUAL(value, (a_cDirectOrder ? 0 : 7));
  }

  // Check the remove functionality of the CArray<Tsint>.
  // Removes items one by one from the front (removeForward) or back
  // (removeBackward), verifying each removed value, until the array is empty.
  void UT_ASSERT_CHECK_REMOVE(CArray<Tsint>& a_rArray, Tbool a_cDirectOrder, const Tbool a_cReversed = false)
  { CALL
    Tsint value = (a_cDirectOrder || a_cReversed) ? 1 : 6;
    CArray<Tsint>::TIterator it = (a_cDirectOrder ? a_rArray.getItFirst() : a_rArray.getItLast());
    while (it.isValid())
    {
      UT_ASSERT_EQUAL(*it, value);
      (a_cDirectOrder || a_cReversed) ? ++value : --value;
      UT_ASSERT(a_cDirectOrder ? it.removeForward() : it.removeBackward());
    }
    UT_ASSERT_EQUAL(value, ((a_cDirectOrder || a_cReversed) ? 7 : 0));
  }

  // Check the serialization functionality.
  // Fills an array, saves it through SaveArchive, clears it, reloads it
  // through LoadArchive, and verifies the contents survived the round trip.
  void UT_ASSERT_CHECK_SERIALIZATION()
  { CALL
    CArray<Tsint> instance;

    // Fill the array.
    UT_ASSERT_CHECK_FILL(instance, true);

    // Show the array before serialization.
    UT_ASSERT_CHECK_SHOW(instance, true);

    // Save instance.
    SaveArchive saver;
    UT_ASSERT(saver.open());
    UT_ASSERT(saver.serialize(instance));
    UT_ASSERT(saver.close());

    // Clear instance.
    UT_ASSERT(instance.clear());

    // Load instance.
    LoadArchive loader;
    UT_ASSERT(loader.open());
    UT_ASSERT(loader.serialize(instance));
    UT_ASSERT(loader.close());

    // Show the array after serialization.
    UT_ASSERT_CHECK_SHOW(instance, true);
  }
} the_Test;
/*==========================================================================*/
gpl-2.0
facebookexperimental/eden
eden/fs/store/hg/test/HgBackingStoreTest.cpp
1
4245
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This software may be used and distributed according to the terms of the
 * GNU General Public License version 2.
 */

#include <folly/executors/QueuedImmediateExecutor.h>
#include <folly/experimental/TestUtil.h>
#include <folly/portability/GMock.h>
#include <folly/portability/GTest.h>
#include <folly/test/TestUtils.h>
#include <stdexcept>

#include "eden/common/utils/ProcessNameCache.h"
#include "eden/fs/config/EdenConfig.h"
#include "eden/fs/model/Tree.h"
#include "eden/fs/store/BackingStoreLogger.h"
#include "eden/fs/store/MemoryLocalStore.h"
#include "eden/fs/store/ObjectFetchContext.h"
#include "eden/fs/store/ObjectStore.h"
#include "eden/fs/store/hg/HgBackingStore.h"
#include "eden/fs/store/hg/HgImporter.h"
#include "eden/fs/store/hg/HgQueuedBackingStore.h"
#include "eden/fs/telemetry/EdenStats.h"
#include "eden/fs/telemetry/NullStructuredLogger.h"
#include "eden/fs/testharness/HgRepo.h"
#include "eden/fs/utils/ImmediateFuture.h"

using namespace facebook::eden;
using namespace std::chrono_literals;

namespace {
constexpr size_t kTreeCacheMaximumSize = 1000; // bytes
constexpr size_t kTreeCacheMinimumEntries = 0;
} // namespace

// Creates a real on-disk hg repository (inside a temporary directory) with a
// single commit, so the backing store has something concrete to import.
struct TestRepo {
  folly::test::TemporaryDirectory testDir{"eden_hg_backing_store_test"};
  AbsolutePath testPath{testDir.path().string()};
  HgRepo repo{testPath + "repo"_pc};
  RootId commit1;
  Hash20 manifest1;

  TestRepo() {
    repo.hgInit();
    repo.enableTreeManifest(testPath + "cache"_pc);

    repo.mkdir("foo");
    repo.writeFile("foo/bar.txt", "bar\n");
    repo.mkdir("src");
    repo.writeFile("src/hello.txt", "world\n");
    repo.hg("add", "foo", "src");
    commit1 = repo.commit("Initial commit");
    manifest1 = repo.getManifestForCommit(commit1);
  }
};

// Test fixture wiring an ObjectStore on top of an HgQueuedBackingStore that
// imports from the TestRepo, with an in-memory local store and tree cache.
struct HgBackingStoreTest : TestRepo, ::testing::Test {
  HgBackingStoreTest() {
    rawEdenConfig->inMemoryTreeCacheSize.setValue(
        kTreeCacheMaximumSize, ConfigSource::Default, true);
    rawEdenConfig->inMemoryTreeCacheMinElements.setValue(
        kTreeCacheMinimumEntries, ConfigSource::Default, true);
    auto treeCache = TreeCache::create(edenConfig);
    objectStore = ObjectStore::create(
        localStore,
        backingStore,
        treeCache,
        stats,
        std::make_shared<ProcessNameCache>(),
        std::make_shared<NullStructuredLogger>(),
        rawEdenConfig,
        kPathMapDefaultCaseSensitive);
  }

  std::shared_ptr<MemoryLocalStore> localStore{
      std::make_shared<MemoryLocalStore>()};
  std::shared_ptr<EdenStats> stats{std::make_shared<EdenStats>()};
  HgImporter importer{repo.path(), stats};
  std::shared_ptr<EdenConfig> rawEdenConfig{EdenConfig::createTestEdenConfig()};
  std::shared_ptr<ReloadableConfig> edenConfig{
      std::make_shared<ReloadableConfig>(
          rawEdenConfig, ConfigReloadBehavior::NoReload)};
  std::shared_ptr<HgQueuedBackingStore> backingStore{
      std::make_shared<HgQueuedBackingStore>(
          localStore,
          stats,
          std::make_unique<HgBackingStore>(
              repo.path(), &importer, edenConfig, localStore, stats),
          edenConfig,
          std::make_shared<NullStructuredLogger>(),
          nullptr)};
  std::shared_ptr<ObjectStore> objectStore;
};

namespace {
// Returns the entry names of a tree, in iteration order, for comparison.
std::vector<PathComponent> getTreeNames(
    const std::shared_ptr<const Tree>& tree) {
  std::vector<PathComponent> names;
  for (const auto& entry : *tree) {
    names.emplace_back(entry.first);
  }
  return names;
}
} // namespace

TEST_F(
    HgBackingStoreTest,
    getTreeForCommit_reimports_tree_if_it_was_deleted_after_import) {
  // First import: the root tree should contain the two directories committed
  // in TestRepo.
  auto tree1 =
      objectStore->getRootTree(commit1, ObjectFetchContext::getNullContext())
          .get(0ms);
  EXPECT_TRUE(tree1);
  ASSERT_THAT(
      getTreeNames(tree1),
      ::testing::ElementsAre(PathComponent{"foo"}, PathComponent{"src"}));

  // Drop the imported trees from the local store so the next lookup has to go
  // back to the backing store.
  localStore->clearKeySpace(KeySpace::TreeFamily);
  auto tree2 =
      objectStore->getRootTree(commit1, ObjectFetchContext::getNullContext())
          .get(0ms);
  EXPECT_TRUE(tree2);
  // Fixed copy-paste bug: this previously asserted on tree1 again, so the
  // contents of the re-imported tree were never actually checked.
  ASSERT_THAT(
      getTreeNames(tree2),
      ::testing::ElementsAre(PathComponent{"foo"}, PathComponent{"src"}));
}
gpl-2.0
kendling/android_kernel_google_dragon
drivers/net/wireless/iwl7000/mac80211/tdls.c
1
55897
/* * mac80211 TDLS handling code * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2014, Intel Corporation * Copyright 2014 Intel Mobile Communications GmbH * Copyright 2015 Intel Deutschland GmbH * * This file is GPLv2 as found in COPYING. */ #include <linux/ieee80211.h> #include <linux/log2.h> #include <net/cfg80211.h> #include <linux/rtnetlink.h> #include "ieee80211_i.h" #include "driver-ops.h" /* give usermode some time for retries in setting up the TDLS session */ #define TDLS_PEER_SETUP_TIMEOUT (15 * HZ) void ieee80211_tdls_peer_del_work(struct work_struct *wk) { struct ieee80211_sub_if_data *sdata; struct ieee80211_local *local; sdata = container_of(wk, struct ieee80211_sub_if_data, u.mgd.tdls_peer_del_work.work); local = sdata->local; mutex_lock(&local->mtx); if (!is_zero_ether_addr(sdata->u.mgd.tdls_peer)) { tdls_dbg(sdata, "TDLS del peer %pM\n", sdata->u.mgd.tdls_peer); sta_info_destroy_addr(sdata, sdata->u.mgd.tdls_peer); eth_zero_addr(sdata->u.mgd.tdls_peer); } mutex_unlock(&local->mtx); } static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; bool chan_switch = local->hw.wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH; bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) && !ifmgd->tdls_wider_bw_prohibited; enum ieee80211_band band = ieee80211_get_sdata_band(sdata); struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; bool vht = sband && sband->vht_cap.vht_supported; u8 *pos = (void *)skb_put(skb, 10); *pos++ = WLAN_EID_EXT_CAPABILITY; *pos++ = 8; /* len */ *pos++ = 0x0; *pos++ = 0x0; *pos++ = 0x0; *pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0; *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED; *pos++ = 0; *pos++ = 0; *pos++ = (vht && wider_band) ? 
WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED : 0; } static u8 ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u16 start, u16 end, u16 spacing) { u8 subband_cnt = 0, ch_cnt = 0; struct ieee80211_channel *ch; struct cfg80211_chan_def chandef; int i, subband_start; struct wiphy *wiphy = sdata->local->hw.wiphy; for (i = start; i <= end; i += spacing) { if (!ch_cnt) subband_start = i; ch = ieee80211_get_channel(sdata->local->hw.wiphy, i); if (ch) { /* we will be active on the channel */ cfg80211_chandef_create(&chandef, ch, NL80211_CHAN_NO_HT); if (cfg80211_reg_can_beacon_relax(wiphy, &chandef, sdata->wdev.iftype)) { ch_cnt++; /* * check if the next channel is also part of * this allowed range */ continue; } } /* * we've reached the end of a range, with allowed channels * found */ if (ch_cnt) { u8 *pos = skb_put(skb, 2); *pos++ = ieee80211_frequency_to_channel(subband_start); *pos++ = ch_cnt; subband_cnt++; ch_cnt = 0; } } /* all channels in the requested range are allowed - add them here */ if (ch_cnt) { u8 *pos = skb_put(skb, 2); *pos++ = ieee80211_frequency_to_channel(subband_start); *pos++ = ch_cnt; subband_cnt++; } return subband_cnt; } static void ieee80211_tdls_add_supp_channels(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { /* * Add possible channels for TDLS. These are channels that are allowed * to be active. */ u8 subband_cnt; u8 *pos = skb_put(skb, 2); *pos++ = WLAN_EID_SUPPORTED_CHANNELS; /* * 5GHz and 2GHz channels numbers can overlap. Ignore this for now, as * this doesn't happen in real world scenarios. 
*/ /* 2GHz, with 5MHz spacing */ subband_cnt = ieee80211_tdls_add_subband(sdata, skb, 2412, 2472, 5); /* 5GHz, with 20MHz spacing */ subband_cnt += ieee80211_tdls_add_subband(sdata, skb, 5000, 5825, 20); /* length */ *pos = 2 * subband_cnt; } static void ieee80211_tdls_add_oper_classes(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { u8 *pos; u8 op_class; if (!ieee80211_chandef_to_operating_class(&sdata->vif.bss_conf.chandef, &op_class)) return; pos = skb_put(skb, 4); *pos++ = WLAN_EID_SUPPORTED_REGULATORY_CLASSES; *pos++ = 2; /* len */ *pos++ = op_class; *pos++ = op_class; /* give current operating class as alternate too */ } static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb) { u8 *pos = (void *)skb_put(skb, 3); *pos++ = WLAN_EID_BSS_COEX_2040; *pos++ = 1; /* len */ *pos++ = WLAN_BSS_COEX_INFORMATION_REQUEST; } static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata, u16 status_code) { /* The capability will be 0 when sending a failure code */ if (status_code != 0) return 0; if (ieee80211_get_sdata_band(sdata) == IEEE80211_BAND_2GHZ) { return WLAN_CAPABILITY_SHORT_SLOT_TIME | WLAN_CAPABILITY_SHORT_PREAMBLE; } return 0; } static void ieee80211_tdls_add_link_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, const u8 *peer, bool initiator) { struct ieee80211_tdls_lnkie *lnkid; const u8 *init_addr, *rsp_addr; if (initiator) { init_addr = sdata->vif.addr; rsp_addr = peer; } else { init_addr = peer; rsp_addr = sdata->vif.addr; } lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie)); lnkid->ie_type = WLAN_EID_LINK_ID; lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2; memcpy(lnkid->bssid, sdata->u.mgd.bssid, ETH_ALEN); memcpy(lnkid->init_sta, init_addr, ETH_ALEN); memcpy(lnkid->resp_sta, rsp_addr, ETH_ALEN); } static void ieee80211_tdls_add_aid(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 *pos = (void *)skb_put(skb, 4); *pos++ = 
WLAN_EID_AID; *pos++ = 2; /* len */ put_unaligned_le16(ifmgd->aid, pos); } /* translate numbering in the WMM parameter IE to the mac80211 notation */ static enum ieee80211_ac_numbers ieee80211_ac_from_wmm(int ac) { switch (ac) { default: WARN_ON_ONCE(1); case 0: return IEEE80211_AC_BE; case 1: return IEEE80211_AC_BK; case 2: return IEEE80211_AC_VI; case 3: return IEEE80211_AC_VO; } } static u8 ieee80211_wmm_aci_aifsn(int aifsn, bool acm, int aci) { u8 ret; ret = aifsn & 0x0f; if (acm) ret |= 0x10; ret |= (aci << 5) & 0x60; return ret; } static u8 ieee80211_wmm_ecw(u16 cw_min, u16 cw_max) { return ((ilog2(cw_min + 1) << 0x0) & 0x0f) | ((ilog2(cw_max + 1) << 0x4) & 0xf0); } static void ieee80211_tdls_add_wmm_param_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_wmm_param_ie *wmm; struct ieee80211_tx_queue_params *txq; int i; wmm = (void *)skb_put(skb, sizeof(*wmm)); memset(wmm, 0, sizeof(*wmm)); wmm->element_id = WLAN_EID_VENDOR_SPECIFIC; wmm->len = sizeof(*wmm) - 2; wmm->oui[0] = 0x00; /* Microsoft OUI 00:50:F2 */ wmm->oui[1] = 0x50; wmm->oui[2] = 0xf2; wmm->oui_type = 2; /* WME */ wmm->oui_subtype = 1; /* WME param */ wmm->version = 1; /* WME ver */ wmm->qos_info = 0; /* U-APSD not in use */ /* * Use the EDCA parameters defined for the BSS, or default if the AP * doesn't support it, as mandated by 802.11-2012 section 10.22.4 */ for (i = 0; i < IEEE80211_NUM_ACS; i++) { txq = &sdata->tx_conf[ieee80211_ac_from_wmm(i)]; wmm->ac[i].aci_aifsn = ieee80211_wmm_aci_aifsn(txq->aifs, txq->acm, i); wmm->ac[i].cw = ieee80211_wmm_ecw(txq->cw_min, txq->cw_max); wmm->ac[i].txop_limit = cpu_to_le16(txq->txop); } } static void ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { /* IEEE802.11ac-2013 Table E-4 */ u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 }; struct cfg80211_chan_def uc = sta->tdls_chandef; enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta); int i; /* only 
support upgrading non-narrow channels up to 80Mhz */ if (max_width == NL80211_CHAN_WIDTH_5 || max_width == NL80211_CHAN_WIDTH_10) return; if (max_width > NL80211_CHAN_WIDTH_80) max_width = NL80211_CHAN_WIDTH_80; if (uc.width == max_width) return; /* * Channel usage constrains in the IEEE802.11ac-2013 specification only * allow expanding a 20MHz channel to 80MHz in a single way. In * addition, there are no 40MHz allowed channels that are not part of * the allowed 80MHz range in the 5GHz spectrum (the relevant one here). */ for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++) if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) { uc.center_freq1 = centers_80mhz[i]; uc.width = NL80211_CHAN_WIDTH_80; break; } if (!uc.center_freq1) return; /* proceed to downgrade the chandef until usable or the same */ while (uc.width > max_width && !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc, sdata->wdev.iftype)) ieee80211_chandef_downgrade(&uc); if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) { tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n", sta->tdls_chandef.width, uc.width); /* * the station is not yet authorized when BW upgrade is done, * locking is not required */ sta->tdls_chandef = uc; } } static void ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, const u8 *peer, u8 action_code, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { enum ieee80211_band band = ieee80211_get_sdata_band(sdata); struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; struct sta_info *sta = NULL; size_t offset = 0, noffset; u8 *pos; ieee80211_add_srates_ie(sdata, skb, false, band); ieee80211_add_ext_srates_ie(sdata, skb, false, band); ieee80211_tdls_add_supp_channels(sdata, skb); /* add any custom IEs that go before Extended Capabilities */ if (extra_ies_len) { static const u8 before_ext_cap[] = { WLAN_EID_SUPP_RATES, 
WLAN_EID_COUNTRY, WLAN_EID_EXT_SUPP_RATES, WLAN_EID_SUPPORTED_CHANNELS, WLAN_EID_RSN, }; noffset = ieee80211_ie_split(extra_ies, extra_ies_len, before_ext_cap, ARRAY_SIZE(before_ext_cap), offset); pos = skb_put(skb, noffset - offset); memcpy(pos, extra_ies + offset, noffset - offset); offset = noffset; } ieee80211_tdls_add_ext_capab(sdata, skb); /* add the QoS element if we support it */ if (local->hw.queues >= IEEE80211_NUM_ACS && action_code != WLAN_PUB_ACTION_TDLS_DISCOVER_RES) ieee80211_add_wmm_info_ie(skb_put(skb, 9), 0); /* no U-APSD */ /* add any custom IEs that go before HT capabilities */ if (extra_ies_len) { static const u8 before_ht_cap[] = { WLAN_EID_SUPP_RATES, WLAN_EID_COUNTRY, WLAN_EID_EXT_SUPP_RATES, WLAN_EID_SUPPORTED_CHANNELS, WLAN_EID_RSN, WLAN_EID_EXT_CAPABILITY, WLAN_EID_QOS_CAPA, WLAN_EID_FAST_BSS_TRANSITION, WLAN_EID_TIMEOUT_INTERVAL, WLAN_EID_SUPPORTED_REGULATORY_CLASSES, }; noffset = ieee80211_ie_split(extra_ies, extra_ies_len, before_ht_cap, ARRAY_SIZE(before_ht_cap), offset); pos = skb_put(skb, noffset - offset); memcpy(pos, extra_ies + offset, noffset - offset); offset = noffset; } mutex_lock(&local->sta_mtx); /* we should have the peer STA if we're already responding */ if (action_code == WLAN_TDLS_SETUP_RESPONSE) { sta = sta_info_get(sdata, peer); if (WARN_ON_ONCE(!sta)) { mutex_unlock(&local->sta_mtx); return; } sta->tdls_chandef = sdata->vif.bss_conf.chandef; } ieee80211_tdls_add_oper_classes(sdata, skb); /* * with TDLS we can switch channels, and HT-caps are not necessarily * the same on all bands. The specification limits the setup to a * single HT-cap, so use the current band for now. 
*/ sband = local->hw.wiphy->bands[band]; memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); if ((action_code == WLAN_TDLS_SETUP_REQUEST || action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) && ht_cap.ht_supported) { ieee80211_apply_htcap_overrides(sdata, &ht_cap); /* disable SMPS in TDLS initiator */ ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap); } else if (action_code == WLAN_TDLS_SETUP_RESPONSE && ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) { /* the peer caps are already intersected with our own */ memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap)); pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap); } if (ht_cap.ht_supported && (ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) ieee80211_tdls_add_bss_coex_ie(skb); ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); /* add any custom IEs that go before VHT capabilities */ if (extra_ies_len) { static const u8 before_vht_cap[] = { WLAN_EID_SUPP_RATES, WLAN_EID_COUNTRY, WLAN_EID_EXT_SUPP_RATES, WLAN_EID_SUPPORTED_CHANNELS, WLAN_EID_RSN, WLAN_EID_EXT_CAPABILITY, WLAN_EID_QOS_CAPA, WLAN_EID_FAST_BSS_TRANSITION, WLAN_EID_TIMEOUT_INTERVAL, WLAN_EID_SUPPORTED_REGULATORY_CLASSES, WLAN_EID_MULTI_BAND, }; noffset = ieee80211_ie_split(extra_ies, extra_ies_len, before_vht_cap, ARRAY_SIZE(before_vht_cap), offset); pos = skb_put(skb, noffset - offset); memcpy(pos, extra_ies + offset, noffset - offset); offset = noffset; } /* build the VHT-cap similarly to the HT-cap */ memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap)); if ((action_code == WLAN_TDLS_SETUP_REQUEST || action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) && vht_cap.vht_supported) { ieee80211_apply_vhtcap_overrides(sdata, &vht_cap); /* the AID is present only when VHT is implemented */ if (action_code == WLAN_TDLS_SETUP_REQUEST) ieee80211_tdls_add_aid(sdata, skb); pos 
= skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2); ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap); } else if (action_code == WLAN_TDLS_SETUP_RESPONSE && vht_cap.vht_supported && sta->sta.vht_cap.vht_supported) { /* the peer caps are already intersected with our own */ memcpy(&vht_cap, &sta->sta.vht_cap, sizeof(vht_cap)); /* the AID is present only when VHT is implemented */ ieee80211_tdls_add_aid(sdata, skb); pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2); ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap); /* * if both peers support WIDER_BW, we can expand the chandef to * a wider compatible one, up to 80MHz */ if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) ieee80211_tdls_chandef_vht_upgrade(sdata, sta); } mutex_unlock(&local->sta_mtx); /* add any remaining IEs */ if (extra_ies_len) { noffset = extra_ies_len; pos = skb_put(skb, noffset - offset); memcpy(pos, extra_ies + offset, noffset - offset); } } static void ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, const u8 *peer, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; size_t offset = 0, noffset; struct sta_info *sta, *ap_sta; enum ieee80211_band band = ieee80211_get_sdata_band(sdata); u8 *pos; mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, peer); ap_sta = sta_info_get(sdata, ifmgd->bssid); if (WARN_ON_ONCE(!sta || !ap_sta)) { mutex_unlock(&local->sta_mtx); return; } sta->tdls_chandef = sdata->vif.bss_conf.chandef; /* add any custom IEs that go before the QoS IE */ if (extra_ies_len) { static const u8 before_qos[] = { WLAN_EID_RSN, }; noffset = ieee80211_ie_split(extra_ies, extra_ies_len, before_qos, ARRAY_SIZE(before_qos), offset); pos = skb_put(skb, noffset - offset); memcpy(pos, extra_ies + offset, noffset - offset); offset = noffset; } /* add the QoS param IE if both the peer and we support it */ if (local->hw.queues >= 
IEEE80211_NUM_ACS && sta->sta.wme) ieee80211_tdls_add_wmm_param_ie(sdata, skb); /* add any custom IEs that go before HT operation */ if (extra_ies_len) { static const u8 before_ht_op[] = { WLAN_EID_RSN, WLAN_EID_QOS_CAPA, WLAN_EID_FAST_BSS_TRANSITION, WLAN_EID_TIMEOUT_INTERVAL, }; noffset = ieee80211_ie_split(extra_ies, extra_ies_len, before_ht_op, ARRAY_SIZE(before_ht_op), offset); pos = skb_put(skb, noffset - offset); memcpy(pos, extra_ies + offset, noffset - offset); offset = noffset; } /* * if HT support is only added in TDLS, we need an HT-operation IE. * add the IE as required by IEEE802.11-2012 9.23.3.2. */ if (!ap_sta->sta.ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) { u16 prot = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED | IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT | IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT; pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation)); ieee80211_ie_build_ht_oper(pos, &sta->sta.ht_cap, &sdata->vif.bss_conf.chandef, prot, true); } ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); /* only include VHT-operation if not on the 2.4GHz band */ if (band != IEEE80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) { /* * if both peers support WIDER_BW, we can expand the chandef to * a wider compatible one, up to 80MHz */ if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) ieee80211_tdls_chandef_vht_upgrade(sdata, sta); pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation)); ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap, &sta->tdls_chandef); } mutex_unlock(&local->sta_mtx); /* add any remaining IEs */ if (extra_ies_len) { noffset = extra_ies_len; pos = skb_put(skb, noffset - offset); memcpy(pos, extra_ies + offset, noffset - offset); } } static void ieee80211_tdls_add_chan_switch_req_ies(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, const u8 *peer, bool initiator, const u8 *extra_ies, size_t extra_ies_len, u8 oper_class, struct cfg80211_chan_def *chandef) { struct ieee80211_tdls_data *tf; size_t offset = 0, 
noffset; u8 *pos; if (WARN_ON_ONCE(!chandef)) return; tf = (void *)skb->data; tf->u.chan_switch_req.target_channel = ieee80211_frequency_to_channel(chandef->chan->center_freq); tf->u.chan_switch_req.oper_class = oper_class; if (extra_ies_len) { static const u8 before_lnkie[] = { WLAN_EID_SECONDARY_CHANNEL_OFFSET, }; noffset = ieee80211_ie_split(extra_ies, extra_ies_len, before_lnkie, ARRAY_SIZE(before_lnkie), offset); pos = skb_put(skb, noffset - offset); memcpy(pos, extra_ies + offset, noffset - offset); offset = noffset; } ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); /* add any remaining IEs */ if (extra_ies_len) { noffset = extra_ies_len; pos = skb_put(skb, noffset - offset); memcpy(pos, extra_ies + offset, noffset - offset); } } static void ieee80211_tdls_add_chan_switch_resp_ies(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, const u8 *peer, u16 status_code, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { if (status_code == 0) ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); if (extra_ies_len) memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len); } static void ieee80211_tdls_add_ies(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, const u8 *peer, u8 action_code, u16 status_code, bool initiator, const u8 *extra_ies, size_t extra_ies_len, u8 oper_class, struct cfg80211_chan_def *chandef) { switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: case WLAN_TDLS_SETUP_RESPONSE: case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: if (status_code == 0) ieee80211_tdls_add_setup_start_ies(sdata, skb, peer, action_code, initiator, extra_ies, extra_ies_len); break; case WLAN_TDLS_SETUP_CONFIRM: if (status_code == 0) ieee80211_tdls_add_setup_cfm_ies(sdata, skb, peer, initiator, extra_ies, extra_ies_len); break; case WLAN_TDLS_TEARDOWN: case WLAN_TDLS_DISCOVERY_REQUEST: if (extra_ies_len) memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len); if (status_code == 0 || action_code == WLAN_TDLS_TEARDOWN) 
ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); break; case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: ieee80211_tdls_add_chan_switch_req_ies(sdata, skb, peer, initiator, extra_ies, extra_ies_len, oper_class, chandef); break; case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: ieee80211_tdls_add_chan_switch_resp_ies(sdata, skb, peer, status_code, initiator, extra_ies, extra_ies_len); break; } } static int ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, struct sk_buff *skb) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_tdls_data *tf; tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u)); memcpy(tf->da, peer, ETH_ALEN); memcpy(tf->sa, sdata->vif.addr, ETH_ALEN); tf->ether_type = cpu_to_be16(ETH_P_TDLS); tf->payload_type = WLAN_TDLS_SNAP_RFTYPE; /* network header is after the ethernet header */ skb_set_network_header(skb, ETH_HLEN); switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: tf->category = WLAN_CATEGORY_TDLS; tf->action_code = WLAN_TDLS_SETUP_REQUEST; skb_put(skb, sizeof(tf->u.setup_req)); tf->u.setup_req.dialog_token = dialog_token; tf->u.setup_req.capability = cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata, status_code)); break; case WLAN_TDLS_SETUP_RESPONSE: tf->category = WLAN_CATEGORY_TDLS; tf->action_code = WLAN_TDLS_SETUP_RESPONSE; skb_put(skb, sizeof(tf->u.setup_resp)); tf->u.setup_resp.status_code = cpu_to_le16(status_code); tf->u.setup_resp.dialog_token = dialog_token; tf->u.setup_resp.capability = cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata, status_code)); break; case WLAN_TDLS_SETUP_CONFIRM: tf->category = WLAN_CATEGORY_TDLS; tf->action_code = WLAN_TDLS_SETUP_CONFIRM; skb_put(skb, sizeof(tf->u.setup_cfm)); tf->u.setup_cfm.status_code = cpu_to_le16(status_code); tf->u.setup_cfm.dialog_token = dialog_token; break; case WLAN_TDLS_TEARDOWN: tf->category = WLAN_CATEGORY_TDLS; tf->action_code = WLAN_TDLS_TEARDOWN; 
skb_put(skb, sizeof(tf->u.teardown)); tf->u.teardown.reason_code = cpu_to_le16(status_code); break; case WLAN_TDLS_DISCOVERY_REQUEST: tf->category = WLAN_CATEGORY_TDLS; tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST; skb_put(skb, sizeof(tf->u.discover_req)); tf->u.discover_req.dialog_token = dialog_token; break; case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: tf->category = WLAN_CATEGORY_TDLS; tf->action_code = WLAN_TDLS_CHANNEL_SWITCH_REQUEST; skb_put(skb, sizeof(tf->u.chan_switch_req)); break; case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: tf->category = WLAN_CATEGORY_TDLS; tf->action_code = WLAN_TDLS_CHANNEL_SWITCH_RESPONSE; skb_put(skb, sizeof(tf->u.chan_switch_resp)); tf->u.chan_switch_resp.status_code = cpu_to_le16(status_code); break; default: return -EINVAL; } return 0; } static int ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, struct sk_buff *skb) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_mgmt *mgmt; mgmt = (void *)skb_put(skb, 24); memset(mgmt, 0, 24); memcpy(mgmt->da, peer, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); switch (action_code) { case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp)); mgmt->u.action.category = WLAN_CATEGORY_PUBLIC; mgmt->u.action.u.tdls_discover_resp.action_code = WLAN_PUB_ACTION_TDLS_DISCOVER_RES; mgmt->u.action.u.tdls_discover_resp.dialog_token = dialog_token; mgmt->u.action.u.tdls_discover_resp.capability = cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata, status_code)); break; default: return -EINVAL; } return 0; } static struct sk_buff * ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, bool initiator, const u8 *extra_ies, size_t 
extra_ies_len, u8 oper_class, struct cfg80211_chan_def *chandef) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; int ret; skb = netdev_alloc_skb(sdata->dev, local->hw.extra_tx_headroom + max(sizeof(struct ieee80211_mgmt), sizeof(struct ieee80211_tdls_data)) + 50 + /* supported rates */ 10 + /* ext capab */ 26 + /* max(WMM-info, WMM-param) */ 2 + max(sizeof(struct ieee80211_ht_cap), sizeof(struct ieee80211_ht_operation)) + 2 + max(sizeof(struct ieee80211_vht_cap), sizeof(struct ieee80211_vht_operation)) + 50 + /* supported channels */ 3 + /* 40/20 BSS coex */ 4 + /* AID */ 4 + /* oper classes */ extra_ies_len + sizeof(struct ieee80211_tdls_lnkie)); if (!skb) return NULL; skb_reserve(skb, local->hw.extra_tx_headroom); switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: case WLAN_TDLS_SETUP_RESPONSE: case WLAN_TDLS_SETUP_CONFIRM: case WLAN_TDLS_TEARDOWN: case WLAN_TDLS_DISCOVERY_REQUEST: case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: ret = ieee80211_prep_tdls_encap_data(local->hw.wiphy, sdata->dev, peer, action_code, dialog_token, status_code, skb); break; case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: ret = ieee80211_prep_tdls_direct(local->hw.wiphy, sdata->dev, peer, action_code, dialog_token, status_code, skb); break; default: ret = -ENOTSUPP; break; } if (ret < 0) goto fail; ieee80211_tdls_add_ies(sdata, skb, peer, action_code, status_code, initiator, extra_ies, extra_ies_len, oper_class, chandef); return skb; fail: dev_kfree_skb(skb); return NULL; } static int ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *extra_ies, size_t extra_ies_len, u8 oper_class, struct cfg80211_chan_def *chandef) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sk_buff *skb = NULL; struct sta_info *sta; u32 flags = 0; int ret = 0; rcu_read_lock(); sta = sta_info_get(sdata, 
peer); /* infer the initiator if we can, to support old userspace */ switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: if (sta) { set_sta_flag(sta, WLAN_STA_TDLS_INITIATOR); sta->sta.tdls_initiator = false; } /* fall-through */ case WLAN_TDLS_SETUP_CONFIRM: case WLAN_TDLS_DISCOVERY_REQUEST: initiator = true; break; case WLAN_TDLS_SETUP_RESPONSE: /* * In some testing scenarios, we send a request and response. * Make the last packet sent take effect for the initiator * value. */ if (sta) { clear_sta_flag(sta, WLAN_STA_TDLS_INITIATOR); sta->sta.tdls_initiator = true; } /* fall-through */ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: initiator = false; break; case WLAN_TDLS_TEARDOWN: case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: /* any value is ok */ break; default: ret = -ENOTSUPP; break; } if (sta && test_sta_flag(sta, WLAN_STA_TDLS_INITIATOR)) initiator = true; rcu_read_unlock(); if (ret < 0) goto fail; skb = ieee80211_tdls_build_mgmt_packet_data(sdata, peer, action_code, dialog_token, status_code, initiator, extra_ies, extra_ies_len, oper_class, chandef); if (!skb) { ret = -EINVAL; goto fail; } if (action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) { ieee80211_tx_skb(sdata, skb); return 0; } /* * According to 802.11z: Setup req/resp are sent in AC_BK, otherwise * we should default to AC_VI. */ switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: case WLAN_TDLS_SETUP_RESPONSE: skb_set_queue_mapping(skb, IEEE80211_AC_BK); skb->priority = 2; break; default: skb_set_queue_mapping(skb, IEEE80211_AC_VI); skb->priority = 5; break; } /* * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress. * Later, if no ACK is returned from peer, we will re-send the teardown * packet through the AP. 
*/ if ((action_code == WLAN_TDLS_TEARDOWN) && ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) { bool try_resend; /* Should we keep skb for possible resend */ /* If not sending directly to peer - no point in keeping skb */ rcu_read_lock(); sta = sta_info_get(sdata, peer); try_resend = sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH); rcu_read_unlock(); spin_lock_bh(&sdata->u.mgd.teardown_lock); if (try_resend && !sdata->u.mgd.teardown_skb) { /* Mark it as requiring TX status callback */ flags |= IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_INTFL_MLME_CONN_TX; /* * skb is copied since mac80211 will later set * properties that might not be the same as the AP, * such as encryption, QoS, addresses, etc. * * No problem if skb_copy() fails, so no need to check. */ sdata->u.mgd.teardown_skb = skb_copy(skb, GFP_ATOMIC); sdata->u.mgd.orig_teardown_skb = skb; } spin_unlock_bh(&sdata->u.mgd.teardown_lock); } /* disable bottom halves when entering the Tx path */ local_bh_disable(); __ieee80211_subif_start_xmit(skb, dev, flags); local_bh_enable(); return ret; fail: dev_kfree_skb(skb); return ret; } static int ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; enum ieee80211_smps_mode smps_mode = sdata->u.mgd.driver_smps_mode; int ret; /* don't support setup with forced SMPS mode that's not off */ if (smps_mode != IEEE80211_SMPS_AUTOMATIC && smps_mode != IEEE80211_SMPS_OFF) { tdls_dbg(sdata, "Aborting TDLS setup due to SMPS mode %d\n", smps_mode); return -ENOTSUPP; } mutex_lock(&local->mtx); /* we don't support concurrent TDLS peer setups */ if (!is_zero_ether_addr(sdata->u.mgd.tdls_peer) && !ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) { ret = -EBUSY; goto out_unlock; } /* * make 
sure we have a STA representing the peer so we drop or buffer * non-TDLS-setup frames to the peer. We can't send other packets * during setup through the AP path. * Allow error packets to be sent - sometimes we don't even add a STA * before failing the setup. */ if (status_code == 0) { rcu_read_lock(); if (!sta_info_get(sdata, peer)) { rcu_read_unlock(); ret = -ENOLINK; goto out_unlock; } rcu_read_unlock(); } ieee80211_flush_queues(local, sdata, false); memcpy(sdata->u.mgd.tdls_peer, peer, ETH_ALEN); mutex_unlock(&local->mtx); /* we cannot take the mutex while preparing the setup packet */ ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, extra_ies, extra_ies_len, 0, NULL); if (ret < 0) { mutex_lock(&local->mtx); eth_zero_addr(sdata->u.mgd.tdls_peer); mutex_unlock(&local->mtx); return ret; } ieee80211_queue_delayed_work(&sdata->local->hw, &sdata->u.mgd.tdls_peer_del_work, TDLS_PEER_SETUP_TIMEOUT); return 0; out_unlock: mutex_unlock(&local->mtx); return ret; } static int ieee80211_tdls_mgmt_teardown(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; int ret; /* * No packets can be transmitted to the peer via the AP during setup - * the STA is set as a TDLS peer, but is not authorized. * During teardown, we prevent direct transmissions by stopping the * queues and flushing all direct packets. 
*/ ieee80211_stop_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN); ieee80211_flush_queues(local, sdata, false); ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, extra_ies, extra_ies_len, 0, NULL); if (ret < 0) sdata_err(sdata, "Failed sending TDLS teardown packet %d\n", ret); /* * Remove the STA AUTH flag to force further traffic through the AP. If * the STA was unreachable, it was already removed. */ rcu_read_lock(); sta = sta_info_get(sdata, peer); if (sta) clear_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH); rcu_read_unlock(); ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN); return 0; } int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, const_since_3_16 u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, #if CFG80211_VERSION >= KERNEL_VERSION(3,15,0) u32 peer_capability, #endif #if CFG80211_VERSION >= KERNEL_VERSION(3,17,0) bool initiator, #endif const u8 *extra_ies, size_t extra_ies_len) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); #if CFG80211_VERSION < KERNEL_VERSION(3,15,0) u32 peer_capability = 0; #endif #if CFG80211_VERSION < KERNEL_VERSION(3,17,0) bool initiator = false; #endif int ret; if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)) return -ENOTSUPP; /* make sure we are in managed mode, and associated */ if (sdata->vif.type != NL80211_IFTYPE_STATION || !sdata->u.mgd.associated) return -EINVAL; switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: case WLAN_TDLS_SETUP_RESPONSE: ret = ieee80211_tdls_mgmt_setup(wiphy, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, extra_ies, extra_ies_len); break; case WLAN_TDLS_TEARDOWN: ret = ieee80211_tdls_mgmt_teardown(wiphy, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, extra_ies, extra_ies_len); break; case WLAN_TDLS_DISCOVERY_REQUEST: /* * Protect the discovery so we can hear the 
TDLS discovery * response frame. It is transmitted directly and not buffered * by the AP. */ drv_mgd_protect_tdls_discover(sdata->local, sdata); /* fall-through */ case WLAN_TDLS_SETUP_CONFIRM: case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: /* no special handling */ ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code, dialog_token, status_code, peer_capability, initiator, extra_ies, extra_ies_len, 0, NULL); break; default: ret = -EOPNOTSUPP; break; } tdls_dbg(sdata, "TDLS mgmt action %d peer %pM status %d\n", action_code, peer, ret); return ret; } static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *ctx; mutex_lock(&local->chanctx_mtx); conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); if (conf) { ctx = container_of(conf, struct ieee80211_chanctx, conf); ieee80211_recalc_chanctx_chantype(local, ctx); } mutex_unlock(&local->chanctx_mtx); } static int iee80211_tdls_have_ht_peers(struct ieee80211_sub_if_data *sdata) { struct sta_info *sta; bool result = false; rcu_read_lock(); list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || !test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH) || !sta->sta.ht_cap.ht_supported) continue; result = true; break; } rcu_read_unlock(); return result; } static void iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; bool tdls_ht; u16 protection = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED | IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT | IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT; u16 opmode; /* Nothing to do if the BSS connection uses HT */ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) return; tdls_ht = (sta && sta->sta.ht_cap.ht_supported) || 
iee80211_tdls_have_ht_peers(sdata); opmode = sdata->vif.bss_conf.ht_operation_mode; if (tdls_ht) opmode |= protection; else opmode &= ~protection; if (opmode == sdata->vif.bss_conf.ht_operation_mode) return; sdata->vif.bss_conf.ht_operation_mode = opmode; ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT); } int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, const_since_3_16 u8 *peer, enum nl80211_tdls_operation oper) { struct sta_info *sta; struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; int ret; if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)) return -ENOTSUPP; if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EINVAL; switch (oper) { case NL80211_TDLS_ENABLE_LINK: case NL80211_TDLS_DISABLE_LINK: break; case NL80211_TDLS_TEARDOWN: case NL80211_TDLS_SETUP: case NL80211_TDLS_DISCOVERY_REQ: /* We don't support in-driver setup/teardown/discovery */ return -ENOTSUPP; } /* protect possible bss_conf changes and avoid concurrency in * ieee80211_bss_info_change_notify() */ sdata_lock(sdata); mutex_lock(&local->mtx); tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer); switch (oper) { case NL80211_TDLS_ENABLE_LINK: if (sdata->vif.csa_active) { tdls_dbg(sdata, "TDLS: disallow link during CSA\n"); ret = -EBUSY; break; } iee80211_tdls_recalc_chanctx(sdata); mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, peer); if (!sta) { mutex_unlock(&local->sta_mtx); ret = -ENOLINK; break; } iee80211_tdls_recalc_ht_protection(sdata, sta); set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH); mutex_unlock(&local->sta_mtx); WARN_ON_ONCE(is_zero_ether_addr(sdata->u.mgd.tdls_peer) || !ether_addr_equal(sdata->u.mgd.tdls_peer, peer)); ret = 0; break; case NL80211_TDLS_DISABLE_LINK: /* * The teardown message in ieee80211_tdls_mgmt_teardown() was * created while the queues were stopped, so it might still be * pending. 
Before flushing the queues we need to be sure the * message is handled by the tasklet handling pending messages, * otherwise we might start destroying the station before * sending the teardown packet. * Note that this only forces the tasklet to flush pendings - * not to stop the tasklet from rescheduling itself. */ tasklet_kill(&local->tx_pending_tasklet); /* flush a potentially queued teardown packet */ ieee80211_flush_queues(local, sdata, false); ret = sta_info_destroy_addr(sdata, peer); mutex_lock(&local->sta_mtx); iee80211_tdls_recalc_ht_protection(sdata, NULL); mutex_unlock(&local->sta_mtx); iee80211_tdls_recalc_chanctx(sdata); break; default: ret = -ENOTSUPP; break; } if (ret == 0 && ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) { cancel_delayed_work(&sdata->u.mgd.tdls_peer_del_work); eth_zero_addr(sdata->u.mgd.tdls_peer); } if (ret == 0) ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.request_smps_work); mutex_unlock(&local->mtx); sdata_unlock(sdata); return ret; } void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer, enum nl80211_tdls_operation oper, u16 reason_code, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) { sdata_err(sdata, "Discarding TDLS oper %d - not STA or disconnected\n", oper); return; } cfg80211_tdls_oper_request(sdata->dev, peer, oper, reason_code, gfp); } EXPORT_SYMBOL(ieee80211_tdls_oper_request); static void iee80211_tdls_add_ch_switch_timing(u8 *buf, u16 switch_time, u16 switch_timeout) { struct ieee80211_ch_switch_timing *ch_sw; *buf++ = WLAN_EID_CHAN_SWITCH_TIMING; *buf++ = sizeof(struct ieee80211_ch_switch_timing); ch_sw = (void *)buf; ch_sw->switch_time = cpu_to_le16(switch_time); ch_sw->switch_timeout = cpu_to_le16(switch_timeout); } /* find switch timing IE in SKB ready for Tx */ static const u8 *ieee80211_tdls_find_sw_timing_ie(struct sk_buff *skb) { struct ieee80211_tdls_data *tf; const u8 *ie_start; /* * Get the 
offset for the new location of the switch timing IE. * The SKB network header will now point to the "payload_type" * element of the TDLS data frame struct. */ tf = container_of(skb->data + skb_network_offset(skb), struct ieee80211_tdls_data, payload_type); ie_start = tf->u.chan_switch_req.variable; return cfg80211_find_ie(WLAN_EID_CHAN_SWITCH_TIMING, ie_start, skb->len - (ie_start - skb->data)); } static struct sk_buff * ieee80211_tdls_ch_sw_tmpl_get(struct sta_info *sta, u8 oper_class, struct cfg80211_chan_def *chandef, u32 *ch_sw_tm_ie_offset) { struct ieee80211_sub_if_data *sdata = sta->sdata; u8 extra_ies[2 + sizeof(struct ieee80211_sec_chan_offs_ie) + 2 + sizeof(struct ieee80211_ch_switch_timing)]; int extra_ies_len = 2 + sizeof(struct ieee80211_ch_switch_timing); u8 *pos = extra_ies; struct sk_buff *skb; /* * if chandef points to a wide channel add a Secondary-Channel * Offset information element */ if (chandef->width == NL80211_CHAN_WIDTH_40) { struct ieee80211_sec_chan_offs_ie *sec_chan_ie; bool ht40plus; *pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET; *pos++ = sizeof(*sec_chan_ie); sec_chan_ie = (void *)pos; ht40plus = cfg80211_get_chandef_type(chandef) == NL80211_CHAN_HT40PLUS; sec_chan_ie->sec_chan_offs = ht40plus ? 
IEEE80211_HT_PARAM_CHA_SEC_ABOVE : IEEE80211_HT_PARAM_CHA_SEC_BELOW; pos += sizeof(*sec_chan_ie); extra_ies_len += 2 + sizeof(struct ieee80211_sec_chan_offs_ie); } /* just set the values to 0, this is a template */ iee80211_tdls_add_ch_switch_timing(pos, 0, 0); skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr, WLAN_TDLS_CHANNEL_SWITCH_REQUEST, 0, 0, !sta->sta.tdls_initiator, extra_ies, extra_ies_len, oper_class, chandef); if (!skb) return NULL; skb = ieee80211_build_data_template(sdata, skb, 0); if (IS_ERR(skb)) { tdls_dbg(sdata, "Failed building TDLS channel switch frame\n"); return NULL; } if (ch_sw_tm_ie_offset) { const u8 *tm_ie = ieee80211_tdls_find_sw_timing_ie(skb); if (!tm_ie) { tdls_dbg(sdata, "No switch timing IE in TDLS switch\n"); dev_kfree_skb_any(skb); return NULL; } *ch_sw_tm_ie_offset = tm_ie - skb->data; } tdls_dbg(sdata, "TDLS channel switch request template for %pM ch %d width %d\n", sta->sta.addr, chandef->chan->center_freq, chandef->width); return skb; } int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev, const u8 *addr, u8 oper_class, struct cfg80211_chan_def *chandef) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct sk_buff *skb = NULL; u32 ch_sw_tm_ie; int ret; mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, addr); if (!sta) { tdls_dbg(sdata, "Invalid TDLS peer %pM for channel switch request\n", addr); ret = -ENOENT; goto out; } if (!test_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH)) { tdls_dbg(sdata, "TDLS channel switch unsupported by %pM\n", addr); ret = -ENOTSUPP; goto out; } skb = ieee80211_tdls_ch_sw_tmpl_get(sta, oper_class, chandef, &ch_sw_tm_ie); if (!skb) { ret = -ENOENT; goto out; } ret = drv_tdls_channel_switch(local, sdata, &sta->sta, oper_class, chandef, skb, ch_sw_tm_ie); if (!ret) set_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); out: mutex_unlock(&local->sta_mtx); dev_kfree_skb_any(skb); 
return ret; } void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy, struct net_device *dev, const u8 *addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, addr); if (!sta) { tdls_dbg(sdata, "Invalid TDLS peer %pM for channel switch cancel\n", addr); goto out; } if (!test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) { tdls_dbg(sdata, "TDLS channel switch not initiated by %pM\n", addr); goto out; } drv_tdls_cancel_channel_switch(local, sdata, &sta->sta); clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); out: mutex_unlock(&local->sta_mtx); } static struct sk_buff * ieee80211_tdls_ch_sw_resp_tmpl_get(struct sta_info *sta, u32 *ch_sw_tm_ie_offset) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct sk_buff *skb; u8 extra_ies[2 + sizeof(struct ieee80211_ch_switch_timing)]; /* initial timing are always zero in the template */ iee80211_tdls_add_ch_switch_timing(extra_ies, 0, 0); skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr, WLAN_TDLS_CHANNEL_SWITCH_RESPONSE, 0, 0, !sta->sta.tdls_initiator, extra_ies, sizeof(extra_ies), 0, NULL); if (!skb) return NULL; skb = ieee80211_build_data_template(sdata, skb, 0); if (IS_ERR(skb)) { tdls_dbg(sdata, "Failed building TDLS channel switch resp frame\n"); return NULL; } if (ch_sw_tm_ie_offset) { const u8 *tm_ie = ieee80211_tdls_find_sw_timing_ie(skb); if (!tm_ie) { tdls_dbg(sdata, "No switch timing IE in TDLS switch resp\n"); dev_kfree_skb_any(skb); return NULL; } *ch_sw_tm_ie_offset = tm_ie - skb->data; } tdls_dbg(sdata, "TDLS get channel switch response template for %pM\n", sta->sta.addr); return skb; } static int ieee80211_process_tdls_channel_switch_resp(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee802_11_elems elems; struct sta_info *sta; struct ieee80211_tdls_data *tf = (void *)skb->data; 
bool local_initiator; struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); int baselen = offsetof(typeof(*tf), u.chan_switch_resp.variable); struct ieee80211_tdls_ch_sw_params params = {}; int ret; params.action_code = WLAN_TDLS_CHANNEL_SWITCH_RESPONSE; params.timestamp = rx_status->device_timestamp; if (skb->len < baselen) { tdls_dbg(sdata, "TDLS channel switch resp too short: %d\n", skb->len); return -EINVAL; } mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, tf->sa); if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) { tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n", tf->sa); ret = -EINVAL; goto out; } params.sta = &sta->sta; params.status = le16_to_cpu(tf->u.chan_switch_resp.status_code); if (params.status != 0) { ret = 0; goto call_drv; } ieee802_11_parse_elems(tf->u.chan_switch_resp.variable, skb->len - baselen, false, &elems); if (elems.parse_error) { tdls_dbg(sdata, "Invalid IEs in TDLS channel switch resp\n"); ret = -EINVAL; goto out; } if (!elems.ch_sw_timing || !elems.lnk_id) { tdls_dbg(sdata, "TDLS channel switch resp - missing IEs\n"); ret = -EINVAL; goto out; } /* validate the initiator is set correctly */ local_initiator = !memcmp(elems.lnk_id->init_sta, sdata->vif.addr, ETH_ALEN); if (local_initiator == sta->sta.tdls_initiator) { tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n"); ret = -EINVAL; goto out; } params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time); params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout); params.tmpl_skb = ieee80211_tdls_ch_sw_resp_tmpl_get(sta, &params.ch_sw_tm_ie); if (!params.tmpl_skb) { ret = -ENOENT; goto out; } call_drv: drv_tdls_recv_channel_switch(sdata->local, sdata, &params); tdls_dbg(sdata, "TDLS channel switch response received from %pM status %d\n", tf->sa, params.status); out: mutex_unlock(&local->sta_mtx); dev_kfree_skb_any(params.tmpl_skb); return ret; } static int ieee80211_process_tdls_channel_switch_req(struct 
ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee802_11_elems elems; struct cfg80211_chan_def chandef; struct ieee80211_channel *chan; enum nl80211_channel_type chan_type; int freq; u8 target_channel, oper_class; bool local_initiator; struct sta_info *sta; enum ieee80211_band band; struct ieee80211_tdls_data *tf = (void *)skb->data; struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); int baselen = offsetof(typeof(*tf), u.chan_switch_req.variable); struct ieee80211_tdls_ch_sw_params params = {}; int ret = 0; params.action_code = WLAN_TDLS_CHANNEL_SWITCH_REQUEST; params.timestamp = rx_status->device_timestamp; if (skb->len < baselen) { tdls_dbg(sdata, "TDLS channel switch req too short: %d\n", skb->len); return -EINVAL; } target_channel = tf->u.chan_switch_req.target_channel; oper_class = tf->u.chan_switch_req.oper_class; /* * We can't easily infer the channel band. The operating class is * ambiguous - there are multiple tables (US/Europe/JP/Global). The * solution here is to treat channels with number >14 as 5GHz ones, * and specifically check for the (oper_class, channel) combinations * where this doesn't hold. These are thankfully unique according to * IEEE802.11-2012. * We consider only the 2GHz and 5GHz bands and 20MHz+ channels as * valid here. */ if ((oper_class == 112 || oper_class == 2 || oper_class == 3 || oper_class == 4 || oper_class == 5 || oper_class == 6) && target_channel < 14) band = IEEE80211_BAND_5GHZ; else band = target_channel < 14 ? 
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; freq = ieee80211_channel_to_frequency(target_channel, band); if (freq == 0) { tdls_dbg(sdata, "Invalid channel in TDLS chan switch: %d\n", target_channel); return -EINVAL; } chan = ieee80211_get_channel(sdata->local->hw.wiphy, freq); if (!chan) { tdls_dbg(sdata, "Unsupported channel for TDLS chan switch: %d\n", target_channel); return -EINVAL; } ieee802_11_parse_elems(tf->u.chan_switch_req.variable, skb->len - baselen, false, &elems); if (elems.parse_error) { tdls_dbg(sdata, "Invalid IEs in TDLS channel switch req\n"); return -EINVAL; } if (!elems.ch_sw_timing || !elems.lnk_id) { tdls_dbg(sdata, "TDLS channel switch req - missing IEs\n"); return -EINVAL; } if (!elems.sec_chan_offs) { chan_type = NL80211_CHAN_HT20; } else { switch (elems.sec_chan_offs->sec_chan_offs) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: chan_type = NL80211_CHAN_HT40PLUS; break; case IEEE80211_HT_PARAM_CHA_SEC_BELOW: chan_type = NL80211_CHAN_HT40MINUS; break; default: chan_type = NL80211_CHAN_HT20; break; } } cfg80211_chandef_create(&chandef, chan, chan_type); /* we will be active on the TDLS link */ if (!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &chandef, sdata->wdev.iftype)) { tdls_dbg(sdata, "TDLS chan switch to forbidden channel\n"); return -EINVAL; } mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, tf->sa); if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) { tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n", tf->sa); ret = -EINVAL; goto out; } params.sta = &sta->sta; /* validate the initiator is set correctly */ local_initiator = !memcmp(elems.lnk_id->init_sta, sdata->vif.addr, ETH_ALEN); if (local_initiator == sta->sta.tdls_initiator) { tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n"); ret = -EINVAL; goto out; } /* peer should have known better */ if (!sta->sta.ht_cap.ht_supported && elems.sec_chan_offs && elems.sec_chan_offs->sec_chan_offs) { tdls_dbg(sdata, "TDLS chan switch - wide chan 
unsupported\n"); ret = -ENOTSUPP; goto out; } params.chandef = &chandef; params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time); params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout); params.tmpl_skb = ieee80211_tdls_ch_sw_resp_tmpl_get(sta, &params.ch_sw_tm_ie); if (!params.tmpl_skb) { ret = -ENOENT; goto out; } drv_tdls_recv_channel_switch(sdata->local, sdata, &params); tdls_dbg(sdata, "TDLS ch switch request received from %pM ch %d width %d\n", tf->sa, params.chandef->chan->center_freq, params.chandef->width); out: mutex_unlock(&local->sta_mtx); dev_kfree_skb_any(params.tmpl_skb); return ret; } static void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_tdls_data *tf = (void *)skb->data; struct wiphy *wiphy = sdata->local->hw.wiphy; ASSERT_RTNL(); /* make sure the driver supports it */ if (!(wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH)) return; /* we want to access the entire packet */ if (skb_linearize(skb)) return; /* * The packet/size was already validated by mac80211 Rx path, only look * at the action type. 
*/ switch (tf->action_code) { case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: ieee80211_process_tdls_channel_switch_req(sdata, skb); break; case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: ieee80211_process_tdls_channel_switch_resp(sdata, skb); break; default: WARN_ON_ONCE(1); return; } } void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata) { struct sta_info *sta; u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED; rcu_read_lock(); list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) continue; ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr, NL80211_TDLS_TEARDOWN, reason, GFP_ATOMIC); } rcu_read_unlock(); } void ieee80211_tdls_chsw_work(struct work_struct *wk) { struct ieee80211_local *local = container_of(wk, struct ieee80211_local, tdls_chsw_work); struct ieee80211_sub_if_data *sdata; struct sk_buff *skb; struct ieee80211_tdls_data *tf; rtnl_lock(); while ((skb = skb_dequeue(&local->skb_queue_tdls_chsw))) { tf = (struct ieee80211_tdls_data *)skb->data; list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata) || sdata->vif.type != NL80211_IFTYPE_STATION || !ether_addr_equal(tf->da, sdata->vif.addr)) continue; ieee80211_process_tdls_channel_switch(sdata, skb); break; } kfree_skb(skb); } rtnl_unlock(); }
gpl-2.0
PabloPiaggi/lammps
src/USER-MISC/pair_cosine_squared.cpp
1
15267
/* ----------------------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   http://lammps.sandia.gov, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

/* ----------------------------------------------------------------------
   Contributing authors: Eugen Rozic (University College London)

   Pair style "cosine/squared": a flat minimum of depth epsilon for
   r <= sigma followed by a cosine-squared well that goes smoothly to
   zero at the cutoff; optionally the r <= sigma part is replaced by a
   WCA (shifted, purely repulsive LJ) core.
------------------------------------------------------------------------- */

#include "pair_cosine_squared.h"

#include <cmath>
#include <cstdlib>
#include <cstring>

#include "atom.h"
#include "comm.h"
#include "force.h"
#include "neighbor.h"
#include "neigh_list.h"
#include "neigh_request.h"
#include "update.h"
#include "integrate.h"
#include "respa.h"
#include "math_const.h"
#include "memory.h"
#include "error.h"
#include "utils.h"

using namespace LAMMPS_NS;
using namespace MathConst;

/* ---------------------------------------------------------------------- */

PairCosineSquared::PairCosineSquared(LAMMPS *lmp) : Pair(lmp)
{
  writedata = 1;      // this style can write its coeffs to data files
}

/* ---------------------------------------------------------------------- */

PairCosineSquared::~PairCosineSquared()
{
  if (allocated) {
    memory->destroy(setflag);
    memory->destroy(cutsq);

    memory->destroy(epsilon);
    memory->destroy(sigma);
    memory->destroy(w);
    memory->destroy(cut);
    memory->destroy(wcaflag);

    memory->destroy(lj12_e);
    memory->destroy(lj6_e);
    memory->destroy(lj12_f);
    memory->destroy(lj6_f);
  }
}

/* ----------------------------------------------------------------------
   allocate all arrays
------------------------------------------------------------------------- */

void PairCosineSquared::allocate()
{
  allocated = 1;
  int n = atom->ntypes;

  // all per-type-pair tables are (n+1) x (n+1); row/column 0 is unused
  memory->create(setflag, n+1, n+1, "pair:setflag");
  memory->create(cutsq, n+1, n+1, "pair:cutsq");

  memory->create(cut, n+1, n+1, "pair:cut");
  memory->create(epsilon, n+1, n+1, "pair:epsilon");
  memory->create(sigma, n+1, n+1, "pair:sigma");
  memory->create(w, n+1, n+1, "pair:w");
  memory->create(wcaflag, n+1, n+1, "pair:wcaflag");

  // precomputed WCA coefficients (filled in init_one when wcaflag is set)
  memory->create(lj12_e, n+1, n+1, "pair:lj12_e");
  memory->create(lj6_e, n+1, n+1, "pair:lj6_e");
  memory->create(lj12_f, n+1, n+1, "pair:lj12_f");
  memory->create(lj6_f, n+1, n+1, "pair:lj6_f");

  // only the upper triangle is used before init_one() mirrors it
  for (int i = 1; i <= n; i++) {
    for (int j = i; j <= n; j++) {
      setflag[i][j] = 0;
      wcaflag[i][j] = 0;
    }
  }
}

/* ----------------------------------------------------------------------
   global settings
   pair_style cosine/squared <global_cutoff>
------------------------------------------------------------------------- */

void PairCosineSquared::settings(int narg, char **arg)
{
  if (narg != 1) {
    error->all(FLERR, "Illegal pair_style command (wrong number of params)");
  }

  cut_global = force->numeric(FLERR, arg[0]);

  // reset cutoffs that have been explicitly set
  if (allocated) {
    int i, j;
    for (i = 1; i <= atom->ntypes; i++)
      for (j = i+1; j <= atom->ntypes; j++)
        if (setflag[i][j])
          cut[i][j] = cut_global;
  }
}

/* ----------------------------------------------------------------------
   set coeffs for one or more type pairs
   pair_coeff I J epsilon sigma [cutoff] [wca]
------------------------------------------------------------------------- */

void PairCosineSquared::coeff(int narg, char **arg)
{
  if (narg < 4 || narg > 6)
    error->all(FLERR, "Incorrect args for pair coefficients (too few or too many)");

  if (!allocated)
    allocate();

  int ilo, ihi, jlo, jhi;
  force->bounds(FLERR, arg[0], atom->ntypes, ilo, ihi);
  force->bounds(FLERR, arg[1], atom->ntypes, jlo, jhi);

  double epsilon_one = force->numeric(FLERR, arg[2]);
  double sigma_one = force->numeric(FLERR, arg[3]);

  double cut_one = cut_global;
  // boolean flag, stored into the int array wcaflag (was double: implicit
  // narrowing conversion)
  int wca_one = 0;
  if (narg == 6) {
    // both optional args given: cutoff then "wca"
    cut_one = force->numeric(FLERR, arg[4]);
    if (strcmp(arg[5], "wca") == 0) {
      wca_one = 1;
    } else {
      error->all(FLERR, "Incorrect args for pair coefficients (unknown option)");
    }
  } else if (narg == 5) {
    // single optional arg is either the keyword "wca" or a numeric cutoff
    if (strcmp(arg[4], "wca") == 0) {
      wca_one = 1;
    } else {
      cut_one = force->numeric(FLERR, arg[4]);
    }
  }

  if (cut_one < sigma_one) {
    error->all(FLERR, "Incorrect args for pair coefficients (cutoff < sigma)");
  } else if (cut_one == sigma_one) {
    // zero-width well: only meaningful as a pure WCA interaction
    if (wca_one == 0) {
      error->all(FLERR, "Incorrect args for pair coefficients (cutoff = sigma w/o wca)");
    } else {
      error->warning(FLERR, "Cosine/squared set to WCA only (cutoff = sigma)");
    }
  }

  int count = 0;
  for (int i = ilo; i <= ihi; i++) {
    for (int j = MAX(jlo,i); j <= jhi; j++) {
      epsilon[i][j] = epsilon_one;
      sigma[i][j] = sigma_one;
      cut[i][j] = cut_one;
      wcaflag[i][j] = wca_one;
      setflag[i][j] = 1;
      count++;
    }
  }

  if (count == 0)
    error->all(FLERR, "Incorrect args for pair coefficients (none set)");
}

// NOTE: no init_style() override is needed; the base-class default
// (a standard neighbor-list request) is sufficient for this style.

/* ----------------------------------------------------------------------
   init for one type pair i,j and corresponding j,i
   mixing is deliberately unsupported: every pair must be set explicitly
------------------------------------------------------------------------- */

double PairCosineSquared::init_one(int i, int j)
{
  if (setflag[i][j] == 0)
    error->all(FLERR, "Mixing not supported in pair_style cosine/squared");

  // mirror the upper triangle into the lower one
  epsilon[j][i] = epsilon[i][j];
  sigma[j][i] = sigma[i][j];
  cut[j][i] = cut[i][j];
  wcaflag[j][i] = wcaflag[i][j];

  // w = width of the cosine-squared well (zero in the WCA-only case)
  w[j][i] = w[i][j] = cut[i][j] - sigma[i][j];

  if (wcaflag[i][j]) {
    // precompute LJ coefficients so compute()/single() evaluate the
    // WCA core as r6inv*(lj12*r6inv - lj6)
    lj12_e[j][i] = lj12_e[i][j] = epsilon[i][j] * pow(sigma[i][j], 12.0);
    lj6_e[j][i] = lj6_e[i][j] = 2.0 * epsilon[i][j] * pow(sigma[i][j], 6.0);
    lj12_f[j][i] = lj12_f[i][j] = 12.0 * epsilon[i][j] * pow(sigma[i][j], 12.0);
    lj6_f[j][i] = lj6_f[i][j] = 12.0 * epsilon[i][j] * pow(sigma[i][j], 6.0);
  }

  // Note: cutsq is set in pair.cpp

  return cut[i][j];
}

/* ----------------------------------------------------------------------
   this is here to throw errors & warnings for given options
   (mix is an error; shift/tail are silently disabled with a warning)
------------------------------------------------------------------------- */

void PairCosineSquared::modify_params(int narg, char **arg)
{
  Pair::modify_params(narg, arg);

  int iarg = 0;
  while (iarg < narg) {
    if (strcmp(arg[iarg], "mix") == 0) {
      error->all(FLERR, "pair_modify mix not supported for pair_style cosine/squared");
    } else if (strcmp(arg[iarg], "shift") == 0) {
      error->warning(FLERR, "pair_modify shift has no effect on pair_style cosine/squared");
      offset_flag = 0;
    } else if (strcmp(arg[iarg], "tail") == 0) {
      error->warning(FLERR, "pair_modify tail has no effect on pair_style cosine/squared");
      tail_flag = 0;
    }
    iarg++;
  }
}

/* ----------------------------------------------------------------------
   proc 0 writes to restart file
------------------------------------------------------------------------- */

void PairCosineSquared::write_restart(FILE *fp)
{
  write_restart_settings(fp);

  int i, j;
  for (i = 1; i <= atom->ntypes; i++)
    for (j = i; j <= atom->ntypes; j++) {
      fwrite(&setflag[i][j], sizeof(int), 1, fp);
      if (setflag[i][j]) {
        fwrite(&epsilon[i][j], sizeof(double), 1, fp);
        fwrite(&sigma[i][j], sizeof(double), 1, fp);
        fwrite(&cut[i][j], sizeof(double), 1, fp);
        fwrite(&wcaflag[i][j], sizeof(int), 1, fp);
      }
    }
}

/* ----------------------------------------------------------------------
   proc 0 reads from restart file, bcasts
------------------------------------------------------------------------- */

void PairCosineSquared::read_restart(FILE *fp)
{
  read_restart_settings(fp);
  allocate();

  int i, j;
  int me = comm->me;
  for (i = 1; i <= atom->ntypes; i++) {
    for (j = i; j <= atom->ntypes; j++) {
      // read on proc 0, then broadcast so every rank has the coeffs
      if (me == 0)
        utils::sfread(FLERR,&setflag[i][j], sizeof(int), 1, fp,NULL,error);
      MPI_Bcast(&setflag[i][j], 1, MPI_INT, 0, world);
      if (setflag[i][j]) {
        if (me == 0) {
          utils::sfread(FLERR,&epsilon[i][j], sizeof(double), 1, fp,NULL,error);
          utils::sfread(FLERR,&sigma[i][j], sizeof(double), 1, fp,NULL,error);
          utils::sfread(FLERR,&cut[i][j], sizeof(double), 1, fp,NULL,error);
          utils::sfread(FLERR,&wcaflag[i][j], sizeof(int), 1, fp,NULL,error);
        }
        MPI_Bcast(&epsilon[i][j], 1, MPI_DOUBLE, 0, world);
        MPI_Bcast(&sigma[i][j], 1, MPI_DOUBLE, 0, world);
        MPI_Bcast(&cut[i][j], 1, MPI_DOUBLE, 0, world);
        MPI_Bcast(&wcaflag[i][j], 1, MPI_INT, 0, world);
      }
    }
  }
}

/* ----------------------------------------------------------------------
   proc 0 writes global settings to restart file
------------------------------------------------------------------------- */

void PairCosineSquared::write_restart_settings(FILE *fp)
{
  fwrite(&cut_global, sizeof(double), 1, fp);
}

/* ----------------------------------------------------------------------
   proc 0 reads global settings from restart file, bcasts
------------------------------------------------------------------------- */

void PairCosineSquared::read_restart_settings(FILE *fp)
{
  int me = comm->me;
  if (me == 0) {
    utils::sfread(FLERR,&cut_global, sizeof(double), 1, fp,NULL,error);
  }
  MPI_Bcast(&cut_global, 1, MPI_DOUBLE, 0, world);
}

/* ----------------------------------------------------------------------
   proc 0 writes per-type coeffs to data file
------------------------------------------------------------------------- */

void PairCosineSquared::write_data(FILE *fp)
{
  for (int i = 1; i <= atom->ntypes; i++)
    fprintf(fp, "%d %g %g %g %d\n", i,
            epsilon[i][i], sigma[i][i], cut[i][i], wcaflag[i][i]);
}

/* ----------------------------------------------------------------------
   proc 0 writes all pairs to data file
------------------------------------------------------------------------- */

void PairCosineSquared::write_data_all(FILE *fp)
{
  for (int i = 1; i <= atom->ntypes; i++)
    for (int j = i; j <= atom->ntypes; j++)
      fprintf(fp, "%d %d %g %g %g %d\n", i, j,
              epsilon[i][j], sigma[i][j], cut[i][j], wcaflag[i][j]);
}

/* ----------------------------------------------------------------------
   main force/energy evaluation over the neighbor list
------------------------------------------------------------------------- */

void PairCosineSquared::compute(int eflag, int vflag)
{
  int i, j, ii, jj, inum, jnum, itype, jtype;
  int *ilist, *jlist, *numneigh, **firstneigh;
  double xtmp, ytmp, ztmp, delx, dely, delz, evdwl, fpair;
  double r, rsq, r2inv, r6inv;
  double factor_lj, force_lj, force_cos, cosone;

  evdwl = 0.0;
  if (eflag || vflag)
    ev_setup(eflag, vflag);
  else
    evflag = vflag_fdotr = 0;

  double **x = atom->x;
  double **f = atom->f;
  int *type = atom->type;
  int nlocal = atom->nlocal;
  double *special_lj = force->special_lj;
  int newton_pair = force->newton_pair;

  inum = list->inum;
  ilist = list->ilist;
  numneigh = list->numneigh;
  firstneigh = list->firstneigh;

  // loop over neighbors of my atoms

  for (ii = 0; ii < inum; ii++) {
    i = ilist[ii];
    xtmp = x[i][0];
    ytmp = x[i][1];
    ztmp = x[i][2];
    itype = type[i];
    jlist = firstneigh[i];
    jnum = numneigh[i];

    for (jj = 0; jj < jnum; jj++) {
      j = jlist[jj];
      // high bits of j encode the special-bonds factor
      factor_lj = special_lj[sbmask(j)];
      j &= NEIGHMASK;

      delx = xtmp - x[j][0];
      dely = ytmp - x[j][1];
      delz = ztmp - x[j][2];
      rsq = delx*delx + dely*dely + delz*delz;
      jtype = type[j];

      if (rsq < cutsq[itype][jtype]) {
        /*
          This is exactly what the "single" method does; in fact it could be
          called here instead of repeating the code, but here the energy
          calculation is optional so a little bit of calculation is
          possibly saved
        */
        r = sqrt(rsq);
        if (r <= sigma[itype][jtype]) {
          if (wcaflag[itype][jtype]) {
            // WCA core: repulsive shifted LJ inside sigma
            r2inv = 1.0/rsq;
            r6inv = r2inv*r2inv*r2inv;
            force_lj = r6inv*(lj12_f[itype][jtype]*r6inv - lj6_f[itype][jtype]);
            fpair = factor_lj*force_lj*r2inv;
            if (eflag) {
              evdwl = factor_lj*r6inv*
                      (lj12_e[itype][jtype]*r6inv - lj6_e[itype][jtype]);
              if (sigma[itype][jtype] == cut[itype][jtype]) {
                // this is the WCA-only case (it requires this shift by definition)
                evdwl += factor_lj*epsilon[itype][jtype];
              }
            }
          } else {
            // flat bottom of the well: constant energy, zero force
            fpair = 0.0;
            if (eflag) {
              evdwl = -factor_lj*epsilon[itype][jtype];
            }
          }
        } else {
          // cosine-squared well between sigma and the cutoff
          force_cos = -(MY_PI*epsilon[itype][jtype] / (2.0*w[itype][jtype])) *
                       sin(MY_PI*(r-sigma[itype][jtype]) / w[itype][jtype]);
          fpair = factor_lj*force_cos / r;
          if (eflag) {
            cosone = cos(MY_PI*(r-sigma[itype][jtype]) / (2.0*w[itype][jtype]));
            evdwl = -factor_lj*epsilon[itype][jtype]*cosone*cosone;
          }
        }

        f[i][0] += delx*fpair;
        f[i][1] += dely*fpair;
        f[i][2] += delz*fpair;
        if (newton_pair || j < nlocal) {
          f[j][0] -= delx*fpair;
          f[j][1] -= dely*fpair;
          f[j][2] -= delz*fpair;
        }

        if (evflag)
          ev_tally(i, j, nlocal, newton_pair, evdwl, 0.0,
                   fpair, delx, dely, delz);
      }
    }
  }

  if (vflag_fdotr)
    virial_fdotr_compute();
}

/* ----------------------------------------------------------------------
   This is used by pair_write;
   it is called only if rsq < cutsq[itype][jtype], no need to check that
------------------------------------------------------------------------- */

double PairCosineSquared::single(int /* i */, int /* j */, int itype, int jtype,
                                 double rsq, double /* factor_coul */,
                                 double factor_lj, double &fforce)
{
  double r, r2inv, r6inv, cosone, force, energy;

  r = sqrt(rsq);
  if (r <= sigma[itype][jtype]) {
    if (wcaflag[itype][jtype]) {
      // WCA core (force already divided by r here, unlike compute())
      r2inv = 1.0/rsq;
      r6inv = r2inv*r2inv*r2inv;
      force = r6inv*(lj12_f[itype][jtype]*r6inv - lj6_f[itype][jtype])*r2inv;
      energy = r6inv*(lj12_e[itype][jtype]*r6inv - lj6_e[itype][jtype]);
      if (sigma[itype][jtype] == cut[itype][jtype]) {
        // this is the WCA-only case (it requires this shift by definition)
        energy += epsilon[itype][jtype];
      }
    } else {
      // flat bottom of the well
      force = 0.0;
      energy = -epsilon[itype][jtype];
    }
  } else {
    // cosine-squared well between sigma and the cutoff
    cosone = cos(MY_PI*(r-sigma[itype][jtype]) / (2.0*w[itype][jtype]));
    force = -(MY_PI*epsilon[itype][jtype] / (2.0*w[itype][jtype])) *
             sin(MY_PI*(r-sigma[itype][jtype]) / w[itype][jtype]) / r;
    energy = -epsilon[itype][jtype]*cosone*cosone;
  }
  fforce = factor_lj*force;
  return factor_lj*energy;
}
gpl-2.0
abev66/linux-kernel-vta2-ns
drivers/net/wireless/bcmdhd/wl_cfg80211.c
1
226162
/* * Linux cfg80211 driver * * Copyright (C) 1999-2011, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $ */ #include <typedefs.h> #include <linuxver.h> #include <osl.h> #include <linux/kernel.h> #include <bcmutils.h> #include <bcmwifi.h> #include <bcmendian.h> #include <proto/ethernet.h> #include <proto/802.11.h> #include <linux/if_arp.h> #include <asm/uaccess.h> #include <dngl_stats.h> #include <dhd.h> #include <dhdioctl.h> #include <wlioctl.h> #include <dhd_cfg80211.h> #include <proto/ethernet.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/netdevice.h> #include <linux/sched.h> #include <linux/etherdevice.h> #include <linux/wireless.h> #include <linux/ieee80211.h> #include <linux/wait.h> #include <net/cfg80211.h> #include <net/rtnetlink.h> #include <wlioctl.h> #include <wldev_common.h> #include <wl_cfg80211.h> #include <wl_cfgp2p.h> static struct device *cfg80211_parent_dev = NULL; static int vsdb_supported = 0; struct wl_priv *wlcfg_drv_priv = NULL; u32 wl_dbg_level = WL_DBG_ERR; #define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] #define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" #define MAX_WAIT_TIME 1500 #define WL_SCAN_ACTIVE_TIME 40 #define WL_SCAN_PASSIVE_TIME 130 #define WL_FRAME_LEN 300 #define WL_SCAN_BUSY_MAX 8 #define DNGL_FUNC(func, parameters) func parameters; #define COEX_DHCP /* This is to override regulatory domains defined in cfg80211 module (reg.c) * By default world regulatory domain defined in reg.c puts the flags NL80211_RRF_PASSIVE_SCAN * and NL80211_RRF_NO_IBSS for 5GHz channels (for 36..48 and 149..165). * With respect to these flags, wpa_supplicant doesn't start p2p operations on 5GHz channels. * All the chnages in world regulatory domain are to be done here. */ static const struct ieee80211_regdomain brcm_regdom = { .n_reg_rules = 4, .alpha2 = "99", .reg_rules = { /* IEEE 802.11b/g, channels 1..11 */ REG_RULE(2412-10, 2472+10, 40, 6, 20, 0), /* IEEE 802.11b/g, channels 12..13. No HT40 * channel fits here. 
*/ /* If any */ /* * IEEE 802.11 channel 14 - is for JP only, * we need cfg80211 to allow it (reg_flags = 0); so that * hostapd could request auto channel by sending down ch 14 */ REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS | NL80211_RRF_NO_OFDM), /* IEEE 802.11a, channel 36..64 */ REG_RULE(5150-10, 5350+10, 40, 6, 20, 0), /* IEEE 802.11a, channel 100..165 */ REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), } }; /* Data Element Definitions */ #define WPS_ID_CONFIG_METHODS 0x1008 #define WPS_ID_REQ_TYPE 0x103A #define WPS_ID_DEVICE_NAME 0x1011 #define WPS_ID_VERSION 0x104A #define WPS_ID_DEVICE_PWD_ID 0x1012 #define WPS_ID_REQ_DEV_TYPE 0x106A #define WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS 0x1053 #define WPS_ID_PRIM_DEV_TYPE 0x1054 /* Device Password ID */ #define DEV_PW_DEFAULT 0x0000 #define DEV_PW_USER_SPECIFIED 0x0001, #define DEV_PW_MACHINE_SPECIFIED 0x0002 #define DEV_PW_REKEY 0x0003 #define DEV_PW_PUSHBUTTON 0x0004 #define DEV_PW_REGISTRAR_SPECIFIED 0x0005 /* Config Methods */ #define WPS_CONFIG_USBA 0x0001 #define WPS_CONFIG_ETHERNET 0x0002 #define WPS_CONFIG_LABEL 0x0004 #define WPS_CONFIG_DISPLAY 0x0008 #define WPS_CONFIG_EXT_NFC_TOKEN 0x0010 #define WPS_CONFIG_INT_NFC_TOKEN 0x0020 #define WPS_CONFIG_NFC_INTERFACE 0x0040 #define WPS_CONFIG_PUSHBUTTON 0x0080 #define WPS_CONFIG_KEYPAD 0x0100 #define WPS_CONFIG_VIRT_PUSHBUTTON 0x0280 #define WPS_CONFIG_PHY_PUSHBUTTON 0x0480 #define WPS_CONFIG_VIRT_DISPLAY 0x2008 #define WPS_CONFIG_PHY_DISPLAY 0x4008 /* * cfg80211_ops api/callback list */ static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da, const struct ether_addr *sa, const struct ether_addr *bssid, u8 **pheader, u32 *body_len, u8 *pbody); static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_scan_request *request, struct cfg80211_ssid *this_ssid); static s32 wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_scan_request *request); static s32 
wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed); static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ibss_params *params); static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev); static s32 wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_info *sinfo); static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, bool enabled, s32 timeout); static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme); static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_code); static s32 wl_cfg80211_set_tx_power(struct wiphy *wiphy, enum nl80211_tx_power_setting type, s32 dbm); static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm); static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool unicast, bool multicast); static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool pairwise, const u8 *mac_addr, struct key_params *params); static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool pairwise, const u8 *mac_addr); static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback) (void *cookie, struct key_params *params)); static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx); static s32 wl_cfg80211_resume(struct wiphy *wiphy); static s32 wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, struct net_device *dev, u64 cookie); #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39) static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow); #else static s32 wl_cfg80211_suspend(struct wiphy *wiphy); #endif static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct 
net_device *dev, struct cfg80211_pmksa *pmksa); static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_pmksa *pmksa); static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev); static s32 wl_notify_escan_complete(struct wl_priv *wl, struct net_device *ndev, bool aborted, bool fw_abort); /* * event & event Q handlers for cfg80211 interfaces */ static s32 wl_create_event_handler(struct wl_priv *wl); static void wl_destroy_event_handler(struct wl_priv *wl); static s32 wl_event_handler(void *data); static void wl_init_eq(struct wl_priv *wl); static void wl_flush_eq(struct wl_priv *wl); static unsigned long wl_lock_eq(struct wl_priv *wl); static void wl_unlock_eq(struct wl_priv *wl, unsigned long flags); static void wl_init_eq_lock(struct wl_priv *wl); static void wl_init_event_handler(struct wl_priv *wl); static struct wl_event_q *wl_deq_event(struct wl_priv *wl); static s32 wl_enq_event(struct wl_priv *wl, struct net_device *ndev, u32 type, const wl_event_msg_t *msg, void *data); static void wl_put_event(struct wl_event_q *e); static void wl_wakeup_event(struct wl_priv *wl); static s32 wl_notify_connect_status_ap(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data); static s32 wl_notify_connect_status(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data); static s32 wl_notify_roaming_status(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data); static s32 wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data); static s32 wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data, bool completed); static s32 wl_ibss_join_done(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data, bool completed); static s32 wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data); 
static s32 wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data); #ifdef WL_SCHED_SCAN static s32 wl_notify_sched_scan_results(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data); #endif /* WL_SCHED_SCAN */ #ifdef PNO_SUPPORT static s32 wl_notify_pfn_status(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data); #endif /* PNO_SUPPORT */ /* * register/deregister parent device */ static void wl_cfg80211_clear_parent_dev(void); /* * cfg80211 set_wiphy_params utilities */ static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold); static s32 wl_set_rts(struct net_device *dev, u32 frag_threshold); static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l); /* * wl profile utilities */ static s32 wl_update_prof(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data, s32 item); static void *wl_read_prof(struct wl_priv *wl, struct net_device *ndev, s32 item); static void wl_init_prof(struct wl_priv *wl, struct net_device *ndev); /* * cfg80211 connect utilites */ static s32 wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme); static s32 wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme); static s32 wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme); static s32 wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme); static s32 wl_set_set_sharedkey(struct net_device *dev, struct cfg80211_connect_params *sme); static s32 wl_get_assoc_ies(struct wl_priv *wl, struct net_device *ndev); static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params, size_t *join_params_size); /* * information element utilities */ static void wl_rst_ie(struct wl_priv *wl); static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v); static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size); static s32 
wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size); static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size); static u32 wl_get_ielen(struct wl_priv *wl); static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev); static void wl_free_wdev(struct wl_priv *wl); static s32 wl_inform_bss(struct wl_priv *wl); static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi); static s32 wl_inform_ibss(struct wl_priv *wl, const u8 *bssid); static s32 wl_update_bss_info(struct wl_priv *wl, struct net_device *ndev); static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy); static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, const u8 *mac_addr, struct key_params *params); /* * key indianess swap utilities */ static void swap_key_from_BE(struct wl_wsec_key *key); static void swap_key_to_BE(struct wl_wsec_key *key); /* * wl_priv memory init/deinit utilities */ static s32 wl_init_priv_mem(struct wl_priv *wl); static void wl_deinit_priv_mem(struct wl_priv *wl); static void wl_delay(u32 ms); /* * ibss mode utilities */ static bool wl_is_ibssmode(struct wl_priv *wl, struct net_device *ndev); static __used bool wl_is_ibssstarter(struct wl_priv *wl); /* * link up/down , default configuration utilities */ static s32 __wl_cfg80211_up(struct wl_priv *wl); static s32 __wl_cfg80211_down(struct wl_priv *wl); static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e); static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev); static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e); static void wl_link_up(struct wl_priv *wl); static void wl_link_down(struct wl_priv *wl); static s32 wl_config_ifmode(struct wl_priv *wl, struct net_device *ndev, s32 iftype); static void wl_init_conf(struct wl_conf *conf); /* * iscan handler */ static void wl_iscan_timer(unsigned long data); static void wl_term_iscan(struct wl_priv *wl); static s32 wl_init_scan(struct 
wl_priv *wl); static s32 wl_iscan_thread(void *data); static s32 wl_run_iscan(struct wl_iscan_ctrl *iscan, struct cfg80211_scan_request *request, u16 action); static s32 wl_do_iscan(struct wl_priv *wl, struct cfg80211_scan_request *request); static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan); static s32 wl_invoke_iscan(struct wl_priv *wl); static s32 wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status, struct wl_scan_results **bss_list); static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted); static void wl_init_iscan_handler(struct wl_iscan_ctrl *iscan); static s32 wl_iscan_done(struct wl_priv *wl); static s32 wl_iscan_pending(struct wl_priv *wl); static s32 wl_iscan_inprogress(struct wl_priv *wl); static s32 wl_iscan_aborted(struct wl_priv *wl); static void wl_scan_timeout_process(struct work_struct *work); /* * find most significant bit set */ static __used u32 wl_find_msb(u16 bit16); /* * rfkill support */ static int wl_setup_rfkill(struct wl_priv *wl, bool setup); static int wl_rfkill_set(void *data, bool blocked); static wl_scan_params_t *wl_cfg80211_scan_alloc_params(int channel, int nprobes, int *out_params_size); static void get_primary_mac(struct wl_priv *wl, struct ether_addr *mac); /* * Some external functions, TODO: move them to dhd_linux.h */ int dhd_add_monitor(char *name, struct net_device **new_ndev); int dhd_del_monitor(struct net_device *ndev); int dhd_monitor_init(void *dhd_pub); int dhd_monitor_uninit(void); int dhd_start_xmit(struct sk_buff *skb, struct net_device *net); #define CHECK_SYS_UP(wlpriv) \ do { \ struct net_device *ndev = wl_to_prmry_ndev(wlpriv); \ if (unlikely(!wl_get_drv_status(wlpriv, READY, ndev))) { \ WL_INFO(("device is not ready\n")); \ return -EIO; \ } \ } while (0) #define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \ (akm) == RSN_AKM_UNSPECIFIED || \ (akm) == RSN_AKM_PSK) extern int dhd_wait_pend8021x(struct net_device *dev); #if (WL_DBG_LEVEL > 0) #define WL_DBG_ESTR_MAX 50 static 
s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = { "SET_SSID", "JOIN", "START", "AUTH", "AUTH_IND", "DEAUTH", "DEAUTH_IND", "ASSOC", "ASSOC_IND", "REASSOC", "REASSOC_IND", "DISASSOC", "DISASSOC_IND", "QUIET_START", "QUIET_END", "BEACON_RX", "LINK", "MIC_ERROR", "NDIS_LINK", "ROAM", "TXFAIL", "PMKID_CACHE", "RETROGRADE_TSF", "PRUNE", "AUTOAUTH", "EAPOL_MSG", "SCAN_COMPLETE", "ADDTS_IND", "DELTS_IND", "BCNSENT_IND", "BCNRX_MSG", "BCNLOST_MSG", "ROAM_PREP", "PFN_NET_FOUND", "PFN_NET_LOST", "RESET_COMPLETE", "JOIN_START", "ROAM_START", "ASSOC_START", "IBSS_ASSOC", "RADIO", "PSM_WATCHDOG", "WLC_E_CCX_ASSOC_START", "WLC_E_CCX_ASSOC_ABORT", "PROBREQ_MSG", "SCAN_CONFIRM_IND", "PSK_SUP", "COUNTRY_CODE_CHANGED", "EXCEEDED_MEDIUM_TIME", "ICV_ERROR", "UNICAST_DECODE_ERROR", "MULTICAST_DECODE_ERROR", "TRACE", "WLC_E_BTA_HCI_EVENT", "IF", "WLC_E_P2P_DISC_LISTEN_COMPLETE", "RSSI", "PFN_SCAN_COMPLETE", "WLC_E_EXTLOG_MSG", "ACTION_FRAME", "ACTION_FRAME_COMPLETE", "WLC_E_PRE_ASSOC_IND", "WLC_E_PRE_REASSOC_IND", "WLC_E_CHANNEL_ADOPTED", "WLC_E_AP_STARTED", "WLC_E_DFS_AP_STOP", "WLC_E_DFS_AP_RESUME", "WLC_E_WAI_STA_EVENT", "WLC_E_WAI_MSG", "WLC_E_ESCAN_RESULT", "WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE", "WLC_E_PROBRESP_MSG", "WLC_E_P2P_PROBREQ_MSG", "WLC_E_DCS_REQUEST", "WLC_E_FIFO_CREDIT_MAP", "WLC_E_ACTION_FRAME_RX", "WLC_E_WAKE_EVENT", "WLC_E_RM_COMPLETE" }; #endif /* WL_DBG_LEVEL */ #define CHAN2G(_channel, _freq, _flags) { \ .band = IEEE80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_channel), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } #define CHAN5G(_channel, _flags) { \ .band = IEEE80211_BAND_5GHZ, \ .center_freq = 5000 + (5 * (_channel)), \ .hw_value = (_channel), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } #define RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2) #define RATETAB_ENT(_rateid, _flags) \ { \ .bitrate = RATE_TO_BASE100KBPS(_rateid), \ .hw_value = (_rateid), \ .flags = (_flags), \ } static struct ieee80211_rate 
__wl_rates[] = { RATETAB_ENT(WLC_RATE_1M, 0), RATETAB_ENT(WLC_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE), RATETAB_ENT(WLC_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE), RATETAB_ENT(WLC_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE), RATETAB_ENT(WLC_RATE_6M, 0), RATETAB_ENT(WLC_RATE_9M, 0), RATETAB_ENT(WLC_RATE_12M, 0), RATETAB_ENT(WLC_RATE_18M, 0), RATETAB_ENT(WLC_RATE_24M, 0), RATETAB_ENT(WLC_RATE_36M, 0), RATETAB_ENT(WLC_RATE_48M, 0), RATETAB_ENT(WLC_RATE_54M, 0) }; #define wl_a_rates (__wl_rates + 4) #define wl_a_rates_size 8 #define wl_g_rates (__wl_rates + 0) #define wl_g_rates_size 12 static struct ieee80211_channel __wl_2ghz_channels[] = { CHAN2G(1, 2412, 0), CHAN2G(2, 2417, 0), CHAN2G(3, 2422, 0), CHAN2G(4, 2427, 0), CHAN2G(5, 2432, 0), CHAN2G(6, 2437, 0), CHAN2G(7, 2442, 0), CHAN2G(8, 2447, 0), CHAN2G(9, 2452, 0), CHAN2G(10, 2457, 0), CHAN2G(11, 2462, 0), CHAN2G(12, 2467, 0), CHAN2G(13, 2472, 0), CHAN2G(14, 2484, 0) }; static struct ieee80211_channel __wl_5ghz_a_channels[] = { CHAN5G(34, 0), CHAN5G(36, 0), CHAN5G(38, 0), CHAN5G(40, 0), CHAN5G(42, 0), CHAN5G(44, 0), CHAN5G(46, 0), CHAN5G(48, 0), CHAN5G(52, 0), CHAN5G(56, 0), CHAN5G(60, 0), CHAN5G(64, 0), CHAN5G(100, 0), CHAN5G(104, 0), CHAN5G(108, 0), CHAN5G(112, 0), CHAN5G(116, 0), CHAN5G(120, 0), CHAN5G(124, 0), CHAN5G(128, 0), CHAN5G(132, 0), CHAN5G(136, 0), CHAN5G(140, 0), CHAN5G(149, 0), CHAN5G(153, 0), CHAN5G(157, 0), CHAN5G(161, 0), CHAN5G(165, 0) }; static struct ieee80211_supported_band __wl_band_2ghz = { .band = IEEE80211_BAND_2GHZ, .channels = __wl_2ghz_channels, .n_channels = ARRAY_SIZE(__wl_2ghz_channels), .bitrates = wl_g_rates, .n_bitrates = wl_g_rates_size }; static struct ieee80211_supported_band __wl_band_5ghz_a = { .band = IEEE80211_BAND_5GHZ, .channels = __wl_5ghz_a_channels, .n_channels = ARRAY_SIZE(__wl_5ghz_a_channels), .bitrates = wl_a_rates, .n_bitrates = wl_a_rates_size }; static const u32 __wl_cipher_suites[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, 
	WLAN_CIPHER_SUITE_CCMP,
	WLAN_CIPHER_SUITE_AES_CMAC,
};

/* There isn't a lot of sense in it, but you can transmit anything you like */
/* Per-interface-type mgmt-frame subtype masks: .tx = what userspace may
 * transmit, .rx = which received subtypes are forwarded to userspace.
 * Masks are bit positions of the IEEE80211_STYPE_* value shifted right 4.
 */
static const struct ieee80211_txrx_stypes
wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
	[NL80211_IFTYPE_ADHOC] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4)
	},
	[NL80211_IFTYPE_STATION] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
	},
	[NL80211_IFTYPE_AP] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
		BIT(IEEE80211_STYPE_AUTH >> 4) |
		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
		BIT(IEEE80211_STYPE_ACTION >> 4)
	},
	[NL80211_IFTYPE_AP_VLAN] = {
		/* copy AP */
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
		BIT(IEEE80211_STYPE_AUTH >> 4) |
		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
		BIT(IEEE80211_STYPE_ACTION >> 4)
	},
	[NL80211_IFTYPE_P2P_CLIENT] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
	},
	[NL80211_IFTYPE_P2P_GO] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
		BIT(IEEE80211_STYPE_AUTH >> 4) |
		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
		BIT(IEEE80211_STYPE_ACTION >> 4)
	}
};

/* Convert all multi-byte fields of a wl_wsec_key from host to dongle
 * byte order before handing the key down to firmware.
 */
static void swap_key_from_BE(struct wl_wsec_key *key)
{
	key->index = htod32(key->index);
	key->len = htod32(key->len);
	key->algo = htod32(key->algo);
	key->flags = htod32(key->flags);
	key->rxiv.hi = htod32(key->rxiv.hi);
	key->rxiv.lo = htod16(key->rxiv.lo);
	key->iv_initialized = htod32(key->iv_initialized);
}

/* Convert all multi-byte fields of a wl_wsec_key from dongle to host
 * byte order after reading the key back from firmware.
 */
static void swap_key_to_BE(struct wl_wsec_key *key)
{
	key->index = dtoh32(key->index);
	key->len = dtoh32(key->len);
	key->algo = dtoh32(key->algo);
	key->flags = dtoh32(key->flags);
	key->rxiv.hi =
dtoh32(key->rxiv.hi); key->rxiv.lo = dtoh16(key->rxiv.lo); key->iv_initialized = dtoh32(key->iv_initialized); } /* For debug: Dump the contents of the encoded wps ie buffe */ static void wl_validate_wps_ie(char *wps_ie, bool *pbc) { #define WPS_IE_FIXED_LEN 6 u16 len = (u16) wps_ie[TLV_LEN_OFF]; u8 *subel = wps_ie+ WPS_IE_FIXED_LEN; u16 subelt_id; u16 subelt_len; u16 val; u8 *valptr = (uint8*) &val; WL_DBG(("wps_ie len=%d\n", len)); len -= 4; /* for the WPS IE's OUI, oui_type fields */ while (len >= 4) { /* must have attr id, attr len fields */ valptr[0] = *subel++; valptr[1] = *subel++; subelt_id = HTON16(val); valptr[0] = *subel++; valptr[1] = *subel++; subelt_len = HTON16(val); len -= 4; /* for the attr id, attr len fields */ len -= subelt_len; /* for the remaining fields in this attribute */ WL_DBG((" subel=%p, subelt_id=0x%x subelt_len=%u\n", subel, subelt_id, subelt_len)); if (subelt_id == WPS_ID_VERSION) { WL_DBG((" attr WPS_ID_VERSION: %u\n", *subel)); } else if (subelt_id == WPS_ID_REQ_TYPE) { WL_DBG((" attr WPS_ID_REQ_TYPE: %u\n", *subel)); } else if (subelt_id == WPS_ID_CONFIG_METHODS) { valptr[0] = *subel; valptr[1] = *(subel + 1); WL_DBG((" attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val))); } else if (subelt_id == WPS_ID_DEVICE_NAME) { char devname[100]; memcpy(devname, subel, subelt_len); devname[subelt_len] = '\0'; WL_DBG((" attr WPS_ID_DEVICE_NAME: %s (len %u)\n", devname, subelt_len)); } else if (subelt_id == WPS_ID_DEVICE_PWD_ID) { valptr[0] = *subel; valptr[1] = *(subel + 1); WL_DBG((" attr WPS_ID_DEVICE_PWD_ID: %u\n", HTON16(val))); *pbc = (HTON16(val) == DEV_PW_PUSHBUTTON) ? 
true : false; } else if (subelt_id == WPS_ID_PRIM_DEV_TYPE) { valptr[0] = *subel; valptr[1] = *(subel + 1); WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: cat=%u \n", HTON16(val))); valptr[0] = *(subel + 6); valptr[1] = *(subel + 7); WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: subcat=%u\n", HTON16(val))); } else if (subelt_id == WPS_ID_REQ_DEV_TYPE) { valptr[0] = *subel; valptr[1] = *(subel + 1); WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: cat=%u\n", HTON16(val))); valptr[0] = *(subel + 6); valptr[1] = *(subel + 7); WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: subcat=%u\n", HTON16(val))); } else if (subelt_id == WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS) { valptr[0] = *subel; valptr[1] = *(subel + 1); WL_DBG((" attr WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS" ": cat=%u\n", HTON16(val))); } else { WL_DBG((" unknown attr 0x%x\n", subelt_id)); } subel += subelt_len; } } static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy) { if (vsdb_supported) { return wf_chspec_aton(WL_P2P_TEMP_CHAN); } else { chanspec_t chspec; int err = 0; struct wl_priv *wl = wiphy_priv(wiphy); struct net_device *dev = wl_to_prmry_ndev(wl); struct ether_addr bssid; struct wl_bss_info *bss = NULL; if ((err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), false))) { /* STA interface is not associated. So start the new interface on a temp * channel . Later proper channel will be applied by the above framework * via set_channel (cfg80211 API). */ WL_DBG(("Not associated. Return a temp channel. \n")); return wf_chspec_aton(WL_P2P_TEMP_CHAN); } *(u32 *) wl->extra_buf = htod32(WL_EXTRA_BUF_MAX); if ((err = wldev_ioctl(dev, WLC_GET_BSS_INFO, wl->extra_buf, WL_EXTRA_BUF_MAX, false))) { WL_ERR(("Failed to get associated bss info, use temp channel \n")); chspec = wf_chspec_aton(WL_P2P_TEMP_CHAN); } else { bss = (struct wl_bss_info *) (wl->extra_buf + 4); chspec = bss->chanspec; WL_DBG(("Valid BSS Found. 
chanspec:%d \n", bss->chanspec)); } return chspec; } } static struct net_device* wl_cfg80211_add_monitor_if(char *name) { int ret = 0; struct net_device* ndev = NULL; ret = dhd_add_monitor(name, &ndev); WL_INFO(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev)); return ndev; } static struct net_device * wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, char *name, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { s32 err; s32 timeout = -1; s32 wlif_type = -1; s32 mode = 0; #if defined(WL_ENABLE_P2P_IF) s32 dhd_mode = 0; #endif /* (WL_ENABLE_P2P_IF) */ chanspec_t chspec; struct wl_priv *wl = wiphy_priv(wiphy); struct net_device *_ndev; struct ether_addr primary_mac; int (*net_attach)(void *dhdp, int ifidx); bool rollback_lock = false; /* Use primary I/F for sending cmds down to firmware */ _ndev = wl_to_prmry_ndev(wl); WL_DBG(("if name: %s, type: %d\n", name, type)); switch (type) { case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_MESH_POINT: WL_ERR(("Unsupported interface type\n")); mode = WL_MODE_IBSS; return NULL; case NL80211_IFTYPE_MONITOR: return wl_cfg80211_add_monitor_if(name); case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: wlif_type = WL_P2P_IF_CLIENT; mode = WL_MODE_BSS; break; case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_AP: wlif_type = WL_P2P_IF_GO; mode = WL_MODE_AP; break; default: WL_ERR(("Unsupported interface type\n")); return NULL; break; } if (!name) { WL_ERR(("name is NULL\n")); return NULL; } if (wl->p2p_supported && (wlif_type != -1)) { if (wl_get_p2p_status(wl, IF_DELETING)) { /* wait till IF_DEL is complete * release the lock for the unregister to proceed */ if (rtnl_is_locked()) { rtnl_unlock(); rollback_lock = true; } WL_INFO(("%s: Released the lock and wait till IF_DEL is complete\n", __func__)); timeout = wait_event_interruptible_timeout(wl->netif_change_event, (wl_get_p2p_status(wl, IF_DELETING) == false), 
msecs_to_jiffies(MAX_WAIT_TIME)); /* put back the rtnl_lock again */ if (rollback_lock) { rtnl_lock(); rollback_lock = false; } if (timeout > 0) { WL_ERR(("IF DEL is Success\n")); } else { WL_ERR(("timeount < 0, return -EAGAIN\n")); return ERR_PTR(-EAGAIN); } /* It should be now be safe to put this check here since we are sure * by now netdev_notifier (unregister) would have been called */ if (wl->iface_cnt == IFACE_MAX_CNT) return ERR_PTR(-ENOMEM); } if (wl->p2p && !wl->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) { p2p_on(wl) = true; wl_cfgp2p_set_firm_p2p(wl); wl_cfgp2p_init_discovery(wl); get_primary_mac(wl, &primary_mac); wl_cfgp2p_generate_bss_mac(&primary_mac, &wl->p2p->dev_addr, &wl->p2p->int_addr); } memset(wl->p2p->vir_ifname, 0, IFNAMSIZ); strncpy(wl->p2p->vir_ifname, name, IFNAMSIZ - 1); wldev_iovar_setint(_ndev, "mpc", 0); wl_notify_escan_complete(wl, _ndev, true, true); /* In concurrency case, STA may be already associated in a particular channel. * so retrieve the current channel of primary interface and then start the virtual * interface on that. 
*/ chspec = wl_cfg80211_get_shared_freq(wiphy); /* For P2P mode, use P2P-specific driver features to create the * bss: "wl p2p_ifadd" */ wl_set_p2p_status(wl, IF_ADD); err = wl_cfgp2p_ifadd(wl, &wl->p2p->int_addr, htod32(wlif_type), chspec); if (unlikely(err)) { WL_ERR((" virtual iface add failed (%d) \n", err)); return ERR_PTR(-ENOMEM); } timeout = wait_event_interruptible_timeout(wl->netif_change_event, (wl_get_p2p_status(wl, IF_ADD) == false), msecs_to_jiffies(MAX_WAIT_TIME)); if (timeout > 0 && (!wl_get_p2p_status(wl, IF_ADD))) { struct wireless_dev *vwdev; vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL); if (unlikely(!vwdev)) { WL_ERR(("Could not allocate wireless device\n")); return ERR_PTR(-ENOMEM); } vwdev->wiphy = wl->wdev->wiphy; WL_INFO((" virtual interface(%s) is created memalloc done \n", wl->p2p->vir_ifname)); vwdev->iftype = type; _ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION); _ndev->ieee80211_ptr = vwdev; SET_NETDEV_DEV(_ndev, wiphy_dev(vwdev->wiphy)); vwdev->netdev = _ndev; wl_set_drv_status(wl, READY, _ndev); wl->p2p->vif_created = true; wl_set_mode_by_netdev(wl, _ndev, mode); net_attach = wl_to_p2p_bss_private(wl, P2PAPI_BSSCFG_CONNECTION); if (rtnl_is_locked()) { rtnl_unlock(); rollback_lock = true; } if (net_attach && !net_attach(wl->pub, _ndev->ifindex)) { wl_alloc_netinfo(wl, _ndev, vwdev, mode); WL_ERR((" virtual interface(%s) is " "created net attach done\n", wl->p2p->vir_ifname)); #if defined(WL_ENABLE_P2P_IF) if (type == NL80211_IFTYPE_P2P_CLIENT) dhd_mode = P2P_GC_ENABLED; else if (type == NL80211_IFTYPE_P2P_GO) dhd_mode = P2P_GO_ENABLED; DNGL_FUNC(dhd_cfg80211_set_p2p_info, (wl, dhd_mode)); #endif /* (WL_ENABLE_P2P_IF) */ /* Start the P2P I/F with PM disabled. 
Enable PM from * the framework */ if ((type == NL80211_IFTYPE_P2P_CLIENT) || ( type == NL80211_IFTYPE_P2P_GO)) vwdev->ps = NL80211_PS_DISABLED; } else { /* put back the rtnl_lock again */ if (rollback_lock) rtnl_lock(); goto fail; } /* put back the rtnl_lock again */ if (rollback_lock) rtnl_lock(); return _ndev; } else { wl_clr_p2p_status(wl, IF_ADD); WL_ERR((" virtual interface(%s) is not created \n", wl->p2p->vir_ifname)); memset(wl->p2p->vir_ifname, '\0', IFNAMSIZ); wl->p2p->vif_created = false; } } fail: return ERR_PTR(-ENODEV); } static s32 wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, struct net_device *dev) { struct ether_addr p2p_mac; struct wl_priv *wl = wiphy_priv(wiphy); s32 timeout = -1; s32 ret = 0; WL_DBG(("Enter\n")); if (wl->p2p_net == dev) { /* Since there is no ifidx corresponding to p2p0, cmds to * firmware should be routed through primary I/F */ dev = wl_to_prmry_ndev(wl); } if (wl->p2p_supported) { memcpy(p2p_mac.octet, wl->p2p->int_addr.octet, ETHER_ADDR_LEN); /* Clear GO_NEG_PHASE bit to take care of GO-NEG-FAIL cases */ WL_DBG(("P2P: GO_NEG_PHASE status cleared ")); wl_clr_p2p_status(wl, GO_NEG_PHASE); if (wl->p2p->vif_created) { if (wl_get_drv_status(wl, SCANNING, dev)) { wl_notify_escan_complete(wl, dev, true, true); } wldev_iovar_setint(dev, "mpc", 1); wl_set_p2p_status(wl, IF_DELETING); ret = wl_cfgp2p_ifdel(wl, &p2p_mac); /* Firmware could not delete the interface so we will not get WLC_E_IF * event for cleaning the dhd virtual nw interace * So lets do it here. 
Failures from fw will ensure the application to do * ifconfig <inter> down and up sequnce, which will reload the fw * however we should cleanup the linux network virtual interfaces */ /* Request framework to RESET and clean up */ if (ret) { struct net_device *ndev = wl_to_prmry_ndev(wl); WL_ERR(("Firmware returned an error (%d) from p2p_ifdel" "HANG Notification sent to %s\n", ret, ndev->name)); wl_cfg80211_hang(ndev, WLAN_REASON_DRIVER_ERROR); } /* Wait for any pending scan req to get aborted from the sysioc context */ timeout = wait_event_interruptible_timeout(wl->netif_change_event, (wl->p2p->vif_created == false), msecs_to_jiffies(MAX_WAIT_TIME)); if (timeout > 0 && (wl->p2p->vif_created == false)) { WL_DBG(("IFDEL operation done\n")); #if defined(WL_ENABLE_P2P_IF) DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (wl)); #endif /* (WL_ENABLE_P2P_IF)) */ } else { WL_ERR(("IFDEL didn't complete properly\n")); } ret = dhd_del_monitor(dev); } } return ret; } static s32 wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { s32 ap = 0; s32 infra = 0; s32 err = BCME_OK; s32 timeout = -1; s32 wlif_type; s32 mode = 0; chanspec_t chspec; struct wl_priv *wl = wiphy_priv(wiphy); WL_DBG(("Enter type %d\n", type)); switch (type) { case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_MESH_POINT: ap = 1; WL_ERR(("type (%d) : currently we do not support this type\n", type)); break; case NL80211_IFTYPE_ADHOC: mode = WL_MODE_IBSS; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: mode = WL_MODE_BSS; infra = 1; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: mode = WL_MODE_AP; ap = 1; break; default: return -EINVAL; } WL_DBG(("%s : ap (%d), infra (%d), iftype: (%d)\n", ndev->name, ap, infra, type)); if (ap) { wl_set_mode_by_netdev(wl, ndev, mode); if (wl->p2p_supported && wl->p2p->vif_created) { WL_DBG(("p2p_vif_created 
(%d) p2p_on (%d)\n", wl->p2p->vif_created, p2p_on(wl))); wldev_iovar_setint(ndev, "mpc", 0); wl_notify_escan_complete(wl, ndev, true, true); /* In concurrency case, STA may be already associated in a particular * channel. so retrieve the current channel of primary interface and * then start the virtual interface on that. */ chspec = wl_cfg80211_get_shared_freq(wiphy); wlif_type = WL_P2P_IF_GO; WL_ERR(("%s : ap (%d), infra (%d), iftype: (%d)\n", ndev->name, ap, infra, type)); wl_set_p2p_status(wl, IF_CHANGING); wl_clr_p2p_status(wl, IF_CHANGED); err = wl_cfgp2p_ifchange(wl, &wl->p2p->int_addr, htod32(wlif_type), chspec); timeout = wait_event_interruptible_timeout(wl->netif_change_event, (wl_get_p2p_status(wl, IF_CHANGED) == true), msecs_to_jiffies(MAX_WAIT_TIME)); wl_set_mode_by_netdev(wl, ndev, mode); wl_clr_p2p_status(wl, IF_CHANGING); wl_clr_p2p_status(wl, IF_CHANGED); } else if (ndev == wl_to_prmry_ndev(wl) && !wl_get_drv_status(wl, AP_CREATED, ndev)) { wl_set_drv_status(wl, AP_CREATING, ndev); if (!wl->ap_info && !(wl->ap_info = kzalloc(sizeof(struct ap_info), GFP_KERNEL))) { WL_ERR(("struct ap_saved_ie allocation failed\n")); return -ENOMEM; } } else { WL_ERR(("Cannot change the interface for GO or SOFTAP\n")); return -EINVAL; } } else { infra = htod32(infra); err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(s32), true); if (err) { WL_ERR(("WLC_SET_INFRA error (%d)\n", err)); return -EAGAIN; } wl_set_mode_by_netdev(wl, ndev, mode); } ndev->ieee80211_ptr->iftype = type; return 0; } s32 wl_cfg80211_notify_ifadd(struct net_device *ndev, s32 idx, s32 bssidx, void* _net_attach) { struct wl_priv *wl = wlcfg_drv_priv; s32 ret = BCME_OK; WL_DBG(("Enter")); if (!ndev) { WL_ERR(("net is NULL\n")); return 0; } if (wl->p2p_supported && wl_get_p2p_status(wl, IF_ADD)) { WL_DBG(("IF_ADD event called from dongle, old interface name: %s," "new name: %s\n", ndev->name, wl->p2p->vir_ifname)); /* Assign the net device to CONNECT BSSCFG */ strncpy(ndev->name, 
	    wl->p2p->vir_ifname, IFNAMSIZ - 1);
		/* Bind the new netdev to the P2P CONNECTION bsscfg slot and
		 * record the attach callback supplied by the dongle layer.
		 */
		wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = ndev;
		wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = bssidx;
		wl_to_p2p_bss_private(wl, P2PAPI_BSSCFG_CONNECTION) = _net_attach;
		ndev->ifindex = idx;
		wl_clr_p2p_status(wl, IF_ADD);
		/* Unblock wl_cfg80211_add_virtual_iface(), which waits on this event */
		wake_up_interruptible(&wl->netif_change_event);
	} else {
		ret = BCME_NOTREADY;
	}
	return ret;
}

/* Called on the dongle IF_DEL completion path: clear the IF_DELETING flag
 * and wake any thread waiting for the interface delete to finish.
 */
s32
wl_cfg80211_notify_ifdel(void)
{
	struct wl_priv *wl = wlcfg_drv_priv;

	WL_DBG(("Enter \n"));
	wl_clr_p2p_status(wl, IF_DELETING);
	wake_up_interruptible(&wl->netif_change_event);
	return 0;
}

/* Cleanup hook for a deleted virtual P2P interface: abort any escan that was
 * running on it, clear the p2p bsscfg bookkeeping for its index, and wake
 * waiters on netif_change_event.
 */
s32
wl_cfg80211_ifdel_ops(struct net_device *ndev)
{
	struct wl_priv *wl = wlcfg_drv_priv;
	bool rollback_lock = false;
	s32 index = 0;

	if (!ndev || !ndev->name) {
		WL_ERR(("net is NULL\n"));
		return 0;
	}

	if (p2p_is_on(wl) && wl->p2p->vif_created &&
		wl_get_p2p_status(wl, IF_DELETING)) {
		if (wl->scan_request &&
			(wl->escan_info.ndev == ndev)) {
			/* Abort any pending scan requests */
			wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
			/* escan completion notification needs rtnl held; take it
			 * only if the caller did not already hold it.
			 */
			if (!rtnl_is_locked()) {
				rtnl_lock();
				rollback_lock = true;
			}
			WL_DBG(("ESCAN COMPLETED\n"));
			wl_notify_escan_complete(wl, ndev, true, false);
			if (rollback_lock)
				rtnl_unlock();
		}
		/* NOTE(review): casting a pointer to unsigned int truncates on
		 * 64-bit kernels; %p would be correct. Debug print only.
		 */
		WL_ERR(("IF_DEL event called from dongle, net %x, vif name: %s\n",
			(unsigned int)ndev, wl->p2p->vir_ifname));

		memset(wl->p2p->vir_ifname, '\0', IFNAMSIZ);
		index = wl_cfgp2p_find_idx(wl, ndev);
		wl_to_p2p_bss_ndev(wl, index) = NULL;
		wl_to_p2p_bss_bssidx(wl, index) = 0;
		wl->p2p->vif_created = false;
		wl_cfgp2p_clear_management_ie(wl, index);
		WL_DBG(("index : %d\n", index));
	}
	/* Wake up any waiting thread */
	wake_up_interruptible(&wl->netif_change_event);
	return 0;
}

/* Nonzero while a virtual-interface add is still pending in firmware. */
s32
wl_cfg80211_is_progress_ifadd(void)
{
	s32 is_progress = 0;
	struct wl_priv *wl = wlcfg_drv_priv;
	if (wl_get_p2p_status(wl, IF_ADD))
		is_progress = 1;
	return is_progress;
}

/* Nonzero while an interface-type change is still pending in firmware. */
s32
wl_cfg80211_is_progress_ifchange(void)
{
	s32 is_progress = 0;
	struct wl_priv *wl = wlcfg_drv_priv;
	if (wl_get_p2p_status(wl, IF_CHANGING))
		is_progress = 1;
	return is_progress;
}

s32
wl_cfg80211_notify_ifchange(void) { struct wl_priv *wl = wlcfg_drv_priv; if (wl_get_p2p_status(wl, IF_CHANGING)) { wl_set_p2p_status(wl, IF_CHANGED); wake_up_interruptible(&wl->netif_change_event); } return 0; } static void wl_scan_prep(struct wl_scan_params *params, struct cfg80211_scan_request *request) { u32 n_ssids; u32 n_channels; u16 channel; chanspec_t chanspec; s32 i, offset; char *ptr; wlc_ssid_t ssid; memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN); params->bss_type = DOT11_BSSTYPE_ANY; params->scan_type = 0; params->nprobes = -1; params->active_time = -1; params->passive_time = -1; params->home_time = -1; params->channel_num = 0; memset(&params->ssid, 0, sizeof(wlc_ssid_t)); WL_SCAN(("Preparing Scan request\n")); WL_SCAN(("nprobes=%d\n", params->nprobes)); WL_SCAN(("active_time=%d\n", params->active_time)); WL_SCAN(("passive_time=%d\n", params->passive_time)); WL_SCAN(("home_time=%d\n", params->home_time)); WL_SCAN(("scan_type=%d\n", params->scan_type)); params->nprobes = htod32(params->nprobes); params->active_time = htod32(params->active_time); params->passive_time = htod32(params->passive_time); params->home_time = htod32(params->home_time); /* if request is null just exit so it will be all channel broadcast scan */ if (!request) return; n_ssids = request->n_ssids; n_channels = request->n_channels; /* Copy channel array if applicable */ WL_SCAN(("### List of channelspecs to scan ###\n")); if (n_channels > 0) { for (i = 0; i < n_channels; i++) { chanspec = 0; channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq); if (request->channels[i]->band == IEEE80211_BAND_2GHZ) chanspec |= WL_CHANSPEC_BAND_2G; else chanspec |= WL_CHANSPEC_BAND_5G; if (request->channels[i]->flags & IEEE80211_CHAN_NO_HT40) { chanspec |= WL_CHANSPEC_BW_20; chanspec |= WL_CHANSPEC_CTL_SB_NONE; } else { chanspec |= WL_CHANSPEC_BW_40; if (request->channels[i]->flags & IEEE80211_CHAN_NO_HT40PLUS) chanspec |= WL_CHANSPEC_CTL_SB_LOWER; else chanspec |= 
WL_CHANSPEC_CTL_SB_UPPER; } params->channel_list[i] = channel; params->channel_list[i] &= WL_CHANSPEC_CHAN_MASK; params->channel_list[i] |= chanspec; WL_SCAN(("Chan : %d, Channel spec: %x \n", channel, params->channel_list[i])); params->channel_list[i] = htod16(params->channel_list[i]); } } else { WL_SCAN(("Scanning all channels\n")); } /* Copy ssid array if applicable */ WL_SCAN(("### List of SSIDs to scan ###\n")); if (n_ssids > 0) { offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16); offset = roundup(offset, sizeof(u32)); ptr = (char*)params + offset; for (i = 0; i < n_ssids; i++) { memset(&ssid, 0, sizeof(wlc_ssid_t)); ssid.SSID_len = request->ssids[i].ssid_len; memcpy(ssid.SSID, request->ssids[i].ssid, ssid.SSID_len); if (!ssid.SSID_len) WL_SCAN(("%d: Broadcast scan\n", i)); else WL_SCAN(("%d: scan for %s size =%d\n", i, ssid.SSID, ssid.SSID_len)); memcpy(ptr, &ssid, sizeof(wlc_ssid_t)); ptr += sizeof(wlc_ssid_t); } } else { WL_SCAN(("Broadcast scan\n")); } /* Adding mask to channel numbers */ params->channel_num = htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) | (n_channels & WL_SCAN_PARAMS_COUNT_MASK)); } static s32 wl_run_iscan(struct wl_iscan_ctrl *iscan, struct cfg80211_scan_request *request, u16 action) { u32 n_channels; u32 n_ssids; s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + offsetof(wl_iscan_params_t, params)); struct wl_iscan_params *params; s32 err = 0; if (request != NULL) { n_channels = request->n_channels; n_ssids = request->n_ssids; /* Allocate space for populating ssids in wl_iscan_params struct */ if (n_channels % 2) /* If n_channels is odd, add a padd of u16 */ params_size += sizeof(u16) * (n_channels + 1); else params_size += sizeof(u16) * n_channels; /* Allocate space for populating ssids in wl_iscan_params struct */ params_size += sizeof(struct wlc_ssid) * n_ssids; } params = (struct wl_iscan_params *)kzalloc(params_size, GFP_KERNEL); if (!params) { return -ENOMEM; } wl_scan_prep(&params->params, request); 
	params->version = htod32(ISCAN_REQ_VERSION);
	params->action = htod16(action);
	params->scan_duration = htod16(0);

	/* +1 accounts for the NUL in the iovar name prepended to the buffer */
	if (params_size + sizeof("iscan") >= WLC_IOCTL_MEDLEN) {
		WL_ERR(("ioctl buffer length is not sufficient\n"));
		err = -ENOMEM;
		goto done;
	}
	err = wldev_iovar_setbuf(iscan->dev, "iscan", params, params_size,
		iscan->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
	if (unlikely(err)) {
		if (err == -EBUSY) {
			WL_ERR(("system busy : iscan canceled\n"));
		} else {
			WL_ERR(("error (%d)\n", err));
		}
	}
done:
	kfree(params);
	return err;
}

/* Start an incremental scan (iscan): set passive/active mode, kick the
 * firmware via wl_run_iscan(), and arm the iscan poll timer.
 */
static s32
wl_do_iscan(struct wl_priv *wl, struct cfg80211_scan_request *request)
{
	struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
	struct net_device *ndev = wl_to_prmry_ndev(wl);
	s32 passive_scan;
	s32 err = 0;

	iscan->state = WL_ISCAN_STATE_SCANING;

	/* active_scan == 0 means the driver is configured for passive scans */
	passive_scan = wl->active_scan ? 0 : 1;
	err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
		&passive_scan, sizeof(passive_scan), false);
	if (unlikely(err)) {
		WL_DBG(("error (%d)\n", err));
		return err;
	}
	wl->iscan_kickstart = true;
	wl_run_iscan(iscan, request, WL_SCAN_ACTION_START);
	mod_timer(&iscan->timer, jiffies + msecs_to_jiffies(iscan->timer_ms));
	iscan->timer_on = 1;

	return err;
}

/* Ask the firmware for the list of currently valid channels.
 * valid_chan_list must hold at least `size` bytes; on success it contains a
 * wl_uint32_list_t (count followed by channel numbers).
 */
static s32
wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
{
	wl_uint32_list_t *list;
	s32 err = BCME_OK;
	if (valid_chan_list == NULL || size <= 0)
		return -ENOMEM;

	memset(valid_chan_list, 0, size);
	list = (wl_uint32_list_t *)(void *) valid_chan_list;
	/* Tell the firmware how many entries the buffer can hold */
	list->count = htod32(WL_NUMCHANNELS);
	err = wldev_ioctl(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size, false);
	if (err != 0) {
		WL_ERR(("get channels failed with %d\n", err));
	}

	return err;
}

/* Run one escan pass: legacy escan for ordinary interfaces, or a
 * P2P search/scan when P2P discovery is active.
 */
static s32
wl_run_escan(struct wl_priv *wl, struct net_device *ndev,
	struct cfg80211_scan_request *request, uint16 action)
{
	s32 err = BCME_OK;
	u32 n_channels;
	u32 n_ssids;
	s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
	wl_escan_params_t *params = NULL;
	struct cfg80211_scan_request *scan_request = wl->scan_request;
	u8
chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)]; u32 num_chans = 0; s32 channel; s32 n_valid_chan; s32 search_state = WL_P2P_DISC_ST_SCAN; u32 i, j, n_nodfs = 0; u16 *default_chan_list = NULL; wl_uint32_list_t *list; struct net_device *dev = NULL; WL_DBG(("Enter \n")); if (!wl->p2p_supported || ((ndev == wl_to_prmry_ndev(wl)) && !p2p_scan(wl))) { /* LEGACY SCAN TRIGGER */ WL_SCAN((" LEGACY E-SCAN START\n")); if (request != NULL) { n_channels = request->n_channels; n_ssids = request->n_ssids; /* Allocate space for populating ssids in wl_iscan_params struct */ if (n_channels % 2) /* If n_channels is odd, add a padd of u16 */ params_size += sizeof(u16) * (n_channels + 1); else params_size += sizeof(u16) * n_channels; /* Allocate space for populating ssids in wl_iscan_params struct */ params_size += sizeof(struct wlc_ssid) * n_ssids; } params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL); if (params == NULL) { err = -ENOMEM; goto exit; } wl_scan_prep(&params->params, request); params->version = htod32(ESCAN_REQ_VERSION); params->action = htod16(action); params->sync_id = htod16(0x1234); if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) { WL_ERR(("ioctl buffer length not sufficient\n")); kfree(params); err = -ENOMEM; goto exit; } err = wldev_iovar_setbuf(ndev, "escan", params, params_size, wl->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL); if (unlikely(err)) WL_ERR((" Escan set error (%d)\n", err)); kfree(params); } else if (p2p_is_on(wl) && p2p_scan(wl)) { /* P2P SCAN TRIGGER */ s32 _freq = 0; n_nodfs = 0; if (scan_request && scan_request->n_channels) { num_chans = scan_request->n_channels; WL_SCAN((" chann number : %d\n", num_chans)); default_chan_list = kzalloc(num_chans * sizeof(*default_chan_list), GFP_KERNEL); if (default_chan_list == NULL) { WL_ERR(("channel list allocation failed \n")); err = -ENOMEM; goto exit; } if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) { list = (wl_uint32_list_t *) chan_buf; n_valid_chan = dtoh32(list->count); for (i 
= 0; i < num_chans; i++) { _freq = scan_request->channels[i]->center_freq; channel = ieee80211_frequency_to_channel(_freq); /* remove DFS channels */ if (channel < 52 || channel > 140) { for (j = 0; j < n_valid_chan; j++) { /* allows only supported channel on * current reguatory */ if (channel == (dtoh32(list->element[j]))) default_chan_list[n_nodfs++] = channel; } } } } if (num_chans == 3 && ( (default_chan_list[0] == SOCIAL_CHAN_1) && (default_chan_list[1] == SOCIAL_CHAN_2) && (default_chan_list[2] == SOCIAL_CHAN_3))) { /* SOCIAL CHANNELS 1, 6, 11 */ search_state = WL_P2P_DISC_ST_SEARCH; WL_INFO(("P2P SEARCH PHASE START \n")); } else if ((dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION)) && (wl_get_mode_by_netdev(wl, dev) == WL_MODE_AP)) { /* If you are already a GO, then do SEARCH only */ WL_INFO(("Already a GO. Do SEARCH Only")); search_state = WL_P2P_DISC_ST_SEARCH; num_chans = n_nodfs; } else { WL_INFO(("P2P SCAN STATE START \n")); num_chans = n_nodfs; } } err = wl_cfgp2p_escan(wl, ndev, wl->active_scan, num_chans, default_chan_list, search_state, action, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); kfree(default_chan_list); } exit: if (unlikely(err)) { WL_ERR(("error (%d)\n", err)); } return err; } static s32 wl_do_escan(struct wl_priv *wl, struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_scan_request *request) { s32 err = BCME_OK; s32 passive_scan; wl_scan_results_t *results; WL_SCAN(("Enter \n")); mutex_lock(&wl->usr_sync); wl->escan_info.ndev = ndev; wl->escan_info.wiphy = wiphy; wl->escan_info.escan_state = WL_ESCAN_STATE_SCANING; passive_scan = wl->active_scan ? 
0 : 1; err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN, &passive_scan, sizeof(passive_scan), false); if (unlikely(err)) { WL_ERR(("error (%d)\n", err)); goto exit; } results = (wl_scan_results_t *) wl->escan_info.escan_buf; results->version = 0; results->count = 0; results->buflen = WL_SCAN_RESULTS_FIXED_SIZE; err = wl_run_escan(wl, ndev, request, WL_SCAN_ACTION_START); exit: mutex_unlock(&wl->usr_sync); return err; } static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_scan_request *request, struct cfg80211_ssid *this_ssid) { struct wl_priv *wl = wiphy_priv(wiphy); struct cfg80211_ssid *ssids; struct wl_scan_req *sr = wl_to_sr(wl); struct ether_addr primary_mac; wpa_ie_fixed_t *wps_ie; s32 passive_scan; bool iscan_req; bool escan_req = false; bool p2p_ssid; s32 err = 0; s32 i; u32 wpsie_len = 0; u8 wpsie[IE_MAX_LEN]; /* If scan req comes for p2p0, send it over primary I/F * Scan results will be delivered corresponding to cfg80211_scan_request */ if (ndev == wl->p2p_net) { ndev = wl_to_prmry_ndev(wl); } WL_DBG(("Enter wiphy (%p)\n", wiphy)); if (wl_get_drv_status_all(wl, SCANNING)) { if (wl->scan_request == NULL) { wl_clr_drv_status_all(wl, SCANNING); WL_DBG(("<<<<<<<<<<<Force Clear Scanning Status>>>>>>>>>>>\n")); } else { WL_ERR(("Scanning already\n")); return -EAGAIN; } } if (wl_get_drv_status(wl, SCAN_ABORTING, ndev)) { WL_ERR(("Scanning being aborted\n")); return -EAGAIN; } if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) { WL_ERR(("request null or n_ssids > WL_SCAN_PARAMS_SSID_MAX\n")); return -EOPNOTSUPP; } /* Arm scan timeout timer */ mod_timer(&wl->scan_timeout, jiffies + msecs_to_jiffies(WL_SCAN_TIMER_INTERVAL_MS)); iscan_req = false; if (request) { /* scan bss */ ssids = request->ssids; if (wl->iscan_on && (!ssids || !ssids->ssid_len || request->n_ssids != 1)) { iscan_req = true; } else if (wl->escan_on) { escan_req = true; p2p_ssid = false; for (i = 0; i < request->n_ssids; i++) { if (ssids[i].ssid_len && 
IS_P2P_SSID(ssids[i].ssid)) { p2p_ssid = true; break; } } if (p2p_ssid) { if (wl->p2p_supported) { /* p2p scan trigger */ if (p2p_on(wl) == false) { /* p2p on at the first time */ p2p_on(wl) = true; wl_cfgp2p_set_firm_p2p(wl); get_primary_mac(wl, &primary_mac); wl_cfgp2p_generate_bss_mac(&primary_mac, &wl->p2p->dev_addr, &wl->p2p->int_addr); } wl_clr_p2p_status(wl, GO_NEG_PHASE); WL_DBG(("P2P: GO_NEG_PHASE status cleared \n")); p2p_scan(wl) = true; } } else { /* legacy scan trigger * So, we have to disable p2p discovery if p2p discovery is on */ if (wl->p2p_supported) { p2p_scan(wl) = false; /* If Netdevice is not equals to primary and p2p is on * , we will do p2p scan using P2PAPI_BSSCFG_DEVICE. */ if (p2p_on(wl) && (ndev != wl_to_prmry_ndev(wl))) p2p_scan(wl) = true; if (p2p_scan(wl) == false) { if (wl_get_p2p_status(wl, DISCOVERY_ON)) { err = wl_cfgp2p_discover_enable_search(wl, false); if (unlikely(err)) { goto scan_out; } } } } if (!wl->p2p_supported || !p2p_scan(wl)) { if (ndev == wl_to_prmry_ndev(wl)) { /* find the WPSIE */ memset(wpsie, 0, sizeof(wpsie)); if ((wps_ie = wl_cfgp2p_find_wpsie( (u8 *)request->ie, request->ie_len)) != NULL) { wpsie_len = wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN; memcpy(wpsie, wps_ie, wpsie_len); } else { wpsie_len = 0; } if (wpsie_len > 0) { err = wl_cfgp2p_set_management_ie(wl, ndev, -1, VNDR_IE_PRBREQ_FLAG, wpsie, wpsie_len); if (unlikely(err)) { goto scan_out; } } } } } } } else { /* scan in ibss */ /* we don't do iscan in ibss */ ssids = this_ssid; } wl->scan_request = request; wl_set_drv_status(wl, SCANNING, ndev); if (iscan_req) { err = wl_do_iscan(wl, request); if (likely(!err)) return err; else goto scan_out; } else if (escan_req) { if (wl->p2p_supported) { if (p2p_on(wl) && p2p_scan(wl)) { err = wl_cfgp2p_enable_discovery(wl, ndev, request->ie, request->ie_len); if (unlikely(err)) { goto scan_out; } } } err = wl_do_escan(wl, wiphy, ndev, request); if (likely(!err)) return err; else goto scan_out; } else { 
memset(&sr->ssid, 0, sizeof(sr->ssid)); sr->ssid.SSID_len = min_t(u8, sizeof(sr->ssid.SSID), ssids->ssid_len); if (sr->ssid.SSID_len) { memcpy(sr->ssid.SSID, ssids->ssid, sr->ssid.SSID_len); sr->ssid.SSID_len = htod32(sr->ssid.SSID_len); WL_SCAN(("Specific scan ssid=\"%s\" len=%d\n", sr->ssid.SSID, sr->ssid.SSID_len)); } else { WL_SCAN(("Broadcast scan\n")); } WL_SCAN(("sr->ssid.SSID_len (%d)\n", sr->ssid.SSID_len)); passive_scan = wl->active_scan ? 0 : 1; err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN, &passive_scan, sizeof(passive_scan), false); if (unlikely(err)) { WL_SCAN(("WLC_SET_PASSIVE_SCAN error (%d)\n", err)); goto scan_out; } err = wldev_ioctl(ndev, WLC_SCAN, &sr->ssid, sizeof(sr->ssid), false); if (err) { if (err == -EBUSY) { WL_ERR(("system busy : scan for \"%s\" " "canceled\n", sr->ssid.SSID)); } else { WL_ERR(("WLC_SCAN error (%d)\n", err)); } goto scan_out; } } return 0; scan_out: wl_clr_drv_status(wl, SCANNING, ndev); if (timer_pending(&wl->scan_timeout)) del_timer_sync(&wl->scan_timeout); wl->scan_request = NULL; return err; } static s32 wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_scan_request *request) { s32 err = 0; struct wl_priv *wl = wiphy_priv(wiphy); WL_DBG(("Enter \n")); CHECK_SYS_UP(wl); err = __wl_cfg80211_scan(wiphy, ndev, request, NULL); if (unlikely(err)) { WL_ERR(("scan error (%d)\n", err)); if (err == BCME_BUSY) { wl->scan_busy_count++; if (wl->scan_busy_count > WL_SCAN_BUSY_MAX) { wl->scan_busy_count = 0; WL_ERR(("Continuous scan failures!! 
Exercising FW hang recovery\n")); net_os_send_hang_message(ndev); } } return err; } return err; } static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold) { s32 err = 0; err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold); if (unlikely(err)) { WL_ERR(("Error (%d)\n", err)); return err; } return err; } static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold) { s32 err = 0; err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0); if (unlikely(err)) { WL_ERR(("Error (%d)\n", err)); return err; } return err; } static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l) { s32 err = 0; u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL); retry = htod32(retry); err = wldev_ioctl(dev, cmd, &retry, sizeof(retry), false); if (unlikely(err)) { WL_ERR(("cmd (%d) , error (%d)\n", cmd, err)); return err; } return err; } static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) { struct wl_priv *wl = (struct wl_priv *)wiphy_priv(wiphy); struct net_device *ndev = wl_to_prmry_ndev(wl); s32 err = 0; CHECK_SYS_UP(wl); WL_DBG(("Enter\n")); if (changed & WIPHY_PARAM_RTS_THRESHOLD && (wl->conf->rts_threshold != wiphy->rts_threshold)) { wl->conf->rts_threshold = wiphy->rts_threshold; err = wl_set_rts(ndev, wl->conf->rts_threshold); if (!err) return err; } if (changed & WIPHY_PARAM_FRAG_THRESHOLD && (wl->conf->frag_threshold != wiphy->frag_threshold)) { wl->conf->frag_threshold = wiphy->frag_threshold; err = wl_set_frag(ndev, wl->conf->frag_threshold); if (!err) return err; } if (changed & WIPHY_PARAM_RETRY_LONG && (wl->conf->retry_long != wiphy->retry_long)) { wl->conf->retry_long = wiphy->retry_long; err = wl_set_retry(ndev, wl->conf->retry_long, true); if (!err) return err; } if (changed & WIPHY_PARAM_RETRY_SHORT && (wl->conf->retry_short != wiphy->retry_short)) { wl->conf->retry_short = wiphy->retry_short; err = wl_set_retry(ndev, wl->conf->retry_short, false); if (!err) { return err; } } return err; } static s32 
/* cfg80211 .join_ibss op: configure privacy/auth, beacon period, SSID,
 * BSSID and channel, then issue WLC_SET_SSID to start/join the IBSS.
 * An SSID is mandatory; joining by BSSID alone is rejected here.
 */
wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
	struct cfg80211_ibss_params *params)
{
	struct wl_priv *wl = wiphy_priv(wiphy);
	struct wl_join_params join_params;
	struct wlc_ssid ssid;
	struct ether_addr bssid;
	size_t join_params_size = 0;
	s32 wsec = 0;
	s32 bcnprd;
	s32 err = 0;

	WL_TRACE(("In\n"));
	CHECK_SYS_UP(wl);
	/*
	 * Cancel ongoing scan to sync up with sme state machine of cfg80211.
	 */
	if (wl->scan_request) {
		wl_notify_escan_complete(wl, dev, true, true);
	}
	/* Clean BSSID */
	bzero(&bssid, sizeof(bssid));
	wl_update_prof(wl, dev, NULL, (void *)&bssid, WL_PROF_BSSID);
	wl_update_prof(wl, dev, NULL, params->bssid, WL_PROF_PENDING_BSSID);
	if (params->ssid)
		WL_INFO(("SSID: %s\n", params->ssid));
	else {
		/* a NULL SSID (join-by-BSSID-only) is not supported */
		WL_ERR(("SSID: NULL, Not supported\n"));
		err = -EOPNOTSUPP;
		goto CleanUp;
	}
	if (params->bssid)
		WL_INFO(("BSSID: %02X:%02X:%02X:%02X:%02X:%02X\n",
			params->bssid[0], params->bssid[1], params->bssid[2],
			params->bssid[3], params->bssid[4], params->bssid[5]));
	if (params->channel)
		WL_INFO(("channel: %d\n", params->channel->center_freq));
	if (params->channel_fixed)
		WL_INFO(("fixed channel required\n"));
	if (params->ie && params->ie_len)
		WL_INFO(("ie len: %d\n", params->ie_len));
	if (params->beacon_interval)
		WL_INFO(("beacon interval: %d\n", params->beacon_interval));
	if (params->basic_rates)
		WL_INFO(("basic rates: %08X\n", params->basic_rates));
	if (params->privacy)
		WL_INFO(("privacy required\n"));
	wl_set_drv_status(wl, CONNECTING, dev);
	/* Configure Privacy for starter */
	if (params->privacy)
		wsec |= WEP_ENABLED;
	err = wldev_iovar_setint(dev, "wsec", wsec);
	if (err) {
		WL_ERR(("wsec failed (%d)\n", err));
		goto CleanUp;
	}
	/* IBSS uses open-system auth with WPA disabled */
	err = wldev_iovar_setint(dev, "auth", WL_AUTH_OPEN_SYSTEM);
	if (err) {
		WL_ERR(("auth failed (%d)\n", err));
		goto CleanUp;
	}
	err = wldev_iovar_setint(dev, "wpa_auth", 0);
	if (err) {
		WL_ERR(("wpa_auth failed (%d)\n", err));
		goto CleanUp;
	}
	/* Configure Beacon Interval for starter */
	if (params->beacon_interval)
		bcnprd = params->beacon_interval;
	else
		bcnprd = 100;	/* default beacon period (TU) if unspecified */
	bcnprd = htod32(bcnprd);
	err = wldev_ioctl(dev, WLC_SET_BCNPRD, &bcnprd, sizeof(bcnprd), true);
	if (err) {
		WL_ERR(("WLC_SET_BCNPRD failed (%d)\n", err));
		goto CleanUp;
	}
	/* Configure required join parameter */
	memset(&join_params, 0, sizeof(struct wl_join_params));
	/* SSID */
	memset(&ssid, 0, sizeof(struct wlc_ssid));
	ssid.SSID_len = MIN(params->ssid_len, 32);
	join_params.ssid.SSID_len = htod32(ssid.SSID_len);
	memcpy(ssid.SSID, params->ssid, ssid.SSID_len);
	memcpy(join_params.ssid.SSID, params->ssid, ssid.SSID_len);
	join_params_size = sizeof(join_params.ssid);
	wl_update_prof(wl, dev, NULL, &ssid, WL_PROF_SSID);
	/* BSSID: broadcast when none was requested */
	if (params->bssid) {
		memcpy(&join_params.params.bssid, params->bssid,
			ETHER_ADDR_LEN);
		join_params_size = sizeof(join_params.ssid) +
			WL_ASSOC_PARAMS_FIXED_SIZE;
		wl_update_prof(wl, dev, NULL, params->bssid, WL_PROF_BSSID);
	} else {
		memcpy(&join_params.params.bssid, &ether_bcast,
			ETHER_ADDR_LEN);
	}
	/* Channel */
	if (params->channel) {
		u32 target_channel;

		target_channel = ieee80211_frequency_to_channel(
			params->channel->center_freq);
		if (params->channel_fixed) {
			/* adding chanspec */
			wl_ch_to_chanspec(target_channel, &join_params,
				&join_params_size);
		}
		/* set channel for starter */
		target_channel = htod32(target_channel);
		err = wldev_ioctl(dev, WLC_SET_CHANNEL, &target_channel,
			sizeof(target_channel), true);
		if (err) {
			WL_ERR(("WLC_SET_CHANNEL failed (%d)\n", err));
			goto CleanUp;
		}
	}
	wl->ibss_starter = false;
	err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size,
		true);
	if (err) {
		WL_ERR(("WLC_SET_SSID failed (%d)\n", err));
		goto CleanUp;
	}

CleanUp:
	if (err)
		wl_clr_drv_status(wl, CONNECTING, dev);
	WL_TRACE(("Exit\n"));
	return err;
}

/* cfg80211 .leave_ibss op: disassociate from the IBSS if we were active.
 * (Continues on the next chunk.)
 */
static s32
wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
{
	struct wl_priv *wl = wiphy_priv(wiphy);
	scb_val_t scbval;
	bool act = false;
	s32 err = 0;
	u8 *curbssid;

	WL_TRACE(("Enter\n"));
	CHECK_SYS_UP(wl);
	act = *(bool *) wl_read_prof(wl, dev, WL_PROF_ACT);
	curbssid =
		wl_read_prof(wl, dev, WL_PROF_BSSID);
	if (act) {
		/*
		 * Cancel ongoing scan to sync up with sme state machine of cfg80211.
		 */
		if (wl->scan_request) {
			wl_notify_escan_complete(wl, dev, true, true);
		}
		wl_set_drv_status(wl, DISCONNECTING, dev);
		scbval.val = DOT11_RC_DISASSOC_LEAVING;
		memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
		scbval.val = htod32(scbval.val);
		err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
			sizeof(scb_val_t), true);
		if (unlikely(err)) {
			wl_clr_drv_status(wl, DISCONNECTING, dev);
			WL_ERR(("error (%d)\n", err));
			return err;
		}
	}
	WL_TRACE(("Exit\n"));
	return err;
}

/* Program the firmware "wpa_auth" iovar from the requested WPA version(s)
 * and record the choice in the per-device security profile.  WPS
 * connections force WPA_AUTH_DISABLED regardless of the requested version.
 */
static s32
wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
{
	struct wl_priv *wl = wlcfg_drv_priv;
	struct wl_security *sec;
	s32 val = 0;
	s32 err = 0;
	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);

	if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
		val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
	else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
		val = WPA2_AUTH_PSK| WPA2_AUTH_UNSPECIFIED;
	else
		val = WPA_AUTH_DISABLED;
	if (is_wps_conn(sme))
		val = WPA_AUTH_DISABLED;
	WL_DBG(("setting wpa_auth to 0x%0x\n", val));
	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
	if (unlikely(err)) {
		WL_ERR(("set wpa_auth failed (%d)\n", err));
		return err;
	}
	sec = wl_read_prof(wl, dev, WL_PROF_SEC);
	sec->wpa_versions = sme->crypto.wpa_versions;
	return err;
}

/* Map the nl80211 auth type onto the firmware "auth" iovar and record it
 * in the security profile.
 */
static s32
wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
{
	struct wl_priv *wl = wlcfg_drv_priv;
	struct wl_security *sec;
	s32 val = 0;
	s32 err = 0;
	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);

	switch (sme->auth_type) {
	case NL80211_AUTHTYPE_OPEN_SYSTEM:
		val = WL_AUTH_OPEN_SYSTEM;
		WL_DBG(("open system\n"));
		break;
	case NL80211_AUTHTYPE_SHARED_KEY:
		val = WL_AUTH_SHARED_KEY;
		WL_DBG(("shared key\n"));
		break;
	case NL80211_AUTHTYPE_AUTOMATIC:
		val = WL_AUTH_OPEN_SHARED;
		WL_DBG(("automatic\n"));
		break;
	case NL80211_AUTHTYPE_NETWORK_EAP:
		WL_DBG(("network eap\n"));
		/* NOTE(review): no break — NETWORK_EAP deliberately(?) falls
		 * through to the default and is treated as open/shared while
		 * also logging "invalid auth type"; confirm this is intended.
		 */
	default:
		val = WL_AUTH_OPEN_SHARED;
		WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
		break;
	}
	err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
	if (unlikely(err)) {
		WL_ERR(("set auth failed (%d)\n", err));
		return err;
	}
	sec = wl_read_prof(wl, dev, WL_PROF_SEC);
	sec->auth_type = sme->auth_type;
	return err;
}

/* Translate the requested pairwise/group cipher suites into the firmware
 * "wsec" bitmask (WEP/TKIP/AES) and record both suites in the security
 * profile.  WPS connections use wsec=4 when privacy is requested, since
 * WPS-2.0 allows running without security.
 */
static s32
wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
{
	struct wl_priv *wl = wlcfg_drv_priv;
	struct wl_security *sec;
	s32 pval = 0;
	s32 gval = 0;
	s32 err = 0;
	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);

	if (sme->crypto.n_ciphers_pairwise) {
		switch (sme->crypto.ciphers_pairwise[0]) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
			pval = WEP_ENABLED;
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			pval = TKIP_ENABLED;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			pval = AES_ENABLED;
			break;
		case WLAN_CIPHER_SUITE_AES_CMAC:
			pval = AES_ENABLED;
			break;
		default:
			WL_ERR(("invalid cipher pairwise (%d)\n",
				sme->crypto.ciphers_pairwise[0]));
			return -EINVAL;
		}
	}
	if (sme->crypto.cipher_group) {
		switch (sme->crypto.cipher_group) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
			gval = WEP_ENABLED;
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			gval = TKIP_ENABLED;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			gval = AES_ENABLED;
			break;
		case WLAN_CIPHER_SUITE_AES_CMAC:
			gval = AES_ENABLED;
			break;
		default:
			WL_ERR(("invalid cipher group (%d)\n",
				sme->crypto.cipher_group));
			return -EINVAL;
		}
	}
	WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
	if (is_wps_conn(sme)) {
		if (sme->privacy)
			err = wldev_iovar_setint_bsscfg(dev, "wsec", 4, bssidx);
		else
			/* WPS-2.0 allowes no security */
			err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx);
	} else {
		WL_DBG((" NO, is_wps_conn, Set pval | gval to WSEC"));
		err = wldev_iovar_setint_bsscfg(dev, "wsec", pval | gval,
			bssidx);
	}
	if (unlikely(err)) {
		WL_ERR(("error (%d)\n", err));
		return err;
	}
	sec = wl_read_prof(wl, dev, WL_PROF_SEC);
	sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
	sec->cipher_group = sme->crypto.cipher_group;
	return err;
}
/* Refine the firmware "wpa_auth" setting using the requested AKM suite:
 * the previously programmed WPA/WPA2 family (read back from the firmware)
 * is narrowed down to PSK or 802.1X.  Records the AKM in the profile.
 */
static s32
wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
{
	struct wl_priv *wl = wlcfg_drv_priv;
	struct wl_security *sec;
	s32 val = 0;
	s32 err = 0;
	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);

	if (sme->crypto.n_akm_suites) {
		err = wldev_iovar_getint(dev, "wpa_auth", &val);
		if (unlikely(err)) {
			WL_ERR(("could not get wpa_auth (%d)\n", err));
			return err;
		}
		if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
			switch (sme->crypto.akm_suites[0]) {
			case WLAN_AKM_SUITE_8021X:
				val = WPA_AUTH_UNSPECIFIED;
				break;
			case WLAN_AKM_SUITE_PSK:
				val = WPA_AUTH_PSK;
				break;
			default:
				WL_ERR(("invalid cipher group (%d)\n",
					sme->crypto.cipher_group));
				return -EINVAL;
			}
		} else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
			switch (sme->crypto.akm_suites[0]) {
			case WLAN_AKM_SUITE_8021X:
				val = WPA2_AUTH_UNSPECIFIED;
				break;
			case WLAN_AKM_SUITE_PSK:
				val = WPA2_AUTH_PSK;
				break;
			default:
				WL_ERR(("invalid cipher group (%d)\n",
					sme->crypto.cipher_group));
				return -EINVAL;
			}
		}
		WL_DBG(("setting wpa_auth to %d\n", val));
		err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
		if (unlikely(err)) {
			WL_ERR(("could not set wpa_auth (%d)\n", err));
			return err;
		}
	}
	sec = wl_read_prof(wl, dev, WL_PROF_SEC);
	sec->wpa_auth = sme->crypto.akm_suites[0];
	return err;
}

/* Install a static WEP key supplied inline with the connect request.
 * Only applies when no WPA version is in use and the pairwise cipher is
 * WEP40/WEP104; also switches the firmware to shared-key auth when the
 * profile asks for it.
 */
static s32
wl_set_set_sharedkey(struct net_device *dev,
	struct cfg80211_connect_params *sme)
{
	struct wl_priv *wl = wlcfg_drv_priv;
	struct wl_security *sec;
	struct wl_wsec_key key;
	s32 val;
	s32 err = 0;
	s32 bssidx = wl_cfgp2p_find_idx(wl, dev);

	WL_DBG(("key len (%d)\n", sme->key_len));
	if (sme->key_len) {
		sec = wl_read_prof(wl, dev, WL_PROF_SEC);
		WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n",
			sec->wpa_versions, sec->cipher_pairwise));
		if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 |
			NL80211_WPA_VERSION_2)) &&
			(sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
			WLAN_CIPHER_SUITE_WEP104))) {
			memset(&key, 0, sizeof(key));
			key.len = (u32) sme->key_len;
			key.index = (u32) sme->key_idx;
			if (unlikely(key.len >
				sizeof(key.data))) {
				WL_ERR(("Too long key length (%u)\n", key.len));
				return -EINVAL;
			}
			memcpy(key.data, sme->key, key.len);
			key.flags = WL_PRIMARY_KEY;
			switch (sec->cipher_pairwise) {
			case WLAN_CIPHER_SUITE_WEP40:
				key.algo = CRYPTO_ALGO_WEP1;
				break;
			case WLAN_CIPHER_SUITE_WEP104:
				key.algo = CRYPTO_ALGO_WEP128;
				break;
			default:
				WL_ERR(("Invalid algorithm (%d)\n",
					sme->crypto.ciphers_pairwise[0]));
				return -EINVAL;
			}
			/* Set the new key/index */
			WL_DBG(("key length (%d) key index (%d) algo (%d)\n",
				key.len, key.index, key.algo));
			/* NOTE(review): this logs raw key material on debug
			 * builds — consider removing.
			 */
			WL_DBG(("key \"%s\"\n", key.data));
			swap_key_from_BE(&key);
			err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key,
				sizeof(key), wl->ioctl_buf, WLC_IOCTL_MAXLEN,
				bssidx, &wl->ioctl_buf_sync);
			if (unlikely(err)) {
				WL_ERR(("WLC_SET_KEY error (%d)\n", err));
				return err;
			}
			if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
				WL_DBG(("set auth_type to shared key\n"));
				val = WL_AUTH_SHARED_KEY;	/* shared key */
				err = wldev_iovar_setint_bsscfg(dev, "auth",
					val, bssidx);
				if (unlikely(err)) {
					WL_ERR(("set auth failed (%d)\n", err));
					return err;
				}
			}
		}
	}
	return err;
}

/* cfg80211 .connect op: program security settings and issue a join.
 * (Body continues on the next chunk.)
 */
static s32
wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
	struct cfg80211_connect_params *sme)
{
	struct wl_priv *wl = wiphy_priv(wiphy);
	struct ieee80211_channel *chan = sme->channel;
	wl_extjoin_params_t *ext_join_params;
	struct wl_join_params join_params;
	size_t join_params_size;
	s32 err = 0;
	wpa_ie_fixed_t *wpa_ie;
	wpa_ie_fixed_t *wps_ie;
	bcm_tlv_t *wpa2_ie;
	u8* wpaie = 0;
	u32 wpaie_len = 0;
	u32 wpsie_len = 0;
	u32 chan_cnt = 0;
	u8 wpsie[IE_MAX_LEN];
	struct ether_addr bssid;

	WL_DBG(("In\n"));
	CHECK_SYS_UP(wl);
	/*
	 * Cancel ongoing scan to sync up with sme state machine of cfg80211.
	 */
	if (wl->scan_request) {
		wl_notify_escan_complete(wl, dev, true, true);
	}
	/* Clean BSSID */
	bzero(&bssid, sizeof(bssid));
	wl_update_prof(wl, dev, NULL, (void *)&bssid, WL_PROF_BSSID);
	wl_update_prof(wl, dev, NULL, sme->bssid, WL_PROF_PENDING_BSSID);
	if (IS_P2P_SSID(sme->ssid) && (dev != wl_to_prmry_ndev(wl))) {
		/* we only allow to connect using virtual interface in case of P2P */
		if (p2p_is_on(wl) && is_wps_conn(sme)) {
			WL_DBG(("ASSOC1 p2p index : %d sme->ie_len %d\n",
				wl_cfgp2p_find_idx(wl, dev), sme->ie_len));
			/* Have to apply WPS IE + P2P IE in assoc req frame */
			wl_cfgp2p_set_management_ie(wl, dev,
				wl_cfgp2p_find_idx(wl, dev),
				VNDR_IE_PRBREQ_FLAG,
				wl_to_p2p_bss_saved_ie(wl,
					P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie,
				wl_to_p2p_bss_saved_ie(wl,
					P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len);
			wl_cfgp2p_set_management_ie(wl, dev,
				wl_cfgp2p_find_idx(wl, dev),
				VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
		} else if (p2p_is_on(wl) &&
			(sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) {
			/* This is the connect req after WPS is done [credentials exchanged]
			 * currently identified with WPA_VERSION_2 .
			 * Update the previously set IEs with
			 * the newly received IEs from Supplicant. This will remove the WPS IE from
			 * the Assoc Req.
			 */
			WL_DBG(("ASSOC2 p2p index : %d sme->ie_len %d\n",
				wl_cfgp2p_find_idx(wl, dev), sme->ie_len));
			wl_cfgp2p_set_management_ie(wl, dev,
				wl_cfgp2p_find_idx(wl, dev),
				VNDR_IE_PRBREQ_FLAG, sme->ie, sme->ie_len);
			wl_cfgp2p_set_management_ie(wl, dev,
				wl_cfgp2p_find_idx(wl, dev),
				VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
		}
	} else if (dev == wl_to_prmry_ndev(wl)) {
		/* find the RSN_IE */
		if ((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
			DOT11_MNG_RSN_ID)) != NULL) {
			WL_DBG((" WPA2 IE is found\n"));
		}
		/* find the WPA_IE */
		if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)sme->ie,
			sme->ie_len)) != NULL) {
			WL_DBG((" WPA IE is found\n"));
		}
		/* hand the supplicant's WPA/RSN IE to the firmware verbatim */
		if (wpa_ie != NULL || wpa2_ie != NULL) {
			wpaie = (wpa_ie != NULL) ?
				(u8 *)wpa_ie : (u8 *)wpa2_ie;
			wpaie_len = (wpa_ie != NULL) ?
				wpa_ie->length : wpa2_ie->len;
			wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN;
			wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
				wl->ioctl_buf, WLC_IOCTL_MAXLEN,
				&wl->ioctl_buf_sync);
		} else {
			wldev_iovar_setbuf(dev, "wpaie", NULL, 0,
				wl->ioctl_buf, WLC_IOCTL_MAXLEN,
				&wl->ioctl_buf_sync);
		}
		/* find the WPSIE */
		memset(wpsie, 0, sizeof(wpsie));
		if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)sme->ie,
			sme->ie_len)) != NULL) {
			wpsie_len = wps_ie->length +WPA_RSN_IE_TAG_FIXED_LEN;
			memcpy(wpsie, wps_ie, wpsie_len);
		} else {
			wpsie_len = 0;
		}
		err = wl_cfgp2p_set_management_ie(wl, dev, -1,
			VNDR_IE_ASSOCREQ_FLAG, wpsie, wpsie_len);
		if (unlikely(err)) {
			return err;
		}
	}
	if (unlikely(!sme->ssid)) {
		WL_ERR(("Invalid ssid\n"));
		return -EOPNOTSUPP;
	}
	if (chan) {
		wl->channel = ieee80211_frequency_to_channel(chan->center_freq);
		chan_cnt = 1;
		WL_DBG(("channel (%d), center_req (%d)\n", wl->channel,
			chan->center_freq));
	} else
		wl->channel = 0;
	WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len));
	/* push the security configuration down before joining */
	err = wl_set_wpa_version(dev, sme);
	if (unlikely(err)) {
		WL_ERR(("Invalid wpa_version\n"));
		return err;
	}
	err = wl_set_auth_type(dev, sme);
	if (unlikely(err)) {
		WL_ERR(("Invalid auth type\n"));
		return err;
	}
	err = wl_set_set_cipher(dev, sme);
	if (unlikely(err)) {
		WL_ERR(("Invalid ciper\n"));
		return err;
	}
	err = wl_set_key_mgmt(dev, sme);
	if (unlikely(err)) {
		WL_ERR(("Invalid key mgmt\n"));
		return err;
	}
	err = wl_set_set_sharedkey(dev, sme);
	if (unlikely(err)) {
		WL_ERR(("Invalid shared key\n"));
		return err;
	}
	/*
	 * Join with specific BSSID and cached SSID
	 * If SSID is zero join based on BSSID only
	 */
	join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
		chan_cnt * sizeof(chanspec_t);
	ext_join_params = (wl_extjoin_params_t*)kzalloc(join_params_size,
		GFP_KERNEL);
	if (ext_join_params == NULL) {
		err = -ENOMEM;
		wl_clr_drv_status(wl, CONNECTING, dev);
		goto exit;
	}
	ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID),
		sme->ssid_len);
	memcpy(&ext_join_params->ssid.SSID, sme->ssid,
		ext_join_params->ssid.SSID_len);
	ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
	/* Set up join scan parameters */
	ext_join_params->scan.scan_type = -1;
	ext_join_params->scan.nprobes = 2;
	/* increate dwell time to receive probe response or detect Beacon
	 * from target AP at a noisy air only during connect command
	 */
	ext_join_params->scan.active_time = WL_SCAN_ACTIVE_TIME*3;
	ext_join_params->scan.passive_time = WL_SCAN_PASSIVE_TIME*3;
	ext_join_params->scan.home_time = -1;
	if (sme->bssid)
		memcpy(&ext_join_params->assoc.bssid, sme->bssid, ETH_ALEN);
	else
		memcpy(&ext_join_params->assoc.bssid, &ether_bcast, ETH_ALEN);
	ext_join_params->assoc.chanspec_num = chan_cnt;
	if (chan_cnt) {
		/* build a 20 MHz chanspec for the requested channel */
		u16 channel, band, bw, ctl_sb;
		chanspec_t chspec;
		channel = wl->channel;
		band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
			: WL_CHANSPEC_BAND_5G;
		bw = WL_CHANSPEC_BW_20;
		ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
		chspec = (channel | band | bw | ctl_sb);
		ext_join_params->assoc.chanspec_list[0] &=
			WL_CHANSPEC_CHAN_MASK;
		ext_join_params->assoc.chanspec_list[0] |= chspec;
		ext_join_params->assoc.chanspec_list[0] =
			htodchanspec(ext_join_params->assoc.chanspec_list[0]);
	}
	ext_join_params->assoc.chanspec_num =
		htod32(ext_join_params->assoc.chanspec_num);
	if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
		WL_INFO(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
			ext_join_params->ssid.SSID_len));
	}
	wl_set_drv_status(wl, CONNECTING, dev);
	err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params,
		join_params_size, wl->ioctl_buf, WLC_IOCTL_MAXLEN,
		wl_cfgp2p_find_idx(wl, dev), &wl->ioctl_buf_sync);
	kfree(ext_join_params);
	if (err) {
		wl_clr_drv_status(wl, CONNECTING, dev);
		if (err == BCME_UNSUPPORTED) {
			/* older firmware: fall back to WLC_SET_SSID below */
			WL_DBG(("join iovar is not supported\n"));
			goto set_ssid;
		} else
			WL_ERR(("error (%d)\n", err));
	} else
		goto exit;

set_ssid:
	memset(&join_params, 0, sizeof(join_params));
	join_params_size = sizeof(join_params.ssid);
	join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID),
		sme->ssid_len);
	memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
	join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
	wl_update_prof(wl, dev, NULL, &join_params.ssid, WL_PROF_SSID);
	if (sme->bssid)
		memcpy(&join_params.params.bssid, sme->bssid, ETH_ALEN);
	else
		memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
	wl_ch_to_chanspec(wl->channel, &join_params, &join_params_size);
	WL_DBG(("join_param_size %d\n", join_params_size));
	if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
		WL_INFO(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
			join_params.ssid.SSID_len));
	}
	wl_set_drv_status(wl, CONNECTING, dev);
	err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size,
		true);
	if (err) {
		WL_ERR(("error (%d)\n", err));
		wl_clr_drv_status(wl, CONNECTING, dev);
	}

exit:
	return err;
}

/* cfg80211 .disconnect op: abort any scan in flight, then issue
 * WLC_DISASSOC with the caller-supplied reason code if we were active.
 */
static s32
wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
	u16 reason_code)
{
	struct wl_priv *wl = wiphy_priv(wiphy);
	scb_val_t scbval;
	bool act = false;
	s32 err = 0;
	u8 *curbssid;

	WL_ERR(("Reason %d\n", reason_code));
	CHECK_SYS_UP(wl);
	act = *(bool *) wl_read_prof(wl, dev, WL_PROF_ACT);
	curbssid = wl_read_prof(wl, dev, WL_PROF_BSSID);
	if (act) {
		/*
		 * Cancel ongoing scan to sync up with sme state machine of cfg80211.
		 */
		if (wl->scan_request) {
			wl_notify_escan_complete(wl, dev, true, true);
		}
		wl_set_drv_status(wl, DISCONNECTING, dev);
		scbval.val = reason_code;
		memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
		scbval.val = htod32(scbval.val);
		err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
			sizeof(scb_val_t), true);
		if (unlikely(err)) {
			wl_clr_drv_status(wl, DISCONNECTING, dev);
			WL_ERR(("error (%d)\n", err));
			return err;
		}
	}
	return err;
}

/* cfg80211 .set_tx_power op.
 * NOTE(review): 'dbm' is clamped and then passed through bcm_mw_to_qdbm()
 * as if it were milliwatts (see 'txpwrmw') — confirm the expected unit;
 * the symmetric conversion appears in get_tx_power below.
 */
static s32
wl_cfg80211_set_tx_power(struct wiphy *wiphy,
	enum nl80211_tx_power_setting type, s32 dbm)
{
	struct wl_priv *wl = wiphy_priv(wiphy);
	struct net_device *ndev = wl_to_prmry_ndev(wl);
	u16 txpwrmw;
	s32 err = 0;
	s32 disable = 0;

	CHECK_SYS_UP(wl);
	switch (type) {
	case NL80211_TX_POWER_AUTOMATIC:
		break;
	case NL80211_TX_POWER_LIMITED:
		if (dbm < 0) {
			WL_ERR(("TX_POWER_LIMITTED - dbm is negative\n"));
			return -EINVAL;
		}
		break;
	case NL80211_TX_POWER_FIXED:
		if (dbm < 0) {
			WL_ERR(("TX_POWER_FIXED - dbm is negative..\n"));
			return -EINVAL;
		}
		break;
	}
	/* Make sure radio is off or on as far as software is concerned */
	disable = WL_RADIO_SW_DISABLE << 16;
	disable = htod32(disable);
	err = wldev_ioctl(ndev, WLC_SET_RADIO, &disable, sizeof(disable),
		true);
	if (unlikely(err)) {
		WL_ERR(("WLC_SET_RADIO error (%d)\n", err));
		return err;
	}
	if (dbm > 0xffff)
		txpwrmw = 0xffff;
	else
		txpwrmw = (u16) dbm;
	err = wldev_iovar_setint(ndev, "qtxpower",
		(s32) (bcm_mw_to_qdbm(txpwrmw)));
	if (unlikely(err)) {
		WL_ERR(("qtxpower error (%d)\n", err));
		return err;
	}
	wl->conf->tx_power = dbm;
	return err;
}

/* cfg80211 .get_tx_power op: read back "qtxpower" and convert.
 * NOTE(review): the converted value from bcm_qdbm_to_mw() (milliwatts per
 * its name) is stored into *dbm — same unit question as set_tx_power.
 */
static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
{
	struct wl_priv *wl = wiphy_priv(wiphy);
	struct net_device *ndev = wl_to_prmry_ndev(wl);
	s32 txpwrdbm;
	u8 result;
	s32 err = 0;

	CHECK_SYS_UP(wl);
	err = wldev_iovar_getint(ndev, "qtxpower", &txpwrdbm);
	if (unlikely(err)) {
		WL_ERR(("error (%d)\n", err));
		return err;
	}
	result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
	*dbm = (s32) bcm_qdbm_to_mw(result);
	return err;
}

static s32 wl_cfg80211_config_default_key(struct wiphy
*wiphy, struct net_device *dev, u8 key_idx, bool unicast, bool multicast) { struct wl_priv *wl = wiphy_priv(wiphy); u32 index; s32 wsec; s32 err = 0; s32 bssidx = wl_cfgp2p_find_idx(wl, dev); WL_DBG(("key index (%d)\n", key_idx)); CHECK_SYS_UP(wl); err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx); if (unlikely(err)) { WL_ERR(("WLC_GET_WSEC error (%d)\n", err)); return err; } if (wsec == WEP_ENABLED) { /* Just select a new current key */ index = (u32) key_idx; index = htod32(index); err = wldev_ioctl(dev, WLC_SET_KEY_PRIMARY, &index, sizeof(index), true); if (unlikely(err)) { WL_ERR(("error (%d)\n", err)); } } return err; } static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, const u8 *mac_addr, struct key_params *params) { struct wl_priv *wl = wiphy_priv(wiphy); struct wl_wsec_key key; s32 err = 0; s32 bssidx = wl_cfgp2p_find_idx(wl, dev); s32 mode = wl_get_mode_by_netdev(wl, dev); memset(&key, 0, sizeof(key)); key.index = (u32) key_idx; if (!ETHER_ISMULTI(mac_addr)) memcpy((char *)&key.ea, (void *)mac_addr, ETHER_ADDR_LEN); key.len = (u32) params->key_len; /* check for key index change */ if (key.len == 0) { /* key delete */ swap_key_from_BE(&key); wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync); if (unlikely(err)) { WL_ERR(("key delete error (%d)\n", err)); return err; } } else { if (key.len > sizeof(key.data)) { WL_ERR(("Invalid key length (%d)\n", key.len)); return -EINVAL; } WL_DBG(("Setting the key index %d\n", key.index)); memcpy(key.data, params->key, key.len); if ((mode == WL_MODE_BSS) && (params->cipher == WLAN_CIPHER_SUITE_TKIP)) { u8 keybuf[8]; memcpy(keybuf, &key.data[24], sizeof(keybuf)); memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); memcpy(&key.data[16], keybuf, sizeof(keybuf)); } /* if IW_ENCODE_EXT_RX_SEQ_VALID set */ if (params->seq && params->seq_len == 6) { /* rx iv */ u8 *ivptr; ivptr = (u8 *) params->seq; key.rxiv.hi = 
(ivptr[5] << 24) | (ivptr[4] << 16) | (ivptr[3] << 8) | ivptr[2]; key.rxiv.lo = (ivptr[1] << 8) | ivptr[0]; key.iv_initialized = true; } switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: key.algo = CRYPTO_ALGO_WEP1; WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n")); break; case WLAN_CIPHER_SUITE_WEP104: key.algo = CRYPTO_ALGO_WEP128; WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n")); break; case WLAN_CIPHER_SUITE_TKIP: key.algo = CRYPTO_ALGO_TKIP; WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n")); break; case WLAN_CIPHER_SUITE_AES_CMAC: key.algo = CRYPTO_ALGO_AES_CCM; WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n")); break; case WLAN_CIPHER_SUITE_CCMP: key.algo = CRYPTO_ALGO_AES_CCM; WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n")); break; default: WL_ERR(("Invalid cipher (0x%x)\n", params->cipher)); return -EINVAL; } swap_key_from_BE(&key); wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync); if (unlikely(err)) { WL_ERR(("WLC_SET_KEY error (%d)\n", err)); return err; } } return err; } static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool pairwise, const u8 *mac_addr, struct key_params *params) { struct wl_wsec_key key; s32 val = 0; s32 wsec = 0; s32 err = 0; u8 keybuf[8]; s32 bssidx = 0; struct wl_priv *wl = wiphy_priv(wiphy); s32 mode = wl_get_mode_by_netdev(wl, dev); WL_DBG(("key index (%d)\n", key_idx)); CHECK_SYS_UP(wl); bssidx = wl_cfgp2p_find_idx(wl, dev); if (mac_addr && ((params->cipher != WLAN_CIPHER_SUITE_WEP40) && (params->cipher != WLAN_CIPHER_SUITE_WEP104))) { wl_add_keyext(wiphy, dev, key_idx, mac_addr, params); goto exit; } memset(&key, 0, sizeof(key)); key.len = (u32) params->key_len; key.index = (u32) key_idx; if (unlikely(key.len > sizeof(key.data))) { WL_ERR(("Too long key length (%u)\n", key.len)); return -EINVAL; } memcpy(key.data, params->key, key.len); key.flags = WL_PRIMARY_KEY; switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: key.algo = CRYPTO_ALGO_WEP1; val = WEP_ENABLED; 
WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n")); break; case WLAN_CIPHER_SUITE_WEP104: key.algo = CRYPTO_ALGO_WEP128; val = WEP_ENABLED; WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n")); break; case WLAN_CIPHER_SUITE_TKIP: key.algo = CRYPTO_ALGO_TKIP; val = TKIP_ENABLED; /* wpa_supplicant switches the third and fourth quarters of the TKIP key */ if (mode == WL_MODE_BSS) { bcopy(&key.data[24], keybuf, sizeof(keybuf)); bcopy(&key.data[16], &key.data[24], sizeof(keybuf)); bcopy(keybuf, &key.data[16], sizeof(keybuf)); } WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n")); break; case WLAN_CIPHER_SUITE_AES_CMAC: key.algo = CRYPTO_ALGO_AES_CCM; val = AES_ENABLED; WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n")); break; case WLAN_CIPHER_SUITE_CCMP: key.algo = CRYPTO_ALGO_AES_CCM; val = AES_ENABLED; WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n")); break; default: WL_ERR(("Invalid cipher (0x%x)\n", params->cipher)); return -EINVAL; } /* Set the new key/index */ swap_key_from_BE(&key); err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync); if (unlikely(err)) { WL_ERR(("WLC_SET_KEY error (%d)\n", err)); return err; } exit: err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx); if (unlikely(err)) { WL_ERR(("get wsec error (%d)\n", err)); return err; } wsec |= val; err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx); if (unlikely(err)) { WL_ERR(("set wsec error (%d)\n", err)); return err; } return err; } static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool pairwise, const u8 *mac_addr) { struct wl_wsec_key key; struct wl_priv *wl = wiphy_priv(wiphy); s32 err = 0; s32 bssidx = wl_cfgp2p_find_idx(wl, dev); WL_DBG(("Enter\n")); CHECK_SYS_UP(wl); memset(&key, 0, sizeof(key)); key.flags = WL_PRIMARY_KEY; key.algo = CRYPTO_ALGO_OFF; key.index = (u32) key_idx; WL_DBG(("key index (%d)\n", key_idx)); /* Set the new key/index */ swap_key_from_BE(&key); wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), 
wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync); if (unlikely(err)) { if (err == -EINVAL) { if (key.index >= DOT11_MAX_DEFAULT_KEYS) { /* we ignore this key index in this case */ WL_DBG(("invalid key index (%d)\n", key_idx)); } } else { WL_ERR(("WLC_SET_KEY error (%d)\n", err)); } return err; } return err; } static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback) (void *cookie, struct key_params * params)) { struct key_params params; struct wl_wsec_key key; struct wl_priv *wl = wiphy_priv(wiphy); struct wl_security *sec; s32 wsec; s32 err = 0; s32 bssidx = wl_cfgp2p_find_idx(wl, dev); WL_DBG(("key index (%d)\n", key_idx)); CHECK_SYS_UP(wl); memset(&key, 0, sizeof(key)); key.index = key_idx; swap_key_to_BE(&key); memset(&params, 0, sizeof(params)); params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len); memcpy(params.key, key.data, params.key_len); wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx); if (unlikely(err)) { WL_ERR(("WLC_GET_WSEC error (%d)\n", err)); return err; } switch (wsec & ~SES_OW_ENABLED) { case WEP_ENABLED: sec = wl_read_prof(wl, dev, WL_PROF_SEC); if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { params.cipher = WLAN_CIPHER_SUITE_WEP40; WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n")); } else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) { params.cipher = WLAN_CIPHER_SUITE_WEP104; WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n")); } break; case TKIP_ENABLED: params.cipher = WLAN_CIPHER_SUITE_TKIP; WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n")); break; case AES_ENABLED: params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n")); break; default: WL_ERR(("Invalid algo (0x%x)\n", wsec)); return -EINVAL; } callback(cookie, &params); return err; } static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx) { WL_INFO(("Not supported\n")); return -EOPNOTSUPP; } static s32 
wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_info *sinfo) { struct wl_priv *wl = wiphy_priv(wiphy); scb_val_t scb_val; s32 rssi; s32 rate; s32 err = 0; sta_info_t *sta; #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) s8 eabuf[ETHER_ADDR_STR_LEN]; #endif dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub); CHECK_SYS_UP(wl); if (wl_get_mode_by_netdev(wl, dev) == WL_MODE_AP) { err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac, ETHER_ADDR_LEN, wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync); if (err < 0) { WL_ERR(("GET STA INFO failed, %d\n", err)); return err; } sinfo->filled = STATION_INFO_INACTIVE_TIME; sta = (sta_info_t *)wl->ioctl_buf; sta->len = dtoh16(sta->len); sta->cap = dtoh16(sta->cap); sta->flags = dtoh32(sta->flags); sta->idle = dtoh32(sta->idle); sta->in = dtoh32(sta->in); sinfo->inactive_time = sta->idle * 1000; #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) if (sta->flags & WL_STA_ASSOC) { sinfo->filled |= STATION_INFO_CONNECTED_TIME; sinfo->connected_time = sta->in; } WL_INFO(("STA %s : idle time : %d sec, connected time :%d ms\n", bcm_ether_ntoa((const struct ether_addr *)mac, eabuf), sinfo->inactive_time, sta->idle * 1000)); #endif } else if (wl_get_mode_by_netdev(wl, dev) == WL_MODE_BSS) { get_pktcnt_t pktcnt; u8 *curmacp = wl_read_prof(wl, dev, WL_PROF_BSSID); err = -ENODEV; if (!wl_get_drv_status(wl, CONNECTED, dev) || (dhd_is_associated(dhd, NULL, &err) == FALSE)) { WL_ERR(("NOT assoc: %d\n", err)); goto get_station_err; } if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) { WL_ERR(("Wrong Mac address: "MACSTR" != "MACSTR"\n", MAC2STR(mac), MAC2STR(curmacp))); } /* Report the current tx rate */ err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), false); if (err) { WL_ERR(("Could not get rate (%d)\n", err)); } else { rate = dtoh32(rate); sinfo->filled |= STATION_INFO_TX_BITRATE; sinfo->txrate.legacy = rate * 5; WL_DBG(("Rate %d Mbps\n", (rate / 2))); } memset(&scb_val, 0, 
			sizeof(scb_val));
		scb_val.val = 0;
		err = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t), false);
		if (err) {
			WL_ERR(("Could not get rssi (%d)\n", err));
			goto get_station_err;
		}
		rssi = dtoh32(scb_val.val);
		sinfo->filled |= STATION_INFO_SIGNAL;
		sinfo->signal = rssi;
		WL_DBG(("RSSI %d dBm\n", rssi));
		/* packet counters are best-effort; failure is not fatal here */
		err = wldev_ioctl(dev, WLC_GET_PKTCNTS, &pktcnt,
			sizeof(pktcnt), false);
		if (!err) {
			sinfo->filled |= (STATION_INFO_RX_PACKETS |
				STATION_INFO_RX_DROP_MISC |
				STATION_INFO_TX_PACKETS |
				STATION_INFO_TX_FAILED);
			sinfo->rx_packets = pktcnt.rx_good_pkt;
			sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt;
			sinfo->tx_packets = pktcnt.tx_good_pkt;
			sinfo->tx_failed = pktcnt.tx_bad_pkt;
		}
get_station_err:
		/* Any real failure above (other than timeout/IO) means the link
		 * is effectively dead: force a disconnect notification.
		 */
		if (err && (err != -ETIMEDOUT) && (err != -EIO)) {
			/* Disconnect due to zero BSSID or error to get RSSI */
			WL_ERR(("force cfg80211_disconnected: %d\n", err));
			wl_clr_drv_status(wl, CONNECTED, dev);
			cfg80211_disconnected(dev, 0, NULL, 0, GFP_KERNEL);
			wl_link_down(wl);
		}
	}
	return err;
}

/* Read the firmware PM mode for @dev and mirror it into
 * dev->ieee80211_ptr->ps so userspace sees the true power-save state.
 */
int wl_cfg80211_update_power_mode(struct net_device *dev)
{
	int pm = -1;
	int err;
	err = wldev_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), false);
	if (err || (pm == -1)) {
		WL_ERR(("error (%d)\n", err));
	} else {
		/* collapse PM_FAST/PM_MAX to a simple on/off boolean */
		pm = (pm == PM_OFF) ? false : true;
		WL_DBG(("%s: %d\n", __func__, pm));
		if (dev->ieee80211_ptr)
			dev->ieee80211_ptr->ps = pm;
	}
	return err;
}

/* cfg80211 .set_power_mgmt handler (first part; continues in the next
 * chunk). Translates the cfg80211 enable flag into a firmware PM mode;
 * the P2P virtual net_device is ignored.
 */
static s32
wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
	bool enabled, s32 timeout)
{
	s32 pm;
	s32 err = 0;
	struct wl_priv *wl = wiphy_priv(wiphy);
#if !defined(SUPPORT_PM2_ONLY)
	dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
#endif
	CHECK_SYS_UP(wl);
	WL_DBG(("Enter : power save %s\n", (enabled ? "enable" : "disable")));
	if (wl->p2p_net == dev) {
		return err;
	}
#if !defined(SUPPORT_PM2_ONLY)
	/* while the host is suspending, use the deepest PM mode */
	pm = enabled ? ((dhd->in_suspend) ? PM_MAX : PM_FAST) : PM_OFF;
#else
	pm = enabled ?
		PM_FAST : PM_OFF;
#endif
	pm = htod32(pm);
	err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true);
	if (unlikely(err)) {
		if (err == -ENODEV)
			WL_DBG(("net_device is not ready yet\n"));
		else
			WL_ERR(("error (%d)\n", err));
		return err;
	}
	WL_DBG(("power save %s\n", (pm ? "enabled" : "disabled")));
	return err;
}

/* Return the 1-based position of the most significant set bit of @bit16
 * (0 when no bit is set), via a 4-step binary search.
 */
static __used u32 wl_find_msb(u16 bit16)
{
	u32 ret = 0;
	if (bit16 & 0xff00) {
		ret += 8;
		bit16 >>= 8;
	}
	if (bit16 & 0xf0) {
		ret += 4;
		bit16 >>= 4;
	}
	if (bit16 & 0xc) {
		ret += 2;
		bit16 >>= 2;
	}
	if (bit16 & 2)
		ret += bit16 & 2;
	else if (bit16)
		ret += bit16;
	return ret;
}

/* cfg80211 .resume handler: restart iscan if the driver is ready. */
static s32 wl_cfg80211_resume(struct wiphy *wiphy)
{
	struct wl_priv *wl = wiphy_priv(wiphy);
	struct net_device *ndev = wl_to_prmry_ndev(wl);
	s32 err = 0;
	if (unlikely(!wl_get_drv_status(wl, READY, ndev))) {
		WL_INFO(("device is not ready\n"));
		return 0;
	}
	wl_invoke_iscan(wl);
	return err;
}

/* cfg80211 .suspend handler (signature gained a wowlan argument in 2.6.39+).
 * When DHD_CLEAR_ON_SUSPEND is set: abort any scan in flight, complete the
 * pending scan_request, clear scan state on every interface, and fail any
 * half-finished connection attempts. Always reports success.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
#else
static s32 wl_cfg80211_suspend(struct wiphy *wiphy)
#endif
{
#ifdef DHD_CLEAR_ON_SUSPEND
	struct wl_priv *wl = wiphy_priv(wiphy);
	struct net_info *iter, *next;
	struct net_device *ndev = wl_to_prmry_ndev(wl);
	unsigned long flags;
	if (unlikely(!wl_get_drv_status(wl, READY, ndev))) {
		WL_INFO(("device is not ready : status (%d)\n",
			(int)wl->status));
		return 0;
	}
	for_each_ndev(wl, iter, next)
		wl_set_drv_status(wl, SCAN_ABORTING, iter->ndev);
	wl_term_iscan(wl);
	/* scan_request is shared with the event path; protect with the lock */
	spin_lock_irqsave(&wl->cfgdrv_lock, flags);
	if (wl->scan_request) {
		/* report the interrupted scan as aborted */
		cfg80211_scan_done(wl->scan_request, true);
		wl->scan_request = NULL;
	}
	for_each_ndev(wl, iter, next) {
		wl_clr_drv_status(wl, SCANNING, iter->ndev);
		wl_clr_drv_status(wl, SCAN_ABORTING, iter->ndev);
	}
	spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
	for_each_ndev(wl, iter, next) {
		if (wl_get_drv_status(wl, CONNECTING, iter->ndev)) {
			/* report in-progress connects as failed */
			wl_bss_connect_done(wl, iter->ndev, NULL, NULL, false);
		}
	}
#endif /* DHD_CLEAR_ON_SUSPEND */
	return 0;
}

static s32
/* Push the driver's cached PMKID list to firmware via the "pmkid_info"
 * iovar. Only supported on the primary (STA) interface; on virtual
 * interfaces the incoming @err is passed straight back. The iovar is only
 * issued when @err is 0 on entry, so earlier failures are propagated
 * without touching firmware.
 */
wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list,
	s32 err)
{
	int i, j;
	struct wl_priv *wl = wlcfg_drv_priv;
	struct net_device *primary_dev = wl_to_prmry_ndev(wl);
	if (!pmk_list) {
		printk("pmk_list is NULL\n");
		return -EINVAL;
	}
	/* pmk list is supported only for STA interface i.e. primary interface
	 * Refer code wlc_bsscfg.c->wlc_bsscfg_sta_init
	 */
	if (primary_dev != dev) {
		WL_INFO(("Not supporting Flushing pmklist on virtual"
			" interfaces than primary interface\n"));
		return err;
	}
	WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid));
	for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
		WL_DBG(("PMKID[%d]: %pM =\n", i,
			&pmk_list->pmkids.pmkid[i].BSSID));
		for (j = 0; j < WPA2_PMKID_LEN; j++) {
			WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]));
		}
	}
	if (likely(!err)) {
		err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
			sizeof(*pmk_list), wl->ioctl_buf, WLC_IOCTL_MAXLEN,
			NULL);
	}
	return err;
}

/* cfg80211 .set_pmksa handler (first part; continues in the next chunk).
 * Inserts or updates the PMKID entry for pmksa->bssid in the cached list,
 * then syncs the list to firmware.
 */
static s32
wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
	struct cfg80211_pmksa *pmksa)
{
	struct wl_priv *wl = wiphy_priv(wiphy);
	s32 err = 0;
	int i;
	CHECK_SYS_UP(wl);
	/* find an existing entry for this BSSID; i lands on npmkid if none */
	for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
		if (!memcmp(pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
			ETHER_ADDR_LEN))
			break;
	if (i < WL_NUM_PMKIDS_MAX) {
		memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
			ETHER_ADDR_LEN);
		memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
			WPA2_PMKID_LEN);
		/* appended at the tail: grow the count */
		if (i == wl->pmk_list->pmkids.npmkid)
			wl->pmk_list->pmkids.npmkid++;
	} else {
		err = -EINVAL;
	}
	/* NOTE(review): when the list was full (err set) or npmkid is 0, this
	 * debug print indexes pmkid[npmkid - 1], which can read before the
	 * array -- harmless-looking debug code, but worth confirming.
	 */
	WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
		&wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid - 1].BSSID));
	for (i = 0; i < WPA2_PMKID_LEN; i++) {
		WL_DBG(("%02x\n",
			wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid - 1].
			PMKID[i]));
	}
	err = wl_update_pmklist(dev, wl->pmk_list, err);
	return err;
}

/* cfg80211 .del_pmksa handler: remove the cached PMKID entry matching
 * pmksa->bssid (compacting the array by shifting later entries down), then
 * sync the list to firmware. Returns -EINVAL when no entry matches.
 */
static s32
wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
	struct cfg80211_pmksa *pmksa)
{
	struct wl_priv *wl = wiphy_priv(wiphy);
	struct _pmkid_list pmkid;
	s32 err = 0;
	int i;
	CHECK_SYS_UP(wl);
	/* local copy used only for the debug dump below */
	memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN);
	memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN);
	WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
		&pmkid.pmkid[0].BSSID));
	for (i = 0; i < WPA2_PMKID_LEN; i++) {
		WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i]));
	}
	for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
		if (!memcmp
		    (pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
		     ETHER_ADDR_LEN))
			break;
	if ((wl->pmk_list->pmkids.npmkid > 0) &&
		(i < wl->pmk_list->pmkids.npmkid)) {
		memset(&wl->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t));
		/* shift the remaining entries down over the deleted slot */
		for (; i < (wl->pmk_list->pmkids.npmkid - 1); i++) {
			memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID,
				&wl->pmk_list->pmkids.pmkid[i + 1].BSSID,
				ETHER_ADDR_LEN);
			memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID,
				&wl->pmk_list->pmkids.pmkid[i + 1].PMKID,
				WPA2_PMKID_LEN);
		}
		wl->pmk_list->pmkids.npmkid--;
	} else {
		err = -EINVAL;
	}
	err = wl_update_pmklist(dev, wl->pmk_list, err);
	return err;
}

/* cfg80211 .flush_pmksa handler: clear the whole cached PMKID list and
 * sync the (now empty) list to firmware.
 */
static s32
wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
{
	struct wl_priv *wl = wiphy_priv(wiphy);
	s32 err = 0;
	CHECK_SYS_UP(wl);
	memset(wl->pmk_list, 0, sizeof(*wl->pmk_list));
	err = wl_update_pmklist(dev, wl->pmk_list, err);
	return err;
}

/* Allocate and fill a single-channel, zero-SSID wl_scan_params_t used for
 * one-shot (e.g. action-frame) scans; *out_params_size receives the
 * allocation size (first part; continues in the next chunk). Returns NULL
 * on allocation failure.
 */
static wl_scan_params_t *
wl_cfg80211_scan_alloc_params(int channel, int nprobes, int *out_params_size)
{
	wl_scan_params_t *params;
	int params_size;
	int num_chans;
	*out_params_size = 0;
	/* Our scan params only need space for 1 channel and 0 ssids */
	params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
	params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
	if (params == NULL) {
		WL_ERR(("%s: mem alloc failed (%d bytes)\n", __func__,
			params_size));
		return params;
	}
	/* NOTE(review): redundant -- kzalloc already zeroed the buffer */
	memset(params, 0, params_size);
	/* NOTE(review): this assignment is dead -- nprobes is unconditionally
	 * overwritten with htod32(1) a few lines below, so the caller's
	 * nprobes argument is effectively ignored. Confirm which is intended.
	 */
	params->nprobes = nprobes;
	num_chans = (channel == 0) ? 0 : 1;
	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
	params->bss_type = DOT11_BSSTYPE_ANY;
	params->scan_type = DOT11_SCANTYPE_ACTIVE;
	params->nprobes = htod32(1);
	/* -1 lets firmware pick default dwell times */
	params->active_time = htod32(-1);
	params->passive_time = htod32(-1);
	params->home_time = htod32(10);
	params->channel_list[0] = htodchanspec(channel);
	/* Our scan params have 1 channel and 0 ssids */
	params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
		(num_chans & WL_SCAN_PARAMS_COUNT_MASK));
	*out_params_size = params_size;	/* rtn size to the caller */
	return params;
}

/* cfg80211 .remain_on_channel handler (first part; tail in the next chunk).
 * Aborts any scan in flight, records the requested channel, hands a fresh
 * non-zero cookie back via cfg80211_ready_on_channel(), enables P2P
 * discovery on demand, and starts a P2P listen on the target channel.
 */
static s32
wl_cfg80211_remain_on_channel(struct wiphy *wiphy, struct net_device *dev,
	struct ieee80211_channel * channel,
	enum nl80211_channel_type channel_type,
	unsigned int duration, u64 *cookie)
{
	s32 target_channel;
	u32 id;
	struct ether_addr primary_mac;
	struct net_device *ndev = NULL;
	s32 err = BCME_OK;
	struct wl_priv *wl = wiphy_priv(wiphy);
	WL_DBG(("Enter, netdev_ifidx: %d \n", dev->ifindex));
	/* requests on the P2P virtual netdev are serviced on the primary */
	if (wl->p2p_net == dev) {
		ndev = wl_to_prmry_ndev(wl);
	} else {
		ndev = dev;
	}
	if (wl_get_drv_status(wl, SCANNING, ndev)) {
		wl_notify_escan_complete(wl, ndev, true, true);
	}
	target_channel = ieee80211_frequency_to_channel(channel->center_freq);
	memcpy(&wl->remain_on_chan, channel, sizeof(struct ieee80211_channel));
	wl->remain_on_chan_type = channel_type;
	/* cookies must be non-zero; skip 0 on wrap-around */
	id = ++wl->last_roc_id;
	if (id == 0)
		id = ++wl->last_roc_id;
	*cookie = id;
	cfg80211_ready_on_channel(dev, *cookie, channel,
		channel_type, duration, GFP_KERNEL);
	if (wl->p2p && !wl->p2p->on) {
		get_primary_mac(wl, &primary_mac);
		wl_cfgp2p_generate_bss_mac(&primary_mac, &wl->p2p->dev_addr,
			&wl->p2p->int_addr);
		/* In case of p2p_listen command, supplicant send remain_on_channel
		 * without turning on P2P
		 */
		p2p_on(wl) = true;
		err = wl_cfgp2p_enable_discovery(wl, ndev, NULL, 0);
		if (unlikely(err)) {
			goto exit;
		}
	}
	if (p2p_is_on(wl))
		wl_cfgp2p_discover_listen(wl, target_channel, duration);
exit:
	return err;
}

/* cfg80211 .cancel_remain_on_channel handler: intentionally a no-op apart
 * from tracing -- the listen simply times out in firmware.
 */
static s32
wl_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
	struct net_device *dev, u64 cookie)
{
	s32 err = 0;
	WL_DBG((" enter ) netdev_ifidx: %d \n", dev->ifindex));
	return err;
}

/* Transmit the action frame parked in afx_hdl once the peer's channel has
 * been discovered. Clears the pending pointer first, suspends P2P
 * search-listen, retargets the frame to the peer channel and records
 * ack_recv from the tx result. Returns -1 if P2P is off, else 0.
 */
static s32
wl_cfg80211_send_pending_tx_act_frm(struct wl_priv *wl)
{
	wl_af_params_t *tx_act_frm;
	struct net_device *dev = wl->afx_hdl->dev;
	if (!p2p_is_on(wl))
		return -1;
	if (dev == wl->p2p_net) {
		dev = wl_to_prmry_ndev(wl);
	}
	tx_act_frm = wl->afx_hdl->pending_tx_act_frm;
	WL_DBG(("Sending the action frame\n"));
	wl->afx_hdl->pending_tx_act_frm = NULL;
	if (tx_act_frm != NULL) {
		/* Suspend P2P discovery's search-listen to prevent it from
		 * starting a scan or changing the channel.
		 */
		wl_clr_drv_status(wl, SENDING_ACT_FRM, wl->afx_hdl->dev);
		wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
		wl_notify_escan_complete(wl, dev, true, true);
		wl_cfgp2p_discover_enable_search(wl, false);
		tx_act_frm->channel = wl->afx_hdl->peer_chan;
		/* tx helper returns 0 on success -> ack_recv = true */
		wl->afx_hdl->ack_recv = (wl_cfgp2p_tx_action_frame(wl, dev,
			tx_act_frm, wl->afx_hdl->bssidx)) ? false : true;
	}
	return 0;
}

/* Workqueue handler: run one action-frame peer search round on the
 * interface recorded in afx_hdl.
 */
static void
wl_cfg80211_afx_handler(struct work_struct *work)
{
	struct afx_hdl *afx_instance;
	struct wl_priv *wl = wlcfg_drv_priv;
	afx_instance = container_of(work, struct afx_hdl, work);
	if (afx_instance != NULL) {
		wl_cfgp2p_act_frm_search(wl, wl->afx_hdl->dev,
			wl->afx_hdl->bssidx, 0);
	}
}

/* Find the peer's listen channel and send @af_params there (first part;
 * the retry loop and result handling are in the next chunk). Returns true
 * when the frame was ACKed.
 */
static bool
wl_cfg80211_send_at_common_channel(struct wl_priv *wl,
	struct net_device *dev, wl_af_params_t *af_params)
{
	WL_DBG((" enter ) \n"));
	/* initialize afx_hdl */
	wl->afx_hdl->pending_tx_act_frm = af_params;
	wl->afx_hdl->bssidx = wl_cfgp2p_find_idx(wl, dev);
	wl->afx_hdl->dev = dev;
	wl->afx_hdl->retry = 0;
	wl->afx_hdl->peer_chan = WL_INVALID;
	wl->afx_hdl->ack_recv = false;
	memcpy(wl->afx_hdl->pending_tx_dst_addr.octet,
		af_params->action_frame.da.octet,
		sizeof(wl->afx_hdl->pending_tx_dst_addr.octet));
	/* Loop to wait until we have sent the pending tx action frame or the
	 * pending action frame tx is cancelled.
	 */
	while ((wl->afx_hdl->retry < WL_CHANNEL_SYNC_RETRY) &&
		(wl->afx_hdl->peer_chan == WL_INVALID)) {
		wl_set_drv_status(wl, SENDING_ACT_FRM, dev);
		wl_set_drv_status(wl, SCANNING, dev);
		WL_DBG(("Scheduling the action frame for sending.. retry %d\n",
			wl->afx_hdl->retry));
		/* Do find_peer_for_action */
		schedule_work(&wl->afx_hdl->work);
		/* block until the search round signals completion */
		wait_for_completion(&wl->act_frm_scan);
		wl->afx_hdl->retry++;
	}
	if (wl->afx_hdl->peer_chan != WL_INVALID)
		wl_cfg80211_send_pending_tx_act_frm(wl);
	else {
		WL_ERR(("Couldn't find the peer " MACSTR " after %d retries\n",
			MAC2STR(wl->afx_hdl->pending_tx_dst_addr.octet),
			wl->afx_hdl->retry));
	}
	wl->afx_hdl->dev = NULL;
	wl->afx_hdl->bssidx = WL_INVALID;
	wl_clr_drv_status(wl, SENDING_ACT_FRM, dev);
	if (wl->afx_hdl->ack_recv)
		return true;	/* ACK */
	else
		return false;	/* NO ACK */
}

/* cfg80211 .mgmt_tx handler (spans several chunks). Handles three frame
 * classes: probe responses (IEs are pushed to firmware, tx status faked),
 * disassoc/deauth (translated into WLC_SCB_DEAUTHENTICATE_FOR_REASON), and
 * action frames (sent off-channel via the firmware, with P2P/GAS-specific
 * dwell times and retries).
 */
static s32
wl_cfg80211_mgmt_tx(struct wiphy *wiphy, struct net_device *ndev,
	struct ieee80211_channel *channel, bool offchan,
	enum nl80211_channel_type channel_type,
	bool channel_type_valid, unsigned int wait,
	const u8* buf, size_t len,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
	bool no_cck,
#endif
	u64 *cookie)
{
	wl_action_frame_t *action_frame;
	wl_af_params_t *af_params;
	wifi_p2p_ie_t *p2p_ie;
	wpa_ie_fixed_t *wps_ie;
	scb_val_t scb_val;
	wifi_wfd_ie_t *wfd_ie;
	const struct ieee80211_mgmt *mgmt;
	struct wl_priv *wl = wiphy_priv(wiphy);
	struct net_device *dev = NULL;
	s32 err = BCME_OK;
	s32 bssidx = 0;
	u32 p2pie_len = 0;
	u32 wpsie_len = 0;
	u32 wfdie_len = 0;
	u32 id;
	u32 retry = 0;
	bool ack = false;
	wifi_p2p_pub_act_frame_t *act_frm = NULL;
	wifi_p2p_action_frame_t *p2p_act_frm = NULL;
	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL;
	s8 eabuf[ETHER_ADDR_STR_LEN];
	int retry_cnt = 0;
	WL_DBG(("Enter \n"));
	/* the P2P virtual netdev is backed by the primary interface */
	if (ndev == wl->p2p_net) {
		dev = wl_to_prmry_ndev(wl);
	} else {
		/* If TX req is for any valid ifidx.
		Use as is */
		dev = ndev;
	}
	/* find bssidx based on ndev */
	bssidx = wl_cfgp2p_find_idx(wl, dev);
	if (bssidx == -1) {
		WL_ERR(("Can not find the bssidx for dev( %p )\n", dev));
		return -ENODEV;
	}
	if (p2p_is_on(wl)) {
		/* Suspend P2P discovery search-listen to prevent it from changing the
		 * channel.
		 */
		if ((err = wl_cfgp2p_discover_enable_search(wl, false)) < 0) {
			WL_ERR(("Can not disable discovery mode\n"));
			return -EFAULT;
		}
	}
	/* cookies must be non-zero; skip 0 on counter wrap */
	*cookie = 0;
	id = wl->send_action_id++;
	if (id == 0)
		id = wl->send_action_id++;
	*cookie = id;
	mgmt = (const struct ieee80211_mgmt *)buf;
	if (ieee80211_is_mgmt(mgmt->frame_control)) {
		if (ieee80211_is_probe_resp(mgmt->frame_control)) {
			/* probe responses are not transmitted directly: their
			 * vendor IEs are installed into firmware, which answers
			 * probe requests itself; tx status is reported as OK
			 */
			s32 ie_offset = DOT11_MGMT_HDR_LEN +
				DOT11_BCN_PRB_FIXED_LEN;
			s32 ie_len = len - ie_offset;
			if ((p2p_ie = wl_cfgp2p_find_p2pie((u8 *)(buf + ie_offset),
				ie_len)) != NULL) {
				/* Total length of P2P Information Element */
				p2pie_len = p2p_ie->len + sizeof(p2p_ie->len) +
					sizeof(p2p_ie->id);
			}
			if ((wfd_ie = wl_cfgp2p_find_wfdie((u8 *)(buf + ie_offset),
				ie_len)) != NULL) {
				/* Total length of WFD Information Element */
				wfdie_len = wfd_ie->len + sizeof(wfd_ie->len) +
					sizeof(wfd_ie->id);
			}
			if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)(buf + ie_offset),
				ie_len)) != NULL) {
				/* Order of Vendor IE is 1) WPS IE +
				 * 2) P2P IE created by supplicant
				 * So, it is ok to find start address of WPS IE
				 * to save IEs
				 */
				wpsie_len = wps_ie->length + sizeof(wps_ie->length) +
					sizeof(wps_ie->tag);
				wl_cfgp2p_set_management_ie(wl, dev, bssidx,
					VNDR_IE_PRBRSP_FLAG,
					(u8 *)wps_ie, wpsie_len + p2pie_len+ wfdie_len);
			}
			cfg80211_mgmt_tx_status(ndev, *cookie, buf, len, true,
				GFP_KERNEL);
			goto exit;
		} else if (ieee80211_is_disassoc(mgmt->frame_control) ||
			ieee80211_is_deauth(mgmt->frame_control)) {
			/* let firmware build and send the deauth/disassoc */
			memcpy(scb_val.ea.octet, mgmt->da, ETH_ALEN);
			scb_val.val = mgmt->u.disassoc.reason_code;
			wldev_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON,
				&scb_val, sizeof(scb_val_t), true);
			WL_DBG(("Disconnect STA : %s scb_val.val %d\n",
				bcm_ether_ntoa((const struct ether_addr
				*)mgmt->da, eabuf),
				scb_val.val));
			/* Wait for the deauth event to come, supplicant will do the
			 * delete iface immediately and we will have problem in sending
			 * deauth frame if we delete the bss in firmware
			 */
			wl_delay(400);
			cfg80211_mgmt_tx_status(ndev, *cookie, buf, len, true,
				GFP_KERNEL);
			goto exit;
		} else if (ieee80211_is_action(mgmt->frame_control)) {
			/* Abort the dwell time of any previous off-channel
			 * action frame that may be still in effect. Sending
			 * off-channel action frames relies on the driver's
			 * scan engine. If a previous off-channel action frame
			 * tx is still in progress (including the dwell time),
			 * then this new action frame will not be sent out.
			 */
			wl_notify_escan_complete(wl, dev, true, true);
		}
	} else {
		WL_ERR(("Driver only allows MGMT packet type\n"));
		goto exit;
	}
	/* from here on: action-frame transmit path */
	af_params = (wl_af_params_t *) kzalloc(WL_WIFI_AF_PARAMS_SIZE,
		GFP_KERNEL);
	if (af_params == NULL) {
		WL_ERR(("unable to allocate frame\n"));
		return -ENOMEM;
	}
	action_frame = &af_params->action_frame;
	/* Add the packet Id */
	action_frame->packetId = *cookie;
	WL_DBG(("action frame %d\n", action_frame->packetId));
	/* Add BSSID */
	memcpy(&action_frame->da, &mgmt->da[0], ETHER_ADDR_LEN);
	memcpy(&af_params->BSSID, &mgmt->bssid[0], ETHER_ADDR_LEN);
	/* Add the length exepted for 802.11 header  */
	action_frame->len = len - DOT11_MGMT_HDR_LEN;
	WL_DBG(("action_frame->len: %d\n", action_frame->len));
	/* Add the channel */
	af_params->channel =
		ieee80211_frequency_to_channel(channel->center_freq);
	/* 5 GHz targets need an explicit channel switch first */
	if (channel->band == IEEE80211_BAND_5GHZ) {
		WL_DBG(("5GHz channel %d", af_params->channel));
		err = wldev_ioctl(dev, WLC_SET_CHANNEL,
			&af_params->channel, sizeof(af_params->channel), true);
		if (err < 0) {
			WL_ERR(("WLC_SET_CHANNEL error %d\n", err));
		}
	}
	/* Add the dwell time
	 * Dwell time to stay off-channel to wait for a response action frame
	 * after transmitting an GO Negotiation action frame
	 */
	af_params->dwell_time = WL_DWELL_TIME;
	memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
		action_frame->len);
	/* classify the action frame to tune dwell time and retry count */
	if (wl_cfgp2p_is_pub_action(action_frame->data, action_frame->len)) {
		act_frm = (wifi_p2p_pub_act_frame_t *) (action_frame->data);
		WL_DBG(("P2P PUB action_frame->len: %d chan %d category %d subtype %d\n",
			action_frame->len, af_params->channel,
			act_frm->category, act_frm->subtype));
		/* keep the radio awake (mpc off) through GO negotiation */
		if (act_frm && ((act_frm->subtype == P2P_PAF_GON_REQ) ||
			(act_frm->subtype == P2P_PAF_GON_RSP) ||
			(act_frm->subtype == P2P_PAF_GON_CONF) ||
			(act_frm->subtype == P2P_PAF_PROVDIS_REQ))) {
			wldev_iovar_setint(dev, "mpc", 0);
		}
		if (act_frm->subtype == P2P_PAF_GON_REQ) {
			WL_DBG(("P2P: GO_NEG_PHASE status set \n"));
			wl_set_p2p_status(wl, GO_NEG_PHASE);
		} else if (act_frm->subtype == P2P_PAF_GON_CONF) {
			/* If we reached till GO Neg confirmation
			 * reset the filter
			 */
			WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
			wl_clr_p2p_status(wl, GO_NEG_PHASE);
		}
		if (act_frm->subtype == P2P_PAF_GON_RSP)
			retry_cnt = 1;
		else
			retry_cnt = WL_ACT_FRAME_RETRY;
		if (act_frm && act_frm->subtype == P2P_PAF_DEVDIS_REQ) {
			af_params->dwell_time = WL_LONG_DWELL_TIME;
		} else if (act_frm &&
			(act_frm->subtype == P2P_PAF_PROVDIS_REQ ||
			act_frm->subtype == P2P_PAF_PROVDIS_RSP ||
			act_frm->subtype == P2P_PAF_GON_RSP)) {
			af_params->dwell_time = WL_MED_DWELL_TIME;
		}
	} else if (wl_cfgp2p_is_p2p_action(action_frame->data,
		action_frame->len)) {
		p2p_act_frm = (wifi_p2p_action_frame_t *) (action_frame->data);
		WL_DBG(("P2P action_frame->len: %d chan %d category %d subtype %d\n",
			action_frame->len, af_params->channel,
			p2p_act_frm->category, p2p_act_frm->subtype));
	} else if (wl_cfgp2p_is_gas_action(action_frame->data,
		action_frame->len)) {
		/* GAS (service discovery) frames: longer dwell and retries */
		sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *) (action_frame->data);
		WL_DBG(("Service Discovery action_frame->len: %d chan %d category %d action %d\n",
			action_frame->len, af_params->channel,
			sd_act_frm->category, sd_act_frm->action));
		af_params->dwell_time = WL_MED_DWELL_TIME;
		retry_cnt = WL_ACT_FRAME_RETRY;
	}
	wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len);
	/*
	 * To make sure to send successfully action frame, we have to turn off mpc
	 */
	/* P2P request frames on a social channel with a saved probe-request
	 * P2P IE go through the channel-offload path (peer search + send);
	 * everything else is transmitted directly.
	 */
	if (IS_P2P_SOCIAL(af_params->channel) &&
		(IS_P2P_PUB_ACT_REQ(act_frm, &act_frm->elts[0],
			action_frame->len) ||
		IS_GAS_REQ(sd_act_frm, action_frame->len)) &&
		wl_to_p2p_bss_saved_ie(wl, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len) {
		/* channel offload require P2P IE for Probe request
		 * otherwise, we will use wl_cfgp2p_tx_action_frame directly.
		 * channel offload for action request frame
		 */
		/* channel offload for action request frame */
		ack = wl_cfg80211_send_at_common_channel(wl, dev, af_params);
		/* We need to retry Service discovery frames as they don't get
		 * retried immediately by supplicant
		 */
		if ((!ack) && (IS_GAS_REQ(sd_act_frm, action_frame->len))) {
			for (retry = 1; retry < retry_cnt; retry++) {
				WL_DBG(("Service Discovery action_frame retry %d len: %d chan %d category %d action %d\n",
					retry, action_frame->len,
					af_params->channel,
					sd_act_frm->category,
					sd_act_frm->action));
				/* tx helper returns 0 on success */
				ack = (wl_cfgp2p_tx_action_frame(wl, dev,
					af_params, bssidx)) ? false : true;
				if (ack)
					break;
			}
		}
	} else {
		ack = (wl_cfgp2p_tx_action_frame(wl, dev, af_params, bssidx)) ?
			false : true;
		if (!ack) {
			for (retry = 1; retry < retry_cnt; retry++) {
				ack = (wl_cfgp2p_tx_action_frame(wl, dev,
					af_params, bssidx)) ?
					false : true;
				if (ack)
					break;
			}
		}
	}
	cfg80211_mgmt_tx_status(ndev, *cookie, buf, len, ack, GFP_KERNEL);
	/* GO negotiation finished: allow the radio to power-save again */
	if (act_frm && act_frm->subtype == P2P_PAF_GON_CONF) {
		wldev_iovar_setint(dev, "mpc", 1);
	}
	kfree(af_params);
exit:
	return err;
}

/* cfg80211 .mgmt_frame_register handler.
 * NOTE(review): the early return and the fall-through both just return --
 * the function is effectively a no-op beyond the trace; the frame_type
 * check is dead code.
 */
static void
wl_cfg80211_mgmt_frame_register(struct wiphy *wiphy, struct net_device *dev,
	u16 frame_type, bool reg)
{
	WL_DBG(("%s: frame_type: %x, reg: %d\n", __func__, frame_type, reg));
	if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
		return;
	return;
}

/* cfg80211 .change_bss handler: every parameter is deliberately ignored;
 * the empty bodies exist only to acknowledge each field. Always succeeds.
 */
static s32
wl_cfg80211_change_bss(struct wiphy *wiphy,
	struct net_device *dev,
	struct bss_parameters *params)
{
	if (params->use_cts_prot >= 0) {
	}
	if (params->use_short_preamble >= 0) {
	}
	if (params->use_short_slot_time >= 0) {
	}
	if (params->basic_rates) {
	}
	if (params->ap_isolate >= 0) {
	}
	if (params->ht_opmode >= 0) {
	}
	return 0;
}

/* cfg80211 .set_channel handler: during AP creation the channel is only
 * recorded (channel 14 is the "auto-select later" sentinel); otherwise it
 * is set in firmware immediately.
 */
static s32
wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
	struct ieee80211_channel *chan,
	enum nl80211_channel_type channel_type)
{
	s32 channel;
	s32 err = BCME_OK;
	struct wl_priv *wl = wiphy_priv(wiphy);
	if (wl->p2p_net == dev) {
		dev = wl_to_prmry_ndev(wl);
	}
	channel = ieee80211_frequency_to_channel(chan->center_freq);
	if (wl_get_drv_status(wl, AP_CREATING, dev)) {
		WL_TRACE(("<0> %s: as!!! in AP creating mode, save chan num:%d\n",
			__FUNCTION__, channel));
		wl->hostapd_chan = channel;
		if (channel == 14)
			return err;
		/* hostapd requested ch auto-select, will be done later */
	}
	WL_DBG(("netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
		dev->ifindex, channel_type, channel));
	err = wldev_ioctl(dev, WLC_SET_CHANNEL, &channel, sizeof(channel), true);
	if (err < 0) {
		WL_ERR(("WLC_SET_CHANNEL error %d chip may not be supporting this channel\n", err));
	}
	return err;
}

/* Parse an RSN (WPA2) IE and program auth / wsec / wpa_auth into the
 * firmware bsscfg @bssidx (first part; the AKM handling and iovar writes
 * continue in the next chunk). A NULL IE is a silent no-op success.
 * NOTE(review): the ucast/AKM parsing assumes exactly one suite each
 * (list[0] / list[1]) and does not validate counts against the IE length
 * -- confirm callers only feed self-generated IEs.
 */
static s32
wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx)
{
	s32 len = 0;
	s32 err = BCME_OK;
	u16 auth = WL_AUTH_OPEN_SYSTEM; /* d11 open authentication */
	u32 wsec;
	u32 pval = 0;
	u32 gval = 0;
	u32 wpa_auth = 0;
	u8* tmp;
	wpa_suite_mcast_t *mcast;
	wpa_suite_ucast_t *ucast;
	wpa_suite_auth_key_mgmt_t *mgmt;
	if (wpa2ie == NULL)
		goto exit;
	WL_DBG(("Enter \n"));
	len = wpa2ie->len;
	/* check the mcast cipher */
	mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
	tmp = mcast->oui;
	switch (tmp[DOT11_OUI_LEN]) {
		case WPA_CIPHER_NONE:
			gval = 0;
			break;
		case WPA_CIPHER_WEP_40:
		case WPA_CIPHER_WEP_104:
			gval = WEP_ENABLED;
			break;
		case WPA_CIPHER_TKIP:
			gval = TKIP_ENABLED;
			break;
		case WPA_CIPHER_AES_CCM:
			gval = AES_ENABLED;
			break;
		default:
			WL_ERR(("No Security Info\n"));
			break;
	}
	len -= WPA_SUITE_LEN;
	/* check the unicast cipher */
	ucast = (wpa_suite_ucast_t *)&mcast[1];
	ltoh16_ua(&ucast->count);
	tmp = ucast->list[0].oui;
	switch (tmp[DOT11_OUI_LEN]) {
		case WPA_CIPHER_NONE:
			pval = 0;
			break;
		case WPA_CIPHER_WEP_40:
		case WPA_CIPHER_WEP_104:
			pval = WEP_ENABLED;
			break;
		case WPA_CIPHER_TKIP:
			pval = TKIP_ENABLED;
			break;
		case WPA_CIPHER_AES_CCM:
			pval = AES_ENABLED;
			break;
		default:
			WL_ERR(("No Security Info\n"));
	}
	/* FOR WPS , set SEC_OW_ENABLED */
	wsec = (pval | gval | SES_OW_ENABLED);
	/* check the AKM */
	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[1];
	ltoh16_ua(&mgmt->count);
	tmp = (u8 *)&mgmt->list[0];
	switch (tmp[DOT11_OUI_LEN]) {
		case RSN_AKM_NONE:
			wpa_auth = WPA_AUTH_NONE;
			break;
		case RSN_AKM_UNSPECIFIED:
			wpa_auth = WPA2_AUTH_UNSPECIFIED;
			break;
		case RSN_AKM_PSK:
			wpa_auth = WPA2_AUTH_PSK;
			break;
		default:
			WL_ERR(("No Key Mgmt Info\n"));
	}
	/* set auth */
	err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
	if (err < 0) {
		WL_ERR(("auth error %d\n", err));
		return BCME_ERROR;
	}
	/* set wsec */
	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
	if (err < 0) {
		WL_ERR(("wsec error %d\n", err));
		return BCME_ERROR;
	}
	/* set upper-layer auth */
	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
	if (err < 0) {
		WL_ERR(("wpa_auth error %d\n", err));
		return BCME_ERROR;
	}
exit:
	return 0;
}

/* Parse a WPA (vendor) IE and program auth / wsec / wpa_auth into the
 * firmware bsscfg @bssidx (first part; the suite-walking loops continue in
 * the next chunk). Unlike wl_validate_wpa2ie, this walks the full unicast
 * and AKM suite lists with length checks, OR-ing recognized suites
 * together. A NULL IE is a silent no-op success.
 */
static s32
wl_validate_wpaie(struct net_device *dev, wpa_ie_fixed_t *wpaie, s32 bssidx)
{
	wpa_suite_mcast_t *mcast;
	wpa_suite_ucast_t *ucast;
	wpa_suite_auth_key_mgmt_t *mgmt;
	u16 auth = WL_AUTH_OPEN_SYSTEM; /* d11 open authentication */
	u16 count;
	s32 err = BCME_OK;
	s32 len = 0;
	u32 i;
	u32 wsec;
	u32 pval = 0;
	u32 gval = 0;
	u32 wpa_auth = 0;
	u32 tmp = 0;
	if (wpaie == NULL)
		goto exit;
	WL_DBG(("Enter \n"));
	len = wpaie->length;    /* value length */
	len -= WPA_IE_TAG_FIXED_LEN;
	/* check for multicast cipher suite */
	if (len < WPA_SUITE_LEN) {
		WL_INFO(("no multicast cipher suite\n"));
		goto exit;
	}
	/* pick up multicast cipher */
	mcast = (wpa_suite_mcast_t *)&wpaie[1];
	len -= WPA_SUITE_LEN;
	if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
		if (IS_WPA_CIPHER(mcast->type)) {
			tmp = 0;
			switch (mcast->type) {
				case WPA_CIPHER_NONE:
					tmp = 0;
					break;
				case WPA_CIPHER_WEP_40:
				case WPA_CIPHER_WEP_104:
					tmp = WEP_ENABLED;
					break;
				case WPA_CIPHER_TKIP:
					tmp = TKIP_ENABLED;
					break;
				case WPA_CIPHER_AES_CCM:
					tmp = AES_ENABLED;
					break;
				default:
					WL_ERR(("No Security Info\n"));
			}
			gval |= tmp;
		}
	}
	/* Check for unicast suite(s) */
	if (len < WPA_IE_SUITE_COUNT_LEN) {
		WL_INFO(("no unicast suite\n"));
		goto exit;
	}
	/* walk thru unicast cipher list and pick up what we recognize */
	ucast = (wpa_suite_ucast_t *)&mcast[1];
	count = ltoh16_ua(&ucast->count);
	len -=
	WPA_IE_SUITE_COUNT_LEN;
	for (i = 0; i < count && len >= WPA_SUITE_LEN;
		i++, len -= WPA_SUITE_LEN) {
		if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
			if (IS_WPA_CIPHER(ucast->list[i].type)) {
				tmp = 0;
				switch (ucast->list[i].type) {
					case WPA_CIPHER_NONE:
						tmp = 0;
						break;
					case WPA_CIPHER_WEP_40:
					case WPA_CIPHER_WEP_104:
						tmp = WEP_ENABLED;
						break;
					case WPA_CIPHER_TKIP:
						tmp = TKIP_ENABLED;
						break;
					case WPA_CIPHER_AES_CCM:
						tmp = AES_ENABLED;
						break;
					default:
						WL_ERR(("No Security Info\n"));
				}
				pval |= tmp;
			}
		}
	}
	/* skip any unread suites when the loop stopped early on short len */
	len -= (count - i) * WPA_SUITE_LEN;
	/* Check for auth key management suite(s) */
	if (len < WPA_IE_SUITE_COUNT_LEN) {
		WL_INFO((" no auth key mgmt suite\n"));
		goto exit;
	}
	/* walk thru auth management suite list and pick up what we recognize */
	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
	count = ltoh16_ua(&mgmt->count);
	len -= WPA_IE_SUITE_COUNT_LEN;
	for (i = 0; i < count && len >= WPA_SUITE_LEN;
		i++, len -= WPA_SUITE_LEN) {
		if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
			if (IS_WPA_AKM(mgmt->list[i].type)) {
				tmp = 0;
				switch (mgmt->list[i].type) {
					case RSN_AKM_NONE:
						tmp = WPA_AUTH_NONE;
						break;
					case RSN_AKM_UNSPECIFIED:
						tmp = WPA_AUTH_UNSPECIFIED;
						break;
					case RSN_AKM_PSK:
						tmp = WPA_AUTH_PSK;
						break;
					default:
						WL_ERR(("No Key Mgmt Info\n"));
				}
				wpa_auth |= tmp;
			}
		}
	}
	/* FOR WPS , set SEC_OW_ENABLED */
	wsec = (pval | gval | SES_OW_ENABLED);
	/* set auth */
	err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
	if (err < 0) {
		WL_ERR(("auth error %d\n", err));
		return BCME_ERROR;
	}
	/* set wsec */
	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
	if (err < 0) {
		WL_ERR(("wsec error %d\n", err));
		return BCME_ERROR;
	}
	/* set upper-layer auth */
	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
	if (err < 0) {
		WL_ERR(("wpa_auth error %d\n", err));
		return BCME_ERROR;
	}
exit:
	return 0;
}

/* cfg80211 .add_beacon/.set_beacon handler (spans the remaining chunks and
 * continues past this view). Three modes: P2P GO bring-up, initial softap
 * creation (AP_CREATING), and beacon/IE updates on a running AP
 * (AP_CREATED).
 */
static s32
wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev,
	struct beacon_parameters *info)
{
	s32 err = BCME_OK;
	bcm_tlv_t
	*ssid_ie;
	wlc_ssid_t ssid;
	struct wl_priv *wl = wiphy_priv(wiphy);
	struct wl_join_params join_params;
	wpa_ie_fixed_t *wps_ie;
	wpa_ie_fixed_t *wpa_ie;
	bcm_tlv_t *wpa2_ie;
	wifi_p2p_ie_t *p2p_ie;
	wifi_wfd_ie_t *wfd_ie;
	bool is_bssup = false;
	bool update_bss = false;
	bool pbc = false;
	u16 wpsie_len = 0;
	u16 p2pie_len = 0;
	u32 wfdie_len = 0;
	u8 beacon_ie[IE_MAX_LEN];
	s32 ie_offset = 0;
	s32 bssidx = 0;
	s32 infra = 1;
	s32 join_params_size = 0;
	s32 ap = 0;
	WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n",
		info->interval, info->dtim_period, info->head_len,
		info->tail_len));
	if (wl->p2p_net == dev) {
		dev = wl_to_prmry_ndev(wl);
	}
	bssidx = wl_cfgp2p_find_idx(wl, dev);
	/* P2P GO path: collect SSID + WPS/P2P/WFD IEs from the beacon and
	 * bring up the GO bsscfg
	 */
	if (p2p_is_on(wl) &&
		(bssidx == wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION))) {
		memset(beacon_ie, 0, sizeof(beacon_ie));
		/* We don't need to set beacon for P2P_GO,
		 * but need to parse ssid from beacon_parameters
		 * because there is no way to set ssid
		 */
		ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
		/* find the SSID */
		if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset],
			info->head_len - ie_offset,
			DOT11_MNG_SSID_ID)) != NULL) {
			memcpy(wl->p2p->ssid.SSID, ssid_ie->data, ssid_ie->len);
			wl->p2p->ssid.SSID_len = ssid_ie->len;
			WL_DBG(("SSID (%s) in Head \n", ssid_ie->data));
		} else {
			WL_ERR(("No SSID in beacon \n"));
		}
		/* find the WPSIE */
		if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail,
			info->tail_len)) != NULL) {
			wpsie_len = wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
			/*
			 * Should be compared with saved ie before saving it
			 */
			wl_validate_wps_ie((char *) wps_ie, &pbc);
			memcpy(beacon_ie, wps_ie, wpsie_len);
		} else {
			WL_ERR(("No WPSIE in beacon \n"));
		}
		/* find the P2PIE */
		if ((p2p_ie = wl_cfgp2p_find_p2pie((u8 *)info->tail,
			info->tail_len)) != NULL) {
			/* Total length of P2P Information Element */
			p2pie_len = p2p_ie->len + sizeof(p2p_ie->len) +
				sizeof(p2p_ie->id);
			/* pack the P2P IE right behind the WPS IE */
			memcpy(&beacon_ie[wpsie_len], p2p_ie, p2pie_len);
		} else {
			WL_ERR(("No P2PIE in beacon \n"));
		}
		/* find
		the WFD IEs */
		if ((wfd_ie = wl_cfgp2p_find_wfdie((u8 *)info->tail,
			info->tail_len)) != NULL) {
			/* Total length of P2P Information Element */
			wfdie_len = wfd_ie->len + sizeof(wfd_ie->len) +
				sizeof(wfd_ie->id);
			/* only append the WFD IE if it still fits the buffer */
			if ((wpsie_len + p2pie_len + wfdie_len) < IE_MAX_LEN) {
				memcpy(&beacon_ie[wpsie_len + p2pie_len],
					wfd_ie, wfdie_len);
			} else {
				WL_ERR(("Found WFD IE but there is no space, (%d)(%d)(%d)\n",
					wpsie_len, p2pie_len, wfdie_len));
				wfdie_len = 0;
			}
		} else {
			WL_ERR(("No WFDIE in beacon \n"));
		}
		/* add WLC_E_PROBREQ_MSG event to respose probe_request from STA */
		wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc);
		wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG,
			beacon_ie, wpsie_len + p2pie_len + wfdie_len);
		/* find the RSN_IE */
		if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len,
			DOT11_MNG_RSN_ID)) != NULL) {
			WL_DBG((" WPA2 IE is found\n"));
		}
		is_bssup = wl_cfgp2p_bss_isup(dev, bssidx);
		/* first bring-up with an RSN IE: program security, SSID and
		 * start the GO bsscfg
		 */
		if (!is_bssup && (wpa2_ie != NULL)) {
			wldev_iovar_setint(dev, "mpc", 0);
			if ((err = wl_validate_wpa2ie(dev, wpa2_ie, bssidx)) < 0) {
				WL_ERR(("WPA2 IE parsing error"));
				goto exit;
			}
			err = wldev_ioctl(dev, WLC_SET_INFRA, &infra,
				sizeof(s32), true);
			if (err < 0) {
				WL_ERR(("SET INFRA error %d\n", err));
				goto exit;
			}
			err = wldev_iovar_setbuf_bsscfg(dev, "ssid",
				&wl->p2p->ssid, sizeof(wl->p2p->ssid),
				wl->ioctl_buf, WLC_IOCTL_MAXLEN,
				bssidx, &wl->ioctl_buf_sync);
			if (err < 0) {
				WL_ERR(("GO SSID setting error %d\n", err));
				goto exit;
			}
			if ((err = wl_cfgp2p_bss(wl, dev, bssidx, 1)) < 0) {
				WL_ERR(("GO Bring up error %d\n", err));
				goto exit;
			}
		}
	} else if (wl_get_drv_status(wl, AP_CREATING, dev)) {
		/* softap creation path */
		ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
		ap = 1;
		/* find the SSID */
		if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset],
			info->head_len - ie_offset,
			DOT11_MNG_SSID_ID)) != NULL) {
			memset(&ssid, 0, sizeof(wlc_ssid_t));
			memcpy(ssid.SSID, ssid_ie->data, ssid_ie->len);
			WL_DBG(("SSID is (%s) in Head \n", ssid.SSID));
			ssid.SSID_len = ssid_ie->len;
			/* put the chip in AP mode: mpc off, down, infra, AP */
			wldev_iovar_setint(dev, "mpc", 0);
			wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true);
			wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
			if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap,
				sizeof(s32), true)) < 0) {
				WL_ERR(("setting AP mode failed %d \n", err));
				return err;
			}
			/* if requested, do softap ch autoselect */
			/* channel 14 is the auto-select sentinel saved by
			 * wl_cfg80211_set_channel; fall back to ch 6 on failure
			 */
			if (wl->hostapd_chan == 14) {
				int auto_chan;
				if ((err = wldev_get_auto_channel(dev,
					&auto_chan)) != 0) {
					WL_ERR(("softap: auto chan select failed,"
						" will use ch 6\n"));
					auto_chan = 6;
				} else {
					printf("<0>softap: got auto ch:%d\n",
						auto_chan);
				}
				err = wldev_ioctl(dev, WLC_SET_CHANNEL,
					&auto_chan, sizeof(auto_chan), true);
				if (err < 0) {
					WL_ERR(("softap: WLC_SET_CHANNEL error %d chip"
						" may not be supporting this channel\n",
						err));
					return err;
				}
			}
			/* find the RSN_IE */
			if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail,
				info->tail_len,
				DOT11_MNG_RSN_ID)) != NULL) {
				WL_DBG((" WPA2 IE is found\n"));
			}
			/* find the WPA_IE */
			if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)info->tail,
				info->tail_len)) != NULL) {
				WL_DBG((" WPA IE is found\n"));
			}
			/* program security from whichever IE(s) are present and
			 * cache a copy for later change detection
			 */
			if ((wpa_ie != NULL || wpa2_ie != NULL)) {
				if (wl_validate_wpa2ie(dev, wpa2_ie, bssidx) < 0 ||
					wl_validate_wpaie(dev, wpa_ie, bssidx) < 0) {
					wl->ap_info->security_mode = false;
					return BCME_ERROR;
				}
				wl->ap_info->security_mode = true;
				if (wl->ap_info->rsn_ie) {
					kfree(wl->ap_info->rsn_ie);
					wl->ap_info->rsn_ie = NULL;
				}
				if (wl->ap_info->wpa_ie) {
					kfree(wl->ap_info->wpa_ie);
					wl->ap_info->wpa_ie = NULL;
				}
				if (wl->ap_info->wps_ie) {
					kfree(wl->ap_info->wps_ie);
					wl->ap_info->wps_ie = NULL;
				}
				if (wpa_ie != NULL) {
					/* WPAIE */
					wl->ap_info->rsn_ie = NULL;
					wl->ap_info->wpa_ie = kmemdup(wpa_ie,
						wpa_ie->length +
						WPA_RSN_IE_TAG_FIXED_LEN,
						GFP_KERNEL);
				} else {
					/* RSNIE */
					wl->ap_info->wpa_ie = NULL;
					wl->ap_info->rsn_ie = kmemdup(wpa2_ie,
						wpa2_ie->len +
						WPA_RSN_IE_TAG_FIXED_LEN,
						GFP_KERNEL);
				}
			} else
				wl->ap_info->security_mode = false;
			/* find the WPSIE */
			if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail,
				info->tail_len)) != NULL) {
wpsie_len = wps_ie->length +WPA_RSN_IE_TAG_FIXED_LEN; /* * Should be compared with saved ie before saving it */ wl_validate_wps_ie((char *) wps_ie, &pbc); memcpy(beacon_ie, wps_ie, wpsie_len); wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG, beacon_ie, wpsie_len); wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL); /* add WLC_E_PROBREQ_MSG event to respose probe_request from STA */ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc); } else { WL_DBG(("No WPSIE in beacon \n")); } if (info->interval) { if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD, &info->interval, sizeof(s32), true)) < 0) { WL_ERR(("Beacon Interval Set Error, %d\n", err)); return err; } } if (info->dtim_period) { if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD, &info->dtim_period, sizeof(s32), true)) < 0) { WL_ERR(("DTIM Interval Set Error, %d\n", err)); return err; } } err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true); if (unlikely(err)) { WL_ERR(("WLC_UP error (%d)\n", err)); return err; } memset(&join_params, 0, sizeof(join_params)); /* join parameters starts with ssid */ join_params_size = sizeof(join_params.ssid); memcpy(join_params.ssid.SSID, ssid.SSID, ssid.SSID_len); join_params.ssid.SSID_len = htod32(ssid.SSID_len); /* create softap */ if ((err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, true)) == 0) { wl_clr_drv_status(wl, AP_CREATING, dev); wl_set_drv_status(wl, AP_CREATED, dev); } } } else if (wl_get_drv_status(wl, AP_CREATED, dev)) { ap = 1; /* find the WPSIE */ if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail, info->tail_len)) != NULL) { wpsie_len = wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN; /* * Should be compared with saved ie before saving it */ wl_validate_wps_ie((char *) wps_ie, &pbc); memcpy(beacon_ie, wps_ie, wpsie_len); wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG, beacon_ie, wpsie_len); if (wl->ap_info->wps_ie && memcmp(wl->ap_info->wps_ie, wps_ie, wpsie_len)) { WL_DBG((" WPS IE is changed\n")); 
kfree(wl->ap_info->wps_ie); wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL); /* add WLC_E_PROBREQ_MSG event to respose probe_request from STA */ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc); } else if (wl->ap_info->wps_ie == NULL) { WL_DBG((" WPS IE is added\n")); wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL); /* add WLC_E_PROBREQ_MSG event to respose probe_request from STA */ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc); } /* find the RSN_IE */ if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len, DOT11_MNG_RSN_ID)) != NULL) { WL_DBG((" WPA2 IE is found\n")); } /* find the WPA_IE */ if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)info->tail, info->tail_len)) != NULL) { WL_DBG((" WPA IE is found\n")); } if ((wpa_ie != NULL || wpa2_ie != NULL)) { if (!wl->ap_info->security_mode) { /* change from open mode to security mode */ update_bss = true; if (wpa_ie != NULL) { wl->ap_info->wpa_ie = kmemdup(wpa_ie, wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); } else { wl->ap_info->rsn_ie = kmemdup(wpa2_ie, wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); } } else if (wl->ap_info->wpa_ie) { /* change from WPA mode to WPA2 mode */ if (wpa2_ie != NULL) { update_bss = true; kfree(wl->ap_info->wpa_ie); wl->ap_info->rsn_ie = kmemdup(wpa2_ie, wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); wl->ap_info->wpa_ie = NULL; } else if (memcmp(wl->ap_info->wpa_ie, wpa_ie, wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN)) { kfree(wl->ap_info->wpa_ie); update_bss = true; wl->ap_info->wpa_ie = kmemdup(wpa_ie, wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); wl->ap_info->rsn_ie = NULL; } } else { /* change from WPA2 mode to WPA mode */ if (wpa_ie != NULL) { update_bss = true; kfree(wl->ap_info->rsn_ie); wl->ap_info->rsn_ie = NULL; wl->ap_info->wpa_ie = kmemdup(wpa_ie, wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, GFP_KERNEL); } else if (memcmp(wl->ap_info->rsn_ie, wpa2_ie, wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN)) { update_bss = true; 
kfree(wl->ap_info->rsn_ie);
						wl->ap_info->rsn_ie = kmemdup(wpa2_ie,
							wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
							GFP_KERNEL);
						wl->ap_info->wpa_ie = NULL;
					}
				}
				if (update_bss) {
					/* Security config changed: cycle the BSS down,
					 * re-validate the new IEs, then bring it back up.
					 */
					wl->ap_info->security_mode = true;
					wl_cfgp2p_bss(wl, dev, bssidx, 0);
					if (wl_validate_wpa2ie(dev, wpa2_ie, bssidx) < 0 ||
						wl_validate_wpaie(dev, wpa_ie, bssidx) < 0) {
						return BCME_ERROR;
					}
					wl_cfgp2p_bss(wl, dev, bssidx, 1);
				}
			}
		} else {
			WL_ERR(("No WPSIE in beacon \n"));
		}
	}
exit:
	/* On failure, re-enable minimum power consumption before returning */
	if (err)
		wldev_iovar_setint(dev, "mpc", 1);
	return err;
}

#ifdef WL_SCHED_SCAN
#define PNO_TIME		30
#define PNO_REPEAT		4
#define PNO_FREQ_EXPO_MAX	2

/*
 * Start a cfg80211 scheduled scan by programming the firmware's PNO
 * (preferred network offload) engine with the request's match-set SSIDs.
 * Returns 0 on success or a negative errno.
 */
int
wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
	struct net_device *dev,
	struct cfg80211_sched_scan_request *request)
{
	ushort pno_time = PNO_TIME;
	int pno_repeat = PNO_REPEAT;
	int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
	wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
	struct wl_priv *wl = wiphy_priv(wiphy);
	struct cfg80211_ssid *ssid = NULL;
	int ssid_count = 0;
	int i;
	int ret = 0;

	WL_DBG(("Enter \n"));
	WL_PNO((">>> SCHED SCAN START\n"));
	/* NOTE(review): request is dereferenced by the trace macros here
	 * before the NULL check below - confirm WL_PNO is compiled out or
	 * that callers never pass NULL.
	 */
	WL_PNO(("Enter n_match_sets:%d n_ssids:%d \n",
		request->n_match_sets, request->n_ssids));
	WL_PNO(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n",
		request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max));

#if defined(WL_ENABLE_P2P_IF)
	/* While GO is operational, PNO is not supported */
	if (dhd_cfg80211_get_opmode(wl) & P2P_GO_ENABLED) {
		WL_DBG(("PNO not enabled! op_mode: P2P GO"));
		return -1;
	}
#endif

	if (!request || !request->n_ssids || !request->n_match_sets) {
		WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids));
		return -EINVAL;
	}

	memset(&ssids_local, 0, sizeof(ssids_local));

	/* Copy each match-set SSID into the local PFN filter list */
	if (request->n_match_sets > 0) {
		for (i = 0; i < request->n_match_sets; i++) {
			ssid = &request->match_sets[i].ssid;
			memcpy(ssids_local[i].SSID, ssid->ssid, ssid->ssid_len);
			ssids_local[i].SSID_len = ssid->ssid_len;
			WL_PNO((">>> PNO filter set for ssid (%s) \n", ssid->ssid));
			ssid_count++;
		}
	}

	if (request->n_ssids > 0) {
		for (i = 0; i < request->n_ssids; i++) {
			/* Active scan req for ssids */
			WL_PNO((">>> Active scan req for ssid (%s) \n", request->ssids[i].ssid));

			/* match_set ssids is a superset of the n_ssid list, so these
			 * do not need to be added separately
			 */
		}
	}

	if (ssid_count) {
		/* Program the PFN list into the firmware */
		if ((ret = dhd_dev_pno_set(dev, ssids_local, request->n_match_sets,
			pno_time, pno_repeat, pno_freq_expo_max)) < 0) {
			WL_ERR(("PNO setup failed!! ret=%d \n", ret));
			return -EINVAL;
		}
		/* Enable the PNO */
		if (dhd_dev_pno_enable(dev, 1) < 0) {
			/* NOTE(review): this logs the stale ret from dhd_dev_pno_set;
			 * the enable call's status is not captured.
			 */
			WL_ERR(("PNO enable failed!! ret=%d \n", ret));
			return -EINVAL;
		}
		/* Remember the request so results can be reported against it */
		wl->sched_scan_req = request;
	} else {
		return -EINVAL;
	}

	return 0;
}

/*
 * Stop a scheduled scan: disable and reset the firmware PNO engine and
 * abort any scan it left running. Always returns 0.
 */
int
wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
{
	struct wl_priv *wl = wiphy_priv(wiphy);

	WL_DBG(("Enter \n"));
	WL_PNO((">>> SCHED SCAN STOP\n"));

	/* Best-effort teardown: failures are only logged */
	if (dhd_dev_pno_enable(dev, 0) < 0)
		WL_ERR(("PNO disable failed"));

	if (dhd_dev_pno_reset(dev) < 0)
		WL_ERR(("PNO reset failed"));

	if (wl->scan_request && wl->sched_scan_running) {
		WL_PNO((">>> Sched scan running. Aborting it..\n"));
		wl_notify_escan_complete(wl, dev, true, true);
	}

	wl->sched_scan_req = NULL;
	wl->sched_scan_running = FALSE;

	return 0;
}
#endif /* WL_SCHED_SCAN */

/* cfg80211 callback table registered with the wiphy in wl_setup_wiphy() */
static struct cfg80211_ops wl_cfg80211_ops = {
	.add_virtual_intf = wl_cfg80211_add_virtual_iface,
	.del_virtual_intf = wl_cfg80211_del_virtual_iface,
	.change_virtual_intf = wl_cfg80211_change_virtual_iface,
	.scan = wl_cfg80211_scan,
	.set_wiphy_params = wl_cfg80211_set_wiphy_params,
	.join_ibss = wl_cfg80211_join_ibss,
	.leave_ibss = wl_cfg80211_leave_ibss,
	.get_station = wl_cfg80211_get_station,
	.set_tx_power = wl_cfg80211_set_tx_power,
	.get_tx_power = wl_cfg80211_get_tx_power,
	.add_key = wl_cfg80211_add_key,
	.del_key = wl_cfg80211_del_key,
	.get_key = wl_cfg80211_get_key,
	.set_default_key = wl_cfg80211_config_default_key,
	.set_default_mgmt_key = wl_cfg80211_config_default_mgmt_key,
	.set_power_mgmt = wl_cfg80211_set_power_mgmt,
	.connect = wl_cfg80211_connect,
	.disconnect = wl_cfg80211_disconnect,
	.suspend = wl_cfg80211_suspend,
	.resume = wl_cfg80211_resume,
	.set_pmksa = wl_cfg80211_set_pmksa,
	.del_pmksa = wl_cfg80211_del_pmksa,
	.flush_pmksa = wl_cfg80211_flush_pmksa,
	.remain_on_channel = wl_cfg80211_remain_on_channel,
	.cancel_remain_on_channel = wl_cfg80211_cancel_remain_on_channel,
	.mgmt_tx = wl_cfg80211_mgmt_tx,
	.mgmt_frame_register = wl_cfg80211_mgmt_frame_register,
	.change_bss = wl_cfg80211_change_bss,
	.set_channel = wl_cfg80211_set_channel,
	/* set_beacon and add_beacon share one handler */
	.set_beacon = wl_cfg80211_add_set_beacon,
	.add_beacon = wl_cfg80211_add_set_beacon,
	.mgmt_tx_cancel_wait = wl_cfg80211_mgmt_tx_cancel_wait,
#ifdef WL_SCHED_SCAN
	.sched_scan_start = wl_cfg80211_sched_scan_start,
	.sched_scan_stop = wl_cfg80211_sched_scan_stop,
#endif /* WL_SCHED_SCAN */
};

/* Map a driver WL_MODE_* value to the corresponding nl80211 interface type */
s32
wl_mode_to_nl80211_iftype(s32 mode)
{
	s32 err = 0;

	switch (mode) {
	case WL_MODE_BSS:
		return NL80211_IFTYPE_STATION;
	case WL_MODE_IBSS:
		return NL80211_IFTYPE_ADHOC;
	case WL_MODE_AP:
		return NL80211_IFTYPE_AP;
	default:
		return
NL80211_IFTYPE_UNSPECIFIED;
	}

	/* NOTE(review): unreachable - every switch arm returns above */
	return err;
}

/*
 * Allocate a wiphy carrying wl_priv as its private data, configure its
 * capabilities/flags, apply the custom regulatory domain and register it
 * with cfg80211. Returns 0 or a negative errno.
 */
static s32
wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev)
{
	s32 err = 0;

	wdev->wiphy = wiphy_new(&wl_cfg80211_ops, sizeof(struct wl_priv));
	if (unlikely(!wdev->wiphy)) {
		WL_ERR(("Couldn not allocate wiphy device\n"));
		err = -ENOMEM;
		return err;
	}
	set_wiphy_dev(wdev->wiphy, sdiofunc_dev);
	wdev->wiphy->max_scan_ie_len = WL_SCAN_IE_LEN_MAX;
	/* Report how many SSIDs Driver can support per Scan request */
	wdev->wiphy->max_scan_ssids = WL_SCAN_PARAMS_SSID_MAX;
	wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
#ifdef WL_SCHED_SCAN
	wdev->wiphy->max_sched_scan_ssids = MAX_PFN_LIST_COUNT;
	wdev->wiphy->max_match_sets = MAX_PFN_LIST_COUNT;
	wdev->wiphy->max_sched_scan_ie_len = WL_SCAN_IE_LEN_MAX;
	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
#endif /* WL_SCHED_SCAN */
	wdev->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION)
		| BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MONITOR);
	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
	/* wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; - set in runtime */
	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
	wdev->wiphy->cipher_suites = __wl_cipher_suites;
	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
	wdev->wiphy->max_remain_on_channel_duration = 5000;
	wdev->wiphy->mgmt_stypes = wl_cfg80211_default_mgmt_stypes;
#ifndef WL_POWERSAVE_DISABLED
	wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
#else
	wdev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
#endif /* !WL_POWERSAVE_DISABLED */
	wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK |
		WIPHY_FLAG_4ADDR_AP |
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)
		WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
#endif
		WIPHY_FLAG_4ADDR_STATION;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
	/* wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM; */
#endif
	/* AP_SME flag can be advertised to remove patch from wpa_supplicant */
	wdev->wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;

	WL_DBG(("Registering custom regulatory)\n"));
	wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
	wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom);
	/* Now we can register wiphy with cfg80211 module */
	err = wiphy_register(wdev->wiphy);
	if (unlikely(err < 0)) {
		WL_ERR(("Couldn not register wiphy device (%d)\n", err));
		wiphy_free(wdev->wiphy);
	}
	return err;
}

/* Unregister and free the wiphy; wl itself lives inside the wiphy's
 * private area and is freed along with it.
 */
static void
wl_free_wdev(struct wl_priv *wl)
{
	struct wireless_dev *wdev = wl->wdev;
	struct wiphy *wiphy;

	if (!wdev) {
		WL_ERR(("wdev is invalid\n"));
		return;
	}
	wiphy = wdev->wiphy;
	wiphy_unregister(wdev->wiphy);
	wdev->wiphy->dev.parent = NULL;

	wl_delete_all_netinfo(wl);
	wiphy_free(wiphy);
	/* PLEASE do NOT call any function after wiphy_free, the driver's private
	 * structure "wl", which is the private part of wiphy, has been freed in
	 * wiphy_free !!!!!!!!!!!
	 */
}

/* Report every entry of the current scan-result list to cfg80211,
 * stopping at the first error.
 */
static s32
wl_inform_bss(struct wl_priv *wl)
{
	struct wl_scan_results *bss_list;
	struct wl_bss_info *bi = NULL;	/* must be initialized */
	s32 err = 0;
	s32 i;

	bss_list = wl->bss_list;
	WL_DBG(("scanned AP count (%d)\n", bss_list->count));
	bi = next_bss(bss_list, bi);
	for_each_bss(bss_list, bi, i) {
		err = wl_inform_single_bss(wl, bi);
		if (unlikely(err))
			break;
	}
	return err;
}

/*
 * Convert one firmware wl_bss_info record into a synthetic beacon /
 * probe-response management frame and hand it to cfg80211 via
 * cfg80211_inform_bss_frame(). Returns 0 or a negative errno.
 */
static s32
wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
{
	struct wiphy *wiphy = wiphy_from_scan(wl);
	struct ieee80211_mgmt *mgmt;
	struct ieee80211_channel *channel;
	struct ieee80211_supported_band *band;
	struct wl_cfg80211_bss_info *notif_bss_info;
	struct wl_scan_req *sr = wl_to_sr(wl);
	struct beacon_proberesp *beacon_proberesp;
	struct cfg80211_bss *cbss = NULL;
	s32 mgmt_type;
	s32 signal;
	u32 freq;
	s32 err = 0;

	if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
		WL_DBG(("Beacon is larger than buffer. Discarding\n"));
		return err;
	}
	notif_bss_info = kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt)
		- sizeof(u8) + WL_BSS_INFO_MAX, GFP_KERNEL);
	if (unlikely(!notif_bss_info)) {
		WL_ERR(("notif_bss_info alloc failed\n"));
		return -ENOMEM;
	}
	mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf;
	/* Prefer the control channel; fall back to the chanspec's channel */
	notif_bss_info->channel =
		bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(bi->chanspec);

	if (notif_bss_info->channel <= CH_MAX_2G_CHANNEL)
		band = wiphy->bands[IEEE80211_BAND_2GHZ];
	else
		band = wiphy->bands[IEEE80211_BAND_5GHZ];
	if (!band) {
		WL_ERR(("No valid band"));
		kfree(notif_bss_info);
		return -EINVAL;
	}
	notif_bss_info->rssi = dtoh16(bi->RSSI);
	memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
	mgmt_type = (bi->flags & WL_BSS_FLAGS_FROM_BEACON) ?
		IEEE80211_STYPE_BEACON : IEEE80211_STYPE_PROBE_RESP;
	if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) {
		mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type);
	}
	beacon_proberesp = wl->active_scan ?
		(struct beacon_proberesp *)&mgmt->u.probe_resp :
		(struct beacon_proberesp *)&mgmt->u.beacon;
	beacon_proberesp->timestamp = 0;
	beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
	beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
	/* Rebuild the IE buffer (fixing hidden-AP SSIDs) and copy it
	 * into the synthetic frame.
	 */
	wl_rst_ie(wl);
	wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length);
	wl_mrg_ie(wl, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
	wl_cp_ie(wl, beacon_proberesp->variable, WL_BSS_INFO_MAX -
		offsetof(struct wl_cfg80211_bss_info, frame_buf));
	notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt,
		u.beacon.variable) + wl_get_ielen(wl);
#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
	freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
#else
	freq = ieee80211_channel_to_frequency(notif_bss_info->channel, band->band);
#endif
	channel = ieee80211_get_channel(wiphy, freq);
	if (!channel) {
		WL_ERR(("No valid channel: %u\n", freq));
		kfree(notif_bss_info);
		return -EINVAL;
	}

	WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM"
		"mgmt_type %d frame_len %d\n", bi->SSID,
		notif_bss_info->rssi, notif_bss_info->channel,
		mgmt->u.beacon.capab_info, &bi->BSSID, mgmt_type,
		notif_bss_info->frame_len));

	signal = notif_bss_info->rssi * 100;

	if (!mgmt->u.probe_resp.timestamp) {
		struct timespec ts;
		get_monotonic_boottime(&ts);
		/* Synthesize a microsecond timestamp from boottime */
		mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec * 1000000)
			+ ts.tv_nsec / 1000;
	}

	cbss = cfg80211_inform_bss_frame(wiphy, channel, mgmt,
		le16_to_cpu(notif_bss_info->frame_len), signal, GFP_KERNEL);
	if (unlikely(!cbss)) {
		WL_ERR(("cfg80211_inform_bss_frame error\n"));
		kfree(notif_bss_info);
		return -EINVAL;
	}

	cfg80211_put_bss(cbss);
	kfree(notif_bss_info);
	return err;
}

/*
 * Query the firmware for the current IBSS's bss info, sanity-check the
 * BSSID, and report the network to cfg80211 via cfg80211_inform_bss().
 * Runs under wl->usr_sync. Returns 0 or a negative errno.
 */
static s32
wl_inform_ibss(struct wl_priv *wl, const u8 *bssid)
{
	struct net_device *ndev = wl_to_prmry_ndev(wl);
	struct wiphy *wiphy = wl_to_wiphy(wl);
	struct wl_bss_info *bi = NULL;
	struct ieee80211_channel *notify_channel;
	struct ieee80211_supported_band *band;
	struct cfg80211_bss *bss;
	s32 err = 0;
	u16 channel;
	u32 freq;
	u32 wsec = 0;
	u16 notify_capability;
	u16 notify_interval;
	u8 *notify_ie;
	size_t notify_ielen;
	s32 notify_signal;

	WL_TRACE(("Enter\n"));

	if (wl->scan_request) {
		wl_notify_escan_complete(wl, ndev, true, true);
	}

	mutex_lock(&wl->usr_sync);

	/* First word of extra_buf carries the buffer length for the ioctl */
	*(u32 *)wl->extra_buf = htod32(WL_EXTRA_BUF_MAX);
	err = wldev_ioctl(ndev, WLC_GET_BSS_INFO, wl->extra_buf,
		WL_EXTRA_BUF_MAX, false);
	if (err) {
		WL_ERR(("Failed to get bss info for IBSS\n"));
		err = -EIO;
		goto CleanUp;
	}
	bi = (struct wl_bss_info *)(wl->extra_buf + 4);

	if (memcmp(bssid, &bi->BSSID, ETHER_ADDR_LEN)) {
		WL_ERR(("BSSID mismatch: Inform %02x:%02x:%02x:%02x:%02x:%02x,"
			"%02x:%02x:%02x:%02x:%02x:%02x\n",
			bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5],
			bi->BSSID.octet[0], bi->BSSID.octet[1], bi->BSSID.octet[2],
			bi->BSSID.octet[3], bi->BSSID.octet[4], bi->BSSID.octet[5]));
		err = -EINVAL;
		goto CleanUp;
	}

	err = wldev_iovar_getint(ndev, "wsec", &wsec);
	if (err) {
		WL_ERR(("wsec failed: %d\n", err));
		err = -EIO;
		goto CleanUp;
	}

	channel = bi->ctl_ch ? bi->ctl_ch :
		CHSPEC_CHANNEL(dtohchanspec(bi->chanspec));
	if (channel <= CH_MAX_2G_CHANNEL)
		band = wiphy->bands[IEEE80211_BAND_2GHZ];
	else
		band = wiphy->bands[IEEE80211_BAND_5GHZ];
#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
	freq = ieee80211_channel_to_frequency(channel);
	(void)band->band;
#else
	freq = ieee80211_channel_to_frequency(channel, band->band);
#endif
	notify_channel = ieee80211_get_channel(wiphy, freq);
	notify_capability = dtoh16(bi->capability);
	notify_interval = dtoh16(bi->beacon_period);
	notify_ie = (u8 *)bi + dtoh16(bi->ie_offset);
	notify_ielen = dtoh32(bi->ie_length);
	/* RSSI in dBm scaled to mBm for cfg80211 */
	notify_signal = (int16)dtoh16(bi->RSSI) * 100;

	if (wl->p2p_supported) {
		notify_capability |= DOT11_CAP_IBSS;
		if (wsec)
			notify_capability |= DOT11_CAP_PRIVACY;
	}

	WL_DBG(("BSSID %02x:%02x:%02x:%02x:%02x:%02x",
		bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]));
	WL_INFO(("channel: %d(%d)\n", channel, freq));
	WL_INFO(("capability: %X\n", notify_capability));
	WL_INFO(("beacon interval: %d ms\n", notify_interval));
	WL_INFO(("signal: %d dBm\n", notify_signal));
	WL_INFO(("ie_len: %d\n", notify_ielen));
	bss = cfg80211_inform_bss(wiphy, notify_channel, bssid, 0,
		notify_capability, notify_interval,
		notify_ie, notify_ielen, notify_signal, GFP_KERNEL);
	if (!bss) {
		WL_ERR(("cfg80211_inform_bss() Failed\n"));
		err = -ENOMEM;
		goto CleanUp;
	}

	cfg80211_put_bss(bss);
	err = 0;

CleanUp:
	mutex_unlock(&wl->usr_sync);
	WL_TRACE(("Exit\n"));
	return err;
}

/* True when the firmware event indicates the link came up:
 * successful WLC_E_SET_SSID, or WLC_E_LINK with the LINK flag set
 * (non-IBSS only).
 */
static bool
wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev)
{
	u32 event = ntoh32(e->event_type);
	u32 status = ntoh32(e->status);
	u16 flags = ntoh16(e->flags);

	WL_DBG(("event %d, status %d flags %x\n", event, status, flags));
	if (event == WLC_E_SET_SSID) {
		if (status == WLC_E_STATUS_SUCCESS) {
			return true;
		}
	} else if (event == WLC_E_LINK) {
		if (flags & WLC_EVENT_MSG_LINK)
			if (!wl_is_ibssmode(wl, ndev))
				return true;
	}

	WL_DBG(("wl_is_linkup false\n"));
	return false;
}
/* True when the firmware event indicates loss of link:
 * any deauth/disassoc event, or WLC_E_LINK without the LINK flag.
 */
static bool
wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e)
{
	u32 event = ntoh32(e->event_type);
	u16 flags = ntoh16(e->flags);

	if (event == WLC_E_DEAUTH_IND ||
		event == WLC_E_DISASSOC_IND ||
		event == WLC_E_DISASSOC ||
		event == WLC_E_DEAUTH) {
		return true;
	} else if (event == WLC_E_LINK) {
		if (!(flags & WLC_EVENT_MSG_LINK))
			return true;
	}

	return false;
}

/* True when the firmware event indicates a failed connection attempt:
 * WLC_E_LINK with no networks found, or WLC_E_SET_SSID without success.
 */
static bool
wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e)
{
	u32 event = ntoh32(e->event_type);
	u32 status = ntoh32(e->status);

	if (event == WLC_E_LINK && status == WLC_E_STATUS_NO_NETWORKS)
		return true;
	if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS)
		return true;

	return false;
}

/* The mainline kernel >= 3.2.0 has support for indicating new/del station
 * to AP/P2P GO via events. If this change is backported to kernel for which
 * this driver is being built, then define WL_CFG80211_STA_EVENT. You
 * should use this new/del sta event mechanism for BRCM supplicant >= 22.
 */
static s32
wl_notify_connect_status_ap(struct wl_priv *wl, struct net_device *ndev,
	const wl_event_msg_t *e, void *data)
{
	s32 err = 0;
	u32 event = ntoh32(e->event_type);
	u32 reason = ntoh32(e->reason);
	u32 len = ntoh32(e->datalen);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
	bool isfree = false;
	u8 *mgmt_frame;
	u8 bsscfgidx = e->bsscfgidx;
	s32 freq;
	s32 channel;
	u8 body[WL_FRAME_LEN];
	u16 fc = 0;
	struct ieee80211_supported_band *band;
	struct ether_addr da;
	struct ether_addr bssid;
	struct wiphy *wiphy = wl_to_wiphy(wl);
	channel_info_t ci;
#else
	struct station_info sinfo;
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !WL_CFG80211_STA_EVENT */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
	/* Legacy path: synthesize a management frame from the event payload
	 * and deliver it via cfg80211_rx_mgmt().
	 */
	memset(body, 0, sizeof(body));
	memset(&bssid, 0, ETHER_ADDR_LEN);
	WL_DBG(("Enter event %d ndev %p\n", event, ndev));
	if (wl_get_mode_by_netdev(wl, ndev) == WL_INVALID)
		return WL_INVALID;

	if (len > WL_FRAME_LEN) {
		WL_ERR(("Received frame length %d from dongle is greater than"
			" allocated body buffer len %d", len, WL_FRAME_LEN));
		goto exit;
	}
	memcpy(body, data, len);
	wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
		NULL, 0, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bsscfgidx, &wl->ioctl_buf_sync);
	memcpy(da.octet, wl->ioctl_buf, ETHER_ADDR_LEN);
	err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
	/* Choose the 802.11 frame-control subtype matching the event */
	switch (event) {
		case WLC_E_ASSOC_IND:
			fc = FC_ASSOC_REQ;
			break;
		case WLC_E_REASSOC_IND:
			fc = FC_REASSOC_REQ;
			break;
		case WLC_E_DISASSOC_IND:
			fc = FC_DISASSOC;
			break;
		case WLC_E_DEAUTH_IND:
			fc = FC_DISASSOC;
			break;
		case WLC_E_DEAUTH:
			fc = FC_DISASSOC;
			break;
		default:
			fc = 0;
			goto exit;
	}
	if ((err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), false)))
		return err;

	channel = dtoh32(ci.hw_channel);
	if (channel <= CH_MAX_2G_CHANNEL)
		band = wiphy->bands[IEEE80211_BAND_2GHZ];
	else
		band = wiphy->bands[IEEE80211_BAND_5GHZ];
	if (!band) {
		WL_ERR(("No valid band"));
		return -EINVAL;
	}
#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
	freq = ieee80211_channel_to_frequency(channel);
#else
	freq = ieee80211_channel_to_frequency(channel, band->band);
#endif

	err = wl_frame_get_mgmt(fc, &da, &e->addr, &bssid,
		&mgmt_frame, &len, body);
	if (err < 0)
		goto exit;
	/* mgmt_frame is heap-allocated from here on; freed at exit */
	isfree = true;

	if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) {
		cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
	} else if (event == WLC_E_DISASSOC_IND) {
		cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
	} else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
		cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
	}

exit:
	if (isfree)
		kfree(mgmt_frame);
	return err;
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) && !WL_CFG80211_STA_EVENT */
	/* Modern path: report station add/remove via cfg80211 sta events */
	sinfo.filled = 0;
	if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) &&
		reason == DOT11_SC_SUCCESS) {
		sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
		if (!data) {
			WL_ERR(("No IEs present in ASSOC/REASSOC_IND"));
			return -EINVAL;
		}
		sinfo.assoc_req_ies = data;
		sinfo.assoc_req_ies_len = len;
		cfg80211_new_sta(ndev, e->addr.octet, &sinfo, GFP_ATOMIC);
	} else if (event == WLC_E_DISASSOC_IND) {
		cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
	} else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
		cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
	}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) && !WL_CFG80211_STA_EVENT */
	return err;
}

/*
 * Central connect-status event dispatcher. AP-mode events are delegated
 * to wl_notify_connect_status_ap(); STA/IBSS events are classified as
 * link-up, link-down or no-network and reported to cfg80211 accordingly.
 */
static s32
wl_notify_connect_status(struct wl_priv *wl, struct net_device *ndev,
	const wl_event_msg_t *e, void *data)
{
	bool act;
	s32 err = 0;
	u32 event = ntoh32(e->event_type);
	u32 reason;

	if (wl_get_mode_by_netdev(wl, ndev) == WL_MODE_AP) {
		wl_notify_connect_status_ap(wl, ndev, e, data);
	} else {
		WL_DBG(("wl_notify_connect_status : event %d status : %d ndev %p\n",
			ntoh32(e->event_type), ntoh32(e->status), ndev));
		if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DISASSOC_IND)) {
			/* Remember the reason code for the later
			 * cfg80211_disconnected() report.
			 */
			reason = ntoh32(e->reason);
			wl->deauth_reason = reason;
			WL_ERR(("Received %s event with reason code: %d\n",
				(event == WLC_E_DEAUTH_IND) ?
"WLC_E_DEAUTH_IND" : "WLC_E_DISASSOC_IND",
				reason));
		}
		if (wl_is_linkup(wl, e, ndev)) {
			/* Link established: record profile state and complete
			 * the pending join (IBSS or BSS).
			 */
			wl_link_up(wl);
			act = true;
			wl_update_prof(wl, ndev, e, &act, WL_PROF_ACT);
			wl_update_prof(wl, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
			wl->deauth_reason = 0;
			if (wl_is_ibssmode(wl, ndev)) {
				wl_ibss_join_done(wl, ndev, e, data, true);
				WL_DBG(("wl_ibss_join_done succeeded\n"));
			} else {
				if (!wl_get_drv_status(wl, DISCONNECTING, ndev)) {
					printk("wl_bss_connect_done succeeded with " MACDBG "\n",
						MAC2STRDBG((u8*)(&e->addr)));
					wl_bss_connect_done(wl, ndev, e, data, true);
					WL_DBG(("joined in BSS network \"%s\"\n",
						((struct wlc_ssid *)
						wl_read_prof(wl, ndev, WL_PROF_SSID))->SSID));
				}
			}
		} else if (wl_is_linkdown(wl, e)) {
			/* Link lost: abort any pending scan, then report the
			 * disconnect (or failed connect) to cfg80211.
			 */
			if (wl->scan_request) {
				if (wl->escan_on) {
					wl_notify_escan_complete(wl, ndev, true, true);
				} else {
					del_timer_sync(&wl->scan_timeout);
					wl_iscan_aborted(wl);
				}
			}
			if (wl_get_drv_status(wl, CONNECTED, ndev)) {
				scb_val_t scbval;
				u8 *curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
				wl_clr_drv_status(wl, CONNECTED, ndev);
				if (!wl_get_drv_status(wl, DISCONNECTING, ndev)) {
					/* To make sure disconnect, explictly send dissassoc
					 * for BSSID 00:00:00:00:00:00 issue
					 */
					scbval.val = WLAN_REASON_DEAUTH_LEAVING;
					memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
					scbval.val = htod32(scbval.val);
					wldev_ioctl(ndev, WLC_DISASSOC, &scbval,
						sizeof(scb_val_t), true);
					WL_ERR(("link down, calling cfg80211_disconnected"
						" with deauth_reason:%d\n", wl->deauth_reason));
					if (!wl_is_ibssmode(wl, ndev))
						cfg80211_disconnected(ndev, wl->deauth_reason,
							NULL, 0, GFP_KERNEL);
					wl_link_down(wl);
					wl_init_prof(wl, ndev);
				}
			} else if (wl_get_drv_status(wl, CONNECTING, ndev)) {
				printk("link down, during connecting\n");
				if (wl_is_ibssmode(wl, ndev))
					wl_ibss_join_done(wl, ndev, e, data, false);
				else
					wl_bss_connect_done(wl, ndev, e, data, false);
			}
			wl_clr_drv_status(wl, DISCONNECTING, ndev);
		} else if (wl_is_nonetwork(wl, e)) {
			printk("connect failed event=%d e->status %d e->reason %d\n",
				event, (int)ntoh32(e->status), (int)ntoh32(e->reason));
			/* Clean up any pending scan request */
			if (wl->scan_request) {
				if (wl->escan_on) {
					wl_notify_escan_complete(wl, ndev, true, true);
				} else {
					del_timer_sync(&wl->scan_timeout);
					wl_iscan_aborted(wl);
				}
			}
			if (wl_get_drv_status(wl, CONNECTING, ndev))
				wl_bss_connect_done(wl, ndev, e, data, false);
		} else {
			printk("%s nothing\n", __FUNCTION__);
		}
	}
	return err;
}

/* Handle WLC_E_ROAM: on success report either a roam (already connected)
 * or a completed connect, and refresh the cached BSSID profile.
 */
static s32
wl_notify_roaming_status(struct wl_priv *wl, struct net_device *ndev,
	const wl_event_msg_t *e, void *data)
{
	bool act;
	s32 err = 0;
	u32 event = be32_to_cpu(e->event_type);
	u32 status = be32_to_cpu(e->status);

	WL_DBG(("Enter \n"));
	if (event == WLC_E_ROAM && status == WLC_E_STATUS_SUCCESS) {
		if (wl_get_drv_status(wl, CONNECTED, ndev))
			wl_bss_roaming_done(wl, ndev, e, data);
		else
			wl_bss_connect_done(wl, ndev, e, data, true);
		act = true;
		wl_update_prof(wl, ndev, e, &act, WL_PROF_ACT);
		wl_update_prof(wl, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
	}
	return err;
}

/*
 * Fetch the association request/response IEs from the firmware via the
 * "assoc_info" / "assoc_req_ies" / "assoc_resp_ies" iovars and cache them
 * in the connect-info structure. Returns 0 or a negative errno.
 */
static s32
wl_get_assoc_ies(struct wl_priv *wl, struct net_device *ndev)
{
	wl_assoc_info_t assoc_info;
	struct wl_connect_info *conn_info = wl_to_conn(wl);
	s32 err = 0;

	WL_DBG(("Enter \n"));
	err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, wl->extra_buf,
		WL_ASSOC_INFO_MAX, NULL);
	if (unlikely(err)) {
		WL_ERR(("could not get assoc info (%d)\n", err));
		return err;
	}
	memcpy(&assoc_info, wl->extra_buf, sizeof(wl_assoc_info_t));
	assoc_info.req_len = htod32(assoc_info.req_len);
	assoc_info.resp_len = htod32(assoc_info.resp_len);
	assoc_info.flags = htod32(assoc_info.flags);

	/* Clear any previously cached IEs */
	if (conn_info->req_ie_len) {
		conn_info->req_ie_len = 0;
		bzero(conn_info->req_ie, sizeof(conn_info->req_ie));
	}
	if (conn_info->resp_ie_len) {
		conn_info->resp_ie_len = 0;
		bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
	}

	if (assoc_info.req_len) {
		err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, wl->extra_buf,
			WL_ASSOC_INFO_MAX, NULL);
		if (unlikely(err)) {
			WL_ERR(("could not get assoc req (%d)\n", err));
			return err;
		}
		/* Strip the fixed assoc-request header to get the raw IEs */
		conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req);
		if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) {
			/* Reassoc requests carry an extra current-AP address */
			conn_info->req_ie_len -= ETHER_ADDR_LEN;
		}
		if (conn_info->req_ie_len <= MAX_REQ_LINE)
			memcpy(conn_info->req_ie, wl->extra_buf, conn_info->req_ie_len);
		else {
			WL_ERR(("%s IE size %d above max %d size \n",
				__FUNCTION__, conn_info->req_ie_len, MAX_REQ_LINE));
			return err;
		}
	} else {
		conn_info->req_ie_len = 0;
	}

	if (assoc_info.resp_len) {
		err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, wl->extra_buf,
			WL_ASSOC_INFO_MAX, NULL);
		if (unlikely(err)) {
			WL_ERR(("could not get assoc resp (%d)\n", err));
			return err;
		}
		/* Strip the fixed assoc-response header to get the raw IEs */
		conn_info->resp_ie_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
		if (conn_info->resp_ie_len <= MAX_REQ_LINE)
			memcpy(conn_info->resp_ie, wl->extra_buf, conn_info->resp_ie_len);
		else {
			WL_ERR(("%s IE size %d above max %d size \n",
				__FUNCTION__, conn_info->resp_ie_len, MAX_REQ_LINE));
			return err;
		}
	} else {
		conn_info->resp_ie_len = 0;
	}
	WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
		conn_info->resp_ie_len));

	return err;
}

/* Convert a plain channel number into a 20MHz chanspec and append the
 * channel list to the join parameters, growing *join_params_size to match.
 * A channel of 0 leaves join_params untouched.
 */
static void
wl_ch_to_chanspec(int ch, struct wl_join_params *join_params,
	size_t *join_params_size)
{
	chanspec_t chanspec = 0;

	if (ch != 0) {
		join_params->params.chanspec_num = 1;
		join_params->params.chanspec_list[0] = ch;

		if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
			chanspec |= WL_CHANSPEC_BAND_2G;
		else
			chanspec |= WL_CHANSPEC_BAND_5G;

		chanspec |= WL_CHANSPEC_BW_20;
		chanspec |= WL_CHANSPEC_CTL_SB_NONE;

		*join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
			join_params->params.chanspec_num * sizeof(chanspec_t);

		join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
		join_params->params.chanspec_list[0] |= chanspec;
		join_params->params.chanspec_list[0] =
			htodchanspec(join_params->params.chanspec_list[0]);

		join_params->params.chanspec_num =
			htod32(join_params->params.chanspec_num);

		WL_DBG(("%s join_params->params.chanspec_list[0]= %X\n",
			__FUNCTION__, join_params->params.chanspec_list[0]));
	}
}

/*
 * Refresh the cached beacon-interval and DTIM-period profile entries for
 * the currently associated AP, consulting the cfg80211 BSS list first and
 * falling back to firmware queries. Runs under wl->usr_sync.
 */
static s32
wl_update_bss_info(struct wl_priv *wl, struct net_device *ndev)
{
	struct cfg80211_bss *bss;
	struct wl_bss_info *bi;
	struct wlc_ssid *ssid;
	struct bcm_tlv *tim;
	s32 beacon_interval;
	s32 dtim_period;
	size_t ie_len;
	u8 *ie;
	u8 *curbssid;
	s32 err = 0;
	struct wiphy *wiphy;

	wiphy = wl_to_wiphy(wl);

	if (wl_is_ibssmode(wl, ndev))
		return err;

	ssid = (struct wlc_ssid *)wl_read_prof(wl, ndev, WL_PROF_SSID);
	curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
	bss = cfg80211_get_bss(wiphy, NULL, curbssid,
		ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS,
		WLAN_CAPABILITY_ESS);

	mutex_lock(&wl->usr_sync);
	if (!bss) {
		/* Not in the cfg80211 cache: query the firmware directly */
		WL_DBG(("Could not find the AP\n"));
		*(u32 *) wl->extra_buf = htod32(WL_EXTRA_BUF_MAX);
		err = wldev_ioctl(ndev, WLC_GET_BSS_INFO,
			wl->extra_buf, WL_EXTRA_BUF_MAX, false);
		if (unlikely(err)) {
			WL_ERR(("Could not get bss info %d\n", err));
			goto update_bss_info_out;
		}
		bi = (struct wl_bss_info *)(wl->extra_buf + 4);
		if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
			WL_ERR(("Bssid doesn't match\n"));
			err = -EIO;
			goto update_bss_info_out;
		}
		err = wl_inform_single_bss(wl, bi);
		if (unlikely(err))
			goto update_bss_info_out;

		ie = ((u8 *)bi) + bi->ie_offset;
		ie_len = bi->ie_length;
		beacon_interval = cpu_to_le16(bi->beacon_period);
	} else {
		WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid));
		ie = bss->information_elements;
		ie_len = bss->len_information_elements;
		beacon_interval = bss->beacon_interval;
		cfg80211_put_bss(bss);
	}

	tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
	if (tim) {
		dtim_period = tim->data[1];
	} else {
		/*
		 * active scan was done so we could not get dtim
		 * information out of probe response.
		 * so we specifically query dtim information.
		 */
		err = wldev_ioctl(ndev, WLC_GET_DTIMPRD,
			&dtim_period, sizeof(dtim_period), false);
		if (unlikely(err)) {
			WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
			goto update_bss_info_out;
		}
	}

	wl_update_prof(wl, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT);
	wl_update_prof(wl, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD);

update_bss_info_out:
	if (unlikely(err)) {
		WL_ERR(("Failed with error %d\n", err));
	}
	mutex_unlock(&wl->usr_sync);
	return err;
}

/* Complete a successful roam: refresh assoc IEs / BSS info and report
 * the new AP to cfg80211 via cfg80211_roamed().
 */
static s32
wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
	const wl_event_msg_t *e, void *data)
{
	struct wl_connect_info *conn_info = wl_to_conn(wl);
	s32 err = 0;
	u8 *curbssid;

	wl_get_assoc_ies(wl, ndev);
	wl_update_prof(wl, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
	curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
	wl_update_bss_info(wl, ndev);
	wl_update_pmklist(ndev, wl->pmk_list, err);
	cfg80211_roamed(ndev,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)
		NULL,	/* channel unknown here; newer kernels take it as arg 2 */
#endif
		curbssid,
		conn_info->req_ie, conn_info->req_ie_len,
		conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
	WL_DBG(("Report roaming result\n"));

	wl_set_drv_status(wl, CONNECTED, ndev);

	return err;
}

/*
 * Complete a pending BSS connect attempt (success or failure) and report
 * the outcome to cfg80211 via cfg80211_connect_result().
 */
static s32
wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
	const wl_event_msg_t *e, void *data, bool completed)
{
	struct wl_connect_info *conn_info =
wl_to_conn(wl); s32 err = 0; u8 *curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID); WL_DBG((" enter\n")); if (wl->scan_request) { wl_notify_escan_complete(wl, ndev, true, true); } if (is_zero_ether_addr(curbssid)) { curbssid = wl_read_prof(wl, ndev, WL_PROF_PENDING_BSSID); if (is_zero_ether_addr(curbssid)) { WL_ERR(("Invalid BSSID\n")); curbssid = NULL; } } if (wl_get_drv_status(wl, CONNECTING, ndev)) { wl_clr_drv_status(wl, CONNECTING, ndev); if (completed) { wl_get_assoc_ies(wl, ndev); wl_update_prof(wl, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID); curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID); wl_update_bss_info(wl, ndev); wl_update_pmklist(ndev, wl->pmk_list, err); wl_set_drv_status(wl, CONNECTED, ndev); } cfg80211_connect_result(ndev, curbssid, conn_info->req_ie, conn_info->req_ie_len, conn_info->resp_ie, conn_info->resp_ie_len, completed ? WLAN_STATUS_SUCCESS : (e->reason) ? ntoh32(e->reason) : WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); if (completed) WL_INFO(("Report connect result - connection succeeded\n")); else WL_ERR(("Report connect result - connection failed\n")); } return err; } static s32 wl_ibss_join_done(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data, bool completed) { s32 err = 0; WL_TRACE(("Enter\n")); if (wl->scan_request) { wl_notify_escan_complete(wl, ndev, true, true); } if (wl_get_drv_status(wl, CONNECTING, ndev)) { wl_clr_drv_status(wl, CONNECTING, ndev); if (completed) { err = wl_inform_ibss(wl, (u8 *)&e->addr); if (err) { WL_ERR(("wl_inform_ibss() failed: %d\n", err)); } wl_set_drv_status(wl, CONNECTED, ndev); cfg80211_ibss_joined(ndev, (u8 *)&e->addr, GFP_KERNEL); WL_DBG(("cfg80211_ibss_joined() called with valid BSSID\n")); } } WL_TRACE(("Exit\n")); return err; } static s32 wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data) { u16 flags = ntoh16(e->flags); enum nl80211_key_type key_type; mutex_lock(&wl->usr_sync); if (flags & 
WLC_EVENT_MSG_GROUP) key_type = NL80211_KEYTYPE_GROUP; else key_type = NL80211_KEYTYPE_PAIRWISE; cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1, NULL, GFP_KERNEL); mutex_unlock(&wl->usr_sync); return 0; } #ifdef PNO_SUPPORT static s32 wl_notify_pfn_status(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data) { WL_ERR((">>> PNO Event\n")); #ifndef WL_SCHED_SCAN mutex_lock(&wl->usr_sync); /* TODO: Use cfg80211_sched_scan_results(wiphy); */ cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL); mutex_unlock(&wl->usr_sync); #else /* If cfg80211 scheduled scan is supported, report the pno results via sched * scan results */ wl_notify_sched_scan_results(wl, ndev, e, data); #endif /* WL_SCHED_SCAN */ return 0; } #endif /* PNO_SUPPORT */ static s32 wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data) { struct channel_info channel_inform; struct wl_scan_results *bss_list; u32 len = WL_SCAN_BUF_MAX; s32 err = 0; unsigned long flags; WL_DBG(("Enter \n")); if (!wl_get_drv_status(wl, SCANNING, ndev)) { WL_ERR(("scan is not ready \n")); return err; } if (wl->iscan_on && wl->iscan_kickstart) return wl_wakeup_iscan(wl_to_iscan(wl)); mutex_lock(&wl->usr_sync); wl_clr_drv_status(wl, SCANNING, ndev); err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform, sizeof(channel_inform), false); if (unlikely(err)) { WL_ERR(("scan busy (%d)\n", err)); goto scan_done_out; } channel_inform.scan_channel = dtoh32(channel_inform.scan_channel); if (unlikely(channel_inform.scan_channel)) { WL_DBG(("channel_inform.scan_channel (%d)\n", channel_inform.scan_channel)); } wl->bss_list = wl->scan_results; bss_list = wl->bss_list; memset(bss_list, 0, len); bss_list->buflen = htod32(len); err = wldev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len, false); if (unlikely(err)) { WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err)); err = -EINVAL; goto scan_done_out; } bss_list->buflen = dtoh32(bss_list->buflen); 
bss_list->version = dtoh32(bss_list->version); bss_list->count = dtoh32(bss_list->count); err = wl_inform_bss(wl); scan_done_out: del_timer_sync(&wl->scan_timeout); spin_lock_irqsave(&wl->cfgdrv_lock, flags); if (wl->scan_request) { cfg80211_scan_done(wl->scan_request, false); wl->scan_request = NULL; } spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); WL_DBG(("cfg80211_scan_done\n")); mutex_unlock(&wl->usr_sync); return err; } static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da, const struct ether_addr *sa, const struct ether_addr *bssid, u8 **pheader, u32 *body_len, u8 *pbody) { struct dot11_management_header *hdr; u32 totlen = 0; s32 err = 0; u8 *offset; u32 prebody_len = *body_len; switch (fc) { case FC_ASSOC_REQ: /* capability , listen interval */ totlen = DOT11_ASSOC_REQ_FIXED_LEN; *body_len += DOT11_ASSOC_REQ_FIXED_LEN; break; case FC_REASSOC_REQ: /* capability, listen inteval, ap address */ totlen = DOT11_REASSOC_REQ_FIXED_LEN; *body_len += DOT11_REASSOC_REQ_FIXED_LEN; break; } totlen += DOT11_MGMT_HDR_LEN + prebody_len; *pheader = kzalloc(totlen, GFP_KERNEL); if (*pheader == NULL) { WL_ERR(("memory alloc failed \n")); return -ENOMEM; } hdr = (struct dot11_management_header *) (*pheader); hdr->fc = htol16(fc); hdr->durid = 0; hdr->seq = 0; offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len); bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN); bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN); bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN); bcopy((const char*)pbody, offset, prebody_len); *body_len = totlen; return err; } static s32 wl_notify_rx_mgmt_frame(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data) { struct ieee80211_supported_band *band; struct wiphy *wiphy = wl_to_wiphy(wl); struct ether_addr da; struct ether_addr bssid; bool isfree = false; s32 err = 0; s32 freq; struct net_device *dev = NULL; wifi_p2p_pub_act_frame_t *act_frm = NULL; wifi_p2p_action_frame_t 
*p2p_act_frm = NULL; wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL; wl_event_rx_frame_data_t *rxframe = (wl_event_rx_frame_data_t*)data; u32 event = ntoh32(e->event_type); u8 *mgmt_frame; u8 bsscfgidx = e->bsscfgidx; u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t); u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK)); memset(&bssid, 0, ETHER_ADDR_LEN); if (wl->p2p_net == ndev) { dev = wl_to_prmry_ndev(wl); } else { dev = ndev; } if (channel <= CH_MAX_2G_CHANNEL) band = wiphy->bands[IEEE80211_BAND_2GHZ]; else band = wiphy->bands[IEEE80211_BAND_5GHZ]; if (!band) { WL_ERR(("No valid band")); return -EINVAL; } if ((event == WLC_E_P2P_PROBREQ_MSG) && wl->p2p && wl_get_p2p_status(wl, GO_NEG_PHASE)) { WL_DBG(("Filtering P2P probe_req while being in GO-Neg state\n")); goto exit; } #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS) freq = ieee80211_channel_to_frequency(channel); #else freq = ieee80211_channel_to_frequency(channel, band->band); #endif if (event == WLC_E_ACTION_FRAME_RX) { wldev_iovar_getbuf_bsscfg(dev, "cur_etheraddr", NULL, 0, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bsscfgidx, &wl->ioctl_buf_sync); wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false); memcpy(da.octet, wl->ioctl_buf, ETHER_ADDR_LEN); err = wl_frame_get_mgmt(FC_ACTION, &da, &e->addr, &bssid, &mgmt_frame, &mgmt_frame_len, (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1)); if (err < 0) { WL_ERR(("%s: Error in receiving action frame len %d channel %d freq %d\n", __func__, mgmt_frame_len, channel, freq)); goto exit; } isfree = true; if (wl_cfgp2p_is_pub_action(&mgmt_frame[DOT11_MGMT_HDR_LEN], mgmt_frame_len - DOT11_MGMT_HDR_LEN)) { act_frm = (wifi_p2p_pub_act_frame_t *) (&mgmt_frame[DOT11_MGMT_HDR_LEN]); } else if (wl_cfgp2p_is_p2p_action(&mgmt_frame[DOT11_MGMT_HDR_LEN], mgmt_frame_len - DOT11_MGMT_HDR_LEN)) { p2p_act_frm = (wifi_p2p_action_frame_t *) (&mgmt_frame[DOT11_MGMT_HDR_LEN]); (void) p2p_act_frm; } else if 
(wl_cfgp2p_is_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
			sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)
				(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
			(void) sd_act_frm;
		}
		wl_cfgp2p_print_actframe(false, &mgmt_frame[DOT11_MGMT_HDR_LEN],
			mgmt_frame_len - DOT11_MGMT_HDR_LEN);
		/*
		 * After complete GO Negotiation, roll back to mpc mode
		 */
		if (act_frm && ((act_frm->subtype == P2P_PAF_GON_CONF) ||
			(act_frm->subtype == P2P_PAF_PROVDIS_RSP))) {
			wldev_iovar_setint(dev, "mpc", 1);
		}
		if (act_frm && (act_frm->subtype == P2P_PAF_GON_CONF)) {
			WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
			wl_clr_p2p_status(wl, GO_NEG_PHASE);
		}
		if (act_frm && (act_frm->subtype == P2P_PAF_GON_RSP)) {
			/* Cancel the dwell time of req frame */
			WL_DBG(("P2P: Received GO NEG Resp frame, cancelling the dwell time\n"));
			wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
				wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
		}
	} else {
		/* Non-action frames (probe requests) are delivered as-is */
		mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1);
	}
	cfg80211_rx_mgmt(ndev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
	WL_DBG(("%s: mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n",
		__func__, mgmt_frame_len, ntoh32(e->datalen), channel, freq));
	if (isfree)
		kfree(mgmt_frame);
exit:
	return 0;
}

#ifdef WL_SCHED_SCAN
/* If target scan is not reliable, set the below define to "1" to do a
 * full escan
 */
#define FULL_ESCAN_ON_PFN_NET_FOUND 1
/* Handle a PFN_NET_FOUND PNO event: convert the firmware's PFN result
 * list into a cfg80211_scan_request and kick off an escan so results
 * reach the supplicant in full (with IEs). PFN_NET_LOST is ignored.
 */
static s32 wl_notify_sched_scan_results(struct wl_priv *wl,
	struct net_device *ndev, const wl_event_msg_t *e, void *data)
{
	wl_pfn_net_info_t *netinfo, *pnetinfo;
	struct cfg80211_scan_request request;
	struct wiphy *wiphy = wl_to_wiphy(wl);
	int err = 0;
	struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT];
	struct ieee80211_channel *channel = NULL;
	int channel_req = 0;
	int band = 0;
	struct wl_pfn_scanresults *pfn_result = (struct wl_pfn_scanresults *)data;

	WL_DBG(("Enter\n"));
	if (e->event_type == WLC_E_PFN_NET_LOST) {
		WL_PNO(("PFN NET LOST event. Do Nothing \n"));
		return 0;
	}
	WL_PNO((">>> PFN NET FOUND event. count:%d \n", pfn_result->count));
	if (pfn_result->count > 0) {
		int i;

		memset(&request, 0x00, sizeof(struct cfg80211_scan_request));
		memset(&ssid, 0x00, sizeof(ssid));
		request.wiphy = wiphy;
		/* net-info array directly follows the fixed result header */
		pnetinfo = (wl_pfn_net_info_t *)(data + sizeof(wl_pfn_scanresults_t)
			- sizeof(wl_pfn_net_info_t));
		channel = (struct ieee80211_channel *)kzalloc(
			(sizeof(struct ieee80211_channel) * MAX_PFN_LIST_COUNT),
			GFP_KERNEL);
		if (!channel) {
			WL_ERR(("No memory"));
			err = -ENOMEM;
			goto out_err;
		}
		for (i = 0; i < pfn_result->count; i++) {
			netinfo = &pnetinfo[i];
			/* NOTE(review): &pnetinfo[i] cannot be NULL unless
			 * pnetinfo itself is near-NULL; this check looks
			 * vestigial but is kept as-is. */
			if (!netinfo) {
				WL_ERR(("Invalid netinfo ptr. index:%d", i));
				err = -EINVAL;
				goto out_err;
			}
			WL_PNO((">>> SSID:%s Channel:%d \n",
				netinfo->pfnsubnet.SSID, netinfo->pfnsubnet.channel));
			/* PFN result doesn't have all the info which are required by the supplicant
			 * (For e.g IEs) Do a target Escan so that sched scan results are reported
			 * via wl_inform_single_bss in the required format. Escan does require the
			 * scan request in the form of cfg80211_scan_request. For timebeing, create
			 * cfg80211_scan_request one out of the received PNO event.
			 */
			memcpy(ssid[i].ssid, netinfo->pfnsubnet.SSID,
				netinfo->pfnsubnet.SSID_len);
			ssid[i].ssid_len = netinfo->pfnsubnet.SSID_len;
			request.n_ssids++;
			channel_req = netinfo->pfnsubnet.channel;
			band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
				: NL80211_BAND_5GHZ;
			channel[i].center_freq =
				ieee80211_channel_to_frequency(channel_req, band);
			channel[i].band = band;
			channel[i].flags |= IEEE80211_CHAN_NO_HT40;
			request.channels[i] = &channel[i];
			request.n_channels++;
		}
		/* assign parsed ssid array */
		if (request.n_ssids)
			request.ssids = &ssid[0];
		if (wl_get_drv_status_all(wl, SCANNING)) {
			/* Abort any on-going scan */
			wl_notify_escan_complete(wl, ndev, true, true);
		}
		if (wl_get_p2p_status(wl, DISCOVERY_ON)) {
			WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
			err = wl_cfgp2p_discover_enable_search(wl, false);
			if (unlikely(err)) {
				wl_clr_drv_status(wl, SCANNING, ndev);
				goto out_err;
			}
		}
		wl_set_drv_status(wl, SCANNING, ndev);
#if FULL_ESCAN_ON_PFN_NET_FOUND
		WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
		err = wl_do_escan(wl, wiphy, ndev, NULL);
#else
		WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
		err = wl_do_escan(wl, wiphy, ndev, &request);
#endif
		if (err) {
			wl_clr_drv_status(wl, SCANNING, ndev);
			goto out_err;
		}
		wl->sched_scan_running = TRUE;
	} else {
		WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
	}
out_err:
	if (channel)
		kfree(channel);
	return err;
}
#endif /* WL_SCHED_SCAN */

/* Initialize the configuration cache to "unset" sentinel values. */
static void wl_init_conf(struct wl_conf *conf)
{
	WL_DBG(("Enter \n"));
	conf->frag_threshold = (u32)-1;
	conf->rts_threshold = (u32)-1;
	conf->retry_short = (u32)-1;
	conf->retry_long = (u32)-1;
	conf->tx_power = -1;
}

/* Zero the per-netdev profile under the cfgdrv spinlock. */
static void wl_init_prof(struct wl_priv *wl, struct net_device *ndev)
{
	unsigned long flags;
	struct wl_profile *profile = wl_get_profile_by_netdev(wl, ndev);

	spin_lock_irqsave(&wl->cfgdrv_lock, flags);
	memset(profile, 0, sizeof(struct wl_profile));
	spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
}

/* Populate the firmware-event dispatch table. Unlisted events stay NULL
 * and are ignored by the event loop.
 */
static void wl_init_event_handler(struct wl_priv *wl)
{
	memset(wl->evt_handler, 0, sizeof(wl->evt_handler));
	wl->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
	wl->evt_handler[WLC_E_LINK] = wl_notify_connect_status;
	wl->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status;
	wl->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status;
	wl->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status;
	wl->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status;
	wl->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status;
	wl->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status;
	wl->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status;
	wl->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status;
	wl->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame;
	wl->evt_handler[WLC_E_PROBREQ_MSG] =
wl_notify_rx_mgmt_frame; wl->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame; wl->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete; wl->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete; wl->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete; #ifdef PNO_SUPPORT wl->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status; #endif /* PNO_SUPPORT */ } static s32 wl_init_priv_mem(struct wl_priv *wl) { WL_DBG(("Enter \n")); wl->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL); if (unlikely(!wl->scan_results)) { WL_ERR(("Scan results alloc failed\n")); goto init_priv_mem_out; } wl->conf = (void *)kzalloc(sizeof(*wl->conf), GFP_KERNEL); if (unlikely(!wl->conf)) { WL_ERR(("wl_conf alloc failed\n")); goto init_priv_mem_out; } wl->scan_req_int = (void *)kzalloc(sizeof(*wl->scan_req_int), GFP_KERNEL); if (unlikely(!wl->scan_req_int)) { WL_ERR(("Scan req alloc failed\n")); goto init_priv_mem_out; } wl->ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL); if (unlikely(!wl->ioctl_buf)) { WL_ERR(("Ioctl buf alloc failed\n")); goto init_priv_mem_out; } wl->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL); if (unlikely(!wl->escan_ioctl_buf)) { WL_ERR(("Ioctl buf alloc failed\n")); goto init_priv_mem_out; } wl->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); if (unlikely(!wl->extra_buf)) { WL_ERR(("Extra buf alloc failed\n")); goto init_priv_mem_out; } wl->iscan = (void *)kzalloc(sizeof(*wl->iscan), GFP_KERNEL); if (unlikely(!wl->iscan)) { WL_ERR(("Iscan buf alloc failed\n")); goto init_priv_mem_out; } wl->pmk_list = (void *)kzalloc(sizeof(*wl->pmk_list), GFP_KERNEL); if (unlikely(!wl->pmk_list)) { WL_ERR(("pmk list alloc failed\n")); goto init_priv_mem_out; } wl->sta_info = (void *)kzalloc(sizeof(*wl->sta_info), GFP_KERNEL); if (unlikely(!wl->sta_info)) { WL_ERR(("sta info alloc failed\n")); goto init_priv_mem_out; } wl->afx_hdl = (void 
*)kzalloc(sizeof(*wl->afx_hdl), GFP_KERNEL); if (unlikely(!wl->afx_hdl)) { WL_ERR(("afx hdl alloc failed\n")); goto init_priv_mem_out; } else { init_completion(&wl->act_frm_scan); INIT_WORK(&wl->afx_hdl->work, wl_cfg80211_afx_handler); } return 0; init_priv_mem_out: wl_deinit_priv_mem(wl); return -ENOMEM; } static void wl_deinit_priv_mem(struct wl_priv *wl) { kfree(wl->scan_results); wl->scan_results = NULL; kfree(wl->conf); wl->conf = NULL; kfree(wl->scan_req_int); wl->scan_req_int = NULL; kfree(wl->ioctl_buf); wl->ioctl_buf = NULL; kfree(wl->escan_ioctl_buf); wl->escan_ioctl_buf = NULL; kfree(wl->extra_buf); wl->extra_buf = NULL; kfree(wl->iscan); wl->iscan = NULL; kfree(wl->pmk_list); wl->pmk_list = NULL; kfree(wl->sta_info); wl->sta_info = NULL; if (wl->afx_hdl) { cancel_work_sync(&wl->afx_hdl->work); kfree(wl->afx_hdl); wl->afx_hdl = NULL; } if (wl->ap_info) { kfree(wl->ap_info->wpa_ie); kfree(wl->ap_info->rsn_ie); kfree(wl->ap_info->wps_ie); kfree(wl->ap_info); wl->ap_info = NULL; } } static s32 wl_create_event_handler(struct wl_priv *wl) { int ret = 0; WL_DBG(("Enter \n")); /* Do not use DHD in cfg driver */ wl->event_tsk.thr_pid = -1; PROC_START(wl_event_handler, wl, &wl->event_tsk, 0); if (wl->event_tsk.thr_pid < 0) ret = -ENOMEM; return ret; } static void wl_destroy_event_handler(struct wl_priv *wl) { if (wl->event_tsk.thr_pid >= 0) PROC_STOP(&wl->event_tsk); } static void wl_term_iscan(struct wl_priv *wl) { struct wl_iscan_ctrl *iscan = wl_to_iscan(wl); WL_TRACE(("In\n")); if (wl->iscan_on && iscan->tsk) { iscan->state = WL_ISCAN_STATE_IDLE; WL_INFO(("SIGTERM\n")); send_sig(SIGTERM, iscan->tsk, 1); WL_DBG(("kthread_stop\n")); kthread_stop(iscan->tsk); iscan->tsk = NULL; } } static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted) { struct wl_priv *wl = iscan_to_wl(iscan); struct net_device *ndev = wl_to_prmry_ndev(wl); unsigned long flags; WL_DBG(("Enter \n")); if(!aborted) wl->scan_busy_count = 0; if (!wl_get_drv_status(wl, 
SCANNING, ndev)) { wl_clr_drv_status(wl, SCANNING, ndev); WL_ERR(("Scan complete while device not scanning\n")); return; } spin_lock_irqsave(&wl->cfgdrv_lock, flags); wl_clr_drv_status(wl, SCANNING, ndev); if (likely(wl->scan_request)) { cfg80211_scan_done(wl->scan_request, aborted); wl->scan_request = NULL; } spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); wl->iscan_kickstart = false; } static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan) { if (likely(iscan->state != WL_ISCAN_STATE_IDLE)) { WL_DBG(("wake up iscan\n")); up(&iscan->sync); return 0; } return -EIO; } static s32 wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status, struct wl_scan_results **bss_list) { struct wl_iscan_results list; struct wl_scan_results *results; struct wl_iscan_results *list_buf; s32 err = 0; WL_DBG(("Enter \n")); memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX); list_buf = (struct wl_iscan_results *)iscan->scan_buf; results = &list_buf->results; results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE; results->version = 0; results->count = 0; memset(&list, 0, sizeof(list)); list.results.buflen = htod32(WL_ISCAN_BUF_MAX); err = wldev_iovar_getbuf(iscan->dev, "iscanresults", &list, WL_ISCAN_RESULTS_FIXED_SIZE, iscan->scan_buf, WL_ISCAN_BUF_MAX, NULL); if (unlikely(err)) { WL_ERR(("error (%d)\n", err)); return err; } results->buflen = dtoh32(results->buflen); results->version = dtoh32(results->version); results->count = dtoh32(results->count); WL_DBG(("results->count = %d\n", results->count)); WL_DBG(("results->buflen = %d\n", results->buflen)); *status = dtoh32(list_buf->status); *bss_list = results; return err; } static s32 wl_iscan_done(struct wl_priv *wl) { struct wl_iscan_ctrl *iscan = wl->iscan; s32 err = 0; iscan->state = WL_ISCAN_STATE_IDLE; mutex_lock(&wl->usr_sync); wl_inform_bss(wl); wl_notify_iscan_complete(iscan, false); mutex_unlock(&wl->usr_sync); return err; } static s32 wl_iscan_pending(struct wl_priv *wl) { struct wl_iscan_ctrl *iscan = wl->iscan; s32 err = 0; /* 
Reschedule the timer */ mod_timer(&iscan->timer, jiffies + msecs_to_jiffies(iscan->timer_ms)); iscan->timer_on = 1; return err; } static s32 wl_iscan_inprogress(struct wl_priv *wl) { struct wl_iscan_ctrl *iscan = wl->iscan; s32 err = 0; mutex_lock(&wl->usr_sync); wl_inform_bss(wl); wl_run_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE); mutex_unlock(&wl->usr_sync); /* Reschedule the timer */ mod_timer(&iscan->timer, jiffies + msecs_to_jiffies(iscan->timer_ms)); iscan->timer_on = 1; return err; } static s32 wl_iscan_aborted(struct wl_priv *wl) { struct wl_iscan_ctrl *iscan = wl->iscan; s32 err = 0; iscan->state = WL_ISCAN_STATE_IDLE; mutex_lock(&wl->usr_sync); wl_notify_iscan_complete(iscan, true); mutex_unlock(&wl->usr_sync); return err; } static s32 wl_iscan_thread(void *data) { struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data; struct wl_priv *wl = iscan_to_wl(iscan); u32 status; int err = 0; allow_signal(SIGTERM); status = WL_SCAN_RESULTS_PARTIAL; while (likely(!down_interruptible(&iscan->sync))) { if (kthread_should_stop()) break; if (iscan->timer_on) { del_timer_sync(&iscan->timer); iscan->timer_on = 0; } mutex_lock(&wl->usr_sync); err = wl_get_iscan_results(iscan, &status, &wl->bss_list); if (unlikely(err)) { status = WL_SCAN_RESULTS_ABORTED; WL_ERR(("Abort iscan\n")); } mutex_unlock(&wl->usr_sync); iscan->iscan_handler[status] (wl); } if (iscan->timer_on) { del_timer_sync(&iscan->timer); iscan->timer_on = 0; } WL_DBG(("%s was terminated\n", __func__)); return 0; } static void wl_scan_timeout(unsigned long data) { struct wl_priv *wl = (struct wl_priv *)data; schedule_work(&wl->work_scan_timeout); } static void wl_scan_timeout_process(struct work_struct *work) { struct wl_priv *wl; wl = (wl_priv_t *)container_of(work, wl_priv_t, work_scan_timeout); if (wl->scan_request) { WL_ERR(("timer expired\n")); if (wl->escan_on) wl_notify_escan_complete(wl, wl->escan_info.ndev, true, true); else wl_notify_iscan_complete(wl_to_iscan(wl), true); } /* Assume FW is in 
bad state if there are continuous scan timeouts */ wl->scan_busy_count++; if (wl->scan_busy_count > WL_SCAN_BUSY_MAX) { wl->scan_busy_count = 0; WL_ERR(("Continuous scan timeouts!! Exercising FW hang recovery\n")); net_os_send_hang_message(wl->escan_info.ndev); } } static void wl_iscan_timer(unsigned long data) { struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data; if (iscan) { iscan->timer_on = 0; WL_DBG(("timer expired\n")); wl_wakeup_iscan(iscan); } } static s32 wl_invoke_iscan(struct wl_priv *wl) { struct wl_iscan_ctrl *iscan = wl_to_iscan(wl); int err = 0; if (wl->iscan_on && !iscan->tsk) { iscan->state = WL_ISCAN_STATE_IDLE; sema_init(&iscan->sync, 0); iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan"); if (IS_ERR(iscan->tsk)) { WL_ERR(("Could not create iscan thread\n")); iscan->tsk = NULL; return -ENOMEM; } } return err; } static void wl_init_iscan_handler(struct wl_iscan_ctrl *iscan) { memset(iscan->iscan_handler, 0, sizeof(iscan->iscan_handler)); iscan->iscan_handler[WL_SCAN_RESULTS_SUCCESS] = wl_iscan_done; iscan->iscan_handler[WL_SCAN_RESULTS_PARTIAL] = wl_iscan_inprogress; iscan->iscan_handler[WL_SCAN_RESULTS_PENDING] = wl_iscan_pending; iscan->iscan_handler[WL_SCAN_RESULTS_ABORTED] = wl_iscan_aborted; iscan->iscan_handler[WL_SCAN_RESULTS_NO_MEM] = wl_iscan_aborted; } static s32 wl_cfg80211_netdev_notifier_call(struct notifier_block * nb, unsigned long state, void *ndev) { struct net_device *dev = ndev; struct wireless_dev *wdev = dev->ieee80211_ptr; struct wl_priv *wl = wlcfg_drv_priv; WL_DBG(("Enter \n")); if (!wdev || !wl || dev == wl_to_prmry_ndev(wl)) return NOTIFY_DONE; switch (state) { case NETDEV_UNREGISTER: /* after calling list_del_rcu(&wdev->list) */ wl_dealloc_netinfo(wl, ndev); break; case NETDEV_GOING_DOWN: /* At NETDEV_DOWN state, wdev_cleanup_work work will be called. * In front of door, the function checks * whether current scan is working or not. 
* If the scanning is still working, wdev_cleanup_work call WARN_ON and * make the scan done forcibly. */ if (wl_get_drv_status(wl, SCANNING, dev)) { if (wl->escan_on) { wl_notify_escan_complete(wl, dev, true, true); } } break; } return NOTIFY_DONE; } static struct notifier_block wl_cfg80211_netdev_notifier = { .notifier_call = wl_cfg80211_netdev_notifier_call, }; static s32 wl_notify_escan_complete(struct wl_priv *wl, struct net_device *ndev, bool aborted, bool fw_abort) { wl_scan_params_t *params = NULL; s32 params_size = 0; s32 err = BCME_OK; unsigned long flags; struct net_device *dev; WL_DBG(("Enter \n")); if(!aborted) wl->scan_busy_count = 0; if (wl->scan_request) { if (wl->scan_request->dev == wl->p2p_net) dev = wl_to_prmry_ndev(wl); else dev = wl->scan_request->dev; } else { WL_DBG(("wl->scan_request is NULL may be internal scan." "doing scan_abort for ndev %p primary %p", ndev, wl_to_prmry_ndev(wl))); dev = ndev; } if (fw_abort && !in_atomic()) { /* Our scan params only need space for 1 channel and 0 ssids */ params = wl_cfg80211_scan_alloc_params(-1, 0, &params_size); if (params == NULL) { WL_ERR(("scan params allocation failed \n")); err = -ENOMEM; } else { /* Do a scan abort to stop the driver's scan engine */ err = wldev_ioctl(dev, WLC_SCAN, params, params_size, true); if (err < 0) { WL_ERR(("scan abort failed \n")); } } } if (timer_pending(&wl->scan_timeout)) del_timer_sync(&wl->scan_timeout); spin_lock_irqsave(&wl->cfgdrv_lock, flags); #ifdef WL_SCHED_SCAN if (wl->sched_scan_req && !wl->scan_request) { WL_PNO((">>> REPORTING SCHED SCAN RESULTS \n")); if (!aborted) cfg80211_sched_scan_results(wl->sched_scan_req->wiphy); wl->sched_scan_running = FALSE; wl->sched_scan_req = NULL; } #endif /* WL_SCHED_SCAN */ if (likely(wl->scan_request)) { cfg80211_scan_done(wl->scan_request, aborted); wl->scan_request = NULL; } if (p2p_is_on(wl)) wl_clr_p2p_status(wl, SCANNING); wl_clr_drv_status(wl, SCANNING, dev); spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); if 
(params) kfree(params); return err; } static s32 wl_escan_handler(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data) { s32 err = BCME_OK; s32 status = ntoh32(e->status); wl_bss_info_t *bi; wl_escan_result_t *escan_result; wl_bss_info_t *bss = NULL; wl_scan_results_t *list; wifi_p2p_ie_t * p2p_ie; u32 bi_length; u32 i; u8 *p2p_dev_addr = NULL; WL_DBG((" enter event type : %d, status : %d \n", ntoh32(e->event_type), ntoh32(e->status))); mutex_lock(&wl->usr_sync); /* P2P SCAN is coming from primary interface */ if (wl_get_p2p_status(wl, SCANNING)) { if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) ndev = wl->afx_hdl->dev; else ndev = wl->escan_info.ndev; } if (!ndev || !wl->escan_on || (!wl_get_drv_status(wl, SCANNING, ndev) && !wl->sched_scan_running)) { WL_ERR(("escan is not ready ndev %p wl->escan_on %d" " drv_status 0x%x e_type %d e_states %d\n", ndev, wl->escan_on, wl_get_drv_status(wl, SCANNING, ndev), ntoh32(e->event_type), ntoh32(e->status))); goto exit; } escan_result = (wl_escan_result_t *)data; if (status == WLC_E_STATUS_PARTIAL) { WL_INFO(("WLC_E_STATUS_PARTIAL \n")); if (!escan_result) { WL_ERR(("Invalid escan result (NULL pointer)\n")); goto exit; } if (dtoh16(escan_result->bss_count) != 1) { WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count)); goto exit; } bi = escan_result->bss_info; if (!bi) { WL_ERR(("Invalid escan bss info (NULL pointer)\n")); goto exit; } bi_length = dtoh32(bi->length); if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) { WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length)); goto exit; } if (!(wl_to_wiphy(wl)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) { if (dtoh16(bi->capability) & DOT11_CAP_IBSS) { WL_DBG(("Ignoring IBSS result\n")); goto exit; } } if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) { p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length); if (p2p_dev_addr && !memcmp(p2p_dev_addr, wl->afx_hdl->pending_tx_dst_addr.octet, 
ETHER_ADDR_LEN)) {
				s32 channel = CHSPEC_CHANNEL(dtohchanspec(bi->chanspec));
				WL_DBG(("ACTION FRAME SCAN : Peer " MACSTR " found, channel : %d\n",
					MAC2STR(wl->afx_hdl->pending_tx_dst_addr.octet),
					channel));
				wl_clr_p2p_status(wl, SCANNING);
				wl->afx_hdl->peer_chan = channel;
				complete(&wl->act_frm_scan);
				goto exit;
			}
		} else {
			/* Regular escan: append this BSS to the accumulated list */
			list = (wl_scan_results_t *)wl->escan_info.escan_buf;
			if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
				WL_ERR(("Buffer is too small: ignoring\n"));
				goto exit;
			}
#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF)
			if (wl->p2p_net && wl->scan_request &&
				wl->scan_request->dev == wl->p2p_net) {
#else
			if (p2p_is_on(wl) && p2p_scan(wl)) {
#endif
				/* p2p scan && allow only probe response */
				if (bi->flags & WL_BSS_FLAGS_FROM_BEACON)
					goto exit;
				if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset,
					bi->ie_length)) == NULL) {
					WL_ERR(("Couldn't find P2PIE in probe"
						" response/beacon\n"));
					goto exit;
				}
			}
#define WLC_BSS_RSSI_ON_CHANNEL 0x0002
			/* De-duplicate against already-collected entries by
			 * BSSID + band + SSID, keeping the best RSSI */
			for (i = 0; i < list->count; i++) {
				bss = bss ? (wl_bss_info_t *)((uintptr)bss +
					dtoh32(bss->length)) : list->bss_info;
				if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
					CHSPEC_BAND(bi->chanspec) ==
					CHSPEC_BAND(bss->chanspec) &&
					bi->SSID_len == bss->SSID_len &&
					!bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
					if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) ==
						(bi->flags & WLC_BSS_RSSI_ON_CHANNEL)) {
						/* preserve max RSSI if the measurements are
						 * both on-channel or both off-channel
						 */
						bss->RSSI = MAX(bss->RSSI, bi->RSSI);
					} else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) &&
						(bi->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) {
						/* preserve the on-channel rssi measurement
						 * if the new measurement is off channel
						 */
						bss->RSSI = bi->RSSI;
						bss->flags |= WLC_BSS_RSSI_ON_CHANNEL;
					}
					goto exit;
				}
			}
			memcpy(&(wl->escan_info.escan_buf[list->buflen]), bi, bi_length);
			list->version = dtoh32(bi->version);
			list->buflen += bi_length;
			list->count++;
		}
	} else if (status == WLC_E_STATUS_SUCCESS) {
		wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
		if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) {
			WL_INFO(("ACTION FRAME SCAN DONE\n"));
			wl_clr_p2p_status(wl, SCANNING);
			wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
			/* Peer never found: unblock the waiter anyway */
			if (wl->afx_hdl->peer_chan == WL_INVALID)
				complete(&wl->act_frm_scan);
		} else if ((likely(wl->scan_request)) || (wl->sched_scan_running)) {
			WL_INFO(("ESCAN COMPLETED\n"));
			wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf;
			wl_inform_bss(wl);
			wl_notify_escan_complete(wl, ndev, false, false);
		}
	} else if (status == WLC_E_STATUS_ABORT) {
		wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
		if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) {
			WL_INFO(("ACTION FRAME SCAN DONE\n"));
			wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
			wl_clr_p2p_status(wl, SCANNING);
			if (wl->afx_hdl->peer_chan == WL_INVALID)
				complete(&wl->act_frm_scan);
		} else if ((likely(wl->scan_request)) || (wl->sched_scan_running)) {
			WL_INFO(("ESCAN ABORTED\n"));
			wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf;
			wl_inform_bss(wl);
wl_notify_escan_complete(wl, ndev, true, false); } } else if (status == WLC_E_STATUS_NEWSCAN) { /* Do Nothing. Ignore this event */ } else { WL_ERR(("unexpected Escan Event %d : abort\n", status)); wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE; if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) { WL_INFO(("ACTION FRAME SCAN DONE\n")); wl_clr_p2p_status(wl, SCANNING); wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev); if (wl->afx_hdl->peer_chan == WL_INVALID) complete(&wl->act_frm_scan); } else if ((likely(wl->scan_request)) || (wl->sched_scan_running)) { wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf; wl_inform_bss(wl); wl_notify_escan_complete(wl, ndev, true, false); } } exit: mutex_unlock(&wl->usr_sync); return err; } static s32 wl_init_scan(struct wl_priv *wl) { struct wl_iscan_ctrl *iscan = wl_to_iscan(wl); int err = 0; if (wl->iscan_on) { iscan->dev = wl_to_prmry_ndev(wl); iscan->state = WL_ISCAN_STATE_IDLE; wl_init_iscan_handler(iscan); iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS; init_timer(&iscan->timer); iscan->timer.data = (unsigned long) iscan; iscan->timer.function = wl_iscan_timer; sema_init(&iscan->sync, 0); iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan"); if (IS_ERR(iscan->tsk)) { WL_ERR(("Could not create iscan thread\n")); iscan->tsk = NULL; return -ENOMEM; } iscan->data = wl; } else if (wl->escan_on) { wl->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler; wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE; } /* Init scan_timeout timer */ init_timer(&wl->scan_timeout); wl->scan_timeout.data = (unsigned long) wl; wl->scan_timeout.function = wl_scan_timeout; return err; } static s32 wl_init_priv(struct wl_priv *wl) { struct wiphy *wiphy = wl_to_wiphy(wl); struct net_device *ndev = wl_to_prmry_ndev(wl); s32 err = 0; wl->scan_request = NULL; wl->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT); wl->iscan_on = false; wl->escan_on = true; wl->roam_on = false; wl->iscan_kickstart = false; wl->active_scan = true; 
wl->rf_blocked = false; wl->deauth_reason = 0; spin_lock_init(&wl->cfgdrv_lock); mutex_init(&wl->ioctl_buf_sync); init_waitqueue_head(&wl->netif_change_event); wl_init_eq(wl); err = wl_init_priv_mem(wl); if (err) return err; if (wl_create_event_handler(wl)) return -ENOMEM; wl_init_event_handler(wl); mutex_init(&wl->usr_sync); INIT_WORK(&wl->work_scan_timeout, wl_scan_timeout_process); err = wl_init_scan(wl); if (err) return err; wl_init_conf(wl->conf); wl_init_prof(wl, ndev); wl_link_down(wl); DNGL_FUNC(dhd_cfg80211_init, (wl)); return err; } static void wl_deinit_priv(struct wl_priv *wl) { DNGL_FUNC(dhd_cfg80211_deinit, (wl)); wl_destroy_event_handler(wl); wl_flush_eq(wl); wl_link_down(wl); del_timer_sync(&wl->scan_timeout); wl_term_iscan(wl); cancel_work_sync(&wl->work_scan_timeout); wl_deinit_priv_mem(wl); unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier); } #if defined(WLP2P) && defined(WL_ENABLE_P2P_IF) static s32 wl_cfg80211_attach_p2p(void) { struct wl_priv *wl = wlcfg_drv_priv; WL_TRACE(("Enter \n")); if (wl_cfgp2p_register_ndev(wl) < 0) { WL_ERR(("%s: P2P attach failed. 
\n", __func__)); return -ENODEV; } return 0; } static s32 wl_cfg80211_detach_p2p(void) { struct wl_priv *wl = wlcfg_drv_priv; struct wireless_dev *wdev = wl->p2p_wdev; WL_DBG(("Enter \n")); if (!wdev || !wl) { WL_ERR(("Invalid Ptr\n")); return -EINVAL; } wl_cfgp2p_unregister_ndev(wl); wl->p2p_wdev = NULL; wl->p2p_net = NULL; WL_DBG(("Freeing 0x%08x \n", (unsigned int)wdev)); kfree(wdev); return 0; } #endif /* defined(WLP2P) && defined(WL_ENABLE_P2P_IF) */ s32 wl_cfg80211_attach_post(struct net_device *ndev) { struct wl_priv * wl = NULL; s32 err = 0; WL_TRACE(("In\n")); if (unlikely(!ndev)) { WL_ERR(("ndev is invaild\n")); return -ENODEV; } wl = wlcfg_drv_priv; if (wl && !wl_get_drv_status(wl, READY, ndev)) { if (wl->wdev && wl_cfgp2p_supported(wl, ndev)) { wl->wdev->wiphy->interface_modes |= (BIT(NL80211_IFTYPE_P2P_CLIENT)| BIT(NL80211_IFTYPE_P2P_GO)); if ((err = wl_cfgp2p_init_priv(wl)) != 0) goto fail; #if defined(WLP2P) && defined(WL_ENABLE_P2P_IF) if (wl->p2p_net) { /* Update MAC addr for p2p0 interface here. */ memcpy(wl->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN); wl->p2p_net->dev_addr[0] |= 0x02; printk("%s: p2p_dev_addr="MACSTR "\n", wl->p2p_net->name, MAC2STR(wl->p2p_net->dev_addr)); } else { WL_ERR(("p2p_net not yet populated." 
" Couldn't update the MAC Address for p2p0 \n")); return -ENODEV; } #endif /* defined(WLP2P) && (WL_ENABLE_P2P_IF) */ wl->p2p_supported = true; } } else return -ENODEV; wl_set_drv_status(wl, READY, ndev); fail: return err; } s32 wl_cfg80211_attach(struct net_device *ndev, void *data) { struct wireless_dev *wdev; struct wl_priv *wl; s32 err = 0; struct device *dev; WL_TRACE(("In\n")); if (!ndev) { WL_ERR(("ndev is invaild\n")); return -ENODEV; } WL_DBG(("func %p\n", wl_cfg80211_get_parent_dev())); dev = wl_cfg80211_get_parent_dev(); wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); if (unlikely(!wdev)) { WL_ERR(("Could not allocate wireless device\n")); return -ENOMEM; } err = wl_setup_wiphy(wdev, dev); if (unlikely(err)) { kfree(wdev); return -ENOMEM; } wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS); wl = (struct wl_priv *)wiphy_priv(wdev->wiphy); wl->wdev = wdev; wl->pub = data; INIT_LIST_HEAD(&wl->net_list); ndev->ieee80211_ptr = wdev; SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); wdev->netdev = ndev; err = wl_alloc_netinfo(wl, ndev, wdev, WL_MODE_BSS); if (err) { WL_ERR(("Failed to alloc net_info (%d)\n", err)); goto cfg80211_attach_out; } err = wl_init_priv(wl); if (err) { WL_ERR(("Failed to init iwm_priv (%d)\n", err)); goto cfg80211_attach_out; } err = wl_setup_rfkill(wl, TRUE); if (err) { WL_ERR(("Failed to setup rfkill %d\n", err)); goto cfg80211_attach_out; } err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier); if (err) { WL_ERR(("Failed to register notifierl %d\n", err)); goto cfg80211_attach_out; } #if defined(COEX_DHCP) if (wl_cfg80211_btcoex_init(wl)) goto cfg80211_attach_out; #endif wlcfg_drv_priv = wl; #if defined(WLP2P) && defined(WL_ENABLE_P2P_IF) err = wl_cfg80211_attach_p2p(); if (err) goto cfg80211_attach_out; #endif return err; cfg80211_attach_out: err = wl_setup_rfkill(wl, FALSE); wl_free_wdev(wl); return err; } void wl_cfg80211_detach(void *para) { struct wl_priv *wl; wl = wlcfg_drv_priv; WL_TRACE(("In\n")); #if defined(COEX_DHCP) 
wl_cfg80211_btcoex_deinit(wl); #endif #if defined(WLP2P) && defined(WL_ENABLE_P2P_IF) wl_cfg80211_detach_p2p(); #endif wl_setup_rfkill(wl, FALSE); if (wl->p2p_supported) wl_cfgp2p_deinit_priv(wl); wl_deinit_priv(wl); wlcfg_drv_priv = NULL; wl_cfg80211_clear_parent_dev(); wl_free_wdev(wl); /* PLEASE do NOT call any function after wl_free_wdev, the driver's private structure "wl", * which is the private part of wiphy, has been freed in wl_free_wdev !!!!!!!!!!! */ } static void wl_wakeup_event(struct wl_priv *wl) { if (wl->event_tsk.thr_pid >= 0) { DHD_OS_WAKE_LOCK(wl->pub); up(&wl->event_tsk.sema); } } static int wl_is_p2p_event(struct wl_event_q *e) { switch (e->etype) { /* We have to seperate out the P2P events received * on primary interface so that it can be send up * via p2p0 interface. */ case WLC_E_P2P_PROBREQ_MSG: case WLC_E_P2P_DISC_LISTEN_COMPLETE: case WLC_E_ACTION_FRAME_RX: case WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE: case WLC_E_ACTION_FRAME_COMPLETE: if (e->emsg.ifidx != 0) { WL_TRACE(("P2P Event on Virtual I/F (ifidx:%d) \n", e->emsg.ifidx)); /* We are only bothered about the P2P events received * on primary interface. For rest of them return false * so that it is sent over the interface corresponding * to the ifidx. */ return FALSE; } else { WL_TRACE(("P2P Event on Primary I/F (ifidx:%d)." 
" Sent it to p2p0 \n", e->emsg.ifidx)); return TRUE; } break; default: WL_TRACE(("NON-P2P Event %d on ifidx (ifidx:%d) \n", e->etype, e->emsg.ifidx)); return FALSE; } } static s32 wl_event_handler(void *data) { struct net_device *netdev; struct wl_priv *wl = NULL; struct wl_event_q *e; tsk_ctl_t *tsk = (tsk_ctl_t *)data; wl = (struct wl_priv *)tsk->parent; DAEMONIZE("dhd_cfg80211_event"); complete(&tsk->completed); while (down_interruptible (&tsk->sema) == 0) { SMP_RD_BARRIER_DEPENDS(); if (tsk->terminated) break; while ((e = wl_deq_event(wl))) { WL_DBG(("event type (%d), if idx: %d\n", e->etype, e->emsg.ifidx)); /* All P2P device address related events comes on primary interface since * there is no corresponding bsscfg for P2P interface. Map it to p2p0 * interface. */ if ((wl_is_p2p_event(e) == TRUE) && (wl->p2p_net)) { netdev = wl->p2p_net; } else { netdev = dhd_idx2net((struct dhd_pub *)(wl->pub), e->emsg.ifidx); } if (!netdev) netdev = wl_to_prmry_ndev(wl); if (e->etype < WLC_E_LAST && wl->evt_handler[e->etype]) { wl->evt_handler[e->etype] (wl, netdev, &e->emsg, e->edata); } else { WL_DBG(("Unknown Event (%d): ignoring\n", e->etype)); } wl_put_event(e); } DHD_OS_WAKE_UNLOCK(wl->pub); } WL_ERR(("%s was terminated\n", __func__)); complete_and_exit(&tsk->completed, 0); return 0; } void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data) { u32 event_type = ntoh32(e->event_type); struct wl_priv *wl = wlcfg_drv_priv; #if (WL_DBG_LEVEL > 0) s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ? 
wl_dbg_estr[event_type] : (s8 *) "Unknown"; WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr)); #endif /* (WL_DBG_LEVEL > 0) */ if (likely(!wl_enq_event(wl, ndev, event_type, e, data))) wl_wakeup_event(wl); } static void wl_init_eq(struct wl_priv *wl) { wl_init_eq_lock(wl); INIT_LIST_HEAD(&wl->eq_list); } static void wl_flush_eq(struct wl_priv *wl) { struct wl_event_q *e; unsigned long flags; flags = wl_lock_eq(wl); while (!list_empty(&wl->eq_list)) { e = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list); list_del(&e->eq_list); kfree(e); } wl_unlock_eq(wl, flags); } /* * retrieve first queued event from head */ static struct wl_event_q *wl_deq_event(struct wl_priv *wl) { struct wl_event_q *e = NULL; unsigned long flags; flags = wl_lock_eq(wl); if (likely(!list_empty(&wl->eq_list))) { e = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list); list_del(&e->eq_list); } wl_unlock_eq(wl, flags); return e; } /* * push event to tail of the queue */ static s32 wl_enq_event(struct wl_priv *wl, struct net_device *ndev, u32 event, const wl_event_msg_t *msg, void *data) { struct wl_event_q *e; s32 err = 0; uint32 evtq_size; uint32 data_len; unsigned long flags; gfp_t aflags; data_len = 0; if (data) data_len = ntoh32(msg->datalen); evtq_size = sizeof(struct wl_event_q) + data_len; aflags = (in_atomic()) ? 
GFP_ATOMIC : GFP_KERNEL; e = kzalloc(evtq_size, aflags); if (unlikely(!e)) { WL_ERR(("event alloc failed\n")); return -ENOMEM; } e->etype = event; memcpy(&e->emsg, msg, sizeof(wl_event_msg_t)); if (data) memcpy(e->edata, data, data_len); flags = wl_lock_eq(wl); list_add_tail(&e->eq_list, &wl->eq_list); wl_unlock_eq(wl, flags); return err; } static void wl_put_event(struct wl_event_q *e) { kfree(e); } static s32 wl_config_ifmode(struct wl_priv *wl, struct net_device *ndev, s32 iftype) { s32 infra = 0; s32 err = 0; s32 mode = 0; switch (iftype) { case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_WDS: WL_ERR(("type (%d) : currently we do not support this mode\n", iftype)); err = -EINVAL; return err; case NL80211_IFTYPE_ADHOC: mode = WL_MODE_IBSS; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: mode = WL_MODE_BSS; infra = 1; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: mode = WL_MODE_AP; infra = 1; break; default: err = -EINVAL; WL_ERR(("invalid type (%d)\n", iftype)); return err; } infra = htod32(infra); err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra), true); if (unlikely(err)) { WL_ERR(("WLC_SET_INFRA error (%d)\n", err)); return err; } wl_set_mode_by_netdev(wl, ndev, mode); return 0; } s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add) { s8 iovbuf[WL_EVENTING_MASK_LEN + 12]; s8 eventmask[WL_EVENTING_MASK_LEN]; s32 err = 0; /* Setup event_msgs */ bcm_mkiovar("event_msgs", NULL, 0, iovbuf, sizeof(iovbuf)); err = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false); if (unlikely(err)) { WL_ERR(("Get event_msgs error (%d)\n", err)); goto eventmsg_out; } memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN); if (add) { setbit(eventmask, event); } else { clrbit(eventmask, event); } bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true); if (unlikely(err)) { WL_ERR(("Set event_msgs error (%d)\n", err)); goto 
eventmsg_out; } eventmsg_out: return err; } static int wl_construct_reginfo(struct wl_priv *wl, s32 bw_cap) { struct net_device *dev = wl_to_prmry_ndev(wl); struct ieee80211_channel *band_chan_arr = NULL; wl_uint32_list_t *list; u32 i, j, index, n_2g, n_5g, band, channel, array_size; u32 *n_cnt = NULL; chanspec_t c = 0; s32 err = BCME_OK; bool update; bool ht40_allowed; u8 *pbuf = NULL; #define LOCAL_BUF_LEN 1024 pbuf = kzalloc(LOCAL_BUF_LEN, GFP_KERNEL); if (pbuf == NULL) { WL_ERR(("failed to allocate local buf\n")); return -ENOMEM; } list = (wl_uint32_list_t *)(void *)pbuf; list->count = htod32(WL_NUMCHANSPECS); err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL, 0, pbuf, LOCAL_BUF_LEN, 0, &wl->ioctl_buf_sync); if (err != 0) { WL_ERR(("get chanspecs failed with %d\n", err)); kfree(pbuf); return err; } #undef LOCAL_BUF_LEN band = array_size = n_2g = n_5g = 0; for (i = 0; i < dtoh32(list->count); i++) { index = 0; update = FALSE; ht40_allowed = FALSE; c = (chanspec_t)dtoh32(list->element[i]); channel = CHSPEC_CHANNEL(c); if (CHSPEC_IS40(c)) { if (CHSPEC_SB_UPPER(c)) channel += CH_10MHZ_APART; else channel -= CH_10MHZ_APART; } if (CHSPEC_IS2G(c) && channel <= CH_MAX_2G_CHANNEL) { band_chan_arr = __wl_2ghz_channels; array_size = ARRAYSIZE(__wl_2ghz_channels); n_cnt = &n_2g; band = IEEE80211_BAND_2GHZ; ht40_allowed = (bw_cap == WLC_N_BW_40ALL) ? TRUE : FALSE; } else if (CHSPEC_IS5G(c) && channel > CH_MAX_2G_CHANNEL) { band_chan_arr = __wl_5ghz_a_channels; array_size = ARRAYSIZE(__wl_5ghz_a_channels); n_cnt = &n_5g; band = IEEE80211_BAND_5GHZ; ht40_allowed = (bw_cap == WLC_N_BW_20ALL) ? 
FALSE : TRUE; } else { WL_ERR(("Invalid Channel received %x\n", channel)); continue; } for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) { if (band_chan_arr[j].hw_value == channel) { update = TRUE; break; } } if (update) index = j; else index = *n_cnt; if (index < array_size) { #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS) band_chan_arr[index].center_freq = ieee80211_channel_to_frequency(channel); #else band_chan_arr[index].center_freq = ieee80211_channel_to_frequency(channel, band); #endif band_chan_arr[index].hw_value = channel; if (CHSPEC_IS40(c) && ht40_allowed) { u32 ht40_flag = band_chan_arr[index].flags & IEEE80211_CHAN_NO_HT40; if (CHSPEC_SB_UPPER(c)) { if (ht40_flag == IEEE80211_CHAN_NO_HT40) band_chan_arr[index].flags &= ~IEEE80211_CHAN_NO_HT40; band_chan_arr[index].flags |= IEEE80211_CHAN_NO_HT40PLUS; } else { band_chan_arr[index].flags &= ~IEEE80211_CHAN_NO_HT40; if (ht40_flag == IEEE80211_CHAN_NO_HT40) band_chan_arr[index].flags |= IEEE80211_CHAN_NO_HT40MINUS; } } else { band_chan_arr[index].flags = IEEE80211_CHAN_NO_HT40; if (band == IEEE80211_BAND_2GHZ) channel |= WL_CHANSPEC_BAND_2G; else channel |= WL_CHANSPEC_BAND_5G; err = wldev_iovar_getint(dev, "per_chan_info", &channel); if (!err) { if (channel & WL_CHAN_RADAR) { band_chan_arr[index].flags |= IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS; } if (channel & WL_CHAN_PASSIVE) { band_chan_arr[index].flags |= IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS; } } } if (!update) (*n_cnt)++; } } __wl_band_2ghz.n_channels = n_2g; __wl_band_5ghz_a.n_channels = n_5g; kfree(pbuf); return err; } s32 wl_update_wiphybands(struct wl_priv *wl) { struct wiphy *wiphy; struct net_device *dev; u32 bandlist[3]; u32 nband = 0; u32 i = 0; s32 err = 0; int nmode = 0; int bw_cap = 0; int index = 0; bool rollback_lock = false; WL_DBG(("Entry")); if (wl == NULL) { wl = wlcfg_drv_priv; mutex_lock(&wl->usr_sync); rollback_lock = true; } dev = wl_to_prmry_ndev(wl); 
memset(bandlist, 0, sizeof(bandlist)); err = wldev_ioctl(dev, WLC_GET_BANDLIST, bandlist, sizeof(bandlist), false); if (unlikely(err)) { WL_ERR(("error read bandlist (%d)\n", err)); goto end_bands; } wiphy = wl_to_wiphy(wl); nband = bandlist[0]; wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; err = wldev_iovar_getint(dev, "nmode", &nmode); if (unlikely(err)) { WL_ERR(("error reading nmode (%d)\n", err)); } else { /* For nmodeonly check bw cap */ err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap); if (unlikely(err)) { WL_ERR(("error get mimo_bw_cap (%d)\n", err)); } } err = wl_construct_reginfo(wl, bw_cap); if (err) { WL_ERR(("wl_construct_reginfo() fails err=%d\n", err)); if (err != BCME_UNSUPPORTED) goto end_bands; /* Ignore error if "chanspecs" command is not supported */ err = 0; } for (i = 1; i <= nband && i < sizeof(bandlist)/sizeof(u32); i++) { index = -1; if (bandlist[i] == WLC_BAND_5G && __wl_band_5ghz_a.n_channels > 0) { wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; index = IEEE80211_BAND_5GHZ; if (bw_cap == WLC_N_BW_40ALL || bw_cap == WLC_N_BW_20IN2G_40IN5G) wiphy->bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; } else if (bandlist[i] == WLC_BAND_2G && __wl_band_2ghz.n_channels > 0) { wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; index = IEEE80211_BAND_2GHZ; if (bw_cap == WLC_N_BW_40ALL) wiphy->bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; } if ((index >= 0) && nmode) { wiphy->bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40; wiphy->bands[index]->ht_cap.ht_supported = TRUE; wiphy->bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; wiphy->bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16; wiphy->bands[index]->ht_cap.mcs.rx_mask[0] = 0xff; } if ((index >= 0) && bw_cap) { wiphy->bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; } } wiphy_apply_custom_regulatory(wiphy, &brcm_regdom); end_bands: if (rollback_lock) 
mutex_unlock(&wl->usr_sync); return err; } static s32 __wl_cfg80211_up(struct wl_priv *wl) { s32 err = 0; struct net_device *ndev = wl_to_prmry_ndev(wl); struct wireless_dev *wdev = ndev->ieee80211_ptr; WL_DBG(("In\n")); err = dhd_config_dongle(wl, false); if (unlikely(err)) return err; err = wl_config_ifmode(wl, ndev, wdev->iftype); if (unlikely(err && err != -EINPROGRESS)) { WL_ERR(("wl_config_ifmode failed\n")); } err = wl_update_wiphybands(wl); if (unlikely(err)) { WL_ERR(("wl_update_wiphybands failed\n")); } err = dhd_monitor_init(wl->pub); err = wl_invoke_iscan(wl); wl_set_drv_status(wl, READY, ndev); return err; } static s32 __wl_cfg80211_down(struct wl_priv *wl) { s32 err = 0; unsigned long flags; struct net_info *iter, *next; struct net_device *ndev = wl_to_prmry_ndev(wl); #ifdef WL_ENABLE_P2P_IF struct wiphy *wiphy = wl_to_prmry_ndev(wl)->ieee80211_ptr->wiphy; struct net_device *p2p_net = wl->p2p_net; #endif WL_DBG(("In\n")); /* Check if cfg80211 interface is already down */ if (!wl_get_drv_status(wl, READY, ndev)) return err; /* it is even not ready */ for_each_ndev(wl, iter, next) wl_set_drv_status(wl, SCAN_ABORTING, iter->ndev); wl_term_iscan(wl); spin_lock_irqsave(&wl->cfgdrv_lock, flags); if (wl->scan_request) { cfg80211_scan_done(wl->scan_request, true); wl->scan_request = NULL; } for_each_ndev(wl, iter, next) { wl_clr_drv_status(wl, READY, iter->ndev); wl_clr_drv_status(wl, SCANNING, iter->ndev); wl_clr_drv_status(wl, SCAN_ABORTING, iter->ndev); wl_clr_drv_status(wl, CONNECTING, iter->ndev); wl_clr_drv_status(wl, CONNECTED, iter->ndev); wl_clr_drv_status(wl, DISCONNECTING, iter->ndev); wl_clr_drv_status(wl, AP_CREATED, iter->ndev); wl_clr_drv_status(wl, AP_CREATING, iter->ndev); } wl_to_prmry_ndev(wl)->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION; #ifdef WL_ENABLE_P2P_IF wiphy->interface_modes = (wiphy->interface_modes) & (~(BIT(NL80211_IFTYPE_P2P_CLIENT)| BIT(NL80211_IFTYPE_P2P_GO))); if ((p2p_net) && (p2p_net->flags & IFF_UP)) { /* p2p0 
interface is still UP. Bring it down */ p2p_net->flags &= ~IFF_UP; } #endif /* WL_ENABLE_P2P_IF */ spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); DNGL_FUNC(dhd_cfg80211_down, (wl)); wl_flush_eq(wl); wl_link_down(wl); if (wl->p2p_supported) wl_cfgp2p_down(wl); dhd_monitor_uninit(); return err; } s32 wl_cfg80211_up(void *para) { struct wl_priv *wl; s32 err = 0; WL_DBG(("In\n")); wl = wlcfg_drv_priv; mutex_lock(&wl->usr_sync); wl_cfg80211_attach_post(wl_to_prmry_ndev(wl)); err = __wl_cfg80211_up(wl); if (err) WL_ERR(("__wl_cfg80211_up failed\n")); mutex_unlock(&wl->usr_sync); return err; } /* Private Event to Supplicant with indication that chip hangs */ int wl_cfg80211_hang(struct net_device *dev, u16 reason) { struct wl_priv *wl; wl = wlcfg_drv_priv; WL_ERR(("In : chip crash eventing\n")); cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL); if (wl != NULL) { wl_link_down(wl); } return 0; } s32 wl_cfg80211_down(void *para) { struct wl_priv *wl; s32 err = 0; WL_DBG(("In\n")); wl = wlcfg_drv_priv; mutex_lock(&wl->usr_sync); err = __wl_cfg80211_down(wl); mutex_unlock(&wl->usr_sync); return err; } static void *wl_read_prof(struct wl_priv *wl, struct net_device *ndev, s32 item) { unsigned long flags; void *rptr = NULL; struct wl_profile *profile = wl_get_profile_by_netdev(wl, ndev); if (!profile) return NULL; spin_lock_irqsave(&wl->cfgdrv_lock, flags); switch (item) { case WL_PROF_SEC: rptr = &profile->sec; break; case WL_PROF_ACT: rptr = &profile->active; break; case WL_PROF_BSSID: rptr = profile->bssid; break; case WL_PROF_PENDING_BSSID: rptr = profile->pending_bssid; break; case WL_PROF_SSID: rptr = &profile->ssid; break; } spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); if (!rptr) WL_ERR(("invalid item (%d)\n", item)); return rptr; } static s32 wl_update_prof(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data, s32 item) { s32 err = 0; struct wlc_ssid *ssid; unsigned long flags; struct wl_profile *profile = 
wl_get_profile_by_netdev(wl, ndev); if (!profile) return WL_INVALID; spin_lock_irqsave(&wl->cfgdrv_lock, flags); switch (item) { case WL_PROF_SSID: ssid = (wlc_ssid_t *) data; memset(profile->ssid.SSID, 0, sizeof(profile->ssid.SSID)); memcpy(profile->ssid.SSID, ssid->SSID, ssid->SSID_len); profile->ssid.SSID_len = ssid->SSID_len; break; case WL_PROF_BSSID: if (data) memcpy(profile->bssid, data, ETHER_ADDR_LEN); else memset(profile->bssid, 0, ETHER_ADDR_LEN); break; case WL_PROF_PENDING_BSSID: if (data) memcpy(profile->pending_bssid, data, ETHER_ADDR_LEN); else memset(profile->pending_bssid, 0, ETHER_ADDR_LEN); break; case WL_PROF_SEC: memcpy(&profile->sec, data, sizeof(profile->sec)); break; case WL_PROF_ACT: profile->active = *(bool *)data; break; case WL_PROF_BEACONINT: profile->beacon_interval = *(u16 *)data; break; case WL_PROF_DTIMPERIOD: profile->dtim_period = *(u8 *)data; break; default: WL_ERR(("unsupported item (%d)\n", item)); err = -EOPNOTSUPP; break; } spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); return err; } void wl_cfg80211_dbg_level(u32 level) { /* * prohibit to change debug level * by insmod parameter. 
* eventually debug level will be configured * in compile time by using CONFIG_XXX */ /* wl_dbg_level = level; */ } static bool wl_is_ibssmode(struct wl_priv *wl, struct net_device *ndev) { return wl_get_mode_by_netdev(wl, ndev) == WL_MODE_IBSS; } static __used bool wl_is_ibssstarter(struct wl_priv *wl) { return wl->ibss_starter; } static void wl_rst_ie(struct wl_priv *wl) { struct wl_ie *ie = wl_to_ie(wl); ie->offset = 0; } static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v) { struct wl_ie *ie = wl_to_ie(wl); s32 err = 0; if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) { WL_ERR(("ei crosses buffer boundary\n")); return -ENOSPC; } ie->buf[ie->offset] = t; ie->buf[ie->offset + 1] = l; memcpy(&ie->buf[ie->offset + 2], v, l); ie->offset += l + 2; return err; } static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size) { u8 *ssidie; ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size); if (!ssidie) return; if (ssidie[1] != bi->SSID_len) { if (ssidie[1]) { WL_ERR(("%s: Wrong SSID len: %d != %d\n", __func__, ssidie[1], bi->SSID_len)); return; } memmove(ssidie + bi->SSID_len + 2, ssidie + 2, *ie_size - (ssidie + 2 - ie_stream)); memcpy(ssidie + 2, bi->SSID, bi->SSID_len); *ie_size = *ie_size + bi->SSID_len; ssidie[1] = bi->SSID_len; return; } if (*(ssidie + 2) == '\0') memcpy(ssidie + 2, bi->SSID, bi->SSID_len); return; } static s32 wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size) { struct wl_ie *ie = wl_to_ie(wl); s32 err = 0; if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) { WL_ERR(("ei_stream crosses buffer boundary\n")); return -ENOSPC; } memcpy(&ie->buf[ie->offset], ie_stream, ie_size); ie->offset += ie_size; return err; } static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size) { struct wl_ie *ie = wl_to_ie(wl); s32 err = 0; if (unlikely(ie->offset > dst_size)) { WL_ERR(("dst_size is not enough\n")); return -ENOSPC; } memcpy(dst, &ie->buf[0], ie->offset); return err; } static u32 
wl_get_ielen(struct wl_priv *wl) { struct wl_ie *ie = wl_to_ie(wl); return ie->offset; } static void wl_link_up(struct wl_priv *wl) { wl->link_up = true; } static void wl_link_down(struct wl_priv *wl) { struct wl_connect_info *conn_info = wl_to_conn(wl); WL_DBG(("In\n")); wl->link_up = false; conn_info->req_ie_len = 0; conn_info->resp_ie_len = 0; } static unsigned long wl_lock_eq(struct wl_priv *wl) { unsigned long flags; spin_lock_irqsave(&wl->eq_lock, flags); return flags; } static void wl_unlock_eq(struct wl_priv *wl, unsigned long flags) { spin_unlock_irqrestore(&wl->eq_lock, flags); } static void wl_init_eq_lock(struct wl_priv *wl) { spin_lock_init(&wl->eq_lock); } static void wl_delay(u32 ms) { if (in_atomic() || (ms < jiffies_to_msecs(1))) { mdelay(ms); } else { msleep(ms); } } s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr) { struct wl_priv *wl = wlcfg_drv_priv; struct ether_addr p2pif_addr; struct ether_addr primary_mac; if (!wl->p2p) return -1; if (!p2p_is_on(wl)) { get_primary_mac(wl, &primary_mac); wl_cfgp2p_generate_bss_mac(&primary_mac, p2pdev_addr, &p2pif_addr); } else { memcpy(p2pdev_addr->octet, wl->p2p->dev_addr.octet, ETHER_ADDR_LEN); } return 0; } s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len) { struct wl_priv *wl; wl = wlcfg_drv_priv; return wl_cfgp2p_set_p2p_noa(wl, net, buf, len); } s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len) { struct wl_priv *wl; wl = wlcfg_drv_priv; return wl_cfgp2p_get_p2p_noa(wl, net, buf, len); } s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len) { struct wl_priv *wl; wl = wlcfg_drv_priv; return wl_cfgp2p_set_p2p_ps(wl, net, buf, len); } s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len, enum wl_management_type type) { struct wl_priv *wl; struct net_device *ndev = NULL; struct ether_addr primary_mac; s32 ret = 0; s32 bssidx = 0; s32 pktflag = 0; wl = wlcfg_drv_priv; if 
(wl_get_drv_status(wl, AP_CREATING, net) || wl_get_drv_status(wl, AP_CREATED, net)) { ndev = net; bssidx = 0; } else if (wl->p2p) { if (net == wl->p2p_net) { net = wl_to_prmry_ndev(wl); } if (!wl->p2p->on) { get_primary_mac(wl, &primary_mac); wl_cfgp2p_generate_bss_mac(&primary_mac, &wl->p2p->dev_addr, &wl->p2p->int_addr); /* In case of p2p_listen command, supplicant send remain_on_channel * without turning on P2P */ p2p_on(wl) = true; ret = wl_cfgp2p_enable_discovery(wl, ndev, NULL, 0); if (unlikely(ret)) { goto exit; } } if (net != wl_to_prmry_ndev(wl)) { if (wl_get_mode_by_netdev(wl, net) == WL_MODE_AP) { ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION); bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION); } } else { ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY); bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE); } } if (ndev != NULL) { switch (type) { case WL_BEACON: pktflag = VNDR_IE_BEACON_FLAG; break; case WL_PROBE_RESP: pktflag = VNDR_IE_PRBRSP_FLAG; break; case WL_ASSOC_RESP: pktflag = VNDR_IE_ASSOCRSP_FLAG; break; } if (pktflag) ret = wl_cfgp2p_set_management_ie(wl, ndev, bssidx, pktflag, buf, len); } exit: return ret; } static const struct rfkill_ops wl_rfkill_ops = { .set_block = wl_rfkill_set }; static int wl_rfkill_set(void *data, bool blocked) { struct wl_priv *wl = (struct wl_priv *)data; WL_DBG(("Enter \n")); WL_DBG(("RF %s\n", blocked ? 
"blocked" : "unblocked")); if (!wl) return -EINVAL; wl->rf_blocked = blocked; return 0; } static int wl_setup_rfkill(struct wl_priv *wl, bool setup) { s32 err = 0; WL_DBG(("Enter \n")); if (!wl) return -EINVAL; if (setup) { wl->rfkill = rfkill_alloc("brcmfmac-wifi", wl_cfg80211_get_parent_dev(), RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)wl); if (!wl->rfkill) { err = -ENOMEM; goto err_out; } err = rfkill_register(wl->rfkill); if (err) rfkill_destroy(wl->rfkill); } else { if (!wl->rfkill) { err = -ENOMEM; goto err_out; } rfkill_unregister(wl->rfkill); rfkill_destroy(wl->rfkill); } err_out: return err; } struct device *wl_cfg80211_get_parent_dev(void) { return cfg80211_parent_dev; } void wl_cfg80211_set_parent_dev(void *dev) { cfg80211_parent_dev = dev; } static void wl_cfg80211_clear_parent_dev(void) { cfg80211_parent_dev = NULL; } static void get_primary_mac(struct wl_priv *wl, struct ether_addr *mac) { wldev_iovar_getbuf_bsscfg(wl_to_prmry_ndev(wl), "cur_etheraddr", NULL, 0, wl->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &wl->ioctl_buf_sync); memcpy(mac->octet, wl->ioctl_buf, ETHER_ADDR_LEN); } int wl_cfg80211_do_driver_init(struct net_device *net) { struct wl_priv *wl = *(struct wl_priv **)netdev_priv(net); if (!wl || !wl->wdev) return -EINVAL; if (dhd_do_driver_init(wl->wdev->netdev) < 0) return -1; return 0; } void wl_cfg80211_enable_trace(int level) { wl_dbg_level |= WL_DBG_DBG; } static s32 wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, struct net_device *dev, u64 cookie) { return 0; }
gpl-2.0
Bootz/OpenStage-Project
src/server/scripts/EasternKingdoms/MoltenCore/boss_majordomo_executus.cpp
1
7882
/* * Copyright (C) 2008-2011 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* ScriptData SDName: Boss_Majordomo_Executus SD%Complete: 30 SDComment: Correct spawning and Event NYI SDCategory: Molten Core EndScriptData */ #include "ObjectMgr.h" #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "ScriptedGossip.h" #include "molten_core.h" enum Texts { SAY_AGGRO = -1409003, SAY_SPAWN = -1409004, SAY_SLAY = -1409005, SAY_SPECIAL = -1409006, SAY_DEFEAT = -1409007, SAY_SUMMON_MAJ = -1409008, SAY_ARRIVAL1_RAG = -1409009, SAY_ARRIVAL2_MAJ = -1409010, SAY_ARRIVAL3_RAG = -1409011, SAY_ARRIVAL5_RAG = -1409012, }; enum Spells { SPELL_MAGIC_REFLECTION = 20619, SPELL_DAMAGE_REFLECTION = 21075, SPELL_BLAST_WAVE = 20229, SPELL_AEGIS_OF_RAGNAROS = 20620, SPELL_TELEPORT = 20618, SPELL_SUMMON_RAGNAROS = 19774, }; #define GOSSIP_HELLO 4995 #define GOSSIP_SELECT "Tell me more." 
enum Events { EVENT_MAGIC_REFLECTION = 1, EVENT_DAMAGE_REFLECTION = 2, EVENT_BLAST_WAVE = 3, EVENT_TELEPORT = 4, EVENT_OUTRO_1 = 5, EVENT_OUTRO_2 = 6, EVENT_OUTRO_3 = 7, }; class boss_majordomo : public CreatureScript { public: boss_majordomo() : CreatureScript("boss_majordomo") { } struct boss_majordomoAI : public BossAI { boss_majordomoAI(Creature* pCreature) : BossAI(pCreature, BOSS_MAJORDOMO_EXECUTUS) { } void KilledUnit(Unit* /*victim*/) { if (urand(0, 99) < 25) DoScriptText(SAY_SLAY, me); } void EnterCombat(Unit* who) { BossAI::EnterCombat(who); DoScriptText(SAY_AGGRO, me); events.ScheduleEvent(EVENT_MAGIC_REFLECTION, 30000); events.ScheduleEvent(EVENT_DAMAGE_REFLECTION, 15000); events.ScheduleEvent(EVENT_BLAST_WAVE, 10000); events.ScheduleEvent(EVENT_TELEPORT, 20000); } void UpdateAI(const uint32 diff) { if (instance && instance->GetBossState(BOSS_MAJORDOMO_EXECUTUS) != DONE) { if (!UpdateVictim()) return; events.Update(diff); if (!me->FindNearestCreature(NPC_FLAMEWAKER_HEALER, 100.0f) && !me->FindNearestCreature(NPC_FLAMEWAKER_ELITE, 100.0f)) { instance->UpdateEncounterState(ENCOUNTER_CREDIT_KILL_CREATURE, me->GetEntry(), me); me->setFaction(35); me->AI()->EnterEvadeMode(); DoScriptText(SAY_DEFEAT, me); _JustDied(); events.ScheduleEvent(EVENT_OUTRO_1, 32000); return; } if (me->HasUnitState(UNIT_STAT_CASTING)) return; if (HealthBelowPct(50)) DoCast(me, SPELL_AEGIS_OF_RAGNAROS, true); while (uint32 eventId = events.ExecuteEvent()) { switch (eventId) { case EVENT_MAGIC_REFLECTION: DoCast(me, SPELL_MAGIC_REFLECTION); events.ScheduleEvent(EVENT_MAGIC_REFLECTION, 30000); break; case EVENT_DAMAGE_REFLECTION: DoCast(me, SPELL_DAMAGE_REFLECTION); events.ScheduleEvent(EVENT_DAMAGE_REFLECTION, 30000); break; case EVENT_BLAST_WAVE: DoCastVictim(SPELL_BLAST_WAVE); events.ScheduleEvent(EVENT_BLAST_WAVE, 10000); break; case EVENT_TELEPORT: if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 1)) DoCast(target, SPELL_TELEPORT); events.ScheduleEvent(EVENT_TELEPORT, 20000); 
break; default: break; } } DoMeleeAttackIfReady(); } else { events.Update(diff); while (uint32 eventId = events.ExecuteEvent()) { switch (eventId) { case EVENT_OUTRO_1: me->NearTeleportTo(RagnarosTelePos.GetPositionX(), RagnarosTelePos.GetPositionY(), RagnarosTelePos.GetPositionZ(), RagnarosTelePos.GetOrientation()); me->SetFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); break; case EVENT_OUTRO_2: if (instance) instance->instance->SummonCreature(NPC_RAGNAROS, RagnarosSummonPos); break; case EVENT_OUTRO_3: DoScriptText(SAY_ARRIVAL2_MAJ, me); break; default: break; } } } } void DoAction(const int32 action) { if (action == ACTION_START_RAGNAROS) { me->RemoveFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); DoScriptText(SAY_SUMMON_MAJ, me); events.ScheduleEvent(EVENT_OUTRO_2, 8000); events.ScheduleEvent(EVENT_OUTRO_3, 24000); } else if (action == ACTION_START_RAGNAROS_ALT) { me->setFaction(35); me->SetFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); } } }; bool OnGossipHello(Player* player, Creature* creature) { player->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_SELECT, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF+1); player->SEND_GOSSIP_MENU(GOSSIP_HELLO, creature->GetGUID()); return true; } bool OnGossipSelect(Player* player, Creature* creature, uint32 /*uiSender*/, uint32 /*uiAction*/) { player->CLOSE_GOSSIP_MENU(); creature->AI()->DoAction(ACTION_START_RAGNAROS); return true; } CreatureAI* GetAI(Creature* creature) const { return new boss_majordomoAI(creature); } }; void AddSC_boss_majordomo() { new boss_majordomo(); }
gpl-2.0
matrixise/empathy-with-import-gajim-account
libempathy/empathy-tp-contact-list.c
1
31425
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */ /* * Copyright (C) 2007 Xavier Claessens <xclaesse@gmail.com> * Copyright (C) 2007-2008 Collabora Ltd. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Xavier Claessens <xclaesse@gmail.com> */ #include <config.h> #include <string.h> #include <glib/gi18n-lib.h> #include <telepathy-glib/channel.h> #include <telepathy-glib/connection.h> #include <telepathy-glib/util.h> #include <telepathy-glib/dbus.h> #include "empathy-tp-contact-list.h" #include "empathy-contact-list.h" #include "empathy-tp-group.h" #include "empathy-utils.h" #define DEBUG_FLAG EMPATHY_DEBUG_TP | EMPATHY_DEBUG_CONTACT #include "empathy-debug.h" #define GET_PRIV(obj) EMPATHY_GET_PRIV (obj, EmpathyTpContactList) typedef struct { McAccount *account; TpConnection *connection; const gchar *protocol_group; gboolean ready; EmpathyTpGroup *publish; EmpathyTpGroup *subscribe; GList *members; GList *pendings; GList *groups; GHashTable *contacts_groups; } EmpathyTpContactListPriv; typedef enum { TP_CONTACT_LIST_TYPE_PUBLISH, TP_CONTACT_LIST_TYPE_SUBSCRIBE, TP_CONTACT_LIST_TYPE_UNKNOWN } TpContactListType; static void tp_contact_list_iface_init (EmpathyContactListIface *iface); enum { DESTROY, LAST_SIGNAL }; enum { PROP_0, PROP_ACCOUNT, }; static guint 
signals[LAST_SIGNAL]; G_DEFINE_TYPE_WITH_CODE (EmpathyTpContactList, empathy_tp_contact_list, G_TYPE_OBJECT, G_IMPLEMENT_INTERFACE (EMPATHY_TYPE_CONTACT_LIST, tp_contact_list_iface_init)); static void tp_contact_list_group_destroy_cb (EmpathyTpGroup *group, EmpathyTpContactList *list) { EmpathyTpContactListPriv *priv = GET_PRIV (list); DEBUG ("Group destroyed: %s", empathy_tp_group_get_name (group)); priv->groups = g_list_remove (priv->groups, group); g_object_unref (group); } static void tp_contact_list_group_member_added_cb (EmpathyTpGroup *group, EmpathyContact *contact, EmpathyContact *actor, guint reason, const gchar *message, EmpathyTpContactList *list) { EmpathyTpContactListPriv *priv = GET_PRIV (list); const gchar *group_name; GList **groups; if (!g_list_find (priv->members, contact)) { return; } groups = g_hash_table_lookup (priv->contacts_groups, contact); if (!groups) { groups = g_slice_new0 (GList*); g_hash_table_insert (priv->contacts_groups, g_object_ref (contact), groups); } group_name = empathy_tp_group_get_name (group); if (!g_list_find_custom (*groups, group_name, (GCompareFunc) strcmp)) { DEBUG ("Contact %s (%d) added to group %s", empathy_contact_get_id (contact), empathy_contact_get_handle (contact), group_name); *groups = g_list_prepend (*groups, g_strdup (group_name)); g_signal_emit_by_name (list, "groups-changed", contact, group_name, TRUE); } } static void tp_contact_list_group_member_removed_cb (EmpathyTpGroup *group, EmpathyContact *contact, EmpathyContact *actor, guint reason, const gchar *message, EmpathyTpContactList *list) { EmpathyTpContactListPriv *priv = GET_PRIV (list); const gchar *group_name; GList **groups, *l; if (!g_list_find (priv->members, contact)) { return; } groups = g_hash_table_lookup (priv->contacts_groups, contact); if (!groups) { return; } group_name = empathy_tp_group_get_name (group); if ((l = g_list_find_custom (*groups, group_name, (GCompareFunc) strcmp))) { DEBUG ("Contact %s (%d) removed from group %s", 
empathy_contact_get_id (contact), empathy_contact_get_handle (contact), group_name); *groups = g_list_delete_link (*groups, l); g_signal_emit_by_name (list, "groups-changed", contact, group_name, FALSE); } } static EmpathyTpGroup * tp_contact_list_find_group (EmpathyTpContactList *list, const gchar *group) { EmpathyTpContactListPriv *priv = GET_PRIV (list); GList *l; for (l = priv->groups; l; l = l->next) { if (!tp_strdiff (group, empathy_tp_group_get_name (l->data))) { return l->data; } } return NULL; } static TpContactListType tp_contact_list_get_type (EmpathyTpContactList *list, EmpathyTpGroup *group) { const gchar *name; name = empathy_tp_group_get_name (group); if (!tp_strdiff (name, "subscribe")) { return TP_CONTACT_LIST_TYPE_SUBSCRIBE; } else if (!tp_strdiff (name, "publish")) { return TP_CONTACT_LIST_TYPE_PUBLISH; } return TP_CONTACT_LIST_TYPE_UNKNOWN; } static void tp_contact_list_add_member (EmpathyTpContactList *list, EmpathyContact *contact, EmpathyContact *actor, guint reason, const gchar *message) { EmpathyTpContactListPriv *priv = GET_PRIV (list); GList *l; /* Add to the list and emit signal */ priv->members = g_list_prepend (priv->members, g_object_ref (contact)); g_signal_emit_by_name (list, "members-changed", contact, actor, reason, message, TRUE); /* This contact is now member, implicitly accept pending. 
*/ if (g_list_find (priv->pendings, contact)) { empathy_tp_group_add_member (priv->publish, contact, ""); } /* Update groups of the contact */ for (l = priv->groups; l; l = l->next) { if (empathy_tp_group_is_member (l->data, contact)) { tp_contact_list_group_member_added_cb (l->data, contact, NULL, 0, NULL, list); } } } static void tp_contact_list_added_cb (EmpathyTpGroup *group, EmpathyContact *contact, EmpathyContact *actor, guint reason, const gchar *message, EmpathyTpContactList *list) { EmpathyTpContactListPriv *priv = GET_PRIV (list); TpContactListType list_type; list_type = tp_contact_list_get_type (list, group); DEBUG ("Contact %s (%d) added to list type %d", empathy_contact_get_id (contact), empathy_contact_get_handle (contact), list_type); /* We now get the presence of that contact, add it to members */ if (list_type == TP_CONTACT_LIST_TYPE_SUBSCRIBE && !g_list_find (priv->members, contact)) { tp_contact_list_add_member (list, contact, actor, reason, message); } /* We now send our presence to that contact, remove it from pendings */ if (list_type == TP_CONTACT_LIST_TYPE_PUBLISH && g_list_find (priv->pendings, contact)) { g_signal_emit_by_name (list, "pendings-changed", contact, actor, reason, message, FALSE); priv->pendings = g_list_remove (priv->pendings, contact); g_object_unref (contact); } } static void tp_contact_list_removed_cb (EmpathyTpGroup *group, EmpathyContact *contact, EmpathyContact *actor, guint reason, const gchar *message, EmpathyTpContactList *list) { EmpathyTpContactListPriv *priv = GET_PRIV (list); TpContactListType list_type; list_type = tp_contact_list_get_type (list, group); DEBUG ("Contact %s (%d) removed from list type %d", empathy_contact_get_id (contact), empathy_contact_get_handle (contact), list_type); /* This contact refuses to send us his presence, remove from members. 
*/ if (list_type == TP_CONTACT_LIST_TYPE_SUBSCRIBE && g_list_find (priv->members, contact)) { g_signal_emit_by_name (list, "members-changed", contact, actor, reason, message, FALSE); priv->members = g_list_remove (priv->members, contact); g_object_unref (contact); } /* We refuse to send our presence to that contact, remove from pendings */ if (list_type == TP_CONTACT_LIST_TYPE_PUBLISH && g_list_find (priv->pendings, contact)) { g_signal_emit_by_name (list, "pendings-changed", contact, actor, reason, message, FALSE); priv->pendings = g_list_remove (priv->pendings, contact); g_object_unref (contact); } } static void tp_contact_list_pending_cb (EmpathyTpGroup *group, EmpathyContact *contact, EmpathyContact *actor, guint reason, const gchar *message, EmpathyTpContactList *list) { EmpathyTpContactListPriv *priv = GET_PRIV (list); TpContactListType list_type; list_type = tp_contact_list_get_type (list, group); DEBUG ("Contact %s (%d) pending in list type %d", empathy_contact_get_id (contact), empathy_contact_get_handle (contact), list_type); /* We want this contact in our contact list but we don't get its * presence yet. Add to members anyway. */ if (list_type == TP_CONTACT_LIST_TYPE_SUBSCRIBE && !g_list_find (priv->members, contact)) { tp_contact_list_add_member (list, contact, actor, reason, message); } /* This contact wants our presence, auto accept if he is member, * otherwise he is pending. 
*/ if (list_type == TP_CONTACT_LIST_TYPE_PUBLISH && !g_list_find (priv->pendings, contact)) { if (g_list_find (priv->members, contact)) { empathy_tp_group_add_member (priv->publish, contact, ""); } else { priv->pendings = g_list_prepend (priv->pendings, g_object_ref (contact)); g_signal_emit_by_name (list, "pendings-changed", contact, actor, reason, message, TRUE); } } } static void tp_contact_list_invalidated_cb (TpConnection *connection, guint domain, gint code, gchar *message, EmpathyTpContactList *list) { EmpathyTpContactListPriv *priv = GET_PRIV (list); GList *l; DEBUG ("Connection invalidated"); /* Remove all contacts */ for (l = priv->members; l; l = l->next) { g_signal_emit_by_name (list, "members-changed", l->data, NULL, 0, NULL, FALSE); g_object_unref (l->data); } for (l = priv->pendings; l; l = l->next) { g_signal_emit_by_name (list, "pendings-changed", l->data, NULL, 0, NULL, FALSE); g_object_unref (l->data); } g_list_free (priv->members); g_list_free (priv->pendings); priv->members = NULL; priv->pendings = NULL; /* Tell the world to not use us anymore */ g_signal_emit (list, signals[DESTROY], 0); } static void tp_contact_list_group_list_free (GList **groups) { g_list_foreach (*groups, (GFunc) g_free, NULL); g_list_free (*groups); g_slice_free (GList*, groups); } static void tp_contact_list_add_channel (EmpathyTpContactList *list, const gchar *object_path, const gchar *channel_type, TpHandleType handle_type, guint handle) { EmpathyTpContactListPriv *priv = GET_PRIV (list); TpChannel *channel; EmpathyTpGroup *group; const gchar *group_name; GList *contacts, *l; if (strcmp (channel_type, TP_IFACE_CHANNEL_TYPE_CONTACT_LIST) != 0 || handle_type != TP_HANDLE_TYPE_GROUP) { return; } channel = tp_channel_new (priv->connection, object_path, channel_type, handle_type, handle, NULL); group = empathy_tp_group_new (channel); empathy_run_until_ready (group); g_object_unref (channel); /* Check if already exists */ group_name = empathy_tp_group_get_name (group); if 
(tp_contact_list_find_group (list, group_name)) { g_object_unref (group); return; } /* Add the group */ DEBUG ("New server-side group: %s", group_name); priv->groups = g_list_prepend (priv->groups, group); g_signal_connect (group, "member-added", G_CALLBACK (tp_contact_list_group_member_added_cb), list); g_signal_connect (group, "member-removed", G_CALLBACK (tp_contact_list_group_member_removed_cb), list); g_signal_connect (group, "destroy", G_CALLBACK (tp_contact_list_group_destroy_cb), list); /* Get initial members */ contacts = empathy_tp_group_get_members (group); for (l = contacts; l; l = l->next) { tp_contact_list_group_member_added_cb (group, l->data, NULL, 0, NULL, list); g_object_unref (l->data); } g_list_free (contacts); } static void tp_contact_list_new_channel_cb (TpConnection *proxy, const gchar *object_path, const gchar *channel_type, guint handle_type, guint handle, gboolean suppress_handler, gpointer user_data, GObject *list) { EmpathyTpContactListPriv *priv = GET_PRIV (list); if (!suppress_handler && priv->ready) { tp_contact_list_add_channel (EMPATHY_TP_CONTACT_LIST (list), object_path, channel_type, handle_type, handle); } } static void tp_contact_list_list_channels_cb (TpConnection *connection, const GPtrArray *channels, const GError *error, gpointer user_data, GObject *list) { EmpathyTpContactListPriv *priv = GET_PRIV (list); guint i; if (error) { DEBUG ("Error: %s", error->message); return; } for (i = 0; i < channels->len; i++) { GValueArray *chan_struct; const gchar *object_path; const gchar *channel_type; TpHandleType handle_type; guint handle; chan_struct = g_ptr_array_index (channels, i); object_path = g_value_get_boxed (g_value_array_get_nth (chan_struct, 0)); channel_type = g_value_get_string (g_value_array_get_nth (chan_struct, 1)); handle_type = g_value_get_uint (g_value_array_get_nth (chan_struct, 2)); handle = g_value_get_uint (g_value_array_get_nth (chan_struct, 3)); tp_contact_list_add_channel (EMPATHY_TP_CONTACT_LIST (list), 
object_path, channel_type, handle_type, handle); } priv->ready = TRUE; } static void tp_contact_list_request_channel_cb (TpConnection *connection, const gchar *object_path, const GError *error, gpointer user_data, GObject *weak_object) { EmpathyTpContactList *list = EMPATHY_TP_CONTACT_LIST (weak_object); EmpathyTpContactListPriv *priv = GET_PRIV (list); EmpathyTpGroup *group; TpChannel *channel; TpContactListType list_type; GList *contacts, *l; if (error) { DEBUG ("Error: %s", error->message); return; } channel = tp_channel_new (connection, object_path, TP_IFACE_CHANNEL_TYPE_CONTACT_LIST, TP_HANDLE_TYPE_LIST, GPOINTER_TO_UINT (user_data), NULL); group = empathy_tp_group_new (channel); empathy_run_until_ready (group); list_type = tp_contact_list_get_type (list, group); if (list_type == TP_CONTACT_LIST_TYPE_PUBLISH && !priv->publish) { DEBUG ("Got publish list"); priv->publish = group; /* Publish is the list of contacts to who we send our * presence. Makes no sense to be in remote-pending */ g_signal_connect (group, "local-pending", G_CALLBACK (tp_contact_list_pending_cb), list); contacts = empathy_tp_group_get_local_pendings (group); for (l = contacts; l; l = l->next) { EmpathyPendingInfo *info = l->data; tp_contact_list_pending_cb (group, info->member, info->actor, 0, info->message, list); empathy_pending_info_free (info); } g_list_free (contacts); } else if (list_type == TP_CONTACT_LIST_TYPE_SUBSCRIBE && !priv->subscribe) { DEBUG ("Got subscribe list"); priv->subscribe = group; /* Subscribe is the list of contacts from who we * receive presence. 
Makes no sense to be in * local-pending */ g_signal_connect (group, "remote-pending", G_CALLBACK (tp_contact_list_pending_cb), list); contacts = empathy_tp_group_get_remote_pendings (group); for (l = contacts; l; l = l->next) { tp_contact_list_pending_cb (group, l->data, NULL, 0, NULL, list); g_object_unref (l->data); } g_list_free (contacts); } else { DEBUG ("Type of contact list channel unknown or aleady " "have that list: %s", empathy_tp_group_get_name (group)); g_object_unref (group); return; } /* For all list types when need to get members */ g_signal_connect (group, "member-added", G_CALLBACK (tp_contact_list_added_cb), list); g_signal_connect (group, "member-removed", G_CALLBACK (tp_contact_list_removed_cb), list); contacts = empathy_tp_group_get_members (group); for (l = contacts; l; l = l->next) { tp_contact_list_added_cb (group, l->data, NULL, 0, NULL, list); g_object_unref (l->data); } g_list_free (contacts); } static void tp_contact_list_request_handle_cb (TpConnection *connection, const GArray *handles, const GError *error, gpointer user_data, GObject *list) { guint handle; if (error) { DEBUG ("Error: %s", error->message); return; } handle = g_array_index (handles, guint, 0); tp_cli_connection_call_request_channel (connection, -1, TP_IFACE_CHANNEL_TYPE_CONTACT_LIST, TP_HANDLE_TYPE_LIST, handle, TRUE, tp_contact_list_request_channel_cb, GUINT_TO_POINTER (handle), NULL, list); } static void tp_contact_list_request_list (EmpathyTpContactList *list, const gchar *type) { EmpathyTpContactListPriv *priv = GET_PRIV (list); const gchar *names[] = {type, NULL}; tp_cli_connection_call_request_handles (priv->connection, -1, TP_HANDLE_TYPE_LIST, names, tp_contact_list_request_handle_cb, NULL, NULL, G_OBJECT (list)); } static void tp_contact_list_finalize (GObject *object) { EmpathyTpContactListPriv *priv; EmpathyTpContactList *list; list = EMPATHY_TP_CONTACT_LIST (object); priv = GET_PRIV (list); DEBUG ("finalize: %p", object); if (priv->subscribe) { g_object_unref 
(priv->subscribe); } if (priv->publish) { g_object_unref (priv->publish); } if (priv->account) { g_object_unref (priv->account); } if (priv->connection) { g_signal_handlers_disconnect_by_func (priv->connection, tp_contact_list_invalidated_cb, object); g_object_unref (priv->connection); } g_hash_table_destroy (priv->contacts_groups); g_list_foreach (priv->groups, (GFunc) g_object_unref, NULL); g_list_free (priv->groups); g_list_foreach (priv->members, (GFunc) g_object_unref, NULL); g_list_free (priv->members); g_list_foreach (priv->pendings, (GFunc) g_object_unref, NULL); g_list_free (priv->pendings); G_OBJECT_CLASS (empathy_tp_contact_list_parent_class)->finalize (object); } static void tp_contact_list_connection_ready (TpConnection *connection, const GError *error, gpointer list) { EmpathyTpContactListPriv *priv = GET_PRIV (list); if (error) { tp_contact_list_invalidated_cb (connection, error->domain, error->code, error->message, EMPATHY_TP_CONTACT_LIST (list)); return; } g_signal_connect (priv->connection, "invalidated", G_CALLBACK (tp_contact_list_invalidated_cb), list); tp_contact_list_request_list (list, "publish"); tp_contact_list_request_list (list, "subscribe"); tp_cli_connection_call_list_channels (priv->connection, -1, tp_contact_list_list_channels_cb, NULL, NULL, list); tp_cli_connection_connect_to_new_channel (priv->connection, tp_contact_list_new_channel_cb, NULL, NULL, list, NULL); } static void tp_contact_list_constructed (GObject *list) { EmpathyTpContactListPriv *priv = GET_PRIV (list); MissionControl *mc; guint status; McProfile *profile; const gchar *protocol_name; /* Get the connection. 
status==0 means CONNECTED */ mc = empathy_mission_control_new (); status = mission_control_get_connection_status (mc, priv->account, NULL); g_return_if_fail (status == 0); priv->connection = mission_control_get_tpconnection (mc, priv->account, NULL); g_return_if_fail (priv->connection != NULL); g_object_unref (mc); tp_connection_call_when_ready (priv->connection, tp_contact_list_connection_ready, list); /* Check for protocols that does not support contact groups. We can * put all contacts into a special group in that case. * FIXME: Default group should be an information in the profile */ profile = mc_account_get_profile (priv->account); protocol_name = mc_profile_get_protocol_name (profile); if (strcmp (protocol_name, "local-xmpp") == 0) { priv->protocol_group = _("People nearby"); } g_object_unref (profile); } static void tp_contact_list_get_property (GObject *object, guint param_id, GValue *value, GParamSpec *pspec) { EmpathyTpContactListPriv *priv = GET_PRIV (object); switch (param_id) { case PROP_ACCOUNT: g_value_set_object (value, priv->account); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, param_id, pspec); break; }; } static void tp_contact_list_set_property (GObject *object, guint param_id, const GValue *value, GParamSpec *pspec) { EmpathyTpContactListPriv *priv = GET_PRIV (object); switch (param_id) { case PROP_ACCOUNT: priv->account = g_object_ref (g_value_get_object (value)); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, param_id, pspec); break; }; } static void empathy_tp_contact_list_class_init (EmpathyTpContactListClass *klass) { GObjectClass *object_class = G_OBJECT_CLASS (klass); object_class->finalize = tp_contact_list_finalize; object_class->constructed = tp_contact_list_constructed; object_class->get_property = tp_contact_list_get_property; object_class->set_property = tp_contact_list_set_property; g_object_class_install_property (object_class, PROP_ACCOUNT, g_param_spec_object ("account", "The Account", "The account 
associated with the contact list", MC_TYPE_ACCOUNT, G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY)); signals[DESTROY] = g_signal_new ("destroy", G_TYPE_FROM_CLASS (klass), G_SIGNAL_RUN_LAST, 0, NULL, NULL, g_cclosure_marshal_VOID__VOID, G_TYPE_NONE, 0); g_type_class_add_private (object_class, sizeof (EmpathyTpContactListPriv)); } static void empathy_tp_contact_list_init (EmpathyTpContactList *list) { EmpathyTpContactListPriv *priv = G_TYPE_INSTANCE_GET_PRIVATE (list, EMPATHY_TYPE_TP_CONTACT_LIST, EmpathyTpContactListPriv); list->priv = priv; priv->contacts_groups = g_hash_table_new_full (g_direct_hash, g_direct_equal, (GDestroyNotify) g_object_unref, (GDestroyNotify) tp_contact_list_group_list_free); } EmpathyTpContactList * empathy_tp_contact_list_new (McAccount *account) { return g_object_new (EMPATHY_TYPE_TP_CONTACT_LIST, "account", account, NULL); } McAccount * empathy_tp_contact_list_get_account (EmpathyTpContactList *list) { EmpathyTpContactListPriv *priv; g_return_val_if_fail (EMPATHY_IS_TP_CONTACT_LIST (list), NULL); priv = GET_PRIV (list); return priv->account; } static void tp_contact_list_add (EmpathyContactList *list, EmpathyContact *contact, const gchar *message) { EmpathyTpContactListPriv *priv = GET_PRIV (list); g_return_if_fail (EMPATHY_IS_TP_CONTACT_LIST (list)); if (priv->subscribe) { empathy_tp_group_add_member (priv->subscribe, contact, message); } if (priv->publish && g_list_find (priv->pendings, contact)) { empathy_tp_group_add_member (priv->publish, contact, message); } } static void tp_contact_list_remove (EmpathyContactList *list, EmpathyContact *contact, const gchar *message) { EmpathyTpContactListPriv *priv = GET_PRIV (list); g_return_if_fail (EMPATHY_IS_TP_CONTACT_LIST (list)); if (priv->subscribe) { empathy_tp_group_remove_member (priv->subscribe, contact, message); } if (priv->publish) { empathy_tp_group_remove_member (priv->publish, contact, message); } } static GList * tp_contact_list_get_members (EmpathyContactList *list) { 
EmpathyTpContactListPriv *priv = GET_PRIV (list); g_return_val_if_fail (EMPATHY_IS_TP_CONTACT_LIST (list), NULL); g_list_foreach (priv->members, (GFunc) g_object_ref, NULL); return g_list_copy (priv->members); } static GList * tp_contact_list_get_pendings (EmpathyContactList *list) { EmpathyTpContactListPriv *priv = GET_PRIV (list); g_return_val_if_fail (EMPATHY_IS_TP_CONTACT_LIST (list), NULL); g_list_foreach (priv->pendings, (GFunc) g_object_ref, NULL); return g_list_copy (priv->pendings); } static GList * tp_contact_list_get_all_groups (EmpathyContactList *list) { EmpathyTpContactListPriv *priv = GET_PRIV (list); GList *groups = NULL, *l; g_return_val_if_fail (EMPATHY_IS_TP_CONTACT_LIST (list), NULL); if (priv->protocol_group) { groups = g_list_prepend (groups, g_strdup (priv->protocol_group)); } for (l = priv->groups; l; l = l->next) { const gchar *name; name = empathy_tp_group_get_name (l->data); groups = g_list_prepend (groups, g_strdup (name)); } return groups; } static GList * tp_contact_list_get_groups (EmpathyContactList *list, EmpathyContact *contact) { EmpathyTpContactListPriv *priv = GET_PRIV (list); GList **groups; GList *ret = NULL, *l; g_return_val_if_fail (EMPATHY_IS_TP_CONTACT_LIST (list), NULL); if (priv->protocol_group) { ret = g_list_prepend (ret, g_strdup (priv->protocol_group)); } groups = g_hash_table_lookup (priv->contacts_groups, contact); if (!groups) { return ret; } for (l = *groups; l; l = l->next) { ret = g_list_prepend (ret, g_strdup (l->data)); } return ret; } static EmpathyTpGroup * tp_contact_list_get_group (EmpathyTpContactList *list, const gchar *group) { EmpathyTpContactListPriv *priv = GET_PRIV (list); EmpathyTpGroup *tp_group; gchar *object_path; guint handle; GArray *handles; const char *names[2] = {group, NULL}; GError *error = NULL; tp_group = tp_contact_list_find_group (list, group); if (tp_group) { return tp_group; } DEBUG ("creating new group: %s", group); if (!tp_cli_connection_run_request_handles (priv->connection, -1, 
TP_HANDLE_TYPE_GROUP, names, &handles, &error, NULL)) { DEBUG ("Failed to RequestHandles: %s", error ? error->message : "No error given"); g_clear_error (&error); return NULL; } handle = g_array_index (handles, guint, 0); g_array_free (handles, TRUE); if (!tp_cli_connection_run_request_channel (priv->connection, -1, TP_IFACE_CHANNEL_TYPE_CONTACT_LIST, TP_HANDLE_TYPE_GROUP, handle, TRUE, &object_path, &error, NULL)) { DEBUG ("Failed to RequestChannel: %s", error ? error->message : "No error given"); g_clear_error (&error); return NULL; } tp_contact_list_add_channel (EMPATHY_TP_CONTACT_LIST (list), object_path, TP_IFACE_CHANNEL_TYPE_CONTACT_LIST, TP_HANDLE_TYPE_GROUP, handle); g_free (object_path); return tp_contact_list_find_group (list, group); } static void tp_contact_list_add_to_group (EmpathyContactList *list, EmpathyContact *contact, const gchar *group) { EmpathyTpGroup *tp_group; g_return_if_fail (EMPATHY_IS_TP_CONTACT_LIST (list)); tp_group = tp_contact_list_get_group (EMPATHY_TP_CONTACT_LIST (list), group); if (tp_group) { empathy_tp_group_add_member (tp_group, contact, ""); } } static void tp_contact_list_remove_from_group (EmpathyContactList *list, EmpathyContact *contact, const gchar *group) { EmpathyTpGroup *tp_group; g_return_if_fail (EMPATHY_IS_TP_CONTACT_LIST (list)); tp_group = tp_contact_list_find_group (EMPATHY_TP_CONTACT_LIST (list), group); if (tp_group) { empathy_tp_group_remove_member (tp_group, contact, ""); } } static void tp_contact_list_rename_group (EmpathyContactList *list, const gchar *old_group, const gchar *new_group) { EmpathyTpGroup *tp_group; GList *members; g_return_if_fail (EMPATHY_IS_TP_CONTACT_LIST (list)); tp_group = tp_contact_list_find_group (EMPATHY_TP_CONTACT_LIST (list), old_group); if (!tp_group) { return; } DEBUG ("rename group %s to %s", old_group, new_group); /* Remove all members from the old group */ members = empathy_tp_group_get_members (tp_group); empathy_tp_group_remove_members (tp_group, members, ""); 
empathy_tp_group_close (tp_group); /* Add all members to the new group */ tp_group = tp_contact_list_get_group (EMPATHY_TP_CONTACT_LIST (list), new_group); empathy_tp_group_add_members (tp_group, members, ""); g_list_foreach (members, (GFunc) g_object_unref, NULL); g_list_free (members); } static void tp_contact_list_remove_group (EmpathyContactList *list, const gchar *group) { EmpathyTpGroup *tp_group; GList *members; g_return_if_fail (EMPATHY_IS_TP_CONTACT_LIST (list)); tp_group = tp_contact_list_find_group (EMPATHY_TP_CONTACT_LIST (list), group); if (!tp_group) { return; } DEBUG ("remove group %s", group); /* Remove all members of the group */ members = empathy_tp_group_get_members (tp_group); empathy_tp_group_remove_members (tp_group, members, ""); empathy_tp_group_close (tp_group); g_list_foreach (members, (GFunc) g_object_unref, NULL); g_list_free (members); } static void tp_contact_list_iface_init (EmpathyContactListIface *iface) { iface->add = tp_contact_list_add; iface->remove = tp_contact_list_remove; iface->get_members = tp_contact_list_get_members; iface->get_pendings = tp_contact_list_get_pendings; iface->get_all_groups = tp_contact_list_get_all_groups; iface->get_groups = tp_contact_list_get_groups; iface->add_to_group = tp_contact_list_add_to_group; iface->remove_from_group = tp_contact_list_remove_from_group; iface->rename_group = tp_contact_list_rename_group; iface->remove_group = tp_contact_list_remove_group; } gboolean empathy_tp_contact_list_can_add (EmpathyTpContactList *list) { EmpathyTpContactListPriv *priv; TpChannelGroupFlags flags; g_return_val_if_fail (EMPATHY_IS_TP_CONTACT_LIST (list), FALSE); priv = GET_PRIV (list); if (priv->subscribe == NULL) return FALSE; flags = empathy_tp_group_get_flags (priv->subscribe); return (flags & TP_CHANNEL_GROUP_FLAG_CAN_ADD) != 0; }
gpl-2.0
mynew/FunCore
src/server/game/Guilds/Guild.cpp
1
97858
/*
 * Copyright (C) 2008-2011 TrinityCore <http://www.trinitycore.org/>
 * Copyright (C) 2005-2009 MaNGOS <http://getmangos.com/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "DatabaseEnv.h"
#include "Guild.h"
#include "GuildMgr.h"
#include "ScriptMgr.h"
#include "Chat.h"
#include "Config.h"
#include "SocialMgr.h"
#include "Log.h"
#include "AccountMgr.h"

// Maximum length of a guild bank tab text, enforced both by the DB column and the client.
#define MAX_GUILD_BANK_TAB_TEXT_LEN 500
// Price for saving a guild emblem (10 gold, expressed in copper via the GOLD constant).
#define EMBLEM_PRICE 10 * GOLD

// Returns the purchase price of the given guild bank tab.
// NOTE(review): values appear to be in gold — confirm against the caller that charges the player.
inline uint32 _GetGuildBankTabPrice(uint8 tabId)
{
    switch (tabId)
    {
        case 0: return 100;
        case 1: return 250;
        case 2: return 500;
        case 3: return 1000;
        case 4: return 2500;
        case 5: return 5000;
        default: return 0;
    }
}

// Sends the result of a guild command (invite, promote, ...) back to the client.
void Guild::SendCommandResult(WorldSession* session, GuildCommandType type, GuildCommandError errCode, const std::string& param)
{
    WorldPacket data(SMSG_GUILD_COMMAND_RESULT, 8 + param.size() + 1);
    data << uint32(type);
    data << param;
    data << uint32(errCode);
    session->SendPacket(&data);
    sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent (SMSG_GUILD_COMMAND_RESULT)");
}

// Sends the result of a guild emblem save attempt back to the client.
void Guild::SendSaveEmblemResult(WorldSession* session, GuildEmblemError errCode)
{
    WorldPacket data(MSG_SAVE_GUILD_EMBLEM, 4);
    data << uint32(errCode);
    session->SendPacket(&data);
    sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent (MSG_SAVE_GUILD_EMBLEM)");
}

///////////////////////////////////////////////////////////////////////////////
// LogHolder

Guild::LogHolder::~LogHolder()
{
    // Cleanup — the holder owns every LogEntry it stores.
    for (GuildLog::iterator itr = m_log.begin(); itr != m_log.end(); ++itr)
        delete (*itr);
}

// Adds event loaded from database to collection.
// Entries are pushed to the front, so DB rows are expected in newest-first order;
// the first loaded entry's GUID seeds the rolling GUID counter.
inline void Guild::LogHolder::LoadEvent(LogEntry* entry)
{
    if (m_nextGUID == uint32(GUILD_EVENT_LOG_GUID_UNDEFINED))
        m_nextGUID = entry->GetGUID();
    m_log.push_front(entry);
}

// Adds new event happened in game.
// If maximum number of events is reached, oldest event is removed from collection.
inline void Guild::LogHolder::AddEvent(SQLTransaction& trans, LogEntry* entry)
{
    // Check max records limit
    if (m_log.size() >= m_maxRecords)
    {
        LogEntry* oldEntry = m_log.front();
        delete oldEntry;
        m_log.pop_front();
    }
    // Add event to list
    m_log.push_back(entry);
    // Save to DB
    entry->SaveToDB(trans);
}

// Writes information about all events into packet.
inline void Guild::LogHolder::WritePacket(WorldPacket& data) const
{
    data << uint8(m_log.size());
    for (GuildLog::const_iterator itr = m_log.begin(); itr != m_log.end(); ++itr)
        (*itr)->WritePacket(data);
}

// Returns the GUID to use for the next log entry.
// GUIDs wrap around modulo m_maxRecords, so old DB rows are overwritten in a ring.
inline uint32 Guild::LogHolder::GetNextGUID()
{
    // Next guid was not initialized. It means there are no records for this holder in DB yet.
    // Start from the beginning.
    if (m_nextGUID == uint32(GUILD_EVENT_LOG_GUID_UNDEFINED))
        m_nextGUID = 0;
    else
        m_nextGUID = (m_nextGUID + 1) % m_maxRecords;
    return m_nextGUID;
}

///////////////////////////////////////////////////////////////////////////////
// EventLogEntry

// Persists this guild event log entry: delete any row with the same (guild, guid)
// pair first (ring-buffer overwrite), then insert the fresh row.
void Guild::EventLogEntry::SaveToDB(SQLTransaction& trans) const
{
    PreparedStatement* stmt = NULL;

    stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_EVENTLOG);
    stmt->setUInt32(0, m_guildId);
    stmt->setUInt32(1, m_guid);
    CharacterDatabase.ExecuteOrAppend(trans, stmt);

    uint8 index = 0;
    stmt = CharacterDatabase.GetPreparedStatement(CHAR_ADD_GUILD_EVENTLOG);
    stmt->setUInt32(  index, m_guildId);
    stmt->setUInt32(++index, m_guid);
    stmt->setUInt8 (++index, uint8(m_eventType));
    stmt->setUInt32(++index, m_playerGuid1);
    stmt->setUInt32(++index, m_playerGuid2);
    stmt->setUInt8 (++index, m_newRank);
    stmt->setUInt64(++index, m_timestamp);
    CharacterDatabase.ExecuteOrAppend(trans, stmt);
}

// Serializes one event log entry into the client packet. Optional fields
// (second player, new rank) are only written for the event types that use them.
void Guild::EventLogEntry::WritePacket(WorldPacket& data) const
{
    // Event type
    data << uint8(m_eventType);
    // Player 1
    data << uint64(MAKE_NEW_GUID(m_playerGuid1, 0, HIGHGUID_PLAYER));
    // Player 2 not for left/join guild events
    if (m_eventType != GUILD_EVENT_LOG_JOIN_GUILD && m_eventType != GUILD_EVENT_LOG_LEAVE_GUILD)
        data << uint64(MAKE_NEW_GUID(m_playerGuid2, 0, HIGHGUID_PLAYER));
    // New Rank - only for promote/demote guild events
    if (m_eventType == GUILD_EVENT_LOG_PROMOTE_PLAYER || m_eventType == GUILD_EVENT_LOG_DEMOTE_PLAYER)
        data << uint8(m_newRank);
    // Event timestamp (sent as seconds elapsed, not absolute time)
    data << uint32(::time(NULL) - m_timestamp);
}

///////////////////////////////////////////////////////////////////////////////
// BankEventLogEntry

// Persists this bank event log entry: delete-then-insert on the
// (guild, guid, tab) key, same ring-buffer scheme as EventLogEntry.
void Guild::BankEventLogEntry::SaveToDB(SQLTransaction& trans) const
{
    PreparedStatement* stmt = NULL;

    uint8 index = 0;
    stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_BANK_EVENTLOG);
    stmt->setUInt32(  index, m_guildId);
    stmt->setUInt32(++index, m_guid);
    stmt->setUInt8 (++index, m_bankTabId);
    CharacterDatabase.ExecuteOrAppend(trans, stmt);

    index = 0;
    stmt = CharacterDatabase.GetPreparedStatement(CHAR_ADD_GUILD_BANK_EVENTLOG);
    stmt->setUInt32(  index, m_guildId);
    stmt->setUInt32(++index, m_guid);
    stmt->setUInt8 (++index, m_bankTabId);
    stmt->setUInt8 (++index, uint8(m_eventType));
    stmt->setUInt32(++index, m_playerGuid);
    stmt->setUInt32(++index, m_itemOrMoney);
    stmt->setUInt16(++index, m_itemStackCount);
    stmt->setUInt8 (++index, m_destTabId);
    stmt->setUInt64(++index, m_timestamp);
    CharacterDatabase.ExecuteOrAppend(trans, stmt);
}

// Serializes one bank event log entry into the client packet.
void Guild::BankEventLogEntry::WritePacket(WorldPacket& data) const
{
    data << uint8(m_eventType);
    data << uint64(MAKE_NEW_GUID(m_playerGuid, 0, HIGHGUID_PLAYER));
    data << uint32(m_itemOrMoney);
    // Stack count / destination tab are only sent for item events, not money events.
    // if ( m_eventType != 4 || m_eventType != 5 || m_eventType != 6 || m_eventType != 8 || m_eventType != 9 )
    if (m_eventType < GUILD_BANK_LOG_DEPOSIT_MONEY)
    {
        data << uint32(m_itemStackCount);
        if (m_eventType == GUILD_BANK_LOG_MOVE_ITEM || m_eventType == GUILD_BANK_LOG_MOVE_ITEM2)
            data << uint8(m_destTabId);
    }
    data << uint32(time(NULL) - m_timestamp);
}

///////////////////////////////////////////////////////////////////////////////
// RankInfo

// Loads one rank row. Field indices match the guild-rank SELECT used by the loader.
void Guild::RankInfo::LoadFromDB(Field* fields)
{
    m_rankId            = fields[1].GetUInt8();
    m_name              = fields[2].GetString();
    m_rights            = fields[3].GetUInt32();
    m_bankMoneyPerDay   = fields[4].GetUInt32();
    if (m_rankId == GR_GUILDMASTER)                     // Prevent loss of leader rights
        m_rights |= GR_RIGHT_ALL;
}

// Inserts this rank into the DB (used on guild creation / rank add).
void Guild::RankInfo::SaveToDB(SQLTransaction& trans) const
{
    PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_ADD_GUILD_RANK);
    stmt->setUInt32(0, m_guildId);
    stmt->setUInt8 (1, m_rankId);
    stmt->setString(2, m_name);
    stmt->setUInt32(3, m_rights);
    CharacterDatabase.ExecuteOrAppend(trans, stmt);
}

// Serializes the rank's rights and per-tab bank permissions into the client packet.
void Guild::RankInfo::WritePacket(WorldPacket& data) const
{
    data << uint32(m_rights);
    data << uint32(m_bankMoneyPerDay);                  // In game set in gold, in packet set in bronze.
    for (uint8 i = 0; i < GUILD_BANK_MAX_TABS; ++i)
    {
        data << uint32(m_bankTabRightsAndSlots[i].rights);
        data << uint32(m_bankTabRightsAndSlots[i].slots);
    }
}

// Renames the rank and immediately persists the change (no-op if unchanged).
void Guild::RankInfo::SetName(const std::string& name)
{
    if (m_name == name)
        return;

    m_name = name;

    PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SET_GUILD_RANK_NAME);
    stmt->setString(0, m_name);
    stmt->setUInt8 (1, m_rankId);
    stmt->setUInt32(2, m_guildId);
    CharacterDatabase.Execute(stmt);
}

// Updates the rank's rights mask and persists it (no-op if unchanged).
void Guild::RankInfo::SetRights(uint32 rights)
{
    if (m_rankId == GR_GUILDMASTER)                     // Prevent loss of leader rights
        rights = GR_RIGHT_ALL;

    if (m_rights == rights)
        return;

    m_rights = rights;

    PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SET_GUILD_RANK_RIGHTS);
    stmt->setUInt32(0, m_rights);
    stmt->setUInt8 (1, m_rankId);
    stmt->setUInt32(2, m_guildId);
    CharacterDatabase.Execute(stmt);
}

// Sets the daily bank money withdrawal limit for this rank and resets the
// members' withdraw timers so the new limit takes effect immediately.
void Guild::RankInfo::SetBankMoneyPerDay(uint32 money)
{
    if (m_rankId == GR_GUILDMASTER)                     // Prevent loss of leader rights
        money = uint32(GUILD_WITHDRAW_MONEY_UNLIMITED);

    if (m_bankMoneyPerDay == money)
        return;

    m_bankMoneyPerDay = money;

    PreparedStatement* stmt = NULL;
    stmt = CharacterDatabase.GetPreparedStatement(CHAR_SET_GUILD_RANK_BANK_MONEY);
    stmt->setUInt32(0, money);
    stmt->setUInt8 (1, m_rankId);
    stmt->setUInt32(2, m_guildId);
    CharacterDatabase.Execute(stmt);

    stmt = CharacterDatabase.GetPreparedStatement(CHAR_RESET_GUILD_RANK_BANK_RESET_TIME);
    stmt->setUInt32(0, m_guildId);
    stmt->setUInt8 (1, m_rankId);
    CharacterDatabase.Execute(stmt);
}

// Sets per-tab rights/slots for this rank; optionally persists (delete+insert)
// and resets the per-tab withdraw timers for affected members.
void Guild::RankInfo::SetBankTabSlotsAndRights(uint8 tabId, GuildBankRightsAndSlots rightsAndSlots, bool saveToDB)
{
    if (m_rankId == GR_GUILDMASTER)                     // Prevent loss of leader rights
        rightsAndSlots.SetGuildMasterValues();

    if (m_bankTabRightsAndSlots[tabId].IsEqual(rightsAndSlots))
        return;

    m_bankTabRightsAndSlots[tabId] = rightsAndSlots;

    if (saveToDB)
    {
        PreparedStatement* stmt = NULL;
        stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_BANK_RIGHT);
        stmt->setUInt32(0, m_guildId);
        stmt->setUInt8 (1, tabId);
        stmt->setUInt8 (2, m_rankId);
        CharacterDatabase.Execute(stmt);

        stmt = CharacterDatabase.GetPreparedStatement(CHAR_ADD_GUILD_BANK_RIGHT);
        stmt->setUInt32(0, m_guildId);
        stmt->setUInt8 (1, tabId);
        stmt->setUInt8 (2, m_rankId);
        stmt->setUInt8 (3, m_bankTabRightsAndSlots[tabId].rights);
        stmt->setUInt32(4, m_bankTabRightsAndSlots[tabId].slots);
        CharacterDatabase.Execute(stmt);

        // One prepared statement per tab: the statement id is offset by tabId.
        stmt = CharacterDatabase.GetPreparedStatement(CHAR_RESET_GUILD_RANK_BANK_TIME0 + tabId);
        stmt->setUInt32(0, m_guildId);
        stmt->setUInt8 (1, m_rankId);
        CharacterDatabase.Execute(stmt);
    }
}

///////////////////////////////////////////////////////////////////////////////
// BankTab

// Loads tab metadata (name, icon, text) from one DB row.
bool Guild::BankTab::LoadFromDB(Field* fields)
{
    m_name = fields[2].GetString();
    m_icon = fields[3].GetString();
    m_text = fields[4].GetString();
    return true;
}

// Loads a single item into this tab from a joined guild_bank_item/item_instance row.
// Returns false (and cleans up the orphaned DB row) if the item is invalid.
bool Guild::BankTab::LoadItemFromDB(Field* fields)
{
    uint8 slotId = fields[13].GetUInt8();
    uint32 itemGuid = fields[14].GetUInt32();
    uint32 itemEntry = fields[15].GetUInt32();
    if (slotId >= GUILD_BANK_MAX_SLOTS)
    {
        sLog->outError("Invalid slot for item (GUID: %u, id: %u) in guild bank, skipped.", itemGuid, itemEntry);
        return false;
    }

    ItemTemplate const* proto = sObjectMgr->GetItemTemplate(itemEntry);
    if (!proto)
    {
        sLog->outError("Unknown item (GUID: %u, id: %u) in guild bank, skipped.", itemGuid, itemEntry);
        return false;
    }

    Item* pItem = NewItemOrBag(proto);
    if (!pItem->LoadFromDB(itemGuid, 0, fields, itemEntry))
    {
        sLog->outError("Item (GUID %u, id: %u) not found in item_instance, deleting from guild bank!", itemGuid, itemEntry);

        PreparedStatement *stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_NONEXISTENT_GUILD_BANK_ITEM);
        stmt->setUInt32(0, m_guildId);
        stmt->setUInt8 (1, m_tabId);
        stmt->setUInt8 (2, slotId);
        CharacterDatabase.Execute(stmt);

        delete pItem;
        return false;
    }

    pItem->AddToWorld();
    m_items[slotId] = pItem;
    return true;
}

// Deletes contents of the tab from the world (and from DB if necessary)
void Guild::BankTab::Delete(SQLTransaction& trans, bool removeItemsFromDB) { for (uint8 slotId = 0; slotId < GUILD_BANK_MAX_SLOTS; ++slotId) if (Item* pItem = m_items[slotId]) { pItem->RemoveFromWorld(); if (removeItemsFromDB) pItem->DeleteFromDB(trans); delete pItem; pItem = NULL; } } inline void Guild::BankTab::WritePacket(WorldPacket& data) const { data << uint8(GUILD_BANK_MAX_SLOTS); for (uint8 slotId = 0; slotId < GUILD_BANK_MAX_SLOTS; ++slotId) WriteSlotPacket(data, slotId); } // Writes information about contents of specified slot into packet. void Guild::BankTab::WriteSlotPacket(WorldPacket& data, uint8 slotId) const { Item* pItem = GetItem(slotId); uint32 itemEntry = pItem ? pItem->GetEntry() : 0; data << uint8(slotId); data << uint32(itemEntry); if (itemEntry) { data << uint32(0); // 3.3.0 (0x00018020, 0x00018000) data << uint32(pItem->GetItemRandomPropertyId()); // Random item property id if (pItem->GetItemRandomPropertyId()) data << uint32(pItem->GetItemSuffixFactor()); // SuffixFactor data << uint32(pItem->GetCount()); // ITEM_FIELD_STACK_COUNT data << uint32(0); data << uint8(abs(pItem->GetSpellCharges())); // Spell charges uint8 enchCount = 0; size_t enchCountPos = data.wpos(); data << uint8(enchCount); // Number of enchantments for (uint32 i = PERM_ENCHANTMENT_SLOT; i < MAX_ENCHANTMENT_SLOT; ++i) if (uint32 enchId = pItem->GetEnchantmentId(EnchantmentSlot(i))) { data << uint8(i); data << uint32(enchId); ++enchCount; } data.put<uint8>(enchCountPos, enchCount); } } void Guild::BankTab::SetInfo(const std::string& name, const std::string& icon) { if (m_name == name && m_icon == icon) return; m_name = name; m_icon = icon; PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SET_GUILD_BANK_TAB_INFO); stmt->setString(0, m_name); stmt->setString(1, m_icon); stmt->setUInt32(2, m_guildId); stmt->setUInt8 (3, m_tabId); CharacterDatabase.Execute(stmt); } void Guild::BankTab::SetText(const std::string& text) { if (m_text == text) return; m_text = 
text; utf8truncate(m_text, MAX_GUILD_BANK_TAB_TEXT_LEN); // DB and client size limitation PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SET_GUILD_BANK_TAB_TEXT); stmt->setString(0, m_text); stmt->setUInt32(1, m_guildId); stmt->setUInt8 (2, m_tabId); CharacterDatabase.Execute(stmt); } // Sets/removes contents of specified slot. // If pItem == NULL contents are removed. bool Guild::BankTab::SetItem(SQLTransaction& trans, uint8 slotId, Item* pItem) { if (slotId >= GUILD_BANK_MAX_SLOTS) return false; m_items[slotId] = pItem; PreparedStatement* stmt = NULL; stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_BANK_ITEM); stmt->setUInt32(0, m_guildId); stmt->setUInt8 (1, m_tabId); stmt->setUInt8 (2, slotId); CharacterDatabase.ExecuteOrAppend(trans, stmt); if (pItem) { stmt = CharacterDatabase.GetPreparedStatement(CHAR_ADD_GUILD_BANK_ITEM); stmt->setUInt32(0, m_guildId); stmt->setUInt8 (1, m_tabId); stmt->setUInt8 (2, slotId); stmt->setUInt32(3, pItem->GetGUIDLow()); CharacterDatabase.ExecuteOrAppend(trans, stmt); pItem->SetUInt64Value(ITEM_FIELD_CONTAINED, 0); pItem->SetUInt64Value(ITEM_FIELD_OWNER, 0); pItem->FSetState(ITEM_NEW); pItem->SaveToDB(trans); // Not in inventory and can be saved standalone } return true; } void Guild::BankTab::SendText(const Guild* guild, WorldSession* session) const { WorldPacket data(MSG_QUERY_GUILD_BANK_TEXT, 1 + m_text.size() + 1); data << uint8(m_tabId); data << m_text; if (session) session->SendPacket(&data); else guild->BroadcastPacket(&data); } /////////////////////////////////////////////////////////////////////////////// // Member void Guild::Member::SetStats(Player* player) { m_name = player->GetName(); m_level = player->getLevel(); m_class = player->getClass(); m_zoneId = player->GetZoneId(); m_accountId = player->GetSession()->GetAccountId(); } void Guild::Member::SetStats(const std::string& name, uint8 level, uint8 _class, uint32 zoneId, uint32 accountId) { m_name = name; m_level = level; m_class 
= _class; m_zoneId = zoneId; m_accountId = accountId; } void Guild::Member::SetPublicNote(const std::string& publicNote) { if (m_publicNote == publicNote) return; m_publicNote = publicNote; PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SET_GUILD_MEMBER_PNOTE); stmt->setString(0, publicNote); stmt->setUInt32(1, GUID_LOPART(m_guid)); CharacterDatabase.Execute(stmt); } void Guild::Member::SetOfficerNote(const std::string& officerNote) { if (m_officerNote == officerNote) return; m_officerNote = officerNote; PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SET_GUILD_MEMBER_OFFNOTE); stmt->setString(0, officerNote); stmt->setUInt32(1, GUID_LOPART(m_guid)); CharacterDatabase.Execute(stmt); } void Guild::Member::ChangeRank(uint8 newRank) { m_rankId = newRank; // Update rank information in player's field, if he is online. if (Player* player = FindPlayer()) player->SetRank(newRank); PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SET_GUILD_MEMBER_RANK); stmt->setUInt8 (0, newRank); stmt->setUInt32(1, GUID_LOPART(m_guid)); CharacterDatabase.Execute(stmt); } void Guild::Member::SaveToDB(SQLTransaction& trans) const { PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_ADD_GUILD_MEMBER); stmt->setUInt32(0, m_guildId); stmt->setUInt32(1, GUID_LOPART(m_guid)); stmt->setUInt8 (2, m_rankId); stmt->setString(3, m_publicNote); stmt->setString(4, m_officerNote); CharacterDatabase.ExecuteOrAppend(trans, stmt); } // Loads member's data from database. // If member has broken fields (level, class) returns false. // In this case member has to be removed from guild. 
bool Guild::Member::LoadFromDB(Field* fields) { m_publicNote = fields[3].GetString(); m_officerNote = fields[4].GetString(); m_bankRemaining[GUILD_BANK_MAX_TABS].resetTime = fields[5].GetUInt32(); m_bankRemaining[GUILD_BANK_MAX_TABS].value = fields[6].GetUInt32(); for (uint8 i = 0; i < GUILD_BANK_MAX_TABS; ++i) { m_bankRemaining[i].resetTime = fields[7 + i * 2].GetUInt32(); m_bankRemaining[i].value = fields[8 + i * 2].GetUInt32(); } SetStats(fields[19].GetString(), fields[20].GetUInt8(), fields[21].GetUInt8(), fields[22].GetUInt32(), fields[23].GetUInt32()); m_logoutTime = fields[24].GetUInt32(); if (!CheckStats()) return false; if (!m_zoneId) { sLog->outError("Player (GUID: %u) has broken zone-data", GUID_LOPART(m_guid)); m_zoneId = Player::GetZoneIdFromDB(m_guid); } return true; } // Validate player fields. Returns false if corrupted fields are found. bool Guild::Member::CheckStats() const { if (m_level < 1) { sLog->outError("Player (GUID: %u) has a broken data in field `characters`.`level`, deleting him from guild!", GUID_LOPART(m_guid)); return false; } if (m_class < CLASS_WARRIOR || m_class >= MAX_CLASSES) { sLog->outError("Player (GUID: %u) has a broken data in field `characters`.`class`, deleting him from guild!", GUID_LOPART(m_guid)); return false; } return true; } void Guild::Member::WritePacket(WorldPacket& data) const { if (Player* player = FindPlayer()) { data << uint64(player->GetGUID()); data << uint8(1); data << player->GetName(); data << uint32(m_rankId); data << uint8(player->getLevel()); data << uint8(player->getClass()); data << uint8(0); // new 2.4.0 data << uint32(player->GetZoneId()); } else { data << m_guid; data << uint8(0); data << m_name; data << uint32(m_rankId); data << uint8(m_level); data << uint8(m_class); data << uint8(0); // new 2.4.0 data << uint32(m_zoneId); data << float(float(::time(NULL) - m_logoutTime) / DAY); } data << m_publicNote; data << m_officerNote; } // Decreases amount of money/slots left for today. 
// If (tabId == GUILD_BANK_MAX_TABS) decrease money amount.
// Otherwise decrease remaining items amount for specified tab.
void Guild::Member::DecreaseBankRemainingValue(SQLTransaction& trans, uint8 tabId, uint32 amount)
{
    m_bankRemaining[tabId].value -= amount;

    // Per-tab prepared statement: the statement id is offset by tabId.
    PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(
        tabId == GUILD_BANK_MAX_TABS ?
        CHAR_SET_GUILD_MEMBER_BANK_REM_MONEY :
        CHAR_SET_GUILD_MEMBER_BANK_REM_SLOTS0 + tabId);
    stmt->setUInt32(0, m_bankRemaining[tabId].value);
    stmt->setUInt32(1, m_guildId);
    stmt->setUInt32(2, GUID_LOPART(m_guid));
    CharacterDatabase.ExecuteOrAppend(trans, stmt);
}

// Get amount of money/slots left for today.
// If (tabId == GUILD_BANK_MAX_TABS) return money amount.
// Otherwise return remaining items amount for specified tab.
// If reset time was more than 24 hours ago, renew reset time and reset amount to maximum value.
uint32 Guild::Member::GetBankRemainingValue(uint8 tabId, const Guild* guild) const
{
    // Guild master has unlimited amount.
    if (IsRank(GR_GUILDMASTER))
        return tabId == GUILD_BANK_MAX_TABS ? GUILD_WITHDRAW_MONEY_UNLIMITED : GUILD_WITHDRAW_SLOT_UNLIMITED;

    // Check rights for non-money tab.
    if (tabId != GUILD_BANK_MAX_TABS)
        if ((guild->_GetRankBankTabRights(m_rankId, tabId) & GUILD_BANK_RIGHT_VIEW_TAB) != GUILD_BANK_RIGHT_VIEW_TAB)
            return 0;

    uint32 curTime = uint32(::time(NULL) / MINUTE);     // minutes
    if (curTime > m_bankRemaining[tabId].resetTime + 24 * HOUR / MINUTE)
    {
        // Method is const but lazily refreshes the cached allowance;
        // const_cast is used to update the mutable-by-intent cache.
        RemainingValue& rv = const_cast <RemainingValue&> (m_bankRemaining[tabId]);
        rv.resetTime = curTime;
        rv.value = tabId == GUILD_BANK_MAX_TABS ?
            guild->_GetRankBankMoneyPerDay(m_rankId) :
            guild->_GetRankBankTabSlotsPerDay(m_rankId, tabId);

        PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(
            tabId == GUILD_BANK_MAX_TABS ?
            CHAR_SET_GUILD_MEMBER_BANK_TIME_MONEY :
            CHAR_SET_GUILD_MEMBER_BANK_TIME_REM_SLOTS0 + tabId);
        stmt->setUInt32(0, m_bankRemaining[tabId].resetTime);
        stmt->setUInt32(1, m_bankRemaining[tabId].value);
        stmt->setUInt32(2, m_guildId);
        stmt->setUInt32(3, GUID_LOPART(m_guid));
        CharacterDatabase.Execute(stmt);
    }
    return m_bankRemaining[tabId].value;
}

// Forces the per-tab withdraw allowances to be recomputed on next access.
inline void Guild::Member::ResetTabTimes()
{
    for (uint8 tabId = 0; tabId < GUILD_BANK_MAX_TABS; ++tabId)
        m_bankRemaining[tabId].resetTime = 0;
}

// Forces the money withdraw allowance to be recomputed on next access.
inline void Guild::Member::ResetMoneyTime()
{
    m_bankRemaining[GUILD_BANK_MAX_TABS].resetTime = 0;
}

///////////////////////////////////////////////////////////////////////////////
// EmblemInfo

// Loads emblem appearance fields from one guild DB row.
void EmblemInfo::LoadFromDB(Field* fields)
{
    m_style             = fields[3].GetUInt8();
    m_color             = fields[4].GetUInt8();
    m_borderStyle       = fields[5].GetUInt8();
    m_borderColor       = fields[6].GetUInt8();
    m_backgroundColor   = fields[7].GetUInt8();
}

// Serializes the emblem into a client packet (each field widened to uint32).
void EmblemInfo::WritePacket(WorldPacket& data) const
{
    data << uint32(m_style);
    data << uint32(m_color);
    data << uint32(m_borderStyle);
    data << uint32(m_borderColor);
    data << uint32(m_backgroundColor);
}

// Persists the emblem for the given guild.
void EmblemInfo::SaveToDB(uint32 guildId) const
{
    PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SET_GUILD_EMBLEM_INFO);
    stmt->setUInt32(0, m_style);
    stmt->setUInt32(1, m_color);
    stmt->setUInt32(2, m_borderStyle);
    stmt->setUInt32(3, m_borderColor);
    stmt->setUInt32(4, m_backgroundColor);
    stmt->setUInt32(5, guildId);
    CharacterDatabase.Execute(stmt);
}

///////////////////////////////////////////////////////////////////////////////
// MoveItemData

// Validates the requested split amount against the source item's stack.
// A split equal to the full stack is normalized to 0 (i.e. "move everything").
bool Guild::MoveItemData::CheckItem(uint32& splitedAmount)
{
    ASSERT(m_pItem);
    if (splitedAmount > m_pItem->GetCount())
        return false;
    if (splitedAmount == m_pItem->GetCount())
        splitedAmount = 0;
    return true;
}

// Checks whether pItem can be stored at the destination, filling m_vec with
// the reserved positions. Optionally reports the failure to the player.
bool Guild::MoveItemData::CanStore(Item* pItem, bool swap, bool sendError)
{
    m_vec.clear();
    InventoryResult msg = CanStore(pItem, swap);
    if (sendError && msg != EQUIP_ERR_OK)
        m_pPlayer->SendEquipError(msg, pItem);
    return (msg == EQUIP_ERR_OK);
}

// Clones the source item with the given stack count (used for split moves).
bool Guild::MoveItemData::CloneItem(uint32 count)
{
    ASSERT(m_pItem);
    m_pClonedItem = m_pItem->CloneItem(count);
    if (!m_pClonedItem)
    {
        m_pPlayer->SendEquipError(EQUIP_ERR_ITEM_NOT_FOUND, m_pItem);
        return false;
    }
    return true;
}

// Notifies scripts about a completed item move (source -> this destination).
void Guild::MoveItemData::LogAction(MoveItemData* pFrom) const
{
    ASSERT(pFrom->GetItem());
    sScriptMgr->OnGuildItemMove(m_pGuild, m_pPlayer, pFrom->GetItem(),
        pFrom->IsBank(), pFrom->GetContainer(), pFrom->GetSlotId(),
        IsBank(), GetContainer(), GetSlotId());
}

// Collects the slot ids reserved in m_vec into the given set.
inline void Guild::MoveItemData::CopySlots(SlotIds& ids) const
{
    for (ItemPosCountVec::const_iterator itr = m_vec.begin(); itr != m_vec.end(); ++itr)
        ids.insert(uint8(itr->pos));
}

///////////////////////////////////////////////////////////////////////////////
// PlayerMoveItemData

// Resolves the source item from the player's inventory and vetoes items
// that may not enter the guild bank.
bool Guild::PlayerMoveItemData::InitItem()
{
    m_pItem = m_pPlayer->GetItemByPos(m_container, m_slotId);
    if (m_pItem)
    {
        // Anti-WPE protection. Do not move non-empty bags to bank.
        if (m_pItem->IsNotEmptyBag())
        {
            m_pPlayer->SendEquipError(EQUIP_ERR_CAN_ONLY_DO_WITH_EMPTY_BAGS, m_pItem);
            m_pItem = NULL;
        }
        // Bound items cannot be put into bank.
        else if (!m_pItem->CanBeTraded())
        {
            m_pPlayer->SendEquipError(EQUIP_ERR_ITEMS_CANT_BE_SWAPPED, m_pItem);
            m_pItem = NULL;
        }
    }
    return (m_pItem != NULL);
}

// Removes the moved amount from the player's inventory: either shrink the
// stack (split) or remove the item entirely.
void Guild::PlayerMoveItemData::RemoveItem(SQLTransaction& trans, MoveItemData* /*pOther*/, uint32 splitedAmount)
{
    if (splitedAmount)
    {
        m_pItem->SetCount(m_pItem->GetCount() - splitedAmount);
        m_pItem->SetState(ITEM_CHANGED, m_pPlayer);
        m_pPlayer->SaveInventoryAndGoldToDB(trans);
    }
    else
    {
        m_pPlayer->MoveItemFromInventory(m_container, m_slotId, true);
        m_pItem->DeleteFromInventoryDB(trans);
        m_pItem = NULL;
    }
}

// Stores the item into the player's inventory at the reserved positions.
Item* Guild::PlayerMoveItemData::StoreItem(SQLTransaction& trans, Item* pItem)
{
    ASSERT(pItem);
    m_pPlayer->MoveItemToInventory(m_vec, pItem, true);
    m_pPlayer->SaveInventoryAndGoldToDB(trans);
    return pItem;
}

// Logs a Bank -> Char withdrawal in the bank event log.
void Guild::PlayerMoveItemData::LogBankEvent(SQLTransaction& trans, MoveItemData* pFrom, uint32 count) const
{
    ASSERT(pFrom);
    // Bank -> Char
    m_pGuild->_LogBankEvent(trans, GUILD_BANK_LOG_WITHDRAW_ITEM, pFrom->GetContainer(),
        m_pPlayer->GetGUIDLow(), pFrom->GetItem()->GetEntry(), count);
}

// Delegates the space check to the player's inventory logic.
inline InventoryResult Guild::PlayerMoveItemData::CanStore(Item* pItem, bool swap)
{
    return m_pPlayer->CanStoreItem(m_container, m_slotId, m_vec, pItem, swap);
}

///////////////////////////////////////////////////////////////////////////////
// BankMoveItemData

// Resolves the source item from the guild bank tab/slot.
bool Guild::BankMoveItemData::InitItem()
{
    m_pItem = m_pGuild->_GetItem(m_container, m_slotId);
    return (m_pItem != NULL);
}

// True if the player may deposit into this tab.
bool Guild::BankMoveItemData::HasStoreRights(MoveItemData* pOther) const
{
    ASSERT(pOther);
    // Do not check rights if item is being swapped within the same bank tab
    if (pOther->IsBank() && pOther->GetContainer() == m_container)
        return true;
    return m_pGuild->_MemberHasTabRights(m_pPlayer->GetGUID(), m_container, GUILD_BANK_RIGHT_DEPOSIT_ITEM);
}

// True if the player still has withdrawal slots left for this tab today.
bool Guild::BankMoveItemData::HasWithdrawRights(MoveItemData* pOther) const
{
    ASSERT(pOther);
    // Do not check rights if item is being swapped within the same bank tab
    if (pOther->IsBank() && pOther->GetContainer() == m_container)
        return true;
    return (m_pGuild->_GetMemberRemainingSlots(m_pPlayer->GetGUID(), m_container) != 0);
}

// Removes the moved amount from the bank: shrink the stack (split) or clear
// the slot, and charge the player's daily withdrawal allowance when the item
// leaves this tab.
void Guild::BankMoveItemData::RemoveItem(SQLTransaction& trans, MoveItemData* pOther, uint32 splitedAmount)
{
    ASSERT(m_pItem);
    if (splitedAmount)
    {
        m_pItem->SetCount(m_pItem->GetCount() - splitedAmount);
        m_pItem->FSetState(ITEM_CHANGED);
        m_pItem->SaveToDB(trans);
    }
    else
    {
        m_pGuild->_RemoveItem(trans, m_container, m_slotId);
        m_pItem = NULL;
    }
    // Decrease amount of player's remaining items (if item is moved to different tab or to player)
    if (!pOther->IsBank() || pOther->GetContainer() != m_container)
        m_pGuild->_DecreaseMemberRemainingSlots(trans, m_pPlayer->GetGUID(), m_container);
}

// Stores the item into the bank tab across all reserved positions, cloning
// for every position except the last (the original stack fills the final one).
Item* Guild::BankMoveItemData::StoreItem(SQLTransaction& trans, Item* pItem)
{
    if (!pItem)
        return NULL;

    BankTab* pTab = m_pGuild->GetBankTab(m_container);
    if (!pTab)
        return NULL;

    Item* pLastItem = pItem;
    for (ItemPosCountVec::const_iterator itr = m_vec.begin(); itr != m_vec.end(); )
    {
        ItemPosCount pos(*itr);
        ++itr;

        sLog->outDebug(LOG_FILTER_GUILD, "GUILD STORAGE: StoreItem tab = %u, slot = %u, item = %u, count = %u",
            m_container, m_slotId, pItem->GetEntry(), pItem->GetCount());
        pLastItem = _StoreItem(trans, pTab, pItem, pos, itr != m_vec.end());
    }
    return pLastItem;
}

// Logs a Bank -> Bank move or a Char -> Bank deposit in the bank event log.
void Guild::BankMoveItemData::LogBankEvent(SQLTransaction& trans, MoveItemData* pFrom, uint32 count) const
{
    ASSERT(pFrom->GetItem());
    if (pFrom->IsBank())
        // Bank -> Bank
        m_pGuild->_LogBankEvent(trans, GUILD_BANK_LOG_MOVE_ITEM, pFrom->GetContainer(), m_pPlayer->GetGUIDLow(),
            pFrom->GetItem()->GetEntry(), count, m_container);
    else
        // Char -> Bank
        m_pGuild->_LogBankEvent(trans, GUILD_BANK_LOG_DEPOSIT_ITEM, m_container, m_pPlayer->GetGUIDLow(),
            pFrom->GetItem()->GetEntry(), count);
}

// In addition to the script hook, GM deposits are written to the command log
// when GM trade logging is enabled.
void Guild::BankMoveItemData::LogAction(MoveItemData* pFrom) const
{
    MoveItemData::LogAction(pFrom);
    if (!pFrom->IsBank() && sWorld->getBoolConfig(CONFIG_GM_LOG_TRADE) &&
        !AccountMgr::IsPlayerAccount(m_pPlayer->GetSession()->GetSecurity()))       // TODO: move to scripts
        sLog->outCommand(m_pPlayer->GetSession()->GetAccountId(),
            "GM %s (Account: %u) deposit item: %s (Entry: %d Count: %u) to guild bank (Guild ID: %u)",
            m_pPlayer->GetName(), m_pPlayer->GetSession()->GetAccountId(),
            pFrom->GetItem()->GetTemplate()->Name1.c_str(), pFrom->GetItem()->GetEntry(),
            pFrom->GetItem()->GetCount(), m_pGuild->GetId());
}

// Places `count` items into a single tab slot: merges into an existing stack,
// or stores a clone / the original stack into an empty slot.
// When merging into an existing stack and not cloning, the source item is
// consumed (removed from world, deleted from DB, freed).
Item* Guild::BankMoveItemData::_StoreItem(SQLTransaction& trans, BankTab* pTab, Item* pItem, ItemPosCount& pos, bool clone) const
{
    uint8 slotId = uint8(pos.pos);
    uint32 count = pos.count;
    if (Item* pItemDest = pTab->GetItem(slotId))
    {
        pItemDest->SetCount(pItemDest->GetCount() + count);
        pItemDest->FSetState(ITEM_CHANGED);
        pItemDest->SaveToDB(trans);
        if (!clone)
        {
            pItem->RemoveFromWorld();
            pItem->DeleteFromDB(trans);
            delete pItem;
        }
        return pItemDest;
    }

    if (clone)
        pItem = pItem->CloneItem(count);
    else
        pItem->SetCount(count);

    if (pItem && pTab->SetItem(trans, slotId, pItem))
        return pItem;

    return NULL;
}

// Tries to reserve space for source item.
// If item in destination slot exists it must be the item of the same entry
// and stack must have enough space to take at least one item.
// Returns false if destination item specified and it cannot be used to reserve space.
bool Guild::BankMoveItemData::_ReserveSpace(uint8 slotId, Item* pItem, Item* pItemDest, uint32& count)
{
    uint32 requiredSpace = pItem->GetMaxStackCount();
    if (pItemDest)
    {
        // Make sure source and destination items match and destination item has space for more stacks.
        if (pItemDest->GetEntry() != pItem->GetEntry() || pItemDest->GetCount() >= pItem->GetMaxStackCount())
            return false;
        requiredSpace -= pItemDest->GetCount();
    }
    // Let's not be greedy, reserve only required space
    requiredSpace = std::min(requiredSpace, count);

    // Reserve space
    ItemPosCount pos(slotId, requiredSpace);
    if (!pos.isContainedIn(m_vec))
    {
        m_vec.push_back(pos);
        count -= requiredSpace;
    }
    return true;
}

// Scans the destination tab reserving space for `count` items.
// With merge == true only non-empty matching stacks are considered; with
// merge == false only empty slots. skipSlotId was already handled by CanStore.
void Guild::BankMoveItemData::CanStoreItemInTab(Item* pItem, uint8 skipSlotId, bool merge, uint32& count)
{
    for (uint8 slotId = 0; (slotId < GUILD_BANK_MAX_SLOTS) && (count > 0); ++slotId)
    {
        // Skip slot already processed in CanStore (when destination slot was specified)
        if (slotId == skipSlotId)
            continue;

        Item* pItemDest = m_pGuild->_GetItem(m_container, slotId);
        if (pItemDest == pItem)
            pItemDest = NULL;

        // If merge skip empty, if not merge skip non-empty
        if ((pItemDest != NULL) != merge)
            continue;

        _ReserveSpace(slotId, pItem, pItemDest, count);
    }
}

// Full space check for storing pItem in the bank: explicit slot first, then
// merge into existing stacks, then free slots. Returns EQUIP_ERR_OK when the
// entire stack fits.
InventoryResult Guild::BankMoveItemData::CanStore(Item* pItem, bool swap)
{
    sLog->outDebug(LOG_FILTER_GUILD, "GUILD STORAGE: CanStore() tab = %u, slot = %u, item = %u, count = %u",
        m_container, m_slotId, pItem->GetEntry(), pItem->GetCount());

    uint32 count = pItem->GetCount();
    // Soulbound items cannot be moved
    if (pItem->IsSoulBound())
        return EQUIP_ERR_CANT_DROP_SOULBOUND;

    // Make sure destination bank tab exists
    if (m_container >= m_pGuild->_GetPurchasedTabsSize())
        return EQUIP_ERR_ITEM_DOESNT_GO_INTO_BAG;

    // Slot explicitely specified. Check it.
    if (m_slotId != NULL_SLOT)
    {
        Item* pItemDest = m_pGuild->_GetItem(m_container, m_slotId);
        // Ignore swapped item (this slot will be empty after move)
        if ((pItemDest == pItem) || swap)
            pItemDest = NULL;

        if (!_ReserveSpace(m_slotId, pItem, pItemDest, count))
            return EQUIP_ERR_ITEM_CANT_STACK;

        if (count == 0)
            return EQUIP_ERR_OK;
    }

    // Slot was not specified or it has not enough space for all the items in stack
    // Search for stacks to merge with
    if (pItem->GetMaxStackCount() > 1)
    {
        CanStoreItemInTab(pItem, m_slotId, true, count);
        if (count == 0)
            return EQUIP_ERR_OK;
    }

    // Search free slot for item
    CanStoreItemInTab(pItem, m_slotId, false, count);
    if (count == 0)
        return EQUIP_ERR_OK;

    return EQUIP_ERR_BANK_FULL;
}

///////////////////////////////////////////////////////////////////////////////
// Guild

Guild::Guild() : m_id(0), m_leaderGuid(0), m_createdDate(0), m_accountsNumber(0), m_bankMoney(0),
    m_eventLog(NULL)
{
    // One bank event log per tab plus one for money events.
    memset(&m_bankEventLog, 0, (GUILD_BANK_MAX_TABS + 1) * sizeof(LogHolder*));
}

Guild::~Guild()
{
    SQLTransaction temp(NULL);
    _DeleteBankItems(temp);

    // Cleanup
    if (m_eventLog)
        delete m_eventLog;
    for (uint8 tabId = 0; tabId <= GUILD_BANK_MAX_TABS; ++tabId)
        if (m_bankEventLog[tabId])
            delete m_bankEventLog[tabId];
    for (Members::iterator itr = m_members.begin(); itr != m_members.end(); ++itr)
        delete itr->second;
}

// Creates new guild with default data and saves it to database.
bool Guild::Create(Player* pLeader, const std::string& name) { // Check if guild with such name already exists if (sGuildMgr->GetGuildByName(name)) return false; WorldSession* pLeaderSession = pLeader->GetSession(); if (!pLeaderSession) return false; m_id = sGuildMgr->GenerateGuildId(); m_leaderGuid = pLeader->GetGUID(); m_name = name; m_info = ""; m_motd = "No message set."; m_bankMoney = 0; m_createdDate = ::time(NULL); _CreateLogHolders(); sLog->outDebug(LOG_FILTER_GUILD, "GUILD: creating guild [%s] for leader %s (%u)", name.c_str(), pLeader->GetName(), GUID_LOPART(m_leaderGuid)); PreparedStatement* stmt = NULL; SQLTransaction trans = CharacterDatabase.BeginTransaction(); stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_MEMBERS); stmt->setUInt32(0, m_id); trans->Append(stmt); uint8 index = 0; stmt = CharacterDatabase.GetPreparedStatement(CHAR_ADD_GUILD); stmt->setUInt32( index, m_id); stmt->setString(++index, name); stmt->setUInt32(++index, GUID_LOPART(m_leaderGuid)); stmt->setString(++index, m_info); stmt->setString(++index, m_motd); stmt->setUInt64(++index, uint32(m_createdDate)); stmt->setUInt32(++index, m_emblemInfo.GetStyle()); stmt->setUInt32(++index, m_emblemInfo.GetColor()); stmt->setUInt32(++index, m_emblemInfo.GetBorderStyle()); stmt->setUInt32(++index, m_emblemInfo.GetBorderColor()); stmt->setUInt32(++index, m_emblemInfo.GetBackgroundColor()); stmt->setUInt64(++index, m_bankMoney); trans->Append(stmt); CharacterDatabase.CommitTransaction(trans); // Create default ranks _CreateDefaultGuildRanks(pLeaderSession->GetSessionDbLocaleIndex()); // Add guildmaster bool ret = AddMember(m_leaderGuid, GR_GUILDMASTER); if (ret) // Call scripts on successful create sScriptMgr->OnGuildCreate(this, pLeader, name); return ret; } // Disbands guild and deletes all related data from database void Guild::Disband() { // Call scripts before guild data removed from database sScriptMgr->OnGuildDisband(this); _BroadcastEvent(GE_DISBANDED, 0); // Remove all members 
    // DeleteMember erases from m_members, so loop until the map drains;
    // iterators are re-fetched each pass because deletion invalidates them.
    while (!m_members.empty())
    {
        Members::const_iterator itr = m_members.begin();
        DeleteMember(itr->second->GetGUID(), true);
    }

    // Purge every guild-related table in a single transaction.
    PreparedStatement* stmt = NULL;
    SQLTransaction trans = CharacterDatabase.BeginTransaction();

    stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD);
    stmt->setUInt32(0, m_id);
    trans->Append(stmt);

    stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_RANKS);
    stmt->setUInt32(0, m_id);
    trans->Append(stmt);

    stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_BANK_TABS);
    stmt->setUInt32(0, m_id);
    trans->Append(stmt);

    // Free bank tab used memory and delete items stored in them
    _DeleteBankItems(trans, true);

    stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_BANK_ITEMS);
    stmt->setUInt32(0, m_id);
    trans->Append(stmt);

    stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_BANK_RIGHTS);
    stmt->setUInt32(0, m_id);
    trans->Append(stmt);

    stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_BANK_EVENTLOGS);
    stmt->setUInt32(0, m_id);
    trans->Append(stmt);

    stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_EVENTLOGS);
    stmt->setUInt32(0, m_id);
    trans->Append(stmt);

    CharacterDatabase.CommitTransaction(trans);
    sGuildMgr->RemoveGuild(m_id);
}

///////////////////////////////////////////////////////////////////////////////
// HANDLE CLIENT COMMANDS

// Builds and sends SMSG_GUILD_ROSTER (MOTD, info, ranks, member list).
// With a session: sent to that session only; with NULL: broadcast to the guild.
void Guild::HandleRoster(WorldSession* session /*= NULL*/)
{
    // Guess size
    WorldPacket data(SMSG_GUILD_ROSTER, (4 + m_motd.length() + 1 + m_info.length() + 1 + 4 + _GetRanksSize() * (4 + 4 + GUILD_BANK_MAX_TABS * (4 + 4)) + m_members.size() * 50));
    data << uint32(m_members.size());
    data << m_motd;
    data << m_info;

    data << uint32(_GetRanksSize());
    for (Ranks::const_iterator ritr = m_ranks.begin(); ritr != m_ranks.end(); ++ritr)
        ritr->WritePacket(data);

    for (Members::const_iterator itr = m_members.begin(); itr != m_members.end(); ++itr)
        itr->second->WritePacket(data);

    if (session)
        session->SendPacket(&data);
    else
        BroadcastPacket(&data);
    sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent (SMSG_GUILD_ROSTER)");
}

// Answers a client guild query with id, name, rank names and emblem data.
void Guild::HandleQuery(WorldSession* session)
{
    WorldPacket data(SMSG_GUILD_QUERY_RESPONSE, 8 * 32 + 200);  // Guess size

    data << uint32(m_id);
    data << m_name;

    for (uint8 i = 0 ; i < GUILD_RANKS_MAX_COUNT; ++i)          // Alwayse show 10 ranks
    {
        if (i < _GetRanksSize())
            data << m_ranks[i].GetName();
        else
            data << uint8(0);                                   // Empty string
    }

    m_emblemInfo.WritePacket(data);
    data << uint32(0);                                          // Something new in WotLK

    session->SendPacket(&data);
    sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent (SMSG_GUILD_QUERY_RESPONSE)");
}

// Updates the guild MOTD (rights-checked), persists it and broadcasts GE_MOTD.
void Guild::HandleSetMOTD(WorldSession* session, const std::string& motd)
{
    // No-op if unchanged — avoids a DB write and a broadcast
    if (m_motd == motd)
        return;

    // Player must have rights to set MOTD
    if (!_HasRankRight(session->GetPlayer(), GR_RIGHT_SETMOTD))
        SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_PERMISSIONS);
    else
    {
        m_motd = motd;

        sScriptMgr->OnGuildMOTDChanged(this, motd);

        PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SET_GUILD_MOTD);
        stmt->setString(0, motd);
        stmt->setUInt32(1, m_id);
        CharacterDatabase.Execute(stmt);

        _BroadcastEvent(GE_MOTD, 0, motd.c_str());
    }
}

// Updates the guild info text (rights-checked) and persists it.
void Guild::HandleSetInfo(WorldSession* session, const std::string& info)
{
    if (m_info == info)
        return;

    // Player must have rights to set guild's info
    if (!_HasRankRight(session->GetPlayer(), GR_RIGHT_MODIFY_GUILD_INFO))
        SendCommandResult(session, GUILD_CREATE_S, ERR_GUILD_PERMISSIONS);
    else
    {
        m_info = info;

        sScriptMgr->OnGuildInfoChanged(this, info);

        PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SET_GUILD_INFO);
        stmt->setString(0, info);
        stmt->setUInt32(1, m_id);
        CharacterDatabase.Execute(stmt);
    }
}

// Replaces the guild emblem: leader-only, costs EMBLEM_PRICE.
void Guild::HandleSetEmblem(WorldSession* session, const EmblemInfo& emblemInfo)
{
    Player* player = session->GetPlayer();
    if (!_IsLeader(player))
        // "Only guild leaders can create emblems."
        SendSaveEmblemResult(session, ERR_GUILDEMBLEM_NOTGUILDMASTER);
    else if (!player->HasEnoughMoney(EMBLEM_PRICE))
        // "You can't afford to do that."
        SendSaveEmblemResult(session, ERR_GUILDEMBLEM_NOTENOUGHMONEY);
    else
    {
        // Charge the leader, then persist and acknowledge the new emblem
        player->ModifyMoney(-int32(EMBLEM_PRICE));

        m_emblemInfo = emblemInfo;
        m_emblemInfo.SaveToDB(m_id);

        // "Guild Emblem saved."
        SendSaveEmblemResult(session, ERR_GUILDEMBLEM_SUCCESS);

        // Re-send the query response so the client picks up the new emblem
        HandleQuery(session);
    }
}

// Transfers leadership to the named member; old leader is demoted to officer.
void Guild::HandleSetLeader(WorldSession* session, const std::string& name)
{
    Player* player = session->GetPlayer();
    // Only leader can assign new leader
    if (!_IsLeader(player))
        SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_PERMISSIONS);
    // Old leader must be a member of guild
    else if (Member* pOldLeader = GetMember(player->GetGUID()))
    {
        // New leader must be a member of guild
        if (Member* pNewLeader = GetMember(session, name))
        {
            _SetLeaderGUID(pNewLeader);
            pOldLeader->ChangeRank(GR_OFFICER);
            _BroadcastEvent(GE_LEADER_CHANGED, 0, player->GetName(), name.c_str());
        }
    }
    else
        SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_PERMISSIONS);
}

// Renames a bank tab / changes its icon, then pushes the updated tab list
// and content to the requesting client.
void Guild::HandleSetBankTabInfo(WorldSession* session, uint8 tabId, const std::string& name, const std::string& icon)
{
    if (BankTab* pTab = GetBankTab(tabId))
    {
        pTab->SetInfo(name, icon);
        SendBankTabsInfo(session);
        _SendBankContent(session, tabId);
    }
}

// Sets a member's public or officer note (rights depend on `officer`).
void Guild::HandleSetMemberNote(WorldSession* session, const std::string& name, const std::string& note, bool officer)
{
    // Player must have rights to set public/officer note
    if (!_HasRankRight(session->GetPlayer(), officer ? GR_RIGHT_EOFFNOTE : GR_RIGHT_EPNOTE))
        SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_PERMISSIONS);
    // Noted player must be a member of guild
    else if (Member* pMember = GetMember(session, name))
    {
        if (officer)
            pMember->SetOfficerNote(note);
        else
            pMember->SetPublicNote(note);
        // Refresh the roster so the requester sees the new note
        HandleRoster(session);
    }
}

// Leader-only: updates a rank's name, rights, daily money limit and per-tab
// bank rights/slots, then rebroadcasts query + roster.
void Guild::HandleSetRankInfo(WorldSession* session, uint8 rankId, const std::string& name, uint32 rights, uint32 moneyPerDay, GuildBankRightsAndSlotsVec rightsAndSlots)
{
    // Only leader can modify ranks
    if (!_IsLeader(session->GetPlayer()))
        SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_PERMISSIONS);
    else if (RankInfo* rankInfo = GetRankInfo(rankId))
    {
        sLog->outDebug(LOG_FILTER_GUILD, "WORLD: Changed RankName to '%s', rights to 0x%08X", name.c_str(), rights);

        rankInfo->SetName(name);
        rankInfo->SetRights(rights);
        _SetRankBankMoneyPerDay(rankId, moneyPerDay);

        // rightsAndSlots entries map positionally onto tab ids 0..N-1
        uint8 tabId = 0;
        for (GuildBankRightsAndSlotsVec::const_iterator itr = rightsAndSlots.begin(); itr != rightsAndSlots.end(); ++itr)
            _SetRankBankTabRightsAndSlots(rankId, tabId++, *itr);

        HandleQuery(session);
        HandleRoster();                                     // Broadcast for tab rights update
    }
}

// Purchases the next bank tab for the guild; the buyer's rank gets full
// rights on the new tab and unlimited money withdrawal.
void Guild::HandleBuyBankTab(WorldSession* session, uint8 tabId)
{
    // Client must be buying exactly the next unpurchased tab
    if (tabId != _GetPurchasedTabsSize())
        return;

    uint32 tabCost = _GetGuildBankTabPrice(tabId) * GOLD;
    if (!tabCost)
        return;

    Player* player = session->GetPlayer();
    if (!player->HasEnoughMoney(tabCost))                   // Should not happen, this is checked by client
        return;

    if (!_CreateNewBankTab())
        return;

    player->ModifyMoney(-int32(tabCost));
    _SetRankBankMoneyPerDay(player->GetRank(), uint32(GUILD_WITHDRAW_MONEY_UNLIMITED));
    _SetRankBankTabRightsAndSlots(player->GetRank(), tabId, GuildBankRightsAndSlots(GUILD_BANK_RIGHT_FULL, uint32(GUILD_WITHDRAW_SLOT_UNLIMITED)));
    HandleRoster();                                         // Broadcast for tab rights update
    SendBankTabsInfo(session);
}

// Invites an online player to the guild after a chain of eligibility checks.
void Guild::HandleInviteMember(WorldSession* session, const std::string& name)
{
    Player* pInvitee = sObjectAccessor->FindPlayerByName(name.c_str());
    if
    (!pInvitee)
    {
        // Target must be online to be invited
        SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_PLAYER_NOT_FOUND_S, name);
        return;
    }

    Player* player = session->GetPlayer();
    // Do not show invitations from ignored players
    if (pInvitee->GetSocial()->HasIgnore(player->GetGUIDLow()))
        return;

    // Cross-faction invites only if the server config allows them
    if (!sWorld->getBoolConfig(CONFIG_ALLOW_TWO_SIDE_INTERACTION_GUILD) && pInvitee->GetTeam() != player->GetTeam())
    {
        SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_NOT_ALLIED, name);
        return;
    }

    // Invited player cannot be in another guild
    if (pInvitee->GetGuildId())
    {
        SendCommandResult(session, GUILD_INVITE_S, ERR_ALREADY_IN_GUILD_S, name);
        return;
    }

    // Invited player cannot be invited
    if (pInvitee->GetGuildIdInvited())
    {
        SendCommandResult(session, GUILD_INVITE_S, ERR_ALREADY_INVITED_TO_GUILD_S, name);
        return;
    }

    // Inviting player must have rights to invite
    if (!_HasRankRight(player, GR_RIGHT_INVITE))
    {
        SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_PERMISSIONS);
        return;
    }

    sLog->outDebug(LOG_FILTER_GUILD, "Player %s invited %s to join his Guild", player->GetName(), name.c_str());

    pInvitee->SetGuildIdInvited(m_id);
    _LogEvent(GUILD_EVENT_LOG_INVITE_PLAYER, player->GetGUIDLow(), pInvitee->GetGUIDLow());

    WorldPacket data(SMSG_GUILD_INVITE, 8 + 10);            // Guess size
    data << player->GetName();
    data << m_name;
    pInvitee->GetSession()->SendPacket(&data);
    sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent (SMSG_GUILD_INVITE)");
}

// Handles a player accepting a pending guild invitation.
void Guild::HandleAcceptMember(WorldSession* session)
{
    Player* player = session->GetPlayer();
    // Same cross-faction gate as the invite itself, re-checked on accept
    if (!sWorld->getBoolConfig(CONFIG_ALLOW_TWO_SIDE_INTERACTION_GUILD) && player->GetTeam() != sObjectMgr->GetPlayerTeamByGUID(GetLeaderGUID()))
        return;

    if (AddMember(player->GetGUID()))
    {
        _LogEvent(GUILD_EVENT_LOG_JOIN_GUILD, player->GetGUIDLow());
        _BroadcastEvent(GE_JOINED, player->GetGUID(), player->GetName());
    }
}

// Handles a member leaving voluntarily. A leader may only "leave" by
// disbanding, and only when no other members remain.
void Guild::HandleLeaveMember(WorldSession* session)
{
    Player* player = session->GetPlayer();
    // If leader is leaving
    if (_IsLeader(player))
    {
        if (m_members.size() > 1)
            // Leader cannot leave if he is not the last member
            SendCommandResult(session, GUILD_QUIT_S, ERR_GUILD_LEADER_LEAVE);
        else
            // Guild is disbanded if leader leaves.
            Disband();
    }
    else
    {
        DeleteMember(player->GetGUID(), false, false);

        _LogEvent(GUILD_EVENT_LOG_LEAVE_GUILD, player->GetGUIDLow());
        _BroadcastEvent(GE_LEFT, player->GetGUID(), player->GetName());

        SendCommandResult(session, GUILD_QUIT_S, ERR_PLAYER_NO_MORE_IN_GUILD, m_name);
    }
}

// Kicks the named member (rights-checked; cannot kick the leader or anyone
// of equal/higher rank than the kicker).
void Guild::HandleRemoveMember(WorldSession* session, const std::string& name)
{
    Player* player = session->GetPlayer();
    // Player must have rights to remove members
    if (!_HasRankRight(player, GR_RIGHT_REMOVE))
        SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_PERMISSIONS);
    // Removed player must be a member of guild
    else if (Member* pMember = GetMember(session, name))
    {
        // Leader cannot be removed
        if (pMember->IsRank(GR_GUILDMASTER))
            SendCommandResult(session, GUILD_QUIT_S, ERR_GUILD_LEADER_LEAVE);
        // Do not allow to remove player with the same rank or higher
        else if (pMember->IsRankNotLower(player->GetRank()))
            SendCommandResult(session, GUILD_QUIT_S, ERR_GUILD_RANK_TOO_HIGH_S, name);
        else
        {
            uint64 guid = pMember->GetGUID();
            // After call to DeleteMember pointer to member becomes invalid
            DeleteMember(guid, false, true);
            _LogEvent(GUILD_EVENT_LOG_UNINVITE_PLAYER, player->GetGUIDLow(), GUID_LOPART(guid));
            _BroadcastEvent(GE_REMOVED, 0, name.c_str(), player->GetName());
        }
    }
}

// Promotes (demote == false) or demotes (demote == true) the named member
// by one rank step, enforcing relative-rank rules.
void Guild::HandleUpdateMemberRank(WorldSession* session, const std::string& name, bool demote)
{
    Player* player = session->GetPlayer();
    // Player must have rights to promote
    if (!_HasRankRight(player, demote ?
        GR_RIGHT_DEMOTE : GR_RIGHT_PROMOTE))
        SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_PERMISSIONS);
    // Promoted player must be a member of guild
    else if (Member* pMember = GetMember(session, name))
    {
        // Player cannot promote himself
        if (pMember->IsSamePlayer(player->GetGUID()))
        {
            SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_NAME_INVALID);
            return;
        }

        if (demote)
        {
            // Player can demote only lower rank members
            if (pMember->IsRankNotLower(player->GetRank()))
            {
                SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_RANK_TOO_HIGH_S, name);
                return;
            }
            // Lowest rank cannot be demoted
            if (pMember->GetRankId() >= _GetLowestRankId())
            {
                SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_RANK_TOO_LOW_S, name);
                return;
            }
        }
        else
        {
            // Allow to promote only to lower rank than member's rank
            // pMember->GetRank() + 1 is the highest rank that current player can promote to
            if (pMember->IsRankNotLower(player->GetRank() + 1))
            {
                SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_RANK_TOO_HIGH_S, name);
                return;
            }
        }

        // When promoting player, rank is decreased, when demoting - increased.
        // NOTE: "-1" on a uint32 relies on well-defined unsigned wraparound;
        // the guards above ensure GetRankId() > 0 when promoting, so the
        // result is always a valid rank id.
        uint32 newRankId = pMember->GetRankId() + (demote ? 1 : -1);
        pMember->ChangeRank(newRankId);
        _LogEvent(demote ? GUILD_EVENT_LOG_DEMOTE_PLAYER : GUILD_EVENT_LOG_PROMOTE_PLAYER, player->GetGUIDLow(), GUID_LOPART(pMember->GetGUID()), newRankId);
        _BroadcastEvent(demote ? GE_DEMOTION : GE_PROMOTION, 0, player->GetName(), name.c_str(), _GetRankName(newRankId).c_str());
    }
}

// Leader-only: appends a new lowest rank with default chat rights.
void Guild::HandleAddNewRank(WorldSession* session, const std::string& name)
{
    if (_GetRanksSize() >= GUILD_RANKS_MAX_COUNT)
        return;

    // Only leader can add new rank
    if (!_IsLeader(session->GetPlayer()))
        SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_PERMISSIONS);
    else
    {
        _CreateRank(name, GR_RIGHT_GCHATLISTEN | GR_RIGHT_GCHATSPEAK);

        HandleQuery(session);
        HandleRoster();                                     // Broadcast for tab rights update
    }
}

// Leader-only: deletes the lowest rank (DB rows first, then in-memory).
void Guild::HandleRemoveLowestRank(WorldSession* session)
{
    // Cannot remove rank if total count is minimum allowed by the client
    if (_GetRanksSize() <= GUILD_RANKS_MIN_COUNT)
        return;

    // Only leader can delete ranks
    if (!_IsLeader(session->GetPlayer()))
        SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_PERMISSIONS);
    else
    {
        uint8 rankId = _GetLowestRankId();
        // Delete bank rights for rank
        PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_BANK_RIGHTS_FOR_RANK);
        stmt->setUInt32(0, m_id);
        stmt->setUInt8 (1, rankId);
        CharacterDatabase.Execute(stmt);
        // Delete rank
        stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_LOWEST_RANK);
        stmt->setUInt32(0, m_id);
        stmt->setUInt8 (1, rankId);
        CharacterDatabase.Execute(stmt);

        m_ranks.pop_back();

        HandleQuery(session);
        HandleRoster();                                     // Broadcast for tab rights update
    }
}

// Moves `amount` copper from the player to the guild bank in one transaction.
void Guild::HandleMemberDepositMoney(WorldSession* session, uint32 amount)
{
    if (!_GetPurchasedTabsSize())
        return;                                             // No guild bank tabs - no money in bank

    Player* player = session->GetPlayer();

    // Call script after validation and before money transfer.
    sScriptMgr->OnGuildMemberDepositMoney(this, player, amount);

    SQLTransaction trans = CharacterDatabase.BeginTransaction();
    // Add money to bank
    _ModifyBankMoney(trans, amount, true);
    // Remove money from player
    player->ModifyMoney(-int32(amount));
    player->SaveGoldToDB(trans);
    // Log GM action (TODO: move to scripts)
    if (!AccountMgr::IsPlayerAccount(player->GetSession()->GetSecurity()) && sWorld->getBoolConfig(CONFIG_GM_LOG_TRADE))
    {
        sLog->outCommand(player->GetSession()->GetAccountId(), "GM %s (Account: %u) deposit money (Amount: %u) to guild bank (Guild ID %u)", player->GetName(), player->GetSession()->GetAccountId(), amount, m_id);
    }
    // Log guild bank event
    _LogBankEvent(trans, GUILD_BANK_LOG_DEPOSIT_MONEY, uint8(0), player->GetGUIDLow(), amount);
    CharacterDatabase.CommitTransaction(trans);

    // Push the updated bank state back to the depositor
    SendBankTabsInfo(session);
    _SendBankContent(session, 0);
    _SendBankMoneyUpdate(session);
}

// Withdraws `amount` from the guild bank for the player, either as gold or as
// a repair payment. Returns false if any precondition (tabs, funds, rights,
// personal daily limit) fails; money is only moved after all checks pass.
bool Guild::HandleMemberWithdrawMoney(WorldSession* session, uint32 amount, bool repair)
{
    if (!_GetPurchasedTabsSize())
        return false;                                       // No guild bank tabs - no money

    if (m_bankMoney < amount)                               // Not enough money in bank
        return false;

    Player* player = session->GetPlayer();
    if (!_HasRankRight(player, repair ? GR_RIGHT_WITHDRAW_REPAIR : GR_RIGHT_WITHDRAW_GOLD))
        return false;

    uint32 remainingMoney = _GetMemberRemainingMoney(player->GetGUID());
    if (!remainingMoney)
        return false;

    if (remainingMoney < amount)
        return false;

    // Call script after validation and before money transfer.
    sScriptMgr->OnGuildMemberWitdrawMoney(this, player, amount, repair);

    SQLTransaction trans = CharacterDatabase.BeginTransaction();
    // Update remaining money amount (skip for unlimited allowance)
    if (remainingMoney < uint32(GUILD_WITHDRAW_MONEY_UNLIMITED))
        if (Member* pMember = GetMember(player->GetGUID()))
            pMember->DecreaseBankRemainingValue(trans, GUILD_BANK_MAX_TABS, amount);
    // Remove money from bank
    _ModifyBankMoney(trans, amount, false);
    // Add money to player (if required)
    if (!repair)
    {
        player->ModifyMoney(amount);
        player->SaveGoldToDB(trans);
    }
    // Log guild bank event
    _LogBankEvent(trans, repair ? GUILD_BANK_LOG_REPAIR_MONEY : GUILD_BANK_LOG_WITHDRAW_MONEY, uint8(0), player->GetGUIDLow(), amount);
    CharacterDatabase.CommitTransaction(trans);

    SendMoneyInfo(session);
    if (!repair)
    {
        SendBankTabsInfo(session);
        _SendBankContent(session, 0);
        _SendBankMoneyUpdate(session);
    }
    return true;
}

// Snapshots the member's stats and logout time, then announces sign-off.
void Guild::HandleMemberLogout(WorldSession* session)
{
    Player* player = session->GetPlayer();
    if (Member* pMember = GetMember(player->GetGUID()))
    {
        pMember->SetStats(player);
        pMember->UpdateLogoutTime();
    }
    _BroadcastEvent(GE_SIGNED_OFF, player->GetGUID(), player->GetName());
}

// Leader-only wrapper around Disband().
void Guild::HandleDisband(WorldSession* session)
{
    // Only leader can disband guild
    if (!_IsLeader(session->GetPlayer()))
        Guild::SendCommandResult(session, GUILD_INVITE_S, ERR_GUILD_PERMISSIONS);
    else
    {
        Disband();
        sLog->outDebug(LOG_FILTER_GUILD, "WORLD: Guild Successfully Disbanded");
    }
}

///////////////////////////////////////////////////////////////////////////////
// Send data to client

// Sends SMSG_GUILD_INFO: name, creation date, member and account counts.
void Guild::SendInfo(WorldSession* session) const
{
    WorldPacket data(SMSG_GUILD_INFO, m_name.size() + 4 + 4 + 4);
    data << m_name;
    data << secsToTimeBitFields(m_createdDate);             // 3.x (prev. year + month + day)
    data << uint32(m_members.size());                       // Number of members
    data << m_accountsNumber;                               // Number of accounts

    session->SendPacket(&data);
    sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent (SMSG_GUILD_INFO)");
}

// Sends the guild event log (joins, promotions, ...) to the requester.
void Guild::SendEventLog(WorldSession* session) const
{
    WorldPacket data(MSG_GUILD_EVENT_LOG_QUERY, 1 + m_eventLog->GetSize() * (1 + 8 + 4));
    m_eventLog->WritePacket(data);
    session->SendPacket(&data);
    sLog->outDebug(LOG_FILTER_GUILD, "WORLD: Sent (MSG_GUILD_EVENT_LOG_QUERY)");
}

// Sends the bank event log for one tab; tabId == GUILD_BANK_MAX_TABS
// addresses the money log.
void Guild::SendBankLog(WorldSession* session, uint8 tabId) const
{
    // GUILD_BANK_MAX_TABS send by client for money log
    if (tabId < _GetPurchasedTabsSize() || tabId == GUILD_BANK_MAX_TABS)
    {
        const LogHolder* pLog = m_bankEventLog[tabId];
        WorldPacket data(MSG_GUILD_BANK_LOG_QUERY, pLog->GetSize() * (4 * 4 + 1) + 1 + 1);
        data << uint8(tabId);
        pLog->WritePacket(data);
        session->SendPacket(&data);
        sLog->outDebug(LOG_FILTER_GUILD, "WORLD: Sent (MSG_GUILD_BANK_LOG_QUERY)");
    }
}

// Sends money info plus the full content of one purchased tab.
void Guild::SendBankTabData(WorldSession* session, uint8 tabId) const
{
    if (tabId < _GetPurchasedTabsSize())
    {
        SendMoneyInfo(session);
        _SendBankContent(session, tabId);
    }
}

// Sends SMSG_GUILD_BANK_LIST with tab metadata only (no item content).
void Guild::SendBankTabsInfo(WorldSession* session) const
{
    WorldPacket data(SMSG_GUILD_BANK_LIST, 500);
    data << uint64(m_bankMoney);
    data << uint8(0);                                       // TabInfo packet must be for tabId 0
    data << uint32(_GetMemberRemainingSlots(session->GetPlayer()->GetGUID(), 0));
    data << uint8(1);                                       // Tell client that this packet includes tab info

    data << uint8(_GetPurchasedTabsSize());                 // Number of tabs
    for (uint8 i = 0; i < _GetPurchasedTabsSize(); ++i)
        m_bankTabs[i]->WriteInfoPacket(data);

    data << uint8(0);                                       // Do not send tab content
    session->SendPacket(&data);
    sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent (SMSG_GUILD_BANK_LIST)");
}

// Sends the text attached to a bank tab, if the tab exists.
void Guild::SendBankTabText(WorldSession* session, uint8 tabId) const
{
    if (const BankTab* pTab = GetBankTab(tabId))
        pTab->SendText(this, session);
}

// Sends the requesting player's rank rights and per-tab bank permissions.
void Guild::SendPermissions(WorldSession* session) const
{
    uint64 guid = session->GetPlayer()->GetGUID();
    uint8 rankId = session->GetPlayer()->GetRank();

    WorldPacket data(MSG_GUILD_PERMISSIONS, 4 * 15 + 1);
    data << uint32(rankId);
    data << uint32(_GetRankRights(rankId));
    data << uint32(_GetMemberRemainingMoney(guid));
    data << uint8 (_GetPurchasedTabsSize());
    // Why sending all info when not all tabs are purchased???
    for (uint8 tabId = 0; tabId < GUILD_BANK_MAX_TABS; ++tabId)
    {
        data << uint32(_GetRankBankTabRights(rankId, tabId));
        data << uint32(_GetMemberRemainingSlots(guid, tabId));
    }
    session->SendPacket(&data);
    sLog->outDebug(LOG_FILTER_GUILD, "WORLD: Sent (MSG_GUILD_PERMISSIONS)");
}

// Sends the player's remaining daily bank-money allowance.
void Guild::SendMoneyInfo(WorldSession* session) const
{
    WorldPacket data(MSG_GUILD_BANK_MONEY_WITHDRAWN, 4);
    data << uint32(_GetMemberRemainingMoney(session->GetPlayer()->GetGUID()));
    session->SendPacket(&data);
    sLog->outDebug(LOG_FILTER_GUILD, "WORLD: Sent MSG_GUILD_BANK_MONEY_WITHDRAWN");
}

// On-login sequence: MOTD event, bank tab info, and a sign-on broadcast.
void Guild::SendLoginInfo(WorldSession* session) const
{
    WorldPacket data(SMSG_GUILD_EVENT, 1 + 1 + m_motd.size() + 1);
    data << uint8(GE_MOTD);
    data << uint8(1);
    data << m_motd;
    session->SendPacket(&data);

    sLog->outDebug(LOG_FILTER_GUILD, "WORLD: Sent guild MOTD (SMSG_GUILD_EVENT)");

    SendBankTabsInfo(session);

    _BroadcastEvent(GE_SIGNED_ON, session->GetPlayer()->GetGUID(), session->GetPlayer()->GetName());
}

///////////////////////////////////////////////////////////////////////////////
// Loading methods

// Loads the base guild row. Field indices must match the guild SELECT
// statement's column order.
bool Guild::LoadFromDB(Field* fields)
{
    m_id = fields[0].GetUInt32();
    m_name = fields[1].GetString();
    m_leaderGuid = MAKE_NEW_GUID(fields[2].GetUInt32(), 0, HIGHGUID_PLAYER);
    m_emblemInfo.LoadFromDB(fields);
    m_info = fields[8].GetString();
    m_motd = fields[9].GetString();
    m_createdDate = time_t(fields[10].GetUInt32());
    m_bankMoney = fields[11].GetUInt64();

    // Clamp DB value to the supported tab count
    uint8 purchasedTabs = uint8(fields[12].GetUInt32());
    if (purchasedTabs > GUILD_BANK_MAX_TABS)
        purchasedTabs = GUILD_BANK_MAX_TABS;

    m_bankTabs.resize(purchasedTabs);
    for (uint8 i = 0; i <
        purchasedTabs; ++i)
        m_bankTabs[i] = new BankTab(m_id, i);

    _CreateLogHolders();
    return true;
}

// Loads one rank row; ranks are expected in ascending id order.
void Guild::LoadRankFromDB(Field* fields)
{
    RankInfo rankInfo(m_id);
    rankInfo.LoadFromDB(fields);
    m_ranks.push_back(rankInfo);
}

// Loads one member row. On failure the stale DB row is deleted and the
// partially built Member is freed.
bool Guild::LoadMemberFromDB(Field* fields)
{
    uint32 lowguid = fields[1].GetUInt32();
    Member *pMember = new Member(m_id, MAKE_NEW_GUID(lowguid, 0, HIGHGUID_PLAYER), fields[2].GetUInt8());
    if (!pMember->LoadFromDB(fields))
    {
        _DeleteMemberFromDB(lowguid);
        delete pMember;
        return false;
    }
    m_members[lowguid] = pMember;
    return true;
}

// Loads one bank-rights row (per rank, per tab); false = do not write back.
void Guild::LoadBankRightFromDB(Field* fields)
{
    // rights             slots
    GuildBankRightsAndSlots rightsAndSlots(fields[3].GetUInt8(), fields[4].GetUInt32());
    // rankId              tabId
    _SetRankBankTabRightsAndSlots(fields[2].GetUInt8(), fields[1].GetUInt8(), rightsAndSlots, false);
}

// Loads one guild event log row; returns false once the holder is full.
bool Guild::LoadEventLogFromDB(Field* fields)
{
    if (m_eventLog->CanInsert())
    {
        m_eventLog->LoadEvent(new EventLogEntry(
            m_id,                                       // guild id
            fields[1].GetUInt32(),                      // guid
            time_t(fields[6].GetUInt32()),              // timestamp
            GuildEventLogTypes(fields[2].GetUInt8()),   // event type
            fields[3].GetUInt32(),                      // player guid 1
            fields[4].GetUInt32(),                      // player guid 2
            fields[5].GetUInt8()));                     // rank
        return true;
    }
    return false;
}

// Loads one bank event log row, routing money events to the dedicated money
// log slot and rejecting rows whose event type contradicts their tab.
bool Guild::LoadBankEventLogFromDB(Field* fields)
{
    uint8 dbTabId = fields[1].GetUInt8();
    bool isMoneyTab = (dbTabId == GUILD_BANK_MONEY_LOGS_TAB);
    if (dbTabId < _GetPurchasedTabsSize() || isMoneyTab)
    {
        // Money log lives at in-memory index GUILD_BANK_MAX_TABS
        uint8 tabId = isMoneyTab ? uint8(GUILD_BANK_MAX_TABS) : dbTabId;
        LogHolder* pLog = m_bankEventLog[tabId];
        if (pLog->CanInsert())
        {
            uint32 guid = fields[2].GetUInt32();
            GuildBankEventLogTypes eventType = GuildBankEventLogTypes(fields[3].GetUInt8());
            if (BankEventLogEntry::IsMoneyEvent(eventType))
            {
                if (!isMoneyTab)
                {
                    sLog->outError("GuildBankEventLog ERROR: MoneyEvent(LogGuid: %u, Guild: %u) does not belong to money tab (%u), ignoring...", guid, m_id, dbTabId);
                    return false;
                }
            }
            else if (isMoneyTab)
            {
                sLog->outError("GuildBankEventLog ERROR: non-money event (LogGuid: %u, Guild: %u) belongs to money tab, ignoring...", guid, m_id);
                return false;
            }
            pLog->LoadEvent(new BankEventLogEntry(
                m_id,                                   // guild id
                guid,                                   // guid
                time_t(fields[8].GetUInt32()),          // timestamp
                dbTabId,                                // tab id
                eventType,                              // event type
                fields[4].GetUInt32(),                  // player guid
                fields[5].GetUInt32(),                  // item or money
                fields[6].GetUInt16(),                  // itam stack count
                fields[7].GetUInt8()));                 // dest tab id
        }
    }
    return true;
}

// Loads one bank tab row; skips rows for tabs the guild has not purchased.
bool Guild::LoadBankTabFromDB(Field* fields)
{
    uint32 tabId = fields[1].GetUInt8();
    if (tabId >= _GetPurchasedTabsSize())
    {
        sLog->outError("Invalid tab (tabId: %u) in guild bank, skipped.", tabId);
        return false;
    }
    return m_bankTabs[tabId]->LoadFromDB(fields);
}

// Loads one bank item row into its tab; skips items on unpurchased tabs.
bool Guild::LoadBankItemFromDB(Field* fields)
{
    uint8 tabId = fields[12].GetUInt8();
    if (tabId >= _GetPurchasedTabsSize())
    {
        sLog->outError("Invalid tab for item (GUID: %u, id: #%u) in guild bank, skipped.",
            fields[14].GetUInt32(), fields[15].GetUInt32());
        return false;
    }
    return m_bankTabs[tabId]->LoadItemFromDB(fields);
}

// Validates guild data loaded from database. Returns false if guild should be deleted.
bool Guild::Validate()
{
    // Validate ranks data
    // GUILD RANKS represent a sequence starting from 0 = GUILD_MASTER (ALL PRIVILEGES) to max 9 (lowest privileges).
// The lower rank id is considered higher rank - so promotion does rank-- and demotion does rank++ // Between ranks in sequence cannot be gaps - so 0, 1, 2, 4 is impossible // Min ranks count is 5 and max is 10. bool broken_ranks = false; if (_GetRanksSize() < GUILD_RANKS_MIN_COUNT || _GetRanksSize() > GUILD_RANKS_MAX_COUNT) { sLog->outError("Guild %u has invalid number of ranks, creating new...", m_id); broken_ranks = true; } else { for (uint8 rankId = 0; rankId < _GetRanksSize(); ++rankId) { RankInfo* rankInfo = GetRankInfo(rankId); if (rankInfo->GetId() != rankId) { sLog->outError("Guild %u has broken rank id %u, creating default set of ranks...", m_id, rankId); broken_ranks = true; } } } if (broken_ranks) { m_ranks.clear(); _CreateDefaultGuildRanks(DEFAULT_LOCALE); } // Validate members' data for (Members::iterator itr = m_members.begin(); itr != m_members.end(); ++itr) if (itr->second->GetRankId() > _GetRanksSize()) itr->second->ChangeRank(_GetLowestRankId()); // Repair the structure of the guild. // If the guildmaster doesn't exist or isn't member of the guild // attempt to promote another member. 
Member* pLeader = GetMember(m_leaderGuid); if (!pLeader) { DeleteMember(m_leaderGuid); // If no more members left, disband guild if (m_members.empty()) { Disband(); return false; } } else if (!pLeader->IsRank(GR_GUILDMASTER)) _SetLeaderGUID(pLeader); // Check config if multiple guildmasters are allowed if (!ConfigMgr::GetBoolDefault("Guild.AllowMultipleGuildMaster", 0)) for (Members::iterator itr = m_members.begin(); itr != m_members.end(); ++itr) if (itr->second->GetRankId() == GR_GUILDMASTER && !itr->second->IsSamePlayer(m_leaderGuid)) itr->second->ChangeRank(GR_OFFICER); _UpdateAccountsNumber(); return true; } /////////////////////////////////////////////////////////////////////////////// // Broadcasts void Guild::BroadcastToGuild(WorldSession* session, bool officerOnly, const std::string& msg, uint32 language) const { if (session && session->GetPlayer() && _HasRankRight(session->GetPlayer(), officerOnly ? GR_RIGHT_OFFCHATSPEAK : GR_RIGHT_GCHATSPEAK)) { WorldPacket data; ChatHandler::FillMessageData(&data, session, officerOnly ? CHAT_MSG_OFFICER : CHAT_MSG_GUILD, language, NULL, 0, msg.c_str(), NULL); for (Members::const_iterator itr = m_members.begin(); itr != m_members.end(); ++itr) if (Player* player = itr->second->FindPlayer()) if (player->GetSession() && _HasRankRight(player, officerOnly ? 
GR_RIGHT_OFFCHATLISTEN : GR_RIGHT_GCHATLISTEN) && !player->GetSocial()->HasIgnore(session->GetPlayer()->GetGUIDLow())) player->GetSession()->SendPacket(&data); } } void Guild::BroadcastPacketToRank(WorldPacket* packet, uint8 rankId) const { for (Members::const_iterator itr = m_members.begin(); itr != m_members.end(); ++itr) if (itr->second->IsRank(rankId)) if (Player* player = itr->second->FindPlayer()) player->GetSession()->SendPacket(packet); } void Guild::BroadcastPacket(WorldPacket* packet) const { for (Members::const_iterator itr = m_members.begin(); itr != m_members.end(); ++itr) if (Player* player = itr->second->FindPlayer()) player->GetSession()->SendPacket(packet); } /////////////////////////////////////////////////////////////////////////////// // Members handling bool Guild::AddMember(uint64 guid, uint8 rankId) { Player* player = ObjectAccessor::FindPlayer(guid); // Player cannot be in guild if (player) { if (player->GetGuildId() != 0) return false; } else if (Player::GetGuildIdFromDB(guid) != 0) return false; // Remove all player signs from another petitions // This will be prevent attempt to join many guilds and corrupt guild data integrity Player::RemovePetitionsAndSigns(guid, GUILD_CHARTER_TYPE); uint32 lowguid = GUID_LOPART(guid); // If rank was not passed, assing lowest possible rank if (rankId == GUILD_RANK_NONE) rankId = _GetLowestRankId(); Member* pMember = new Member(m_id, guid, rankId); if (player) pMember->SetStats(player); else { bool ok = false; // Player must exist PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_LOAD_CHAR_DATA_FOR_GUILD); stmt->setUInt32(0, lowguid); if (PreparedQueryResult result = CharacterDatabase.Query(stmt)) { Field* fields = result->Fetch(); pMember->SetStats( fields[0].GetString(), fields[1].GetUInt8(), fields[2].GetUInt8(), fields[3].GetUInt16(), fields[4].GetUInt32()); ok = pMember->CheckStats(); } if (!ok) { delete pMember; return false; } } m_members[lowguid] = pMember; SQLTransaction 
trans(NULL); pMember->SaveToDB(trans); // If player not in game data in will be loaded from guild tables, so no need to update it! if (player) { player->SetInGuild(m_id); player->SetRank(rankId); player->SetGuildIdInvited(0); } _UpdateAccountsNumber(); // Call scripts if member was succesfully added (and stored to database) sScriptMgr->OnGuildAddMember(this, player, rankId); return true; } void Guild::DeleteMember(uint64 guid, bool isDisbanding, bool isKicked) { uint32 lowguid = GUID_LOPART(guid); Player* player = ObjectAccessor::FindPlayer(guid); // Guild master can be deleted when loading guild and guid doesn't exist in characters table // or when he is removed from guild by gm command if (m_leaderGuid == guid && !isDisbanding) { Member* oldLeader = NULL; Member* newLeader = NULL; for (Guild::Members::iterator i = m_members.begin(); i != m_members.end(); ++i) { if (i->first == lowguid) oldLeader = i->second; else if (!newLeader || newLeader->GetRankId() > i->second->GetRankId()) newLeader = i->second; } if (!newLeader) { Disband(); return; } _SetLeaderGUID(newLeader); // If player not online data in data field will be loaded from guild tabs no need to update it !! if (Player* newLeaderPlayer = newLeader->FindPlayer()) newLeaderPlayer->SetRank(GR_GUILDMASTER); // If leader does not exist (at guild loading with deleted leader) do not send broadcasts if (oldLeader) { _BroadcastEvent(GE_LEADER_CHANGED, 0, oldLeader->GetName().c_str(), newLeader->GetName().c_str()); _BroadcastEvent(GE_LEFT, guid, oldLeader->GetName().c_str()); } } // Call script on remove before member is acutally removed from guild (and database) sScriptMgr->OnGuildRemoveMember(this, player, isDisbanding, isKicked); if (Member* pMember = GetMember(guid)) delete pMember; m_members.erase(lowguid); // If player not online data in data field will be loaded from guild tabs no need to update it !! 
if (player) { player->SetInGuild(0); player->SetRank(0); } _DeleteMemberFromDB(lowguid); if (!isDisbanding) _UpdateAccountsNumber(); } bool Guild::ChangeMemberRank(uint64 guid, uint8 newRank) { if (newRank <= _GetLowestRankId()) // Validate rank (allow only existing ranks) if (Member* pMember = GetMember(guid)) { pMember->ChangeRank(newRank); return true; } return false; } /////////////////////////////////////////////////////////////////////////////// // Bank (items move) void Guild::SwapItems(Player* player, uint8 tabId, uint8 slotId, uint8 destTabId, uint8 destSlotId, uint32 splitedAmount) { if (tabId >= _GetPurchasedTabsSize() || slotId >= GUILD_BANK_MAX_SLOTS || destTabId >= _GetPurchasedTabsSize() || destSlotId >= GUILD_BANK_MAX_SLOTS) return; if (tabId == destTabId && slotId == destSlotId) return; BankMoveItemData from(this, player, tabId, slotId); BankMoveItemData to(this, player, destTabId, destSlotId); _MoveItems(&from, &to, splitedAmount); } void Guild::SwapItemsWithInventory(Player* player, bool toChar, uint8 tabId, uint8 slotId, uint8 playerBag, uint8 playerSlotId, uint32 splitedAmount) { if ((slotId >= GUILD_BANK_MAX_SLOTS && slotId != NULL_SLOT) || tabId >= _GetPurchasedTabsSize()) return; BankMoveItemData bankData(this, player, tabId, slotId); PlayerMoveItemData charData(this, player, playerBag, playerSlotId); if (toChar) _MoveItems(&bankData, &charData, splitedAmount); else _MoveItems(&charData, &bankData, splitedAmount); } /////////////////////////////////////////////////////////////////////////////// // Bank tabs void Guild::SetBankTabText(uint8 tabId, const std::string& text) { if (BankTab* pTab = GetBankTab(tabId)) { pTab->SetText(text); pTab->SendText(this, NULL); } } /////////////////////////////////////////////////////////////////////////////// // Private methods void Guild::_CreateLogHolders() { m_eventLog = new LogHolder(m_id, sWorld->getIntConfig(CONFIG_GUILD_EVENT_LOG_COUNT)); for (uint8 tabId = 0; tabId <= GUILD_BANK_MAX_TABS; ++tabId) 
m_bankEventLog[tabId] = new LogHolder(m_id, sWorld->getIntConfig(CONFIG_GUILD_BANK_EVENT_LOG_COUNT)); } bool Guild::_CreateNewBankTab() { if (_GetPurchasedTabsSize() >= GUILD_BANK_MAX_TABS) return false; uint8 tabId = _GetPurchasedTabsSize(); // Next free id m_bankTabs.push_back(new BankTab(m_id, tabId)); PreparedStatement* stmt = NULL; SQLTransaction trans = CharacterDatabase.BeginTransaction(); stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_BANK_TAB); stmt->setUInt32(0, m_id); stmt->setUInt8 (1, tabId); trans->Append(stmt); stmt = CharacterDatabase.GetPreparedStatement(CHAR_ADD_GUILD_BANK_TAB); stmt->setUInt32(0, m_id); stmt->setUInt8 (1, tabId); trans->Append(stmt); CharacterDatabase.CommitTransaction(trans); return true; } void Guild::_CreateDefaultGuildRanks(LocaleConstant loc) { PreparedStatement* stmt = NULL; stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_RANKS); stmt->setUInt32(0, m_id); CharacterDatabase.Execute(stmt); stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_GUILD_BANK_RIGHTS); stmt->setUInt32(0, m_id); CharacterDatabase.Execute(stmt); _CreateRank(sObjectMgr->GetTrinityString(LANG_GUILD_MASTER, loc), GR_RIGHT_ALL); _CreateRank(sObjectMgr->GetTrinityString(LANG_GUILD_OFFICER, loc), GR_RIGHT_ALL); _CreateRank(sObjectMgr->GetTrinityString(LANG_GUILD_VETERAN, loc), GR_RIGHT_GCHATLISTEN | GR_RIGHT_GCHATSPEAK); _CreateRank(sObjectMgr->GetTrinityString(LANG_GUILD_MEMBER, loc), GR_RIGHT_GCHATLISTEN | GR_RIGHT_GCHATSPEAK); _CreateRank(sObjectMgr->GetTrinityString(LANG_GUILD_INITIATE, loc), GR_RIGHT_GCHATLISTEN | GR_RIGHT_GCHATSPEAK); } void Guild::_CreateRank(const std::string& name, uint32 rights) { if (_GetRanksSize() >= GUILD_RANKS_MAX_COUNT) return; // Ranks represent sequence 0, 1, 2, ... 
where 0 means guildmaster uint8 newRankId = _GetRanksSize(); RankInfo info(m_id, newRankId, name, rights, 0); m_ranks.push_back(info); SQLTransaction trans = CharacterDatabase.BeginTransaction(); for (uint8 i = 0; i < _GetPurchasedTabsSize(); ++i) { // Create bank rights with default values PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_ADD_GUILD_BANK_RIGHT_DEFAULT); stmt->setUInt32(0, m_id); stmt->setUInt8 (1, i); stmt->setUInt8 (2, newRankId); trans->Append(stmt); } info.SaveToDB(trans); CharacterDatabase.CommitTransaction(trans); } // Updates the number of accounts that are in the guild // Player may have many characters in the guild, but with the same account void Guild::_UpdateAccountsNumber() { // We use a set to be sure each element will be unique std::set<uint32> accountsIdSet; for (Members::const_iterator itr = m_members.begin(); itr != m_members.end(); ++itr) accountsIdSet.insert(itr->second->GetAccountId()); m_accountsNumber = accountsIdSet.size(); } // Detects if player is the guild master. // Check both leader guid and player's rank (otherwise multiple feature with // multiple guild masters won't work) bool Guild::_IsLeader(Player* player) const { if (player->GetGUID() == m_leaderGuid) return true; if (const Member* pMember = GetMember(player->GetGUID())) return pMember->IsRank(GR_GUILDMASTER); return false; } void Guild::_DeleteBankItems(SQLTransaction& trans, bool removeItemsFromDB) { for (uint8 tabId = 0; tabId < _GetPurchasedTabsSize(); ++tabId) { m_bankTabs[tabId]->Delete(trans, removeItemsFromDB); delete m_bankTabs[tabId]; m_bankTabs[tabId] = NULL; } m_bankTabs.clear(); } bool Guild::_ModifyBankMoney(SQLTransaction& trans, uint64 amount, bool add) { if (add) m_bankMoney += amount; else { // Check if there is enough money in bank. 
if (m_bankMoney < amount) return false; m_bankMoney -= amount; } PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SET_GUILD_BANK_MONEY); stmt->setUInt64(0, m_bankMoney); stmt->setUInt32(1, m_id); trans->Append(stmt); return true; } void Guild::_SetLeaderGUID(Member* pLeader) { if (!pLeader) return; m_leaderGuid = pLeader->GetGUID(); pLeader->ChangeRank(GR_GUILDMASTER); PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SET_GUILD_LEADER); stmt->setUInt32(0, GUID_LOPART(m_leaderGuid)); stmt->setUInt32(1, m_id); CharacterDatabase.Execute(stmt); } void Guild::_SetRankBankMoneyPerDay(uint8 rankId, uint32 moneyPerDay) { if (RankInfo* rankInfo = GetRankInfo(rankId)) { for (Members::iterator itr = m_members.begin(); itr != m_members.end(); ++itr) if (itr->second->IsRank(rankId)) itr->second->ResetMoneyTime(); rankInfo->SetBankMoneyPerDay(moneyPerDay); } } void Guild::_SetRankBankTabRightsAndSlots(uint8 rankId, uint8 tabId, GuildBankRightsAndSlots rightsAndSlots, bool saveToDB) { if (tabId >= _GetPurchasedTabsSize()) return; if (RankInfo* rankInfo = GetRankInfo(rankId)) { for (Members::iterator itr = m_members.begin(); itr != m_members.end(); ++itr) if (itr->second->IsRank(rankId)) itr->second->ResetTabTimes(); rankInfo->SetBankTabSlotsAndRights(tabId, rightsAndSlots, saveToDB); } } inline std::string Guild::_GetRankName(uint8 rankId) const { if (const RankInfo* rankInfo = GetRankInfo(rankId)) return rankInfo->GetName(); return "<unknown>"; } inline uint32 Guild::_GetRankRights(uint8 rankId) const { if (const RankInfo* rankInfo = GetRankInfo(rankId)) return rankInfo->GetRights(); return 0; } inline uint32 Guild::_GetRankBankMoneyPerDay(uint8 rankId) const { if (const RankInfo* rankInfo = GetRankInfo(rankId)) return rankInfo->GetBankMoneyPerDay(); return 0; } inline uint32 Guild::_GetRankBankTabSlotsPerDay(uint8 rankId, uint8 tabId) const { if (tabId < _GetPurchasedTabsSize()) if (const RankInfo* rankInfo = GetRankInfo(rankId)) return 
rankInfo->GetBankTabSlotsPerDay(tabId); return 0; } inline uint8 Guild::_GetRankBankTabRights(uint8 rankId, uint8 tabId) const { if (const RankInfo* rankInfo = GetRankInfo(rankId)) return rankInfo->GetBankTabRights(tabId); return 0; } inline uint32 Guild::_GetMemberRemainingSlots(uint64 guid, uint8 tabId) const { if (const Member* pMember = GetMember(guid)) return pMember->GetBankRemainingValue(tabId, this); return 0; } inline uint32 Guild::_GetMemberRemainingMoney(uint64 guid) const { if (const Member* pMember = GetMember(guid)) return pMember->GetBankRemainingValue(GUILD_BANK_MAX_TABS, this); return 0; } inline void Guild::_DecreaseMemberRemainingSlots(SQLTransaction& trans, uint64 guid, uint8 tabId) { // Remaining slots must be more then 0 if (uint32 remainingSlots = _GetMemberRemainingSlots(guid, tabId)) // Ignore guild master if (remainingSlots < uint32(GUILD_WITHDRAW_SLOT_UNLIMITED)) if (Member* pMember = GetMember(guid)) pMember->DecreaseBankRemainingValue(trans, tabId, 1); } inline bool Guild::_MemberHasTabRights(uint64 guid, uint8 tabId, uint32 rights) const { if (const Member* pMember = GetMember(guid)) { // Leader always has full rights if (pMember->IsRank(GR_GUILDMASTER) || m_leaderGuid == guid) return true; return (_GetRankBankTabRights(pMember->GetRankId(), tabId) & rights) == rights; } return false; } // Add new event log record inline void Guild::_LogEvent(GuildEventLogTypes eventType, uint32 playerGuid1, uint32 playerGuid2, uint8 newRank) { SQLTransaction trans = CharacterDatabase.BeginTransaction(); m_eventLog->AddEvent(trans, new EventLogEntry(m_id, m_eventLog->GetNextGUID(), eventType, playerGuid1, playerGuid2, newRank)); CharacterDatabase.CommitTransaction(trans); sScriptMgr->OnGuildEvent(this, uint8(eventType), playerGuid1, playerGuid2, newRank); } // Add new bank event log record void Guild::_LogBankEvent(SQLTransaction& trans, GuildBankEventLogTypes eventType, uint8 tabId, uint32 lowguid, uint32 itemOrMoney, uint16 itemStackCount, uint8 
destTabId) { if (tabId > GUILD_BANK_MAX_TABS) return; uint8 dbTabId = tabId; if (BankEventLogEntry::IsMoneyEvent(eventType)) { tabId = GUILD_BANK_MAX_TABS; dbTabId = GUILD_BANK_MONEY_LOGS_TAB; } LogHolder* pLog = m_bankEventLog[tabId]; pLog->AddEvent(trans, new BankEventLogEntry(m_id, pLog->GetNextGUID(), eventType, dbTabId, lowguid, itemOrMoney, itemStackCount, destTabId)); sScriptMgr->OnGuildBankEvent(this, uint8(eventType), tabId, lowguid, itemOrMoney, itemStackCount, destTabId); } inline Item* Guild::_GetItem(uint8 tabId, uint8 slotId) const { if (const BankTab* tab = GetBankTab(tabId)) return tab->GetItem(slotId); return NULL; } inline void Guild::_RemoveItem(SQLTransaction& trans, uint8 tabId, uint8 slotId) { if (BankTab* pTab = GetBankTab(tabId)) pTab->SetItem(trans, slotId, NULL); } void Guild::_MoveItems(MoveItemData* pSrc, MoveItemData* pDest, uint32 splitedAmount) { // 1. Initialize source item if (!pSrc->InitItem()) return; // No source item // 2. Check source item if (!pSrc->CheckItem(splitedAmount)) return; // Source item or splited amount is invalid /* if (pItemSrc->GetCount() == 0) { sLog->outCrash("Guild::SwapItems: Player %s(GUIDLow: %u) tried to move item %u from tab %u slot %u to tab %u slot %u, but item %u has a stack of zero!", player->GetName(), player->GetGUIDLow(), pItemSrc->GetEntry(), tabId, slotId, destTabId, destSlotId, pItemSrc->GetEntry()); //return; // Commented out for now, uncomment when it's verified that this causes a crash!! } // */ // 3. Check destination rights if (!pDest->HasStoreRights(pSrc)) return; // Player has no rights to store item in destination // 4. Check source withdraw rights if (!pSrc->HasWithdrawRights(pDest)) return; // Player has no rights to withdraw items from source // 5. Check split if (splitedAmount) { // 5.1. Clone source item if (!pSrc->CloneItem(splitedAmount)) return; // Item could not be cloned // 5.2. Move splited item to destination _DoItemsMove(pSrc, pDest, true, splitedAmount); } else // 6. 
No split { // 6.1. Try to merge items in destination (pDest->GetItem() == NULL) if (!_DoItemsMove(pSrc, pDest, false)) // Item could not be merged { // 6.2. Try to swap items // 6.2.1. Initialize destination item if (!pDest->InitItem()) return; // 6.2.2. Check rights to store item in source (opposite direction) if (!pSrc->HasStoreRights(pDest)) return; // Player has no rights to store item in source (opposite direction) if (!pDest->HasWithdrawRights(pSrc)) return; // Player has no rights to withdraw item from destination (opposite direction) // 6.2.3. Swap items (pDest->GetItem() != NULL) _DoItemsMove(pSrc, pDest, true); } } // 7. Send changes _SendBankContentUpdate(pSrc, pDest); } bool Guild::_DoItemsMove(MoveItemData* pSrc, MoveItemData* pDest, bool sendError, uint32 splitedAmount) { Item* pDestItem = pDest->GetItem(); bool swap = (pDestItem != NULL); Item* pSrcItem = pSrc->GetItem(splitedAmount); // 1. Can store source item in destination if (!pDest->CanStore(pSrcItem, swap, sendError)) return false; // 2. Can store destination item in source if (swap) if (!pSrc->CanStore(pDestItem, true, true)) return false; // GM LOG (TODO: move to scripts) pDest->LogAction(pSrc); if (swap) pSrc->LogAction(pDest); SQLTransaction trans = CharacterDatabase.BeginTransaction(); // 3. Log bank events pDest->LogBankEvent(trans, pSrc, pSrcItem->GetCount()); if (swap) pSrc->LogBankEvent(trans, pDest, pDestItem->GetCount()); // 4. Remove item from source pSrc->RemoveItem(trans, pDest, splitedAmount); // 5. Remove item from destination if (swap) pDest->RemoveItem(trans, pSrc); // 6. Store item in destination pDest->StoreItem(trans, pSrcItem); // 7. 
Store item in source if (swap) pSrc->StoreItem(trans, pDestItem); CharacterDatabase.CommitTransaction(trans); return true; } void Guild::_SendBankContent(WorldSession* session, uint8 tabId) const { uint64 guid = session->GetPlayer()->GetGUID(); if (_MemberHasTabRights(guid, tabId, GUILD_BANK_RIGHT_VIEW_TAB)) if (const BankTab* pTab = GetBankTab(tabId)) { WorldPacket data(SMSG_GUILD_BANK_LIST, 1200); data << uint64(m_bankMoney); data << uint8(tabId); data << uint32(_GetMemberRemainingSlots(guid, tabId)); data << uint8(0); // Tell client that there's no tab info in this packet pTab->WritePacket(data); session->SendPacket(&data); sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent (SMSG_GUILD_BANK_LIST)"); } } void Guild::_SendBankMoneyUpdate(WorldSession* session) const { WorldPacket data(SMSG_GUILD_BANK_LIST, 8 + 1 + 4 + 1 + 1); data << uint64(m_bankMoney); data << uint8(0); // tabId, default 0 data << uint32(_GetMemberRemainingSlots(session->GetPlayer()->GetGUID(), 0)); data << uint8(0); // Tell that there's no tab info in this packet data << uint8(0); // No items BroadcastPacket(&data); sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent (SMSG_GUILD_BANK_LIST)"); } void Guild::_SendBankContentUpdate(MoveItemData* pSrc, MoveItemData* pDest) const { ASSERT(pSrc->IsBank() || pDest->IsBank()); uint8 tabId = 0; SlotIds slots; if (pSrc->IsBank()) // B -> { tabId = pSrc->GetContainer(); slots.insert(pSrc->GetSlotId()); if (pDest->IsBank()) // B -> B { // Same tab - add destination slots to collection if (pDest->GetContainer() == pSrc->GetContainer()) pDest->CopySlots(slots); else // Different tabs - send second message { SlotIds destSlots; pDest->CopySlots(destSlots); _SendBankContentUpdate(pDest->GetContainer(), destSlots); } } } else if (pDest->IsBank()) // C -> B { tabId = pDest->GetContainer(); pDest->CopySlots(slots); } _SendBankContentUpdate(tabId, slots); } void Guild::_SendBankContentUpdate(uint8 tabId, SlotIds slots) const { if (const BankTab* pTab = 
GetBankTab(tabId)) { WorldPacket data(SMSG_GUILD_BANK_LIST, 1200); data << uint64(m_bankMoney); data << uint8(tabId); size_t rempos = data.wpos(); data << uint32(0); // Item withdraw amount, will be filled later data << uint8(0); // Tell client that there's no tab info in this packet data << uint8(slots.size()); for (uint8 slotId = 0; slotId < GUILD_BANK_MAX_SLOTS; ++slotId) if (slots.find(slotId) != slots.end()) pTab->WriteSlotPacket(data, slotId); for (Members::const_iterator itr = m_members.begin(); itr != m_members.end(); ++itr) if (_MemberHasTabRights(itr->second->GetGUID(), tabId, GUILD_BANK_RIGHT_VIEW_TAB)) if (Player* player = itr->second->FindPlayer()) { data.put<uint32>(rempos, uint32(_GetMemberRemainingSlots(player->GetGUID(), tabId))); player->GetSession()->SendPacket(&data); } sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent (SMSG_GUILD_BANK_LIST)"); } } void Guild::_BroadcastEvent(GuildEvents guildEvent, uint64 guid, const char* param1, const char* param2, const char* param3) const { uint8 count = !param3 ? (!param2 ? (!param1 ? 0 : 1) : 2) : 3; WorldPacket data(SMSG_GUILD_EVENT, 1 + 1 + count + (guid ? 8 : 0)); data << uint8(guildEvent); data << uint8(count); if (param3) data << param1 << param2 << param3; else if (param2) data << param1 << param2; else if (param1) data << param1; if (guid) data << uint64(guid); BroadcastPacket(&data); sLog->outDebug(LOG_FILTER_NETWORKIO, "WORLD: Sent SMSG_GUILD_EVENT"); }
gpl-2.0
shonumi/gekko-gc-emu
src/core/src/powerpc/cpu_core_regs.cpp
1
1691
#include "common.h"
#include "cpu_core_regs.h"
#include "crc.h"

// NOTE(review): the register accessor wrappers below were disabled by wrapping
// them in a block comment; kept verbatim for reference. Before re-enabling,
// note two visible problems: the setter for spr is malformed
// ("void ireg.spr[u32 Register, u32 NewVal)" is not a valid C++ signature),
// and set/get for fpr ps0 use index [1] of _f32 just like ps1 — presumably
// ps0 should use a different element; confirm against the fpr union layout.
/*
u32 ireg_PC() { return ireg.PC; }
void set_ireg_PC(u32 NewPC) { ireg.PC = NewPC; }
u32 ireg_gpr(u32 Register) { return ireg.gpr[Register]; }
void set_ireg_gpr(u32 Register, u32 NewVal) { ireg.gpr[Register] = NewVal; }
u32 ireg_spr(u32 Register) { return ireg.spr[Register]; }
void ireg.spr[u32 Register, u32 NewVal) { ireg.spr[Register] = NewVal; }
void set_ireg_MSR(u32 NewVal) { ireg.MSR = NewVal; }
u32 ireg_MSR() { return ireg.MSR; }
void set_ireg_CR(u32 NewVal) { ireg.CR = NewVal; }
u32 ireg_CR() { return ireg.CR; }
void set_ireg_fpr_64(u32 Register, u32 ps, f64 NewVal)
{
    switch(ps)
    {
        case 0: ireg.fpr[Register].ps0._f64 = NewVal; break;
        case 1: ireg.fpr[Register].ps1._f64 = NewVal; break;
    };
}
f64 ireg_fpr_64(u32 Register, u32 ps)
{
    if(ps) return ireg.fpr[Register].ps1._f64;
    else return ireg.fpr[Register].ps0._f64;
}
void set_ireg_fpr_32(u32 Register, u32 ps, f32 NewVal)
{
    if(ps) ireg.fpr[Register].ps1._f32[1] = NewVal;
    else ireg.fpr[Register].ps0._f32[1] = NewVal;
}
f32 ireg_fpr_32(u32 Register, u32 ps)
{
    if(ps) return ireg.fpr[Register].ps1._f32[1];
    else return ireg.fpr[Register].ps0._f32[1];
}
u32 ireg_FPSCR() { return ireg.FPSCR; }
void set_ireg_FPSCR(u32 NewVal) { ireg.FPSCR = NewVal; }
u32 ireg_IC() { return ireg.IC; }
void set_ireg_IC(u32 NewVal) { ireg.IC = NewVal; }
u32 ireg_sr(u32 Register) { return ireg.sr[Register]; }
void set_ireg_sr(u32 Register, u32 NewVal) { ireg.sr[Register] = NewVal; }
*/
gpl-2.0
papylhomme/diskmonitor
settings/appearance.cpp
1
2001
/****************************************************************************
 * DisKMonitor, KDE tools to monitor SMART and MDRaid health status         *
 * Copyright (C) 2014-2015 Michaël Lhomme <papylhomme@gmail.com>            *
 *                                                                          *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License as published by     *
 * the Free Software Foundation; either version 2 of the License, or        *
 * (at your option) any later version.                                      *
 *                                                                          *
 * This program is distributed in the hope that it will be useful,          *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of           *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the             *
 * GNU General Public License for more details.                             *
 *                                                                          *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, write to the Free Software Foundation, Inc.,  *
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.              *
 ****************************************************************************/

#include "appearance.h"

#include "iconmode.h"

using namespace Settings;

/*
 * Constructor
 *
 * Builds the designer UI, then gathers the three icon-mode radio buttons
 * into an invisible button group. The group's object name ("kcfg_IconMode")
 * presumably lets the KConfig dialog machinery bind the selection to the
 * "IconMode" configuration entry — each button is registered with its
 * DiskMonitor::IconMode enumerator as id.
 */
Appearance::Appearance(QWidget* parent) : QWidget(parent)
{
  setupUi(this);

  iconModeButtonGroup = new InvisibleButtonGroup(this);
  iconModeButtonGroup->setObjectName("kcfg_IconMode");
  iconModeButtonGroup->addButton(emotesIconModeRadioButton,  int(DiskMonitor::IconMode::Emotes));
  iconModeButtonGroup->addButton(dialogsIconModeRadioButton, int(DiskMonitor::IconMode::Dialogs));
  iconModeButtonGroup->addButton(customIconModeRadioButton,  int(DiskMonitor::IconMode::Custom));
}
gpl-2.0
Bootz/OpenStage-Project
src/server/scripts/Kalimdor/CavernsOfTime/CullingOfStratholme/culling_of_stratholme.cpp
1
60366
/*
 * Copyright (C) 2008-2011 TrinityCore <http://www.trinitycore.org/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "ScriptPCH.h"
#include "culling_of_stratholme.h"
#include "ScriptedEscortAI.h"

// Script-text ids for the escort-event dialogue, grouped by act.
enum Says
{
    //First Act - Uther and Jaina Dialog
    SAY_PHASE101                                = -1595070, //Arthas
    SAY_PHASE102                                = -1595071, //Uther
    SAY_PHASE103                                = -1595072, //Arthas
    SAY_PHASE104                                = -1595073, //Arthas
    SAY_PHASE105                                = -1595074, //Uther
    SAY_PHASE106                                = -1595075, //Arthas
    SAY_PHASE107                                = -1595076, //Uther
    SAY_PHASE108                                = -1595077, //Arthas
    SAY_PHASE109                                = -1595078, //Arthas
    SAY_PHASE110                                = -1595079, //Uther
    SAY_PHASE111                                = -1595080, //Arthas
    SAY_PHASE112                                = -1595081, //Uther
    SAY_PHASE113                                = -1595082, //Jaina
    SAY_PHASE114                                = -1595083, //Arthas
    SAY_PHASE115                                = -1595084, //Uther
    SAY_PHASE116                                = -1595085, //Arthas
    SAY_PHASE117                                = -1595086, //Jaina
    SAY_PHASE118                                = -1595087, //Arthas

    //Second Act - City Streets
    SAY_PHASE201                                = -1595088, //Arthas
    SAY_PHASE202                                = -1595089, //Cityman
    SAY_PHASE203                                = -1595090, //Arthas
    SAY_PHASE204                                = -1595091, //Crazyman
    SAY_PHASE205                                = -1595092, //Arthas
    SAY_PHASE206                                = -1595009, //Malganis
    SAY_PHASE207                                = -1595010, //Malganis
    SAY_PHASE208                                = -1595093, //Arthas
    SAY_PHASE209                                = -1595094, //Arthas
    SAY_PHASE210                                = -1595095, //Arthas

    //Third Act - Town Hall
    SAY_PHASE301                                = -1595096, //Arthas
    SAY_PHASE302                                = -1595097, //Drakonian
    SAY_PHASE303                                = -1595098, //Arthas
    SAY_PHASE304                                = -1595099, //Arthas
    SAY_PHASE305                                = -1595100, //Drakonian
    SAY_PHASE306                                = -1595101, //Arthas
    SAY_PHASE307                                = -1595102, //Arthas
    SAY_PHASE308                                = -1595103, //Arthas
    SAY_PHASE309                                = -1595104, //Arthas
    SAY_PHASE310                                = -1595105, //Arthas
    SAY_PHASE311                                = -1595106, //Arthas
    SAY_PHASE312                                = -1595107, //Arthas
    SAY_PHASE313                                = -1595108, //Arthas
    SAY_PHASE314                                = -1595000, //Epoch
    SAY_PHASE315                                = -1595109, //Arthas

    //Fourth Act - Fire Corridor
    SAY_PHASE401                                = -1595110, //Arthas
    SAY_PHASE402                                = -1595111, //Arthas
    SAY_PHASE403                                = -1595112, //Arthas
    SAY_PHASE404                                = -1595113, //Arthas
    SAY_PHASE405                                = -1595114, //Arthas
    SAY_PHASE406                                = -1595115, //Arthas
    SAY_PHASE407                                = -1595116, //Arthas

    //Fifth Act - Mal'Ganis Fight
    SAY_PHASE501                                = -1595117, //Arthas
    SAY_PHASE502                                = -1595118, //Arthas
    SAY_PHASE503                                = -1595119, //Arthas
    SAY_PHASE504                                = -1595120, //Arthas
};

// Creature entry ids used by this script.
enum NPCs
{
    NPC_INFINITE_ADVERSARY                      = 27742,
    NPC_INFINITE_HUNTER                         = 27743,
    NPC_INFINITE_AGENT                          = 27744,
    NPC_TIME_RIFT                               = 28409,
    NPC_ZOMBIE                                  = 27737,
    NPC_GHOUL                                   = 28249,
    NPC_NECROMANCER                             = 28200,
    NPC_STALKER                                 = 28199,
    NPC_FIEND                                   = 27734,
    NPC_GOLEM                                   = 28201,
    NPC_EGHOUL                                  = 27729,
    NPC_CONSTRUCT                               = 27736,
    NPC_INVIS_TARGET                            = 20562,
    NPC_KNIGHT_ESCORT                           = 27745,
    NPC_PRIEST_ESCORT                           = 27747,
    NPC_CITY_MAN                                = 28167,
    NPC_CITY_MAN2                               = 28169,
    NPC_CITY_MAN3                               = 31126,
    NPC_CITY_MAN4                               = 31127,
};

// Spell ids used by this script.
enum Spells
{
    SPELL_FEAR                                  = 39176,
    SPELL_ARTHAS_AURA                           = 52442,
    SPELL_EXORCISM_N                            = 52445,
    SPELL_EXORCISM_H                            = 58822,
    SPELL_HOLY_LIGHT                            = 52444,
    SPELL_ARCANE_DISRUPTION                     = 49590,
};

// Gossip menu ids for Arthas' dialogue steps.
enum GossipMenuArthas
{
    GOSSIP_MENU_ARTHAS_1                        = 100001,
    GOSSIP_MENU_ARTHAS_2                        = 100002,
    GOSSIP_MENU_ARTHAS_3                        = 100003,
    GOSSIP_MENU_ARTHAS_4                        = 100004,
    GOSSIP_MENU_ARTHAS_5                        = 100005
};

// Dimensions of the spawn tables below.
enum EncounterData
{
    ENCOUNTER_WAVES_NUMBER                      = 8,
    ENCOUNTER_WAVES_MAX_SPAWNS                  = 5,
    ENCOUNTER_DRACONIAN_NUMBER                  = 4,
    ENCOUNTER_CHRONO_SPAWNS                     = 19
};

// Locations for necromancers and adds to spawn.
// Each row is {entry, x, y, z, orientation}; an all-zero row means "no spawn".
float WavesLocations[ENCOUNTER_WAVES_NUMBER][ENCOUNTER_WAVES_MAX_SPAWNS][5]=
{
    {
        {NPC_ZOMBIE, 2164.698975f, 1255.392944f, 135.040878f, 0.490202f},
        {NPC_ZOMBIE, 2183.501465f, 1263.079102f, 134.859055f, 3.169981f},
        {NPC_GHOUL, 2177.512939f, 1247.313843f, 135.846695f, 1.696574f},
        {NPC_GHOUL, 2171.991943f, 1246.615845f, 135.745026f, 1.696574f},
        {0, 0, 0, 0, 0}
    },
    {
        {NPC_GHOUL, 2254.434326f, 1163.427612f, 138.055038f, 2.077358f},
        {NPC_GHOUL, 2254.703613f, 1158.867798f, 138.212234f, 2.345532f},
        {NPC_GHOUL, 2257.615723f, 1162.310913f, 138.091202f, 2.077358f},
        {NPC_NECROMANCER, 2258.258057f, 1157.250732f, 138.272873f, 2.387766f},
        {0, 0, 0, 0, 0}
    },
    {
        {NPC_STALKER, 2348.120117f, 1202.302490f, 130.491104f, 4.698538f},
        {NPC_GHOUL, 2352.863525f, 1207.819092f, 130.424271f, 4.949865f},
        {NPC_GHOUL, 2343.593750f, 1207.915039f, 130.781311f, 4.321547f},
        {NPC_NECROMANCER, 2348.257324f, 1212.202515f, 130.670135f, 4.450352f},
        {0, 0, 0, 0, 0}
    },
    {
        {NPC_STALKER, 2139.825195f, 1356.277100f, 132.199615f, 5.820131f},
        {NPC_GHOUL, 2137.073486f, 1362.464844f, 132.271637f, 5.820131f},
        {NPC_GHOUL, 2134.075684f, 1354.148071f, 131.885864f, 5.820131f},
        {NPC_NECROMANCER, 2133.302246f, 1358.907837f, 132.037689f, 5.820131f},
        {0, 0, 0, 0, 0}
    },
    {
        {NPC_NECROMANCER, 2264.013428f, 1174.055908f, 138.093094f, 2.860481f},
        {NPC_GHOUL, 2264.207764f, 1170.892700f, 138.034973f, 2.860481f},
        {NPC_GHOUL, 2266.948975f, 1176.898926f, 137.976929f, 2.860481f},
        {NPC_STALKER, 2269.215576f, 1170.109253f, 137.742691f, 2.860481f},
        {NPC_FIEND, 2273.106689f, 1176.101074f, 137.880508f, 2.860481f}
    },
    {
        {NPC_GOLEM, 2349.701660f, 1188.436646f, 130.428864f, 3.908642f},
        {NPC_GHOUL, 2349.909180f, 1194.582642f, 130.417816f, 3.577001f},
        {NPC_EGHOUL, 2354.662598f, 1185.692017f, 130.552032f, 3.577001f},
        {NPC_EGHOUL, 2354.716797f, 1191.614380f, 130.539810f, 3.577001f},
        {0, 0, 0, 0, 0}
    },
    {
        {NPC_CONSTRUCT, 2145.212891f, 1355.288086f, 132.288773f, 6.004838f},
        {NPC_NECROMANCER, 2137.078613f, 1357.612671f, 132.173340f, 6.004838f},
        {NPC_EGHOUL, 2139.402100f, 1352.541626f, 132.127518f, 5.812850f},
        {NPC_EGHOUL, 2142.408447f, 1360.760620f, 132.321564f, 5.812850f},
        {0, 0, 0, 0, 0}
    },
    {
        {NPC_GHOUL, 2172.686279f, 1259.618164f, 134.391754f, 1.865499f},
        {NPC_FIEND, 2177.649170f, 1256.061157f, 135.096512f, 1.849572f},
        {NPC_CONSTRUCT, 2170.782959f, 1253.594849f, 134.973022f, 1.849572f},
        {NPC_NECROMANCER, 2175.595703f, 1249.041992f, 135.603531f, 1.849572f},
        {0, 0, 0, 0, 0}
    }
};

// Locations for rifts to spawn and draconians to go.
// Each row is {entry, x, y, z, orientation}.
float RiftAndSpawnsLocations[ENCOUNTER_CHRONO_SPAWNS][5]=
{
    {NPC_TIME_RIFT, 2431.790039f, 1190.670044f, 148.076004f, 0.187923f},
    {NPC_INFINITE_ADVERSARY, 2433.857910f, 1185.612061f, 148.075974f, 4.566168f},
    {NPC_INFINITE_ADVERSARY, 2437.577881f, 1188.241089f, 148.075974f, 0.196999f},
    {NPC_INFINITE_AGENT, 2437.165527f, 1192.294922f, 148.075974f, 0.169247f},
    {NPC_INFINITE_HUNTER, 2434.989990f, 1197.679565f, 148.075974f, 0.715971f},
    {NPC_TIME_RIFT, 2403.954834f, 1178.815430f, 148.075943f, 4.966126f},
    {NPC_INFINITE_AGENT, 2403.676758f, 1171.495850f, 148.075607f, 4.902797f},
    {NPC_INFINITE_HUNTER, 2407.691162f, 1172.162720f, 148.075607f, 4.963010f},
    {NPC_TIME_RIFT, 2414.217041f, 1133.446167f, 148.076050f, 1.706972f},
    {NPC_INFINITE_ADVERSARY, 2416.024658f, 1139.456177f, 148.076431f, 1.752129f},
    {NPC_INFINITE_HUNTER, 2410.866699f, 1139.680542f, 148.076431f, 1.752129f},
    {NPC_TIME_RIFT, 2433.081543f, 1099.869751f, 148.076157f, 1.809509f},
    {NPC_INFINITE_ADVERSARY, 2426.947998f, 1107.471680f, 148.076019f, 1.877580f},
    {NPC_INFINITE_HUNTER, 2432.944580f, 1108.896362f, 148.208160f, 2.199241f},
    {NPC_TIME_RIFT, 2444.077637f, 1114.366089f, 148.076157f, 3.049565f},
    {NPC_INFINITE_ADVERSARY, 2438.190674f, 1118.368164f, 148.076172f, 3.139232f},
    {NPC_INFINITE_AGENT, 2435.861328f, 1113.402954f, 148.169327f, 2.390271f},
    {NPC_TIME_RIFT, 2463.131592f, 1115.391724f, 152.473129f, 3.409651f},
    {NPC_EPOCH, 2451.809326f, 1112.901245f, 149.220459f, 3.363617f}
};

// Player-visible gossip option texts.
#define GOSSIP_ITEM_ARTHAS_0 "I'm ready to start Culling of Stratholme."
#define GOSSIP_ITEM_ARTHAS_1 "Yes, my Prince. We're ready."
// NOTE(review): "Loarderon" below is presumably a misspelling of "Lordaeron"
// (cf. "For Lordaeron!" later in this file); left untouched here because the
// literal is player-visible runtime text -- confirm before changing.
#define GOSSIP_ITEM_ARTHAS_2 "We're only doing what is best for Loarderon your Highness."
#define GOSSIP_ITEM_ARTHAS_3 "I'm ready." #define GOSSIP_ITEM_ARTHAS_4 "For Lordaeron!" #define GOSSIP_ITEM_ARTHAS_5 "I'm ready to battle the dreadlord, sire." class npc_arthas : public CreatureScript { public: npc_arthas() : CreatureScript("npc_arthas") { } bool OnGossipSelect(Player* pPlayer, Creature* pCreature, uint32 /*sender*/, uint32 action) { pPlayer->PlayerTalkClass->ClearMenus(); npc_arthasAI* pAI = CAST_AI(npc_arthas::npc_arthasAI, pCreature->AI()); if (!pAI) return false; switch (action) { case GOSSIP_ACTION_INFO_DEF: pAI->Start(true, true, pPlayer->GetGUID(), 0, false, false); pAI->SetDespawnAtEnd(false); pAI->bStepping = false; pAI->uiStep = 1; break; case GOSSIP_ACTION_INFO_DEF+1: pAI->bStepping = true; pAI->uiStep = 24; break; case GOSSIP_ACTION_INFO_DEF+2: pAI->SetHoldState(false); pAI->bStepping = false; pAI->uiStep = 61; break; case GOSSIP_ACTION_INFO_DEF+3: pAI->SetHoldState(false); break; case GOSSIP_ACTION_INFO_DEF+4: pAI->bStepping = true; pAI->uiStep = 84; break; case GOSSIP_ACTION_INFO_DEF+5: pAI->bStepping = true; pAI->uiStep = 85; break; } pPlayer->CLOSE_GOSSIP_MENU(); pAI->SetDespawnAtFar(true); pCreature->RemoveFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); return true; } bool OnGossipHello(Player* pPlayer, Creature* pCreature) { npc_arthasAI* pAI = CAST_AI(npc_arthas::npc_arthasAI, pCreature->AI()); if (pAI && pAI->bStepping == false) { switch (pAI->uiGossipStep) { case 0: //This one is a workaround since the very beggining of the script is wrong. 
{ QuestStatus status = pPlayer->GetQuestStatus(13149); if (status != QUEST_STATUS_COMPLETE && status != QUEST_STATUS_REWARDED) return false; pPlayer->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_ITEM_ARTHAS_0, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF); pPlayer->SEND_GOSSIP_MENU(907, pCreature->GetGUID()); break; } case 1: pPlayer->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_ITEM_ARTHAS_1, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF+1); pPlayer->SEND_GOSSIP_MENU(GOSSIP_MENU_ARTHAS_1, pCreature->GetGUID()); break; case 2: pPlayer->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_ITEM_ARTHAS_2, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF+2); pPlayer->SEND_GOSSIP_MENU(GOSSIP_MENU_ARTHAS_2, pCreature->GetGUID()); break; case 3: pPlayer->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_ITEM_ARTHAS_3, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF+3); pPlayer->SEND_GOSSIP_MENU(GOSSIP_MENU_ARTHAS_3, pCreature->GetGUID()); break; case 4: pPlayer->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_ITEM_ARTHAS_4, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF+4); pPlayer->SEND_GOSSIP_MENU(GOSSIP_MENU_ARTHAS_4, pCreature->GetGUID()); break; case 5: pPlayer->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_ITEM_ARTHAS_5, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF+5); pPlayer->SEND_GOSSIP_MENU(GOSSIP_MENU_ARTHAS_5, pCreature->GetGUID()); break; default: return false; } } return true; } CreatureAI* GetAI(Creature* pCreature) const { return new npc_arthasAI(pCreature); } struct npc_arthasAI : public npc_escortAI { npc_arthasAI(Creature* pCreature) : npc_escortAI(pCreature) { pInstance = pCreature->GetInstanceScript(); Reset(); } InstanceScript* pInstance; bool bStepping; uint32 uiStep; uint32 uiPhaseTimer; uint32 uiGossipStep; uint32 uiPlayerFaction; uint32 uiBossEvent; uint32 uiWave; uint64 uiUtherGUID; uint64 uiJainaGUID; uint64 uiCitymenGUID[2]; uint64 uiWaveGUID[ENCOUNTER_WAVES_MAX_SPAWNS]; uint64 uiInfiniteDraconianGUID[ENCOUNTER_DRACONIAN_NUMBER]; uint64 uiStalkerGUID; uint64 uiBossGUID; //uiMeathookGUID || uiSalrammGUID 
uint64 uiEpochGUID; uint64 uiMalganisGUID; uint64 uiInfiniteGUID; uint32 uiExorcismTimer; void Reset() { uiUtherGUID = 0; uiJainaGUID = 0; for (uint8 i = 0; i < 2; ++i) uiCitymenGUID[i] = 0; for (uint8 i = 0; i < ENCOUNTER_WAVES_MAX_SPAWNS; ++i) uiWaveGUID[i] = 0; for (uint8 i = 0; i < ENCOUNTER_DRACONIAN_NUMBER; ++i) uiInfiniteDraconianGUID[i] = 0; uiStalkerGUID = 0; uiBossGUID = 0; uiEpochGUID = 0; uiMalganisGUID = 0; uiInfiniteGUID = 0; if (pInstance) { pInstance->SetData(DATA_ARTHAS_EVENT, NOT_STARTED); switch(pInstance->GetData(DATA_ARTHAS_EVENT)) { case NOT_STARTED: bStepping = true; uiStep = 0; me->SetFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); uiBossEvent = DATA_MEATHOOK_EVENT; uiGossipStep = 0; break; } uiPhaseTimer = 1000; uiExorcismTimer = 7300; uiWave = 0; } } void EnterCombat(Unit* /*who*/) { DoCast(me, SPELL_ARTHAS_AURA); } void JustDied(Unit* /*killer*/) { if (pInstance) pInstance->SetData(DATA_ARTHAS_EVENT, FAIL); } void SpawnTimeRift(uint32 timeRiftID, uint64* guidVector) { me->SummonCreature((uint32)RiftAndSpawnsLocations[timeRiftID][0], RiftAndSpawnsLocations[timeRiftID][1], RiftAndSpawnsLocations[timeRiftID][2], RiftAndSpawnsLocations[timeRiftID][3], RiftAndSpawnsLocations[timeRiftID][4], TEMPSUMMON_TIMED_DESPAWN, 11000); for (uint32 i = timeRiftID+1; i < ENCOUNTER_CHRONO_SPAWNS; ++i) { if ((uint32)RiftAndSpawnsLocations[i][0] == NPC_TIME_RIFT) break; if (Creature* pTemp = me->SummonCreature((uint32)RiftAndSpawnsLocations[i][0], RiftAndSpawnsLocations[timeRiftID][1], RiftAndSpawnsLocations[timeRiftID][2], RiftAndSpawnsLocations[timeRiftID][3], RiftAndSpawnsLocations[timeRiftID][4], TEMPSUMMON_TIMED_OR_DEAD_DESPAWN, 900000)) { guidVector[i-timeRiftID-1] = pTemp->GetGUID(); pTemp->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_OOC_NOT_ATTACKABLE | UNIT_FLAG_PASSIVE); pTemp->SetReactState(REACT_PASSIVE); pTemp->GetMotionMaster()->MovePoint(0, RiftAndSpawnsLocations[i][1], RiftAndSpawnsLocations[i][2], RiftAndSpawnsLocations[i][3]); if 
((uint32)RiftAndSpawnsLocations[i][0] == NPC_EPOCH) uiEpochGUID = pTemp->GetGUID(); } } } void SpawnWaveGroup(uint32 waveID, uint64* guidVector) { for (uint32 i = 0; i < ENCOUNTER_WAVES_MAX_SPAWNS; ++i) { if ((uint32)WavesLocations[waveID][i][0] == 0) break; if (Creature* pTemp = me->SummonCreature((uint32)WavesLocations[waveID][i][0], WavesLocations[waveID][i][1], WavesLocations[waveID][i][2], WavesLocations[waveID][i][3], WavesLocations[waveID][i][4], TEMPSUMMON_TIMED_OR_DEAD_DESPAWN, 900000)) { guidVector[i] = pTemp->GetGUID(); } } } void SetHoldState(bool bOnHold) { SetEscortPaused(bOnHold); } void JumpToNextStep(uint32 uiTimer) { uiPhaseTimer = uiTimer; ++uiStep; } void WaypointReached(uint32 uiPointId) { switch(uiPointId) { case 0: case 1: case 3: case 9: case 10: case 11: case 22: case 23: case 26: case 55: case 56: SetHoldState(true); bStepping = true; break; case 7: if (Unit* pCityman0 = me->SummonCreature(NPC_CITY_MAN, 2091.977f, 1275.021f, 140.757f, 0.558f, TEMPSUMMON_TIMED_OR_DEAD_DESPAWN, 60000)) uiCitymenGUID[0] = pCityman0->GetGUID(); if (Unit* pCityman1 = me->SummonCreature(NPC_CITY_MAN2, 2093.514f, 1275.842f, 140.408f, 3.801f, TEMPSUMMON_TIMED_OR_DEAD_DESPAWN, 60000)) uiCitymenGUID[1] = pCityman1->GetGUID(); break; case 8: uiGossipStep = 1; me->SetFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); SetHoldState(true); break; case 12: SetRun(true); DoScriptText(SAY_PHASE210, me); if (Unit* pDisguised0 = me->SummonCreature(NPC_CITY_MAN3, 2398.14f, 1207.81f, 134.04f, 5.155249f, TEMPSUMMON_DEAD_DESPAWN, 180000)) { uiInfiniteDraconianGUID[0] = pDisguised0->GetGUID(); if (Unit* pDisguised1 = me->SummonCreature(NPC_CITY_MAN4, 2403.22f, 1205.54f, 134.04f, 3.311264f, TEMPSUMMON_DEAD_DESPAWN, 180000)) { uiInfiniteDraconianGUID[1] = pDisguised1->GetGUID(); if (Unit* pDisguised2 = me->SummonCreature(NPC_CITY_MAN, 2400.82f, 1201.69f, 134.01f, 1.534082f, TEMPSUMMON_DEAD_DESPAWN, 180000)) { uiInfiniteDraconianGUID[2] = pDisguised2->GetGUID(); 
pDisguised0->SetUInt64Value(UNIT_FIELD_TARGET, uiInfiniteDraconianGUID[1]); pDisguised1->SetUInt64Value(UNIT_FIELD_TARGET, uiInfiniteDraconianGUID[0]); pDisguised2->SetUInt64Value(UNIT_FIELD_TARGET, uiInfiniteDraconianGUID[1]); } } } break; case 20: uiGossipStep = 2; me->SetFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); SetRun(false); SetHoldState(true); break; case 21: DoScriptText(SAY_PHASE301, me); break; case 25: SetRun(false); SpawnTimeRift(0, &uiInfiniteDraconianGUID[0]); DoScriptText(SAY_PHASE307, me); break; case 29: SetRun(false); SpawnTimeRift(5, &uiInfiniteDraconianGUID[0]); SpawnTimeRift(8, &uiInfiniteDraconianGUID[2]); DoScriptText(SAY_PHASE309, me); SetHoldState(true); bStepping = true; break; case 31: SetRun(false); SpawnTimeRift(11, &uiInfiniteDraconianGUID[0]); SpawnTimeRift(14, &uiInfiniteDraconianGUID[2]); DoScriptText(SAY_PHASE311, me); SetHoldState(true); bStepping = true; break; case 32: DoScriptText(SAY_PHASE401, me); break; case 34: DoScriptText(SAY_PHASE402, me); break; case 35: DoScriptText(SAY_PHASE403, me); break; case 36: if (pInstance) if (GameObject* pGate = pInstance->instance->GetGameObject(pInstance->GetData64(DATA_SHKAF_GATE))) pGate->SetGoState(GO_STATE_ACTIVE); break; case 45: SetRun(true); SetDespawnAtFar(false); uiGossipStep = 4; me->SetFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); SetHoldState(true); break; case 47: SetRun(false); DoScriptText(SAY_PHASE405, me); break; case 48: SetRun(true); DoScriptText(SAY_PHASE406, me); break; case 53: DoScriptText(SAY_PHASE407, me); break; case 54: uiGossipStep = 5; me->SetFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); SetHoldState(true); break; } } void UpdateAI(const uint32 diff) { npc_escortAI::UpdateAI(diff); DoMeleeAttackIfReady(); if (bStepping) { if (uiPhaseTimer <= diff) { switch(uiStep) { //After reset case 0: if (Unit* pJaina = GetClosestCreatureWithEntry(me, NPC_JAINA, 50.0f)) uiJainaGUID = pJaina->GetGUID(); else if (Unit* pJaina = me->SummonCreature(NPC_JAINA, 1895.48f, 1292.66f, 
143.706f, 0.023475f, TEMPSUMMON_DEAD_DESPAWN, 180000)) uiJainaGUID = pJaina->GetGUID(); bStepping = false; JumpToNextStep(0); break; //After waypoint 0 case 1: me->RemoveUnitMovementFlag(MOVEMENTFLAG_WALKING); if (Unit* pUther = me->SummonCreature(NPC_UTHER, 1794.357f, 1272.183f, 140.558f, 1.37f, TEMPSUMMON_DEAD_DESPAWN, 180000)) { uiUtherGUID = pUther->GetGUID(); pUther->RemoveUnitMovementFlag(MOVEMENTFLAG_WALKING); pUther->GetMotionMaster()->MovePoint(0, 1897.018f, 1287.487f, 143.481f); pUther->SetUInt64Value(UNIT_FIELD_TARGET, me->GetGUID()); me->SetUInt64Value(UNIT_FIELD_TARGET, uiUtherGUID); } JumpToNextStep(17000); break; case 2: DoScriptText(SAY_PHASE101, me); JumpToNextStep(2000); break; case 3: if (Creature* pUther = Unit::GetCreature(*me, uiUtherGUID)) { DoScriptText(SAY_PHASE102, pUther); } JumpToNextStep(8000); break; case 4: SetEscortPaused(false); bStepping = false; SetRun(false); DoScriptText(SAY_PHASE103, me); JumpToNextStep(0); break; //After waypoint 1 case 5: if (Creature* pJaina = Unit::GetCreature(*me, uiJainaGUID)) pJaina->SetUInt64Value(UNIT_FIELD_TARGET, me->GetGUID()); DoScriptText(SAY_PHASE104, me); JumpToNextStep(10000); break; case 6: if (Creature* pUther = Unit::GetCreature(*me, uiUtherGUID)) DoScriptText(SAY_PHASE105, pUther); JumpToNextStep(1000); break; case 7: DoScriptText(SAY_PHASE106, me); JumpToNextStep(4000); break; case 8: if (Creature* pUther = Unit::GetCreature(*me, uiUtherGUID)) DoScriptText(SAY_PHASE107, pUther); JumpToNextStep(6000); break; case 9: DoScriptText(SAY_PHASE108, me); JumpToNextStep(4000); break; case 10: if (Creature* pUther = Unit::GetCreature(*me, uiUtherGUID)) DoScriptText(SAY_PHASE109, pUther); JumpToNextStep(8000); break; case 11: DoScriptText(SAY_PHASE110, me); JumpToNextStep(4000); break; case 12: if (Creature* pUther = Unit::GetCreature(*me, uiUtherGUID)) DoScriptText(SAY_PHASE111, pUther); JumpToNextStep(4000); break; case 13: DoScriptText(SAY_PHASE112, me); JumpToNextStep(11000); break; case 14: if 
(Creature* pJaina = Unit::GetCreature(*me, uiJainaGUID)) DoScriptText(SAY_PHASE113, pJaina); JumpToNextStep(3000); break; case 15: DoScriptText(SAY_PHASE114, me); JumpToNextStep(9000); break; case 16: if (Creature* pUther = Unit::GetCreature(*me, uiUtherGUID)) DoScriptText(SAY_PHASE115, pUther); JumpToNextStep(4000); break; case 17: if (Creature* pUther = Unit::GetCreature(*me, uiUtherGUID)) { pUther->AddUnitMovementFlag(MOVEMENTFLAG_WALKING); pUther->GetMotionMaster()->MovePoint(0, 1794.357f, 1272.183f, 140.558f); } JumpToNextStep(1000); break; case 18: if (Creature* pJaina = Unit::GetCreature(*me, uiJainaGUID)) { me->SetUInt64Value(UNIT_FIELD_TARGET, uiJainaGUID); pJaina->AddUnitMovementFlag(MOVEMENTFLAG_WALKING); pJaina->GetMotionMaster()->MovePoint(0, 1794.357f, 1272.183f, 140.558f); } JumpToNextStep(1000); break; case 19: DoScriptText(SAY_PHASE116, me); JumpToNextStep(1000); break; case 20: if (Creature* pJaina = Unit::GetCreature(*me, uiJainaGUID)) DoScriptText(SAY_PHASE117, pJaina); JumpToNextStep(3000); break; case 21: SetEscortPaused(false); bStepping = false; me->SetUInt64Value(UNIT_FIELD_TARGET, 0); JumpToNextStep(0); break; //After waypoint 3 case 22: DoScriptText(SAY_PHASE118, me); me->SetUInt64Value(UNIT_FIELD_TARGET, uiJainaGUID); JumpToNextStep(10000); break; case 23: SetEscortPaused(false); bStepping = false; SetRun(true); if (Creature* pJaina = Unit::GetCreature(*me, uiJainaGUID)) pJaina->DisappearAndDie(); if (Creature* pUther = Unit::GetCreature(*me, uiUtherGUID)) pUther->DisappearAndDie(); me->SetUInt64Value(UNIT_FIELD_TARGET, 0); JumpToNextStep(0); break; //After Gossip 1 (waypoint 8) case 24: if (Unit* pStalker = me->SummonCreature(NPC_INVIS_TARGET, 2026.469f, 1287.088f, 143.596f, 1.37f, TEMPSUMMON_TIMED_DESPAWN, 14000)) { uiStalkerGUID = pStalker->GetGUID(); me->SetUInt64Value(UNIT_FIELD_TARGET, uiStalkerGUID); } JumpToNextStep(1000); break; case 25: DoScriptText(SAY_PHASE201, me); JumpToNextStep(12000); break; case 26: 
SetEscortPaused(false); bStepping = false; SetRun(false); me->SetUInt64Value(UNIT_FIELD_TARGET, 0); JumpToNextStep(0); break; //After waypoint 9 case 27: me->SetUInt64Value(UNIT_FIELD_TARGET, uiCitymenGUID[0]); if (Creature* pCityman = Unit::GetCreature(*me, uiCitymenGUID[0])) { pCityman->SetUInt64Value(UNIT_FIELD_TARGET, me->GetGUID()); pCityman->AddUnitMovementFlag(MOVEMENTFLAG_WALKING); pCityman->GetMotionMaster()->MovePoint(0, 2088.625f, 1279.191f, 140.743f); } JumpToNextStep(2000); break; case 28: if (Creature* pCityman = Unit::GetCreature(*me, uiCitymenGUID[0])) DoScriptText(SAY_PHASE202, pCityman); JumpToNextStep(4000); break; case 29: SetEscortPaused(false); bStepping = false; DoScriptText(SAY_PHASE203, me); JumpToNextStep(0); break; //After waypoint 10 case 30: me->HandleEmoteCommand(37); JumpToNextStep(1000); break; case 31: SetEscortPaused(false); bStepping = false; if (Creature* pCityman1 = Unit::GetCreature(*me, uiCitymenGUID[1])) { DoScriptText(SAY_PHASE204, pCityman1); pCityman1->SetUInt64Value(UNIT_FIELD_TARGET, me->GetGUID()); if (Creature* pCityman0 = Unit::GetCreature(*me, uiCitymenGUID[0])) pCityman0->Kill(pCityman0); me->SetUInt64Value(UNIT_FIELD_TARGET, uiCitymenGUID[1]); } JumpToNextStep(0); break; //After waypoint 11 case 32: me->HandleEmoteCommand(37); JumpToNextStep(1000); break; case 33: if (Creature* pCityman1 = Unit::GetCreature(*me, uiCitymenGUID[1])) pCityman1->Kill(pCityman1); JumpToNextStep(1000); break; case 34: if (Unit* pStalker = me->SummonCreature(NPC_INVIS_TARGET, 2081.447f, 1287.770f, 141.3241f, 1.37f, TEMPSUMMON_TIMED_DESPAWN, 10000)) { uiStalkerGUID = pStalker->GetGUID(); me->SetUInt64Value(UNIT_FIELD_TARGET, uiStalkerGUID); } DoScriptText(SAY_PHASE205, me); JumpToNextStep(3000); break; case 35: if (Unit* pStalkerM = me->SummonCreature(NPC_INVIS_TARGET, 2117.349f, 1288.624f, 136.271f, 1.37f, TEMPSUMMON_TIMED_DESPAWN, 60000)) { uiStalkerGUID = pStalkerM->GetGUID(); me->SetUInt64Value(UNIT_FIELD_TARGET, uiStalkerGUID); } 
JumpToNextStep(1000); break; case 36: if (Creature* pMalganis = me->SummonCreature(NPC_MAL_GANIS, 2117.349f, 1288.624f, 136.271f, 1.37f, TEMPSUMMON_TIMED_DESPAWN, 60000)) { if (Creature* pStalkerM = Unit::GetCreature(*me, uiStalkerGUID)) pMalganis->CastSpell(pStalkerM, 63793, false); uiMalganisGUID = pMalganis->GetGUID(); DoScriptText(SAY_PHASE206, pMalganis); pMalganis->SetUInt64Value(UNIT_FIELD_TARGET, me->GetGUID()); pMalganis->SetReactState(REACT_PASSIVE); } JumpToNextStep(11000); break; case 37: if (Creature* pMalganis = Unit::GetCreature(*me, uiMalganisGUID)) { if (Creature* pZombie = GetClosestCreatureWithEntry(pMalganis, NPC_CITY_MAN, 100.0f)) pZombie->UpdateEntry(NPC_ZOMBIE, 0); else if (Creature* pZombie = GetClosestCreatureWithEntry(pMalganis, NPC_CITY_MAN2, 100.0f)) pZombie->UpdateEntry(NPC_ZOMBIE, 0); else //There's no one else to transform uiStep++; } else uiStep++; uiPhaseTimer = 500; break; case 38: if (Creature* pMalganis = Unit::GetCreature(*me, uiMalganisGUID)) DoScriptText(SAY_PHASE207, pMalganis); JumpToNextStep(17000); break; case 39: if (Creature* pMalganis = Unit::GetCreature(*me, uiMalganisGUID)) pMalganis->SetVisible(false); DoScriptText(SAY_PHASE208, me); JumpToNextStep(7000); break; case 40: if (Unit* pStalker = me->SummonCreature(NPC_INVIS_TARGET, 2081.447f, 1287.770f, 141.3241f, 1.37f, TEMPSUMMON_TIMED_DESPAWN, 10000)) { uiStalkerGUID = pStalker->GetGUID(); me->SetUInt64Value(UNIT_FIELD_TARGET, uiStalkerGUID); } DoScriptText(SAY_PHASE209, me); uiBossEvent = DATA_MEATHOOK_EVENT; if (pInstance) pInstance->SetData(DATA_ARTHAS_EVENT, IN_PROGRESS); me->SetReactState(REACT_DEFENSIVE); SetDespawnAtFar(false); JumpToNextStep(5000); break; case 41: //Summon wave group case 43: case 45: case 47: case 51: case 53: case 55: case 57: if (pInstance->GetData(uiBossEvent) != DONE) { SpawnWaveGroup(uiWave, uiWaveGUID); uiWave++; } JumpToNextStep(500); break; case 42: //Wait group to die case 44: case 46: case 48: case 52: case 54: case 56: case 58: if 
(pInstance->GetData(uiBossEvent) != DONE) { uint32 mobCounter = 0; uint32 deadCounter = 0; for (uint8 i = 0; i < ENCOUNTER_WAVES_MAX_SPAWNS; ++i) { if (uiWaveGUID[i] == 0) break; ++mobCounter; Unit* pTemp = Unit::GetCreature(*me, uiWaveGUID[i]); if (!pTemp || pTemp->isDead()) ++deadCounter; } if (mobCounter <= deadCounter) //If group is dead JumpToNextStep(1000); else uiPhaseTimer = 1000; } else JumpToNextStep(500); break; case 49: //Summon Boss case 59: if (pInstance->GetData(uiBossEvent) != DONE) { uint32 uiBossID = 0; if (uiBossEvent == DATA_MEATHOOK_EVENT) uiBossID = NPC_MEATHOOK; else if (uiBossEvent == DATA_SALRAMM_EVENT) uiBossID = NPC_SALRAMM; if (Unit* pBoss = me->SummonCreature(uiBossID, 2232.19f, 1331.933f, 126.662f, 3.15f, TEMPSUMMON_TIMED_OR_DEAD_DESPAWN, 900000)) { uiBossGUID = pBoss->GetGUID(); pBoss->AddUnitMovementFlag(MOVEMENTFLAG_WALKING); pBoss->GetMotionMaster()->MovePoint(0, 2194.110f, 1332.00f, 130.00f); } } JumpToNextStep(30000); break; case 50: //Wait Boss death case 60: if (pInstance) { if (pInstance->GetData(uiBossEvent) == DONE) { JumpToNextStep(1000); if (uiBossEvent == DATA_MEATHOOK_EVENT) uiBossEvent = DATA_SALRAMM_EVENT; else if (uiBossEvent == DATA_SALRAMM_EVENT) { SetHoldState(false); bStepping = false; uiBossEvent = DATA_EPOCH_EVENT; } } else if (pInstance->GetData(uiBossEvent) == FAIL) npc_escortAI::EnterEvadeMode(); else uiPhaseTimer = 10000; } break; //After Gossip 2 (waypoint 22) case 61: me->SetReactState(REACT_AGGRESSIVE); if (Creature* pDisguised0 = Unit::GetCreature(*me, uiInfiniteDraconianGUID[0])) pDisguised0->SetUInt64Value(UNIT_FIELD_TARGET, me->GetGUID()); if (Creature* pDisguised1 = Unit::GetCreature(*me, uiInfiniteDraconianGUID[1])) pDisguised1->SetUInt64Value(UNIT_FIELD_TARGET, me->GetGUID()); if (Creature* pDisguised2 = Unit::GetCreature(*me, uiInfiniteDraconianGUID[2])) pDisguised2->SetUInt64Value(UNIT_FIELD_TARGET, me->GetGUID()); JumpToNextStep(1000); break; case 62: if (Creature* pDisguised0 = 
Unit::GetCreature(*me, uiInfiniteDraconianGUID[0])) DoScriptText(SAY_PHASE302, pDisguised0); JumpToNextStep(7000); break; case 63: DoScriptText(SAY_PHASE303, me); SetHoldState(false); bStepping = false; JumpToNextStep(0); break; //After waypoint 23 case 64: me->HandleEmoteCommand(54); JumpToNextStep(1000); break; case 65: if (Creature* pDisguised0 = Unit::GetCreature(*me, uiInfiniteDraconianGUID[0])) pDisguised0->HandleEmoteCommand(11); JumpToNextStep(1000); break; case 66: DoScriptText(SAY_PHASE304, me); JumpToNextStep(2000); break; case 67: if (Creature* pDisguised0 = Unit::GetCreature(*me, uiInfiniteDraconianGUID[0])) DoScriptText(SAY_PHASE305, pDisguised0); JumpToNextStep(1000); break; case 68: if (Creature* pDisguised2 = Unit::GetCreature(*me, uiInfiniteDraconianGUID[2])) { pDisguised2->UpdateEntry(NPC_INFINITE_HUNTER, 0); //Make them unattackable pDisguised2->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_OOC_NOT_ATTACKABLE | UNIT_FLAG_PASSIVE); pDisguised2->SetReactState(REACT_PASSIVE); } JumpToNextStep(2000); break; case 69: if (Creature* pDisguised1 = Unit::GetCreature(*me, uiInfiniteDraconianGUID[1])) { pDisguised1->UpdateEntry(NPC_INFINITE_AGENT, 0); //Make them unattackable pDisguised1->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_OOC_NOT_ATTACKABLE | UNIT_FLAG_PASSIVE); pDisguised1->SetReactState(REACT_PASSIVE); } JumpToNextStep(2000); break; case 70: if (Creature* pDisguised0 = Unit::GetCreature(*me, uiInfiniteDraconianGUID[0])) { pDisguised0->UpdateEntry(NPC_INFINITE_ADVERSARY, 0); //Make them unattackable pDisguised0->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_OOC_NOT_ATTACKABLE | UNIT_FLAG_PASSIVE); pDisguised0->SetReactState(REACT_PASSIVE); } JumpToNextStep(2000); break; case 71: //After waypoint 26, 29, 31 case 73: case 75: case 77: //Make cratures attackable for (uint32 i = 0; i< ENCOUNTER_DRACONIAN_NUMBER; ++i) if (Creature* pTemp = Unit::GetCreature(*me, uiInfiniteDraconianGUID[i])) { pTemp->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_OOC_NOT_ATTACKABLE | UNIT_FLAG_PASSIVE); 
pTemp->SetReactState(REACT_AGGRESSIVE); } JumpToNextStep(5000); break; case 72: case 74: case 76: if (me->isInCombat()) uiPhaseTimer = 1000; else { if (uiStep == 72) DoScriptText(SAY_PHASE308, me); if (uiStep == 74) DoScriptText(SAY_PHASE308, me); if (uiStep == 76) DoScriptText(SAY_PHASE310, me); SetHoldState(false); bStepping = false; SetRun(true); JumpToNextStep(2000); } break; case 78: if (me->isInCombat()) uiPhaseTimer = 1000; else { DoScriptText(SAY_PHASE312, me); JumpToNextStep(5000); } break; case 79: DoScriptText(SAY_PHASE313, me); JumpToNextStep(1000); break; case 80: if (pInstance) if (pInstance->GetData(DATA_EPOCH_EVENT) != DONE) { SpawnTimeRift(17, &uiEpochGUID); if (Creature* pEpoch = Unit::GetCreature(*me, uiEpochGUID)) DoScriptText(SAY_PHASE314, pEpoch); me->SetUInt64Value(UNIT_FIELD_TARGET, uiEpochGUID); } JumpToNextStep(18000); break; case 81: if (pInstance) if (pInstance->GetData(DATA_EPOCH_EVENT) != DONE) DoScriptText(SAY_PHASE315, me); JumpToNextStep(6000); break; case 82: if (pInstance) if (pInstance->GetData(DATA_EPOCH_EVENT) != DONE) { if (Creature* pEpoch = Unit::GetCreature(*me, uiEpochGUID)) { //Make Epoch attackable pEpoch->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_OOC_NOT_ATTACKABLE | UNIT_FLAG_PASSIVE); pEpoch->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); pEpoch->SetReactState(REACT_AGGRESSIVE); } } JumpToNextStep(1000); break; case 83: if (pInstance) { if (pInstance->GetData(DATA_EPOCH_EVENT) == DONE) { uiGossipStep = 3; me->SetFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); bStepping = false; uiBossEvent = DATA_MAL_GANIS_EVENT; JumpToNextStep(15000); } else if (pInstance->GetData(DATA_EPOCH_EVENT) == FAIL) npc_escortAI::EnterEvadeMode(); else uiPhaseTimer = 10000; } break; //After Gossip 4 case 84: DoScriptText(SAY_PHASE404, me); SetHoldState(false); bStepping = false; break; //After Gossip 5 case 85: DoScriptText(SAY_PHASE501, me); if (Creature* pMalganis = me->SummonCreature(NPC_MAL_GANIS, 2296.665f, 1502.362f, 128.362f, 4.961f, 
TEMPSUMMON_TIMED_OR_DEAD_DESPAWN, 900000)) { uiMalganisGUID = pMalganis->GetGUID(); pMalganis->SetReactState(REACT_PASSIVE); } if (pInstance) if (GameObject* pGate = pInstance->instance->GetGameObject(pInstance->GetData64(DATA_MAL_GANIS_GATE_1))) pGate->SetGoState(GO_STATE_ACTIVE); SetHoldState(false); bStepping = false; JumpToNextStep(0); break; //After waypoint 55 case 86: DoScriptText(SAY_PHASE502, me); JumpToNextStep(6000); me->SetUInt64Value(UNIT_FIELD_TARGET, uiMalganisGUID); break; case 87: if (Creature* pMalganis = Unit::GetCreature(*me, uiMalganisGUID)) { pMalganis->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_OOC_NOT_ATTACKABLE | UNIT_FLAG_UNK_6 | UNIT_FLAG_PASSIVE | UNIT_FLAG_UNK_15); pMalganis->SetReactState(REACT_AGGRESSIVE); } JumpToNextStep(1000); break; case 88: if (pInstance) { if (pInstance->GetData(DATA_MAL_GANIS_EVENT) == DONE) { SetHoldState(false); JumpToNextStep(1000); } else if (pInstance->GetData(DATA_MAL_GANIS_EVENT) == FAIL) npc_escortAI::EnterEvadeMode(); else uiPhaseTimer = 10000; } break; //After waypoint 56 case 89: SetRun(true); me->SetUInt64Value(UNIT_FIELD_TARGET, uiMalganisGUID); DoScriptText(SAY_PHASE503, me); JumpToNextStep(7000); break; case 90: if (pInstance) { pInstance->SetData(DATA_ARTHAS_EVENT, DONE); //Rewards: Achiev & Chest ;D me->SetUInt64Value(UNIT_FIELD_TARGET, pInstance->GetData64(DATA_MAL_GANIS_GATE_2)); //Look behind } DoScriptText(SAY_PHASE504, me); bStepping = false; break; } } else uiPhaseTimer -= diff; } //Battling skills if (!me->getVictim()) return; if (uiExorcismTimer < diff) { if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0)) DoCast(target, SPELL_EXORCISM_N); uiExorcismTimer = 7300; } else uiExorcismTimer -= diff; if (HealthBelowPct(40)) DoCast(me, SPELL_HOLY_LIGHT); } }; }; class npc_crate_helper : public CreatureScript { public: npc_crate_helper() : CreatureScript("npc_create_helper_cot") { } struct npc_crate_helperAI : public NullCreatureAI { npc_crate_helperAI(Creature* creature) : 
NullCreatureAI(creature) { _marked = false; } void SpellHit(Unit* /*caster*/, SpellEntry const* spell) { if (spell->Id == SPELL_ARCANE_DISRUPTION && !_marked) { _marked = true; if (InstanceScript* instance = me->GetInstanceScript()) instance->SetData(DATA_CRATE_COUNT, instance->GetData(DATA_CRATE_COUNT) + 1); if (GameObject* crate = me->FindNearestGameObject(GO_SUSPICIOUS_CRATE, 5.0f)) { crate->SummonGameObject(GO_PLAGUED_CRATE, crate->GetPositionX(), crate->GetPositionY(), crate->GetPositionZ(), crate->GetOrientation(), 0.0f, 0.0f, 0.0f, 0.0f, DAY); crate->Delete(); } } } private: bool _marked; }; CreatureAI* GetAI(Creature* creature) const { return new npc_crate_helperAI(creature); } }; void AddSC_culling_of_stratholme() { new npc_arthas(); new npc_crate_helper(); }
gpl-2.0
cgsecurity/testdisk
src/file_lxo.c
1
3189
/* File: file_lxo.c Copyright (C) 2011 Christophe GRENIER <grenier@cgsecurity.org> This software is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #if !defined(SINGLE_FORMAT) || defined(SINGLE_FORMAT_lxo) #ifdef HAVE_CONFIG_H #include <config.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #include <stdio.h> #include "types.h" #include "common.h" #include "filegen.h" /*@ requires valid_register_header_check(file_stat); */ static void register_header_check_lxo(file_stat_t *file_stat); const file_hint_t file_hint_lxo= { .extension="lxo", .description="lxo/lwo 3d model", .max_filesize=PHOTOREC_MAX_FILE_SIZE, .recover=1, .enable_by_default=1, .register_header_check=&register_header_check_lxo }; struct lxo_header { char magic[4]; uint32_t size; char type[3]; } __attribute__ ((gcc_struct, __packed__)); /*@ @ requires buffer_size >= sizeof(struct lxo_header); @ requires separation: \separated(&file_hint_lxo, buffer+(..), file_recovery, file_recovery_new); @ requires valid_header_check_param(buffer, buffer_size, safe_header_only, file_recovery, file_recovery_new); @ ensures valid_header_check_result(\result, file_recovery_new); @ assigns *file_recovery_new; @*/ static int header_check_lxo(const unsigned char *buffer, const unsigned int buffer_size, const unsigned int safe_header_only, const file_recovery_t *file_recovery, file_recovery_t *file_recovery_new) { 
const struct lxo_header *header=(const struct lxo_header *)buffer; const uint64_t size=(uint64_t)be32(header->size) + 8; if(size < sizeof(struct lxo_header)) return 0; if(buffer[8]=='L' && buffer[9]=='X' && buffer[10]=='O') { reset_file_recovery(file_recovery_new); file_recovery_new->extension=file_hint_lxo.extension; file_recovery_new->calculated_file_size=size; file_recovery_new->file_check=&file_check_size; file_recovery_new->data_check=&data_check_size; return 1; } if(buffer[8]=='L' && buffer[9]=='W' && buffer[10]=='O') { reset_file_recovery(file_recovery_new); file_recovery_new->extension="lwo"; file_recovery_new->calculated_file_size=size; file_recovery_new->file_check=&file_check_size; file_recovery_new->data_check=&data_check_size; return 1; } return 0; } static void register_header_check_lxo(file_stat_t *file_stat) { static const unsigned char lxo_header[4]= { 'F' , 'O' , 'R' , 'M' }; register_header_check(0, lxo_header, sizeof(lxo_header), &header_check_lxo, file_stat); } #endif
gpl-2.0
gelu/ChgSD2
scripts/outland/black_temple/boss_illidan.cpp
1
92942
/* Copyright (C) 2006 - 2011 ScriptDev2 <http://www.scriptdev2.com/>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* ScriptData
SDName: Boss_Illidan_Stormrage
SD%Complete: 90
SDComment:
SDCategory: Black Temple
EndScriptData */

#include "precompiled.h"
#include "black_temple.h"
#include "WorldPacket.h"

/**** Creature Summon and Recognition IDs ****/
enum CreatureEntry
{
    EMPTY                   = 0,
    AKAMA                   = 22990,
    ILLIDAN_STORMRAGE       = 22917,
    BLADE_OF_AZZINOTH       = 22996,
    FLAME_OF_AZZINOTH       = 22997,
    MAIEV_SHADOWSONG        = 23197,
    SHADOW_DEMON            = 23375,
    DEMON_FIRE              = 23069,
    FLAME_CRASH             = 23336,
    ILLIDAN_DOOR_TRIGGER    = 23412,
    SPIRIT_OF_OLUM          = 23411,
    SPIRIT_OF_UDALO         = 23410,
    ILLIDARI_ELITE          = 23226,
    PARASITIC_SHADOWFIEND   = 23498,
    CAGE_TRAP_TRIGGER       = 23292,
};

/************* Quotes and Sounds ***********************/
// Gossip for when a player clicks Akama
#define GOSSIP_ITEM "We are ready to face Illidan"

// Text ids of the scripted Akama/Illidan/Maiev conversation lines
#define SAY_CONVO_1 -1564097
#define SAY_CONVO_2 -1564098
#define SAY_CONVO_3 -1564099
#define SAY_CONVO_4 -1564100
#define SAY_CONVO_5 -1564101
#define SAY_CONVO_6 -1564102
#define SAY_CONVO_7 -1564103
#define SAY_CONVO_8 -1564104
#define SAY_CONVO_9 -1564105
#define SAY_CONVO_10 -1564106
#define SAY_CONVO_11 -1564107
#define SAY_CONVO_12 -1564108
#define SAY_CONVO_13 -1564109
#define SAY_CONVO_14 -1564110
#define SAY_CONVO_15 -1564111

#define SAY_TAUNT_1 -1564112
#define SAY_TAUNT_2 -1564113
#define SAY_TAUNT_3 -1564114
#define SAY_TAUNT_4 -1564115

#define SAY_MAIEV_TAUNT_1 -1564116
#define SAY_MAIEV_TAUNT_2 -1564117
#define SAY_MAIEV_TAUNT_3 -1564118
#define SAY_MAIEV_TAUNT_4 -1564119

// One scripted line of dialogue.
// emote only defined if not related to textId (in database)
struct Yells
{
    int32 textId;                       // ScriptDev2 text id (negative), 0 for emote-only steps
    uint32 creature, timer, emote;      // speaker entry, delay before next step (ms), emote id
    bool Talk;                          // whether the textId is actually spoken on this step
};

static Yells Conversation[]=
{
    {SAY_CONVO_1, ILLIDAN_STORMRAGE, 8000, 0, true},
    {0, ILLIDAN_STORMRAGE, 5000, 396, true},
    {SAY_CONVO_2, AKAMA, 7000, 0, true},
    {0, AKAMA, 5000, 66, true},
    {SAY_CONVO_3, ILLIDAN_STORMRAGE, 8000, 0, true},
    {SAY_CONVO_4, AKAMA, 3000, 0, true},
    {0, AKAMA, 2000, 15, true},
    {SAY_CONVO_5, ILLIDAN_STORMRAGE, 3000, 0, true},
    {0, EMPTY, 1000, 0, true},
    {0, EMPTY, 0, 0, false},
    {SAY_CONVO_6, ILLIDAN_STORMRAGE, 8000, 0, true},
    {SAY_CONVO_7, MAIEV_SHADOWSONG, 8000, 0, true},
    {SAY_CONVO_8, ILLIDAN_STORMRAGE, 7000, 0, true},
    {SAY_CONVO_9, MAIEV_SHADOWSONG, 8000, 0, true},
    {SAY_CONVO_10, ILLIDAN_STORMRAGE, 1000, 0, false},
    {SAY_CONVO_11, MAIEV_SHADOWSONG, 6000, 0, true},
    // Emote dead for now. Kill him later
    {SAY_CONVO_12, ILLIDAN_STORMRAGE, 22000, 0, true},
    {SAY_CONVO_13, MAIEV_SHADOWSONG, 9000, 0, true},
    // Fixed: this entry had only 4 initializers for the 5-field Yells
    // aggregate, so 'true' landed in 'emote' and 'Talk' defaulted to
    // false, silently muting Maiev's line.
    {SAY_CONVO_14, MAIEV_SHADOWSONG, 0, 0, true},
    {SAY_CONVO_15, AKAMA, 8000, 0, true},
    {0, EMPTY, 1000, 0, false}
};

static Yells RandomTaunts[]=
{
    {SAY_TAUNT_1, ILLIDAN_STORMRAGE, 0, 0, false},
    {SAY_TAUNT_2, ILLIDAN_STORMRAGE, 0, 0, false},
    {SAY_TAUNT_3, ILLIDAN_STORMRAGE, 0, 0, false},
    {SAY_TAUNT_4, ILLIDAN_STORMRAGE, 0, 0, false}
};

static Yells MaievTaunts[]=
{
    {SAY_MAIEV_TAUNT_1, MAIEV_SHADOWSONG, 0, 0, false},
    {SAY_MAIEV_TAUNT_2, MAIEV_SHADOWSONG, 0, 0, false},
    {SAY_MAIEV_TAUNT_3, MAIEV_SHADOWSONG, 0, 0, false},
    {SAY_MAIEV_TAUNT_4, MAIEV_SHADOWSONG, 0, 0, false}
};

// Yells for/by Akama
#define SAY_AKAMA_BEWARE -1564120
#define SAY_AKAMA_MINION -1564121
#define SAY_AKAMA_LEAVE -1564122

// Self explanatory
#define SAY_KILL1 -1564123
#define SAY_KILL2 -1564124

// I think I'll fly now and let my subordinates take you on
#define SAY_TAKEOFF -1564125
#define SAY_SUMMONFLAMES -1564126

// When casting Eye Blast. Demon Fire will appear on places that he casts this
#define SAY_EYE_BLAST -1564127

// kk, I go big, dark and demon on you.
#define SAY_MORPH -1564128

// I KILL!
#define SAY_ENRAGE -1564129

/************** Spells *************/
// Normal Form
#define SPELL_SHEAR 41032                   // Reduces Max. Health by 60% for 7 seconds. Can stack 19 times. 1.5 second cast
#define SPELL_FLAME_CRASH 40832             // Summons an invis/unselect passive mob that has an aura of flame in a circle around him.
#define SPELL_DRAW_SOUL 40904               // 5k Shadow Damage in front of him. Heals Illidan for 100k health (script effect)
#define SPELL_PARASITIC_SHADOWFIEND 41917   // DoT of 3k Shadow every 2 seconds. Lasts 10 seconds. (Script effect: Summon 2 parasites once the debuff has ticked off)
#define SPELL_SUMMON_PARASITICS 41915       // Summons 2 Parasitic Shadowfiends on the target. It's supposed to be cast as soon as the Parasitic Shadowfiend debuff is gone, but the spells aren't linked :(
#define SPELL_AGONIZING_FLAMES 40932        // 4k fire damage initial to target and anyone w/i 5 yards. PHASE 3 ONLY
#define SPELL_ENRAGE 40683                  // Increases damage by 50% and attack speed by 30%. 20 seconds, PHASE 5 ONLY
// Flying (Phase 2)
#define SPELL_THROW_GLAIVE 39635            // Throws a glaive on the ground
#define SPELL_THROW_GLAIVE2 39849           // Animation for the above spell
#define SPELL_GLAIVE_RETURNS 39873          // Glaive flies back to Illidan
#define SPELL_FIREBALL 40598                // 2.5k-3.5k damage in 10 yard radius. 2 second cast time.
#define SPELL_DARK_BARRAGE 40585            // 10 second channeled spell, 3k shadow damage per second.
// Demon Form
#define SPELL_DEMON_TRANSFORM_1 40511       // First phase of animations for transforming into Dark Illidan (fall to ground)
#define SPELL_DEMON_TRANSFORM_2 40398       // Second phase of animations (kneel)
#define SPELL_DEMON_TRANSFORM_3 40510       // Final phase of animations (stand up and roar)
#define SPELL_DEMON_FORM 40506              // Transforms into Demon Illidan. Has an Aura of Dread on him.
#define SPELL_SHADOW_BLAST 41078            // 8k - 11k Shadow Damage. Targets highest threat. Has a splash effect, damaging anyone in 20 yards of the target.
#define SPELL_FLAME_BURST 41126             // Hurls fire at entire raid for ~3.5k damage every 10 seconds. Resistable. (Does not work: Script effect)
#define SPELL_FLAME_BURST_EFFECT 41131      // The actual damage. Handled by core (41126 triggers 41131)
// Other Illidan spells
#define SPELL_KNEEL 39656                   // Before beginning encounter, this is how he appears (talking to Wilson).
#define SPELL_SHADOW_PRISON 40647           // Illidan casts this spell to immobilize entire raid when he summons Maiev.
#define SPELL_DEATH 41220                   // This spell doesn't do anything except stun Illidan and set him on his knees.
#define SPELL_BERSERK               45078 // Damage increased by 500%, attack speed by 150%

// Non-Illidan spells
#define SPELL_AKAMA_DOOR_CHANNEL        41268 // Akama's channel spell on the door before the Temple Summit
#define SPELL_DEATHSWORN_DOOR_CHANNEL   41269 // Olum and Udalo's channel spell on the door before the Temple Summit
#define SPELL_AKAMA_DOOR_FAIL           41271 // Not sure where this is really used...
#define SPELL_HEALING_POTION            40535 // Akama uses this to heal himself to full.
#define SPELL_AZZINOTH_CHANNEL          39857 // Glaives cast it on Flames. Not sure if this is the right spell.
#define SPELL_SHADOW_DEMON_PASSIVE      41079 // Adds the "shadowform" aura to Shadow Demons.
#define SPELL_CONSUME_SOUL              41080 // Once the Shadow Demons reach their target, they use this to kill them
#define SPELL_PARALYZE                  41083 // Shadow Demons cast this on their target
#define SPELL_PURPLE_BEAM               39123 // Purple Beam connecting Shadow Demon to their target
#define SPELL_CAGE_TRAP_DUMMY           40761 // Put this in DB for cage trap GO.
#define SPELL_EYE_BLAST_TRIGGER         40017 // This summons Demon Form every few seconds and deals ~20k damage in its radius
#define SPELL_EYE_BLAST                 39908 // This does the blue flamey animation.
#define SPELL_FLAME_CRASH_EFFECT        40836 // Firey blue ring of circle that the other flame crash summons
#define SPELL_BLAZE_EFFECT              40610 // Green flame on the ground, triggers damage (5k) every few seconds
#define SPELL_BLAZE_SUMMON              40637 // Summons the Blaze creature
#define SPELL_DEMON_FIRE                40029 // Blue fire trail left by Eye Blast. Deals 2k per second if players stand on it.
#define SPELL_CAGED                     40695 // Caged Trap triggers will cast this on Illidan if he is within 3 yards
#define SPELL_CAGE_TRAP_SUMMON          40694 // Summons a Cage Trap GO (bugged) on the ground along with a Cage Trap Disturb Trigger mob (working)
#define SPELL_CAGE_TRAP_BEAM            40713 // 8 Triggers on the ground in an octagon cast spells like this on Illidan 'caging him'
#define SPELL_FLAME_BLAST               40631 // Flames of Azzinoth use this. Frontal cone AoE 7k-9k damage.
#define SPELL_CHARGE                    40602 // Flames of Azzinoth charges whoever is too far from them. They enrage after this. For simplicity, we'll use the same enrage as Illidan.
#define SPELL_TELEPORT_VISUAL           41232 // Teleport visual for Maiev
#define SPELL_SHADOWFIEND_PASSIVE       41913 // Passive aura for shadowfiends

// Other defines
// Center of the Temple Summit arena (Illidan returns here between phases)
#define CENTER_X 676.740f
#define CENTER_Y 305.297f
#define CENTER_Z 353.192f

// Warglaive of Azzinoth item display entries for SetEquipmentSlots
#define EQUIP_ID_MAIN_HAND 32837
#define EQUIP_ID_OFF_HAND 32838

/*** Phase Names ***/
enum Phase
{
    PHASE_NORMAL        = 1, // Phase 1: ground combat
    PHASE_FLIGHT        = 2, // Phase 2: airborne, Blades/Flames of Azzinoth active
    PHASE_NORMAL_2      = 3, // Phase 3: back on the ground
    PHASE_DEMON         = 4, // Phase 4: demon form
    PHASE_NORMAL_MAIEV  = 5, // Phase 5: normal form with Maiev assisting
    PHASE_DEMON_SEQUENCE = 6, // Transitional transform animation sequence
};

// Generic position holder; 'id' doubles as a creature entry for SpiritSpawns
struct Locations
{
    float x, y, z;
    uint32 id;
};

// Where the two Blades of Azzinoth are thrown/retrieved in Phase 2
static Locations GlaivePosition[]=
{
    {695.105f, 305.303f, 354.256f},
    {659.338f, 305.303f, 354.256f},
    {700.105f, 305.303f, 354.256f},
    {664.338f, 305.303f, 354.256f}
};

// Endpoints used to pick a path for the Eye Blast trail
static Locations EyeBlast[]=
{
    {650.697f, 320.128f, 353.730f},
    {652.799f, 275.091f, 353.367f},
    {701.527f, 273.815f, 353.230f},
    {709.865f, 325.654f, 353.322f}
};

// Akama's scripted walking path up to (and back down from) the summit
static Locations AkamaWP[]=
{
    {770.01f, 304.50f, 312.29f}, // Bottom of the first stairs, at the doors
    {780.66f, 304.50f, 319.74f}, // Top of the first stairs
    {790.13f, 319.68f, 319.76f}, // Bottom of the second stairs (left from the entrance)
    {787.17f, 347.38f, 341.42f}, // Top of the second stairs
    {781.34f, 350.31f, 341.44f}, // Bottom of the third stairs
    {762.60f, 361.06f, 353.60f}, // Top of the third stairs
    {756.35f, 360.52f, 353.27f}, // Before the door-thingy
    {743.82f, 342.21f, 353.00f}, // Somewhere further
    {732.69f, 305.13f, 353.00f}, // In front of Illidan
    {738.11f, 365.44f, 353.00f}, // in front of the door-thingy (the other one!)
    {792.18f, 366.62f, 341.42f}, // Down the first flight of stairs
    {796.84f, 304.89f, 319.76f}, // Down the second flight of stairs
    {782.01f, 304.55f, 319.76f}  // Final location - back at the initial gates. This is where he will fight the minions!
};

// 755.762, 304.0747, 312.1769 -- This is where Akama should be spawned
static Locations SpiritSpawns[]=
{
    {755.5426f, 309.9156f, 312.2129f, SPIRIT_OF_UDALO},
    {755.5426f, 298.7923f, 312.0834f, SPIRIT_OF_OLUM}
};

// Simple waypoint node used by Akama's movement list
struct WayPoints
{
    WayPoints(uint32 _id, float _x, float _y, float _z)
    {
        id = _id;
        x = _x;
        y = _y;
        z = _z;
    }
    uint32 id;
    float x, y, z;
};

struct Animation // For the demon transformation
{
    uint32 aura, unaura, timer, size, displayid, phase; // spell to apply, spell to remove, step duration (ms), scale, model, resulting phase
    bool equip;                                          // whether the warglaives are shown during this step
};

// Two sequences back-to-back: normal -> demon (steps 0-4), demon -> normal (steps 5-9)
static Animation DemonTransformation[]=
{
    {SPELL_DEMON_TRANSFORM_1, 0, 1300, 0, 0, 6, true},
    {SPELL_DEMON_TRANSFORM_2, SPELL_DEMON_TRANSFORM_1, 4000, 0, 0, 6, true},
    {SPELL_DEMON_FORM, 0, 3000, 1073741824, 21322, 6, false},
    {SPELL_DEMON_TRANSFORM_3, SPELL_DEMON_TRANSFORM_2, 3500, 0, 0, 6, false},
    {0, 0, 0, 0, 0, 4, false},
    {SPELL_DEMON_TRANSFORM_1, 0, 1500, 0, 0, 6, false},
    {SPELL_DEMON_TRANSFORM_2, SPELL_DEMON_TRANSFORM_1, 4000, 0, 0, 6, false},
    {0, SPELL_DEMON_FORM, 3000, 1069547520, 21135, 6, false},
    {SPELL_DEMON_TRANSFORM_3, SPELL_DEMON_TRANSFORM_2, 3500, 0, 0, 6, true},
    {0, 0, 0, 0, 0, 8, true}
};

/**** Demon Fire will be used for Eye Blast.
Illidan needs to have access to it's vars and functions, so we'll set it here ****/
struct MANGOS_DLL_DECL demonfireAI : public ScriptedAI
{
    demonfireAI(Creature* pCreature) : ScriptedAI(pCreature)
    {
        m_pInstance = (ScriptedInstance*)pCreature->GetInstanceData();
        Reset();
    }

    ScriptedInstance* m_pInstance;

    uint64 IllidanGUID;     // cached from instance data once Illidan is found
    bool IsTrigger;         // set by Illidan's AI for the Eye Blast path trigger; disables this AI
    uint32 CheckTimer;      // interval for looking up / validating Illidan
    uint32 DemonFireTimer;  // interval between SPELL_DEMON_FIRE casts on self
    uint32 DespawnTimer;    // lifetime before self-despawn

    void Reset()
    {
        IllidanGUID = 0;
        IsTrigger = false;
        CheckTimer = 2000;
        DemonFireTimer = 0;
        DespawnTimer = 45000;
    }

    // Passive mob: never attacks or reacts to nearby units.
    void AttackStart(Unit* who) { }
    void MoveInLineOfSight(Unit *who){ }

    void UpdateAI(const uint32 diff)
    {
        // Eye Blast path triggers are driven entirely by Illidan's AI.
        if (IsTrigger)
            return;

        m_creature->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE);

        if (CheckTimer < diff)
        {
            if (!IllidanGUID && m_pInstance)
            {
                if (Creature* pIllidan = m_pInstance->instance->GetCreature(m_pInstance->GetData64(NPC_ILLIDAN_STORMRAGE)))
                {
                    IllidanGUID = m_pInstance->GetData64(NPC_ILLIDAN_STORMRAGE);
                    // NOTE(review): presumably despawns the fire when Illidan is not flying — confirm the spline-flag semantics
                    if (!pIllidan->HasSplineFlag(SPLINEFLAG_NO_SPLINE))
                        m_creature->SetDeathState(JUST_DIED);
                }
            }
            CheckTimer = 2000;
        }else CheckTimer -= diff;

        if (DemonFireTimer < diff)
        {
            DoCastSpellIfCan(m_creature, SPELL_DEMON_FIRE);
            DemonFireTimer = 30000;
        }else DemonFireTimer -= diff;

        if (DespawnTimer < diff)
            m_creature->SetDeathState(JUST_DIED);
        else DespawnTimer -= diff;

        DoMeleeAttackIfReady();
    }
};

/******* Functions and vars for Akama's AI ******/
struct MANGOS_DLL_DECL npc_akama_illidanAI : public ScriptedAI
{
    npc_akama_illidanAI(Creature* pCreature) : ScriptedAI(pCreature)
    {
        m_pInstance = (ScriptedInstance*)pCreature->GetInstanceData();
        WayPointList.clear();
        Reset();
    }

    /* Instance Data */
    ScriptedInstance* m_pInstance;

    /* Timers */
    uint32 ChannelTimer;        // drives the door-opening channel sequence
    uint32 TalkTimer;           // drives the mid-fight speech sequence
    uint32 WalkTimer;           // delay before moving to the next waypoint
    uint32 SummonMinionTimer;   // interval between Illidari Elite summons

    /* GUIDs */
    uint64 IllidanGUID;
    uint64 PlayerGUID;          // player who started the door event
    uint64 SpiritGUID[2];       // Udalo and Olum
    uint64 ChannelGUID;         // invisible door-channel target

    bool IsTalking;
    bool StartChanneling;
    bool DoorOpen;
    bool FightMinions;          // true once Akama leaves to fight Illidari Elites at the gate
    bool IsReturningToIllidan;
    bool IsWalking;

    uint32 TalkCount;
    uint32
ChannelCount; std::list<WayPoints> WayPointList; std::list<WayPoints>::iterator WayPoint; void BeginEvent(uint64 PlayerGUID); void Reset() { if (m_pInstance) { m_pInstance->SetData(TYPE_ILLIDAN, NOT_STARTED); GameObject* pGate = m_pInstance->instance->GetGameObject(m_pInstance->GetData64(GO_ILLIDAN_GATE)); // close door if already open (when raid wipes or something) if (pGate && !pGate->GetGoState()) pGate->SetGoState(GO_STATE_READY); for(uint32 i = GO_ILLIDAN_DOOR_R; i < GO_ILLIDAN_DOOR_L + 1; ++i) { if (GameObject* pDoor = m_pInstance->instance->GetGameObject(m_pInstance->GetData64(i))) pDoor->SetGoState(GO_STATE_ACTIVE); } } IllidanGUID = 0; PlayerGUID = 0; ChannelGUID = 0; for(uint8 i = 0; i < 2; ++i) SpiritGUID[i] = 0; ChannelTimer = 0; ChannelCount = 0; SummonMinionTimer = 2000; WalkTimer = 0; IsWalking = false; TalkTimer = 0; TalkCount = 0; KillAllElites(); IsReturningToIllidan = false; FightMinions = false; IsTalking = false; StartChanneling = false; DoorOpen = false; // Database sometimes has strange values.. 
m_creature->SetUInt32Value(UNIT_NPC_FLAGS, 0); m_creature->SetFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); m_creature->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); m_creature->SetVisibility(VISIBILITY_ON); } // Do not call reset in Akama's evade mode, as this will stop him from summoning minions after he kills the first bit void EnterEvadeMode() { m_creature->RemoveAllAuras(); m_creature->DeleteThreatList(); m_creature->CombatStop(true); } void KillAllElites() { std::vector<ObjectGuid> vGuids; m_creature->FillGuidsListFromThreatList(vGuids); for (std::vector<ObjectGuid>::const_iterator itr = vGuids.begin();itr != vGuids.end(); ++itr) { Unit* pUnit = m_creature->GetMap()->GetUnit(*itr); if (pUnit && pUnit->GetTypeId() == TYPEID_UNIT && pUnit->GetEntry() == ILLIDARI_ELITE) pUnit->SetDeathState(JUST_DIED); } } void ReturnToIllidan() { KillAllElites(); FightMinions = false; IsReturningToIllidan = true; WayPoint = WayPointList.begin(); m_creature->SetSpeedRate(MOVE_RUN, 2.0f); m_creature->RemoveSplineFlag(SPLINEFLAG_WALKMODE); IsWalking = true; } void AddWaypoint(uint32 id, float x, float y, float z) { WayPoints AWP(id, x, y, z); WayPointList.push_back(AWP); } void DamageTaken(Unit *done_by, uint32 &damage) { if (damage > m_creature->GetHealth() && (done_by->GetGUID() != m_creature->GetGUID())) { damage = 0; DoCastSpellIfCan(m_creature, SPELL_HEALING_POTION); } } void BeginDoorEvent(Player* pPlayer) { // Requires Instance and this additional check to prevent exploits if (!m_pInstance || m_pInstance->GetData(TYPE_COUNCIL) != DONE) return; debug_log("½Å±¾¿â£º Akama - Door event initiated by player %s", pPlayer->GetName()); PlayerGUID = pPlayer->GetGUID(); if (GameObject* pGate = m_pInstance->instance->GetGameObject(m_pInstance->GetData64(GO_ILLIDAN_GATE))) { float x,y,z; pGate->GetPosition(x, y, z); Creature* Channel = m_creature->SummonCreature(ILLIDAN_DOOR_TRIGGER, x, y, z+5, 0, TEMPSUMMON_TIMED_OR_CORPSE_DESPAWN, 360000); if (Channel) { ChannelGUID = 
Channel->GetGUID(); // Invisible but spell visuals can still be seen. Channel->SetDisplayId(11686); Channel->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE); float PosX, PosY, PosZ; m_creature->GetPosition(PosX, PosY, PosZ); for(uint8 i = 0; i < 2; ++i) { Creature* Spirit = m_creature->SummonCreature(SpiritSpawns[i].id, SpiritSpawns[i].x, SpiritSpawns[i].y, SpiritSpawns[i].z, 0, TEMPSUMMON_TIMED_OR_CORPSE_DESPAWN, 360000); if (Spirit) { Spirit->SetVisibility(VISIBILITY_OFF); SpiritGUID[i] = Spirit->GetGUID(); } } StartChanneling = true; m_creature->RemoveFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); m_creature->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); DoCastSpellIfCan(Channel, SPELL_AKAMA_DOOR_FAIL); } } } void MovementInform(uint32 type, uint32 id) { if (type != POINT_MOTION_TYPE || !IsWalking) return; if (WayPoint->id != id) return; switch(id) { case 6: if (!IsReturningToIllidan) { // open the doors that close the summit for(uint32 i = GO_ILLIDAN_DOOR_R; i < GO_ILLIDAN_DOOR_L+1; ++i) { if (GameObject* pDoor = m_pInstance->instance->GetGameObject(m_pInstance->GetData64(i))) pDoor->SetGoState(GO_STATE_ACTIVE); } } break; case 7: if (IsReturningToIllidan) { IsWalking = false; if (IllidanGUID) { Creature* Illidan = m_creature->GetMap()->GetCreature(IllidanGUID); if (Illidan) { float dx = Illidan->GetPositionX() + rand()%15; float dy = Illidan->GetPositionY() + rand()%15; m_creature->GetMotionMaster()->MovePoint(13, dx, dy, Illidan->GetPositionZ()); m_creature->SetUInt64Value(UNIT_FIELD_TARGET, IllidanGUID); } } } break; case 8: m_creature->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); if (!IsReturningToIllidan) { IsWalking = false; BeginEvent(PlayerGUID); } break; case 12: IsWalking = false; FightMinions = true; m_creature->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); break; } ++WayPoint; WalkTimer = 200; } void DeleteFromThreatList() { // If we do not have Illidan's GUID, do not proceed if (!IllidanGUID) return; // Create a pointer 
to Illidan Creature* Illidan = m_creature->GetMap()->GetCreature(IllidanGUID); // No use to continue if Illidan does not exist if (!Illidan) return; ThreatList const& tList = m_creature->getThreatManager().getThreatList(); for (ThreatList::const_iterator itr = tList.begin();itr != tList.end(); ++itr) { // Loop through threatlist till our GUID is found in it. if ((*itr)->getUnitGuid() == m_creature->GetGUID()) { (*itr)->removeReference(); // Delete ourself from his threatlist. return; // No need to continue anymore. } } // Now we delete our threatlist to prevent attacking anyone for now m_creature->DeleteThreatList(); } void UpdateAI(const uint32 diff) { if (IllidanGUID) { if (Creature* Illidan = m_creature->GetMap()->GetCreature(IllidanGUID)) { if (Illidan->IsInEvadeMode() && !m_creature->IsInEvadeMode()) EnterEvadeMode(); if (Illidan->GetHealthPercent() < 85.0f && m_creature->isInCombat() && !FightMinions) { if (TalkTimer < diff) { switch(TalkCount) { case 0: DoScriptText(SAY_AKAMA_MINION, Illidan); TalkTimer = 8000; TalkCount = 1; break; case 1: DoScriptText(SAY_AKAMA_LEAVE, m_creature); TalkTimer = 3000; TalkCount = 2; break; case 2: IsTalking = true; TalkTimer = 2000; m_creature->RemoveAllAuras(); m_creature->CombatStop(true); m_creature->AttackStop(); m_creature->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); TalkCount = 3; break; case 3: DeleteFromThreatList(); IsWalking = true; WayPoint = WayPointList.begin(); std::advance(WayPoint, 9); m_creature->RemoveSplineFlag(SPLINEFLAG_WALKMODE); break; } }else TalkTimer -= diff; } if (Illidan->GetHealthPercent() < 4.0f && !IsReturningToIllidan) ReturnToIllidan(); } }else { if (m_pInstance) IllidanGUID = m_pInstance->GetData64(NPC_ILLIDAN_STORMRAGE); } if (IsWalking && WalkTimer) { if (WalkTimer <= diff) { if (WayPoint == WayPointList.end()) return; m_creature->GetMotionMaster()->MovePoint(WayPoint->id, WayPoint->x, WayPoint->y,WayPoint->z); WalkTimer = 0; }else WalkTimer -= diff; } if (StartChanneling) { if 
(ChannelTimer < diff) { switch(ChannelCount) { case 3: if (!DoorOpen) { m_creature->InterruptNonMeleeSpells(true); for(uint8 i = 0; i < 2; ++i) { if (SpiritGUID[i]) { Creature* Spirit = m_creature->GetMap()->GetCreature(SpiritGUID[i]); if (Spirit) Spirit->InterruptNonMeleeSpells(true); } } if (GameObject* pGate = m_pInstance->instance->GetGameObject(m_pInstance->GetData64(GO_ILLIDAN_GATE))) pGate->SetGoState(GO_STATE_ACTIVE); ++ChannelCount; ChannelTimer = 5000; } break; case 4: m_creature->HandleEmote(EMOTE_ONESHOT_SALUTE); ChannelTimer = 2000; ++ChannelCount; break; case 5: DoScriptText(SAY_AKAMA_BEWARE, m_creature); if (ChannelGUID) { Creature* ChannelTarget = m_creature->GetMap()->GetCreature(ChannelGUID); if (ChannelTarget) ChannelTarget->SetDeathState(JUST_DIED); ChannelGUID = 0; } for(uint8 i = 0; i < 2; ++i) { if (SpiritGUID[i]) { Creature* Spirit = m_creature->GetMap()->GetCreature(SpiritGUID[i]); if (Spirit) Spirit->SetDeathState(JUST_DIED); } } ChannelTimer = 6000; ++ChannelCount; break; case 6: StartChanneling = false; if (WayPointList.empty()) { error_log("½Å±¾¿â£º Akama has no waypoints to start with!"); return; } WayPoint = WayPointList.begin(); m_creature->AddSplineFlag(SPLINEFLAG_WALKMODE); m_creature->GetMotionMaster()->MovePoint(WayPoint->id, WayPoint->x, WayPoint->y, WayPoint->z); IsWalking = true; break; default: if (ChannelGUID) { Creature* Channel = m_creature->GetMap()->GetCreature(ChannelGUID); if (Channel) { m_creature->InterruptNonMeleeSpells(true); for(uint8 i = 0; i < 2; ++i) { if (SpiritGUID[i]) { Creature* Spirit = m_creature->GetMap()->GetCreature(SpiritGUID[i]); if (Spirit) { Spirit->InterruptNonMeleeSpells(true); if (ChannelCount%2 == 0) { Spirit->CastSpell(Channel, SPELL_DEATHSWORN_DOOR_CHANNEL,false); DoCastSpellIfCan(Channel, SPELL_AKAMA_DOOR_CHANNEL); } else { if (Spirit->GetVisibility() == VISIBILITY_OFF) Spirit->SetVisibility(VISIBILITY_ON); } } } } if (ChannelCount < 3) ++ChannelCount; ChannelTimer = 10000; } } break; } 
}else ChannelTimer -= diff; } if (FightMinions) { if (SummonMinionTimer < diff) { if (IllidanGUID) { Creature* Illidan = m_creature->GetMap()->GetCreature(IllidanGUID); if (!Illidan || Illidan->IsInEvadeMode()) { Reset(); EnterEvadeMode(); return; } } float x,y,z; m_creature->GetPosition(x,y,z); Creature* Elite = m_creature->SummonCreature(ILLIDARI_ELITE, x+rand()%10, y+rand()%10, z, 0, TEMPSUMMON_TIMED_OR_DEAD_DESPAWN, 30000); if (Elite) { Elite->AI()->AttackStart(m_creature); Elite->AddThreat(m_creature, 1000000.0f); AttackStart(Elite); } SummonMinionTimer = urand(10000, 16000); }else SummonMinionTimer -= diff; } // If we don't have a target, or is talking, or has run away, return if (!m_creature->SelectHostileTarget() || !m_creature->getVictim()) return; DoMeleeAttackIfReady(); } }; /************************************** Illidan's AI ***************************************/ struct MANGOS_DLL_DECL boss_illidan_stormrageAI : public ScriptedAI { boss_illidan_stormrageAI(Creature* pCreature) : ScriptedAI(pCreature) { m_pInstance = (ScriptedInstance*)pCreature->GetInstanceData(); for(uint8 i = 0; i < 2; ++i) { FlameGUID[i] = 0; GlaiveGUID[i] = 0; } AkamaGUID = 0; MaievGUID = 0; Reset(); } /** Instance Data **/ ScriptedInstance* m_pInstance; /** Generic **/ bool IsTalking; bool HasSummoned; bool RefaceVictim; bool InformAkama; uint32 Phase; uint32 GlobalTimer; uint32 TalkCount; uint32 DemonFormSequence; /** GUIDs **/ uint64 FlameGUID[2]; uint64 GlaiveGUID[2]; uint64 AkamaGUID; uint64 MaievGUID; /** Timers **/ uint32 ShearTimer; uint32 DrawSoulTimer; uint32 FlameCrashTimer; uint32 ParasiticShadowFiendTimer; uint32 FireballTimer; uint32 EyeBlastTimer; uint32 DarkBarrageTimer; uint32 SummonBladesTimer; // Animate summoning the Blades of Azzinoth in Phase 2 uint32 SummonFlamesTimer; // Summon Flames of Azzinoth in Phase 2 uint32 CheckFlamesTimer; // This is used to check the status of the Flames to see if we should begin entering Phase 3 or not. 
    uint32 RetrieveBladesTimer; // Animate retrieving the Blades of Azzinoth in Phase 2 -> 3 transition
    uint32 LandTimer; // This is used at the end of phase 2 to signal Illidan landing after Flames are dead
    uint32 AgonizingFlamesTimer;
    uint32 ShadowBlastTimer;
    uint32 FlameBurstTimer;
    uint32 ShadowDemonTimer;
    uint32 TalkTimer;
    uint32 TransformTimer;
    uint32 EnrageTimer;
    uint32 CageTimer;
    uint32 LayTrapTimer;
    uint32 AnimationTimer;
    uint32 TauntTimer; // This is used for his random yells
    uint32 FaceVictimTimer;
    uint32 BerserkTimer;

    void Reset()
    {
        Phase = PHASE_NORMAL;

        // Check if any flames/glaives are alive/existing. Kill if alive and set GUIDs to 0
        for(uint8 i = 0; i < 2; ++i)
        {
            if (Creature* Flame = m_creature->GetMap()->GetCreature(FlameGUID[i]))
            {
                if (Flame->isAlive())
                    Flame->SetDeathState(JUST_DIED);
                FlameGUID[i] = 0;
            }
            if (Creature* Glaive = m_creature->GetMap()->GetCreature(GlaiveGUID[i]))
            {
                if (Glaive->isAlive())
                    Glaive->SetDeathState(JUST_DIED);
                GlaiveGUID[i] = 0;
            }
        }

        // Respawn Akama if he died, and put him back into his pre-event state.
        if (Creature* pAkama = m_creature->GetMap()->GetCreature(AkamaGUID))
        {
            if (!pAkama->isAlive())
                pAkama->Respawn();
            pAkama->AI()->EnterEvadeMode();
        }

        InformAkama = false;
        RefaceVictim = false;
        HasSummoned = false;
        FaceVictimTimer = 1000;
        BerserkTimer = 1500000; // 25 minute hard enrage
        GlobalTimer = 0;
        DemonFormSequence = 0;

        /** Normal Form **/
        ShearTimer = urand(20000, 30000); // 20 to 30 seconds
        FlameCrashTimer = 30000; // 30 seconds
        ParasiticShadowFiendTimer = 25000; // 25 seconds
        DrawSoulTimer = 50000; // 50 seconds

        /** Phase 2 **/
        SummonBladesTimer = 10000;
        SummonFlamesTimer = 20000; // Phase 2 timers may be incorrect
        FireballTimer = 5000;
        DarkBarrageTimer = 45000;
        EyeBlastTimer = 30000;
        CheckFlamesTimer = 5000;
        RetrieveBladesTimer = 5000;
        LandTimer = 0;

        /** Phase 3+ **/
        AgonizingFlamesTimer = 35000; // Phase 3+ timers may be incorrect
        ShadowBlastTimer = 3000;
        FlameBurstTimer = 10000;
        ShadowDemonTimer = 30000;
        TransformTimer = 90000;
        EnrageTimer = 40000;
        CageTimer = 30000;
        LayTrapTimer = CageTimer + 2000;
        AnimationTimer = 0;
        TauntTimer = 30000; // This timer may be off.

        m_creature->SetDisplayId(21135);
        m_creature->InterruptNonMeleeSpells(false);
        m_creature->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE);
        m_creature->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE);

        // Unequip warglaives if needed
        SetEquipmentSlots(false, EQUIP_UNEQUIP, EQUIP_UNEQUIP, EQUIP_NO_CHANGE);
        m_creature->RemoveSplineFlag(SPLINEFLAG_NO_SPLINE);

        IsTalking = false;
        TalkCount = 0;
        TalkTimer = 0;

        if (m_pInstance)
            m_pInstance->SetData(TYPE_ILLIDAN, NOT_STARTED);
    }

    // Refuses to engage while talking, flying, transforming, or still kneeling pre-event.
    void AttackStart(Unit *who)
    {
        if (!who || IsTalking || Phase == 2 || Phase == 4 || Phase == 6 || m_creature->HasAura(SPELL_KNEEL, EFFECT_INDEX_0))
            return;

        if (who == m_creature)
            return;

        if (m_creature->Attack(who, true))
        {
            m_creature->AddThreat(who);
            m_creature->SetInCombatWith(who);
            who->SetInCombatWith(m_creature);

            DoStartMovement(who);
        }
    }

    void MoveInLineOfSight(Unit *who)
    {
        if (!who || m_creature->getVictim() || IsTalking || m_creature->HasFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE))
            return;

        if (who->isTargetableForAttack() && who->isInAccessablePlaceFor(m_creature) && m_creature->IsHostileTo(who))
        {
            if (!m_creature->CanFly() && m_creature->GetDistanceZ(who) > CREATURE_Z_ATTACK_RANGE)
                return;

            float attackRadius = m_creature->GetAttackDistance(who);
            if (m_creature->IsWithinDistInMap(who, attackRadius) && m_creature->IsWithinLOSInMap(who))
            {
                who->RemoveSpellsCausingAura(SPELL_AURA_MOD_STEALTH);
                AttackStart(who);
            }
        }
    }

    void JustDied(Unit *killer)
    {
        IsTalking = false;
        TalkCount = 0;
        TalkTimer = 0;
        m_creature->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE);

        if (!m_pInstance)
            return;

        // Completed
        m_pInstance->SetData(TYPE_ILLIDAN, DONE);

        for(uint32 i = GO_ILLIDAN_DOOR_R; i < GO_ILLIDAN_DOOR_L + 1; ++i)
        {
            // Open Doors
            if (GameObject* pDoor = m_pInstance->instance->GetGameObject(m_pInstance->GetData64(i)))
                pDoor->SetGoState(GO_STATE_ACTIVE);
        }
    }

    void KilledUnit(Unit *victim)
    {
        if (victim == m_creature)
            return;
        DoScriptText(urand(0, 1) ? SAY_KILL1 : SAY_KILL2, m_creature);
    }

    void DamageTaken(Unit *done_by, uint32 &damage)
    {
        if (damage > m_creature->GetHealth()) // Don't let ourselves be slain before we do our death speech
        {
            damage = 0;
            m_creature->SetHealth(m_creature->GetMaxHealth()/100); // clamp to 1% health
        }
    }

    // Cast helper that also turns Illidan to face the spell target.
    void Cast(Unit* victim, uint32 Spell, bool triggered = false)
    {
        if (!victim)
            return;

        RefaceVictim = true;
        m_creature->SetUInt64Value(UNIT_FIELD_TARGET, victim->GetGUID());
        m_creature->CastSpell(victim, Spell, triggered);
    }

    /** This will handle the cast of eye blast **/
    void CastEyeBlast()
    {
        m_creature->InterruptNonMeleeSpells(false);

        DarkBarrageTimer += 10000; // delay Dark Barrage so the two don't overlap
        DoScriptText(SAY_EYE_BLAST, m_creature);

        // Pick a random start corner and sweep to the adjacent one (3 wraps to 0).
        uint32 initial = urand(0, 3);
        uint32 final = 0;
        if (initial < 3)
            final = initial+1;

        float initial_X = EyeBlast[initial].x;
        float initial_Y = EyeBlast[initial].y;
        float initial_Z = EyeBlast[initial].z;

        float final_X = EyeBlast[final].x;
        float final_Y = EyeBlast[final].y;
        float final_Z = EyeBlast[final].z;

        // Two triggers: one carries the damage aura, one is the visible blast target.
        for(uint8 i = 0; i < 2; ++i)
        {
            if (Creature* pTrigger = m_creature->SummonCreature(DEMON_FIRE, initial_X, initial_Y, initial_Z, 0, TEMPSUMMON_TIMED_DESPAWN, 20000))
            {
                if (demonfireAI* pTriggerAI = dynamic_cast<demonfireAI*>(pTrigger->AI()))
                    pTriggerAI->IsTrigger = true;

                pTrigger->GetMotionMaster()->MovePoint(0, final_X, final_Y, final_Z);

                if (!i)
                    pTrigger->CastSpell(pTrigger, SPELL_EYE_BLAST_TRIGGER, true);
                else
                {
                    pTrigger->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE);
                    m_creature->SetUInt64Value(UNIT_FIELD_TARGET, pTrigger->GetGUID());
                    DoCastSpellIfCan(pTrigger, SPELL_EYE_BLAST);
                }
            }
        }
    }

    // It's only cast on players that are greater than 15 yards away from Illidan.
    //If no one is found, cast it on MT instead (since selecting someone in that 15 yard radius would cause the flames to hit the MT anyway).
void CastAgonizingFlames() { // We'll use a grid searcher that selects a player that is at a distance >15 yards if (Player* pPlayer = GetPlayerAtMinimumRange(15.0f)) DoCastSpellIfCan(pPlayer, SPELL_AGONIZING_FLAMES); else DoCastSpellIfCan(m_creature->getVictim(), SPELL_AGONIZING_FLAMES); } void Talk(uint32 count) { if (!m_creature->isAlive()) return; int32 text = 0; if (Conversation[count].textId) text = Conversation[count].textId; TalkTimer = Conversation[count].timer; uint32 emote = Conversation[count].emote; IsTalking = Conversation[count].Talk; Creature* pCreature = NULL; uint64 GUID = 0; if (Conversation[count].creature == ILLIDAN_STORMRAGE) pCreature = m_creature; else if (Conversation[count].creature == AKAMA) { if (!AkamaGUID) { if (m_pInstance) { AkamaGUID = m_pInstance->GetData64(NPC_AKAMA); if (!AkamaGUID) return; GUID = AkamaGUID; } } else GUID = AkamaGUID; } else if (Conversation[count].creature == MAIEV_SHADOWSONG) { if (!MaievGUID) return; GUID = MaievGUID; } else if (Conversation[count].creature == EMPTY) // This is just for special cases without speech/sounds/emotes. 
return; if (GUID) // Now we check if we actually specified a GUID, if so: // we grab a pointer to that creature pCreature = m_creature->GetMap()->GetCreature(GUID); if (pCreature) { if (emote) pCreature->HandleEmote(emote); // Make the creature do some animation if (text) DoScriptText(text, pCreature); // Have the creature yell out some text } } void Move(float X, float Y, float Z, Creature* pCreature) { pCreature->GetMotionMaster()->MovePoint(0, X, Y, Z); } void HandleDemonTransformAnimation(uint32 count) { uint32 unaura = DemonTransformation[count].unaura; uint32 aura = DemonTransformation[count].aura; uint32 displayid = DemonTransformation[count].displayid; AnimationTimer = DemonTransformation[count].timer; uint32 size = DemonTransformation[count].size; m_creature->InterruptNonMeleeSpells(false); if (DemonTransformation[count].phase != 8) { m_creature->GetMotionMaster()->Clear(); m_creature->GetMotionMaster()->MoveIdle(); } if (unaura) m_creature->RemoveAurasDueToSpell(unaura); if (aura) DoCastSpellIfCan(m_creature, aura, CAST_TRIGGERED); if (displayid) // It's morphin time! m_creature->SetDisplayId(displayid); /*if (size) m_creature->SetUInt32Value(OBJECT_FIELD_SCALE_X, size); // Let us grow! 
(or shrink)*/ if (DemonTransformation[count].equip) { // Requip warglaives if needed SetEquipmentSlots(false, EQUIP_ID_MAIN_HAND, EQUIP_ID_OFF_HAND, EQUIP_NO_CHANGE); } else { // Unequip warglaives if needed SetEquipmentSlots(false, EQUIP_UNEQUIP, EQUIP_UNEQUIP, EQUIP_NO_CHANGE); } if (DemonTransformation[count].phase != 8) Phase = DemonTransformation[count].phase; // Set phase properly else { // Refollow and attack our old victim m_creature->GetMotionMaster()->MoveChase(m_creature->getVictim()); // Depending on whether we summoned Maiev, we switch to either phase 5 or 3 if (MaievGUID) Phase = PHASE_NORMAL_MAIEV; else Phase = PHASE_NORMAL_2; } if (count == 7) { DoResetThreat(); m_creature->RemoveAurasDueToSpell(SPELL_DEMON_FORM); } else if (count == 4) { DoResetThreat(); if (!m_creature->HasAura(SPELL_DEMON_FORM, EFFECT_INDEX_0)) DoCastSpellIfCan(m_creature, SPELL_DEMON_FORM, CAST_TRIGGERED); } } /** To reduce the amount of code in UpdateAI, we can seperate them into different functions and simply call them from UpdateAI **/ void EnterPhase2() { DoScriptText(SAY_TAKEOFF, m_creature); SummonBladesTimer = 10000; // Summon Glaives when this decrements SummonFlamesTimer = 20000; // Summon Flames when this decrements GlobalTimer += 20000; LandTimer = 0; Phase = PHASE_FLIGHT; m_creature->RemoveAllAuras(); m_creature->SetUInt64Value(UNIT_FIELD_TARGET, 0); // So players don't shoot us down m_creature->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); // We now hover! 
m_creature->AddSplineFlag(SPLINEFLAG_NO_SPLINE); m_creature->GetMotionMaster()->MovePoint(0, CENTER_X, CENTER_Y, CENTER_Z); for(uint8 i = 0; i < 2; ++i) { Creature* Glaive = m_creature->SummonCreature(BLADE_OF_AZZINOTH, GlaivePosition[i].x, GlaivePosition[i].y, GlaivePosition[i].z, 0, TEMPSUMMON_CORPSE_DESPAWN, 0); if (Glaive) { GlaiveGUID[i] = Glaive->GetGUID(); // We need this to remove them later on Glaive->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); Glaive->SetVisibility(VISIBILITY_OFF); Glaive->setFaction(m_creature->getFaction()); } } } void SummonBladesOfAzzinoth() { m_creature->GetMotionMaster()->Clear(false); LandTimer = 0; RetrieveBladesTimer = 0; // Make it look like we're throwing the glaives on the ground DoCastSpellIfCan(m_creature, SPELL_THROW_GLAIVE2); // We no longer wear the glaives! // since they are now channeling the flames (or will be) SetEquipmentSlots(false, EQUIP_UNEQUIP, EQUIP_UNEQUIP, EQUIP_NO_CHANGE); for(uint8 i = 0; i < 2; ++i) { Creature* Glaive = NULL; Glaive = m_creature->GetMap()->GetCreature(GlaiveGUID[i]); if (Glaive) { DoCastSpellIfCan(Glaive, SPELL_THROW_GLAIVE, CAST_TRIGGERED); Glaive->SetVisibility(VISIBILITY_ON); } } } void SummonFlamesOfAzzinoth() { DoScriptText(SAY_SUMMONFLAMES, m_creature); for(uint8 i = 0; i < 2; ++i) { Creature* Flame = NULL; Creature* Glaive = NULL; Glaive = m_creature->GetMap()->GetCreature(GlaiveGUID[i]); if (Glaive) { Flame = m_creature->SummonCreature(FLAME_OF_AZZINOTH, GlaivePosition[i+2].x, GlaivePosition[i+2].y, GlaivePosition[i+2].z, 0, TEMPSUMMON_CORPSE_TIMED_DESPAWN, 5000); if (Flame) { // Just in case the database has it as a different faction Flame->setFaction(m_creature->getFaction()); // Attack our target! Flame->AI()->AttackStart(m_creature->getVictim()); // Record GUID in order to check if they're dead later on to move to the next phase FlameGUID[i] = Flame->GetGUID(); // Glaives do some random Beam type channel on it. 
Glaive->CastSpell(Flame, SPELL_AZZINOTH_CHANNEL, true); if (m_creature->getVictim()) Flame->AI()->AttackStart(m_creature->getVictim()); } else { error_log("½Å±¾¿â£º Illidan Stormrage AI: Unable to summon Flame of Azzinoth (entry: 22997), please check your database"); EnterEvadeMode(); } } else { error_log("½Å±¾¿â£º Illidan Stormrage AI: Unable to summon Blade of Azzinoth (entry: 22996), please check your database"); } } DoResetThreat(); // And now reset our threatlist HasSummoned = true; } void SummonMaiev() { TauntTimer += 4000; GlobalTimer += 4000; m_creature->InterruptNonMeleeSpells(false); // Interrupt any of our spells Creature* Maiev = NULL; // Summon Maiev near Illidan Maiev = m_creature->SummonCreature(MAIEV_SHADOWSONG, m_creature->GetPositionX() + 10, m_creature->GetPositionY() + 5, m_creature->GetPositionZ()+2, 0, TEMPSUMMON_TIMED_OR_CORPSE_DESPAWN, 45000); if (Maiev) { m_creature->GetMotionMaster()->Clear(false); // Stop moving, it's rude to walk and talk! m_creature->GetMotionMaster()->MoveIdle(); // Just in case someone is unaffected by Shadow Prison m_creature->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); DoCastSpellIfCan(m_creature, SPELL_SHADOW_PRISON, CAST_TRIGGERED); TalkCount = 10; IsTalking = true; // We are now talking/ Maiev->SetVisibility(VISIBILITY_OFF); // Leave her invisible until she has to talk Maiev->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); MaievGUID = Maiev->GetGUID(); } else // If Maiev cannot be summoned, reset the encounter and post some errors to the console. { EnterEvadeMode(); debug_log("½Å±¾¿â£º Unable to summon Maiev Shadowsong and enter Phase 4. Resetting Encounter."); error_log("½Å±¾¿â£º Unable to summon Maiev Shadowsong (entry: 23197). Check your database to see if you have the proper SQL for Maiev Shadowsong (entry: 23197)"); } } void InitializeDeath() { m_creature->RemoveAllAuras(); DoCastSpellIfCan(m_creature, SPELL_DEATH); // Animate his kneeling + stun him // Don't let the players interrupt our talk! 
m_creature->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); m_creature->GetMotionMaster()->Clear(false); // No moving! m_creature->GetMotionMaster()->MoveIdle(); if (MaievGUID) { if (Creature* Maiev = m_creature->GetMap()->GetCreature(MaievGUID)) { Maiev->CombatStop(true); // Maiev shouldn't do anything either. No point in her attacking us =] Maiev->GetMotionMaster()->Clear(false); // Stop her from moving as well Maiev->GetMotionMaster()->MoveIdle(); float distance = 10.0f; float dx = m_creature->GetPositionX() + (distance*cos(m_creature->GetOrientation())); float dy = m_creature->GetPositionY() + (distance*sin(m_creature->GetOrientation())); Maiev->NearTeleportTo(dx, dy, Maiev->GetPositionZ(), 0.0f); Maiev->CastSpell(Maiev, SPELL_TELEPORT_VISUAL, true); Maiev->SetUInt64Value(UNIT_FIELD_TARGET, m_creature->GetGUID()); } } IsTalking = true; ++TalkCount; } void UpdateAI(const uint32 diff) { /*** This section will handle the conversations ***/ if (IsTalking) // Somewhat more efficient using a function rather than a long switch { if (TalkTimer < diff) { switch(TalkCount) // This is only for specialized cases { case 0: // Time to stand up! m_creature->RemoveAurasDueToSpell(SPELL_KNEEL); break; case 8: // Equip our warglaives! SetEquipmentSlots(false, EQUIP_ID_MAIN_HAND, EQUIP_ID_OFF_HAND, EQUIP_NO_CHANGE); // Hostile if we weren't before m_creature->setFaction(14); break; case 9: if (AkamaGUID) { if (Creature* pAkama = m_creature->GetMap()->GetCreature(AkamaGUID)) { // Start attacking Akama AttackStart(pAkama); // Akama stop talk and start attack illidan if (npc_akama_illidanAI* pAkamaAI = dynamic_cast<npc_akama_illidanAI*>(pAkama->AI())) pAkamaAI->IsTalking = false; pAkama->AI()->AttackStart(m_creature); pAkama->AddThreat(m_creature, 1000000.0f); } } // We are now attackable! 
m_creature->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); debug_log("½Å±¾¿â£º Black Temple: Illidan intro complete, players can attack Illidan."); break; case 11: if (MaievGUID) { Creature* Maiev = m_creature->GetMap()->GetCreature(MaievGUID); if (Maiev) { // Maiev is now visible Maiev->SetVisibility(VISIBILITY_ON); // onoz she looks like she teleported! Maiev->CastSpell(Maiev, SPELL_TELEPORT_VISUAL, true); // Have her face us Maiev->SetUInt64Value(UNIT_FIELD_TARGET, m_creature->GetGUID()); // Face her, so it's not rude =P m_creature->SetUInt64Value(UNIT_FIELD_TARGET, Maiev->GetGUID()); } } break; case 14: if (MaievGUID) { Creature* Maiev = m_creature->GetMap()->GetCreature(MaievGUID); if (Maiev) { Maiev->GetMotionMaster()->Clear(false); Maiev->GetMotionMaster()->MoveChase(m_creature); // Have Maiev add a lot of threat on us so that players don't pull her off if they damage her via AOE Maiev->AddThreat(m_creature, 10000000.0f); // Force Maiev to attack us. Maiev->AI()->AttackStart(m_creature); Maiev->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE); } } m_creature->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); m_creature->GetMotionMaster()->MoveChase(m_creature->getVictim()); IsTalking = false; FaceVictimTimer = 2000; RefaceVictim = true; break; case 20: // Kill ourself. if (MaievGUID) { Creature* Maiev = m_creature->GetMap()->GetCreature(MaievGUID); if (Maiev) { // Make Maiev leave Maiev->CastSpell(Maiev, SPELL_TELEPORT_VISUAL, true); Maiev->SetDeathState(JUST_DIED); } } IsTalking = false; if (m_creature->getVictim()) m_creature->getVictim()->DealDamage(m_creature, m_creature->GetHealth(), NULL, DIRECT_DAMAGE,SPELL_SCHOOL_MASK_NORMAL, NULL, false); else // Now we kill ourself m_creature->DealDamage(m_creature, m_creature->GetHealth(), NULL, DIRECT_DAMAGE, SPELL_SCHOOL_MASK_NORMAL, NULL, false); break; } // This function does most of the talking Talk(TalkCount); ++TalkCount; }else TalkTimer -= diff; } // If we don't have a target, return. 
if (!m_creature->SelectHostileTarget() || !m_creature->getVictim() || IsTalking)
    return;

// If we are 'caged', then we shouldn't do anything such as cast spells or transform into Demon Form.
if (m_creature->HasAura(SPELL_CAGED, EFFECT_INDEX_0))
{
    // Just so that he doesn't immediately enrage after he stops being caged.
    EnrageTimer = 40000;
    CageTimer = 30000;
    return;
}

// Berserk Timer - flat 25 minutes
if (!m_creature->HasAura(SPELL_BERSERK, EFFECT_INDEX_0) && Phase != PHASE_DEMON_SEQUENCE)
{
    if (BerserkTimer < diff)
    {
        DoScriptText(SAY_ENRAGE, m_creature);
        DoCastSpellIfCan(m_creature, SPELL_BERSERK, CAST_TRIGGERED);
    }
    else BerserkTimer -= diff;
}

// Re-face the real victim a moment after Cast() pointed us elsewhere.
if (RefaceVictim)
{
    if (FaceVictimTimer < diff)
    {
        m_creature->SetUInt64Value(UNIT_FIELD_TARGET, m_creature->getVictim()->GetGUID());
        FaceVictimTimer = 1000;
        RefaceVictim = false;
    }
    else FaceVictimTimer -= diff;
}

/** Signal to change to phase 2 **/
if (m_creature->GetHealthPercent() < 65.0f && Phase == PHASE_NORMAL)
    EnterPhase2();

/** Signal to summon Maiev **/
// BUGFIX: the original condition was (Phase != PHASE_DEMON || Phase != PHASE_DEMON_SEQUENCE),
// which is always true (a value cannot equal two different constants at once),
// so Maiev could be summoned in the middle of the demon transformation.
// The intent is "not in demon form AND not mid-transformation".
if (m_creature->GetHealthPercent() < 30.0f && !MaievGUID && Phase != PHASE_DEMON && Phase != PHASE_DEMON_SEQUENCE)
    SummonMaiev();

/** Time for the death speech **/
// Same always-true '||' bug fixed here: the death sequence must not begin
// while Illidan is in demon form or animating the transformation.
if (m_creature->GetHealthPercent() < 1.0f && !IsTalking && Phase != PHASE_DEMON && Phase != PHASE_DEMON_SEQUENCE)
    InitializeDeath();

/***** Spells for Phase 1, 3 and 5 (Normal Form) ******/
if (Phase == PHASE_NORMAL || Phase == PHASE_NORMAL_2 || Phase == PHASE_NORMAL_MAIEV)
{
    if (TauntTimer < diff) // His random taunt/yell timer.
    {
        uint32 random = urand(0, 3);
        int32 yell = RandomTaunts[random].textId;
        if (yell)
            DoScriptText(yell, m_creature);
        TauntTimer = 32000;
    }
    else TauntTimer -= diff;

    // Global Timer so that spells do not overlap.
    if (GlobalTimer < diff)
    {
        if (ShearTimer < diff)
        {
            DoCastSpellIfCan(m_creature->getVictim(), SPELL_SHEAR);
            ShearTimer = urand(25000, 40000);
            GlobalTimer += 2000;
        }
        else ShearTimer -= diff;

        if (FlameCrashTimer < diff)
        {
            //It spawns multiple flames sometimes.
Therefore, we'll do this manually. //DoCastSpellIfCan(m_creature->getVictim(), SPELL_FLAME_CRASH); m_creature->SummonCreature(FLAME_CRASH, 0.0f, 0.0f, 0.0f, 0.0f, TEMPSUMMON_TIMED_DESPAWN, 40000); FlameCrashTimer = 35000; GlobalTimer += 2000; }else FlameCrashTimer -= diff; if (ParasiticShadowFiendTimer < diff) { Unit* target = NULL; target = m_creature->SelectAttackingTarget(ATTACKING_TARGET_RANDOM,1); if (target && target->isAlive() && !target->HasAura(SPELL_PARASITIC_SHADOWFIEND, EFFECT_INDEX_0)) { Cast(target, SPELL_PARASITIC_SHADOWFIEND); ParasiticShadowFiendTimer = 40000; } }else ParasiticShadowFiendTimer -= diff; if (DrawSoulTimer < diff) { DoCastSpellIfCan(m_creature->getVictim(), SPELL_DRAW_SOUL); DrawSoulTimer = 55000; GlobalTimer += 3000; }else DrawSoulTimer -= diff; }else GlobalTimer -= diff; if (!IsTalking) DoMeleeAttackIfReady(); } /*** Phase 2 ***/ if (Phase == PHASE_FLIGHT) { // Check if we have summoned or not. if (!HasSummoned) { if (SummonBladesTimer) if (SummonBladesTimer <= diff) { SummonBladesOfAzzinoth(); SummonBladesTimer = 0; }else SummonBladesTimer -= diff; if (SummonFlamesTimer < diff) { SummonFlamesOfAzzinoth(); }else SummonFlamesTimer -= diff; } if (!m_creature->GetMotionMaster()->empty() && (m_creature->GetMotionMaster()->GetCurrentMovementGeneratorType() != POINT_MOTION_TYPE)) m_creature->GetMotionMaster()->Clear(false); if (HasSummoned) { if (CheckFlamesTimer) { if (CheckFlamesTimer <= diff) { // Check if flames are dead or non-existant. If so, set GUID to 0. for(uint8 i = 0; i < 2; ++i) { if (FlameGUID[i]) { Creature* Flame = m_creature->GetMap()->GetCreature(FlameGUID[i]); // If the flame dies, or somehow the pointer becomes invalid, reset GUID to 0. if (!Flame || !Flame->isAlive()) FlameGUID[i] = 0; } } CheckFlamesTimer = 500; }else CheckFlamesTimer -= diff; } // If both flames are dead/non-existant, kill glaives and change to phase 3. 
if (!FlameGUID[0] && !FlameGUID[1] && CheckFlamesTimer) { RetrieveBladesTimer = 5000; // Prepare for re-equipin! CheckFlamesTimer = 0; } if (RetrieveBladesTimer) { if (RetrieveBladesTimer <= diff) // Time to get back our glaives! { // Interrupt any spells we might be doing *cough* DArk Barrage *cough* m_creature->InterruptNonMeleeSpells(false); for(uint8 i = 0; i < 2; ++i) { if (GlaiveGUID[i]) { Creature* Glaive = m_creature->GetMap()->GetCreature(GlaiveGUID[i]); if (Glaive) { // Make it look like the Glaive flies back up to us Glaive->CastSpell(m_creature, SPELL_GLAIVE_RETURNS, true); // Despawn the Glaive Glaive->SetDeathState(JUST_DIED); } GlaiveGUID[i] = 0; } } // Re-equip our warblades! SetEquipmentSlots(false, EQUIP_ID_MAIN_HAND, EQUIP_ID_OFF_HAND, EQUIP_NO_CHANGE); // Prepare for landin'! LandTimer = 5000; RetrieveBladesTimer = 0; }else RetrieveBladesTimer -= diff; } if (LandTimer) { // Time to land! if (LandTimer <= diff) { DoResetThreat(); // anndddd touchdown! m_creature->HandleEmote(EMOTE_ONESHOT_LAND); m_creature->RemoveSplineFlag(SPLINEFLAG_NO_SPLINE); Phase = PHASE_NORMAL_2; // We should let the raid fight us =) m_creature->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); m_creature->SetUInt64Value(UNIT_FIELD_TARGET, m_creature->getVictim()->GetGUID()); // Chase our victim! m_creature->GetMotionMaster()->MoveChase(m_creature->getVictim()); }else LandTimer -= diff; return; // Do not continue past this point if LandTimer is not 0 and we are in phase 2. 
} } if (GlobalTimer < diff) { if (FireballTimer < diff) { Cast(m_creature->SelectAttackingTarget(ATTACKING_TARGET_RANDOM, 0), SPELL_FIREBALL); FireballTimer = 5000; }else FireballTimer -= diff; if (DarkBarrageTimer < diff) { m_creature->InterruptNonMeleeSpells(false); if (Unit* pTarget = m_creature->SelectAttackingTarget(ATTACKING_TARGET_RANDOM, 0)) DoCastSpellIfCan(pTarget, SPELL_DARK_BARRAGE); DarkBarrageTimer = 35000; GlobalTimer += 9000; }else DarkBarrageTimer -= diff; if (EyeBlastTimer < diff) { CastEyeBlast(); EyeBlastTimer = 30000; }else EyeBlastTimer -= diff; }else GlobalTimer -= diff; } /** Phase 3,5 spells only**/ if (Phase == PHASE_NORMAL_2 || Phase == PHASE_NORMAL_MAIEV) { if (GlobalTimer < diff) { if (AgonizingFlamesTimer < diff) { CastAgonizingFlames(); AgonizingFlamesTimer = 60000; }else AgonizingFlamesTimer -= diff; }else GlobalTimer -= diff; if (TransformTimer < diff) { float CurHealth = m_creature->GetHealthPercent(); // Prevent Illidan from morphing if less than 32% or 5%, as this may cause issues with the phase transition or death speech if ((CurHealth < 32.0f && !MaievGUID) || CurHealth < 5.0f) return; Phase = PHASE_DEMON_SEQUENCE; // Transform sequence DemonFormSequence = 0; AnimationTimer = 0; DoScriptText(SAY_MORPH, m_creature); TransformTimer = 60000; FlameBurstTimer = 10000; ShadowDemonTimer = 30000; m_creature->GetMotionMaster()->Clear(false);// Stop moving }else TransformTimer -= diff; } /** Phase 4 spells only (Demon Form) **/ if (Phase == PHASE_DEMON) { // Stop moving if we are by clearing movement generators. 
if (!m_creature->GetMotionMaster()->empty()) m_creature->GetMotionMaster()->Clear(false); if (TransformTimer < diff) { Phase = PHASE_DEMON_SEQUENCE; DemonFormSequence = 5; AnimationTimer = 100; TransformTimer = 60000; }else TransformTimer -= diff; if (ShadowDemonTimer < diff) { m_creature->InterruptNonMeleeSpells(false); Creature* ShadowDemon = NULL; for(uint8 i = 0; i < 4; ++i) { Unit* target = NULL; target = m_creature->SelectAttackingTarget(ATTACKING_TARGET_RANDOM,0); // only on players. if (target && target->GetTypeId() == TYPEID_PLAYER) { ShadowDemon = m_creature->SummonCreature(SHADOW_DEMON, 0.0f, 0.0f, 0.0f, 0.0f, TEMPSUMMON_TIMED_OR_CORPSE_DESPAWN, 25000); if (ShadowDemon) { ShadowDemon->AddThreat(target, 5000000.0f); ShadowDemon->AI()->AttackStart(target); ShadowDemon->SetInCombatWithZone(); } } } ShadowDemonTimer = 60000; }else ShadowDemonTimer -= diff; if (GlobalTimer < diff) { if (ShadowBlastTimer < diff) { Unit* target = m_creature->SelectAttackingTarget(ATTACKING_TARGET_TOPAGGRO, 0); if (target && target->isAlive()) { m_creature->SetUInt64Value(UNIT_FIELD_TARGET, target->GetGUID()); DoCastSpellIfCan(target, SPELL_SHADOW_BLAST); ShadowBlastTimer = 4000; GlobalTimer += 1500; } if (!m_creature->HasAura(SPELL_DEMON_FORM, EFFECT_INDEX_0)) DoCastSpellIfCan(m_creature, SPELL_DEMON_FORM, CAST_TRIGGERED); }else ShadowBlastTimer -= diff; if (FlameBurstTimer < diff) { DoCastSpellIfCan(m_creature, SPELL_FLAME_BURST); FlameBurstTimer = 15000; }else FlameBurstTimer -= diff; }else GlobalTimer -= diff; } /** Phase 5 timers. 
Enrage spell **/ if (Phase == PHASE_NORMAL_MAIEV) { if (EnrageTimer < diff) { DoCastSpellIfCan(m_creature, SPELL_ENRAGE); EnrageTimer = 40000; CageTimer = 30000; TransformTimer += 10000; }else EnrageTimer -= diff; // We'll handle Cage Trap in Illidan's script for simplicity's sake if (CageTimer < diff) { if (MaievGUID) { Creature* Maiev = m_creature->GetMap()->GetCreature(MaievGUID); Unit* target = m_creature->SelectAttackingTarget(ATTACKING_TARGET_RANDOM, 0); if (!Maiev || !target || (target->GetTypeId() != TYPEID_PLAYER)) return; float X, Y, Z; target->GetPosition(X, Y, Z); Maiev->GetMap()->CreatureRelocation(m_creature, X, Y, Z, Maiev->GetOrientation()); // Make it look like she 'teleported' Maiev->CastSpell(Maiev, SPELL_TELEPORT_VISUAL, true); // summon the trap! Maiev->CastSpell(Maiev, SPELL_CAGE_TRAP_SUMMON, false); } CageTimer = 15000; }else CageTimer -= diff; } if (Phase == PHASE_DEMON_SEQUENCE) // Demonic Transformation { if (AnimationTimer < diff) { HandleDemonTransformAnimation(DemonFormSequence); ++DemonFormSequence; }else AnimationTimer -= diff; } } }; /*********************** End of Illidan AI ******************************************/ void npc_akama_illidanAI::BeginEvent(uint64 PlayerGUID) { debug_log("½Å±¾¿â£º Akama - Illidan Introduction started. Illidan event properly begun."); if (m_pInstance) { IllidanGUID = m_pInstance->GetData64(NPC_ILLIDAN_STORMRAGE); m_pInstance->SetData(TYPE_ILLIDAN, IN_PROGRESS); } if (m_pInstance) { for(uint32 i = GO_ILLIDAN_DOOR_R; i < GO_ILLIDAN_DOOR_L+1; ++i) { if (GameObject* pDoor = m_pInstance->instance->GetGameObject(m_pInstance->GetData64(i))) pDoor->SetGoState(GO_STATE_READY); } } if (IllidanGUID) { m_creature->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); if (Creature* pIllidan = m_creature->GetMap()->GetCreature(IllidanGUID)) { boss_illidan_stormrageAI* pIllidanAI = dynamic_cast<boss_illidan_stormrageAI*>(pIllidan->AI()); if (!pIllidanAI) return; // Time for Illidan to stand up. 
pIllidan->RemoveAurasDueToSpell(SPELL_KNEEL); // First line of Akama-Illidan convo pIllidanAI->TalkCount = 0; // Begin Talking pIllidanAI->IsTalking = true; pIllidanAI->AkamaGUID = m_creature->GetGUID(); m_creature->SetUInt64Value(UNIT_FIELD_TARGET, pIllidan->GetGUID()); pIllidan->SetUInt64Value(UNIT_FIELD_TARGET, m_creature->GetGUID()); IsTalking = true; // Prevent Akama from starting to attack him // Prevent players from talking again m_creature->RemoveFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); m_creature->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); pIllidan->GetMotionMaster()->Clear(false); pIllidan->GetMotionMaster()->MoveIdle(); m_creature->GetMotionMaster()->Clear(false); m_creature->GetMotionMaster()->MoveIdle(); if (PlayerGUID) { if (Player* pPlayer = m_creature->GetMap()->GetPlayer(PlayerGUID)) pIllidan->AddThreat(pPlayer, 100.0f); } } } } bool GossipHello_npc_akama_at_illidan(Player* pPlayer, Creature* pCreature) { // TODO: Add gossip item only when Council is done? 
pPlayer->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_ITEM, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF);
pPlayer->SEND_GOSSIP_MENU(10465, pCreature->GetGUID());
return true;
}

// Gossip select handler: the player confirmed the dialog option on Akama,
// so hand control to Akama's AI to begin the encounter sequence.
bool GossipSelect_npc_akama_at_illidan(Player* pPlayer, Creature* pCreature, uint32 uiSender, uint32 uiAction)
{
    if (uiAction == GOSSIP_ACTION_INFO_DEF) // Time to begin the event
    {
        pPlayer->CLOSE_GOSSIP_MENU();
        if (npc_akama_illidanAI* pAkamaAI = dynamic_cast<npc_akama_illidanAI*>(pCreature->AI()))
            pAkamaAI->BeginDoorEvent(pPlayer);
    }
    return true;
}

// AI for Maiev Shadowsong: fights alongside the raid, periodically yelling
// taunts, and removes herself if Illidan is gone, dead or resetting.
struct MANGOS_DLL_DECL boss_maievAI : public ScriptedAI
{
    boss_maievAI(Creature* pCreature) : ScriptedAI(pCreature)
    {
        m_pInstance = (ScriptedInstance*)pCreature->GetInstanceData();
        Reset();
    };

    uint32 TauntTimer;             // countdown until the next random taunt
    uint64 IllidanGUID;            // cached Illidan GUID, lazily fetched from instance data
    ScriptedInstance* m_pInstance;

    void Reset()
    {
        TauntTimer = 12000;
        IllidanGUID = 0;
    }

    void UpdateAI(const uint32 diff)
    {
        if (!IllidanGUID)
        {
            if (m_pInstance)
                IllidanGUID = m_pInstance->GetData64(NPC_ILLIDAN_STORMRAGE);
        }else
        {
            Creature* Illidan = m_creature->GetMap()->GetCreature(IllidanGUID);
            // If Illidan despawned, died or is evading, silently despawn Maiev too.
            if (!Illidan || !Illidan->isAlive() || Illidan->IsInEvadeMode())
            {
                m_creature->SetVisibility(VISIBILITY_OFF);
                m_creature->DealDamage(m_creature, m_creature->GetHealth(), NULL, DIRECT_DAMAGE, SPELL_SCHOOL_MASK_NORMAL, NULL, false);
            }
            // Stand down while Illidan's scripted death sequence plays out.
            else if (Illidan && Illidan->GetHealthPercent() < 2.0f)
                return;
        }

        // Return if we don't have a target
        if (!m_creature->SelectHostileTarget() || !m_creature->getVictim())
            return;

        if (TauntTimer < diff)
        {
            uint32 random = urand(0, 3);
            int32 text = MaievTaunts[random].textId;
            DoScriptText(text, m_creature);
            TauntTimer = urand(22000, 42000);
        }else TauntTimer -= diff;

        DoMeleeAttackIfReady();
    }
};

// Invisible trigger spawned by the Cage Trap gameobject; once armed (Active),
// it 'cages' Illidan when he walks over it.
struct MANGOS_DLL_DECL cage_trap_triggerAI : public ScriptedAI
{
    cage_trap_triggerAI(Creature* pCreature) : ScriptedAI(pCreature) {Reset();}

    uint64 IllidanGUID;   // set once Illidan steps on the armed trap
    uint64 CageTrapGUID;  // the owning gameobject, deactivated after triggering
    uint32 DespawnTimer;  // self-despawn countdown after the trap fires

    bool Active;          // armed by GOUse_go_cage_trap when a player uses the trap
    bool SummonedBeams;

    void Reset()
    {
        IllidanGUID = 0;
        CageTrapGUID = 0;
        Active = false;
        SummonedBeams =
false; DespawnTimer = 0; m_creature->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE); } void MoveInLineOfSight(Unit *who) { if (!Active) return; if (who && (who->GetTypeId() != TYPEID_PLAYER)) { if (who->GetEntry() == ILLIDAN_STORMRAGE) // Check if who is Illidan { if (!IllidanGUID && m_creature->IsWithinDistInMap(who, 3) && !who->HasAura(SPELL_CAGED, EFFECT_INDEX_0)) { IllidanGUID = who->GetGUID(); who->CastSpell(who, SPELL_CAGED, true); DespawnTimer = 5000; // Dispel his enrage if (who->HasAura(SPELL_ENRAGE, EFFECT_INDEX_0)) who->RemoveAurasDueToSpell(SPELL_ENRAGE); if (GameObject* pCageTrap = m_creature->GetMap()->GetGameObject(CageTrapGUID)) pCageTrap->SetLootState(GO_JUST_DEACTIVATED); } } } } void UpdateAI(const uint32 diff) { if (DespawnTimer) { if (DespawnTimer <= diff) m_creature->DealDamage(m_creature, m_creature->GetHealth(), NULL, DIRECT_DAMAGE, SPELL_SCHOOL_MASK_NORMAL, NULL, false); else DespawnTimer -= diff; } //if (IllidanGUID && !SummonedBeams) //{ // if (Creature* pIllidan = m_creature->GetMap()->GetCreature(IllidanGUID) // { // //TODO: Find proper spells and properly apply 'caged' Illidan effect // } //} } }; bool GOUse_go_cage_trap(Player* pPlayer, GameObject* pGo) { float x, y, z; pPlayer->GetPosition(x, y, z); // Grid search for nearest live creature of entry 23304 within 10 yards Creature* pTrigger = GetClosestCreatureWithEntry(pGo, 23304, 10.0f); if (!pTrigger) { error_log("½Å±¾¿â£º Cage Trap- Unable to find trigger. 
This Cage Trap is now useless"); return false; } if (cage_trap_triggerAI* pTriggerAI = dynamic_cast<cage_trap_triggerAI*>(pTrigger->AI())) pTriggerAI->Active = true; pGo->SetGoState(GO_STATE_ACTIVE); return true; } struct MANGOS_DLL_DECL flame_of_azzinothAI : public ScriptedAI { flame_of_azzinothAI(Creature* pCreature) : ScriptedAI(pCreature) {Reset();} uint32 FlameBlastTimer; uint32 SummonBlazeTimer; uint32 ChargeTimer; void Reset() { FlameBlastTimer = urand(15000, 30000); SummonBlazeTimer = urand(10000, 30000); ChargeTimer = 5000; } void Charge() { // Get the Threat List ThreatList const& tList = m_creature->getThreatManager().getThreatList(); // He doesn't have anyone in his threatlist, useless to continue if (tList.empty()) return; std::list<Unit*> targets; //store the threat list in a different container for (ThreatList::const_iterator itr = tList.begin();itr != tList.end(); ++itr) { Unit *target = m_creature->GetMap()->GetUnit((*itr)->getUnitGuid()); //only on alive players if (target && target->isAlive() && target->GetTypeId() == TYPEID_PLAYER) targets.push_back(target); } //Sort the list of players targets.sort(ObjectDistanceOrderReversed(m_creature)); //Resize so we only get the furthest target targets.resize(1); Unit* target = (*targets.begin()); if (target && (!m_creature->IsWithinDistInMap(target, 40))) { DoCastSpellIfCan(m_creature, SPELL_ENRAGE, CAST_TRIGGERED); DoCastSpellIfCan(target, SPELL_CHARGE); } } void UpdateAI(const uint32 diff) { if (!m_creature->SelectHostileTarget() || !m_creature->getVictim()) return; if (FlameBlastTimer < diff) { DoCastSpellIfCan(m_creature->getVictim(), SPELL_FLAME_BLAST); FlameBlastTimer = 30000; }else FlameBlastTimer -= diff; if (SummonBlazeTimer < diff) { DoCastSpellIfCan(m_creature, SPELL_BLAZE_SUMMON); SummonBlazeTimer = urand(30000, 50000); }else SummonBlazeTimer -= diff; if (ChargeTimer < diff) { Charge(); ChargeTimer = 5000; }else ChargeTimer -= diff; DoMeleeAttackIfReady(); } }; struct MANGOS_DLL_DECL 
shadow_demonAI : public ScriptedAI { shadow_demonAI(Creature* pCreature) : ScriptedAI(pCreature) {Reset();} uint64 TargetGUID; void Reset() { TargetGUID = 0; } void JustDied(Unit *killer) { if (TargetGUID) { if (Player* pPlayer = m_creature->GetMap()->GetPlayer(TargetGUID)) pPlayer->RemoveAurasDueToSpell(SPELL_PARALYZE); } } void UpdateAI(const uint32 diff) { if (!m_creature->SelectHostileTarget() || !m_creature->getVictim()) return; // Only cast the below on players. if (m_creature->getVictim()->GetTypeId() != TYPEID_PLAYER) return; if (!m_creature->getVictim()->HasAura(SPELL_PARALYZE, EFFECT_INDEX_0)) { TargetGUID = m_creature->getVictim()->GetGUID(); m_creature->AddThreat(m_creature->getVictim(), 10000000.0f); DoCastSpellIfCan(m_creature, SPELL_SHADOW_DEMON_PASSIVE, CAST_TRIGGERED); DoCastSpellIfCan(m_creature->getVictim(), SPELL_PURPLE_BEAM, CAST_TRIGGERED); DoCastSpellIfCan(m_creature->getVictim(), SPELL_PARALYZE, CAST_TRIGGERED); } // Kill our target if we're very close. if (m_creature->IsWithinDistInMap(m_creature->getVictim(), 3)) DoCastSpellIfCan(m_creature->getVictim(), SPELL_CONSUME_SOUL); } }; struct MANGOS_DLL_DECL flamecrashAI : public ScriptedAI { flamecrashAI(Creature* pCreature) : ScriptedAI(pCreature) {Reset();} uint32 FlameCrashTimer; uint32 DespawnTimer; void Reset() { FlameCrashTimer = urand(3000, 8000); DespawnTimer = 60000; } void AttackStart(Unit *who) { } void MoveInLineOfSight(Unit *who){ } void UpdateAI(const uint32 diff) { if (FlameCrashTimer < diff) { DoCastSpellIfCan(m_creature, SPELL_FLAME_CRASH_EFFECT); FlameCrashTimer = 15000; }else FlameCrashTimer -= diff; if (DespawnTimer < diff) { // So that players don't see the sparkly effect when we die. 
m_creature->SetVisibility(VISIBILITY_OFF); m_creature->DealDamage(m_creature, m_creature->GetHealth(), NULL, DIRECT_DAMAGE, SPELL_SCHOOL_MASK_NORMAL, NULL, false); }else DespawnTimer -= diff; } }; /* ** TODO This code was unused for long time (not used in DB and pointless) * ** Keep it temporarily as reference // Shadowfiends interact with Illidan, setting more targets in Illidan's hashmap struct MANGOS_DLL_DECL mob_parasitic_shadowfiendAI : public ScriptedAI { mob_parasitic_shadowfiendAI(Creature* pCreature) : ScriptedAI(pCreature) { Reset(); } void Reset() {} void DoMeleeAttackIfReady() { //If we are within range melee the target if (m_creature->CanReachWithMeleeAttack(m_creature->getVictim())) { //Make sure our attack is ready and we aren't currently casting if (m_creature->isAttackReady() && !m_creature->IsNonMeleeSpellCasted(false)) { if (!m_creature->getVictim()->HasAura(SPELL_PARASITIC_SHADOWFIEND, EFFECT_INDEX_0)) DoCastSpellIfCan(m_creature->getVictim(), SPELL_PARASITIC_SHADOWFIEND, CAST_TRIGGERED); m_creature->AttackerStateUpdate(m_creature->getVictim()); m_creature->resetAttackTimer(); } } } }; */ struct MANGOS_DLL_DECL blazeAI : public ScriptedAI { blazeAI(Creature* pCreature) : ScriptedAI(pCreature) {Reset();} uint32 BlazeTimer; uint32 DespawnTimer; void Reset() { BlazeTimer = 2000; DespawnTimer = 15000; } void AttackStart(Unit* who) { } void MoveInLineOfSight(Unit *who){ } void UpdateAI(const uint32 diff) { if (BlazeTimer < diff) { DoCastSpellIfCan(m_creature, SPELL_BLAZE_EFFECT); BlazeTimer = 15000; }else BlazeTimer -= diff; if (DespawnTimer < diff) { m_creature->SetVisibility(VISIBILITY_OFF); m_creature->DealDamage(m_creature, m_creature->GetHealth(), NULL, DIRECT_DAMAGE, SPELL_SCHOOL_MASK_NORMAL, NULL, false); }else DespawnTimer -= diff; } }; struct MANGOS_DLL_DECL blade_of_azzinothAI : public ScriptedAI { blade_of_azzinothAI(Creature* pCreature) : ScriptedAI(pCreature) { Reset(); } void Reset() {} // Do-Nothing-But-Stand-There void 
AttackStart(Unit* who) { } void MoveInLineOfSight(Unit* who) { } }; CreatureAI* GetAI_boss_illidan_stormrage(Creature* pCreature) { return new boss_illidan_stormrageAI(pCreature); } CreatureAI* GetAI_npc_akama_at_illidan(Creature* pCreature) { npc_akama_illidanAI* Akama_AI = new npc_akama_illidanAI(pCreature); for(uint8 i = 0; i < 13; ++i) Akama_AI->AddWaypoint(i, AkamaWP[i].x, AkamaWP[i].y, AkamaWP[i].z); return ((CreatureAI*)Akama_AI); } CreatureAI* GetAI_boss_maiev(Creature* pCreature) { return new boss_maievAI(pCreature); } CreatureAI* GetAI_mob_flame_of_azzinoth(Creature* pCreature) { return new flame_of_azzinothAI(pCreature); } CreatureAI* GetAI_cage_trap_trigger(Creature* pCreature) { return new cage_trap_triggerAI(pCreature); } CreatureAI* GetAI_shadow_demon(Creature* pCreature) { return new shadow_demonAI(pCreature); } CreatureAI* GetAI_flamecrash(Creature* pCreature) { return new flamecrashAI(pCreature); } CreatureAI* GetAI_demonfire(Creature* pCreature) { return new demonfireAI(pCreature); } CreatureAI* GetAI_blaze(Creature* pCreature) { return new blazeAI(pCreature); } CreatureAI* GetAI_blade_of_azzinoth(Creature* pCreature) { return new blade_of_azzinothAI(pCreature); } /* ** TODO dead code CreatureAI* GetAI_parasitic_shadowfiend(Creature* pCreature) { return new mob_parasitic_shadowfiendAI(pCreature); } */ void AddSC_boss_illidan() { Script* newscript; newscript = new Script; newscript->Name = "boss_illidan_stormrage"; newscript->GetAI = &GetAI_boss_illidan_stormrage; newscript->RegisterSelf(); newscript = new Script; newscript->Name = "npc_akama_illidan"; newscript->GetAI = &GetAI_npc_akama_at_illidan; newscript->pGossipHello = &GossipHello_npc_akama_at_illidan; newscript->pGossipSelect = &GossipSelect_npc_akama_at_illidan; newscript->RegisterSelf(); newscript = new Script; newscript->Name = "boss_maiev_shadowsong"; newscript->GetAI = &GetAI_boss_maiev; newscript->RegisterSelf(); newscript = new Script; newscript->Name = "mob_flame_of_azzinoth"; 
newscript->GetAI = &GetAI_mob_flame_of_azzinoth; newscript->RegisterSelf(); newscript = new Script; newscript->Name = "mob_blade_of_azzinoth"; newscript->GetAI = &GetAI_blade_of_azzinoth; newscript->RegisterSelf(); newscript = new Script; newscript->Name = "gameobject_cage_trap"; newscript->pGOUse = &GOUse_go_cage_trap; newscript->RegisterSelf(); newscript = new Script; newscript->Name = "mob_cage_trap_trigger"; newscript->GetAI = &GetAI_cage_trap_trigger; newscript->RegisterSelf(); newscript = new Script; newscript->Name = "mob_shadow_demon"; newscript->GetAI = &GetAI_shadow_demon; newscript->RegisterSelf(); newscript = new Script; newscript->Name = "mob_flame_crash"; newscript->GetAI = &GetAI_flamecrash; newscript->RegisterSelf(); newscript = new Script; newscript->Name = "mob_demon_fire"; newscript->GetAI = &GetAI_demonfire; newscript->RegisterSelf(); newscript = new Script; newscript->Name = "mob_blaze"; newscript->GetAI = &GetAI_blaze; newscript->RegisterSelf(); /* ** TODO dead code newscript = new Script; newscript->Name = "mob_parasitic_shadowfiend"; newscript->GetAI = &GetAI_parasitic_shadowfiend; newscript->RegisterSelf(); */ }
gpl-2.0
juston-li/mako
drivers/cpufreq/cpufreq_ondemand.c
1
32243
/* * drivers/cpufreq/cpufreq_ondemand.c * * Copyright (C) 2001 Russell King * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. * Jun Nakajima <jun.nakajima@intel.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/cpu.h> #include <linux/jiffies.h> #include <linux/kernel_stat.h> #include <linux/mutex.h> #include <linux/hrtimer.h> #include <linux/tick.h> #include <linux/ktime.h> #include <linux/sched.h> #include <linux/input.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/hotplug.h> /* * dbs is used in this file as a shortform for demandbased switching * It helps to keep variable names smaller, simpler */ #define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) #define DEF_FREQUENCY_UP_THRESHOLD (80) #define DEF_SAMPLING_DOWN_FACTOR (1) #define MAX_SAMPLING_DOWN_FACTOR (100000) #define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) #define MICRO_FREQUENCY_UP_THRESHOLD (95) #define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) #define MIN_FREQUENCY_UP_THRESHOLD (11) #define MAX_FREQUENCY_UP_THRESHOLD (100) #define MIN_FREQUENCY_DOWN_DIFFERENTIAL (1) /* * The polling frequency of this governor depends on the capability of * the processor. Default polling frequency is 1000 times the transition * latency of the processor. The governor will work on any processor with * transition latency <= 10mS, using appropriate sampling * rate. * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) * this governor will not work. * All times here are in uS. 
*/ #define MIN_SAMPLING_RATE_RATIO (2) static unsigned int min_sampling_rate; #define LATENCY_MULTIPLIER (1000) #define MIN_LATENCY_MULTIPLIER (100) #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) #define POWERSAVE_BIAS_MAXLEVEL (1000) #define POWERSAVE_BIAS_MINLEVEL (-1000) static void do_dbs_timer(struct work_struct *work); static int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event); #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND static #endif struct cpufreq_governor cpufreq_gov_ondemand = { .name = "ondemand", .governor = cpufreq_governor_dbs, .max_transition_latency = TRANSITION_LATENCY_LIMIT, .owner = THIS_MODULE, }; /* Sampling types */ enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; struct cpu_dbs_info_s { cputime64_t prev_cpu_idle; cputime64_t prev_cpu_iowait; cputime64_t prev_cpu_wall; cputime64_t prev_cpu_nice; struct cpufreq_policy *cur_policy; struct delayed_work work; struct cpufreq_frequency_table *freq_table; unsigned int freq_lo; unsigned int freq_lo_jiffies; unsigned int freq_hi_jiffies; unsigned int rate_mult; unsigned int prev_load; unsigned int max_load; int cpu; unsigned int sample_type:1; /* * percpu mutex that serializes governor limit change with * do_dbs_timer invocation. We do not want do_dbs_timer to run * when user is changing the governor or limits. */ struct mutex timer_mutex; }; static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info); static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info); static unsigned int dbs_enable; /* number of CPUs using this policy */ /* * dbs_mutex protects dbs_enable in governor start/stop. 
*/ static DEFINE_MUTEX(dbs_mutex); struct dbs_work_struct { struct work_struct work; unsigned int cpu; }; static DEFINE_PER_CPU(struct dbs_work_struct, dbs_refresh_work); static struct dbs_tuners { unsigned int sampling_rate; unsigned int up_threshold; unsigned int up_threshold_multi_core; unsigned int down_differential; unsigned int down_differential_multi_core; unsigned int optimal_freq; unsigned int up_threshold_any_cpu_load; unsigned int sync_freq; unsigned int ignore_nice; unsigned int sampling_down_factor; int powersave_bias; unsigned int io_is_busy; } dbs_tuners_ins = { .up_threshold_multi_core = DEF_FREQUENCY_UP_THRESHOLD, .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, .down_differential_multi_core = MICRO_FREQUENCY_DOWN_DIFFERENTIAL, .up_threshold_any_cpu_load = DEF_FREQUENCY_UP_THRESHOLD, .ignore_nice = 0, .powersave_bias = 0, .sync_freq = 0, .optimal_freq = 0, }; static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) { u64 idle_time; u64 cur_wall_time; u64 busy_time; cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; idle_time = cur_wall_time - busy_time; if (wall) *wall = jiffies_to_usecs(cur_wall_time); return jiffies_to_usecs(idle_time); } static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) { u64 idle_time = get_cpu_idle_time_us(cpu, NULL); if (idle_time == -1ULL) return get_cpu_idle_time_jiffy(cpu, wall); else idle_time += get_cpu_iowait_time_us(cpu, wall); return idle_time; } static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) { u64 iowait_time = 
get_cpu_iowait_time_us(cpu, wall); if (iowait_time == -1ULL) return 0; return iowait_time; } /* * Find right freq to be set now with powersave_bias on. * Returns the freq_hi to be used right now and will set freq_hi_jiffies, * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. */ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, unsigned int freq_next, unsigned int relation) { unsigned int freq_req, freq_avg; unsigned int freq_hi, freq_lo; unsigned int index = 0; unsigned int jiffies_total, jiffies_hi, jiffies_lo; int freq_reduc; struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); if (!dbs_info->freq_table) { dbs_info->freq_lo = 0; dbs_info->freq_lo_jiffies = 0; return freq_next; } cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, relation, &index); freq_req = dbs_info->freq_table[index].frequency; freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; freq_avg = freq_req - freq_reduc; /* Find freq bounds for freq_avg in freq_table */ index = 0; cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, CPUFREQ_RELATION_H, &index); freq_lo = dbs_info->freq_table[index].frequency; index = 0; cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, CPUFREQ_RELATION_L, &index); freq_hi = dbs_info->freq_table[index].frequency; /* Find out how long we have to be in hi and lo freqs */ if (freq_hi == freq_lo) { dbs_info->freq_lo = 0; dbs_info->freq_lo_jiffies = 0; return freq_lo; } jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); jiffies_hi = (freq_avg - freq_lo) * jiffies_total; jiffies_hi += ((freq_hi - freq_lo) / 2); jiffies_hi /= (freq_hi - freq_lo); jiffies_lo = jiffies_total - jiffies_hi; dbs_info->freq_lo = freq_lo; dbs_info->freq_lo_jiffies = jiffies_lo; dbs_info->freq_hi_jiffies = jiffies_hi; return freq_hi; } static int ondemand_powersave_bias_setspeed(struct cpufreq_policy *policy, struct cpufreq_policy *altpolicy, int level) { if 
(level == POWERSAVE_BIAS_MAXLEVEL) { /* maximum powersave; set to lowest frequency */ __cpufreq_driver_target(policy, (altpolicy) ? altpolicy->min : policy->min, CPUFREQ_RELATION_L); return 1; } else if (level == POWERSAVE_BIAS_MINLEVEL) { /* minimum powersave; set to highest frequency */ __cpufreq_driver_target(policy, (altpolicy) ? altpolicy->max : policy->max, CPUFREQ_RELATION_H); return 1; } return 0; } static void ondemand_powersave_bias_init_cpu(int cpu) { struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); dbs_info->freq_table = cpufreq_frequency_get_table(cpu); dbs_info->freq_lo = 0; } static void ondemand_powersave_bias_init(void) { int i; for_each_online_cpu(i) { ondemand_powersave_bias_init_cpu(i); } } /************************** sysfs interface ************************/ static ssize_t show_sampling_rate_min(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%u\n", min_sampling_rate); } define_one_global_ro(sampling_rate_min); /* cpufreq_ondemand Governor Tunables */ #define show_one(file_name, object) \ static ssize_t show_##file_name \ (struct kobject *kobj, struct attribute *attr, char *buf) \ { \ return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ } show_one(sampling_rate, sampling_rate); show_one(io_is_busy, io_is_busy); show_one(up_threshold, up_threshold); show_one(up_threshold_multi_core, up_threshold_multi_core); show_one(down_differential, down_differential); show_one(sampling_down_factor, sampling_down_factor); show_one(ignore_nice_load, ignore_nice); show_one(optimal_freq, optimal_freq); show_one(up_threshold_any_cpu_load, up_threshold_any_cpu_load); show_one(sync_freq, sync_freq); static ssize_t show_powersave_bias (struct kobject *kobj, struct attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", dbs_tuners_ins.powersave_bias); } /** * update_sampling_rate - update sampling rate effective immediately if needed. 
* @new_rate: new sampling rate * * If new rate is smaller than the old, simply updaing * dbs_tuners_int.sampling_rate might not be appropriate. For example, * if the original sampling_rate was 1 second and the requested new sampling * rate is 10 ms because the user needs immediate reaction from ondemand * governor, but not sure if higher frequency will be required or not, * then, the governor may change the sampling rate too late; up to 1 second * later. Thus, if we are reducing the sampling rate, we need to make the * new value effective immediately. */ static void update_sampling_rate(unsigned int new_rate) { int cpu; dbs_tuners_ins.sampling_rate = new_rate = max(new_rate, min_sampling_rate); for_each_online_cpu(cpu) { struct cpufreq_policy *policy; struct cpu_dbs_info_s *dbs_info; unsigned long next_sampling, appointed_at; policy = cpufreq_cpu_get(cpu); if (!policy) continue; dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); cpufreq_cpu_put(policy); mutex_lock(&dbs_info->timer_mutex); if (!delayed_work_pending(&dbs_info->work)) { mutex_unlock(&dbs_info->timer_mutex); continue; } next_sampling = jiffies + usecs_to_jiffies(new_rate); appointed_at = dbs_info->work.timer.expires; if (time_before(next_sampling, appointed_at)) { mutex_unlock(&dbs_info->timer_mutex); cancel_delayed_work_sync(&dbs_info->work); mutex_lock(&dbs_info->timer_mutex); schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, usecs_to_jiffies(new_rate)); } mutex_unlock(&dbs_info->timer_mutex); } } static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; update_sampling_rate(input); return count; } static ssize_t store_sync_freq(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; dbs_tuners_ins.sync_freq = input; return count; } static ssize_t 
store_io_is_busy(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; dbs_tuners_ins.io_is_busy = !!input; return count; } static ssize_t store_optimal_freq(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; dbs_tuners_ins.optimal_freq = input; return count; } static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || input < MIN_FREQUENCY_UP_THRESHOLD) { return -EINVAL; } dbs_tuners_ins.up_threshold = input; return count; } static ssize_t store_up_threshold_multi_core(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || input < MIN_FREQUENCY_UP_THRESHOLD) { return -EINVAL; } dbs_tuners_ins.up_threshold_multi_core = input; return count; } static ssize_t store_up_threshold_any_cpu_load(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || input < MIN_FREQUENCY_UP_THRESHOLD) { return -EINVAL; } dbs_tuners_ins.up_threshold_any_cpu_load = input; return count; } static ssize_t store_down_differential(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1 || input >= dbs_tuners_ins.up_threshold || input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) { return -EINVAL; } dbs_tuners_ins.down_differential = input; return count; } static ssize_t store_sampling_down_factor(struct kobject *a, struct attribute *b, const char *buf, size_t count) { 
unsigned int input, j; int ret; ret = sscanf(buf, "%u", &input); if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; dbs_tuners_ins.sampling_down_factor = input; /* Reset down sampling multiplier in case it was active */ for_each_online_cpu(j) { struct cpu_dbs_info_s *dbs_info; dbs_info = &per_cpu(od_cpu_dbs_info, j); dbs_info->rate_mult = 1; } return count; } static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, const char *buf, size_t count) { unsigned int input; int ret; unsigned int j; ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; if (input > 1) input = 1; if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ return count; } dbs_tuners_ins.ignore_nice = input; /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { struct cpu_dbs_info_s *dbs_info; dbs_info = &per_cpu(od_cpu_dbs_info, j); dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); if (dbs_tuners_ins.ignore_nice) dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } return count; } static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, const char *buf, size_t count) { int input = 0; int bypass = 0; int ret, cpu, reenable_timer, j; struct cpu_dbs_info_s *dbs_info; struct cpumask cpus_timer_done; cpumask_clear(&cpus_timer_done); ret = sscanf(buf, "%d", &input); if (ret != 1) return -EINVAL; if (input >= POWERSAVE_BIAS_MAXLEVEL) { input = POWERSAVE_BIAS_MAXLEVEL; bypass = 1; } else if (input <= POWERSAVE_BIAS_MINLEVEL) { input = POWERSAVE_BIAS_MINLEVEL; bypass = 1; } if (input == dbs_tuners_ins.powersave_bias) { /* no change */ return count; } reenable_timer = ((dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MAXLEVEL) || (dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MINLEVEL)); dbs_tuners_ins.powersave_bias = input; if (!bypass) { if (reenable_timer) { /* reinstate dbs timer */ for_each_online_cpu(cpu) { if (lock_policy_rwsem_write(cpu) < 0) continue; dbs_info 
= &per_cpu(od_cpu_dbs_info, cpu); for_each_cpu(j, &cpus_timer_done) { if (!dbs_info->cur_policy) { pr_err("Dbs policy is NULL\n"); goto skip_this_cpu; } if (cpumask_test_cpu(j, dbs_info-> cur_policy->cpus)) goto skip_this_cpu; } cpumask_set_cpu(cpu, &cpus_timer_done); if (dbs_info->cur_policy) { /* restart dbs timer */ dbs_timer_init(dbs_info); } skip_this_cpu: unlock_policy_rwsem_write(cpu); } } ondemand_powersave_bias_init(); } else { /* running at maximum or minimum frequencies; cancel dbs timer as periodic load sampling is not necessary */ for_each_online_cpu(cpu) { if (lock_policy_rwsem_write(cpu) < 0) continue; dbs_info = &per_cpu(od_cpu_dbs_info, cpu); for_each_cpu(j, &cpus_timer_done) { if (!dbs_info->cur_policy) { pr_err("Dbs policy is NULL\n"); goto skip_this_cpu_bypass; } if (cpumask_test_cpu(j, dbs_info-> cur_policy->cpus)) goto skip_this_cpu_bypass; } cpumask_set_cpu(cpu, &cpus_timer_done); if (dbs_info->cur_policy) { /* cpu using ondemand, cancel dbs timer */ mutex_lock(&dbs_info->timer_mutex); dbs_timer_exit(dbs_info); ondemand_powersave_bias_setspeed( dbs_info->cur_policy, NULL, input); mutex_unlock(&dbs_info->timer_mutex); } skip_this_cpu_bypass: unlock_policy_rwsem_write(cpu); } } return count; } define_one_global_rw(sampling_rate); define_one_global_rw(io_is_busy); define_one_global_rw(up_threshold); define_one_global_rw(down_differential); define_one_global_rw(sampling_down_factor); define_one_global_rw(ignore_nice_load); define_one_global_rw(powersave_bias); define_one_global_rw(up_threshold_multi_core); define_one_global_rw(optimal_freq); define_one_global_rw(up_threshold_any_cpu_load); define_one_global_rw(sync_freq); static struct attribute *dbs_attributes[] = { &sampling_rate_min.attr, &sampling_rate.attr, &up_threshold.attr, &down_differential.attr, &sampling_down_factor.attr, &ignore_nice_load.attr, &powersave_bias.attr, &io_is_busy.attr, &up_threshold_multi_core.attr, &optimal_freq.attr, &up_threshold_any_cpu_load.attr, &sync_freq.attr, 
NULL }; static struct attribute_group dbs_attr_group = { .attrs = dbs_attributes, .name = "ondemand", }; /************************** sysfs end ************************/ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) { if (dbs_tuners_ins.powersave_bias) freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); else if (p->cur == p->max) return; __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); } static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) { /* Extrapolated load of this CPU */ unsigned int load_at_max_freq = 0; unsigned int max_load_freq; /* Current load across this CPU */ unsigned int cur_load = 0; unsigned int max_load_other_cpu = 0; struct cpufreq_policy *policy; unsigned int j; this_dbs_info->freq_lo = 0; policy = this_dbs_info->cur_policy; /* * Every sampling_rate, we check, if current idle time is less * than 20% (default), then we try to increase frequency * Every sampling_rate, we look for a the lowest * frequency which can sustain the load while keeping idle time over * 30%. If such a frequency exist, we try to decrease to this frequency. * * Any frequency increase takes it to the maximum frequency. 
* Frequency reduction happens at minimum steps of * 5% (default) of current frequency */ /* Get Absolute Load - in terms of freq */ max_load_freq = 0; for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; unsigned int idle_time, wall_time, iowait_time; unsigned int load_freq; int freq_avg; j_dbs_info = &per_cpu(od_cpu_dbs_info, j); cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); wall_time = (unsigned int) (cur_wall_time - j_dbs_info->prev_cpu_wall); j_dbs_info->prev_cpu_wall = cur_wall_time; idle_time = (unsigned int) (cur_idle_time - j_dbs_info->prev_cpu_idle); j_dbs_info->prev_cpu_idle = cur_idle_time; iowait_time = (unsigned int) (cur_iowait_time - j_dbs_info->prev_cpu_iowait); j_dbs_info->prev_cpu_iowait = cur_iowait_time; if (dbs_tuners_ins.ignore_nice) { u64 cur_nice; unsigned long cur_nice_jiffies; cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - j_dbs_info->prev_cpu_nice; /* * Assumption: nice time between sampling periods will * be less than 2^32 jiffies for 32 bit sys */ cur_nice_jiffies = (unsigned long) cputime64_to_jiffies64(cur_nice); j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; idle_time += jiffies_to_usecs(cur_nice_jiffies); } /* * For the purpose of ondemand, waiting for disk IO is an * indication that you're performance critical, and not that * the system is actually idle. So subtract the iowait time * from the cpu idle time. 
*/ if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) idle_time -= iowait_time; if (unlikely(!wall_time || wall_time < idle_time)) continue; cur_load = 100 * (wall_time - idle_time) / wall_time; j_dbs_info->max_load = max(cur_load, j_dbs_info->prev_load); j_dbs_info->prev_load = cur_load; freq_avg = __cpufreq_driver_getavg(policy, j); if (freq_avg <= 0) freq_avg = policy->cur; load_freq = cur_load * freq_avg; if (load_freq > max_load_freq) max_load_freq = load_freq; } for_each_online_cpu(j) { struct cpu_dbs_info_s *j_dbs_info; j_dbs_info = &per_cpu(od_cpu_dbs_info, j); if (j == policy->cpu) continue; if (max_load_other_cpu < j_dbs_info->max_load) max_load_other_cpu = j_dbs_info->max_load; /* * The other cpu could be running at higher frequency * but may not have completed it's sampling_down_factor. * For that case consider other cpu is loaded so that * frequency imbalance does not occur. */ if ((j_dbs_info->cur_policy != NULL) && (j_dbs_info->cur_policy->cur == j_dbs_info->cur_policy->max)) { if (policy->cur >= dbs_tuners_ins.optimal_freq) max_load_other_cpu = dbs_tuners_ins.up_threshold_any_cpu_load; } } /* calculate the scaled load across CPU */ load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq; cpufreq_notify_utilization(policy, load_at_max_freq); /* we want cpu0 to be the only core blocked for freq changes while we are touching the screen for UI interaction */ if (is_touching && policy->cpu == 0) { if (ktime_to_ms(ktime_get()) - freq_boosted_time >= 1000) is_touching = false; return; } /* Check for frequency increase */ if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { /* If switching to max speed, apply sampling_down_factor */ if (policy->cur < policy->max) this_dbs_info->rate_mult = dbs_tuners_ins.sampling_down_factor; dbs_freq_increase(policy, policy->max); return; } if (num_online_cpus() > 1) { if (max_load_other_cpu > dbs_tuners_ins.up_threshold_any_cpu_load) { if (policy->cur < dbs_tuners_ins.sync_freq) 
dbs_freq_increase(policy, dbs_tuners_ins.sync_freq); return; } if (max_load_freq > dbs_tuners_ins.up_threshold_multi_core * policy->cur) { if (policy->cur < dbs_tuners_ins.optimal_freq) dbs_freq_increase(policy, dbs_tuners_ins.optimal_freq); return; } } /* Check for frequency decrease */ /* if we cannot reduce the frequency anymore, break out early */ if (policy->cur == policy->min) return; /* * The optimal frequency is the frequency that is the lowest that * can support the current CPU usage without triggering the up * policy. To be safe, we focus 10 points under the threshold. */ if (max_load_freq < (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * policy->cur) { unsigned int freq_next; freq_next = max_load_freq / (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential); /* No longer fully busy, reset rate_mult */ this_dbs_info->rate_mult = 1; if (freq_next < policy->min) freq_next = policy->min; if (num_online_cpus() > 1) { if (max_load_other_cpu > (dbs_tuners_ins.up_threshold_multi_core - dbs_tuners_ins.down_differential) && freq_next < dbs_tuners_ins.sync_freq) freq_next = dbs_tuners_ins.sync_freq; if (max_load_freq > (dbs_tuners_ins.up_threshold_multi_core - dbs_tuners_ins.down_differential_multi_core) * policy->cur) freq_next = dbs_tuners_ins.optimal_freq; } if (!dbs_tuners_ins.powersave_bias) { __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); } else { int freq = powersave_bias_target(policy, freq_next, CPUFREQ_RELATION_L); __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L); } } } static void do_dbs_timer(struct work_struct *work) { struct cpu_dbs_info_s *dbs_info = container_of(work, struct cpu_dbs_info_s, work.work); unsigned int cpu = dbs_info->cpu; int sample_type = dbs_info->sample_type; int delay; mutex_lock(&dbs_info->timer_mutex); /* Common NORMAL_SAMPLE setup */ dbs_info->sample_type = DBS_NORMAL_SAMPLE; if (!dbs_tuners_ins.powersave_bias || sample_type == DBS_NORMAL_SAMPLE) { dbs_check_cpu(dbs_info); 
if (dbs_info->freq_lo) { /* Setup timer for SUB_SAMPLE */ dbs_info->sample_type = DBS_SUB_SAMPLE; delay = dbs_info->freq_hi_jiffies; } else { /* We want all CPUs to do sampling nearly on * same jiffy */ delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate * dbs_info->rate_mult); if (num_online_cpus() > 1) delay -= jiffies % delay; } } else { __cpufreq_driver_target(dbs_info->cur_policy, dbs_info->freq_lo, CPUFREQ_RELATION_H); delay = dbs_info->freq_lo_jiffies; } schedule_delayed_work_on(cpu, &dbs_info->work, delay); mutex_unlock(&dbs_info->timer_mutex); } static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) { /* We want all CPUs to do sampling nearly on same jiffy */ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); if (num_online_cpus() > 1) delay -= jiffies % delay; dbs_info->sample_type = DBS_NORMAL_SAMPLE; INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); } static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) { cancel_delayed_work_sync(&dbs_info->work); } /* * Not all CPUs want IO time to be accounted as busy; this dependson how * efficient idling at a higher frequency/voltage is. * Pavel Machek says this is not so for various generations of AMD and old * Intel systems. * Mike Chan (androidlcom) calis this is also not true for ARM. * Because of this, whitelist specific known (series) of CPUs by default, and * leave all others up to the user. */ static int should_io_be_busy(void) { #if defined(CONFIG_X86) /* * For Intel, Core 2 (model 15) andl later have an efficient idle. 
*/ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model >= 15) return 1; #endif return 0; } static int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event) { unsigned int cpu = policy->cpu; struct cpu_dbs_info_s *this_dbs_info; unsigned int j; int rc; this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); switch (event) { case CPUFREQ_GOV_START: if ((!cpu_online(cpu)) || (!policy->cur)) return -EINVAL; mutex_lock(&dbs_mutex); dbs_enable++; for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; j_dbs_info = &per_cpu(od_cpu_dbs_info, j); j_dbs_info->cur_policy = policy; j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &j_dbs_info->prev_cpu_wall); if (dbs_tuners_ins.ignore_nice) j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } this_dbs_info->cpu = cpu; this_dbs_info->rate_mult = 1; ondemand_powersave_bias_init_cpu(cpu); /* * Start the timerschedule work, when this governor * is used for first time */ if (dbs_enable == 1) { unsigned int latency; rc = sysfs_create_group(cpufreq_global_kobject, &dbs_attr_group); if (rc) { mutex_unlock(&dbs_mutex); return rc; } /* policy latency is in nS. 
Convert it to uS first */ latency = policy->cpuinfo.transition_latency / 1000; if (latency == 0) latency = 1; /* Bring kernel and HW constraints together */ min_sampling_rate = max(min_sampling_rate, MIN_LATENCY_MULTIPLIER * latency); dbs_tuners_ins.sampling_rate = max(min_sampling_rate, latency * LATENCY_MULTIPLIER); dbs_tuners_ins.io_is_busy = should_io_be_busy(); if (dbs_tuners_ins.optimal_freq == 0) dbs_tuners_ins.optimal_freq = policy->min; if (dbs_tuners_ins.sync_freq == 0) dbs_tuners_ins.sync_freq = policy->min; } mutex_unlock(&dbs_mutex); if (!ondemand_powersave_bias_setspeed( this_dbs_info->cur_policy, NULL, dbs_tuners_ins.powersave_bias)) dbs_timer_init(this_dbs_info); break; case CPUFREQ_GOV_STOP: dbs_timer_exit(this_dbs_info); mutex_lock(&dbs_mutex); dbs_enable--; /* If device is being removed, policy is no longer * valid. */ this_dbs_info->cur_policy = NULL; mutex_unlock(&dbs_mutex); if (!dbs_enable) sysfs_remove_group(cpufreq_global_kobject, &dbs_attr_group); break; case CPUFREQ_GOV_LIMITS: mutex_lock(&this_dbs_info->timer_mutex); if (policy->max < this_dbs_info->cur_policy->cur) __cpufreq_driver_target(this_dbs_info->cur_policy, policy->max, CPUFREQ_RELATION_H); else if (policy->min > this_dbs_info->cur_policy->cur) __cpufreq_driver_target(this_dbs_info->cur_policy, policy->min, CPUFREQ_RELATION_L); else if (dbs_tuners_ins.powersave_bias != 0) ondemand_powersave_bias_setspeed( this_dbs_info->cur_policy, policy, dbs_tuners_ins.powersave_bias); mutex_unlock(&this_dbs_info->timer_mutex); break; } return 0; } static int __init cpufreq_gov_dbs_init(void) { u64 idle_time; unsigned int i; int cpu = get_cpu(); idle_time = get_cpu_idle_time_us(cpu, NULL); put_cpu(); if (idle_time != -1ULL) { /* Idle micro accounting is supported. 
Use finer thresholds */ dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; dbs_tuners_ins.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL; /* * In nohz/micro accounting case we set the minimum frequency * not depending on HZ, but fixed (very low). The deferred * timer might skip some samples if idle/sleeping as needed. */ min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; } else { /* For correct statistics, we need 10 ticks for each measure */ min_sampling_rate = MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); } for_each_possible_cpu(i) { struct cpu_dbs_info_s *this_dbs_info = &per_cpu(od_cpu_dbs_info, i); struct dbs_work_struct *dbs_work = &per_cpu(dbs_refresh_work, i); mutex_init(&this_dbs_info->timer_mutex); dbs_work->cpu = i; } return cpufreq_register_governor(&cpufreq_gov_ondemand); } static void __exit cpufreq_gov_dbs_exit(void) { unsigned int i; cpufreq_unregister_governor(&cpufreq_gov_ondemand); for_each_possible_cpu(i) { struct cpu_dbs_info_s *this_dbs_info = &per_cpu(od_cpu_dbs_info, i); mutex_destroy(&this_dbs_info->timer_mutex); } } MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>"); MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>"); MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for " "Low Latency Frequency Transition capable processors"); MODULE_LICENSE("GPL"); #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND fs_initcall(cpufreq_gov_dbs_init); #else module_init(cpufreq_gov_dbs_init); #endif module_exit(cpufreq_gov_dbs_exit);
gpl-2.0
htc-m8-caf/android_kernel_htc_msm8974
drivers/net/wireless/wcnss/wcnss_wlan.c
1
64709
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/firmware.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/miscdevice.h> #include <linux/fs.h> #include <linux/wcnss_wlan.h> #include <linux/platform_data/qcom_wcnss_device.h> #include <linux/workqueue.h> #include <linux/jiffies.h> #include <linux/gpio.h> #include <linux/wakelock.h> #include <linux/delay.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/clk.h> #include <linux/ratelimit.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/uaccess.h> #include <linux/suspend.h> #include <linux/rwsem.h> #include <linux/mfd/pm8xxx/misc.h> #include <linux/qpnp/qpnp-adc.h> #include <mach/board.h> #include <mach/msm_smd.h> #include <mach/msm_iomap.h> #include <mach/subsystem_restart.h> #include <mach/devices_dtb.h> #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC #include "wcnss_prealloc.h" #endif #define DEVICE "wcnss_wlan" #define CTRL_DEVICE "wcnss_ctrl" #define VERSION "1.01" #define WCNSS_PIL_DEVICE "wcnss" #define WCNSS_CONFIG_UNSPECIFIED (-1) #define UINT32_MAX (0xFFFFFFFFU) static int has_48mhz_xo = WCNSS_CONFIG_UNSPECIFIED; module_param(has_48mhz_xo, int, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(has_48mhz_xo, "Is an external 48 MHz XO present"); static int has_calibrated_data = WCNSS_CONFIG_UNSPECIFIED; module_param(has_calibrated_data, int, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(has_calibrated_data, "whether calibrated data file available"); 
static int has_autodetect_xo = WCNSS_CONFIG_UNSPECIFIED; module_param(has_autodetect_xo, int, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(has_autodetect_xo, "Perform auto detect to configure IRIS XO"); static int do_not_cancel_vote = WCNSS_CONFIG_UNSPECIFIED; module_param(do_not_cancel_vote, int, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(do_not_cancel_vote, "Do not cancel votes for wcnss"); static DEFINE_SPINLOCK(reg_spinlock); #define MSM_RIVA_PHYS 0x03204000 #define MSM_PRONTO_PHYS 0xfb21b000 #define RIVA_SPARE_OFFSET 0x0b4 #define RIVA_SUSPEND_BIT BIT(24) #define MSM_RIVA_CCU_BASE 0x03200800 #define CCU_RIVA_INVALID_ADDR_OFFSET 0x100 #define CCU_RIVA_LAST_ADDR0_OFFSET 0x104 #define CCU_RIVA_LAST_ADDR1_OFFSET 0x108 #define CCU_RIVA_LAST_ADDR2_OFFSET 0x10c #define PRONTO_PMU_SPARE_OFFSET 0x1088 #define PRONTO_PMU_COM_GDSCR_OFFSET 0x0024 #define PRONTO_PMU_COM_GDSCR_SW_COLLAPSE BIT(0) #define PRONTO_PMU_COM_GDSCR_HW_CTRL BIT(1) #define PRONTO_PMU_WLAN_BCR_OFFSET 0x0050 #define PRONTO_PMU_WLAN_BCR_BLK_ARES BIT(0) #define PRONTO_PMU_WLAN_GDSCR_OFFSET 0x0054 #define PRONTO_PMU_WLAN_GDSCR_SW_COLLAPSE BIT(0) #define PRONTO_PMU_CBCR_OFFSET 0x0008 #define PRONTO_PMU_CBCR_CLK_EN BIT(0) #define PRONTO_PMU_COM_CPU_CBCR_OFFSET 0x0030 #define PRONTO_PMU_COM_AHB_CBCR_OFFSET 0x0034 #define PRONTO_PMU_WLAN_AHB_CBCR_OFFSET 0x0074 #define PRONTO_PMU_WLAN_AHB_CBCR_CLK_EN BIT(0) #define PRONTO_PMU_WLAN_AHB_CBCR_CLK_OFF BIT(31) #define PRONTO_PMU_CPU_AHB_CMD_RCGR_OFFSET 0x0120 #define PRONTO_PMU_CPU_AHB_CMD_RCGR_ROOT_EN BIT(1) #define PRONTO_PMU_CFG_OFFSET 0x1004 #define PRONTO_PMU_COM_CSR_OFFSET 0x1040 #define PRONTO_PMU_SOFT_RESET_OFFSET 0x104C #define MSM_PRONTO_A2XB_BASE 0xfb100400 #define A2XB_CFG_OFFSET 0x00 #define A2XB_INT_SRC_OFFSET 0x0c #define A2XB_TSTBUS_CTRL_OFFSET 0x14 #define A2XB_TSTBUS_OFFSET 0x18 #define A2XB_ERR_INFO_OFFSET 0x1c #define WCNSS_TSTBUS_CTRL_EN BIT(0) #define WCNSS_TSTBUS_CTRL_AXIM (0x02 << 1) #define WCNSS_TSTBUS_CTRL_CMDFIFO (0x03 << 1) #define 
WCNSS_TSTBUS_CTRL_WRFIFO (0x04 << 1) #define WCNSS_TSTBUS_CTRL_RDFIFO (0x05 << 1) #define WCNSS_TSTBUS_CTRL_CTRL (0x07 << 1) #define WCNSS_TSTBUS_CTRL_AXIM_CFG0 (0x00 << 6) #define WCNSS_TSTBUS_CTRL_AXIM_CFG1 (0x01 << 6) #define WCNSS_TSTBUS_CTRL_CTRL_CFG0 (0x00 << 12) #define WCNSS_TSTBUS_CTRL_CTRL_CFG1 (0x01 << 12) #define MSM_PRONTO_CCPU_BASE 0xfb205050 #define CCU_PRONTO_INVALID_ADDR_OFFSET 0x08 #define CCU_PRONTO_LAST_ADDR0_OFFSET 0x0c #define CCU_PRONTO_LAST_ADDR1_OFFSET 0x10 #define CCU_PRONTO_LAST_ADDR2_OFFSET 0x14 #define MSM_PRONTO_SAW2_BASE 0xfb219000 #define PRONTO_SAW2_SPM_STS_OFFSET 0x0c #define MSM_PRONTO_PLL_BASE 0xfb21b1c0 #define PRONTO_PLL_STATUS_OFFSET 0x1c #define MSM_PRONTO_TXP_STATUS 0xfb08040c #define MSM_PRONTO_TXP_STATUS 0xfb08040c #define MSM_PRONTO_TXP_PHY_ABORT 0xfb080488 #define MSM_PRONTO_BRDG_ERR_SRC 0xfb080fb0 #define MSM_PRONTO_ALARMS_TXCTL 0xfb0120a8 #define MSM_PRONTO_ALARMS_TACTL 0xfb012448 #define WCNSS_DEF_WLAN_RX_BUFF_COUNT 1024 #define WCNSS_VBATT_THRESHOLD 3500000 #define WCNSS_VBATT_GUARD 200 #define WCNSS_VBATT_HIGH 3700000 #define WCNSS_VBATT_LOW 3300000 #define WCNSS_CTRL_CHANNEL "WCNSS_CTRL" #define WCNSS_MAX_FRAME_SIZE (4*1024) #define WCNSS_VERSION_LEN 30 #define WCNSS_MAX_BUILD_VER_LEN 256 #define WCNSS_MAX_CMD_LEN (128) #define WCNSS_MIN_CMD_LEN (3) #define WCNSS_MIN_SERIAL_LEN (6) /* control messages from userspace */ #define WCNSS_USR_CTRL_MSG_START 0x00000000 #define WCNSS_USR_SERIAL_NUM (WCNSS_USR_CTRL_MSG_START + 1) #define WCNSS_USR_HAS_CAL_DATA (WCNSS_USR_CTRL_MSG_START + 2) #define WCNSS_USR_WLAN_MAC_ADDR (WCNSS_USR_CTRL_MSG_START + 3) #define MAC_ADDRESS_STR "%02x:%02x:%02x:%02x:%02x:%02x" /* message types */ #define WCNSS_CTRL_MSG_START 0x01000000 #define WCNSS_VERSION_REQ (WCNSS_CTRL_MSG_START + 0) #define WCNSS_VERSION_RSP (WCNSS_CTRL_MSG_START + 1) #define WCNSS_NVBIN_DNLD_REQ (WCNSS_CTRL_MSG_START + 2) #define WCNSS_NVBIN_DNLD_RSP (WCNSS_CTRL_MSG_START + 3) #define WCNSS_CALDATA_UPLD_REQ 
(WCNSS_CTRL_MSG_START + 4) #define WCNSS_CALDATA_UPLD_RSP (WCNSS_CTRL_MSG_START + 5) #define WCNSS_CALDATA_DNLD_REQ (WCNSS_CTRL_MSG_START + 6) #define WCNSS_CALDATA_DNLD_RSP (WCNSS_CTRL_MSG_START + 7) #define WCNSS_VBATT_LEVEL_IND (WCNSS_CTRL_MSG_START + 8) #define WCNSS_BUILD_VER_REQ (WCNSS_CTRL_MSG_START + 9) #define WCNSS_BUILD_VER_RSP (WCNSS_CTRL_MSG_START + 10) /* max 20mhz channel count */ #define WCNSS_MAX_CH_NUM 45 #define VALID_VERSION(version) \ ((strncmp(version, "INVALID", WCNSS_VERSION_LEN)) ? 1 : 0) #define FW_CALDATA_CAPABLE() \ ((penv->fw_major >= 1) && (penv->fw_minor >= 5) ? 1 : 0) struct smd_msg_hdr { unsigned int msg_type; unsigned int msg_len; }; struct wcnss_version { struct smd_msg_hdr hdr; unsigned char major; unsigned char minor; unsigned char version; unsigned char revision; }; struct wcnss_pmic_dump { char reg_name[10]; u16 reg_addr; }; static struct wcnss_pmic_dump wcnss_pmic_reg_dump[] = { {"S2", 0x1D8}, {"L4", 0xB4}, {"L10", 0xC0}, {"LVS2", 0x62}, {"S4", 0x1E8}, {"LVS7", 0x06C}, {"LVS1", 0x060}, }; #define NVBIN_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin" #define NV_FRAGMENT_SIZE 3072 #define MAX_CALIBRATED_DATA_SIZE (64*1024) #define LAST_FRAGMENT (1 << 0) #define MESSAGE_TO_FOLLOW (1 << 1) #define CAN_RECEIVE_CALDATA (1 << 15) #define WCNSS_RESP_SUCCESS 1 #define WCNSS_RESP_FAIL 0 #define TOTALFRAGMENTS(x) (((x % NV_FRAGMENT_SIZE) == 0) ? 
\ (x / NV_FRAGMENT_SIZE) : ((x / NV_FRAGMENT_SIZE) + 1)) struct nvbin_dnld_req_params { unsigned short frag_number; unsigned short msg_flags; unsigned int nvbin_buffer_size; }; struct nvbin_dnld_req_msg { struct smd_msg_hdr hdr; struct nvbin_dnld_req_params dnld_req_params; }; struct cal_data_params { unsigned int total_size; unsigned short frag_number; unsigned short msg_flags; unsigned int frag_size; }; struct cal_data_msg { struct smd_msg_hdr hdr; struct cal_data_params cal_params; }; struct vbatt_level { u32 curr_volt; u32 threshold; }; struct vbatt_message { struct smd_msg_hdr hdr; struct vbatt_level vbatt; }; static struct { struct platform_device *pdev; void *pil; struct resource *mmio_res; struct resource *tx_irq_res; struct resource *rx_irq_res; struct resource *gpios_5wire; const struct dev_pm_ops *pm_ops; int triggered; int smd_channel_ready; u32 wlan_rx_buff_count; smd_channel_t *smd_ch; unsigned char wcnss_version[WCNSS_VERSION_LEN]; unsigned char fw_major; unsigned char fw_minor; unsigned int serial_number; int thermal_mitigation; enum wcnss_hw_type wcnss_hw_type; void (*tm_notify)(struct device *, int); struct wcnss_wlan_config wlan_config; struct delayed_work wcnss_work; struct delayed_work vbatt_work; struct work_struct wcnssctrl_version_work; struct work_struct wcnssctrl_nvbin_dnld_work; struct work_struct wcnssctrl_rx_work; struct wake_lock wcnss_wake_lock; void __iomem *msm_wcnss_base; void __iomem *riva_ccu_base; void __iomem *pronto_a2xb_base; void __iomem *pronto_ccpu_base; void __iomem *pronto_saw2_base; void __iomem *pronto_pll_base; void __iomem *wlan_tx_status; void __iomem *wlan_tx_phy_aborts; void __iomem *wlan_brdg_err_source; void __iomem *alarms_txctl; void __iomem *alarms_tactl; void __iomem *fiq_reg; int nv_downloaded; unsigned char *fw_cal_data; unsigned char *user_cal_data; int fw_cal_rcvd; int fw_cal_exp_frag; int fw_cal_available; int user_cal_read; int user_cal_available; u32 user_cal_rcvd; int user_cal_exp_size; int 
device_opened; int iris_xo_mode_set; int fw_vbatt_state; int ctrl_device_opened; char wlan_nv_macAddr[WLAN_MAC_ADDR_SIZE]; struct mutex dev_lock; struct mutex ctrl_lock; wait_queue_head_t read_wait; struct qpnp_adc_tm_btm_param vbat_monitor_params; struct qpnp_adc_tm_chip *adc_tm_dev; struct mutex vbat_monitor_mutex; u16 unsafe_ch_count; u16 unsafe_ch_list[WCNSS_MAX_CH_NUM]; } *penv = NULL; static ssize_t wcnss_wlan_macaddr_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { char macAddr[WLAN_MAC_ADDR_SIZE]; if (!penv) return -ENODEV; pr_debug("%s: Receive MAC Addr From user space: %s\n", __func__, buf); if (WLAN_MAC_ADDR_SIZE != sscanf(buf, MAC_ADDRESS_STR, (int *)&macAddr[0], (int *)&macAddr[1], (int *)&macAddr[2], (int *)&macAddr[3], (int *)&macAddr[4], (int *)&macAddr[5])) { pr_err("%s: Failed to Copy MAC\n", __func__); return -EINVAL; } memcpy(penv->wlan_nv_macAddr, macAddr, sizeof(penv->wlan_nv_macAddr)); pr_info("%s: Write MAC Addr:" MAC_ADDRESS_STR "\n", __func__, penv->wlan_nv_macAddr[0], penv->wlan_nv_macAddr[1], penv->wlan_nv_macAddr[2], penv->wlan_nv_macAddr[3], penv->wlan_nv_macAddr[4], penv->wlan_nv_macAddr[5]); return count; } static ssize_t wcnss_wlan_macaddr_show(struct device *dev, struct device_attribute *attr, char *buf) { if (!penv) return -ENODEV; return scnprintf(buf, PAGE_SIZE, MAC_ADDRESS_STR, penv->wlan_nv_macAddr[0], penv->wlan_nv_macAddr[1], penv->wlan_nv_macAddr[2], penv->wlan_nv_macAddr[3], penv->wlan_nv_macAddr[4], penv->wlan_nv_macAddr[5]); } static DEVICE_ATTR(wcnss_mac_addr, S_IRUSR | S_IWUSR, wcnss_wlan_macaddr_show, wcnss_wlan_macaddr_store); static ssize_t wcnss_serial_number_show(struct device *dev, struct device_attribute *attr, char *buf) { if (!penv) return -ENODEV; return scnprintf(buf, PAGE_SIZE, "%08X\n", penv->serial_number); } static ssize_t wcnss_serial_number_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int value; if (!penv) 
return -ENODEV; if (sscanf(buf, "%08X", &value) != 1) return -EINVAL; penv->serial_number = value; return count; } static DEVICE_ATTR(serial_number, S_IRUSR | S_IWUSR, wcnss_serial_number_show, wcnss_serial_number_store); static ssize_t wcnss_thermal_mitigation_show(struct device *dev, struct device_attribute *attr, char *buf) { if (!penv) return -ENODEV; return scnprintf(buf, PAGE_SIZE, "%u\n", penv->thermal_mitigation); } static ssize_t wcnss_thermal_mitigation_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int value; if (!penv) return -ENODEV; if (sscanf(buf, "%d", &value) != 1) return -EINVAL; penv->thermal_mitigation = value; if (penv->tm_notify) (penv->tm_notify)(dev, value); return count; } static DEVICE_ATTR(thermal_mitigation, S_IRUSR | S_IWUSR, wcnss_thermal_mitigation_show, wcnss_thermal_mitigation_store); static ssize_t wcnss_version_show(struct device *dev, struct device_attribute *attr, char *buf) { if (!penv) return -ENODEV; return scnprintf(buf, PAGE_SIZE, "%s", penv->wcnss_version); } static DEVICE_ATTR(wcnss_version, S_IRUSR, wcnss_version_show, NULL); void wcnss_riva_dump_pmic_regs(void) { int i, rc; u8 val; for (i = 0; i < ARRAY_SIZE(wcnss_pmic_reg_dump); i++) { val = 0; rc = pm8xxx_read_register(wcnss_pmic_reg_dump[i].reg_addr, &val); if (rc) pr_err("PMIC READ: Failed to read addr = %d\n", wcnss_pmic_reg_dump[i].reg_addr); else pr_info_ratelimited("PMIC READ: %s addr = %x, value = %x\n", wcnss_pmic_reg_dump[i].reg_name, wcnss_pmic_reg_dump[i].reg_addr, val); } } void wcnss_riva_log_debug_regs(void) { void __iomem *ccu_reg; u32 reg = 0; ccu_reg = penv->riva_ccu_base + CCU_RIVA_INVALID_ADDR_OFFSET; reg = readl_relaxed(ccu_reg); pr_info_ratelimited("%s: CCU_CCPU_INVALID_ADDR %08x\n", __func__, reg); ccu_reg = penv->riva_ccu_base + CCU_RIVA_LAST_ADDR0_OFFSET; reg = readl_relaxed(ccu_reg); pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR0 %08x\n", __func__, reg); ccu_reg = penv->riva_ccu_base + 
CCU_RIVA_LAST_ADDR1_OFFSET; reg = readl_relaxed(ccu_reg); pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR1 %08x\n", __func__, reg); ccu_reg = penv->riva_ccu_base + CCU_RIVA_LAST_ADDR2_OFFSET; reg = readl_relaxed(ccu_reg); pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg); wcnss_riva_dump_pmic_regs(); } EXPORT_SYMBOL(wcnss_riva_log_debug_regs); void wcnss_pronto_log_debug_regs(void) { void __iomem *reg_addr, *tst_addr, *tst_ctrl_addr; u32 reg = 0, reg2 = 0, reg3 = 0, reg4 = 0; reg_addr = penv->msm_wcnss_base + PRONTO_PMU_SPARE_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: PRONTO_PMU_SPARE %08x\n", __func__, reg); reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_CPU_CBCR_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: PRONTO_PMU_COM_CPU_CBCR %08x\n", __func__, reg); reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_AHB_CBCR_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: PRONTO_PMU_COM_AHB_CBCR %08x\n", __func__, reg); reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CFG_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: PRONTO_PMU_CFG %08x\n", __func__, reg); reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_CSR_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: PRONTO_PMU_COM_CSR %08x\n", __func__, reg); reg_addr = penv->msm_wcnss_base + PRONTO_PMU_SOFT_RESET_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: PRONTO_PMU_SOFT_RESET %08x\n", __func__, reg); reg_addr = penv->pronto_saw2_base + PRONTO_SAW2_SPM_STS_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: PRONTO_SAW2_SPM_STS %08x\n", __func__, reg); reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_GDSCR_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: PRONTO_PMU_COM_GDSCR %08x\n", __func__, reg); reg >>= 31; if (!reg) { pr_info_ratelimited("%s: Cannot log, Pronto common SS is power collapsed\n", __func__); return; } reg &= ~(PRONTO_PMU_COM_GDSCR_SW_COLLAPSE | 
PRONTO_PMU_COM_GDSCR_HW_CTRL); writel_relaxed(reg, reg_addr); reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CBCR_OFFSET; reg = readl_relaxed(reg_addr); reg |= PRONTO_PMU_CBCR_CLK_EN; writel_relaxed(reg, reg_addr); reg_addr = penv->pronto_a2xb_base + A2XB_CFG_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: A2XB_CFG_OFFSET %08x\n", __func__, reg); reg_addr = penv->pronto_a2xb_base + A2XB_INT_SRC_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: A2XB_INT_SRC_OFFSET %08x\n", __func__, reg); reg_addr = penv->pronto_a2xb_base + A2XB_ERR_INFO_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: A2XB_ERR_INFO_OFFSET %08x\n", __func__, reg); reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_INVALID_ADDR_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: CCU_CCPU_INVALID_ADDR %08x\n", __func__, reg); reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR0_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR0 %08x\n", __func__, reg); reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR1_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR1 %08x\n", __func__, reg); reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR2_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg); reg_addr = penv->pronto_pll_base + PRONTO_PLL_STATUS_OFFSET; reg = readl_relaxed(reg_addr); pr_info_ratelimited("%s: PRONTO_PLL_STATUS %08x\n", __func__, reg); tst_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_OFFSET; tst_ctrl_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_CTRL_OFFSET; reg = 0; reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_RDFIFO; writel_relaxed(reg, tst_ctrl_addr); reg = readl_relaxed(tst_addr); pr_info_ratelimited("%s: Read data FIFO testbus %08x\n", __func__, reg); reg = 0; reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_CMDFIFO; writel_relaxed(reg, tst_ctrl_addr); reg = readl_relaxed(tst_addr); 
pr_info_ratelimited("%s: Command FIFO testbus %08x\n", __func__, reg); reg = 0; reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_WRFIFO; writel_relaxed(reg, tst_ctrl_addr); reg = readl_relaxed(tst_addr); pr_info_ratelimited("%s: Rrite data FIFO testbus %08x\n", __func__, reg); reg = 0; reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_AXIM | WCNSS_TSTBUS_CTRL_AXIM_CFG0; writel_relaxed(reg, tst_ctrl_addr); reg = readl_relaxed(tst_addr); pr_info_ratelimited("%s: AXIM SEL CFG0 testbus %08x\n", __func__, reg); reg = 0; reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_AXIM | WCNSS_TSTBUS_CTRL_AXIM_CFG1; writel_relaxed(reg, tst_ctrl_addr); reg = readl_relaxed(tst_addr); pr_info_ratelimited("%s: AXIM SEL CFG1 testbus %08x\n", __func__, reg); reg = 0; reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_CTRL | WCNSS_TSTBUS_CTRL_CTRL_CFG0; writel_relaxed(reg, tst_ctrl_addr); reg = readl_relaxed(tst_addr); pr_info_ratelimited("%s: CTRL SEL CFG0 testbus %08x\n", __func__, reg); reg = 0; reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_CTRL | WCNSS_TSTBUS_CTRL_CTRL_CFG1; writel_relaxed(reg, tst_ctrl_addr); reg = readl_relaxed(tst_addr); pr_info_ratelimited("%s: CTRL SEL CFG1 testbus %08x\n", __func__, reg); reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_BCR_OFFSET; reg = readl_relaxed(reg_addr); reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_GDSCR_OFFSET; reg2 = readl_relaxed(reg_addr); reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_AHB_CBCR_OFFSET; reg3 = readl_relaxed(reg_addr); pr_info_ratelimited("%s: PMU_WLAN_AHB_CBCR %08x\n", __func__, reg3); reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CPU_AHB_CMD_RCGR_OFFSET; reg4 = readl_relaxed(reg_addr); pr_info_ratelimited("%s: PMU_CPU_CMD_RCGR %08x\n", __func__, reg4); if ((reg & PRONTO_PMU_WLAN_BCR_BLK_ARES) || (reg2 & PRONTO_PMU_WLAN_GDSCR_SW_COLLAPSE) || (!(reg4 & PRONTO_PMU_CPU_AHB_CMD_RCGR_ROOT_EN)) || (reg3 & PRONTO_PMU_WLAN_AHB_CBCR_CLK_OFF) || (!(reg3 & PRONTO_PMU_WLAN_AHB_CBCR_CLK_EN))) { 
pr_info_ratelimited("%s: Cannot log, wlan domain is power collapsed\n", __func__); return; } reg = readl_relaxed(penv->wlan_tx_phy_aborts); pr_info_ratelimited("%s: WLAN_TX_PHY_ABORTS %08x\n", __func__, reg); reg = readl_relaxed(penv->wlan_brdg_err_source); pr_info_ratelimited("%s: WLAN_BRDG_ERR_SOURCE %08x\n", __func__, reg); reg = readl_relaxed(penv->wlan_tx_status); pr_info_ratelimited("%s: WLAN_TX_STATUS %08x\n", __func__, reg); reg = readl_relaxed(penv->alarms_txctl); pr_err("ALARMS_TXCTL %08x\n", reg); reg = readl_relaxed(penv->alarms_tactl); pr_err("ALARMS_TACTL %08x\n", reg); } EXPORT_SYMBOL(wcnss_pronto_log_debug_regs); #ifdef CONFIG_WCNSS_REGISTER_DUMP_ON_BITE static void wcnss_log_iris_regs(void) { int i; u32 reg_val; u32 regs_array[] = { 0x04, 0x05, 0x11, 0x1e, 0x40, 0x48, 0x49, 0x4b, 0x00, 0x01, 0x4d}; pr_info("IRIS Registers [address] : value\n"); for (i = 0; i < ARRAY_SIZE(regs_array); i++) { reg_val = wcnss_rf_read_reg(regs_array[i]); pr_info("[0x%08x] : 0x%08x\n", regs_array[i], reg_val); } } void wcnss_log_debug_regs_on_bite(void) { struct platform_device *pdev = wcnss_get_platform_device(); struct clk *measure; struct clk *wcnss_debug_mux; unsigned long clk_rate; if (wcnss_hardware_type() != WCNSS_PRONTO_HW) return; measure = clk_get(&pdev->dev, "measure"); wcnss_debug_mux = clk_get(&pdev->dev, "wcnss_debug"); if (!IS_ERR(measure) && !IS_ERR(wcnss_debug_mux)) { clk_set_parent(measure, wcnss_debug_mux); clk_rate = clk_get_rate(measure); pr_debug("wcnss: clock frequency is: %luHz\n", clk_rate); if (clk_rate) { wcnss_pronto_log_debug_regs(); } else { pr_err("clock frequency is zero, cannot access PMU or other registers\n"); wcnss_log_iris_regs(); } } else{ pr_err("Can't access measure or wcnss_debug\n"); } } #endif void wcnss_reset_intr(void) { if (wcnss_hardware_type() == WCNSS_PRONTO_HW) { wcnss_pronto_log_debug_regs(); wmb(); __raw_writel(1 << 16, penv->fiq_reg); } else { wcnss_riva_log_debug_regs(); wmb(); __raw_writel(1 << 24, MSM_APCS_GCC_BASE 
+ 0x8); } } EXPORT_SYMBOL(wcnss_reset_intr); static int wcnss_create_sysfs(struct device *dev) { int ret; if (!dev) return -ENODEV; ret = device_create_file(dev, &dev_attr_serial_number); if (ret) return ret; ret = device_create_file(dev, &dev_attr_thermal_mitigation); if (ret) goto remove_serial; ret = device_create_file(dev, &dev_attr_wcnss_version); if (ret) goto remove_thermal; ret = device_create_file(dev, &dev_attr_wcnss_mac_addr); if (ret) goto remove_version; return 0; remove_version: device_remove_file(dev, &dev_attr_wcnss_version); remove_thermal: device_remove_file(dev, &dev_attr_thermal_mitigation); remove_serial: device_remove_file(dev, &dev_attr_serial_number); return ret; } static void wcnss_remove_sysfs(struct device *dev) { if (dev) { device_remove_file(dev, &dev_attr_serial_number); device_remove_file(dev, &dev_attr_thermal_mitigation); device_remove_file(dev, &dev_attr_wcnss_version); device_remove_file(dev, &dev_attr_wcnss_mac_addr); } } static void wcnss_smd_notify_event(void *data, unsigned int event) { int len = 0; if (penv != data) { pr_err("wcnss: invalid env pointer in smd callback\n"); return; } switch (event) { case SMD_EVENT_DATA: len = smd_read_avail(penv->smd_ch); if (len < 0) { pr_err("wcnss: failed to read from smd %d\n", len); return; } printk("wcnss_debug: wcnss_smd_notify_event() : Start to schedule wcnssctrl_rx_work to read smd packet \r\n"); schedule_work(&penv->wcnssctrl_rx_work); break; case SMD_EVENT_OPEN: pr_debug("wcnss: opening WCNSS SMD channel :%s", WCNSS_CTRL_CHANNEL); printk("wcnss_debug: opening WCNSS SMD channel :%s", WCNSS_CTRL_CHANNEL); schedule_work(&penv->wcnssctrl_version_work); break; case SMD_EVENT_CLOSE: pr_debug("wcnss: closing WCNSS SMD channel :%s", WCNSS_CTRL_CHANNEL); penv->nv_downloaded = 0; break; default: break; } } static void wcnss_post_bootup(struct work_struct *work) { if (do_not_cancel_vote == 1) { pr_info("%s: Keeping APPS vote for Iris & WCNSS\n", __func__); return; } pr_info("%s: Cancel APPS 
vote for Iris & WCNSS\n", __func__); wcnss_wlan_power(&penv->pdev->dev, &penv->wlan_config, WCNSS_WLAN_SWITCH_OFF, NULL); wcnss_allow_suspend(); } static int wcnss_pronto_gpios_config(struct device *dev, bool enable) { int rc = 0; int i, j; int WCNSS_WLAN_NUM_GPIOS = 5; for (i = 0; i < WCNSS_WLAN_NUM_GPIOS; i++) { int gpio = of_get_gpio(dev->of_node, i); if (enable) { rc = gpio_request(gpio, "wcnss_wlan"); if (rc) { pr_err("WCNSS gpio_request %d err %d\n", gpio, rc); goto fail; } } else gpio_free(gpio); } return rc; fail: for (j = WCNSS_WLAN_NUM_GPIOS-1; j >= 0; j--) { int gpio = of_get_gpio(dev->of_node, i); gpio_free(gpio); } return rc; } static int wcnss_gpios_config(struct resource *gpios_5wire, bool enable) { int i, j; int rc = 0; for (i = gpios_5wire->start; i <= gpios_5wire->end; i++) { if (enable) { rc = gpio_request(i, gpios_5wire->name); if (rc) { pr_err("WCNSS gpio_request %d err %d\n", i, rc); goto fail; } } else gpio_free(i); } return rc; fail: for (j = i-1; j >= gpios_5wire->start; j--) gpio_free(j); return rc; } static int __devinit wcnss_wlan_ctrl_probe(struct platform_device *pdev) { if (!penv || !penv->triggered) return -ENODEV; penv->smd_channel_ready = 1; pr_info("%s: SMD ctrl channel up\n", __func__); INIT_DELAYED_WORK(&penv->wcnss_work, wcnss_post_bootup); schedule_delayed_work(&penv->wcnss_work, msecs_to_jiffies(10000)); return 0; } void wcnss_flush_delayed_boot_votes() { flush_delayed_work(&penv->wcnss_work); } EXPORT_SYMBOL(wcnss_flush_delayed_boot_votes); static int __devexit wcnss_wlan_ctrl_remove(struct platform_device *pdev) { if (penv) penv->smd_channel_ready = 0; pr_info("%s: SMD ctrl channel down\n", __func__); return 0; } static struct platform_driver wcnss_wlan_ctrl_driver = { .driver = { .name = "WLAN_CTRL", .owner = THIS_MODULE, }, .probe = wcnss_wlan_ctrl_probe, .remove = __devexit_p(wcnss_wlan_ctrl_remove), }; static int __devexit wcnss_ctrl_remove(struct platform_device *pdev) { if (penv && penv->smd_ch) 
smd_close(penv->smd_ch); return 0; } static int __devinit wcnss_ctrl_probe(struct platform_device *pdev) { int ret = 0; if (!penv || !penv->triggered) return -ENODEV; ret = smd_named_open_on_edge(WCNSS_CTRL_CHANNEL, SMD_APPS_WCNSS, &penv->smd_ch, penv, wcnss_smd_notify_event); if (ret < 0) { pr_err("wcnss: cannot open the smd command channel %s: %d\n", WCNSS_CTRL_CHANNEL, ret); return -ENODEV; } smd_disable_read_intr(penv->smd_ch); return 0; } static struct platform_driver wcnss_ctrl_driver = { .driver = { .name = "WCNSS_CTRL", .owner = THIS_MODULE, }, .probe = wcnss_ctrl_probe, .remove = __devexit_p(wcnss_ctrl_remove), }; struct device *wcnss_wlan_get_device(void) { if (penv && penv->pdev && penv->smd_channel_ready) return &penv->pdev->dev; return NULL; } EXPORT_SYMBOL(wcnss_wlan_get_device); struct platform_device *wcnss_get_platform_device(void) { if (penv && penv->pdev) return penv->pdev; return NULL; } EXPORT_SYMBOL(wcnss_get_platform_device); struct wcnss_wlan_config *wcnss_get_wlan_config(void) { if (penv && penv->pdev) return &penv->wlan_config; return NULL; } EXPORT_SYMBOL(wcnss_get_wlan_config); int wcnss_device_ready(void) { printk("wcnss_debug : wcnss_device_ready(): penv->nv_downloaded = 0x%x\r\n", penv->nv_downloaded); if (penv && penv->pdev && penv->nv_downloaded) return 1; return 0; } EXPORT_SYMBOL(wcnss_device_ready); struct resource *wcnss_wlan_get_memory_map(struct device *dev) { if (penv && dev && (dev == &penv->pdev->dev) && penv->smd_channel_ready) return penv->mmio_res; return NULL; } EXPORT_SYMBOL(wcnss_wlan_get_memory_map); int wcnss_wlan_get_dxe_tx_irq(struct device *dev) { if (penv && dev && (dev == &penv->pdev->dev) && penv->tx_irq_res && penv->smd_channel_ready) return penv->tx_irq_res->start; return WCNSS_WLAN_IRQ_INVALID; } EXPORT_SYMBOL(wcnss_wlan_get_dxe_tx_irq); int wcnss_wlan_get_dxe_rx_irq(struct device *dev) { if (penv && dev && (dev == &penv->pdev->dev) && penv->rx_irq_res && penv->smd_channel_ready) return 
penv->rx_irq_res->start; return WCNSS_WLAN_IRQ_INVALID; } EXPORT_SYMBOL(wcnss_wlan_get_dxe_rx_irq); void wcnss_wlan_register_pm_ops(struct device *dev, const struct dev_pm_ops *pm_ops) { if (penv && dev && (dev == &penv->pdev->dev) && pm_ops) penv->pm_ops = pm_ops; } EXPORT_SYMBOL(wcnss_wlan_register_pm_ops); void wcnss_wlan_unregister_pm_ops(struct device *dev, const struct dev_pm_ops *pm_ops) { if (penv && dev && (dev == &penv->pdev->dev) && pm_ops) { if (pm_ops->suspend != penv->pm_ops->suspend || pm_ops->resume != penv->pm_ops->resume) pr_err("PM APIs dont match with registered APIs\n"); penv->pm_ops = NULL; } } EXPORT_SYMBOL(wcnss_wlan_unregister_pm_ops); void wcnss_register_thermal_mitigation(struct device *dev, void (*tm_notify)(struct device *, int)) { if (penv && dev && tm_notify) penv->tm_notify = tm_notify; } EXPORT_SYMBOL(wcnss_register_thermal_mitigation); void wcnss_unregister_thermal_mitigation( void (*tm_notify)(struct device *, int)) { if (penv && tm_notify) { if (tm_notify != penv->tm_notify) pr_err("tm_notify doesn't match registered\n"); penv->tm_notify = NULL; } } EXPORT_SYMBOL(wcnss_unregister_thermal_mitigation); unsigned int wcnss_get_serial_number(void) { if (penv) return penv->serial_number; return 0; } EXPORT_SYMBOL(wcnss_get_serial_number); int wcnss_get_wlan_mac_address(char mac_addr[WLAN_MAC_ADDR_SIZE]) { if (!penv) return -ENODEV; memcpy(mac_addr, penv->wlan_nv_macAddr, WLAN_MAC_ADDR_SIZE); pr_debug("%s: Get MAC Addr:" MAC_ADDRESS_STR "\n", __func__, penv->wlan_nv_macAddr[0], penv->wlan_nv_macAddr[1], penv->wlan_nv_macAddr[2], penv->wlan_nv_macAddr[3], penv->wlan_nv_macAddr[4], penv->wlan_nv_macAddr[5]); return 0; } EXPORT_SYMBOL(wcnss_get_wlan_mac_address); static int enable_wcnss_suspend_notify; static int enable_wcnss_suspend_notify_set(const char *val, struct kernel_param *kp) { int ret; ret = param_set_int(val, kp); if (ret) return ret; if (enable_wcnss_suspend_notify) pr_debug("Suspend notification activated for wcnss\n"); 
return 0; } module_param_call(enable_wcnss_suspend_notify, enable_wcnss_suspend_notify_set, param_get_int, &enable_wcnss_suspend_notify, S_IRUGO | S_IWUSR); int wcnss_xo_auto_detect_enabled(void) { return (has_autodetect_xo == 1 ? 1 : 0); } int wcnss_wlan_iris_xo_mode(void) { if (penv && penv->pdev && penv->smd_channel_ready) return penv->iris_xo_mode_set; return -ENODEV; } EXPORT_SYMBOL(wcnss_wlan_iris_xo_mode); void wcnss_suspend_notify(void) { void __iomem *pmu_spare_reg; u32 reg = 0; unsigned long flags; if (!enable_wcnss_suspend_notify) return; if (wcnss_hardware_type() == WCNSS_PRONTO_HW) return; pmu_spare_reg = penv->msm_wcnss_base + RIVA_SPARE_OFFSET; spin_lock_irqsave(&reg_spinlock, flags); reg = readl_relaxed(pmu_spare_reg); reg |= RIVA_SUSPEND_BIT; writel_relaxed(reg, pmu_spare_reg); spin_unlock_irqrestore(&reg_spinlock, flags); } EXPORT_SYMBOL(wcnss_suspend_notify); void wcnss_resume_notify(void) { void __iomem *pmu_spare_reg; u32 reg = 0; unsigned long flags; if (!enable_wcnss_suspend_notify) return; if (wcnss_hardware_type() == WCNSS_PRONTO_HW) return; pmu_spare_reg = penv->msm_wcnss_base + RIVA_SPARE_OFFSET; spin_lock_irqsave(&reg_spinlock, flags); reg = readl_relaxed(pmu_spare_reg); reg &= ~RIVA_SUSPEND_BIT; writel_relaxed(reg, pmu_spare_reg); spin_unlock_irqrestore(&reg_spinlock, flags); } EXPORT_SYMBOL(wcnss_resume_notify); static int wcnss_wlan_suspend(struct device *dev) { if (penv && dev && (dev == &penv->pdev->dev) && penv->smd_channel_ready && penv->pm_ops && penv->pm_ops->suspend) return penv->pm_ops->suspend(dev); return 0; } static int wcnss_wlan_resume(struct device *dev) { if (penv && dev && (dev == &penv->pdev->dev) && penv->smd_channel_ready && penv->pm_ops && penv->pm_ops->resume) return penv->pm_ops->resume(dev); return 0; } void wcnss_prevent_suspend() { if (penv) wake_lock(&penv->wcnss_wake_lock); } EXPORT_SYMBOL(wcnss_prevent_suspend); void wcnss_allow_suspend() { if (penv) wake_unlock(&penv->wcnss_wake_lock); } 
EXPORT_SYMBOL(wcnss_allow_suspend);

/* Return the detected WCNSS hardware type (set at probe time), or
 * -ENODEV before the driver has probed. */
int wcnss_hardware_type(void)
{
	if (penv)
		return penv->wcnss_hw_type;
	else
		return -ENODEV;
}
EXPORT_SYMBOL(wcnss_hardware_type);

/* Nonzero once firmware calibration data is available; -ENODEV before
 * probe. */
int fw_cal_data_available(void)
{
	if (penv)
		return penv->fw_cal_available;
	else
		return -ENODEV;
}

/* Configured WLAN RX buffer count, or the compile-time default when
 * the driver has not probed yet. */
u32 wcnss_get_wlan_rx_buff_count(void)
{
	if (penv)
		return penv->wlan_rx_buff_count;
	else
		return WCNSS_DEF_WLAN_RX_BUFF_COUNT;
}
EXPORT_SYMBOL(wcnss_get_wlan_rx_buff_count);

/*
 * Cache the caller-supplied unsafe-channel list in driver state.
 * Lists longer than WCNSS_MAX_CH_NUM (or NULL input / unprobed driver)
 * fail with -ENODEV; returns 0 on success.
 */
int wcnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count)
{
	if (penv && unsafe_ch_list &&
	    (ch_count <= WCNSS_MAX_CH_NUM)) {
		memcpy((char *)penv->unsafe_ch_list,
		       (char *)unsafe_ch_list, ch_count * sizeof(u16));
		penv->unsafe_ch_count = ch_count;
		return 0;
	} else
		return -ENODEV;
}
EXPORT_SYMBOL(wcnss_set_wlan_unsafe_channel);

/*
 * Copy the cached unsafe-channel list into the caller's buffer and
 * report its length through *ch_count. Fails with -ENODEV when the
 * buffer is too small (in bytes) or the driver has not probed.
 */
int wcnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 buffer_size,
				  u16 *ch_count)
{
	if (penv) {
		if (buffer_size < penv->unsafe_ch_count * sizeof(u16))
			return -ENODEV;
		memcpy((char *)unsafe_ch_list,
		       (char *)penv->unsafe_ch_list,
		       penv->unsafe_ch_count * sizeof(u16));
		*ch_count = penv->unsafe_ch_count;
		return 0;
	} else
		return -ENODEV;
}
EXPORT_SYMBOL(wcnss_get_wlan_unsafe_channel);

/*
 * Send one frame to WCNSS over the SMD control channel.
 * Returns the smd_write() result on success, -ENOSPC when the channel
 * FIFO lacks space, or -ENODEV on a short write.
 * NOTE(review): penv / penv->smd_ch are dereferenced without a NULL
 * check — confirm every caller runs only after the channel is open.
 * NOTE(review): the trailing printk has no KERN_<level> prefix and
 * fires on every frame; looks like leftover debug output.
 */
static int wcnss_smd_tx(void *data, int len)
{
	int ret = 0;

	ret = smd_write_avail(penv->smd_ch);
	if (ret < len) {
		pr_err("wcnss: no space available for smd frame\n");
		return -ENOSPC;
	}
	ret = smd_write(penv->smd_ch, data, len);
	if (ret < len) {
		pr_err("wcnss: failed to write Command %d", len);
		ret = -ENODEV;
	}
	printk("wcnss_debug: wcnss_smd_tx() finish and exit \r\n");
	return ret;
}

/*
 * QPNP ADC threshold callback: battery voltage crossed the armed
 * threshold. Re-arm the monitor for the opposite direction around
 * WCNSS_VBATT_THRESHOLD (with WCNSS_VBATT_GUARD hysteresis) and
 * schedule vbatt_work to forward the new state to firmware.
 */
static void wcnss_notify_vbat(enum qpnp_tm_state state, void *ctx)
{
	mutex_lock(&penv->vbat_monitor_mutex);
	/* Drop any pending report; it will be rescheduled below. */
	cancel_delayed_work_sync(&penv->vbatt_work);

	if (state == ADC_TM_LOW_STATE) {
		pr_debug("wcnss: low voltage notification triggered\n");
		penv->vbat_monitor_params.state_request =
			ADC_TM_HIGH_THR_ENABLE;
		penv->vbat_monitor_params.high_thr =
			WCNSS_VBATT_THRESHOLD + WCNSS_VBATT_GUARD;
		penv->vbat_monitor_params.low_thr = 0;
	} else if (state == ADC_TM_HIGH_STATE) {
		penv->vbat_monitor_params.state_request =
			ADC_TM_LOW_THR_ENABLE;
		penv->vbat_monitor_params.low_thr =
			WCNSS_VBATT_THRESHOLD - WCNSS_VBATT_GUARD;
		penv->vbat_monitor_params.high_thr = 0;
		pr_debug("wcnss: high voltage notification triggered\n");
	} else {
		pr_debug("wcnss: unknown voltage notification state: %d\n",
			 state);
		mutex_unlock(&penv->vbat_monitor_mutex);
		return;
	}

	pr_debug("wcnss: set low thr to %d and high to %d\n",
		 penv->vbat_monitor_params.low_thr,
		 penv->vbat_monitor_params.high_thr);

	qpnp_adc_tm_channel_measure(penv->adc_tm_dev,
				    &penv->vbat_monitor_params);
	schedule_delayed_work(&penv->vbatt_work, msecs_to_jiffies(2000));
	mutex_unlock(&penv->vbat_monitor_mutex);
}

/*
 * Arm the QPNP ADC battery monitor on VBAT_SNS with both thresholds at
 * WCNSS_VBATT_THRESHOLD so wcnss_notify_vbat() fires on a crossing in
 * either direction. Returns 0 on success, -1 when no ADC device is
 * present, or the qpnp_adc_tm_channel_measure() error code.
 */
static int wcnss_setup_vbat_monitoring(void)
{
	int rc = -1;

	if (!penv->adc_tm_dev) {
		pr_err("wcnss: not setting up vbatt\n");
		return rc;
	}
	penv->vbat_monitor_params.low_thr = WCNSS_VBATT_THRESHOLD;
	penv->vbat_monitor_params.high_thr = WCNSS_VBATT_THRESHOLD;
	penv->vbat_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_ENABLE;
	penv->vbat_monitor_params.channel = VBAT_SNS;
	penv->vbat_monitor_params.btm_ctx = (void *)penv;
	penv->vbat_monitor_params.timer_interval = ADC_MEAS1_INTERVAL_1S;
	penv->vbat_monitor_params.threshold_notification = &wcnss_notify_vbat;
	pr_debug("wcnss: set low thr to %d and high to %d\n",
		 penv->vbat_monitor_params.low_thr,
		 penv->vbat_monitor_params.high_thr);

	rc = qpnp_adc_tm_channel_measure(penv->adc_tm_dev,
					 &penv->vbat_monitor_params);
	if (rc)
		pr_err("wcnss: tm setup failed: %d\n", rc);

	return rc;
}

/*
 * Deferred work: report the battery level (HIGH/LOW relative to
 * WCNSS_VBATT_THRESHOLD) to WCNSS firmware, but only when it differs
 * from the last state sent (fw_vbatt_state).
 */
static void wcnss_update_vbatt(struct work_struct *work)
{
	struct vbatt_message vbatt_msg;
	int ret = 0;

	vbatt_msg.hdr.msg_type = WCNSS_VBATT_LEVEL_IND;
	vbatt_msg.hdr.msg_len = sizeof(struct vbatt_message);
	vbatt_msg.vbatt.threshold = WCNSS_VBATT_THRESHOLD;

	mutex_lock(&penv->vbat_monitor_mutex);
	/* low_thr armed means the last crossing was upward (high). */
	if (penv->vbat_monitor_params.low_thr &&
	    (penv->fw_vbatt_state == WCNSS_VBATT_LOW ||
	     penv->fw_vbatt_state == WCNSS_CONFIG_UNSPECIFIED)) {
		vbatt_msg.vbatt.curr_volt =
WCNSS_VBATT_HIGH; penv->fw_vbatt_state = WCNSS_VBATT_HIGH; pr_debug("wcnss: send HIGH BATT to FW\n"); } else if (!penv->vbat_monitor_params.low_thr && (penv->fw_vbatt_state == WCNSS_VBATT_HIGH || penv->fw_vbatt_state == WCNSS_CONFIG_UNSPECIFIED)){ vbatt_msg.vbatt.curr_volt = WCNSS_VBATT_LOW; penv->fw_vbatt_state = WCNSS_VBATT_LOW; pr_debug("wcnss: send LOW BATT to FW\n"); } else { mutex_unlock(&penv->vbat_monitor_mutex); return; } mutex_unlock(&penv->vbat_monitor_mutex); ret = wcnss_smd_tx(&vbatt_msg, vbatt_msg.hdr.msg_len); if (ret < 0) pr_err("wcnss: smd tx failed\n"); return; } static unsigned char wcnss_fw_status(void) { int len = 0; int rc = 0; unsigned char fw_status = 0xFF; len = smd_read_avail(penv->smd_ch); if (len < 1) { pr_err("%s: invalid firmware status", __func__); return fw_status; } rc = smd_read(penv->smd_ch, &fw_status, 1); if (rc < 0) { pr_err("%s: incomplete data read from smd\n", __func__); return fw_status; } return fw_status; } static void wcnss_send_cal_rsp(unsigned char fw_status) { struct smd_msg_hdr *rsphdr; unsigned char *msg = NULL; int rc; msg = kmalloc((sizeof(struct smd_msg_hdr) + 1), GFP_KERNEL); if (NULL == msg) { pr_err("wcnss: %s: failed to get memory\n", __func__); return; } rsphdr = (struct smd_msg_hdr *)msg; rsphdr->msg_type = WCNSS_CALDATA_UPLD_RSP; rsphdr->msg_len = sizeof(struct smd_msg_hdr) + 1; memcpy(msg+sizeof(struct smd_msg_hdr), &fw_status, 1); rc = wcnss_smd_tx(msg, rsphdr->msg_len); if (rc < 0) pr_err("wcnss: smd tx failed\n"); kfree(msg); } void extract_cal_data(int len) { int rc; struct cal_data_params calhdr; unsigned char fw_status = WCNSS_RESP_FAIL; if (len < sizeof(struct cal_data_params)) { pr_err("wcnss: incomplete cal header length\n"); return; } rc = smd_read(penv->smd_ch, (unsigned char *)&calhdr, sizeof(struct cal_data_params)); if (rc < sizeof(struct cal_data_params)) { pr_err("wcnss: incomplete cal header read from smd\n"); return; } if (penv->fw_cal_exp_frag != calhdr.frag_number) { pr_err("wcnss: 
Invalid frgament"); goto exit; } if (calhdr.frag_size > WCNSS_MAX_FRAME_SIZE) { pr_err("wcnss: Invalid fragment size"); goto exit; } if (penv->fw_cal_available) { smd_read(penv->smd_ch, NULL, calhdr.frag_size); penv->fw_cal_exp_frag++; if (calhdr.msg_flags & LAST_FRAGMENT) { penv->fw_cal_exp_frag = 0; goto exit; } return; } if (0 == calhdr.frag_number) { if (calhdr.total_size > MAX_CALIBRATED_DATA_SIZE) { pr_err("wcnss: Invalid cal data size %d", calhdr.total_size); goto exit; } kfree(penv->fw_cal_data); penv->fw_cal_rcvd = 0; penv->fw_cal_data = kmalloc(calhdr.total_size, GFP_KERNEL); if (penv->fw_cal_data == NULL) { smd_read(penv->smd_ch, NULL, calhdr.frag_size); goto exit; } } mutex_lock(&penv->dev_lock); if (penv->fw_cal_rcvd + calhdr.frag_size > MAX_CALIBRATED_DATA_SIZE) { pr_err("calibrated data size is more than expected %d", penv->fw_cal_rcvd + calhdr.frag_size); penv->fw_cal_exp_frag = 0; penv->fw_cal_rcvd = 0; smd_read(penv->smd_ch, NULL, calhdr.frag_size); goto unlock_exit; } rc = smd_read(penv->smd_ch, penv->fw_cal_data + penv->fw_cal_rcvd, calhdr.frag_size); if (rc < calhdr.frag_size) goto unlock_exit; penv->fw_cal_exp_frag++; penv->fw_cal_rcvd += calhdr.frag_size; if (calhdr.msg_flags & LAST_FRAGMENT) { penv->fw_cal_exp_frag = 0; penv->fw_cal_available = true; pr_info("wcnss: cal data collection completed\n"); } mutex_unlock(&penv->dev_lock); wake_up(&penv->read_wait); if (penv->fw_cal_available) { fw_status = WCNSS_RESP_SUCCESS; wcnss_send_cal_rsp(fw_status); } return; unlock_exit: mutex_unlock(&penv->dev_lock); exit: wcnss_send_cal_rsp(fw_status); return; } static void wcnssctrl_rx_handler(struct work_struct *worker) { int len = 0; int rc = 0; unsigned char buf[sizeof(struct wcnss_version)]; unsigned char build[WCNSS_MAX_BUILD_VER_LEN+1]; struct smd_msg_hdr *phdr; struct smd_msg_hdr smd_msg; struct wcnss_version *pversion; int hw_type; unsigned char fw_status = 0; len = smd_read_avail(penv->smd_ch); if (len > WCNSS_MAX_FRAME_SIZE) { pr_err("wcnss: 
frame larger than the allowed size\n"); smd_read(penv->smd_ch, NULL, len); return; } if (len <= 0) return; rc = smd_read(penv->smd_ch, buf, sizeof(struct smd_msg_hdr)); if (rc < sizeof(struct smd_msg_hdr)) { pr_err("wcnss: incomplete header read from smd\n"); return; } len -= sizeof(struct smd_msg_hdr); phdr = (struct smd_msg_hdr *)buf; switch (phdr->msg_type) { case WCNSS_VERSION_RSP: if (len != sizeof(struct wcnss_version) - sizeof(struct smd_msg_hdr)) { pr_err("wcnss: invalid version data from wcnss %d\n", len); return; } rc = smd_read(penv->smd_ch, buf+sizeof(struct smd_msg_hdr), len); if (rc < len) { pr_err("wcnss: incomplete data read from smd\n"); return; } pversion = (struct wcnss_version *)buf; penv->fw_major = pversion->major; penv->fw_minor = pversion->minor; snprintf(penv->wcnss_version, WCNSS_VERSION_LEN, "%02x%02x%02x%02x", pversion->major, pversion->minor, pversion->version, pversion->revision); pr_info("wcnss: version %s\n", penv->wcnss_version); hw_type = wcnss_hardware_type(); switch (hw_type) { case WCNSS_RIVA_HW: if ((pversion->major >= 1) && (pversion->minor >= 4)) { pr_info("wcnss: schedule dnld work for riva\n"); schedule_work(&penv->wcnssctrl_nvbin_dnld_work); } break; case WCNSS_PRONTO_HW: smd_msg.msg_type = WCNSS_BUILD_VER_REQ; smd_msg.msg_len = sizeof(smd_msg); rc = wcnss_smd_tx(&smd_msg, smd_msg.msg_len); if (rc < 0) pr_err("wcnss: smd tx failed: %s\n", __func__); if ((pversion->major >= 1) && (pversion->minor >= 4)) { pr_info("wcnss: schedule dnld work for pronto\n"); schedule_work(&penv->wcnssctrl_nvbin_dnld_work); } break; default: pr_info("wcnss: unknown hw type (%d), will not schedule dnld work\n", hw_type); break; } break; case WCNSS_BUILD_VER_RSP: if (len > WCNSS_MAX_BUILD_VER_LEN) { pr_err("wcnss: invalid build version data from wcnss %d\n", len); return; } rc = smd_read(penv->smd_ch, build, len); if (rc < len) { pr_err("wcnss: incomplete data read from smd\n"); return; } build[len] = 0; pr_info("wcnss: build version %s\n", 
			build);
		break;

	case WCNSS_NVBIN_DNLD_RSP:
		/* NV download acked; a firmware status byte follows. */
		penv->nv_downloaded = true;
		fw_status = wcnss_fw_status();
		pr_debug("wcnss: received WCNSS_NVBIN_DNLD_RSP from ccpu %u\n",
			fw_status);
		/* NV is in place; start watching battery voltage now. */
		wcnss_setup_vbat_monitoring();
		break;

	case WCNSS_CALDATA_DNLD_RSP:
		penv->nv_downloaded = true;
		fw_status = wcnss_fw_status();
		pr_debug("wcnss: received WCNSS_CALDATA_DNLD_RSP from ccpu %u\n",
			fw_status);
		break;

	case WCNSS_CALDATA_UPLD_REQ:
		extract_cal_data(len);
		break;

	default:
		pr_err("wcnss: invalid message type %d\n", phdr->msg_type);
	}
	return;
}

/* Work item: ask the firmware for its version over SMD. */
static void wcnss_send_version_req(struct work_struct *worker)
{
	struct smd_msg_hdr smd_msg;
	int ret = 0;

	smd_msg.msg_type = WCNSS_VERSION_REQ;
	smd_msg.msg_len = sizeof(smd_msg);
	ret = wcnss_smd_tx(&smd_msg, smd_msg.msg_len);
	if (ret < 0)
		pr_err("wcnss: smd tx failed\n");

	return;
}

/* Serializes NV download against system suspend (see wcnss_pm_notify). */
static DECLARE_RWSEM(wcnss_pm_sem);

/* Fetch the NV blob via request_firmware() and stream it to the firmware
 * in NV_FRAGMENT_SIZE chunks, retrying briefly on a full SMD FIFO. */
static void wcnss_nvbin_dnld(void)
{
	int ret = 0;
	struct nvbin_dnld_req_msg *dnld_req_msg;
	unsigned short total_fragments = 0;
	unsigned short count = 0;
	unsigned short retry_count = 0;
	unsigned short cur_frag_size = 0;
	unsigned char *outbuffer = NULL;
	const void *nv_blob_addr = NULL;
	unsigned int nv_blob_size = 0;
	const struct firmware *nv = NULL;
	struct device *dev = &penv->pdev->dev;

	/* Taken for read so PM_SUSPEND_PREPARE (write) blocks the download. */
	down_read(&wcnss_pm_sem);

	ret = request_firmware(&nv, NVBIN_FILE, dev);
	if (ret || !nv || !nv->data || !nv->size) {
		pr_err("wcnss: %s: request_firmware failed for %s\n",
			__func__, NVBIN_FILE);
		goto out;
	}

	/* First 4 bytes of the image are skipped; payload starts after them.
	 * NOTE(review): presumably a 4-byte header — confirm NV file format. */
	nv_blob_addr = nv->data + 4;
	nv_blob_size = nv->size - 4;

	total_fragments = TOTALFRAGMENTS(nv_blob_size);

	pr_info("wcnss: NV bin size: %d, total_fragments: %d\n",
		nv_blob_size, total_fragments);

	outbuffer = kmalloc((sizeof(struct nvbin_dnld_req_msg) +
		NV_FRAGMENT_SIZE), GFP_KERNEL);
	if (NULL == outbuffer) {
		pr_err("wcnss: %s: failed to get buffer\n", __func__);
		goto err_free_nv;
	}

	dnld_req_msg = (struct nvbin_dnld_req_msg *)outbuffer;
	dnld_req_msg->hdr.msg_type = WCNSS_NVBIN_DNLD_REQ;
	dnld_req_msg->dnld_req_params.msg_flags = 0;

	for (count = 0; count < total_fragments; count++) {
		dnld_req_msg->dnld_req_params.frag_number = count;

		if (count == (total_fragments - 1)) {
			/* Last fragment may be short; flag it and advertise
			 * that we can accept calibration data back. */
			cur_frag_size = nv_blob_size % NV_FRAGMENT_SIZE;
			if (!cur_frag_size)
				cur_frag_size = NV_FRAGMENT_SIZE;
			dnld_req_msg->dnld_req_params.msg_flags |=
				LAST_FRAGMENT;
			dnld_req_msg->dnld_req_params.msg_flags |=
				CAN_RECEIVE_CALDATA;
		} else {
			cur_frag_size = NV_FRAGMENT_SIZE;
			dnld_req_msg->dnld_req_params.msg_flags &=
				~LAST_FRAGMENT;
		}

		dnld_req_msg->dnld_req_params.nvbin_buffer_size =
			cur_frag_size;
		dnld_req_msg->hdr.msg_len =
			sizeof(struct nvbin_dnld_req_msg) + cur_frag_size;

		memcpy((outbuffer + sizeof(struct nvbin_dnld_req_msg)),
			(nv_blob_addr + count * NV_FRAGMENT_SIZE),
			cur_frag_size);

		ret = wcnss_smd_tx(outbuffer, dnld_req_msg->hdr.msg_len);

		/* On a full FIFO, back off 20ms and retry up to 4 times. */
		retry_count = 0;
		while ((ret == -ENOSPC) && (retry_count <= 3)) {
			pr_debug("wcnss: %s: smd tx failed, ENOSPC\n",
				__func__);
			pr_debug("fragment: %d, len: %d, TotFragments: %d, retry_count: %d\n",
				count, dnld_req_msg->hdr.msg_len,
				total_fragments, retry_count);
			msleep(20);
			retry_count++;
			ret = wcnss_smd_tx(outbuffer,
				dnld_req_msg->hdr.msg_len);
		}

		if (ret < 0) {
			pr_err("wcnss: %s: smd tx failed\n", __func__);
			pr_err("fragment %d, len: %d, TotFragments: %d, retry_count: %d\n",
				count, dnld_req_msg->hdr.msg_len,
				total_fragments, retry_count);
			goto err_dnld;
		}
	}

err_dnld:
	kfree(outbuffer);

err_free_nv:
	release_firmware(nv);

out:
	up_read(&wcnss_pm_sem);

	return;
}

/* Stream a calibration blob to the firmware in NV_FRAGMENT_SIZE chunks;
 * msg_to_follow marks that more messages come after the last fragment.
 * (Definition continues past this chunk boundary.) */
static void wcnss_caldata_dnld(const void *cal_data,
		unsigned int cal_data_size, bool msg_to_follow)
{
	int ret = 0;
	struct cal_data_msg *cal_msg;
	unsigned short total_fragments = 0;
	unsigned short count = 0;
	unsigned short retry_count = 0;
	unsigned short cur_frag_size = 0;
	unsigned char *outbuffer = NULL;

	total_fragments = TOTALFRAGMENTS(cal_data_size);

	outbuffer = kmalloc((sizeof(struct cal_data_msg) +
		NV_FRAGMENT_SIZE), GFP_KERNEL);
	if (NULL == outbuffer) {
		pr_err("wcnss: %s: failed to get buffer\n", __func__);
		return;
	}

	cal_msg = (struct cal_data_msg *)outbuffer;
	cal_msg->hdr.msg_type = WCNSS_CALDATA_DNLD_REQ;
	cal_msg->cal_params.msg_flags = 0;

	for (count = 0; count < total_fragments; count++) {
		cal_msg->cal_params.frag_number = count;

		if (count == (total_fragments - 1)) {
			/* Final (possibly short) fragment. */
			cur_frag_size = cal_data_size % NV_FRAGMENT_SIZE;
			if (!cur_frag_size)
				cur_frag_size = NV_FRAGMENT_SIZE;
			cal_msg->cal_params.msg_flags |= LAST_FRAGMENT;
			if (msg_to_follow)
				cal_msg->cal_params.msg_flags |=
					MESSAGE_TO_FOLLOW;
		} else {
			cur_frag_size = NV_FRAGMENT_SIZE;
			cal_msg->cal_params.msg_flags &= ~LAST_FRAGMENT;
		}

		cal_msg->cal_params.total_size = cal_data_size;
		cal_msg->cal_params.frag_size = cur_frag_size;
		cal_msg->hdr.msg_len =
			sizeof(struct cal_data_msg) + cur_frag_size;

		memcpy((outbuffer + sizeof(struct cal_data_msg)),
			(cal_data + count * NV_FRAGMENT_SIZE),
			cur_frag_size);

		ret = wcnss_smd_tx(outbuffer, cal_msg->hdr.msg_len);

		/* Same ENOSPC backoff/retry policy as wcnss_nvbin_dnld(). */
		retry_count = 0;
		while ((ret == -ENOSPC) && (retry_count <= 3)) {
			pr_debug("wcnss: %s: smd tx failed, ENOSPC\n",
				__func__);
			pr_debug("fragment: %d, len: %d, TotFragments: %d, retry_count: %d\n",
				count, cal_msg->hdr.msg_len,
				total_fragments, retry_count);
			msleep(20);
			retry_count++;
			ret = wcnss_smd_tx(outbuffer, cal_msg->hdr.msg_len);
		}

		if (ret < 0) {
			pr_err("wcnss: %s: smd tx failed\n", __func__);
			pr_err("fragment %d, len: %d, TotFragments: %d, retry_count: %d\n",
				count, cal_msg->hdr.msg_len,
				total_fragments, retry_count);
			goto err_dnld;
		}
	}

err_dnld:
	kfree(outbuffer);
	return;
}

/* NV-download work: on cal-capable firmware, wait up to ~2.5s for user
 * calibration data before downloading cal (fw cal preferred) and then
 * the NV blob.  (Definition continues past this chunk boundary.) */
static void wcnss_nvbin_dnld_main(struct work_struct *worker)
{
	int retry = 0;

	if (!FW_CALDATA_CAPABLE())
		goto nv_download;

	if (!penv->fw_cal_available &&
		WCNSS_CONFIG_UNSPECIFIED != has_calibrated_data &&
		!penv->user_cal_available) {
		/* Poll for userspace-provided cal data, 5 x 500ms. */
		while (!penv->user_cal_available && retry++ < 5)
			msleep(500);
	}

	if (penv->fw_cal_available) {
		pr_info_ratelimited("wcnss: cal download, using fw cal");
		wcnss_caldata_dnld(penv->fw_cal_data, penv->fw_cal_rcvd, true);
	} else if (penv->user_cal_available) {
		pr_info_ratelimited("wcnss: cal download, using user cal");
		wcnss_caldata_dnld(penv->user_cal_data,
			penv->user_cal_rcvd, true);
	}

nv_download:
	pr_info_ratelimited("wcnss: NV download");
	wcnss_nvbin_dnld();

	return;
}

/* PM notifier: take wcnss_pm_sem for write across suspend so an
 * in-flight NV download (which holds it for read) completes first. */
static int wcnss_pm_notify(struct notifier_block *b,
			unsigned long event, void *p)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
		down_write(&wcnss_pm_sem);
		break;

	case PM_POST_SUSPEND:
		up_write(&wcnss_pm_sem);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block wcnss_pm_notifier = {
	.notifier_call = wcnss_pm_notify,
};

/* Allow a single opener of the user-control misc device. */
static int wcnss_ctrl_open(struct inode *inode, struct file *file)
{
	int rc = 0;

	if (!penv || penv->ctrl_device_opened)
		return -EFAULT;

	penv->ctrl_device_opened = 1;

	return rc;
}

/* Decode one user-space control command.  The opcode is big-endian in
 * buf[0..1]; payload layout depends on the opcode. */
void process_usr_ctrl_cmd(u8 *buf, size_t len)
{
	u16 cmd = buf[0] << 8 | buf[1];

	switch (cmd) {

	case WCNSS_USR_SERIAL_NUM:
		if (WCNSS_MIN_SERIAL_LEN > len) {
			pr_err("%s: Invalid serial number\n", __func__);
			return;
		}
		/* Big-endian 32-bit serial in buf[2..5]. */
		penv->serial_number = buf[2] << 24 | buf[3] << 16
			| buf[4] << 8 | buf[5];
		break;

	case WCNSS_USR_HAS_CAL_DATA:
		if (1 < buf[2])
			pr_err("%s: Invalid data for cal %d\n", __func__,
				buf[2]);
		has_calibrated_data = buf[2];
		break;

	case WCNSS_USR_WLAN_MAC_ADDR:
		memcpy(&penv->wlan_nv_macAddr, &buf[2],
			sizeof(penv->wlan_nv_macAddr));

		pr_debug("%s: MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
			penv->wlan_nv_macAddr[0], penv->wlan_nv_macAddr[1],
			penv->wlan_nv_macAddr[2], penv->wlan_nv_macAddr[3],
			penv->wlan_nv_macAddr[4], penv->wlan_nv_macAddr[5]);
		break;

	default:
		pr_err("%s: Invalid command %d\n", __func__, cmd);
		break;
	}
}

/* write() on the control device: copy one bounded command from userspace
 * and process it under ctrl_lock.  Returns the copy_from_user() residue
 * (0 on success), not the byte count. */
static ssize_t wcnss_ctrl_write(struct file *fp, const char __user
			*user_buffer, size_t count, loff_t *position)
{
	int rc = 0;
	u8 buf[WCNSS_MAX_CMD_LEN];

	if (!penv || !penv->ctrl_device_opened || WCNSS_MAX_CMD_LEN < count
			|| WCNSS_MIN_CMD_LEN > count)
		return -EFAULT;

	mutex_lock(&penv->ctrl_lock);
	rc = copy_from_user(buf, user_buffer, count);
	if (0 == rc)
		process_usr_ctrl_cmd(buf, count);
	mutex_unlock(&penv->ctrl_lock);

	return rc;
}

static const struct file_operations wcnss_ctrl_fops = {
	.owner = THIS_MODULE,
	.open = wcnss_ctrl_open,
	.write = wcnss_ctrl_write,
};

static struct miscdevice wcnss_usr_ctrl = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = CTRL_DEVICE,
	.fops = &wcnss_ctrl_fops,
};

/* One-shot bring-up, triggered from first open of the cal-data device:
 * parse DT config, configure GPIOs, power on, map register spaces and
 * load the firmware via PIL.  (Definition continues past this chunk.) */
static int wcnss_trigger_config(struct platform_device *pdev)
{
	int ret;
	struct qcom_wcnss_opts *pdata;
	unsigned long wcnss_phys_addr;
	int size = 0;
	struct resource *res;
	int has_pronto_hw = of_property_read_bool(pdev->dev.of_node,
			"qcom,has-pronto-hw");

	if (of_property_read_u32(pdev->dev.of_node,
			"qcom,wlan-rx-buff-count", &penv->wlan_rx_buff_count)) {
		penv->wlan_rx_buff_count = WCNSS_DEF_WLAN_RX_BUFF_COUNT;
	}

	/* Run the bring-up only once. */
	if (penv->triggered)
		return 0;
	penv->triggered = 1;

	pdata = pdev->dev.platform_data;
	if (WCNSS_CONFIG_UNSPECIFIED == has_48mhz_xo) {
		if (has_pronto_hw) {
			has_48mhz_xo = of_property_read_bool(pdev->dev.of_node,
				"qcom,has-48mhz-xo");
		} else {
			has_48mhz_xo = pdata->has_48mhz_xo;
		}
	}
	penv->wcnss_hw_type = (has_pronto_hw) ? WCNSS_PRONTO_HW : WCNSS_RIVA_HW;
	penv->wlan_config.use_48mhz_xo = has_48mhz_xo;

	if (WCNSS_CONFIG_UNSPECIFIED == has_autodetect_xo && has_pronto_hw) {
		has_autodetect_xo = of_property_read_bool(pdev->dev.of_node,
			"qcom,has-autodetect-xo");
	}

	penv->thermal_mitigation = 0;
	strlcpy(penv->wcnss_version, "INVALID", WCNSS_VERSION_LEN);

	if (!has_pronto_hw) {
		penv->gpios_5wire = platform_get_resource_byname(pdev,
			IORESOURCE_IO, "wcnss_gpios_5wire");
		if (!penv->gpios_5wire) {
			dev_err(&pdev->dev, "insufficient IO resources\n");
			ret = -ENOENT;
			goto fail_gpio_res;
		}
		ret = wcnss_gpios_config(penv->gpios_5wire, true);
	} else
		ret = wcnss_pronto_gpios_config(&pdev->dev, true);

	if (ret) {
		dev_err(&pdev->dev, "WCNSS gpios config failed.\n");
		goto fail_gpio_res;
	}

	ret = wcnss_wlan_power(&pdev->dev, &penv->wlan_config,
		WCNSS_WLAN_SWITCH_ON, &penv->iris_xo_mode_set);
	if (ret) {
		dev_err(&pdev->dev, "WCNSS Power-up failed.\n");
		goto fail_power;
	}

	penv->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
		"wcnss_mmio");
	penv->tx_irq_res = platform_get_resource_byname(pdev,
		IORESOURCE_IRQ, "wcnss_wlantx_irq");
	penv->rx_irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
		"wcnss_wlanrx_irq");

	if (!(penv->mmio_res && penv->tx_irq_res && penv->rx_irq_res)) {
		dev_err(&pdev->dev, "insufficient resources\n");
		ret = -ENOENT;
		goto fail_res;
	}
	INIT_WORK(&penv->wcnssctrl_rx_work, wcnssctrl_rx_handler);
	INIT_WORK(&penv->wcnssctrl_version_work, wcnss_send_version_req);
	INIT_WORK(&penv->wcnssctrl_nvbin_dnld_work, wcnss_nvbin_dnld_main);

	/* Hold a wakelock while the subsystem is being brought up. */
	wake_lock_init(&penv->wcnss_wake_lock, WAKE_LOCK_SUSPEND, "wcnss");
	wcnss_prevent_suspend();

	if (wcnss_hardware_type() == WCNSS_PRONTO_HW) {
		size = 0x3000;
		wcnss_phys_addr = MSM_PRONTO_PHYS;
	} else {
		wcnss_phys_addr = MSM_RIVA_PHYS;
		size = SZ_256;
	}

	penv->msm_wcnss_base = ioremap(wcnss_phys_addr, size);
	if (!penv->msm_wcnss_base) {
		ret = -ENOMEM;
		pr_err("%s: ioremap wcnss physical failed\n", __func__);
		goto fail_ioremap;
	}

	/* Map the hardware-specific debug/status register windows. */
	if (wcnss_hardware_type() == WCNSS_RIVA_HW) {
		penv->riva_ccu_base = ioremap(MSM_RIVA_CCU_BASE, SZ_512);
		if (!penv->riva_ccu_base) {
			ret = -ENOMEM;
			pr_err("%s: ioremap wcnss physical failed\n", __func__);
			goto fail_ioremap2;
		}
	} else {
		penv->pronto_a2xb_base = ioremap(MSM_PRONTO_A2XB_BASE, SZ_512);
		if (!penv->pronto_a2xb_base) {
			ret = -ENOMEM;
			pr_err("%s: ioremap wcnss physical failed\n", __func__);
			goto fail_ioremap2;
		}
		penv->pronto_ccpu_base = ioremap(MSM_PRONTO_CCPU_BASE, SZ_512);
		if (!penv->pronto_ccpu_base) {
			ret = -ENOMEM;
			pr_err("%s: ioremap wcnss physical failed\n", __func__);
			goto fail_ioremap3;
		}
		res = platform_get_resource_byname(penv->pdev,
			IORESOURCE_MEM, "wcnss_fiq");
		if (!res) {
			dev_err(&pdev->dev, "insufficient irq mem resources\n");
			ret = -ENOENT;
			goto fail_ioremap4;
		}
		penv->fiq_reg = ioremap_nocache(res->start, resource_size(res));
		if (!penv->fiq_reg) {
			pr_err("wcnss: %s: ioremap_nocache() failed fiq_reg addr:%pr\n",
				__func__, &res->start);
			ret = -ENOMEM;
			goto fail_ioremap4;
		}
		penv->pronto_saw2_base = ioremap_nocache(MSM_PRONTO_SAW2_BASE,
			SZ_32);
		if (!penv->pronto_saw2_base) {
			pr_err("%s: ioremap wcnss physical(saw2) failed\n",
				__func__);
			ret = -ENOMEM;
			goto fail_ioremap5;
		}
		penv->pronto_pll_base = ioremap_nocache(MSM_PRONTO_PLL_BASE,
			SZ_64);
		if (!penv->pronto_pll_base) {
			pr_err("%s: ioremap wcnss physical(pll) failed\n",
				__func__);
			ret = -ENOMEM;
			goto fail_ioremap6;
		}
		penv->wlan_tx_phy_aborts = ioremap(MSM_PRONTO_TXP_PHY_ABORT,
			SZ_8);
		if (!penv->wlan_tx_phy_aborts) {
			ret = -ENOMEM;
			pr_err("%s: ioremap wlan TX PHY failed\n", __func__);
			goto fail_ioremap7;
		}
		penv->wlan_brdg_err_source = ioremap(MSM_PRONTO_BRDG_ERR_SRC,
			SZ_8);
		if (!penv->wlan_brdg_err_source) {
			ret = -ENOMEM;
			pr_err("%s: ioremap wlan BRDG ERR failed\n", __func__);
			goto fail_ioremap8;
		}
		penv->wlan_tx_status = ioremap(MSM_PRONTO_TXP_STATUS, SZ_8);
		if (!penv->wlan_tx_status) {
			ret = -ENOMEM;
			pr_err("%s: ioremap wlan TX STATUS failed\n", __func__);
			goto fail_ioremap9;
		}
		penv->alarms_txctl = ioremap(MSM_PRONTO_ALARMS_TXCTL, SZ_8);
		if (!penv->alarms_txctl) {
			ret = -ENOMEM;
			pr_err("%s: ioremap alarms TXCTL failed\n", __func__);
			goto fail_ioremap10;
		}
		penv->alarms_tactl = ioremap(MSM_PRONTO_ALARMS_TACTL, SZ_8);
		if (!penv->alarms_tactl) {
			ret = -ENOMEM;
			pr_err("%s: ioremap alarms TACTL failed\n", __func__);
			goto fail_ioremap11;
		}
	}
	/* ADC handle for battery monitoring; optional, continue without it. */
	penv->adc_tm_dev = qpnp_get_adc_tm(&penv->pdev->dev, "wcnss");
	if (IS_ERR(penv->adc_tm_dev)) {
		pr_err("%s: adc get failed\n", __func__);
		penv->adc_tm_dev = NULL;
	} else {
		INIT_DELAYED_WORK(&penv->vbatt_work, wcnss_update_vbatt);
		penv->fw_vbatt_state = WCNSS_CONFIG_UNSPECIFIED;
	}

	/* Load the WCNSS firmware image through the Peripheral Loader. */
	penv->pil = subsystem_get(WCNSS_PIL_DEVICE);
	if (IS_ERR(penv->pil)) {
		dev_err(&pdev->dev, "Peripheral Loader failed on WCNSS.\n");
		ret = PTR_ERR(penv->pil);
		wcnss_pronto_log_debug_regs();
		penv->pil = NULL;
		goto fail_pil;
	}

	return 0;

	/* Unwind ladder: each label undoes exactly what succeeded before it. */
fail_pil:
	if (penv->riva_ccu_base)
		iounmap(penv->riva_ccu_base);
	if (penv->alarms_tactl)
		iounmap(penv->alarms_tactl);
fail_ioremap11:
	if (penv->alarms_txctl)
		iounmap(penv->alarms_txctl);
fail_ioremap10:
	if (penv->wlan_tx_status)
		iounmap(penv->wlan_tx_status);
fail_ioremap9:
	if (penv->wlan_brdg_err_source)
		iounmap(penv->wlan_brdg_err_source);
fail_ioremap8:
	if (penv->wlan_tx_phy_aborts)
		iounmap(penv->wlan_tx_phy_aborts);
fail_ioremap7:
	if (penv->pronto_pll_base)
		iounmap(penv->pronto_pll_base);
fail_ioremap6:
	if (penv->pronto_saw2_base)
		iounmap(penv->pronto_saw2_base);
fail_ioremap5:
	if (penv->fiq_reg)
		iounmap(penv->fiq_reg);
fail_ioremap4:
	if (penv->pronto_ccpu_base)
		iounmap(penv->pronto_ccpu_base);
fail_ioremap3:
	if (penv->pronto_a2xb_base)
		iounmap(penv->pronto_a2xb_base);
fail_ioremap2:
	if (penv->msm_wcnss_base)
		iounmap(penv->msm_wcnss_base);
fail_ioremap:
	wcnss_allow_suspend();
	wake_lock_destroy(&penv->wcnss_wake_lock);
fail_res:
	wcnss_wlan_power(&pdev->dev, &penv->wlan_config,
		WCNSS_WLAN_SWITCH_OFF, NULL);
fail_power:
	if (has_pronto_hw)
		wcnss_pronto_gpios_config(&pdev->dev, false);
	else
		wcnss_gpios_config(penv->gpios_5wire, false);
fail_gpio_res:
	penv = NULL;
	return ret;
}

/* First open of the cal-data device: trigger bring-up and reset the
 * user-calibration bookkeeping. */
static int wcnss_node_open(struct inode *inode, struct file *file)
{
	struct platform_device *pdev;
	int rc = 0;

	if (!penv)
		return -EFAULT;

	if (!penv->triggered) {
		pr_info(DEVICE " triggered by userspace\n");
		pdev = penv->pdev;
		rc = wcnss_trigger_config(pdev);
		if (rc)
			return -EFAULT;
	}

	mutex_lock(&penv->dev_lock);
	penv->user_cal_rcvd = 0;
	penv->user_cal_read = 0;
	penv->user_cal_available = false;
	penv->user_cal_data = NULL;
	penv->device_opened = 1;
	mutex_unlock(&penv->dev_lock);

	return rc;
}

/* read(): stream the firmware calibration data collected by
 * extract_cal_data() to userspace, blocking until data arrives.
 * (Definition continues past this chunk boundary.) */
static ssize_t wcnss_wlan_read(struct file *fp, char __user
			*buffer, size_t count, loff_t *position)
{
	int rc = 0;

	if (!penv || !penv->device_opened)
		return -EFAULT;

	rc = wait_event_interruptible(penv->read_wait, penv->fw_cal_rcvd
			> penv->user_cal_read || penv->fw_cal_available);

	if (rc < 0)
		return rc;

	mutex_lock(&penv->dev_lock);

	if (penv->fw_cal_available && penv->fw_cal_rcvd
			== penv->user_cal_read) {
		/* All collected data has been consumed: EOF. */
		rc = 0;
		goto exit;
	}

	if (count > penv->fw_cal_rcvd - penv->user_cal_read)
		count = penv->fw_cal_rcvd -
penv->user_cal_read; rc = copy_to_user(buffer, penv->fw_cal_data + penv->user_cal_read, count); if (rc == 0) { penv->user_cal_read += count; rc = count; } exit: mutex_unlock(&penv->dev_lock); return rc; } static ssize_t wcnss_wlan_write(struct file *fp, const char __user *user_buffer, size_t count, loff_t *position) { int rc = 0; size_t size = 0; if (!penv || !penv->device_opened || penv->user_cal_available) return -EFAULT; if (penv->user_cal_rcvd == 0 && count >= 4 && !penv->user_cal_data) { rc = copy_from_user((void *)&size, user_buffer, 4); if (!size || size > MAX_CALIBRATED_DATA_SIZE) { pr_err(DEVICE " invalid size to write %d\n", size); return -EFAULT; } rc += count; count -= 4; penv->user_cal_exp_size = size; penv->user_cal_data = kmalloc(size, GFP_KERNEL); if (penv->user_cal_data == NULL) { pr_err(DEVICE " no memory to write\n"); return -ENOMEM; } if (0 == count) goto exit; } else if (penv->user_cal_rcvd == 0 && count < 4) return -EFAULT; if ((UINT32_MAX - count < penv->user_cal_rcvd) || MAX_CALIBRATED_DATA_SIZE < count + penv->user_cal_rcvd) { pr_err(DEVICE " invalid size to write %d\n", count + penv->user_cal_rcvd); rc = -ENOMEM; goto exit; } rc = copy_from_user((void *)penv->user_cal_data + penv->user_cal_rcvd, user_buffer, count); if (0 == rc) { penv->user_cal_rcvd += count; rc += count; } if (penv->user_cal_rcvd == penv->user_cal_exp_size) { penv->user_cal_available = true; pr_info_ratelimited("wcnss: user cal written"); } exit: return rc; } static const struct file_operations wcnss_node_fops = { .owner = THIS_MODULE, .open = wcnss_node_open, .read = wcnss_wlan_read, .write = wcnss_wlan_write, }; static struct miscdevice wcnss_misc = { .minor = MISC_DYNAMIC_MINOR, .name = DEVICE, .fops = &wcnss_node_fops, }; static int __devinit wcnss_wlan_probe(struct platform_device *pdev) { int ret = 0; if (penv) { dev_err(&pdev->dev, "cannot handle multiple devices.\n"); return -ENODEV; } penv = devm_kzalloc(&pdev->dev, sizeof(*penv), GFP_KERNEL); if (!penv) { 
dev_err(&pdev->dev, "cannot allocate device memory.\n"); return -ENOMEM; } penv->pdev = pdev; ret = wcnss_create_sysfs(&pdev->dev); if (ret) { penv = NULL; return -ENOENT; } mutex_init(&penv->dev_lock); mutex_init(&penv->ctrl_lock); mutex_init(&penv->vbat_monitor_mutex); init_waitqueue_head(&penv->read_wait); pr_info(DEVICE " probed in built-in mode\n"); misc_register(&wcnss_usr_ctrl); return misc_register(&wcnss_misc); } static int __devexit wcnss_wlan_remove(struct platform_device *pdev) { wcnss_remove_sysfs(&pdev->dev); penv = NULL; return 0; } static const struct dev_pm_ops wcnss_wlan_pm_ops = { .suspend = wcnss_wlan_suspend, .resume = wcnss_wlan_resume, }; #ifdef CONFIG_WCNSS_CORE_PRONTO static struct of_device_id msm_wcnss_pronto_match[] = { {.compatible = "qcom,wcnss_wlan"}, {} }; #endif static struct platform_driver wcnss_wlan_driver = { .driver = { .name = DEVICE, .owner = THIS_MODULE, .pm = &wcnss_wlan_pm_ops, #ifdef CONFIG_WCNSS_CORE_PRONTO .of_match_table = msm_wcnss_pronto_match, #endif }, .probe = wcnss_wlan_probe, .remove = __devexit_p(wcnss_wlan_remove), }; static int __init wcnss_wlan_init(void) { int ret = 0; platform_driver_register(&wcnss_wlan_driver); platform_driver_register(&wcnss_wlan_ctrl_driver); platform_driver_register(&wcnss_ctrl_driver); register_pm_notifier(&wcnss_pm_notifier); #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC ret = wcnss_prealloc_init(); if (ret < 0) pr_err("wcnss: pre-allocation failed\n"); #endif return ret; } static void __exit wcnss_wlan_exit(void) { if (penv) { if (penv->pil) subsystem_put(penv->pil); penv = NULL; } #ifdef CONFIG_WCNSS_MEM_PRE_ALLOC wcnss_prealloc_deinit(); #endif unregister_pm_notifier(&wcnss_pm_notifier); platform_driver_unregister(&wcnss_ctrl_driver); platform_driver_unregister(&wcnss_wlan_ctrl_driver); platform_driver_unregister(&wcnss_wlan_driver); } module_init(wcnss_wlan_init); module_exit(wcnss_wlan_exit); MODULE_LICENSE("GPL v2"); MODULE_VERSION(VERSION); MODULE_DESCRIPTION(DEVICE "Driver");
gpl-2.0
DroidThug/kernel_delta_msm8916
drivers/cpufreq/cpufreq_governor.c
1
13238
/* * drivers/cpufreq/cpufreq_governor.c * * CPUFREQ governors common code * * Copyright (C) 2001 Russell King * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. * (C) 2003 Jun Nakajima <jun.nakajima@intel.com> * (C) 2009 Alexander Clouter <alex@digriz.org.uk> * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/export.h> #include <linux/kernel_stat.h> #include <linux/slab.h> #include "cpufreq_governor.h" static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data) { if (have_governor_per_policy()) return dbs_data->cdata->attr_group_gov_pol; else return dbs_data->cdata->attr_group_gov_sys; } void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) { struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); struct od_dbs_tuners *od_tuners = dbs_data->tuners; struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; struct ex_dbs_tuners *ex_tuners = dbs_data->tuners; struct cpufreq_policy *policy; unsigned int sampling_rate; unsigned int max_load = 0; unsigned int ignore_nice; unsigned int j; { if (dbs_data->cdata->governor == GOV_ONDEMAND) ignore_nice = od_tuners->ignore_nice_load; } if (dbs_data->cdata->governor == GOV_ELEMENTALX) { sampling_rate = ex_tuners->sampling_rate; ignore_nice = ex_tuners->ignore_nice_load; } else { sampling_rate = cs_tuners->sampling_rate; ignore_nice = cs_tuners->ignore_nice_load; } policy = cdbs->cur_policy; /* Get Absolute Load */ for_each_cpu(j, policy->cpus) { struct cpu_dbs_common_info *j_cdbs; u64 cur_wall_time, cur_idle_time; unsigned int idle_time, wall_time; unsigned int load; int io_busy = 0; j_cdbs = dbs_data->cdata->get_cpu_cdbs(j); /* * For the purpose of ondemand, waiting for disk IO is * an indication that you're performance critical, and * not that the 
system is actually idle. So do not add * the iowait time to the cpu idle time. */ if (dbs_data->cdata->governor == GOV_ONDEMAND) io_busy = od_tuners->io_is_busy; cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy); wall_time = (unsigned int) (cur_wall_time - j_cdbs->prev_cpu_wall); j_cdbs->prev_cpu_wall = cur_wall_time; idle_time = (unsigned int) (cur_idle_time - j_cdbs->prev_cpu_idle); j_cdbs->prev_cpu_idle = cur_idle_time; if (ignore_nice) { u64 cur_nice; unsigned long cur_nice_jiffies; cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - cdbs->prev_cpu_nice; /* * Assumption: nice time between sampling periods will * be less than 2^32 jiffies for 32 bit sys */ cur_nice_jiffies = (unsigned long) cputime64_to_jiffies64(cur_nice); cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; idle_time += jiffies_to_usecs(cur_nice_jiffies); } if (unlikely(!wall_time || wall_time < idle_time)) continue; /* * If the CPU had gone completely idle, and a task just woke up * on this CPU now, it would be unfair to calculate 'load' the * usual way for this elapsed time-window, because it will show * near-zero load, irrespective of how CPU intensive that task * actually is. This is undesirable for latency-sensitive bursty * workloads. * * To avoid this, we reuse the 'load' from the previous * time-window and give this task a chance to start with a * reasonably high CPU frequency. (However, we shouldn't over-do * this copy, lest we get stuck at a high load (high frequency) * for too long, even when the current system load has actually * dropped down. So we perform the copy only once, upon the * first wake-up from idle.) * * Detecting this situation is easy: the governor's deferrable * timer would not have fired during CPU-idle periods. Hence * an unusually large 'wall_time' (as compared to the sampling * rate) indicates this scenario. 
* * prev_load can be zero in two cases and we must recalculate it * for both cases: * - during long idle intervals * - explicitly set to zero */ if (unlikely(wall_time > (2 * sampling_rate) && j_cdbs->prev_load)) { load = j_cdbs->prev_load; /* * Perform a destructive copy, to ensure that we copy * the previous load only once, upon the first wake-up * from idle. */ j_cdbs->prev_load = 0; } else { load = 100 * (wall_time - idle_time) / wall_time; j_cdbs->prev_load = load; } if (load > max_load) max_load = load; } dbs_data->cdata->gov_check_cpu(cpu, max_load); } EXPORT_SYMBOL_GPL(dbs_check_cpu); static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data, unsigned int delay) { struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay); } void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, unsigned int delay, bool all_cpus) { int i; mutex_lock(&cpufreq_governor_lock); if (!policy->governor_enabled) goto out_unlock; if (!all_cpus) { /* * Use raw_smp_processor_id() to avoid preemptible warnings. * We know that this is only called with all_cpus == false from * works that have been queued with *_work_on() functions and * those works are canceled during CPU_DOWN_PREPARE so they * can't possibly run on any other CPU. 
*/ __gov_queue_work(raw_smp_processor_id(), dbs_data, delay); } else { for_each_cpu(i, policy->cpus) __gov_queue_work(i, dbs_data, delay); } out_unlock: mutex_unlock(&cpufreq_governor_lock); } EXPORT_SYMBOL_GPL(gov_queue_work); static inline void gov_cancel_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy) { struct cpu_dbs_common_info *cdbs; int i; for_each_cpu(i, policy->cpus) { cdbs = dbs_data->cdata->get_cpu_cdbs(i); cancel_delayed_work_sync(&cdbs->work); } } /* Will return if we need to evaluate cpu load again or not */ bool need_load_eval(struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate) { if (policy_is_shared(cdbs->cur_policy)) { ktime_t time_now = ktime_get(); s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp); /* Do nothing if we recently have sampled */ if (delta_us < (s64)(sampling_rate / 2)) return false; else cdbs->time_stamp = time_now; } return true; } EXPORT_SYMBOL_GPL(need_load_eval); static void set_sampling_rate(struct dbs_data *dbs_data, unsigned int sampling_rate) { if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; cs_tuners->sampling_rate = sampling_rate; } else if (dbs_data->cdata->governor == GOV_ELEMENTALX) { struct ex_dbs_tuners *ex_tuners = dbs_data->tuners; ex_tuners->sampling_rate = sampling_rate; } else { struct od_dbs_tuners *od_tuners = dbs_data->tuners; od_tuners->sampling_rate = sampling_rate; } } int cpufreq_governor_dbs(struct cpufreq_policy *policy, struct common_dbs_data *cdata, unsigned int event) { struct dbs_data *dbs_data; struct od_cpu_dbs_info_s *od_dbs_info = NULL; struct cs_cpu_dbs_info_s *cs_dbs_info = NULL; struct ex_cpu_dbs_info_s *ex_dbs_info = NULL; struct od_ops *od_ops = NULL; struct od_dbs_tuners *od_tuners = NULL; struct cs_dbs_tuners *cs_tuners = NULL; struct ex_dbs_tuners *ex_tuners = NULL; struct cpu_dbs_common_info *cpu_cdbs; unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu; int io_busy = 0; int rc; 
if (have_governor_per_policy()) dbs_data = policy->governor_data; else dbs_data = cdata->gdbs_data; WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)); switch (event) { case CPUFREQ_GOV_POLICY_INIT: if (have_governor_per_policy()) { WARN_ON(dbs_data); } else if (dbs_data) { dbs_data->usage_count++; policy->governor_data = dbs_data; return 0; } dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL); if (!dbs_data) { pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__); return -ENOMEM; } dbs_data->cdata = cdata; dbs_data->usage_count = 1; if (cdata->governor == GOV_ELEMENTALX) rc = cdata->init_ex(dbs_data, policy); else rc = cdata->init(dbs_data); if (rc) { pr_err("%s: POLICY_INIT: init() failed\n", __func__); kfree(dbs_data); return rc; } if (!have_governor_per_policy()) WARN_ON(cpufreq_get_global_kobject()); rc = sysfs_create_group(get_governor_parent_kobj(policy), get_sysfs_attr(dbs_data)); if (rc) { cdata->exit(dbs_data); kfree(dbs_data); return rc; } policy->governor_data = dbs_data; /* policy latency is in ns. 
Convert it to us first */ latency = policy->cpuinfo.transition_latency / 1000; if (latency == 0) latency = 1; /* Bring kernel and HW constraints together */ dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, MIN_LATENCY_MULTIPLIER * latency); set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, latency * LATENCY_MULTIPLIER)); if ((cdata->governor == GOV_CONSERVATIVE) && (!policy->governor->initialized)) { struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; cpufreq_register_notifier(cs_ops->notifier_block, CPUFREQ_TRANSITION_NOTIFIER); } if (!have_governor_per_policy()) cdata->gdbs_data = dbs_data; return 0; case CPUFREQ_GOV_POLICY_EXIT: if (!--dbs_data->usage_count) { sysfs_remove_group(get_governor_parent_kobj(policy), get_sysfs_attr(dbs_data)); if (!have_governor_per_policy()) cpufreq_put_global_kobject(); if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) && (policy->governor->initialized == 1)) { struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; cpufreq_unregister_notifier(cs_ops->notifier_block, CPUFREQ_TRANSITION_NOTIFIER); } cdata->exit(dbs_data); kfree(dbs_data); cdata->gdbs_data = NULL; } policy->governor_data = NULL; return 0; } cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { cs_tuners = dbs_data->tuners; cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); sampling_rate = cs_tuners->sampling_rate; ignore_nice = cs_tuners->ignore_nice_load; } else if (dbs_data->cdata->governor == GOV_ELEMENTALX) { ex_tuners = dbs_data->tuners; ex_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); sampling_rate = ex_tuners->sampling_rate; ignore_nice = ex_tuners->ignore_nice_load; } else { od_tuners = dbs_data->tuners; od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); sampling_rate = od_tuners->sampling_rate; ignore_nice = od_tuners->ignore_nice_load; od_ops = dbs_data->cdata->gov_ops; io_busy = od_tuners->io_is_busy; } switch (event) { case CPUFREQ_GOV_START: if (!policy->cur) return 
-EINVAL; mutex_lock(&dbs_data->mutex); for_each_cpu(j, policy->cpus) { struct cpu_dbs_common_info *j_cdbs = dbs_data->cdata->get_cpu_cdbs(j); unsigned int prev_load; j_cdbs->cpu = j; j_cdbs->cur_policy = policy; j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy); prev_load = (unsigned int) (j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle); j_cdbs->prev_load = 100 * prev_load / (unsigned int) j_cdbs->prev_cpu_wall; if (ignore_nice) j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; mutex_init(&j_cdbs->timer_mutex); INIT_DEFERRABLE_WORK(&j_cdbs->work, dbs_data->cdata->gov_dbs_timer); } if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { cs_dbs_info->down_skip = 0; cs_dbs_info->enable = 1; cs_dbs_info->requested_freq = policy->cur; } else if (dbs_data->cdata->governor == GOV_ELEMENTALX) { ex_dbs_info->down_floor = 0; ex_dbs_info->enable = 1; } else { od_dbs_info->rate_mult = 1; od_dbs_info->sample_type = OD_NORMAL_SAMPLE; od_ops->powersave_bias_init_cpu(cpu); } mutex_unlock(&dbs_data->mutex); /* Initiate timer time stamp */ cpu_cdbs->time_stamp = ktime_get(); gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate), true); break; case CPUFREQ_GOV_STOP: if (dbs_data->cdata->governor == GOV_CONSERVATIVE) cs_dbs_info->enable = 0; if (dbs_data->cdata->governor == GOV_ELEMENTALX) ex_dbs_info->enable = 0; gov_cancel_work(dbs_data, policy); mutex_lock(&dbs_data->mutex); mutex_destroy(&cpu_cdbs->timer_mutex); cpu_cdbs->cur_policy = NULL; mutex_unlock(&dbs_data->mutex); break; case CPUFREQ_GOV_LIMITS: mutex_lock(&dbs_data->mutex); if (!cpu_cdbs->cur_policy) { mutex_unlock(&dbs_data->mutex); break; } mutex_lock(&cpu_cdbs->timer_mutex); if (policy->max < cpu_cdbs->cur_policy->cur) __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->max, CPUFREQ_RELATION_H); else if (policy->min > cpu_cdbs->cur_policy->cur) __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->min, CPUFREQ_RELATION_L); dbs_check_cpu(dbs_data, cpu); 
mutex_unlock(&cpu_cdbs->timer_mutex); mutex_unlock(&dbs_data->mutex); break; } return 0; } EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
gpl-2.0
matthewbauer/dosbox
src/cpu/cpu.cpp
1
70305
/* * Copyright (C) 2002-2013 The DOSBox Team * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <assert.h> #include <sstream> #include <stddef.h> #include "dosbox.h" #include "cpu.h" #ifndef __LIBRETRO__ // memory.h: Not on wii, not needed anyway? #include "memory.h" #endif #include "debug.h" #include "mapper.h" #include "setup.h" #include "programs.h" #include "paging.h" #include "lazyflags.h" #include "support.h" Bitu DEBUG_EnableDebugger(void); extern void GFX_SetTitle(Bit32s cycles ,Bits frameskip,bool paused); #if 1 #undef LOG #if defined (_MSC_VER) #define LOG(X,Y) #else #define LOG(X,Y) CPU_LOG #define CPU_LOG(...) 
#endif #endif CPU_Regs cpu_regs; CPUBlock cpu; Segments Segs; Bit32s CPU_Cycles = 0; Bit32s CPU_CycleLeft = 3000; Bit32s CPU_CycleMax = 3000; Bit32s CPU_OldCycleMax = 3000; Bit32s CPU_CyclePercUsed = 100; Bit32s CPU_CycleLimit = -1; Bit32s CPU_CycleUp = 0; Bit32s CPU_CycleDown = 0; Bit64s CPU_IODelayRemoved = 0; CPU_Decoder * cpudecoder; bool CPU_CycleAutoAdjust = false; bool CPU_SkipCycleAutoAdjust = false; Bitu CPU_AutoDetermineMode = 0; Bitu CPU_ArchitectureType = CPU_ARCHTYPE_MIXED; Bitu CPU_extflags_toggle=0; // ID and AC flags may be toggled depending on emulated CPU architecture Bitu CPU_PrefetchQueueSize=0; void CPU_Core_Full_Init(void); void CPU_Core_Normal_Init(void); void CPU_Core_Simple_Init(void); #if (C_DYNAMIC_X86) void CPU_Core_Dyn_X86_Init(void); void CPU_Core_Dyn_X86_Cache_Init(bool enable_cache); void CPU_Core_Dyn_X86_Cache_Close(void); void CPU_Core_Dyn_X86_SetFPUMode(bool dh_fpu); #elif (C_DYNREC) void CPU_Core_Dynrec_Init(void); void CPU_Core_Dynrec_Cache_Init(bool enable_cache); void CPU_Core_Dynrec_Cache_Close(void); #endif /* In debug mode exceptions are tested and dosbox exits when * a unhandled exception state is detected. * USE CHECK_EXCEPT to raise an exception in that case to see if that exception * solves the problem. * * In non-debug mode dosbox doesn't do detection (and hence doesn't crash at * that point). 
(game might crash later due to the unhandled exception) */ #if C_DEBUG // #define CPU_CHECK_EXCEPT 1 // #define CPU_CHECK_IGNORE 1 /* Use CHECK_EXCEPT when something doesn't work to see if a exception is * needed that isn't enabled by default.*/ #else /* NORMAL NO CHECKING => More Speed */ #define CPU_CHECK_IGNORE 1 #endif /* C_DEBUG */ #if defined(CPU_CHECK_IGNORE) #define CPU_CHECK_COND(cond,msg,exc,sel) { \ if (cond) do {} while (0); \ } #elif defined(CPU_CHECK_EXCEPT) #define CPU_CHECK_COND(cond,msg,exc,sel) { \ if (cond) { \ CPU_Exception(exc,sel); \ return; \ } \ } #else #define CPU_CHECK_COND(cond,msg,exc,sel) { \ if (cond) E_Exit(msg); \ } #endif void Descriptor::Load(PhysPt address) { cpu.mpl=0; Bit32u* data = (Bit32u*)&saved; *data = mem_readd(address); *(data+1) = mem_readd(address+4); cpu.mpl=3; } void Descriptor:: Save(PhysPt address) { cpu.mpl=0; Bit32u* data = (Bit32u*)&saved; mem_writed(address,*data); mem_writed(address+4,*(data+1)); cpu.mpl=03; } void CPU_Push16(Bitu value) { Bit32u new_esp=(reg_esp&cpu.stack.notmask)|((reg_esp-2)&cpu.stack.mask); mem_writew(SegPhys(ss) + (new_esp & cpu.stack.mask) ,value); reg_esp=new_esp; } void CPU_Push32(Bitu value) { Bit32u new_esp=(reg_esp&cpu.stack.notmask)|((reg_esp-4)&cpu.stack.mask); mem_writed(SegPhys(ss) + (new_esp & cpu.stack.mask) ,value); reg_esp=new_esp; } Bitu CPU_Pop16(void) { Bitu val=mem_readw(SegPhys(ss) + (reg_esp & cpu.stack.mask)); reg_esp=(reg_esp&cpu.stack.notmask)|((reg_esp+2)&cpu.stack.mask); return val; } Bitu CPU_Pop32(void) { Bitu val=mem_readd(SegPhys(ss) + (reg_esp & cpu.stack.mask)); reg_esp=(reg_esp&cpu.stack.notmask)|((reg_esp+4)&cpu.stack.mask); return val; } PhysPt SelBase(Bitu sel) { if (cpu.cr0 & CR0_PROTECTION) { Descriptor desc; cpu.gdt.GetDescriptor(sel,desc); return desc.GetBase(); } else { return sel<<4; } } void CPU_SetFlags(Bitu word,Bitu mask) { mask|=CPU_extflags_toggle; // ID-flag and AC-flag can be toggled on CPUID-supporting CPUs reg_flags=(reg_flags & 
~mask)|(word & mask)|2; cpu.direction=1-((reg_flags & FLAG_DF) >> 9); } bool CPU_PrepareException(Bitu which,Bitu error) { cpu.exception.which=which; cpu.exception.error=error; return true; } bool CPU_CLI(void) { if (cpu.pmode && ((!GETFLAG(VM) && (GETFLAG_IOPL<cpu.cpl)) || (GETFLAG(VM) && (GETFLAG_IOPL<3)))) { return CPU_PrepareException(EXCEPTION_GP,0); } else { SETFLAGBIT(IF,false); return false; } } bool CPU_STI(void) { if (cpu.pmode && ((!GETFLAG(VM) && (GETFLAG_IOPL<cpu.cpl)) || (GETFLAG(VM) && (GETFLAG_IOPL<3)))) { return CPU_PrepareException(EXCEPTION_GP,0); } else { SETFLAGBIT(IF,true); return false; } } bool CPU_POPF(Bitu use32) { if (cpu.pmode && GETFLAG(VM) && (GETFLAG(IOPL)!=FLAG_IOPL)) { /* Not enough privileges to execute POPF */ return CPU_PrepareException(EXCEPTION_GP,0); } Bitu mask=FMASK_ALL; /* IOPL field can only be modified when CPL=0 or in real mode: */ if (cpu.pmode && (cpu.cpl>0)) mask &= (~FLAG_IOPL); if (cpu.pmode && !GETFLAG(VM) && (GETFLAG_IOPL<cpu.cpl)) mask &= (~FLAG_IF); if (use32) CPU_SetFlags(CPU_Pop32(),mask); else CPU_SetFlags(CPU_Pop16(),mask & 0xffff); DestroyConditionFlags(); return false; } bool CPU_PUSHF(Bitu use32) { if (cpu.pmode && GETFLAG(VM) && (GETFLAG(IOPL)!=FLAG_IOPL)) { /* Not enough privileges to execute PUSHF */ return CPU_PrepareException(EXCEPTION_GP,0); } FillFlags(); if (use32) CPU_Push32(reg_flags & 0xfcffff); else CPU_Push16(reg_flags); return false; } void CPU_CheckSegments(void) { bool needs_invalidation=false; Descriptor desc; if (!cpu.gdt.GetDescriptor(SegValue(es),desc)) needs_invalidation=true; else switch (desc.Type()) { case DESC_DATA_EU_RO_NA: case DESC_DATA_EU_RO_A: case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A: case DESC_DATA_ED_RO_NA: case DESC_DATA_ED_RO_A: case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A: case DESC_CODE_N_NC_A: case DESC_CODE_N_NC_NA: case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA: if (cpu.cpl>desc.DPL()) needs_invalidation=true; break; default: break; } if (needs_invalidation) 
CPU_SetSegGeneral(es,0); needs_invalidation=false; if (!cpu.gdt.GetDescriptor(SegValue(ds),desc)) needs_invalidation=true; else switch (desc.Type()) { case DESC_DATA_EU_RO_NA: case DESC_DATA_EU_RO_A: case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A: case DESC_DATA_ED_RO_NA: case DESC_DATA_ED_RO_A: case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A: case DESC_CODE_N_NC_A: case DESC_CODE_N_NC_NA: case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA: if (cpu.cpl>desc.DPL()) needs_invalidation=true; break; default: break; } if (needs_invalidation) CPU_SetSegGeneral(ds,0); needs_invalidation=false; if (!cpu.gdt.GetDescriptor(SegValue(fs),desc)) needs_invalidation=true; else switch (desc.Type()) { case DESC_DATA_EU_RO_NA: case DESC_DATA_EU_RO_A: case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A: case DESC_DATA_ED_RO_NA: case DESC_DATA_ED_RO_A: case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A: case DESC_CODE_N_NC_A: case DESC_CODE_N_NC_NA: case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA: if (cpu.cpl>desc.DPL()) needs_invalidation=true; break; default: break; } if (needs_invalidation) CPU_SetSegGeneral(fs,0); needs_invalidation=false; if (!cpu.gdt.GetDescriptor(SegValue(gs),desc)) needs_invalidation=true; else switch (desc.Type()) { case DESC_DATA_EU_RO_NA: case DESC_DATA_EU_RO_A: case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A: case DESC_DATA_ED_RO_NA: case DESC_DATA_ED_RO_A: case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A: case DESC_CODE_N_NC_A: case DESC_CODE_N_NC_NA: case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA: if (cpu.cpl>desc.DPL()) needs_invalidation=true; break; default: break; } if (needs_invalidation) CPU_SetSegGeneral(gs,0); } class TaskStateSegment { public: TaskStateSegment() { valid=false; } bool IsValid(void) { return valid; } Bitu Get_back(void) { cpu.mpl=0; Bit16u backlink=mem_readw(base); cpu.mpl=3; return backlink; } void SaveSelector(void) { cpu.gdt.SetDescriptor(selector,desc); } void Get_SSx_ESPx(Bitu level,Bitu & _ss,Bitu & _esp) { cpu.mpl=0; if (is386) { PhysPt 
where=base+offsetof(TSS_32,esp0)+level*8; _esp=mem_readd(where); _ss=mem_readw(where+4); } else { PhysPt where=base+offsetof(TSS_16,sp0)+level*4; _esp=mem_readw(where); _ss=mem_readw(where+2); } cpu.mpl=3; } bool SetSelector(Bitu new_sel) { valid=false; if ((new_sel & 0xfffc)==0) { selector=0; base=0; limit=0; is386=1; return true; } if (new_sel&4) return false; if (!cpu.gdt.GetDescriptor(new_sel,desc)) return false; switch (desc.Type()) { case DESC_286_TSS_A: case DESC_286_TSS_B: case DESC_386_TSS_A: case DESC_386_TSS_B: break; default: return false; } if (!desc.saved.seg.p) return false; selector=new_sel; valid=true; base=desc.GetBase(); limit=desc.GetLimit(); is386=desc.Is386(); return true; } TSS_Descriptor desc; Bitu selector; PhysPt base; Bitu limit; Bitu is386; bool valid; }; TaskStateSegment cpu_tss; enum TSwitchType { TSwitch_JMP,TSwitch_CALL_INT,TSwitch_IRET }; bool CPU_SwitchTask(Bitu new_tss_selector,TSwitchType tstype,Bitu old_eip) { FillFlags(); TaskStateSegment new_tss; if (!new_tss.SetSelector(new_tss_selector)) E_Exit("Illegal TSS for switch, selector=%x, switchtype=%x",new_tss_selector,tstype); if (tstype==TSwitch_IRET) { if (!new_tss.desc.IsBusy()) E_Exit("TSS not busy for IRET"); } else { if (new_tss.desc.IsBusy()) E_Exit("TSS busy for JMP/CALL/INT"); } Bitu new_cr3=0; Bitu new_eax,new_ebx,new_ecx,new_edx,new_esp,new_ebp,new_esi,new_edi; Bitu new_es,new_cs,new_ss,new_ds,new_fs,new_gs; Bitu new_ldt,new_eip,new_eflags; /* Read new context from new TSS */ if (new_tss.is386) { new_cr3=mem_readd(new_tss.base+offsetof(TSS_32,cr3)); new_eip=mem_readd(new_tss.base+offsetof(TSS_32,eip)); new_eflags=mem_readd(new_tss.base+offsetof(TSS_32,eflags)); new_eax=mem_readd(new_tss.base+offsetof(TSS_32,eax)); new_ecx=mem_readd(new_tss.base+offsetof(TSS_32,ecx)); new_edx=mem_readd(new_tss.base+offsetof(TSS_32,edx)); new_ebx=mem_readd(new_tss.base+offsetof(TSS_32,ebx)); new_esp=mem_readd(new_tss.base+offsetof(TSS_32,esp)); 
new_ebp=mem_readd(new_tss.base+offsetof(TSS_32,ebp)); new_edi=mem_readd(new_tss.base+offsetof(TSS_32,edi)); new_esi=mem_readd(new_tss.base+offsetof(TSS_32,esi)); new_es=mem_readw(new_tss.base+offsetof(TSS_32,es)); new_cs=mem_readw(new_tss.base+offsetof(TSS_32,cs)); new_ss=mem_readw(new_tss.base+offsetof(TSS_32,ss)); new_ds=mem_readw(new_tss.base+offsetof(TSS_32,ds)); new_fs=mem_readw(new_tss.base+offsetof(TSS_32,fs)); new_gs=mem_readw(new_tss.base+offsetof(TSS_32,gs)); new_ldt=mem_readw(new_tss.base+offsetof(TSS_32,ldt)); } else { E_Exit("286 task switch"); new_cr3=0; new_eip=0; new_eflags=0; new_eax=0; new_ecx=0; new_edx=0; new_ebx=0; new_esp=0; new_ebp=0; new_edi=0; new_esi=0; new_es=0; new_cs=0; new_ss=0; new_ds=0; new_fs=0; new_gs=0; new_ldt=0; } /* Check if we need to clear busy bit of old TASK */ if (tstype==TSwitch_JMP || tstype==TSwitch_IRET) { cpu_tss.desc.SetBusy(false); cpu_tss.SaveSelector(); } Bit32u old_flags = reg_flags; if (tstype==TSwitch_IRET) old_flags &= (~FLAG_NT); /* Save current context in current TSS */ if (cpu_tss.is386) { mem_writed(cpu_tss.base+offsetof(TSS_32,eflags),old_flags); mem_writed(cpu_tss.base+offsetof(TSS_32,eip),old_eip); mem_writed(cpu_tss.base+offsetof(TSS_32,eax),reg_eax); mem_writed(cpu_tss.base+offsetof(TSS_32,ecx),reg_ecx); mem_writed(cpu_tss.base+offsetof(TSS_32,edx),reg_edx); mem_writed(cpu_tss.base+offsetof(TSS_32,ebx),reg_ebx); mem_writed(cpu_tss.base+offsetof(TSS_32,esp),reg_esp); mem_writed(cpu_tss.base+offsetof(TSS_32,ebp),reg_ebp); mem_writed(cpu_tss.base+offsetof(TSS_32,esi),reg_esi); mem_writed(cpu_tss.base+offsetof(TSS_32,edi),reg_edi); mem_writed(cpu_tss.base+offsetof(TSS_32,es),SegValue(es)); mem_writed(cpu_tss.base+offsetof(TSS_32,cs),SegValue(cs)); mem_writed(cpu_tss.base+offsetof(TSS_32,ss),SegValue(ss)); mem_writed(cpu_tss.base+offsetof(TSS_32,ds),SegValue(ds)); mem_writed(cpu_tss.base+offsetof(TSS_32,fs),SegValue(fs)); mem_writed(cpu_tss.base+offsetof(TSS_32,gs),SegValue(gs)); } else { E_Exit("286 task 
switch"); } /* Setup a back link to the old TSS in new TSS */ if (tstype==TSwitch_CALL_INT) { if (new_tss.is386) { mem_writed(new_tss.base+offsetof(TSS_32,back),cpu_tss.selector); } else { mem_writew(new_tss.base+offsetof(TSS_16,back),cpu_tss.selector); } /* And make the new task's eflag have the nested task bit */ new_eflags|=FLAG_NT; } /* Set the busy bit in the new task */ if (tstype==TSwitch_JMP || tstype==TSwitch_CALL_INT) { new_tss.desc.SetBusy(true); new_tss.SaveSelector(); } // cpu.cr0|=CR0_TASKSWITCHED; if (new_tss_selector == cpu_tss.selector) { reg_eip = old_eip; new_cs = SegValue(cs); new_ss = SegValue(ss); new_ds = SegValue(ds); new_es = SegValue(es); new_fs = SegValue(fs); new_gs = SegValue(gs); } else { /* Setup the new cr3 */ PAGING_SetDirBase(new_cr3); /* Load new context */ if (new_tss.is386) { reg_eip=new_eip; CPU_SetFlags(new_eflags,FMASK_ALL | FLAG_VM); reg_eax=new_eax; reg_ecx=new_ecx; reg_edx=new_edx; reg_ebx=new_ebx; reg_esp=new_esp; reg_ebp=new_ebp; reg_edi=new_edi; reg_esi=new_esi; // new_cs=mem_readw(new_tss.base+offsetof(TSS_32,cs)); } else { E_Exit("286 task switch"); } } /* Load the new selectors */ if (reg_flags & FLAG_VM) { SegSet16(cs,new_cs); cpu.code.big=false; cpu.cpl=3; //We don't have segment caches so this will do } else { /* Protected mode task */ if (new_ldt!=0) CPU_LLDT(new_ldt); /* Load the new CS*/ Descriptor cs_desc; cpu.cpl=new_cs & 3; if (!cpu.gdt.GetDescriptor(new_cs,cs_desc)) E_Exit("Task switch with CS beyond limits"); if (!cs_desc.saved.seg.p) E_Exit("Task switch with non present code-segment"); switch (cs_desc.Type()) { case DESC_CODE_N_NC_A: case DESC_CODE_N_NC_NA: case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA: if (cpu.cpl != cs_desc.DPL()) E_Exit("Task CS RPL != DPL"); goto doconforming; case DESC_CODE_N_C_A: case DESC_CODE_N_C_NA: case DESC_CODE_R_C_A: case DESC_CODE_R_C_NA: if (cpu.cpl < cs_desc.DPL()) E_Exit("Task CS RPL < DPL"); doconforming: Segs.phys[cs]=cs_desc.GetBase(); cpu.code.big=cs_desc.Big()>0; 
Segs.val[cs]=new_cs; break; default: E_Exit("Task switch CS Type %d",cs_desc.Type()); } } CPU_SetSegGeneral(es,new_es); CPU_SetSegGeneral(ss,new_ss); CPU_SetSegGeneral(ds,new_ds); CPU_SetSegGeneral(fs,new_fs); CPU_SetSegGeneral(gs,new_gs); if (!cpu_tss.SetSelector(new_tss_selector)) { LOG(LOG_CPU,LOG_NORMAL)("TaskSwitch: set tss selector %X failed",new_tss_selector); } // cpu_tss.desc.SetBusy(true); // cpu_tss.SaveSelector(); // LOG_MSG("Task CPL %X CS:%X IP:%X SS:%X SP:%X eflags %x",cpu.cpl,SegValue(cs),reg_eip,SegValue(ss),reg_esp,reg_flags); return true; } bool CPU_IO_Exception(Bitu port,Bitu size) { if (cpu.pmode && ((GETFLAG_IOPL<cpu.cpl) || GETFLAG(VM))) { cpu.mpl=0; if (!cpu_tss.is386) goto doexception; PhysPt bwhere=cpu_tss.base+0x66; Bitu ofs=mem_readw(bwhere); if (ofs>cpu_tss.limit) goto doexception; bwhere=cpu_tss.base+ofs+(port/8); Bitu map=mem_readw(bwhere); Bitu mask=(0xffff>>(16-size)) << (port&7); if (map & mask) goto doexception; cpu.mpl=3; } return false; doexception: cpu.mpl=3; LOG(LOG_CPU,LOG_NORMAL)("IO Exception port %X",port); return CPU_PrepareException(EXCEPTION_GP,0); } void CPU_Exception(Bitu which,Bitu error ) { // LOG_MSG("Exception %d error %x",which,error); cpu.exception.error=error; CPU_Interrupt(which,CPU_INT_EXCEPTION | ((which>=8) ? 
CPU_INT_HAS_ERROR : 0),reg_eip); } Bit8u lastint; void CPU_Interrupt(Bitu num,Bitu type,Bitu oldeip) { lastint=num; FillFlags(); #if C_DEBUG switch (num) { case 0xcd: #if C_HEAVY_DEBUG LOG(LOG_CPU,LOG_ERROR)("Call to interrupt 0xCD this is BAD"); DEBUG_HeavyWriteLogInstruction(); E_Exit("Call to interrupt 0xCD this is BAD"); #endif break; case 0x03: if (DEBUG_Breakpoint()) { CPU_Cycles=0; return; } }; #endif if (!cpu.pmode) { /* Save everything on a 16-bit stack */ CPU_Push16(reg_flags & 0xffff); CPU_Push16(SegValue(cs)); CPU_Push16(oldeip); SETFLAGBIT(IF,false); SETFLAGBIT(TF,false); /* Get the new CS:IP from vector table */ PhysPt base=cpu.idt.GetBase(); reg_eip=mem_readw(base+(num << 2)); Segs.val[cs]=mem_readw(base+(num << 2)+2); Segs.phys[cs]=Segs.val[cs]<<4; cpu.code.big=false; return; } else { /* Protected Mode Interrupt */ if ((reg_flags & FLAG_VM) && (type&CPU_INT_SOFTWARE) && !(type&CPU_INT_NOIOPLCHECK)) { // LOG_MSG("Software int in v86, AH %X IOPL %x",reg_ah,(reg_flags & FLAG_IOPL) >>12); if ((reg_flags & FLAG_IOPL)!=FLAG_IOPL) { CPU_Exception(EXCEPTION_GP,0); return; } } Descriptor gate; if (!cpu.idt.GetDescriptor(num<<3,gate)) { // zone66 CPU_Exception(EXCEPTION_GP,num*8+2+(type&CPU_INT_SOFTWARE)?0:1); return; } if ((type&CPU_INT_SOFTWARE) && (gate.DPL()<cpu.cpl)) { // zone66, win3.x e CPU_Exception(EXCEPTION_GP,num*8+2); return; } switch (gate.Type()) { case DESC_286_INT_GATE: case DESC_386_INT_GATE: case DESC_286_TRAP_GATE: case DESC_386_TRAP_GATE: { CPU_CHECK_COND(!gate.saved.seg.p, "INT:Gate segment not present", EXCEPTION_NP,num*8+2+(type&CPU_INT_SOFTWARE)?0:1) Descriptor cs_desc; Bitu gate_sel=gate.GetSelector(); Bitu gate_off=gate.GetOffset(); CPU_CHECK_COND((gate_sel & 0xfffc)==0, "INT:Gate with CS zero selector", EXCEPTION_GP,(type&CPU_INT_SOFTWARE)?0:1) CPU_CHECK_COND(!cpu.gdt.GetDescriptor(gate_sel,cs_desc), "INT:Gate with CS beyond limit", EXCEPTION_GP,(gate_sel & 0xfffc)+(type&CPU_INT_SOFTWARE)?0:1) Bitu cs_dpl=cs_desc.DPL(); 
CPU_CHECK_COND(cs_dpl>cpu.cpl, "Interrupt to higher privilege", EXCEPTION_GP,(gate_sel & 0xfffc)+(type&CPU_INT_SOFTWARE)?0:1) switch (cs_desc.Type()) { case DESC_CODE_N_NC_A: case DESC_CODE_N_NC_NA: case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA: if (cs_dpl<cpu.cpl) { /* Prepare for gate to inner level */ CPU_CHECK_COND(!cs_desc.saved.seg.p, "INT:Inner level:CS segment not present", EXCEPTION_NP,(gate_sel & 0xfffc)+(type&CPU_INT_SOFTWARE)?0:1) CPU_CHECK_COND((reg_flags & FLAG_VM) && (cs_dpl!=0), "V86 interrupt calling codesegment with DPL>0", EXCEPTION_GP,gate_sel & 0xfffc) Bitu n_ss,n_esp; Bitu o_ss,o_esp; o_ss=SegValue(ss); o_esp=reg_esp; cpu_tss.Get_SSx_ESPx(cs_dpl,n_ss,n_esp); CPU_CHECK_COND((n_ss & 0xfffc)==0, "INT:Gate with SS zero selector", EXCEPTION_TS,(type&CPU_INT_SOFTWARE)?0:1) Descriptor n_ss_desc; CPU_CHECK_COND(!cpu.gdt.GetDescriptor(n_ss,n_ss_desc), "INT:Gate with SS beyond limit", EXCEPTION_TS,(n_ss & 0xfffc)+(type&CPU_INT_SOFTWARE)?0:1) CPU_CHECK_COND(((n_ss & 3)!=cs_dpl) || (n_ss_desc.DPL()!=cs_dpl), "INT:Inner level with CS_DPL!=SS_DPL and SS_RPL", EXCEPTION_TS,(n_ss & 0xfffc)+(type&CPU_INT_SOFTWARE)?0:1) // check if stack segment is a writable data segment switch (n_ss_desc.Type()) { case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A: case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A: break; default: E_Exit("INT:Inner level:Stack segment not writable."); // or #TS(ss_sel+EXT) } CPU_CHECK_COND(!n_ss_desc.saved.seg.p, "INT:Inner level with nonpresent SS", EXCEPTION_SS,(n_ss & 0xfffc)+(type&CPU_INT_SOFTWARE)?0:1) // commit point Segs.phys[ss]=n_ss_desc.GetBase(); Segs.val[ss]=n_ss; if (n_ss_desc.Big()) { cpu.stack.big=true; cpu.stack.mask=0xffffffff; cpu.stack.notmask=0; reg_esp=n_esp; } else { cpu.stack.big=false; cpu.stack.mask=0xffff; cpu.stack.notmask=0xffff0000; reg_sp=n_esp & 0xffff; } cpu.cpl=cs_dpl; if (gate.Type() & 0x8) { /* 32-bit Gate */ if (reg_flags & FLAG_VM) { CPU_Push32(SegValue(gs));SegSet16(gs,0x0); 
CPU_Push32(SegValue(fs));SegSet16(fs,0x0); CPU_Push32(SegValue(ds));SegSet16(ds,0x0); CPU_Push32(SegValue(es));SegSet16(es,0x0); } CPU_Push32(o_ss); CPU_Push32(o_esp); } else { /* 16-bit Gate */ if (reg_flags & FLAG_VM) E_Exit("V86 to 16-bit gate"); CPU_Push16(o_ss); CPU_Push16(o_esp); } // LOG_MSG("INT:Gate to inner level SS:%X SP:%X",n_ss,n_esp); goto do_interrupt; } if (cs_dpl!=cpu.cpl) E_Exit("Non-conforming intra privilege INT with DPL!=CPL"); case DESC_CODE_N_C_A: case DESC_CODE_N_C_NA: case DESC_CODE_R_C_A: case DESC_CODE_R_C_NA: /* Prepare stack for gate to same priviledge */ CPU_CHECK_COND(!cs_desc.saved.seg.p, "INT:Same level:CS segment not present", EXCEPTION_NP,(gate_sel & 0xfffc)+(type&CPU_INT_SOFTWARE)?0:1) if ((reg_flags & FLAG_VM) && (cs_dpl<cpu.cpl)) E_Exit("V86 interrupt doesn't change to pl0"); // or #GP(cs_sel) // commit point do_interrupt: if (gate.Type() & 0x8) { /* 32-bit Gate */ CPU_Push32(reg_flags); CPU_Push32(SegValue(cs)); CPU_Push32(oldeip); if (type & CPU_INT_HAS_ERROR) CPU_Push32(cpu.exception.error); } else { /* 16-bit gate */ CPU_Push16(reg_flags & 0xffff); CPU_Push16(SegValue(cs)); CPU_Push16(oldeip); if (type & CPU_INT_HAS_ERROR) CPU_Push16(cpu.exception.error); } break; default: E_Exit("INT:Gate Selector points to illegal descriptor with type %x",cs_desc.Type()); } Segs.val[cs]=(gate_sel&0xfffc) | cpu.cpl; Segs.phys[cs]=cs_desc.GetBase(); cpu.code.big=cs_desc.Big()>0; reg_eip=gate_off; if (!(gate.Type()&1)) { SETFLAGBIT(IF,false); } SETFLAGBIT(TF,false); SETFLAGBIT(NT,false); SETFLAGBIT(VM,false); LOG(LOG_CPU,LOG_NORMAL)("INT:Gate to %X:%X big %d %s",gate_sel,gate_off,cs_desc.Big(),gate.Type() & 0x8 ? 
"386" : "286"); return; } case DESC_TASK_GATE: CPU_CHECK_COND(!gate.saved.seg.p, "INT:Gate segment not present", EXCEPTION_NP,num*8+2+(type&CPU_INT_SOFTWARE)?0:1) CPU_SwitchTask(gate.GetSelector(),TSwitch_CALL_INT,oldeip); if (type & CPU_INT_HAS_ERROR) { //TODO Be sure about this, seems somewhat unclear if (cpu_tss.is386) CPU_Push32(cpu.exception.error); else CPU_Push16(cpu.exception.error); } return; default: E_Exit("Illegal descriptor type %X for int %X",gate.Type(),num); } } assert(1); return ; // make compiler happy } void CPU_IRET(bool use32,Bitu oldeip) { if (!cpu.pmode) { /* RealMode IRET */ if (use32) { reg_eip=CPU_Pop32(); SegSet16(cs,CPU_Pop32()); CPU_SetFlags(CPU_Pop32(),FMASK_ALL); } else { reg_eip=CPU_Pop16(); SegSet16(cs,CPU_Pop16()); CPU_SetFlags(CPU_Pop16(),FMASK_ALL & 0xffff); } cpu.code.big=false; DestroyConditionFlags(); return; } else { /* Protected mode IRET */ if (reg_flags & FLAG_VM) { if ((reg_flags & FLAG_IOPL)!=FLAG_IOPL) { // win3.x e CPU_Exception(EXCEPTION_GP,0); return; } else { if (use32) { Bit32u new_eip=mem_readd(SegPhys(ss) + (reg_esp & cpu.stack.mask)); Bit32u tempesp=(reg_esp&cpu.stack.notmask)|((reg_esp+4)&cpu.stack.mask); Bit32u new_cs=mem_readd(SegPhys(ss) + (tempesp & cpu.stack.mask)); tempesp=(tempesp&cpu.stack.notmask)|((tempesp+4)&cpu.stack.mask); Bit32u new_flags=mem_readd(SegPhys(ss) + (tempesp & cpu.stack.mask)); reg_esp=(tempesp&cpu.stack.notmask)|((tempesp+4)&cpu.stack.mask); reg_eip=new_eip; SegSet16(cs,(Bit16u)(new_cs&0xffff)); /* IOPL can not be modified in v86 mode by IRET */ CPU_SetFlags(new_flags,FMASK_NORMAL|FLAG_NT); } else { Bit16u new_eip=mem_readw(SegPhys(ss) + (reg_esp & cpu.stack.mask)); Bit32u tempesp=(reg_esp&cpu.stack.notmask)|((reg_esp+2)&cpu.stack.mask); Bit16u new_cs=mem_readw(SegPhys(ss) + (tempesp & cpu.stack.mask)); tempesp=(tempesp&cpu.stack.notmask)|((tempesp+2)&cpu.stack.mask); Bit16u new_flags=mem_readw(SegPhys(ss) + (tempesp & cpu.stack.mask)); 
reg_esp=(tempesp&cpu.stack.notmask)|((tempesp+2)&cpu.stack.mask); reg_eip=(Bit32u)new_eip; SegSet16(cs,new_cs); /* IOPL can not be modified in v86 mode by IRET */ CPU_SetFlags(new_flags,FMASK_NORMAL|FLAG_NT); } cpu.code.big=false; DestroyConditionFlags(); return; } } /* Check if this is task IRET */ if (GETFLAG(NT)) { if (GETFLAG(VM)) E_Exit("Pmode IRET with VM bit set"); CPU_CHECK_COND(!cpu_tss.IsValid(), "TASK Iret without valid TSS", EXCEPTION_TS,cpu_tss.selector & 0xfffc) if (!cpu_tss.desc.IsBusy()) { LOG(LOG_CPU,LOG_ERROR)("TASK Iret:TSS not busy"); } Bitu back_link=cpu_tss.Get_back(); CPU_SwitchTask(back_link,TSwitch_IRET,oldeip); return; } Bitu n_cs_sel,n_eip,n_flags; Bit32u tempesp; if (use32) { n_eip=mem_readd(SegPhys(ss) + (reg_esp & cpu.stack.mask)); tempesp=(reg_esp&cpu.stack.notmask)|((reg_esp+4)&cpu.stack.mask); n_cs_sel=mem_readd(SegPhys(ss) + (tempesp & cpu.stack.mask)) & 0xffff; tempesp=(tempesp&cpu.stack.notmask)|((tempesp+4)&cpu.stack.mask); n_flags=mem_readd(SegPhys(ss) + (tempesp & cpu.stack.mask)); tempesp=(tempesp&cpu.stack.notmask)|((tempesp+4)&cpu.stack.mask); if ((n_flags & FLAG_VM) && (cpu.cpl==0)) { // commit point reg_esp=tempesp; reg_eip=n_eip & 0xffff; Bitu n_ss,n_esp,n_es,n_ds,n_fs,n_gs; n_esp=CPU_Pop32(); n_ss=CPU_Pop32() & 0xffff; n_es=CPU_Pop32() & 0xffff; n_ds=CPU_Pop32() & 0xffff; n_fs=CPU_Pop32() & 0xffff; n_gs=CPU_Pop32() & 0xffff; CPU_SetFlags(n_flags,FMASK_ALL | FLAG_VM); DestroyConditionFlags(); cpu.cpl=3; CPU_SetSegGeneral(ss,n_ss); CPU_SetSegGeneral(es,n_es); CPU_SetSegGeneral(ds,n_ds); CPU_SetSegGeneral(fs,n_fs); CPU_SetSegGeneral(gs,n_gs); reg_esp=n_esp; cpu.code.big=false; SegSet16(cs,n_cs_sel); LOG(LOG_CPU,LOG_NORMAL)("IRET:Back to V86: CS:%X IP %X SS:%X SP %X FLAGS:%X",SegValue(cs),reg_eip,SegValue(ss),reg_esp,reg_flags); return; } if (n_flags & FLAG_VM) E_Exit("IRET from pmode to v86 with CPL!=0"); } else { n_eip=mem_readw(SegPhys(ss) + (reg_esp & cpu.stack.mask)); 
tempesp=(reg_esp&cpu.stack.notmask)|((reg_esp+2)&cpu.stack.mask); n_cs_sel=mem_readw(SegPhys(ss) + (tempesp & cpu.stack.mask)); tempesp=(tempesp&cpu.stack.notmask)|((tempesp+2)&cpu.stack.mask); n_flags=mem_readw(SegPhys(ss) + (tempesp & cpu.stack.mask)); n_flags|=(reg_flags & 0xffff0000); tempesp=(tempesp&cpu.stack.notmask)|((tempesp+2)&cpu.stack.mask); if (n_flags & FLAG_VM) E_Exit("VM Flag in 16-bit iret"); } CPU_CHECK_COND((n_cs_sel & 0xfffc)==0, "IRET:CS selector zero", EXCEPTION_GP,0) Bitu n_cs_rpl=n_cs_sel & 3; Descriptor n_cs_desc; CPU_CHECK_COND(!cpu.gdt.GetDescriptor(n_cs_sel,n_cs_desc), "IRET:CS selector beyond limits", EXCEPTION_GP,n_cs_sel & 0xfffc) CPU_CHECK_COND(n_cs_rpl<cpu.cpl, "IRET to lower privilege", EXCEPTION_GP,n_cs_sel & 0xfffc) switch (n_cs_desc.Type()) { case DESC_CODE_N_NC_A: case DESC_CODE_N_NC_NA: case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA: CPU_CHECK_COND(n_cs_rpl!=n_cs_desc.DPL(), "IRET:NC:DPL!=RPL", EXCEPTION_GP,n_cs_sel & 0xfffc) break; case DESC_CODE_N_C_A: case DESC_CODE_N_C_NA: case DESC_CODE_R_C_A: case DESC_CODE_R_C_NA: CPU_CHECK_COND(n_cs_desc.DPL()>n_cs_rpl, "IRET:C:DPL>RPL", EXCEPTION_GP,n_cs_sel & 0xfffc) break; default: E_Exit("IRET:Illegal descriptor type %X",n_cs_desc.Type()); } CPU_CHECK_COND(!n_cs_desc.saved.seg.p, "IRET with nonpresent code segment", EXCEPTION_NP,n_cs_sel & 0xfffc) if (n_cs_rpl==cpu.cpl) { /* Return to same level */ // commit point reg_esp=tempesp; Segs.phys[cs]=n_cs_desc.GetBase(); cpu.code.big=n_cs_desc.Big()>0; Segs.val[cs]=n_cs_sel; reg_eip=n_eip; Bitu mask=cpu.cpl ? 
(FMASK_NORMAL | FLAG_NT) : FMASK_ALL; if (GETFLAG_IOPL<cpu.cpl) mask &= (~FLAG_IF); CPU_SetFlags(n_flags,mask); DestroyConditionFlags(); LOG(LOG_CPU,LOG_NORMAL)("IRET:Same level:%X:%X big %d",n_cs_sel,n_eip,cpu.code.big); } else { /* Return to outer level */ Bitu n_ss,n_esp; if (use32) { n_esp=mem_readd(SegPhys(ss) + (tempesp & cpu.stack.mask)); tempesp=(tempesp&cpu.stack.notmask)|((tempesp+4)&cpu.stack.mask); n_ss=mem_readd(SegPhys(ss) + (tempesp & cpu.stack.mask)) & 0xffff; } else { n_esp=mem_readw(SegPhys(ss) + (tempesp & cpu.stack.mask)); tempesp=(tempesp&cpu.stack.notmask)|((tempesp+2)&cpu.stack.mask); n_ss=mem_readw(SegPhys(ss) + (tempesp & cpu.stack.mask)); } CPU_CHECK_COND((n_ss & 0xfffc)==0, "IRET:Outer level:SS selector zero", EXCEPTION_GP,0) CPU_CHECK_COND((n_ss & 3)!=n_cs_rpl, "IRET:Outer level:SS rpl!=CS rpl", EXCEPTION_GP,n_ss & 0xfffc) Descriptor n_ss_desc; CPU_CHECK_COND(!cpu.gdt.GetDescriptor(n_ss,n_ss_desc), "IRET:Outer level:SS beyond limit", EXCEPTION_GP,n_ss & 0xfffc) CPU_CHECK_COND(n_ss_desc.DPL()!=n_cs_rpl, "IRET:Outer level:SS dpl!=CS rpl", EXCEPTION_GP,n_ss & 0xfffc) // check if stack segment is a writable data segment switch (n_ss_desc.Type()) { case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A: case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A: break; default: E_Exit("IRET:Outer level:Stack segment not writable"); // or #GP(ss_sel) } CPU_CHECK_COND(!n_ss_desc.saved.seg.p, "IRET:Outer level:Stack segment not present", EXCEPTION_NP,n_ss & 0xfffc) // commit point Segs.phys[cs]=n_cs_desc.GetBase(); cpu.code.big=n_cs_desc.Big()>0; Segs.val[cs]=n_cs_sel; Bitu mask=cpu.cpl ? 
(FMASK_NORMAL | FLAG_NT) : FMASK_ALL; if (GETFLAG_IOPL<cpu.cpl) mask &= (~FLAG_IF); CPU_SetFlags(n_flags,mask); DestroyConditionFlags(); cpu.cpl=n_cs_rpl; reg_eip=n_eip; Segs.val[ss]=n_ss; Segs.phys[ss]=n_ss_desc.GetBase(); if (n_ss_desc.Big()) { cpu.stack.big=true; cpu.stack.mask=0xffffffff; cpu.stack.notmask=0; reg_esp=n_esp; } else { cpu.stack.big=false; cpu.stack.mask=0xffff; cpu.stack.notmask=0xffff0000; reg_sp=n_esp & 0xffff; } // borland extender, zrdx CPU_CheckSegments(); LOG(LOG_CPU,LOG_NORMAL)("IRET:Outer level:%X:%X big %d",n_cs_sel,n_eip,cpu.code.big); } return; } } void CPU_JMP(bool use32,Bitu selector,Bitu offset,Bitu oldeip) { if (!cpu.pmode || (reg_flags & FLAG_VM)) { if (!use32) { reg_eip=offset&0xffff; } else { reg_eip=offset; } SegSet16(cs,selector); cpu.code.big=false; return; } else { CPU_CHECK_COND((selector & 0xfffc)==0, "JMP:CS selector zero", EXCEPTION_GP,0) Bitu rpl=selector & 3; Descriptor desc; CPU_CHECK_COND(!cpu.gdt.GetDescriptor(selector,desc), "JMP:CS beyond limits", EXCEPTION_GP,selector & 0xfffc) switch (desc.Type()) { case DESC_CODE_N_NC_A: case DESC_CODE_N_NC_NA: case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA: CPU_CHECK_COND(rpl>cpu.cpl, "JMP:NC:RPL>CPL", EXCEPTION_GP,selector & 0xfffc) CPU_CHECK_COND(cpu.cpl!=desc.DPL(), "JMP:NC:RPL != DPL", EXCEPTION_GP,selector & 0xfffc) LOG(LOG_CPU,LOG_NORMAL)("JMP:Code:NC to %X:%X big %d",selector,offset,desc.Big()); goto CODE_jmp; case DESC_CODE_N_C_A: case DESC_CODE_N_C_NA: case DESC_CODE_R_C_A: case DESC_CODE_R_C_NA: LOG(LOG_CPU,LOG_NORMAL)("JMP:Code:C to %X:%X big %d",selector,offset,desc.Big()); CPU_CHECK_COND(cpu.cpl<desc.DPL(), "JMP:C:CPL < DPL", EXCEPTION_GP,selector & 0xfffc) CODE_jmp: if (!desc.saved.seg.p) { // win CPU_Exception(EXCEPTION_NP,selector & 0xfffc); return; } /* Normal jump to another selector:offset */ Segs.phys[cs]=desc.GetBase(); cpu.code.big=desc.Big()>0; Segs.val[cs]=(selector & 0xfffc) | cpu.cpl; reg_eip=offset; return; case DESC_386_TSS_A: 
CPU_CHECK_COND(desc.DPL()<cpu.cpl,
	"JMP:TSS:dpl<cpl",
	EXCEPTION_GP,selector & 0xfffc)
		CPU_CHECK_COND(desc.DPL()<rpl,
			"JMP:TSS:dpl<rpl",
			EXCEPTION_GP,selector & 0xfffc)
		LOG(LOG_CPU,LOG_NORMAL)("JMP:TSS to %X",selector);
		// JMP to an available TSS performs a task switch.
		CPU_SwitchTask(selector,TSwitch_JMP,oldeip);
		break;
	default:
		E_Exit("JMP Illegal descriptor type %X",desc.Type());
	}
	}
	assert(1);
}

// Emulate a far CALL. Real/V86 mode pushes CS:IP and reloads them;
// protected mode dispatches on the target descriptor: direct code segment
// (conforming or not), 286/386 call gate (possibly switching to an inner
// privilege level's stack from the TSS and copying gate parameters), or an
// available 386 TSS (task switch). oldeip is the return EIP to push.
void CPU_CALL(bool use32,Bitu selector,Bitu offset,Bitu oldeip) {
	if (!cpu.pmode || (reg_flags & FLAG_VM)) {
		if (!use32) {
			CPU_Push16(SegValue(cs));
			CPU_Push16(oldeip);
			reg_eip=offset&0xffff;
		} else {
			CPU_Push32(SegValue(cs));
			CPU_Push32(oldeip);
			reg_eip=offset;
		}
		cpu.code.big=false;
		SegSet16(cs,selector);
		return;
	} else {
		CPU_CHECK_COND((selector & 0xfffc)==0,
			"CALL:CS selector zero",
			EXCEPTION_GP,0)
		Bitu rpl=selector & 3;
		Descriptor call;
		CPU_CHECK_COND(!cpu.gdt.GetDescriptor(selector,call),
			"CALL:CS beyond limits",
			EXCEPTION_GP,selector & 0xfffc)
		/* Check for type of far call */
		switch (call.Type()) {
		case DESC_CODE_N_NC_A:case DESC_CODE_N_NC_NA:
		case DESC_CODE_R_NC_A:case DESC_CODE_R_NC_NA:
			// Direct call to non-conforming code: RPL<=CPL, DPL==CPL.
			CPU_CHECK_COND(rpl>cpu.cpl,
				"CALL:CODE:NC:RPL>CPL",
				EXCEPTION_GP,selector & 0xfffc)
			CPU_CHECK_COND(call.DPL()!=cpu.cpl,
				"CALL:CODE:NC:DPL!=CPL",
				EXCEPTION_GP,selector & 0xfffc)
			LOG(LOG_CPU,LOG_NORMAL)("CALL:CODE:NC to %X:%X",selector,offset);
			goto call_code;
		case DESC_CODE_N_C_A:case DESC_CODE_N_C_NA:
		case DESC_CODE_R_C_A:case DESC_CODE_R_C_NA:
			// Direct call to conforming code: DPL<=CPL; CPL unchanged.
			CPU_CHECK_COND(call.DPL()>cpu.cpl,
				"CALL:CODE:C:DPL>CPL",
				EXCEPTION_GP,selector & 0xfffc)
			LOG(LOG_CPU,LOG_NORMAL)("CALL:CODE:C to %X:%X",selector,offset);
call_code:
			if (!call.saved.seg.p) {
				// borland extender (RTM)
				CPU_Exception(EXCEPTION_NP,selector & 0xfffc);
				return;
			}
			// commit point
			if (!use32) {
				CPU_Push16(SegValue(cs));
				CPU_Push16(oldeip);
				reg_eip=offset & 0xffff;
			} else {
				CPU_Push32(SegValue(cs));
				CPU_Push32(oldeip);
				reg_eip=offset;
			}
			Segs.phys[cs]=call.GetBase();
			cpu.code.big=call.Big()>0;
			Segs.val[cs]=(selector & 0xfffc) | cpu.cpl;
			return;
		case DESC_386_CALL_GATE: case DESC_286_CALL_GATE:
			{
				// Call through a gate: the gate supplies the real CS:EIP.
				CPU_CHECK_COND(call.DPL()<cpu.cpl,
					"CALL:Gate:Gate DPL<CPL",
					EXCEPTION_GP,selector & 0xfffc)
				CPU_CHECK_COND(call.DPL()<rpl,
					"CALL:Gate:Gate DPL<RPL",
					EXCEPTION_GP,selector & 0xfffc)
				CPU_CHECK_COND(!call.saved.seg.p,
					"CALL:Gate:Segment not present",
					EXCEPTION_NP,selector & 0xfffc)
				Descriptor n_cs_desc;
				Bitu n_cs_sel=call.GetSelector();
				CPU_CHECK_COND((n_cs_sel & 0xfffc)==0,
					"CALL:Gate:CS selector zero",
					EXCEPTION_GP,0)
				CPU_CHECK_COND(!cpu.gdt.GetDescriptor(n_cs_sel,n_cs_desc),
					"CALL:Gate:CS beyond limits",
					EXCEPTION_GP,n_cs_sel & 0xfffc)
				Bitu n_cs_dpl	= n_cs_desc.DPL();
				CPU_CHECK_COND(n_cs_dpl>cpu.cpl,
					"CALL:Gate:CS DPL>CPL",
					EXCEPTION_GP,n_cs_sel & 0xfffc)
				CPU_CHECK_COND(!n_cs_desc.saved.seg.p,
					"CALL:Gate:CS not present",
					EXCEPTION_NP,n_cs_sel & 0xfffc)
				Bitu n_eip		= call.GetOffset();
				switch (n_cs_desc.Type()) {
				case DESC_CODE_N_NC_A:case DESC_CODE_N_NC_NA:
				case DESC_CODE_R_NC_A:case DESC_CODE_R_NC_NA:
					/* Check if we goto inner priviledge */
					if (n_cs_dpl < cpu.cpl) {
						/* Get new SS:ESP out of TSS */
						Bitu n_ss_sel,n_esp;
						Descriptor n_ss_desc;
						cpu_tss.Get_SSx_ESPx(n_cs_dpl,n_ss_sel,n_esp);
						CPU_CHECK_COND((n_ss_sel & 0xfffc)==0,
							"CALL:Gate:NC:SS selector zero",
							EXCEPTION_TS,0)
						CPU_CHECK_COND(!cpu.gdt.GetDescriptor(n_ss_sel,n_ss_desc),
							"CALL:Gate:Invalid SS selector",
							EXCEPTION_TS,n_ss_sel & 0xfffc)
						CPU_CHECK_COND(((n_ss_sel & 3)!=n_cs_desc.DPL()) || (n_ss_desc.DPL()!=n_cs_desc.DPL()),
							"CALL:Gate:Invalid SS selector privileges",
							EXCEPTION_TS,n_ss_sel & 0xfffc)
						switch (n_ss_desc.Type()) {
						case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A:
						case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A:
							// writable data segment
							break;
						default:
							E_Exit("Call:Gate:SS no writable data segment"); // or #TS(ss_sel)
						}
						CPU_CHECK_COND(!n_ss_desc.saved.seg.p,
							"CALL:Gate:Stack segment not present",
							EXCEPTION_SS,n_ss_sel & 0xfffc)
						/* Load the new SS:ESP and save data on it */
						Bitu o_esp	= reg_esp;
						Bitu o_ss	= SegValue(ss);
						PhysPt o_stack  = SegPhys(ss)+(reg_esp & cpu.stack.mask);
						// catch pagefaults
						// Touch all parameter words on the old stack first so
						// any page fault happens before state is committed.
						if (call.saved.gate.paramcount&31) {
							if (call.Type()==DESC_386_CALL_GATE) {
								for (Bits i=(call.saved.gate.paramcount&31)-1;i>=0;i--)
									mem_readd(o_stack+i*4);
							} else {
								for (Bits i=(call.saved.gate.paramcount&31)-1;i>=0;i--)
									mem_readw(o_stack+i*2);
							}
						}
						// commit point
						Segs.val[ss]=n_ss_sel;
						Segs.phys[ss]=n_ss_desc.GetBase();
						if (n_ss_desc.Big()) {
							cpu.stack.big=true;
							cpu.stack.mask=0xffffffff;
							cpu.stack.notmask=0;
							reg_esp=n_esp;
						} else {
							cpu.stack.big=false;
							cpu.stack.mask=0xffff;
							cpu.stack.notmask=0xffff0000;
							reg_sp=n_esp & 0xffff;
						}
						cpu.cpl = n_cs_desc.DPL();
						Bit16u oldcs    = SegValue(cs);
						/* Switch to new CS:EIP */
						Segs.phys[cs]	= n_cs_desc.GetBase();
						Segs.val[cs]	= (n_cs_sel & 0xfffc) | cpu.cpl;
						cpu.code.big	= n_cs_desc.Big()>0;
						reg_eip		= n_eip;
						if (!use32)	reg_eip&=0xffff;
						// Copy gate parameters from the old stack, then push
						// the old SS:ESP and return CS:EIP on the new stack.
						if (call.Type()==DESC_386_CALL_GATE) {
							CPU_Push32(o_ss);		//save old stack
							CPU_Push32(o_esp);
							if (call.saved.gate.paramcount&31)
								for (Bits i=(call.saved.gate.paramcount&31)-1;i>=0;i--)
									CPU_Push32(mem_readd(o_stack+i*4));
							CPU_Push32(oldcs);
							CPU_Push32(oldeip);
						} else {
							CPU_Push16(o_ss);		//save old stack
							CPU_Push16(o_esp);
							if (call.saved.gate.paramcount&31)
								for (Bits i=(call.saved.gate.paramcount&31)-1;i>=0;i--)
									CPU_Push16(mem_readw(o_stack+i*2));
							CPU_Push16(oldcs);
							CPU_Push16(oldeip);
						}
						break;
					} else if (n_cs_dpl > cpu.cpl)
						E_Exit("CALL:GATE:CS DPL>CPL"); // or #GP(sel)
					// NOTE(review): same-privilege non-conforming targets fall
					// through into the conforming-code path below — confirm
					// this fall-through is intentional before restructuring.
				case DESC_CODE_N_C_A:case DESC_CODE_N_C_NA:
				case DESC_CODE_R_C_A:case DESC_CODE_R_C_NA:
					// zrdx extender
					// Same-privilege gate call: no stack switch, just push
					// the return address and jump through the gate.
					if (call.Type()==DESC_386_CALL_GATE) {
						CPU_Push32(SegValue(cs));
						CPU_Push32(oldeip);
					} else {
						CPU_Push16(SegValue(cs));
						CPU_Push16(oldeip);
					}
					/* Switch to new CS:EIP */
					Segs.phys[cs]	= n_cs_desc.GetBase();
					Segs.val[cs]	= (n_cs_sel & 0xfffc) | cpu.cpl;
					cpu.code.big	= n_cs_desc.Big()>0;
					reg_eip		= n_eip;
					if (!use32)	reg_eip&=0xffff;
					break;
				default:
					E_Exit("CALL:GATE:CS no executable segment");
				}
			}	/* Call Gates */
			break;
		case DESC_386_TSS_A:
			CPU_CHECK_COND(call.DPL()<cpu.cpl,
				"CALL:TSS:dpl<cpl",
EXCEPTION_GP,selector & 0xfffc)
			CPU_CHECK_COND(call.DPL()<rpl,
				"CALL:TSS:dpl<rpl",
				EXCEPTION_GP,selector & 0xfffc)
			CPU_CHECK_COND(!call.saved.seg.p,
				"CALL:TSS:Segment not present",
				EXCEPTION_NP,selector & 0xfffc)
			LOG(LOG_CPU,LOG_NORMAL)("CALL:TSS to %X",selector);
			// CALL to an available TSS nests a task switch.
			CPU_SwitchTask(selector,TSwitch_CALL_INT,oldeip);
			break;
		case DESC_DATA_EU_RW_NA:	// vbdos
		case DESC_INVALID:			// used by some installers
			CPU_Exception(EXCEPTION_GP,selector & 0xfffc);
			return;
		default:
			E_Exit("CALL:Descriptor type %x unsupported",call.Type());
		}
	}
	assert(1);
}

// Emulate far RET (optionally releasing 'bytes' of parameters). Real/V86
// mode pops CS:IP; protected mode validates the return CS and handles both
// same-level returns and returns to an outer privilege level (which also
// pop and reload SS:ESP).
void CPU_RET(bool use32,Bitu bytes,Bitu oldeip) {
	if (!cpu.pmode || (reg_flags & FLAG_VM)) {
		Bitu new_ip,new_cs;
		if (!use32) {
			new_ip=CPU_Pop16();
			new_cs=CPU_Pop16();
		} else {
			new_ip=CPU_Pop32();
			new_cs=CPU_Pop32() & 0xffff;
		}
		reg_esp+=bytes;
		SegSet16(cs,new_cs);
		reg_eip=new_ip;
		cpu.code.big=false;
		return;
	} else {
		Bitu offset,selector;
		// Peek the return CS selector without popping yet.
		if (!use32) selector	= mem_readw(SegPhys(ss) + (reg_esp & cpu.stack.mask) + 2);
		else selector	= mem_readd(SegPhys(ss) + (reg_esp & cpu.stack.mask) + 4) & 0xffff;
		Descriptor desc;
		Bitu rpl=selector & 3;
		if(rpl < cpu.cpl) {
			// win setup
			CPU_Exception(EXCEPTION_GP,selector & 0xfffc);
			return;
		}
		CPU_CHECK_COND((selector & 0xfffc)==0,
			"RET:CS selector zero",
			EXCEPTION_GP,0)
		CPU_CHECK_COND(!cpu.gdt.GetDescriptor(selector,desc),
			"RET:CS beyond limits",
			EXCEPTION_GP,selector & 0xfffc)
		if (cpu.cpl==rpl) {
			/* Return to same level */
			switch (desc.Type()) {
			case DESC_CODE_N_NC_A:case DESC_CODE_N_NC_NA:
			case DESC_CODE_R_NC_A:case DESC_CODE_R_NC_NA:
				CPU_CHECK_COND(cpu.cpl!=desc.DPL(),
					"RET to NC segment of other privilege",
					EXCEPTION_GP,selector & 0xfffc)
				goto RET_same_level;
			case DESC_CODE_N_C_A:case DESC_CODE_N_C_NA:
			case DESC_CODE_R_C_A:case DESC_CODE_R_C_NA:
				CPU_CHECK_COND(desc.DPL()>cpu.cpl,
					"RET to C segment of higher privilege",
					EXCEPTION_GP,selector & 0xfffc)
				break;
			default:
				E_Exit("RET from illegal descriptor type %X",desc.Type());
			}
RET_same_level:
			if (!desc.saved.seg.p) {
				// borland extender (RTM)
				CPU_Exception(EXCEPTION_NP,selector & 0xfffc);
				return;
			}
			// commit point
			if (!use32) {
				offset=CPU_Pop16();
				selector=CPU_Pop16();
			} else {
				offset=CPU_Pop32();
				selector=CPU_Pop32() & 0xffff;
			}
			Segs.phys[cs]=desc.GetBase();
			cpu.code.big=desc.Big()>0;
			Segs.val[cs]=selector;
			reg_eip=offset;
			// Discard the caller-specified parameter bytes.
			if (cpu.stack.big) {
				reg_esp+=bytes;
			} else {
				reg_sp+=bytes;
			}
			LOG(LOG_CPU,LOG_NORMAL)("RET - Same level to %X:%X RPL %X DPL %X",selector,offset,rpl,desc.DPL());
			return;
		} else {
			/* Return to outer level */
			switch (desc.Type()) {
			case DESC_CODE_N_NC_A:case DESC_CODE_N_NC_NA:
			case DESC_CODE_R_NC_A:case DESC_CODE_R_NC_NA:
				CPU_CHECK_COND(desc.DPL()!=rpl,
					"RET to outer NC segment with DPL!=RPL",
					EXCEPTION_GP,selector & 0xfffc)
				break;
			case DESC_CODE_N_C_A:case DESC_CODE_N_C_NA:
			case DESC_CODE_R_C_A:case DESC_CODE_R_C_NA:
				CPU_CHECK_COND(desc.DPL()>rpl,
					"RET to outer C segment with DPL>RPL",
					EXCEPTION_GP,selector & 0xfffc)
				break;
			default:
				E_Exit("RET from illegal descriptor type %X",desc.Type());		// or #GP(selector)
			}
			CPU_CHECK_COND(!desc.saved.seg.p,
				"RET:Outer level:CS not present",
				EXCEPTION_NP,selector & 0xfffc)
			// commit point
			// Pop return CS:EIP, skip parameter bytes, then pop outer SS:ESP.
			Bitu n_esp,n_ss;
			if (use32) {
				offset=CPU_Pop32();
				selector=CPU_Pop32() & 0xffff;
				reg_esp+=bytes;
				n_esp = CPU_Pop32();
				n_ss = CPU_Pop32() & 0xffff;
			} else {
				offset=CPU_Pop16();
				selector=CPU_Pop16();
				reg_esp+=bytes;
				n_esp = CPU_Pop16();
				n_ss = CPU_Pop16();
			}
			CPU_CHECK_COND((n_ss & 0xfffc)==0,
				"RET to outer level with SS selector zero",
				EXCEPTION_GP,0)
			Descriptor n_ss_desc;
			CPU_CHECK_COND(!cpu.gdt.GetDescriptor(n_ss,n_ss_desc),
				"RET:SS beyond limits",
				EXCEPTION_GP,n_ss & 0xfffc)
			CPU_CHECK_COND(((n_ss & 3)!=rpl) || (n_ss_desc.DPL()!=rpl),
				"RET to outer segment with invalid SS privileges",
				EXCEPTION_GP,n_ss & 0xfffc)
			switch (n_ss_desc.Type()) {
			case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A:
			case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A:
				break;
			default:
				E_Exit("RET:SS selector type no writable data segment");	// or #GP(selector)
			}
			CPU_CHECK_COND(!n_ss_desc.saved.seg.p,
				"RET:Stack segment not present",
				EXCEPTION_SS,n_ss & 0xfffc)
			cpu.cpl = rpl;
			Segs.phys[cs]=desc.GetBase();
			cpu.code.big=desc.Big()>0;
			Segs.val[cs]=(selector&0xfffc) | cpu.cpl;
			reg_eip=offset;
			// Load the outer level's stack, adding 'bytes' there as well.
			Segs.val[ss]=n_ss;
			Segs.phys[ss]=n_ss_desc.GetBase();
			if (n_ss_desc.Big()) {
				cpu.stack.big=true;
				cpu.stack.mask=0xffffffff;
				cpu.stack.notmask=0;
				reg_esp=n_esp+bytes;
			} else {
				cpu.stack.big=false;
				cpu.stack.mask=0xffff;
				cpu.stack.notmask=0xffff0000;
				reg_sp=(n_esp & 0xffff)+bytes;
			}
			CPU_CheckSegments();
//			LOG(LOG_MISC,LOG_ERROR)("RET - Higher level to %X:%X RPL %X DPL %X",selector,offset,rpl,desc.DPL());
			return;
		}
		LOG(LOG_CPU,LOG_NORMAL)("Prot ret %X:%X",selector,offset);
		return;
	}
	assert(1);
}

// SLDT: return the current LDT selector.
Bitu CPU_SLDT(void) {
	return cpu.gdt.SLDT();
}

// LLDT: load a new LDT selector. Returns true on failure (error logged).
bool CPU_LLDT(Bitu selector) {
	if (!cpu.gdt.LLDT(selector)) {
		LOG(LOG_CPU,LOG_ERROR)("LLDT failed, selector=%X",selector);
		return true;
	}
	LOG(LOG_CPU,LOG_NORMAL)("LDT Set to %X",selector);
	return false;
}

// STR: return the current task register (TSS) selector.
Bitu CPU_STR(void) {
	return cpu_tss.selector;
}

// LTR: load the task register. A null selector just clears it; otherwise the
// selector must name a present, available 286/386 TSS in the GDT. Returns
// true when an exception has been prepared.
bool CPU_LTR(Bitu selector) {
	if ((selector & 0xfffc)==0) {
		cpu_tss.SetSelector(selector);
		return false;
	}
	TSS_Descriptor desc;
	// LDT-relative selectors (bit 2 set) are not allowed for LTR.
	if ((selector & 4) || (!cpu.gdt.GetDescriptor(selector,desc))) {
		LOG(LOG_CPU,LOG_ERROR)("LTR failed, selector=%X",selector);
		return CPU_PrepareException(EXCEPTION_GP,selector);
	}
	if ((desc.Type()==DESC_286_TSS_A) || (desc.Type()==DESC_386_TSS_A)) {
		if (!desc.saved.seg.p) {
			LOG(LOG_CPU,LOG_ERROR)("LTR failed, selector=%X (not present)",selector);
			return CPU_PrepareException(EXCEPTION_NP,selector);
		}
		if (!cpu_tss.SetSelector(selector)) E_Exit("LTR failed, selector=%X",selector);
		cpu_tss.desc.SetBusy(true);
		cpu_tss.SaveSelector();
	} else {
		/* Descriptor was no available TSS descriptor */
		LOG(LOG_CPU,LOG_NORMAL)("LTR failed, selector=%X (type=%X)",selector,desc.Type());
		return CPU_PrepareException(EXCEPTION_GP,selector);
	}
	return false;
}

// LGDT: set the GDT base and limit.
void CPU_LGDT(Bitu limit,Bitu base) {
	LOG(LOG_CPU,LOG_NORMAL)("GDT Set to base:%X 
limit:%X",base,limit); cpu.gdt.SetLimit(limit); cpu.gdt.SetBase(base); } void CPU_LIDT(Bitu limit,Bitu base) { LOG(LOG_CPU,LOG_NORMAL)("IDT Set to base:%X limit:%X",base,limit); cpu.idt.SetLimit(limit); cpu.idt.SetBase(base); } Bitu CPU_SGDT_base(void) { return cpu.gdt.GetBase(); } Bitu CPU_SGDT_limit(void) { return cpu.gdt.GetLimit(); } Bitu CPU_SIDT_base(void) { return cpu.idt.GetBase(); } Bitu CPU_SIDT_limit(void) { return cpu.idt.GetLimit(); } static bool printed_cycles_auto_info = false; void CPU_SET_CRX(Bitu cr,Bitu value) { switch (cr) { case 0: { value|=CR0_FPUPRESENT; Bitu changed=cpu.cr0 ^ value; if (!changed) return; cpu.cr0=value; if (value & CR0_PROTECTION) { cpu.pmode=true; LOG(LOG_CPU,LOG_NORMAL)("Protected mode"); PAGING_Enable((value & CR0_PAGING)>0); if (!(CPU_AutoDetermineMode&CPU_AUTODETERMINE_MASK)) break; if (CPU_AutoDetermineMode&CPU_AUTODETERMINE_CYCLES) { CPU_CycleAutoAdjust=true; CPU_CycleLeft=0; CPU_Cycles=0; CPU_OldCycleMax=CPU_CycleMax; GFX_SetTitle(CPU_CyclePercUsed,-1,false); if(!printed_cycles_auto_info) { printed_cycles_auto_info = true; LOG_MSG("DOSBox switched to max cycles, because of the setting: cycles=auto. 
If the game runs too fast try a fixed cycles amount in DOSBox's options."); } } else { GFX_SetTitle(-1,-1,false); } #if (C_DYNAMIC_X86) if (CPU_AutoDetermineMode&CPU_AUTODETERMINE_CORE) { CPU_Core_Dyn_X86_Cache_Init(true); cpudecoder=&CPU_Core_Dyn_X86_Run; } #elif (C_DYNREC) if (CPU_AutoDetermineMode&CPU_AUTODETERMINE_CORE) { CPU_Core_Dynrec_Cache_Init(true); cpudecoder=&CPU_Core_Dynrec_Run; } #endif CPU_AutoDetermineMode<<=CPU_AUTODETERMINE_SHIFT; } else { cpu.pmode=false; if (value & CR0_PAGING) LOG_MSG("Paging requested without PE=1"); PAGING_Enable(false); LOG(LOG_CPU,LOG_NORMAL)("Real mode"); } break; } case 2: paging.cr2=value; break; case 3: PAGING_SetDirBase(value); break; default: LOG(LOG_CPU,LOG_ERROR)("Unhandled MOV CR%d,%X",cr,value); break; } } bool CPU_WRITE_CRX(Bitu cr,Bitu value) { /* Check if privileged to access control registers */ if (cpu.pmode && (cpu.cpl>0)) return CPU_PrepareException(EXCEPTION_GP,0); if ((cr==1) || (cr>4)) return CPU_PrepareException(EXCEPTION_UD,0); if (CPU_ArchitectureType<CPU_ARCHTYPE_486OLDSLOW) { if (cr==4) return CPU_PrepareException(EXCEPTION_UD,0); } CPU_SET_CRX(cr,value); return false; } Bitu CPU_GET_CRX(Bitu cr) { switch (cr) { case 0: if (CPU_ArchitectureType>=CPU_ARCHTYPE_PENTIUMSLOW) return cpu.cr0; else if (CPU_ArchitectureType>=CPU_ARCHTYPE_486OLDSLOW) return (cpu.cr0 & 0xe005003f); else return (cpu.cr0 | 0x7ffffff0); case 2: return paging.cr2; case 3: return PAGING_GetDirBase() & 0xfffff000; default: LOG(LOG_CPU,LOG_ERROR)("Unhandled MOV XXX, CR%d",cr); break; } return 0; } bool CPU_READ_CRX(Bitu cr,Bit32u & retvalue) { /* Check if privileged to access control registers */ if (cpu.pmode && (cpu.cpl>0)) return CPU_PrepareException(EXCEPTION_GP,0); if ((cr==1) || (cr>4)) return CPU_PrepareException(EXCEPTION_UD,0); retvalue=CPU_GET_CRX(cr); return false; } bool CPU_WRITE_DRX(Bitu dr,Bitu value) { /* Check if privileged to access control registers */ if (cpu.pmode && (cpu.cpl>0)) return 
CPU_PrepareException(EXCEPTION_GP,0);
	switch (dr) {
	case 0:
	case 1:
	case 2:
	case 3:
		cpu.drx[dr]=value;	// breakpoint linear addresses
		break;
	case 4:
	case 6:
		// DR4 aliases DR6; force the always-set/always-clear bits.
		cpu.drx[6]=(value|0xffff0ff0) & 0xffffefff;
		break;
	case 5:
	case 7:
		// DR5 aliases DR7; bit 10 is always set, and pre-Pentium parts
		// keep additional bits clear.
		if (CPU_ArchitectureType<CPU_ARCHTYPE_PENTIUMSLOW) {
			cpu.drx[7]=(value|0x400) & 0xffff2fff;
		} else {
			cpu.drx[7]=(value|0x400);
		}
		break;
	default:
		LOG(LOG_CPU,LOG_ERROR)("Unhandled MOV DR%d,%X",dr,value);
		break;
	}
	return false;
}

// MOV reg,DRx (DR4/DR5 alias DR6/DR7). Returns true when an exception was
// prepared; otherwise the value is delivered via retvalue.
bool CPU_READ_DRX(Bitu dr,Bit32u & retvalue) {
	/* Check if privileged to access control registers */
	if (cpu.pmode && (cpu.cpl>0)) return CPU_PrepareException(EXCEPTION_GP,0);
	switch (dr) {
	case 0:
	case 1:
	case 2:
	case 3:
	case 6:
	case 7:
		retvalue=cpu.drx[dr];
		break;
	case 4:
		retvalue=cpu.drx[6];
		break;
	case 5:
		retvalue=cpu.drx[7];
		break;
	default:
		LOG(LOG_CPU,LOG_ERROR)("Unhandled MOV XXX, DR%d",dr);
		retvalue=0;
		break;
	}
	return false;
}

// MOV TRx,reg — only the legacy test registers TR6/TR7 are accepted.
bool CPU_WRITE_TRX(Bitu tr,Bitu value) {
	/* Check if privileged to access control registers */
	if (cpu.pmode && (cpu.cpl>0)) return CPU_PrepareException(EXCEPTION_GP,0);
	switch (tr) {
//	case 3:
	case 6:
	case 7:
		cpu.trx[tr]=value;
		return false;
	default:
		LOG(LOG_CPU,LOG_ERROR)("Unhandled MOV TR%d,%X",tr,value);
		break;
	}
	return CPU_PrepareException(EXCEPTION_UD,0);
}

// MOV reg,TRx — only TR6/TR7 are accepted.
bool CPU_READ_TRX(Bitu tr,Bit32u & retvalue) {
	/* Check if privileged to access control registers */
	if (cpu.pmode && (cpu.cpl>0)) return CPU_PrepareException(EXCEPTION_GP,0);
	switch (tr) {
//	case 3:
	case 6:
	case 7:
		retvalue=cpu.trx[tr];
		return false;
	default:
		LOG(LOG_CPU,LOG_ERROR)("Unhandled MOV XXX, TR%d",tr);
		break;
	}
	return CPU_PrepareException(EXCEPTION_UD,0);
}

// SMSW: return the machine status word (low part of CR0).
Bitu CPU_SMSW(void) {
	return cpu.cr0;
}

// LMSW: load the low 4 bits of CR0. PE (bit 0) can be set but never cleared.
bool CPU_LMSW(Bitu word) {
	if (cpu.pmode && (cpu.cpl>0)) return CPU_PrepareException(EXCEPTION_GP,0);
	word&=0xf;
	if (cpu.cr0 & 1) word|=1;
	word|=(cpu.cr0&0xfffffff0);
	CPU_SET_CRX(0,word);
	return false;
}

// ARPL: raise dest_sel's RPL to src_sel's RPL if it is lower; ZF reports
// whether an adjustment was made.
void CPU_ARPL(Bitu & dest_sel,Bitu src_sel) {
	FillFlags();
	if ((dest_sel & 3) < (src_sel & 3)) {
		dest_sel=(dest_sel & 0xfffc) + (src_sel & 3);
//		dest_sel|=0xff3f0000;
		SETFLAGBIT(ZF,true);
	} else {
		SETFLAGBIT(ZF,false);
	}
}

// LAR: load the access-rights bytes of a descriptor into 'ar' when the
// selector is valid and visible at the current CPL/RPL; ZF signals success.
void CPU_LAR(Bitu selector,Bitu & ar) {
	FillFlags();
	if (selector == 0) {
		SETFLAGBIT(ZF,false);
		return;
	}
	Descriptor desc;Bitu rpl=selector & 3;
	if (!cpu.gdt.GetDescriptor(selector,desc)){
		SETFLAGBIT(ZF,false);
		return;
	}
	switch (desc.Type()){
	case DESC_CODE_N_C_A:	case DESC_CODE_N_C_NA:
	case DESC_CODE_R_C_A:	case DESC_CODE_R_C_NA:
		// Conforming code: always readable regardless of DPL.
		break;
	// NOTE(review): the stray '{' after the 286 gate cases (matched by the
	// '}' after the return) looks accidental but is harmless; confirm
	// against upstream before tidying.
	case DESC_286_INT_GATE:		case DESC_286_TRAP_GATE:	{
	case DESC_386_INT_GATE:		case DESC_386_TRAP_GATE:
		SETFLAGBIT(ZF,false);
		return;
	}
	case DESC_LDT:
	case DESC_TASK_GATE:
	case DESC_286_TSS_A:		case DESC_286_TSS_B:
	case DESC_286_CALL_GATE:
	case DESC_386_TSS_A:		case DESC_386_TSS_B:
	case DESC_386_CALL_GATE:
	case DESC_DATA_EU_RO_NA:	case DESC_DATA_EU_RO_A:
	case DESC_DATA_EU_RW_NA:	case DESC_DATA_EU_RW_A:
	case DESC_DATA_ED_RO_NA:	case DESC_DATA_ED_RO_A:
	case DESC_DATA_ED_RW_NA:	case DESC_DATA_ED_RW_A:
	case DESC_CODE_N_NC_A:		case DESC_CODE_N_NC_NA:
	case DESC_CODE_R_NC_A:		case DESC_CODE_R_NC_NA:
		// Privilege check: descriptor must be visible at CPL and RPL.
		if (desc.DPL()<cpu.cpl || desc.DPL() < rpl) {
			SETFLAGBIT(ZF,false);
			return;
		}
		break;
	default:
		SETFLAGBIT(ZF,false);
		return;
	}
	/* Valid descriptor */
	ar=desc.saved.fill[1] & 0x00ffff00;
	SETFLAGBIT(ZF,true);
}

// LSL: load the (byte-granular) segment limit into 'limit' when the selector
// names a limit-bearing descriptor visible at the current CPL/RPL; ZF
// signals success.
void CPU_LSL(Bitu selector,Bitu & limit) {
	FillFlags();
	if (selector == 0) {
		SETFLAGBIT(ZF,false);
		return;
	}
	Descriptor desc;Bitu rpl=selector & 3;
	if (!cpu.gdt.GetDescriptor(selector,desc)){
		SETFLAGBIT(ZF,false);
		return;
	}
	switch (desc.Type()){
	case DESC_CODE_N_C_A:	case DESC_CODE_N_C_NA:
	case DESC_CODE_R_C_A:	case DESC_CODE_R_C_NA:
		break;
	case DESC_LDT:
	case DESC_286_TSS_A:		case DESC_286_TSS_B:
	case DESC_386_TSS_A:		case DESC_386_TSS_B:
	case DESC_DATA_EU_RO_NA:	case DESC_DATA_EU_RO_A:
	case DESC_DATA_EU_RW_NA:	case DESC_DATA_EU_RW_A:
	case DESC_DATA_ED_RO_NA:	case DESC_DATA_ED_RO_A:
	case DESC_DATA_ED_RW_NA:	case DESC_DATA_ED_RW_A:
	case DESC_CODE_N_NC_A:		case DESC_CODE_N_NC_NA:
	case DESC_CODE_R_NC_A:		case DESC_CODE_R_NC_NA:
		if (desc.DPL()<cpu.cpl || desc.DPL() < rpl) {
SETFLAGBIT(ZF,false);
			return;
		}
		break;
	default:
		SETFLAGBIT(ZF,false);
		return;
	}
	limit=desc.GetLimit();
	SETFLAGBIT(ZF,true);
}

// VERR: set ZF if the selector names a segment readable at the current
// CPL/RPL (conforming readable code skips the privilege check).
void CPU_VERR(Bitu selector) {
	FillFlags();
	if (selector == 0) {
		SETFLAGBIT(ZF,false);
		return;
	}
	Descriptor desc;Bitu rpl=selector & 3;
	if (!cpu.gdt.GetDescriptor(selector,desc)){
		SETFLAGBIT(ZF,false);
		return;
	}
	switch (desc.Type()){
	case DESC_CODE_R_C_A:		case DESC_CODE_R_C_NA:
		//Conforming readable code segments can be always read
		break;
	case DESC_DATA_EU_RO_NA:	case DESC_DATA_EU_RO_A:
	case DESC_DATA_EU_RW_NA:	case DESC_DATA_EU_RW_A:
	case DESC_DATA_ED_RO_NA:	case DESC_DATA_ED_RO_A:
	case DESC_DATA_ED_RW_NA:	case DESC_DATA_ED_RW_A:
	case DESC_CODE_R_NC_A:		case DESC_CODE_R_NC_NA:
		if (desc.DPL()<cpu.cpl || desc.DPL() < rpl) {
			SETFLAGBIT(ZF,false);
			return;
		}
		break;
	default:
		SETFLAGBIT(ZF,false);
		return;
	}
	SETFLAGBIT(ZF,true);
}

// VERW: set ZF if the selector names a data segment writable at the current
// CPL/RPL.
void CPU_VERW(Bitu selector) {
	FillFlags();
	if (selector == 0) {
		SETFLAGBIT(ZF,false);
		return;
	}
	Descriptor desc;Bitu rpl=selector & 3;
	if (!cpu.gdt.GetDescriptor(selector,desc)){
		SETFLAGBIT(ZF,false);
		return;
	}
	switch (desc.Type()){
	case DESC_DATA_EU_RW_NA:	case DESC_DATA_EU_RW_A:
	case DESC_DATA_ED_RW_NA:	case DESC_DATA_ED_RW_A:
		if (desc.DPL()<cpu.cpl || desc.DPL() < rpl) {
			SETFLAGBIT(ZF,false);
			return;
		}
		break;
	default:
		SETFLAGBIT(ZF,false);
		return;
	}
	SETFLAGBIT(ZF,true);
}

// Load a data/stack segment register (mov sreg / pop sreg / lds etc.).
// Real/V86 mode: base = selector<<4. Protected mode: full descriptor
// validation; SS gets the strict checks (and updates the stack size masks),
// other registers accept a null selector. Returns true when an exception
// has been prepared instead.
bool CPU_SetSegGeneral(SegNames seg,Bitu value) {
	value &= 0xffff;
	if (!cpu.pmode || (reg_flags & FLAG_VM)) {
		Segs.val[seg]=value;
		Segs.phys[seg]=value << 4;
		if (seg==ss) {
			cpu.stack.big=false;
			cpu.stack.mask=0xffff;
			cpu.stack.notmask=0xffff0000;
		}
		return false;
	} else {
		if (seg==ss) {
			// Stack needs to be non-zero
			if ((value & 0xfffc)==0) {
				E_Exit("CPU_SetSegGeneral: Stack segment zero");
//				return CPU_PrepareException(EXCEPTION_GP,0);
			}
			Descriptor desc;
			if (!cpu.gdt.GetDescriptor(value,desc)) {
				E_Exit("CPU_SetSegGeneral: Stack segment beyond limits");
//				return CPU_PrepareException(EXCEPTION_GP,value & 0xfffc);
			}
			// SS requires RPL==CPL and DPL==CPL.
			if (((value & 3)!=cpu.cpl) || (desc.DPL()!=cpu.cpl)) {
				E_Exit("CPU_SetSegGeneral: Stack segment with invalid privileges");
//				return CPU_PrepareException(EXCEPTION_GP,value & 0xfffc);
			}
			switch (desc.Type()) {
			case DESC_DATA_EU_RW_NA:	case DESC_DATA_EU_RW_A:
			case DESC_DATA_ED_RW_NA:	case DESC_DATA_ED_RW_A:
				break;
			default:
				//Earth Siege 1
				return CPU_PrepareException(EXCEPTION_GP,value & 0xfffc);
			}
			if (!desc.saved.seg.p) {
//				E_Exit("CPU_SetSegGeneral: Stack segment not present"); // or #SS(sel)
				return CPU_PrepareException(EXCEPTION_SS,value & 0xfffc);
			}
			Segs.val[seg]=value;
			Segs.phys[seg]=desc.GetBase();
			// Stack width follows the descriptor's B (big) bit.
			if (desc.Big()) {
				cpu.stack.big=true;
				cpu.stack.mask=0xffffffff;
				cpu.stack.notmask=0;
			} else {
				cpu.stack.big=false;
				cpu.stack.mask=0xffff;
				cpu.stack.notmask=0xffff0000;
			}
		} else {
			// Null selector is legal for DS/ES/FS/GS (faults only on use).
			if ((value & 0xfffc)==0) {
				Segs.val[seg]=value;
				Segs.phys[seg]=0;	// ??
				return false;
			}
			Descriptor desc;
			if (!cpu.gdt.GetDescriptor(value,desc)) {
				return CPU_PrepareException(EXCEPTION_GP,value & 0xfffc);
			}
			switch (desc.Type()) {
			case DESC_DATA_EU_RO_NA:	case DESC_DATA_EU_RO_A:
			case DESC_DATA_EU_RW_NA:	case DESC_DATA_EU_RW_A:
			case DESC_DATA_ED_RO_NA:	case DESC_DATA_ED_RO_A:
			case DESC_DATA_ED_RW_NA:	case DESC_DATA_ED_RW_A:
			case DESC_CODE_R_NC_A:		case DESC_CODE_R_NC_NA:
				if (((value & 3)>desc.DPL()) || (cpu.cpl>desc.DPL())) {
					// extreme pinball
					return CPU_PrepareException(EXCEPTION_GP,value & 0xfffc);
				}
				break;
			case DESC_CODE_R_C_A:		case DESC_CODE_R_C_NA:
				break;
			default:
				// gabriel knight
				return CPU_PrepareException(EXCEPTION_GP,value & 0xfffc);
			}
			if (!desc.saved.seg.p) {
				// win
				return CPU_PrepareException(EXCEPTION_NP,value & 0xfffc);
			}
			Segs.val[seg]=value;
			Segs.phys[seg]=desc.GetBase();
		}
		return false;
	}
}

// POP sreg: load the segment register from the word at SS:SP, then adjust
// SP only if the load succeeded (so a fault leaves the stack intact).
bool CPU_PopSeg(SegNames seg,bool use32) {
	Bitu val=mem_readw(SegPhys(ss) + (reg_esp & cpu.stack.mask));
	if (CPU_SetSegGeneral(seg,val)) return true;
	Bitu addsp=use32?0x04:0x02;
	reg_esp=(reg_esp&cpu.stack.notmask)|((reg_esp+addsp)&cpu.stack.mask);
	return false;
}

// CPUID: report a GenuineIntel 486DX or Pentium depending on the configured
// architecture. Returns false when CPUID is not available on the emulated CPU.
bool CPU_CPUID(void) {
	if
(CPU_ArchitectureType<CPU_ARCHTYPE_486NEWSLOW) return false;
	switch (reg_eax) {
	case 0:	/* Vendor ID String and maximum level? */
		reg_eax=1;  /* Maximum level */
		reg_ebx='G' | ('e' << 8) | ('n' << 16) | ('u'<< 24);
		reg_edx='i' | ('n' << 8) | ('e' << 16) | ('I'<< 24);
		reg_ecx='n' | ('t' << 8) | ('e' << 16) | ('l'<< 24);
		break;
	case 1:	/* get processor type/family/model/stepping and feature flags */
		if ((CPU_ArchitectureType==CPU_ARCHTYPE_486NEWSLOW) || (CPU_ArchitectureType==CPU_ARCHTYPE_MIXED)) {
			reg_eax=0x402;		/* intel 486dx */
			reg_ebx=0;			/* Not Supported */
			reg_ecx=0;			/* No features */
			reg_edx=0x00000001;	/* FPU */
		} else if (CPU_ArchitectureType==CPU_ARCHTYPE_PENTIUMSLOW) {
			reg_eax=0x513;		/* intel pentium */
			reg_ebx=0;			/* Not Supported */
			reg_ecx=0;			/* No features */
			reg_edx=0x00000011;	/* FPU+TimeStamp/RDTSC */
		} else {
			return false;
		}
		break;
	default:
		LOG(LOG_CPU,LOG_ERROR)("Unhandled CPUID Function %x",reg_eax);
		reg_eax=0;
		reg_ebx=0;
		reg_ecx=0;
		reg_edx=0;
		break;
	}
	return true;
}

// Decoder installed while the CPU is halted: burns the remaining cycle
// budget until CS:EIP moves (i.e. an interrupt redirected execution), then
// restores the previous decoder.
static Bits HLT_Decode(void) {
	/* Once an interrupt occurs, it should change cpu core */
	if (reg_eip!=cpu.hlt.eip || SegValue(cs) != cpu.hlt.cs) {
		cpudecoder=cpu.hlt.old_decoder;
	} else {
		CPU_IODelayRemoved += CPU_Cycles;
		CPU_Cycles=0;
	}
	return 0;
}

// HLT: park the CPU at oldeip and swap in HLT_Decode until an interrupt
// changes CS:EIP.
void CPU_HLT(Bitu oldeip) {
	reg_eip=oldeip;
	CPU_IODelayRemoved += CPU_Cycles;
	CPU_Cycles=0;
	cpu.hlt.cs=SegValue(cs);
	cpu.hlt.eip=reg_eip;
	cpu.hlt.old_decoder=cpudecoder;
	cpudecoder=&HLT_Decode;
}

// ENTER: build a stack frame — push (E)BP, copy 'level' nested frame
// pointers (level is masked to 0..31 as on real hardware), push the new
// frame pointer, then reserve 'bytes' of locals.
void CPU_ENTER(bool use32,Bitu bytes,Bitu level) {
	level&=0x1f;
	Bitu sp_index=reg_esp&cpu.stack.mask;
	Bitu bp_index=reg_ebp&cpu.stack.mask;
	if (!use32) {
		sp_index-=2;
		mem_writew(SegPhys(ss)+sp_index,reg_bp);
		reg_bp=(Bit16u)(reg_esp-2);
		if (level) {
			for (Bitu i=1;i<level;i++) {
				sp_index-=2;bp_index-=2;
				mem_writew(SegPhys(ss)+sp_index,mem_readw(SegPhys(ss)+bp_index));
			}
			sp_index-=2;
			mem_writew(SegPhys(ss)+sp_index,reg_bp);
		}
	} else {
		sp_index-=4;
		mem_writed(SegPhys(ss)+sp_index,reg_ebp);
		reg_ebp=(reg_esp-4);
		if (level) {
			for (Bitu i=1;i<level;i++) {
				sp_index-=4;bp_index-=4;
				mem_writed(SegPhys(ss)+sp_index,mem_readd(SegPhys(ss)+bp_index));
			}
			sp_index-=4;
			mem_writed(SegPhys(ss)+sp_index,reg_ebp);
		}
	}
	sp_index-=bytes;
	reg_esp=(reg_esp&cpu.stack.notmask)|((sp_index)&cpu.stack.mask);
}

// Hotkey handler: raise the cycle count — +5 percentage points in auto mode
// (capped at 105), or scale fixed cycles by CPU_CycleUp (percentage when
// <100, absolute otherwise).
static void CPU_CycleIncrease(bool pressed) {
	if (!pressed) return;
	if (CPU_CycleAutoAdjust) {
		CPU_CyclePercUsed+=5;
		if (CPU_CyclePercUsed>105) CPU_CyclePercUsed=105;
		LOG_MSG("CPU speed: max %d percent.",CPU_CyclePercUsed);
		GFX_SetTitle(CPU_CyclePercUsed,-1,false);
	} else {
		Bit32s old_cycles=CPU_CycleMax;
		if (CPU_CycleUp < 100) {
			CPU_CycleMax = (Bit32s)(CPU_CycleMax * (1 + (float)CPU_CycleUp / 100.0));
		} else {
			CPU_CycleMax = (Bit32s)(CPU_CycleMax + CPU_CycleUp);
		}
		CPU_CycleLeft=0;CPU_Cycles=0;
		// Make sure small percentages still move the value.
		if (CPU_CycleMax==old_cycles) CPU_CycleMax++;
		if(CPU_CycleMax > 15000 )
			LOG_MSG("CPU speed: fixed %d cycles. If you need more than 20000, try core=dynamic in DOSBox's options.",CPU_CycleMax);
		else
			LOG_MSG("CPU speed: fixed %d cycles.",CPU_CycleMax);
		GFX_SetTitle(CPU_CycleMax,-1,false);
	}
}

// Hotkey handler: lower the cycle count (mirror of CPU_CycleIncrease).
static void CPU_CycleDecrease(bool pressed) {
	if (!pressed) return;
	if (CPU_CycleAutoAdjust) {
		CPU_CyclePercUsed-=5;
		if (CPU_CyclePercUsed<=0) CPU_CyclePercUsed=1;
		if(CPU_CyclePercUsed <=70)
			LOG_MSG("CPU speed: max %d percent. 
If the game runs too fast, try a fixed cycles amount in DOSBox's options.",CPU_CyclePercUsed);
		else
			LOG_MSG("CPU speed: max %d percent.",CPU_CyclePercUsed);
		GFX_SetTitle(CPU_CyclePercUsed,-1,false);
	} else {
		if (CPU_CycleDown < 100) {
			CPU_CycleMax = (Bit32s)(CPU_CycleMax / (1 + (float)CPU_CycleDown / 100.0));
		} else {
			CPU_CycleMax = (Bit32s)(CPU_CycleMax - CPU_CycleDown);
		}
		CPU_CycleLeft=0;CPU_Cycles=0;
		if (CPU_CycleMax <= 0) CPU_CycleMax=1;
		LOG_MSG("CPU speed: fixed %d cycles.",CPU_CycleMax);
		GFX_SetTitle(CPU_CycleMax,-1,false);
	}
}

// Halve the auto-adjust cycle target (bounded below) and suspend automatic
// adjustment; used while cycle auto-adjust should not fight other timing.
void CPU_Enable_SkipAutoAdjust(void) {
	if (CPU_CycleAutoAdjust) {
		CPU_CycleMax /= 2;
		if (CPU_CycleMax < CPU_CYCLES_LOWER_LIMIT)
			CPU_CycleMax = CPU_CYCLES_LOWER_LIMIT;
	}
	CPU_SkipCycleAutoAdjust=true;
}

// Re-enable automatic cycle adjustment.
void CPU_Disable_SkipAutoAdjust(void) {
	CPU_SkipCycleAutoAdjust=false;
}

extern Bit32s ticksDone;
extern Bit32u ticksScheduled;

// Reset the counters the cycle auto-adjust heuristic accumulates.
void CPU_Reset_AutoAdjust(void) {
	CPU_IODelayRemoved = 0;
	ticksDone = 0;
	ticksScheduled = 0;
}

// Config module for the CPU: first construction resets the whole register/
// flag/descriptor state to power-on values, initializes the cores and
// hotkeys, then applies the configuration; later constructions just
// reapply configuration via Change_Config.
class CPU: public Module_base {
private:
	static bool inited;
public:
	CPU(Section* configuration):Module_base(configuration) {
		if(inited) {
			Change_Config(configuration);
			return;
		}
//		Section_prop * section=static_cast<Section_prop *>(configuration);
		inited=true;
		// Power-on register state.
		reg_eax=0;
		reg_ebx=0;
		reg_ecx=0;
		reg_edx=0;
		reg_edi=0;
		reg_esi=0;
		reg_ebp=0;
		reg_esp=0;
		SegSet16(cs,0);
		SegSet16(ds,0);
		SegSet16(es,0);
		SegSet16(fs,0);
		SegSet16(gs,0);
		SegSet16(ss,0);
		CPU_SetFlags(FLAG_IF,FMASK_ALL);		//Enable interrupts
		// Setting cr0 to all-ones first guarantees CPU_SET_CRX sees a change.
		cpu.cr0=0xffffffff;
		CPU_SET_CRX(0,0);						//Initialize
		cpu.code.big=false;
		cpu.stack.mask=0xffff;
		cpu.stack.notmask=0xffff0000;
		cpu.stack.big=false;
		cpu.trap_skip=false;
		cpu.idt.SetBase(0);
		cpu.idt.SetLimit(1023);
		// Debug/test registers: zeroed, then the fixed DR6/DR7 bits set
		// (DR6 reset value differs on Pentium).
		for (Bitu i=0; i<7; i++) {
			cpu.drx[i]=0;
			cpu.trx[i]=0;
		}
		if (CPU_ArchitectureType==CPU_ARCHTYPE_PENTIUMSLOW) {
			cpu.drx[6]=0xffff0ff0;
		} else {
			cpu.drx[6]=0xffff1ff0;
		}
		cpu.drx[7]=0x00000400;
		/* Init the cpu cores */
		CPU_Core_Normal_Init();
		CPU_Core_Simple_Init();
		CPU_Core_Full_Init();
#if (C_DYNAMIC_X86)
		CPU_Core_Dyn_X86_Init();
#elif (C_DYNREC)
		CPU_Core_Dynrec_Init();
#endif
		MAPPER_AddHandler(CPU_CycleDecrease,MK_f11,MMOD1,"cycledown","Dec Cycles");
		MAPPER_AddHandler(CPU_CycleIncrease,MK_f12,MMOD1,"cycleup"  ,"Inc Cycles");
		Change_Config(configuration);
		CPU_JMP(false,0,0,0);					//Setup the first cpu core
	}
	// Parse the [cpu] "cycles" setting: "max" (auto-adjust with optional
	// percentage and "limit N" arguments), "auto" (deferred auto-determine),
	// or a fixed number.
	bool Change_Config(Section* newconfig){
		Section_prop * section=static_cast<Section_prop *>(newconfig);
		CPU_AutoDetermineMode=CPU_AUTODETERMINE_NONE;
		//CPU_CycleLeft=0;//needed ?
		CPU_Cycles=0;
		CPU_SkipCycleAutoAdjust=false;
		Prop_multival* p = section->Get_multival("cycles");
		std::string type = p->GetSection()->Get_string("type");
		std::string str ;
		CommandLine cmd(0,p->GetSection()->Get_string("parameters"));
		if (type=="max") {
			CPU_CycleMax=0;
			CPU_CyclePercUsed=100;
			CPU_CycleAutoAdjust=true;
			CPU_CycleLimit=-1;
			for (Bitu cmdnum=1; cmdnum<=cmd.GetCount(); cmdnum++) {
				if (cmd.FindCommand(cmdnum,str)) {
					// "NN%" caps the host-cpu percentage to use.
					if (str.find('%')==str.length()-1) {
						str.erase(str.find('%'));
						int percval=0;
						std::istringstream stream(str);
						stream >> percval;
						if ((percval>0) && (percval<=105)) CPU_CyclePercUsed=(Bit32s)percval;
					} else if (str=="limit") {
						// "limit N" caps the absolute cycle count.
						cmdnum++;
						if (cmd.FindCommand(cmdnum,str)) {
							int cyclimit=0;
							std::istringstream stream(str);
							stream >> cyclimit;
							if (cyclimit>0) CPU_CycleLimit=cyclimit;
						}
					}
				}
			}
		} else {
			if (type=="auto") {
				// Start fixed at 3000 cycles; switch to max when the first
				// protected-mode program runs (see CPU_SET_CRX).
				CPU_AutoDetermineMode|=CPU_AUTODETERMINE_CYCLES;
				CPU_CycleMax=3000;
				CPU_OldCycleMax=3000;
				CPU_CyclePercUsed=100;
				for (Bitu cmdnum=0; cmdnum<=cmd.GetCount(); cmdnum++) {
					if (cmd.FindCommand(cmdnum,str)) {
						if (str.find('%')==str.length()-1) {
							str.erase(str.find('%'));
							int percval=0;
							std::istringstream stream(str);
							stream >> percval;
							if ((percval>0) && (percval<=105)) CPU_CyclePercUsed=(Bit32s)percval;
						} else if (str=="limit") {
							cmdnum++;
							if (cmd.FindCommand(cmdnum,str)) {
								int cyclimit=0;
								std::istringstream stream(str);
								stream >> cyclimit;
								if (cyclimit>0) CPU_CycleLimit=cyclimit;
							}
						} else {
							// Bare number: the fixed cycle count to use
							// before auto-detection kicks in.
							int rmdval=0;
							std::istringstream stream(str);
							stream >> rmdval;
							if (rmdval>0)
{ CPU_CycleMax=(Bit32s)rmdval; CPU_OldCycleMax=(Bit32s)rmdval; } } } } } else if(type =="fixed") { cmd.FindCommand(1,str); int rmdval=0; std::istringstream stream(str); stream >> rmdval; CPU_CycleMax=(Bit32s)rmdval; } else { std::istringstream stream(type); int rmdval=0; stream >> rmdval; if(rmdval) CPU_CycleMax=(Bit32s)rmdval; } CPU_CycleAutoAdjust=false; } CPU_CycleUp=section->Get_int("cycleup"); CPU_CycleDown=section->Get_int("cycledown"); std::string core(section->Get_string("core")); cpudecoder=&CPU_Core_Normal_Run; if (core == "normal") { cpudecoder=&CPU_Core_Normal_Run; } else if (core =="simple") { cpudecoder=&CPU_Core_Simple_Run; } else if (core == "full") { cpudecoder=&CPU_Core_Full_Run; } else if (core == "auto") { cpudecoder=&CPU_Core_Normal_Run; #if (C_DYNAMIC_X86) CPU_AutoDetermineMode|=CPU_AUTODETERMINE_CORE; } else if (core == "dynamic") { cpudecoder=&CPU_Core_Dyn_X86_Run; CPU_Core_Dyn_X86_SetFPUMode(true); } else if (core == "dynamic_nodhfpu") { cpudecoder=&CPU_Core_Dyn_X86_Run; CPU_Core_Dyn_X86_SetFPUMode(false); #elif (C_DYNREC) CPU_AutoDetermineMode|=CPU_AUTODETERMINE_CORE; } else if (core == "dynamic") { cpudecoder=&CPU_Core_Dynrec_Run; #else #endif } #if (C_DYNAMIC_X86) CPU_Core_Dyn_X86_Cache_Init((core == "dynamic") || (core == "dynamic_nodhfpu")); #elif (C_DYNREC) CPU_Core_Dynrec_Cache_Init( core == "dynamic" ); #endif CPU_ArchitectureType = CPU_ARCHTYPE_MIXED; std::string cputype(section->Get_string("cputype")); if (cputype == "auto") { CPU_ArchitectureType = CPU_ARCHTYPE_MIXED; } else if (cputype == "386") { CPU_ArchitectureType = CPU_ARCHTYPE_386FAST; } else if (cputype == "386_prefetch") { CPU_ArchitectureType = CPU_ARCHTYPE_386FAST; if (core == "normal") { cpudecoder=&CPU_Core_Prefetch_Run; CPU_PrefetchQueueSize = 16; } else if (core == "auto") { cpudecoder=&CPU_Core_Prefetch_Run; CPU_PrefetchQueueSize = 16; CPU_AutoDetermineMode&=(~CPU_AUTODETERMINE_CORE); } else { E_Exit("prefetch queue emulation requires the normal core setting."); } 
} else if (cputype == "386_slow") { CPU_ArchitectureType = CPU_ARCHTYPE_386SLOW; } else if (cputype == "486_slow") { CPU_ArchitectureType = CPU_ARCHTYPE_486NEWSLOW; } else if (cputype == "486_prefetch") { CPU_ArchitectureType = CPU_ARCHTYPE_486NEWSLOW; if (core == "normal") { cpudecoder=&CPU_Core_Prefetch_Run; CPU_PrefetchQueueSize = 32; } else if (core == "auto") { cpudecoder=&CPU_Core_Prefetch_Run; CPU_PrefetchQueueSize = 32; CPU_AutoDetermineMode&=(~CPU_AUTODETERMINE_CORE); } else { E_Exit("prefetch queue emulation requires the normal core setting."); } } else if (cputype == "pentium_slow") { CPU_ArchitectureType = CPU_ARCHTYPE_PENTIUMSLOW; } if (CPU_ArchitectureType>=CPU_ARCHTYPE_486NEWSLOW) CPU_extflags_toggle=(FLAG_ID|FLAG_AC); else if (CPU_ArchitectureType>=CPU_ARCHTYPE_486OLDSLOW) CPU_extflags_toggle=(FLAG_AC); else CPU_extflags_toggle=0; if(CPU_CycleMax <= 0) CPU_CycleMax = 3000; if(CPU_CycleUp <= 0) CPU_CycleUp = 500; if(CPU_CycleDown <= 0) CPU_CycleDown = 20; if (CPU_CycleAutoAdjust) GFX_SetTitle(CPU_CyclePercUsed,-1,false); else GFX_SetTitle(CPU_CycleMax,-1,false); return true; } ~CPU(){ /* empty */}; }; static CPU * test; void CPU_ShutDown(Section* sec) { #if (C_DYNAMIC_X86) CPU_Core_Dyn_X86_Cache_Close(); #elif (C_DYNREC) CPU_Core_Dynrec_Cache_Close(); #endif delete test; } void CPU_Init(Section* sec) { test = new CPU(sec); sec->AddDestroyFunction(&CPU_ShutDown,true); } //initialize static members bool CPU::inited=false;
gpl-2.0
GameTheory-/android_kernel_lge_l0
drivers/felica/felica_rfs.c
1
8763
/*
 * felica_rfs.c
 *
 * FeliCa RFS (RF Sense) misc-device driver.  Exposes the RFS GPIO line to
 * user space as a 1-byte read, and (when FELICA_LED_SUPPORT is set) mirrors
 * RFS transitions to an Android LED service via a usermode helper.
 */

/*
 * INCLUDE FILES FOR MODULE
 */
#include "felica_rfs.h"
#include "felica_gpio.h"
#include "felica_test.h"

/*
 * DEFINE
 */
#ifdef FELICA_LED_SUPPORT
enum{
	RFS_LED_OFF = 0,
	RFS_LED_ON,
};
#endif

/*
 * FUNCTION PROTOTYPE
 */

/*
 * INTERNAL DEFINITION
 */
#ifdef FELICA_LED_SUPPORT
/* Android component started through "am startservice" to drive the LED. */
#define FELICA_LED_INTENT "com.nttdocomo.android.felicaremotelock/.LEDService"
#endif

/*
 * INTERNAL VARIABLE
 */
static int isopen = 0; // 0 : No open 1 : Open

#ifdef FELICA_LED_SUPPORT
/* Nonzero after the LED "on" service has been invoked; cleared on the
 * matching "off" invocation or on device release. */
static int isFelicaUsed = 0;
#endif

/*
 * FUNCTION DEFINITION
 */
#ifdef FELICA_LED_SUPPORT
static void felica_rfs_interrupt_work(struct work_struct *data);
static DECLARE_DELAYED_WORK(felica_rfs_interrupt, felica_rfs_interrupt_work);

/*
 * Invoke the Android LED service via a usermode helper.  The on/off choice
 * is derived from the current RFS GPIO level combined with the cached
 * isFelicaUsed state; a wake lock is held while the LED is on.
 * Returns the call_usermodehelper() result (0 on success).
 */
static int invoke_led_service(void)
{
	int rc = 0;
	int getvalue;
	char *argv_on[] = { "/system/bin/sh", "/system/bin/am", "startservice", "--es", "rfs", "on", "-n", FELICA_LED_INTENT, NULL };
	char *argv_off[] = { "/system/bin/sh", "/system/bin/am", "startservice", "--es", "rfs", "off", "-n", FELICA_LED_INTENT, NULL };
	static char *envp[] = {FELICA_LD_LIBRARY_PATH,FELICA_BOOTCLASSPATH,FELICA_PATH,NULL};

	FELICA_DEBUG_MSG("[FELICA_RFS] invoke led service ... \n");

	getvalue = felica_gpio_read(GPIO_FELICA_RFS);

	FELICA_DEBUG_MSG("[FELICA_RFS] felica_gpio_read = %d , isFelicaUsed =%d \n",getvalue,isFelicaUsed);

	/* RFS low with LED currently off -> turn the LED on.
	 * NOTE(review): RFS appears to be active-low here — confirm against the
	 * board schematic / felica_gpio.h. */
	if( isFelicaUsed ==0 && getvalue == GPIO_LOW_VALUE)
	{
		FELICA_DEBUG_MSG("[FELICA_RFS] Felica LED On ... \n");
		lock_felica_rfs_wake_lock();
		rc = call_usermodehelper( argv_on[0], argv_on, envp, UMH_WAIT_PROC );
		isFelicaUsed = 1;
	}
	else if( isFelicaUsed ==1 && getvalue == GPIO_HIGH_VALUE)
	{
		FELICA_DEBUG_MSG("[FELICA_RFS] Felica LED Off ... \n");
		unlock_felica_rfs_wake_lock();
		rc = call_usermodehelper( argv_off[0], argv_off, envp, UMH_WAIT_PROC );
		isFelicaUsed =0;
	}
	else
	{
		/* State and GPIO level disagree; fail safe by forcing the LED off. */
		FELICA_DEBUG_MSG("[FELICA_RFS] Felica LED ERROR case so LED Off ... \n");
		unlock_felica_rfs_wake_lock();
		rc = call_usermodehelper( argv_off[0], argv_off, envp, UMH_WAIT_PROC );
		isFelicaUsed =0;
	}

	FELICA_DEBUG_MSG("[FELICA_RFS] invoke_led_service: %d \n", rc);

	return rc;
}

/*
 * Delayed-work handler for the RFS interrupt.  Masks the IRQ while the
 * usermode helper runs, then re-enables it.
 */
static void felica_rfs_interrupt_work(struct work_struct *data)
{
	int rc = 0;

	disable_irq_nosync(gpio_to_irq(GPIO_FELICA_RFS));
	usermodehelper_enable();

	#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_interrupt_work - start \n");
	#endif

	rc = invoke_led_service();
	if(rc)
	{
		FELICA_DEBUG_MSG("[FELICA_RFS] Error - invoke app \n");
	}

	#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_interrupt_work - end \n");
	#endif

	enable_irq(gpio_to_irq(GPIO_FELICA_RFS));
}

/*
 * ISR for the RFS GPIO edge interrupt.  Defers all real work (which may
 * sleep) to the delayed-work item above.
 */
irqreturn_t felica_rfs_detect_interrupt(int irq, void *dev_id)
{
	#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_low_isr - start \n");
	#endif

	schedule_delayed_work(&felica_rfs_interrupt,0);

	#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_low_isr - end \n");
	#endif

	return IRQ_HANDLED;
}
#endif

/*
 * Description: MFC calls this function using open method of FileInputStream class
 * Input:       None
 * Output:      Success : 0 Fail : Others
 */
static int felica_rfs_open (struct inode *inode, struct file *fp)
{
	int rc = 0;

	/* Single-open device: reject a second concurrent opener. */
	if(1 == isopen)
	{
		#ifdef FEATURE_DEBUG_LOW
		FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_open - already open \n");
		#endif
		return -1;
	}
	else
	{
		#ifdef FEATURE_DEBUG_LOW
		FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_open - start \n");
		#endif
		isopen = 1;
	}

	#ifdef FELICA_LED_SUPPORT
	rc = felica_gpio_open(GPIO_FELICA_RFS, GPIO_DIRECTION_IN, GPIO_HIGH_VALUE);
	#else
	rc = felica_gpio_open(GPIO_FELICA_RFS, GPIO_DIRECTION_IN, GPIO_LOW_VALUE);
	#endif

	#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_open - end \n");
	#endif

	#ifdef FELICA_FN_DEVICE_TEST
	/* Test builds report a canned result instead of the real GPIO status. */
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_open - result(%d) \n",result_open_rfs);
	return result_open_rfs;
	#else
	return rc;
	#endif
}

/*
 * Description: MFC calls this function using read method(int read()) of FileInputStream class
 * Input:       None
 * Output:      RFS low : 1 RFS high : 0
 */
static ssize_t felica_rfs_read(struct file *fp, char *buf, size_t count, loff_t *pos)
{
	int rc = 0;
	int getvalue = GPIO_LOW_VALUE;

	#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_read - start \n");
	#endif

	/* Check error */
	if(NULL == fp)
	{
		FELICA_DEBUG_MSG("[FELICA_RFS] ERROR fp \n");
		return -1;
	}

	if(NULL == buf)
	{
		FELICA_DEBUG_MSG("[FELICA_RFS] ERROR buf \n");
		return -2;
	}

	/* Only single-byte reads are supported. */
	if(1 != count)
	{
		FELICA_DEBUG_MSG("[FELICA_RFS] ERROR count \n");
		return -3;
	}

	if(NULL == pos)
	{
		FELICA_DEBUG_MSG("[FELICA_RFS] ERROR file \n");
		return -4;
	}

	/* Get GPIO value */
	getvalue = felica_gpio_read(GPIO_FELICA_RFS);

	FELICA_DEBUG_MSG("[FELICA_RFS] RFS GPIO status : %d \n", getvalue);

	if((GPIO_LOW_VALUE != getvalue)&&(GPIO_HIGH_VALUE != getvalue))
	{
		FELICA_DEBUG_MSG("[FELICA_RFS] ERROR - getvalue is out of range \n");
		return -5;
	}

	/* Copy value to user memory; the reported value is the inverted GPIO
	 * level (RFS low -> 1, RFS high -> 0, matching the header comment). */
	getvalue = getvalue ? GPIO_LOW_VALUE: GPIO_HIGH_VALUE;

	FELICA_DEBUG_MSG("[FELICA_RFS] RFS status : %d \n", getvalue);

	rc = copy_to_user((void*)buf, (void*)&getvalue, count);
	if(rc)
	{
		FELICA_DEBUG_MSG("[FELICA_RFS] ERROR - copy_to_user \n");
		return rc;
	}

	#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_read - end \n");
	#endif

	#ifdef FELICA_FN_DEVICE_TEST
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_read - result(%d) \n",result_read_rfs);
	if(result_read_rfs != -1)
		result_read_rfs = count;
	return result_read_rfs;
	#else
	return count;
	#endif
}

/*
 * Description: MFC calls this function using close method(int close()) of FileInputStream class
 * Input:       None
 * Output:      RFS low : 1 RFS high : 0
 */
static int felica_rfs_release (struct inode *inode, struct file *fp)
{
	if(0 == isopen)
	{
		#ifdef FEATURE_DEBUG_LOW
		FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_release - not open \n");
		#endif
		return -1;
	}
	else
	{
		#ifdef FEATURE_DEBUG_LOW
		FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_release - start \n");
		#endif
		isopen = 0;
	}

	#ifdef FELICA_LED_SUPPORT
	/* Reset the LED bookkeeping so the next open starts from "off". */
	isFelicaUsed = 0;
	#endif

	#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_release - end \n");
	#endif

	#ifdef FELICA_FN_DEVICE_TEST
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_release - result(%d) \n",result_close_rfs);
	return result_close_rfs;
	#else
	return 0;
	#endif
}

static struct file_operations felica_rfs_fops =
{
	.owner    = THIS_MODULE,
	.open     = felica_rfs_open,
	.read     = felica_rfs_read,
	.release  = felica_rfs_release,
};

static struct miscdevice felica_rfs_device =
{
	.minor = MISC_DYNAMIC_MINOR,
	.name = FELICA_RFS_NAME,
	.fops = &felica_rfs_fops,
};

/*
 * Module init: registers the misc device and, with FELICA_LED_SUPPORT,
 * claims the RFS IRQ (both edges, wake-capable) and the wake lock.
 */
static int felica_rfs_init(void)
{
	int rc;

	#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_init - start \n");
	#endif

	/* register the device file */
	rc = misc_register(&felica_rfs_device);
	if (rc < 0)
	{
		FELICA_DEBUG_MSG("[FELICA_RFS] FAIL!! can not register felica_rfs \n");
		return rc;
	}

	#ifdef FELICA_LED_SUPPORT
	FELICA_DEBUG_MSG("[FELICA_RFS] FELICA LED NEW SUPPORT !!\n");
	rc= request_irq(gpio_to_irq(GPIO_FELICA_RFS), felica_rfs_detect_interrupt, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING|IRQF_NO_SUSPEND , FELICA_RFS_NAME, NULL);
	if (rc)
	{
		FELICA_DEBUG_MSG("[FELICA_RFS] FAIL!! can not request_irq \n");
		return rc;
	}
	irq_set_irq_wake(gpio_to_irq(GPIO_FELICA_RFS),1);
	init_felica_rfs_wake_lock();
	#else
	FELICA_DEBUG_MSG("[FELICA_RFS] FELICA LED NOT SUPPORT !! \n");
	#endif

	#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_init - end \n");
	#endif

	return 0;
}

/*
 * Module exit: releases the IRQ/wake lock (LED builds) and unregisters
 * the misc device.
 */
static void felica_rfs_exit(void)
{
	#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_exit - start \n");
	#endif

	#ifdef FELICA_LED_SUPPORT
	free_irq(gpio_to_irq(GPIO_FELICA_RFS), NULL);
	destroy_felica_rfs_wake_lock();
	#endif

	/* deregister the device file */
	misc_deregister(&felica_rfs_device);

	#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA_RFS] felica_rfs_exit - end \n");
	#endif
}

module_init(felica_rfs_init);
module_exit(felica_rfs_exit);

MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
doublesword/commuse
Source/interview_test/整数逆序链表相加返回链表.cpp
1
2161
// Adds two non-negative integers represented as singly linked lists of
// decimal digits, least-significant digit first (LeetCode "Add Two Numbers"
// layout), and returns the sum as a newly allocated list in the same layout.
//
// Changes from the original: the GBK-mojibake include-guard identifier was
// replaced with an ASCII one, comments were translated to English, the header
// is now self-contained (<queue>/<iostream> included, std:: qualified instead
// of relying on the includer's `using namespace std;`), and Reserver() no
// longer dereferences a null head.  The addition algorithm is unchanged.
#ifndef ADD_TWO_NUMBERS_LINKED_LIST_H
#define ADD_TWO_NUMBERS_LINKED_LIST_H

#include <iostream>
#include <queue>

namespace addnum2
{
    // One node of the digit list.
    struct ListNode
    {
        int val;
        ListNode *next;
        ListNode() : val(0), next(nullptr) {}
        ListNode(int x) : val(x), next(nullptr) {}
        ListNode(int x, ListNode *next) : val(x), next(next) {}
    };

    // Reverses the list in place; 'l' is updated to point at the new head.
    // A null list is a no-op (the original crashed on nullptr).
    void Reserver(ListNode*& l)
    {
        if (!l)
            return;
        ListNode* cur = l;
        ListNode* end = cur->next;
        while (cur && end)
        {
            ListNode* pNextNext = end->next;  // remember the node after 'end'
            end->next = cur;                  // reverse one link
            cur = end;                        // advance current
            end = pNextNext;                  // advance next
        }
        l->next = nullptr;  // the old head becomes the tail
        l = cur;            // the old tail becomes the head
    }

    // Digit-by-digit addition.  A carry is materialized eagerly as a
    // pre-created node (value 1) appended after the current node; the next
    // iteration detects and reuses that node, so a final carry is already
    // part of the result when the loop ends.
    ListNode* addTwoNumbers(ListNode* l1, ListNode* l2)
    {
        std::queue<int> a;
        std::queue<int> b;
        ListNode* pNewLink = nullptr, *pPre = nullptr;

        // Drain both input lists into digit queues (head first).
        ListNode* cur = l1;
        while (cur)
        {
            a.push(cur->val);
            cur = cur->next;
        }
        cur = l2;
        while (cur)
        {
            b.push(cur->val);
            cur = cur->next;
        }

        // Add pairs of digits until both queues are empty.
        while (a.size() || b.size())
        {
            int num1 = 0;
            int num2 = 0;
            if (a.size())
            {
                num1 = a.front();
                a.pop();
            }
            if (b.size())
            {
                num2 = b.front();
                b.pop();
            }

            int sf = 0;
            ListNode* node = nullptr;
            // A node already hanging off pPre means the previous pair carried.
            if (pPre && pPre->next)
            {
                sf = pPre->next->val;
                node = pPre->next;
            }

            if (!node)
                node = new ListNode((num1 + num2) % 10, nullptr);
            else
                node->val = (num1 + num2 + sf) % 10;

            if (!pNewLink)
                pNewLink = node;
            if (pPre)
                pPre->next = node;
            pPre = node;  // extend the result list

            sf = (num1 + num2 + sf) / 10 > 0 ? 1 : 0;
            // Pre-create the carry node for the next iteration (or as the
            // final most-significant digit).
            if (sf > 0)
            {
                ListNode* node_sf = new ListNode(1, nullptr);
                node->next = node_sf;
            }
        }

        // Result is already least-significant-first; no reversal needed.
        // Reserver(pNewLink);
        return pNewLink;
    }

    // Builds a list from 'size' ints, preserving array order.
    ListNode* buildLink(int* arr, int size)
    {
        ListNode* pRoot = nullptr;
        ListNode* pPre = nullptr;
        for (int i = 0; i < size; i++)
        {
            ListNode* node = new ListNode(arr[i], nullptr);
            if (!pRoot)
                pRoot = node;
            if (pPre)
                pPre->next = node;
            pPre = node;
        }
        return pRoot;
    }

    // Smoke test: 9999999 + 9999 (digits reversed in the lists).
    void test_addTwoNumbers()
    {
        int a[] = {9, 9, 9, 9, 9, 9, 9};
        int b[] = {9, 9, 9, 9};
        ListNode* l1 = buildLink(a, sizeof(a) / sizeof(a[0]));
        ListNode* l2 = buildLink(b, sizeof(b) / sizeof(b[0]));
        ListNode* l3 = addTwoNumbers(l1, l2);
        std::cout << "hello" << std::endl;
    }
}

#endif // ADD_TWO_NUMBERS_LINKED_LIST_H
gpl-2.0
rafoid/gen_LL_sort
genlistsort.c
1
5593
/************************************************************************** * * Title: genlistsort.c * Description: Implements the sorting of generic linked list using * max-heap sort algorithm * Author: rafoid * Date: 3/21/2014 * Licensed under GPL v.2 * ***************************************************************************/ #include <stdio.h> #include <time.h> #include "genlistsort.h" #include "string.h" void add_node(gen_tree_node_t *n, gen_tree_node_t *t, int (*cmp_func)()) { gen_tree_node_t *c; while (1) { if (!t) break; t->size++; if ((*cmp_func)(n->data, t->data) < 0) { void *data = t->data; t->data = n->data; n->data = data; } if (t->left) { if (t->right) t = t->left->size < t->right->size ? t->left : t->right; else { t->right = n; break; } } else { t->left = n; break; } } } /* pick the root node and return it. * percolate data node from the smallest child all the way down to the bottom of * the heap */ void get_node(gen_tree_node_t *n, gen_tree_node_t **t, int (*cmp_func)()) { gen_tree_node_t *cur_root, *left, *right, *dir, **root = t; cur_root = *t; n->data = cur_root->data; while (1) { if (!cur_root) break; left = cur_root->left; right = cur_root->right; if (left) { if (right) dir = (*cmp_func)(left->data, right->data) < 0 ? left : right; else dir = left; } else { if (right) dir = right; else break; } cur_root->data = dir->data; if (!dir->left && !dir->right) { if (dir == left) cur_root->left = NULL; else cur_root->right = NULL; } cur_root = dir; } t = root; } /* uses min-heap sort algorithm. 
* original list is destroyed and replaced with the sorted one (least to * greatest order) */ void sort_gen_list(void *list, void (*cmp_func)) { gen_list_node_t *l = list; gen_tree_node_t t, *mem, *root, *new_node; int i = 0, tree_size = 1; if (!l) return; /* how many elements in the list */ while (l) { i++; l = l->next; } /* allocate the generic heap tree */ mem = (gen_tree_node_t *) malloc (sizeof(gen_tree_node_t)*i); root = mem; /* add root node */ root->left = NULL; root->right = NULL; root->data = ((gen_list_node_t *)list)->data; root->size = 0; /* add all linked list nodes to the heap tree */ l = ((gen_list_node_t *)list)->next; while (l) { new_node = root+tree_size; new_node->left = NULL; new_node->right = NULL; new_node->data = l->data; new_node->size = 0; add_node(new_node, root, cmp_func); l = l->next; tree_size++; } /* read the nodes back into the list */ l = list; while (l) { get_node(&t, &root, cmp_func); l->data = t.data; l = l->next; } free(mem); list = l; } /* Example of using the sort. * Just uncomment everything below, compile and run. 
* It is fully operation and also tests the sort * int compare_nodes_by_size(tdp *d1, tdp *d2) { return d1->size - d2->size; } int compare_nodes_by_area(tdp *d1, tdp *d2) { return d1->area - d2->area; } int compare_nodes_by_volume(tdp *d1, tdp *d2) { return d1->volume - d2->volume; } int compare_nodes_by_name(tdp *d1, tdp *d2) { return strcmp(d1->name, d2->name); } #define LST_SZ 19 int main(int argc, char **argv) { tdp node[LST_SZ]; tl list[LST_SZ]; int i; char a, b, names[LST_SZ][3]; srand(time(NULL)); for (i = 0; i < LST_SZ; i++) { node[i].size = rand() % 20; node[i].area = rand() % 20; node[i].volume = rand() % 20; a = 97 + (rand() % 26); b = 97 + (rand() % 26); sprintf(names[i], "%c%c\0", a, b); node[i].name = names[i]; list[i].data = &node[i]; } for (i = 0; i < LST_SZ - 1; i++) list[i].next = &list[i + 1]; list[LST_SZ - 1].next = NULL; printf("Original list:\n"); for (i = 0; i<LST_SZ; i++) { printf("\t%3d, ", list[i].data->size); printf("%3d, ", list[i].data->area); printf("%3d, ", list[i].data->volume); printf("%3s\n", list[i].data->name); } printf("Order by size:\n\t"); sort_gen_list(&list, (*compare_nodes_by_size)); for (i = 0; i<LST_SZ; i++) printf("%d,", list[i].data->size); printf("\n"); printf("Order by area:\n\t"); sort_gen_list(&list, (*compare_nodes_by_area)); for (i = 0; i<LST_SZ; i++) printf("%d,", list[i].data->area); printf("\n"); printf("Order by volume:\n\t"); sort_gen_list(&list, (*compare_nodes_by_volume)); for (i = 0; i<LST_SZ; i++) printf("%d,", list[i].data->volume); printf("Order by name:\n\t"); sort_gen_list(&list, (*compare_nodes_by_name)); for (i = 0; i<LST_SZ; i++) printf("%s,", list[i].data->name); printf("\n"); return 0; } */
gpl-2.0
gwq5210/learn_curl
examples/http-post.c
1
1933
/*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at http://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ #include <stdio.h> #include <curl/curl.h> int main(void) { CURL *curl; CURLcode res; /* In windows, this will init the winsock stuff */ curl_global_init(CURL_GLOBAL_ALL); /* get a curl handle */ curl = curl_easy_init(); if (curl) { /* First set the URL that is about to receive our POST. This URL can just as well be a https:// URL if that is what should receive the data. */ curl_easy_setopt(curl, CURLOPT_URL, "http://postit.example.com/moo.cgi"); /* Now specify the POST data */ curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "name=daniel&project=curl"); /* Perform the request, res will get the return code */ res = curl_easy_perform(curl); /* Check for errors */ if (res != CURLE_OK) fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res)); /* always cleanup */ curl_easy_cleanup(curl); } curl_global_cleanup(); return 0; }
gpl-2.0
rdebath/Brainfuck
bf2any/bf2dc.c
1
9830
#include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <string.h> #include <signal.h> #if _POSIX_VERSION < 200112L && _XOPEN_VERSION < 500 #define NO_SNPRINTF #endif #include "bf2any.h" /* * Unix/GNU dc translation from BF, runs at about 1,500,000 instructions per second. If malloc is avoided ... 2,600,000 */ /* * There are two other methods of storing the tape. * * 1) Use a register to stack the left of the tape and the main stack * for the right hand tape. * This cannot use run length encoding for tape movements. * * 2) I could use a pair of variables and use divmod to move bytes from * one to the other. This means we can't have bignum cells but * run length encoding is possible. (divmod by 256^N) */ /* * Named variables * F Full script * p Memory pointer * a Memory array * V Temp memory store * b Code store for conditions * Z Output bytes old style * o Output number as char * O Output cell * Q Quit with STOP error * i Input command * N Input command part * m mask to 0..255 * M mask command part * w mask to 0..2^bpc * W mask command part * v 2^bpc */ static int do_run = 0; static int do_output = 0; static int do_input = 0; static int do_trailer = 0; static int max_input_depth = 0; static int ind = 0; #define I printf("%*s", ind*4, "") #define prv(s,v) fprintf(ofd, "%*s" s "\n", ind*4, "", (v)) #define pr(s) fprintf(ofd, "%*s" s "\n", ind*4, "") static void print_dcstring(char * str); static FILE * ofd; static int outputmode = 2; static int inputmode = 0; static int stackdepth = 0; static int bpc = 0; static check_arg_t fn_check_arg; static gen_code_t gen_code; struct be_interface_s be_interface = {.check_arg=fn_check_arg, .gen_code=gen_code}; static int fn_check_arg(const char * arg) { if (strncmp(arg, "-i", 2) == 0 && arg[2] >= '0' && arg[2] <= '9') { inputmode = atol(arg+2); return 1; } else if (strcmp(arg, "-t") == 0) { outputmode=1; return 1; } else if (strcmp(arg, "-g") == 0) { outputmode=2; return 1; } else if (strcmp(arg, "-a") == 0) { 
outputmode=3; return 1; } else if (strcmp(arg, "-sed") == 0) { outputmode=5; return 1; #ifndef _WIN32 } else if (strcmp(arg, "-r") == 0) { do_run=inputmode=1; return 1; #endif } else if (strcmp(arg, "-d") == 0) { inputmode=0; return 1; } else if (strncmp(arg, "-b", 2) == 0 && arg[2] > '0' && arg[2] <= '9') { bpc = strtol(arg+2, 0, 10); if (bpc < 32) { fprintf(stderr, "Cell size must be a minimum of 32 bits\n"); exit(1); } return 1; } if (strcmp("-h", arg) ==0) { fprintf(stderr, "%s\n", "\t" "-d Dump code" #ifndef _WIN32 "\n\t" "-r Run program (standard input will be translated)" #endif "\n\t" "-i0 ',' command will error" "\n\t" "-i1 ',' command will be dc '?' command" "\n\t" "-i2 ',' command input will be requested" "\n\t" "-i3 ',' command will be ignored" "\n\t" "-i4 ',' command will be dc '$' command" "\n\t" "-t Traditional dc output; works with V7 unix dc." "\n\t" "-g General modern dc output. (BSD or GNU)" "\n\t" "-a Autodetect dc output type V7 or modern." "\n\t" "-sed For dc.sed."); return 1; } else return 0; } #ifndef _WIN32 /* Note: calling exit() isn't posix */ static void endprog(int s) { exit(s != SIGCHLD); } #endif static char * dc_ltoa(long val) { static char buf[64]; #ifndef NO_SNPRINTF snprintf(buf, sizeof(buf), "%ld", val); #else sprintf(buf, "%ld", val); #endif if (*buf == '-') *buf = '_'; return buf; } static void gen_code(int ch, int count, char * strn) { switch(ch) { case '!': #ifndef _WIN32 if (do_run) ofd = popen("dc", "w"); else #endif ofd = stdout; if (outputmode == 2) { if (!count || do_run) { pr("#!/usr/bin/dc"); prv("[%dsp", tapeinit); do_trailer = 1; } } else { prv("[%dsp", tapeinit); do_trailer = 1; } break; case '=': prv("%slp:a", dc_ltoa(count)); break; case 'B': if(bytecell) pr("lmxsV"); else if(bpc>0) pr("lwxsV"); else pr("lp;asV"); break; case 'M': prv("lp;alV%s*+lp:a", dc_ltoa(count)); break; case 'N': prv("lp;alV%s*+lp:a", dc_ltoa(-count)); break; case 'S': pr("lp;alV+lp:a"); break; case 'T': pr("lp;alV-lp:a"); break; case '*': 
pr("lp;alV*lp:a"); break; case '/': pr("lp;alV/lp:a"); break; case '%': pr("lp;alV%%lp:a"); break; case 'C': prv("lV%s*lp:a", dc_ltoa(count)); break; case 'D': prv("lV%s*lp:a", dc_ltoa(-count)); break; case 'V': pr("lVlp:a"); break; case 'W': pr("0lV-lp:a"); break; case '+': prv("lp;a%s+lp:a", dc_ltoa(count)); break; case '-': prv("lp;a%s-lp:a", dc_ltoa(count)); break; case '<': prv("lp%s-sp", dc_ltoa(count)); break; case '>': prv("lp%s+sp", dc_ltoa(count)); break; case '[': pr("["); stackdepth++; break; case ']': if(bytecell) pr("lmx0!=b]Sblmx0!=bLbc"); else if(bpc>0) pr("lwx0!=b]Sblwx0!=bLbc"); else pr("lp;a0!=b]Sblp;a0!=bLbc"); stackdepth--; break; case 'I': pr("["); stackdepth++; break; case 'E': if(bytecell) pr("]Sblmx0!=bLbc"); else if(bpc>0) pr("]Sblwx0!=bLbc"); else pr("]Sblp;a0!=bLbc"); stackdepth--; break; case ',': pr("lix"); do_input=1; if(stackdepth>max_input_depth) max_input_depth=stackdepth; break; case 'X': pr("[STOP command executed\n]P"); if (stackdepth>1) prv("%dQ", stackdepth); else pr("q"); break; case '.': if (outputmode == 2) pr("lp;aaP"); else pr("lox"); do_output=1; break; case '"': print_dcstring(strn); break; } if (ch != '~') return; if (do_trailer) pr("q]SF"); if ((do_output && outputmode != 2) || bytecell) fprintf(ofd, "[256+]sM [lp;a 256 %% d0>M]sm\n"); if (!bytecell && bpc>0) fprintf(ofd, "2 %d^sv[lv+]sW [lp;a lv %% d0>W]sw\n", bpc); if (do_output) { if (outputmode&1) { int i; if (outputmode & 2) fprintf(ofd, "["); for (i=0; i<256; i++) { if (i == '\n') { fprintf(ofd, "[[\n]P]%d:C\n", i); } else if ( i >= ' ' && i <= '~' && i != '[' && i != ']' && i != '\\' /* bsd dc */ && (outputmode != 5 || (i != '|' && i != '~')) /* dc.sed */ ) { fprintf(ofd, "[[%c]P]%d:C", i, i); } else if (i < 100) { fprintf(ofd, "[%dP]%d:C", i, i); } else if ( i >= 127 ) { fprintf(ofd, "[[%c]P]%d:C", i, i); } else fprintf(ofd, "[[<%d>]P]%d:C", i, i); /* Give up */ if (i%8 == 7) fprintf(ofd, "\n"); } if (outputmode & 2) fprintf(ofd, "]sZ"); fprintf(ofd, "\n"); } 
switch(outputmode) { case 1: case 5: fprintf(ofd, "[;Cx]sO [lmx;Cx]so\n"); break; case 3: /* Note: dc.sed works in traditional mode, but as it takes minutes just to print "Hello World!" without optimisation there's not much point doing an auto detection. However, if wanted the best method seems to be to check that the comment indicator '#' also works for new dc(1). */ /* Use 'Z' command. Detects dc.sed as new style. */ fprintf(ofd, "[1 [aP]sO [lmxaP]so ]sB\n"); fprintf(ofd, "[lZx [;Cx]sO [lmx;Cx]so ]sA\n"); fprintf(ofd, "0 0 [o] Z 1=B 0=A 0sA 0sB 0sZ c\n"); break; } } if (do_input && inputmode == 1) { pr("[[INPUT command failed to execute.\n]P"); prv("%dQ]sQ", max_input_depth+3); pr("[?z0=Qlp:a]si"); do_input = 0; if (do_run) do_run = 2; } if (do_input && inputmode == 2) { int flg = 0; pr("0si"); fprintf(stderr, "Please enter the input data for the program: "); while((ch = fgetc(stdin)) != EOF) { prv("%dlid1+si:I", ch); if (ch == '\n') { if (flg) fprintf(stderr, ">"); else fprintf(stderr, "More? ^D to end>"); flg = 1; } } fprintf(stderr, "\n"); pr("0li:I0sn"); pr("[lnd1+sn;Ilp:a]si"); do_input = 0; } if (do_input && inputmode == 3) { pr("[]si\n"); do_input = 0; } if (do_input && inputmode == 4) { /* Simple input command '$', read a byte and push if not EOF */ pr("[[INPUT command failed to execute.\n]P"); prv("%dQ]sQ", max_input_depth+3); pr("[$z0=Qlp:a]si"); do_input = 0; if (do_run) do_run = 2; } if (do_input && inputmode == 5) { /* New for GNU dc, character I/O with "G" ... soon after 2013! */ fprintf(ofd, "[1G [sB_1]SA [bAla]SB 0=A Bx 0sALAaBLB+ ]si\n"); do_input = 0; } if (do_input && inputmode == 6) { /* New for GNU dc, character input with "$" ... soon after 2013! 
*/ fprintf(ofd, "0dd:Isn\n"); fprintf(ofd, "[[0$I0sn]SNln0;I!>N\n"); fprintf(ofd, "[d>.ln1+dsn;I]sN_1ln0;I>N0sNLNd>.]si\n"); do_input = 0; } if (do_input) { pr("[[INPUT command failed to execute.\n]P"); prv("%dQ", max_input_depth+2); pr("]si"); } #ifndef _WIN32 if (do_run) { fprintf(ofd, "LFx "); if (do_run>1) { signal(SIGCHLD, endprog); fflush(ofd); while((ch = fgetc(stdin)) != EOF) { fprintf(ofd, "%d\n", ch); fflush(ofd); } fprintf(ofd, "_1\n"); } pclose(ofd); } else #endif if (do_trailer) fprintf(ofd, "LFx\n"); } static void print_dcstring(char * str) { char buf[BUFSIZ]; size_t outlen = 0; int badchar = 0; if (!str) return; for(;; str++) { if (outlen && (*str == 0 || badchar || outlen > sizeof(buf)-8)) { buf[outlen] = 0; prv("[%s]P", buf); outlen = 0; } if (badchar) { if (outputmode == 2) prv("%daP", badchar); else prv("%dlOx", badchar); do_output = 1; badchar = 0; } if (!*str) break; if (*str > '~' || (*str < ' ' && *str != '\n' && *str != '\t') || *str == '\\' /* BSD dc */ || (outputmode == 1 && (*str == '|' || *str == '~')) /* dc.sed */ || *str == '[' || *str == ']') badchar = (*str & 0xFF); else buf[outlen++] = *str; } }
gpl-2.0
sten0/btrfs-progs
extent_io.c
1
23502
/* * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include <stdbool.h> #include "kerncompat.h" #include "extent_io.h" #include "list.h" #include "ctree.h" #include "volumes.h" #include "utils.h" #include "internal.h" void extent_io_tree_init(struct extent_io_tree *tree) { cache_tree_init(&tree->state); cache_tree_init(&tree->cache); INIT_LIST_HEAD(&tree->lru); tree->cache_size = 0; tree->max_cache_size = (u64)total_memory() / 4; } void extent_io_tree_init_cache_max(struct extent_io_tree *tree, u64 max_cache_size) { extent_io_tree_init(tree); tree->max_cache_size = max_cache_size; } static struct extent_state *alloc_extent_state(void) { struct extent_state *state; state = malloc(sizeof(*state)); if (!state) return NULL; state->cache_node.objectid = 0; state->refs = 1; state->state = 0; state->xprivate = 0; return state; } static void btrfs_free_extent_state(struct extent_state *state) { state->refs--; BUG_ON(state->refs < 0); if (state->refs == 0) free(state); } static void free_extent_state_func(struct cache_extent *cache) { struct extent_state *es; es = container_of(cache, struct extent_state, cache_node); btrfs_free_extent_state(es); } static void free_extent_buffer_final(struct extent_buffer *eb); void 
extent_io_tree_cleanup(struct extent_io_tree *tree) { struct extent_buffer *eb; while(!list_empty(&tree->lru)) { eb = list_entry(tree->lru.next, struct extent_buffer, lru); if (eb->refs) { fprintf(stderr, "extent buffer leak: start %llu len %u\n", (unsigned long long)eb->start, eb->len); free_extent_buffer_nocache(eb); } else { free_extent_buffer_final(eb); } } cache_tree_free_extents(&tree->state, free_extent_state_func); } static inline void update_extent_state(struct extent_state *state) { state->cache_node.start = state->start; state->cache_node.size = state->end + 1 - state->start; } /* * Utility function to look for merge candidates inside a given range. * Any extents with matching state are merged together into a single * extent in the tree. Extents with EXTENT_IO in their state field are * not merged */ static int merge_state(struct extent_io_tree *tree, struct extent_state *state) { struct extent_state *other; struct cache_extent *other_node; if (state->state & EXTENT_IOBITS) return 0; other_node = prev_cache_extent(&state->cache_node); if (other_node) { other = container_of(other_node, struct extent_state, cache_node); if (other->end == state->start - 1 && other->state == state->state) { state->start = other->start; update_extent_state(state); remove_cache_extent(&tree->state, &other->cache_node); btrfs_free_extent_state(other); } } other_node = next_cache_extent(&state->cache_node); if (other_node) { other = container_of(other_node, struct extent_state, cache_node); if (other->start == state->end + 1 && other->state == state->state) { other->start = state->start; update_extent_state(other); remove_cache_extent(&tree->state, &state->cache_node); btrfs_free_extent_state(state); } } return 0; } /* * insert an extent_state struct into the tree. 'bits' are set on the * struct before it is inserted. 
*/ static int insert_state(struct extent_io_tree *tree, struct extent_state *state, u64 start, u64 end, int bits) { int ret; BUG_ON(end < start); state->state |= bits; state->start = start; state->end = end; update_extent_state(state); ret = insert_cache_extent(&tree->state, &state->cache_node); BUG_ON(ret); merge_state(tree, state); return 0; } /* * split a given extent state struct in two, inserting the preallocated * struct 'prealloc' as the newly created second half. 'split' indicates an * offset inside 'orig' where it should be split. */ static int split_state(struct extent_io_tree *tree, struct extent_state *orig, struct extent_state *prealloc, u64 split) { int ret; prealloc->start = orig->start; prealloc->end = split - 1; prealloc->state = orig->state; update_extent_state(prealloc); orig->start = split; update_extent_state(orig); ret = insert_cache_extent(&tree->state, &prealloc->cache_node); BUG_ON(ret); return 0; } /* * clear some bits on a range in the tree. */ static int clear_state_bit(struct extent_io_tree *tree, struct extent_state *state, int bits) { int ret = state->state & bits; state->state &= ~bits; if (state->state == 0) { remove_cache_extent(&tree->state, &state->cache_node); btrfs_free_extent_state(state); } else { merge_state(tree, state); } return ret; } /* * extent_buffer_bitmap_set - set an area of a bitmap * @eb: the extent buffer * @start: offset of the bitmap item in the extent buffer * @pos: bit number of the first bit * @len: number of bits to set */ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start, unsigned long pos, unsigned long len) { u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos); const unsigned int size = pos + len; int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE); u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos); while (len >= bits_to_set) { *p |= mask_to_set; len -= bits_to_set; bits_to_set = BITS_PER_BYTE; mask_to_set = ~0; p++; } if (len) { mask_to_set &= BITMAP_LAST_BYTE_MASK(size); *p |= 
mask_to_set; } } /* * extent_buffer_bitmap_clear - clear an area of a bitmap * @eb: the extent buffer * @start: offset of the bitmap item in the extent buffer * @pos: bit number of the first bit * @len: number of bits to clear */ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start, unsigned long pos, unsigned long len) { u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos); const unsigned int size = pos + len; int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE); u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos); while (len >= bits_to_clear) { *p &= ~mask_to_clear; len -= bits_to_clear; bits_to_clear = BITS_PER_BYTE; mask_to_clear = ~0; p++; } if (len) { mask_to_clear &= BITMAP_LAST_BYTE_MASK(size); *p &= ~mask_to_clear; } } /* * clear some bits on a range in the tree. */ int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits) { struct extent_state *state; struct extent_state *prealloc = NULL; struct cache_extent *node; u64 last_end; int err; int set = 0; again: if (!prealloc) { prealloc = alloc_extent_state(); if (!prealloc) return -ENOMEM; } /* * this search will find the extents that end after * our range starts */ node = search_cache_extent(&tree->state, start); if (!node) goto out; state = container_of(node, struct extent_state, cache_node); if (state->start > end) goto out; last_end = state->end; /* * | ---- desired range ---- | * | state | or * | ------------- state -------------- | * * We need to split the extent we found, and may flip * bits on second half. * * If the extent we found extends past our range, we * just split and search again. It'll get split again * the next time though. * * If the extent we found is inside our range, we clear * the desired bit on it. 
*/ if (state->start < start) { err = split_state(tree, state, prealloc, start); BUG_ON(err == -EEXIST); prealloc = NULL; if (err) goto out; if (state->end <= end) { set |= clear_state_bit(tree, state, bits); if (last_end == (u64)-1) goto out; start = last_end + 1; } else { start = state->start; } goto search_again; } /* * | ---- desired range ---- | * | state | * We need to split the extent, and clear the bit * on the first half */ if (state->start <= end && state->end > end) { err = split_state(tree, state, prealloc, end + 1); BUG_ON(err == -EEXIST); set |= clear_state_bit(tree, prealloc, bits); prealloc = NULL; goto out; } start = state->end + 1; set |= clear_state_bit(tree, state, bits); if (last_end == (u64)-1) goto out; start = last_end + 1; goto search_again; out: if (prealloc) btrfs_free_extent_state(prealloc); return set; search_again: if (start > end) goto out; goto again; } /* * set some bits on a range in the tree. */ int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits) { struct extent_state *state; struct extent_state *prealloc = NULL; struct cache_extent *node; int err = 0; u64 last_start; u64 last_end; again: if (!prealloc) { prealloc = alloc_extent_state(); if (!prealloc) return -ENOMEM; } /* * this search will find the extents that end after * our range starts */ node = search_cache_extent(&tree->state, start); if (!node) { err = insert_state(tree, prealloc, start, end, bits); BUG_ON(err == -EEXIST); prealloc = NULL; goto out; } state = container_of(node, struct extent_state, cache_node); last_start = state->start; last_end = state->end; /* * | ---- desired range ---- | * | state | * * Just lock what we found and keep going */ if (state->start == start && state->end <= end) { state->state |= bits; merge_state(tree, state); if (last_end == (u64)-1) goto out; start = last_end + 1; goto search_again; } /* * | ---- desired range ---- | * | state | * or * | ------------- state -------------- | * * We need to split the extent we 
found, and may flip bits on * second half. * * If the extent we found extends past our * range, we just split and search again. It'll get split * again the next time though. * * If the extent we found is inside our range, we set the * desired bit on it. */ if (state->start < start) { err = split_state(tree, state, prealloc, start); BUG_ON(err == -EEXIST); prealloc = NULL; if (err) goto out; if (state->end <= end) { state->state |= bits; start = state->end + 1; merge_state(tree, state); if (last_end == (u64)-1) goto out; start = last_end + 1; } else { start = state->start; } goto search_again; } /* * | ---- desired range ---- | * | state | or | state | * * There's a hole, we need to insert something in it and * ignore the extent we found. */ if (state->start > start) { u64 this_end; if (end < last_start) this_end = end; else this_end = last_start -1; err = insert_state(tree, prealloc, start, this_end, bits); BUG_ON(err == -EEXIST); prealloc = NULL; if (err) goto out; start = this_end + 1; goto search_again; } /* * | ---- desired range ---- | * | ---------- state ---------- | * We need to split the extent, and set the bit * on the first half */ err = split_state(tree, state, prealloc, end + 1); BUG_ON(err == -EEXIST); state->state |= bits; merge_state(tree, prealloc); prealloc = NULL; out: if (prealloc) btrfs_free_extent_state(prealloc); return err; search_again: if (start > end) goto out; goto again; } int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end) { return set_extent_bits(tree, start, end, EXTENT_DIRTY); } int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end) { return clear_extent_bits(tree, start, end, EXTENT_DIRTY); } int find_first_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, int bits) { struct cache_extent *node; struct extent_state *state; int ret = 1; /* * this search will find all the extents that end after * our range starts. 
*/ node = search_cache_extent(&tree->state, start); if (!node) goto out; while(1) { state = container_of(node, struct extent_state, cache_node); if (state->end >= start && (state->state & bits)) { *start_ret = state->start; *end_ret = state->end; ret = 0; break; } node = next_cache_extent(node); if (!node) break; } out: return ret; } int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, int filled) { struct extent_state *state = NULL; struct cache_extent *node; int bitset = 0; node = search_cache_extent(&tree->state, start); while (node && start <= end) { state = container_of(node, struct extent_state, cache_node); if (filled && state->start > start) { bitset = 0; break; } if (state->start > end) break; if (state->state & bits) { bitset = 1; if (!filled) break; } else if (filled) { bitset = 0; break; } start = state->end + 1; if (start > end) break; node = next_cache_extent(node); if (!node) { if (filled) bitset = 0; break; } } return bitset; } int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) { struct cache_extent *node; struct extent_state *state; int ret = 0; node = search_cache_extent(&tree->state, start); if (!node) { ret = -ENOENT; goto out; } state = container_of(node, struct extent_state, cache_node); if (state->start != start) { ret = -ENOENT; goto out; } state->xprivate = private; out: return ret; } int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private) { struct cache_extent *node; struct extent_state *state; int ret = 0; node = search_cache_extent(&tree->state, start); if (!node) { ret = -ENOENT; goto out; } state = container_of(node, struct extent_state, cache_node); if (state->start != start) { ret = -ENOENT; goto out; } *private = state->xprivate; out: return ret; } static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *info, u64 bytenr, u32 blocksize) { struct extent_buffer *eb; eb = calloc(1, sizeof(struct extent_buffer) + blocksize); if (!eb) return NULL; 
eb->start = bytenr; eb->len = blocksize; eb->refs = 1; eb->flags = 0; eb->fd = -1; eb->dev_bytenr = (u64)-1; eb->cache_node.start = bytenr; eb->cache_node.size = blocksize; eb->fs_info = info; eb->tree = &info->extent_cache; INIT_LIST_HEAD(&eb->recow); INIT_LIST_HEAD(&eb->lru); return eb; } struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src) { struct extent_buffer *new; new = __alloc_extent_buffer(src->fs_info, src->start, src->len); if (!new) return NULL; /* cloned eb is not linked into fs_info->extent_cache */ new->tree = NULL; copy_extent_buffer(new, src, 0, 0, src->len); new->flags |= EXTENT_BUFFER_DUMMY; return new; } static void free_extent_buffer_final(struct extent_buffer *eb) { struct extent_io_tree *tree = eb->tree; BUG_ON(eb->refs); BUG_ON(tree && tree->cache_size < eb->len); list_del_init(&eb->lru); if (!(eb->flags & EXTENT_BUFFER_DUMMY)) { remove_cache_extent(&tree->cache, &eb->cache_node); tree->cache_size -= eb->len; } free(eb); } static void free_extent_buffer_internal(struct extent_buffer *eb, bool free_now) { if (!eb || IS_ERR(eb)) return; eb->refs--; BUG_ON(eb->refs < 0); if (eb->refs == 0) { if (eb->flags & EXTENT_DIRTY) { warning( "dirty eb leak (aborted trans): start %llu len %u", eb->start, eb->len); } list_del_init(&eb->recow); if (eb->flags & EXTENT_BUFFER_DUMMY || free_now) free_extent_buffer_final(eb); } } void free_extent_buffer(struct extent_buffer *eb) { free_extent_buffer_internal(eb, 0); } void free_extent_buffer_nocache(struct extent_buffer *eb) { free_extent_buffer_internal(eb, 1); } struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, u64 bytenr, u32 blocksize) { struct extent_buffer *eb = NULL; struct cache_extent *cache; cache = lookup_cache_extent(&tree->cache, bytenr, blocksize); if (cache && cache->start == bytenr && cache->size == blocksize) { eb = container_of(cache, struct extent_buffer, cache_node); list_move_tail(&eb->lru, &tree->lru); eb->refs++; } return eb; } struct 
extent_buffer *find_first_extent_buffer(struct extent_io_tree *tree, u64 start) { struct extent_buffer *eb = NULL; struct cache_extent *cache; cache = search_cache_extent(&tree->cache, start); if (cache) { eb = container_of(cache, struct extent_buffer, cache_node); list_move_tail(&eb->lru, &tree->lru); eb->refs++; } return eb; } static void trim_extent_buffer_cache(struct extent_io_tree *tree) { struct extent_buffer *eb, *tmp; list_for_each_entry_safe(eb, tmp, &tree->lru, lru) { if (eb->refs == 0) free_extent_buffer_final(eb); if (tree->cache_size <= ((tree->max_cache_size * 9) / 10)) break; } } struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 bytenr, u32 blocksize) { struct extent_buffer *eb; struct extent_io_tree *tree = &fs_info->extent_cache; struct cache_extent *cache; cache = lookup_cache_extent(&tree->cache, bytenr, blocksize); if (cache && cache->start == bytenr && cache->size == blocksize) { eb = container_of(cache, struct extent_buffer, cache_node); list_move_tail(&eb->lru, &tree->lru); eb->refs++; } else { int ret; if (cache) { eb = container_of(cache, struct extent_buffer, cache_node); free_extent_buffer(eb); } eb = __alloc_extent_buffer(fs_info, bytenr, blocksize); if (!eb) return NULL; ret = insert_cache_extent(&tree->cache, &eb->cache_node); if (ret) { free(eb); return NULL; } list_add_tail(&eb->lru, &tree->lru); tree->cache_size += blocksize; if (tree->cache_size >= tree->max_cache_size) trim_extent_buffer_cache(tree); } return eb; } int read_extent_from_disk(struct extent_buffer *eb, unsigned long offset, unsigned long len) { int ret; ret = pread(eb->fd, eb->data + offset, len, eb->dev_bytenr); if (ret < 0) { ret = -errno; goto out; } if (ret != len) { ret = -EIO; goto out; } ret = 0; out: return ret; } int write_extent_to_disk(struct extent_buffer *eb) { int ret; ret = pwrite(eb->fd, eb->data, eb->len, eb->dev_bytenr); if (ret < 0) goto out; if (ret != eb->len) { ret = -EIO; goto out; } ret = 0; out: return ret; } int 
read_data_from_disk(struct btrfs_fs_info *info, void *buf, u64 offset, u64 bytes, int mirror) { struct btrfs_multi_bio *multi = NULL; struct btrfs_device *device; u64 bytes_left = bytes; u64 read_len; u64 total_read = 0; int ret; while (bytes_left) { read_len = bytes_left; ret = btrfs_map_block(info, READ, offset, &read_len, &multi, mirror, NULL); if (ret) { fprintf(stderr, "Couldn't map the block %Lu\n", offset); return -EIO; } device = multi->stripes[0].dev; read_len = min(bytes_left, read_len); if (device->fd <= 0) { kfree(multi); return -EIO; } ret = pread(device->fd, buf + total_read, read_len, multi->stripes[0].physical); kfree(multi); if (ret < 0) { fprintf(stderr, "Error reading %Lu, %d\n", offset, ret); return ret; } if (ret != read_len) { fprintf(stderr, "Short read for %Lu, read %d, " "read_len %Lu\n", offset, ret, read_len); return -EIO; } bytes_left -= read_len; offset += read_len; total_read += read_len; } return 0; } int write_data_to_disk(struct btrfs_fs_info *info, void *buf, u64 offset, u64 bytes, int mirror) { struct btrfs_multi_bio *multi = NULL; struct btrfs_device *device; u64 bytes_left = bytes; u64 this_len; u64 total_write = 0; u64 *raid_map = NULL; u64 dev_bytenr; int dev_nr; int ret = 0; while (bytes_left > 0) { this_len = bytes_left; dev_nr = 0; ret = btrfs_map_block(info, WRITE, offset, &this_len, &multi, mirror, &raid_map); if (ret) { fprintf(stderr, "Couldn't map the block %Lu\n", offset); return -EIO; } if (raid_map) { struct extent_buffer *eb; u64 stripe_len = this_len; this_len = min(this_len, bytes_left); this_len = min(this_len, (u64)info->nodesize); eb = malloc(sizeof(struct extent_buffer) + this_len); if (!eb) { fprintf(stderr, "cannot allocate memory for eb\n"); ret = -ENOMEM; goto out; } memset(eb, 0, sizeof(struct extent_buffer) + this_len); eb->start = offset; eb->len = this_len; memcpy(eb->data, buf + total_write, this_len); ret = write_raid56_with_parity(info, eb, multi, stripe_len, raid_map); BUG_ON(ret); free(eb); 
kfree(raid_map); raid_map = NULL; } else while (dev_nr < multi->num_stripes) { device = multi->stripes[dev_nr].dev; if (device->fd <= 0) { kfree(multi); return -EIO; } dev_bytenr = multi->stripes[dev_nr].physical; this_len = min(this_len, bytes_left); dev_nr++; ret = pwrite(device->fd, buf + total_write, this_len, dev_bytenr); if (ret != this_len) { if (ret < 0) { fprintf(stderr, "Error writing to " "device %d\n", errno); ret = errno; kfree(multi); return ret; } else { fprintf(stderr, "Short write\n"); kfree(multi); return -EIO; } } } BUG_ON(bytes_left < this_len); bytes_left -= this_len; offset += this_len; total_write += this_len; kfree(multi); multi = NULL; } return 0; out: kfree(raid_map); return ret; } int set_extent_buffer_dirty(struct extent_buffer *eb) { struct extent_io_tree *tree = eb->tree; if (!(eb->flags & EXTENT_DIRTY)) { eb->flags |= EXTENT_DIRTY; set_extent_dirty(tree, eb->start, eb->start + eb->len - 1); extent_buffer_get(eb); } return 0; } int clear_extent_buffer_dirty(struct extent_buffer *eb) { struct extent_io_tree *tree = eb->tree; if (eb->flags & EXTENT_DIRTY) { eb->flags &= ~EXTENT_DIRTY; clear_extent_dirty(tree, eb->start, eb->start + eb->len - 1); free_extent_buffer(eb); } return 0; } int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv, unsigned long start, unsigned long len) { return memcmp(eb->data + start, ptrv, len); } void read_extent_buffer(struct extent_buffer *eb, void *dst, unsigned long start, unsigned long len) { memcpy(dst, eb->data + start, len); } void write_extent_buffer(struct extent_buffer *eb, const void *src, unsigned long start, unsigned long len) { memcpy(eb->data + start, src, len); } void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, unsigned long dst_offset, unsigned long src_offset, unsigned long len) { memcpy(dst->data + dst_offset, src->data + src_offset, len); } void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, unsigned long src_offset, 
unsigned long len) { memmove(dst->data + dst_offset, dst->data + src_offset, len); } void memset_extent_buffer(struct extent_buffer *eb, char c, unsigned long start, unsigned long len) { memset(eb->data + start, c, len); } int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start, unsigned long nr) { return le_test_bit(nr, (u8 *)eb->data + start); }
gpl-2.0
insilico/reliefseq
ArffDataset.cpp
1
13226
/* * ArffDataset.cpp - Bill White * see ArffDataset.h and http://www.cs.waikato.ac.nz/ml/weka/arff.html */ #include <string> #include <iostream> #include <fstream> #include <cstring> #include <string> #include <sstream> #include <boost/lexical_cast.hpp> #include "Dataset.h" #include "DatasetInstance.h" #include "StringUtils.h" #include "ArffDataset.h" #include "Insilico.h" using namespace std; using namespace insilico; using namespace boost; ArffDataset::ArffDataset() { ::Dataset(); missingAttributeValuesToCheck.push_back("?"); missingClassValuesToCheck.push_back("?"); } bool ArffDataset::LoadSnps(string filename) { snpsFilename = filename; ifstream dataStream(snpsFilename.c_str()); if (!dataStream.is_open()) { cerr << "ERROR: Could not open dataset: " << snpsFilename << endl; return false; } cout << Timestamp() << "ArffDataset: Reading lines from " << snpsFilename << endl; string line; int firstSpace = -1, secondSpace = -1; string attributeName = ""; int attributeIndex = 0; int numericsIndex = 0; string attributeType = ""; string classTypeString = ""; unsigned int lineNumber = 0; double minPheno = 0.0, maxPheno = 0.0; while (getline(dataStream, line)) { ++lineNumber; string trimmedLine = trim(line); // skip blank lines if (trimmedLine.size() < 1) { continue; } // check the first character of the line switch (trimmedLine.at(0)) { case '%': // skip comment lines continue; case '@': // relation, attribute or data firstSpace = trimmedLine.find(" "); string keyword = to_upper(trimmedLine.substr(1, firstSpace - 1)); // cout << "keyword => " << keyword << endl; if (keyword == "RELATION") { relationName = keyword; } if (keyword == "ATTRIBUTE") { secondSpace = trimmedLine.find(" ", firstSpace + 1); attributeName = trimmedLine.substr(firstSpace + 1, secondSpace - firstSpace - 1); // cout << "DEBUG attribute name: [" << attributeName << "]" << endl; if (to_upper(attributeName) == "CLASS") { classColumn = attributeIndex; cout << Timestamp() << "Class column detect: " << 
classColumn << endl; classTypeString = to_upper(trimmedLine.substr(secondSpace + 1)); if (classTypeString == "NUMERIC") { hasContinuousPhenotypes = true; cout << Timestamp() << "Detected continuous phenotype" << endl; } else { hasContinuousPhenotypes = false; cout << Timestamp() << "Detected case-control phenotype" << endl; } } else { attributeNames.push_back(attributeName); attributeType = to_upper(trimmedLine.substr(secondSpace + 1)); if (attributeType == "STRING") { cerr << "ERROR: STRING attributes are not yet supported" << endl; return false; attributeTypes.push_back(ARFF_STRING_TYPE); } if (attributeType == "DATE") { cerr << "ERROR: DATE attributes are not yet supported" << endl; return false; attributeTypes.push_back(ARFF_DATE_TYPE); } if (attributeType == "NUMERIC") { attributeTypes.push_back(ARFF_NUMERIC_TYPE); numericsMask[attributeName] = numericsIndex; numericsNames.push_back(attributeName); ++numericsIndex; } else { // must be nominal type - add nominal values to map attributeTypes.push_back(ARFF_NOMINAL_TYPE); vector<string> tokens; split(tokens, trimmedLine, "{"); vector<string>::const_iterator it = tokens.end() - 1; string nominalsListWithCurly = *it; string nominalsList = trim( nominalsListWithCurly.substr(0, nominalsListWithCurly.size() - 1)); vector<string> nominals; split(nominals, nominalsList, ","); // GENETICS CHECK HERE for plink recodeA encoding if (nominals.size() != 3) { cerr << "ERROR: This dataset is currently unsupported. SNP data " << "must be encoded with {0, 1, 2} for {homozygous1, " << "heterzygote, homozygous2} respectively. The following " << "attributes were read successfully" << endl; PrintNominalsMapping(); return false; } // GENETICS CHECK HERE if ((nominals[0] == "0") && (nominals[1] == "1") && (nominals[2] == "2")) { nominalValues[attributeName] = nominals; attributesMask[attributeName] = attributeIndex; } else { cerr << "ERROR: This dataset is currently unsupported. 
SNP data " << "must be encoded with {0, 1, 2} for {homozygous1, " << "heterzygote, homozygous2} respectively. The following " << "attributes were read successfully" << endl; PrintNominalsMapping(); return false; } // end genetics check } // end nominal } // end class or attribute ++attributeIndex; } // keyword = attribute // the rest of the file is instances if (keyword == "DATA") { int numAttributes = attributesMask.size(); if (numAttributes) { hasGenotypes = true; levelCounts.resize(numAttributes); levelCountsByClass.resize(numAttributes); attributeLevelsSeen.resize(numAttributes); attributeAlleleCounts.resize(numAttributes); attributeMinorAllele.resize(numAttributes); genotypeCounts.resize(numAttributes); attributeMutationTypes.resize(numAttributes); } else { hasGenotypes = false; } int numNumerics = numericsMask.size(); if (numNumerics) { hasNumerics = true; } else { hasNumerics = false; } lineNumber = 0; bool makeLineIntoInstance = true; unsigned int instanceIndex = 0; int numericsAdded = 0; while (getline(dataStream, line)) { ++lineNumber; string trimmedLine = trim(line); // skip blank lines in the data section (usually end of file) if (!trimmedLine.size()) { continue; } // only load matching IDs, line numbers for non-plink files ostringstream ssLineNum; ssLineNum << zeroPadNumber(lineNumber, 8); string ID = ssLineNum.str() + ssLineNum.str(); // filter out IDs if (!IsLoadableInstanceID(ID)) { cout << Timestamp() << "WARNING: " << "Dataset instance ID [" << ID << "] skipped. " << "Not found in list of loadable IDs. 
Numerics and/or " << "phenotype file(s) matching filtered out this ID" << endl; continue; } vector<string> attributesStringVector; split(attributesStringVector, trimmedLine, ","); vector<AttributeLevel> attributeVector; vector<NumericLevel> numericsVector; unsigned int attrIdx = 0; unsigned int vectorIdx = 0; makeLineIntoInstance = true; vector<string>::const_iterator it = attributesStringVector.begin(); ClassLevel discreteClassLevel = MISSING_DISCRETE_CLASS_VALUE; NumericLevel numericClassLevel = MISSING_NUMERIC_CLASS_VALUE; for (; it != attributesStringVector.end(); ++it, ++vectorIdx) { string thisAttr = *it; if (vectorIdx == classColumn) { discreteClassLevel = MISSING_DISCRETE_CLASS_VALUE; numericClassLevel = MISSING_NUMERIC_CLASS_VALUE; if (hasContinuousPhenotypes) { if (thisAttr != "-9") { numericClassLevel = lexical_cast<NumericLevel>(thisAttr); if (lineNumber == 1) { minPheno = maxPheno = numericClassLevel; } else { if (numericClassLevel < minPheno) { minPheno = numericClassLevel; } if (numericClassLevel > maxPheno) { maxPheno = numericClassLevel; } } } else { if (!hasAlternatePhenotypes) { cout << Timestamp() << "Instance ID " << ID << " filtered out by missing value" << endl; continue; } } } else { if (thisAttr != "-9") { discreteClassLevel = lexical_cast<ClassLevel>(thisAttr); } else { if (!hasAlternatePhenotypes) { cout << Timestamp() << "Instance ID " << ID << " filtered out by missing value" << endl; continue; } } } } else { if (attributeTypes[attrIdx] == ARFF_NUMERIC_TYPE) { if (thisAttr == "?") { numericsVector.push_back(MISSING_NUMERIC_VALUE); missingNumericValues[ID].push_back(attrIdx); } else { double thisNumericValue = lexical_cast<NumericLevel>( thisAttr); numericsVector.push_back(thisNumericValue); } ++numericsAdded; } else { if (attributeTypes[attrIdx] == ARFF_NOMINAL_TYPE) { AttributeLevel thisAttrLevel = MISSING_ATTRIBUTE_VALUE; if (thisAttr == "?") { missingValues[ID].push_back(attrIdx); } else { thisAttrLevel = 
lexical_cast<AttributeLevel>(thisAttr); attributeLevelsSeen[attrIdx].insert(thisAttr); } attributeVector.push_back(thisAttrLevel); ++attrIdx; } else { cout << Timestamp() << "Unrecognized attribute type!" << endl; return false; } } } } // create an instance from the vector of attribute and class values if (makeLineIntoInstance) { DatasetInstance * newInst = 0; if ((int) attributeVector.size() != numAttributes) { cerr << "ERROR: Number of attributes parsed on line " << lineNumber << ": " << attributesStringVector.size() << " is not equal to the number of attributes " << " read from the data file header: " << numAttributes << endl; return false; } if ((int) numericsVector.size() != numNumerics) { cerr << "ERROR: Number of numerics parsed on line " << lineNumber << ": " << numericsVector.size() << " is not equal to the number of attributes " << " read from the data file header: " << numNumerics << endl; return false; } newInst = new DatasetInstance(this); if (newInst) { if (hasContinuousPhenotypes) { newInst->SetPredictedValueTau(numericClassLevel); } else { newInst->SetClass(discreteClassLevel); classIndexes[discreteClassLevel].push_back(instanceIndex); } if (hasGenotypes) { newInst->LoadInstanceFromVector(attributeVector); } if (hasNumerics) { for (int i = 0; i < (int) numericsVector.size(); ++i) { newInst->AddNumeric(numericsVector[i]); } } instances.push_back(newInst); instanceIds.push_back(ID); // instanceIdsToLoad.push_back(ID); instancesMask[ID] = instanceIndex; } else { cerr << "ERROR: loading ARFF @data section. 
" << "Could not create dataset instance for line number " << lineNumber << endl; return false; } ++instanceIndex; } // make new instance // happy lights if ((lineNumber - 1) && ((lineNumber % 100) == 0)) { cout << Timestamp() << lineNumber << endl; } } // while reading file lines } // keyword = data break; } // end switch } // end while cout << Timestamp() << lineNumber << " lines read" << endl; dataStream.close(); cout << Timestamp() << "There are " << NumInstances() << " instances in the data set" << endl; cout << Timestamp() << "There are " << instancesMask.size() << " instances in the instance mask" << endl; if (instancesMask.size() == 0) { cerr << "ERROR: no instances in the instance mask" << endl; return false; } if (hasContinuousPhenotypes) { continuousPhenotypeMinMax = make_pair(minPheno, maxPheno); cout << Timestamp() << "Continuous phenotypes." << endl; } else { cout << Timestamp() << "There are " << classIndexes.size() << " classes in the data set" << endl; } if (hasNumerics) { // find the min and max values for each numeric attribute // used in diff/distance calculation metrics vector<NumericLevel> numericColumn; for (unsigned int i = 0; i < NumNumerics(); ++i) { GetNumericValues(i, numericColumn); double minElement = *numericColumn.begin(); double maxElement = *numericColumn.begin(); for (vector<NumericLevel>::const_iterator it = numericColumn.begin(); it != numericColumn.end(); ++it) { if ((*it != MISSING_NUMERIC_VALUE) && (*it < minElement)) { minElement = *it; } if ((*it != MISSING_NUMERIC_VALUE) && (*it > maxElement)) { maxElement = *it; } } numericsMinMax.push_back(make_pair(minElement, maxElement)); } } if (hasGenotypes) { UpdateAllLevelCounts(); CreateDummyAlleles(); } return true; } ArffAttributeType ArffDataset::GetTypeOf(unsigned int columnIndex) { if (columnIndex < attributeTypes.size()) { return attributeTypes[columnIndex]; } else return (ARFF_ERROR_TYPE); } void ArffDataset::PrintNominalsMapping() { cout << Timestamp() << "Nominals and 
their accepted values:" << endl; map<string, vector<string> >::const_iterator mit = nominalValues.begin(); for (; mit != nominalValues.end(); ++mit) { cout << (*mit).first << ":"; vector<string>::const_iterator it = (*mit).second.begin(); for (; it != (*mit).second.end(); ++it) { cout << " " << *it; } cout << endl; } }
gpl-2.0
andi34/kernel_samsung_espresso-cm
fs/nfsd/nfs4acl.c
1
22028
/* * Common NFSv4 ACL handling code. * * Copyright (c) 2002, 2003 The Regents of the University of Michigan. * All rights reserved. * * Marius Aamodt Eriksen <marius@umich.edu> * Jeff Sedlak <jsedlak@umich.edu> * J. Bruce Fields <bfields@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/slab.h> #include <linux/nfs_fs.h> #include "acl.h" /* mode bit translations: */ #define NFS4_READ_MODE (NFS4_ACE_READ_DATA) #define NFS4_WRITE_MODE (NFS4_ACE_WRITE_DATA | NFS4_ACE_APPEND_DATA) #define NFS4_EXECUTE_MODE NFS4_ACE_EXECUTE #define NFS4_ANYONE_MODE (NFS4_ACE_READ_ATTRIBUTES | NFS4_ACE_READ_ACL | NFS4_ACE_SYNCHRONIZE) #define NFS4_OWNER_MODE (NFS4_ACE_WRITE_ATTRIBUTES | NFS4_ACE_WRITE_ACL) /* We don't support these bits; insist they be neither allowed nor denied */ #define NFS4_MASK_UNSUPP (NFS4_ACE_DELETE | NFS4_ACE_WRITE_OWNER \ | NFS4_ACE_READ_NAMED_ATTRS | NFS4_ACE_WRITE_NAMED_ATTRS) /* flags used to simulate posix default ACLs */ #define NFS4_INHERITANCE_FLAGS (NFS4_ACE_FILE_INHERIT_ACE \ | NFS4_ACE_DIRECTORY_INHERIT_ACE) #define NFS4_SUPPORTED_FLAGS (NFS4_INHERITANCE_FLAGS \ | NFS4_ACE_INHERIT_ONLY_ACE \ | NFS4_ACE_IDENTIFIER_GROUP) #define MASK_EQUAL(mask1, mask2) \ ( ((mask1) & NFS4_ACE_MASK_ALL) == ((mask2) & NFS4_ACE_MASK_ALL) ) static u32 mask_from_posix(unsigned short perm, unsigned int flags) { int mask = NFS4_ANYONE_MODE; if (flags & NFS4_ACL_OWNER) mask |= NFS4_OWNER_MODE; if (perm & ACL_READ) mask |= NFS4_READ_MODE; if (perm & ACL_WRITE) mask |= NFS4_WRITE_MODE; if ((perm & ACL_WRITE) && (flags & NFS4_ACL_DIR)) mask |= NFS4_ACE_DELETE_CHILD; if (perm & ACL_EXECUTE) mask |= NFS4_EXECUTE_MODE; return mask; } static u32 deny_mask_from_posix(unsigned short perm, u32 flags) { u32 mask = 0; if (perm & ACL_READ) mask |= NFS4_READ_MODE; if (perm & ACL_WRITE) mask |= NFS4_WRITE_MODE; if ((perm & ACL_WRITE) && (flags & NFS4_ACL_DIR)) mask |= NFS4_ACE_DELETE_CHILD; if (perm & ACL_EXECUTE) mask |= NFS4_EXECUTE_MODE; return mask; } /* XXX: modify functions to return NFS errors; they're only ever * used by nfs code, after all.... */ /* We only map from NFSv4 to POSIX ACLs when setting ACLs, when we err on the * side of being more restrictive, so the mode bit mapping below is * pessimistic. 
An optimistic version would be needed to handle DENY's, * but we espect to coalesce all ALLOWs and DENYs before mapping to mode * bits. */ static void low_mode_from_nfs4(u32 perm, unsigned short *mode, unsigned int flags) { u32 write_mode = NFS4_WRITE_MODE; if (flags & NFS4_ACL_DIR) write_mode |= NFS4_ACE_DELETE_CHILD; *mode = 0; if ((perm & NFS4_READ_MODE) == NFS4_READ_MODE) *mode |= ACL_READ; if ((perm & write_mode) == write_mode) *mode |= ACL_WRITE; if ((perm & NFS4_EXECUTE_MODE) == NFS4_EXECUTE_MODE) *mode |= ACL_EXECUTE; } struct ace_container { struct nfs4_ace *ace; struct list_head ace_l; }; static short ace2type(struct nfs4_ace *); static void _posix_to_nfsv4_one(struct posix_acl *, struct nfs4_acl *, unsigned int); struct nfs4_acl * nfs4_acl_posix_to_nfsv4(struct posix_acl *pacl, struct posix_acl *dpacl, unsigned int flags) { struct nfs4_acl *acl; int size = 0; if (pacl) { if (posix_acl_valid(pacl) < 0) return ERR_PTR(-EINVAL); size += 2*pacl->a_count; } if (dpacl) { if (posix_acl_valid(dpacl) < 0) return ERR_PTR(-EINVAL); size += 2*dpacl->a_count; } /* Allocate for worst case: one (deny, allow) pair each: */ acl = nfs4_acl_new(size); if (acl == NULL) return ERR_PTR(-ENOMEM); if (pacl) _posix_to_nfsv4_one(pacl, acl, flags & ~NFS4_ACL_TYPE_DEFAULT); if (dpacl) _posix_to_nfsv4_one(dpacl, acl, flags | NFS4_ACL_TYPE_DEFAULT); return acl; } struct posix_acl_summary { unsigned short owner; unsigned short users; unsigned short group; unsigned short groups; unsigned short other; unsigned short mask; }; static void summarize_posix_acl(struct posix_acl *acl, struct posix_acl_summary *pas) { struct posix_acl_entry *pa, *pe; /* * Only pas.users and pas.groups need initialization; previous * posix_acl_valid() calls ensure that the other fields will be * initialized in the following loop. 
But, just to placate gcc: */ memset(pas, 0, sizeof(*pas)); pas->mask = 07; pe = acl->a_entries + acl->a_count; FOREACH_ACL_ENTRY(pa, acl, pe) { switch (pa->e_tag) { case ACL_USER_OBJ: pas->owner = pa->e_perm; break; case ACL_GROUP_OBJ: pas->group = pa->e_perm; break; case ACL_USER: pas->users |= pa->e_perm; break; case ACL_GROUP: pas->groups |= pa->e_perm; break; case ACL_OTHER: pas->other = pa->e_perm; break; case ACL_MASK: pas->mask = pa->e_perm; break; } } /* We'll only care about effective permissions: */ pas->users &= pas->mask; pas->group &= pas->mask; pas->groups &= pas->mask; } /* We assume the acl has been verified with posix_acl_valid. */ static void _posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl, unsigned int flags) { struct posix_acl_entry *pa, *group_owner_entry; struct nfs4_ace *ace; struct posix_acl_summary pas; unsigned short deny; int eflag = ((flags & NFS4_ACL_TYPE_DEFAULT) ? NFS4_INHERITANCE_FLAGS | NFS4_ACE_INHERIT_ONLY_ACE : 0); BUG_ON(pacl->a_count < 3); summarize_posix_acl(pacl, &pas); pa = pacl->a_entries; ace = acl->aces + acl->naces; /* We could deny everything not granted by the owner: */ deny = ~pas.owner; /* * but it is equivalent (and simpler) to deny only what is not * granted by later entries: */ deny &= pas.users | pas.group | pas.groups | pas.other; if (deny) { ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE; ace->flag = eflag; ace->access_mask = deny_mask_from_posix(deny, flags); ace->whotype = NFS4_ACL_WHO_OWNER; ace++; acl->naces++; } ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag; ace->access_mask = mask_from_posix(pa->e_perm, flags | NFS4_ACL_OWNER); ace->whotype = NFS4_ACL_WHO_OWNER; ace++; acl->naces++; pa++; while (pa->e_tag == ACL_USER) { deny = ~(pa->e_perm & pas.mask); deny &= pas.groups | pas.group | pas.other; if (deny) { ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE; ace->flag = eflag; ace->access_mask = deny_mask_from_posix(deny, flags); ace->whotype = NFS4_ACL_WHO_NAMED; ace->who = 
pa->e_id; ace++; acl->naces++; } ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag; ace->access_mask = mask_from_posix(pa->e_perm & pas.mask, flags); ace->whotype = NFS4_ACL_WHO_NAMED; ace->who = pa->e_id; ace++; acl->naces++; pa++; } /* In the case of groups, we apply allow ACEs first, then deny ACEs, * since a user can be in more than one group. */ /* allow ACEs */ group_owner_entry = pa; ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag; ace->access_mask = mask_from_posix(pas.group, flags); ace->whotype = NFS4_ACL_WHO_GROUP; ace++; acl->naces++; pa++; while (pa->e_tag == ACL_GROUP) { ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP; ace->access_mask = mask_from_posix(pa->e_perm & pas.mask, flags); ace->whotype = NFS4_ACL_WHO_NAMED; ace->who = pa->e_id; ace++; acl->naces++; pa++; } /* deny ACEs */ pa = group_owner_entry; deny = ~pas.group & pas.other; if (deny) { ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE; ace->flag = eflag; ace->access_mask = deny_mask_from_posix(deny, flags); ace->whotype = NFS4_ACL_WHO_GROUP; ace++; acl->naces++; } pa++; while (pa->e_tag == ACL_GROUP) { deny = ~(pa->e_perm & pas.mask); deny &= pas.other; if (deny) { ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE; ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP; ace->access_mask = deny_mask_from_posix(deny, flags); ace->whotype = NFS4_ACL_WHO_NAMED; ace->who = pa->e_id; ace++; acl->naces++; } pa++; } if (pa->e_tag == ACL_MASK) pa++; ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag; ace->access_mask = mask_from_posix(pa->e_perm, flags); ace->whotype = NFS4_ACL_WHO_EVERYONE; acl->naces++; } static void sort_pacl_range(struct posix_acl *pacl, int start, int end) { int sorted = 0, i; struct posix_acl_entry tmp; /* We just do a bubble sort; easy to do in place, and we're not * expecting acl's to be long enough to justify anything more. 
*/ while (!sorted) { sorted = 1; for (i = start; i < end; i++) { if (pacl->a_entries[i].e_id > pacl->a_entries[i+1].e_id) { sorted = 0; tmp = pacl->a_entries[i]; pacl->a_entries[i] = pacl->a_entries[i+1]; pacl->a_entries[i+1] = tmp; } } } } static void sort_pacl(struct posix_acl *pacl) { /* posix_acl_valid requires that users and groups be in order * by uid/gid. */ int i, j; if (pacl->a_count <= 4) return; /* no users or groups */ i = 1; while (pacl->a_entries[i].e_tag == ACL_USER) i++; sort_pacl_range(pacl, 1, i-1); BUG_ON(pacl->a_entries[i].e_tag != ACL_GROUP_OBJ); j = ++i; while (pacl->a_entries[j].e_tag == ACL_GROUP) j++; sort_pacl_range(pacl, i, j-1); return; } /* * While processing the NFSv4 ACE, this maintains bitmasks representing * which permission bits have been allowed and which denied to a given * entity: */ struct posix_ace_state { u32 allow; u32 deny; }; struct posix_user_ace_state { uid_t uid; struct posix_ace_state perms; }; struct posix_ace_state_array { int n; struct posix_user_ace_state aces[]; }; /* * While processing the NFSv4 ACE, this maintains the partial permissions * calculated so far: */ struct posix_acl_state { int empty; struct posix_ace_state owner; struct posix_ace_state group; struct posix_ace_state other; struct posix_ace_state everyone; struct posix_ace_state mask; /* Deny unused in this case */ struct posix_ace_state_array *users; struct posix_ace_state_array *groups; }; static int init_state(struct posix_acl_state *state, int cnt) { int alloc; memset(state, 0, sizeof(struct posix_acl_state)); state->empty = 1; /* * In the worst case, each individual acl could be for a distinct * named user or group, but we don't no which, so we allocate * enough space for either: */ alloc = sizeof(struct posix_ace_state_array) + cnt*sizeof(struct posix_user_ace_state); state->users = kzalloc(alloc, GFP_KERNEL); if (!state->users) return -ENOMEM; state->groups = kzalloc(alloc, GFP_KERNEL); if (!state->groups) { kfree(state->users); return -ENOMEM; 
} return 0; } static void free_state(struct posix_acl_state *state) { kfree(state->users); kfree(state->groups); } static inline void add_to_mask(struct posix_acl_state *state, struct posix_ace_state *astate) { state->mask.allow |= astate->allow; } /* * Certain bits (SYNCHRONIZE, DELETE, WRITE_OWNER, READ/WRITE_NAMED_ATTRS, * READ_ATTRIBUTES, READ_ACL) are currently unenforceable and don't translate * to traditional read/write/execute permissions. * * It's problematic to reject acls that use certain mode bits, because it * places the burden on users to learn the rules about which bits one * particular server sets, without giving the user a lot of help--we return an * error that could mean any number of different things. To make matters * worse, the problematic bits might be introduced by some application that's * automatically mapping from some other acl model. * * So wherever possible we accept anything, possibly erring on the side of * denying more permissions than necessary. * * However we do reject *explicit* DENY's of a few bits representing * permissions we could never deny: */ static inline int check_deny(u32 mask, int isowner) { if (mask & (NFS4_ACE_READ_ATTRIBUTES | NFS4_ACE_READ_ACL)) return -EINVAL; if (!isowner) return 0; if (mask & (NFS4_ACE_WRITE_ATTRIBUTES | NFS4_ACE_WRITE_ACL)) return -EINVAL; return 0; } static struct posix_acl * posix_state_to_acl(struct posix_acl_state *state, unsigned int flags) { struct posix_acl_entry *pace; struct posix_acl *pacl; int nace; int i, error = 0; /* * ACLs with no ACEs are treated differently in the inheritable * and effective cases: when there are no inheritable ACEs, we * set a zero-length default posix acl: */ if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) { pacl = posix_acl_alloc(0, GFP_KERNEL); return pacl ? pacl : ERR_PTR(-ENOMEM); } /* * When there are no effective ACEs, the following will end * up setting a 3-element effective posix ACL with all * permissions zero. 
*/ nace = 4 + state->users->n + state->groups->n; pacl = posix_acl_alloc(nace, GFP_KERNEL); if (!pacl) return ERR_PTR(-ENOMEM); pace = pacl->a_entries; pace->e_tag = ACL_USER_OBJ; error = check_deny(state->owner.deny, 1); if (error) goto out_err; low_mode_from_nfs4(state->owner.allow, &pace->e_perm, flags); pace->e_id = ACL_UNDEFINED_ID; for (i=0; i < state->users->n; i++) { pace++; pace->e_tag = ACL_USER; error = check_deny(state->users->aces[i].perms.deny, 0); if (error) goto out_err; low_mode_from_nfs4(state->users->aces[i].perms.allow, &pace->e_perm, flags); pace->e_id = state->users->aces[i].uid; add_to_mask(state, &state->users->aces[i].perms); } pace++; pace->e_tag = ACL_GROUP_OBJ; error = check_deny(state->group.deny, 0); if (error) goto out_err; low_mode_from_nfs4(state->group.allow, &pace->e_perm, flags); pace->e_id = ACL_UNDEFINED_ID; add_to_mask(state, &state->group); for (i=0; i < state->groups->n; i++) { pace++; pace->e_tag = ACL_GROUP; error = check_deny(state->groups->aces[i].perms.deny, 0); if (error) goto out_err; low_mode_from_nfs4(state->groups->aces[i].perms.allow, &pace->e_perm, flags); pace->e_id = state->groups->aces[i].uid; add_to_mask(state, &state->groups->aces[i].perms); } pace++; pace->e_tag = ACL_MASK; low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags); pace->e_id = ACL_UNDEFINED_ID; pace++; pace->e_tag = ACL_OTHER; error = check_deny(state->other.deny, 0); if (error) goto out_err; low_mode_from_nfs4(state->other.allow, &pace->e_perm, flags); pace->e_id = ACL_UNDEFINED_ID; return pacl; out_err: posix_acl_release(pacl); return ERR_PTR(error); } static inline void allow_bits(struct posix_ace_state *astate, u32 mask) { /* Allow all bits in the mask not already denied: */ astate->allow |= mask & ~astate->deny; } static inline void deny_bits(struct posix_ace_state *astate, u32 mask) { /* Deny all bits in the mask not already allowed: */ astate->deny |= mask & ~astate->allow; } static int find_uid(struct posix_acl_state *state, 
struct posix_ace_state_array *a, uid_t uid) { int i; for (i = 0; i < a->n; i++) if (a->aces[i].uid == uid) return i; /* Not found: */ a->n++; a->aces[i].uid = uid; a->aces[i].perms.allow = state->everyone.allow; a->aces[i].perms.deny = state->everyone.deny; return i; } static void deny_bits_array(struct posix_ace_state_array *a, u32 mask) { int i; for (i=0; i < a->n; i++) deny_bits(&a->aces[i].perms, mask); } static void allow_bits_array(struct posix_ace_state_array *a, u32 mask) { int i; for (i=0; i < a->n; i++) allow_bits(&a->aces[i].perms, mask); } static void process_one_v4_ace(struct posix_acl_state *state, struct nfs4_ace *ace) { u32 mask = ace->access_mask; int i; state->empty = 0; switch (ace2type(ace)) { case ACL_USER_OBJ: if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->owner, mask); } else { deny_bits(&state->owner, mask); } break; case ACL_USER: i = find_uid(state, state->users, ace->who); if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->users->aces[i].perms, mask); } else { deny_bits(&state->users->aces[i].perms, mask); mask = state->users->aces[i].perms.deny; deny_bits(&state->owner, mask); } break; case ACL_GROUP_OBJ: if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->group, mask); } else { deny_bits(&state->group, mask); mask = state->group.deny; deny_bits(&state->owner, mask); deny_bits(&state->everyone, mask); deny_bits_array(state->users, mask); deny_bits_array(state->groups, mask); } break; case ACL_GROUP: i = find_uid(state, state->groups, ace->who); if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->groups->aces[i].perms, mask); } else { deny_bits(&state->groups->aces[i].perms, mask); mask = state->groups->aces[i].perms.deny; deny_bits(&state->owner, mask); deny_bits(&state->group, mask); deny_bits(&state->everyone, mask); deny_bits_array(state->users, mask); deny_bits_array(state->groups, mask); } break; case ACL_OTHER: if (ace->type == 
NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->owner, mask); allow_bits(&state->group, mask); allow_bits(&state->other, mask); allow_bits(&state->everyone, mask); allow_bits_array(state->users, mask); allow_bits_array(state->groups, mask); } else { deny_bits(&state->owner, mask); deny_bits(&state->group, mask); deny_bits(&state->other, mask); deny_bits(&state->everyone, mask); deny_bits_array(state->users, mask); deny_bits_array(state->groups, mask); } } } int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl, struct posix_acl **pacl, struct posix_acl **dpacl, unsigned int flags) { struct posix_acl_state effective_acl_state, default_acl_state; struct nfs4_ace *ace; int ret; ret = init_state(&effective_acl_state, acl->naces); if (ret) return ret; ret = init_state(&default_acl_state, acl->naces); if (ret) goto out_estate; ret = -EINVAL; for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) { if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE && ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE) goto out_dstate; if (ace->flag & ~NFS4_SUPPORTED_FLAGS) goto out_dstate; if ((ace->flag & NFS4_INHERITANCE_FLAGS) == 0) { process_one_v4_ace(&effective_acl_state, ace); continue; } if (!(flags & NFS4_ACL_DIR)) goto out_dstate; /* * Note that when only one of FILE_INHERIT or DIRECTORY_INHERIT * is set, we're effectively turning on the other. That's OK, * according to rfc 3530. 
 */
		process_one_v4_ace(&default_acl_state, ace);
		if (!(ace->flag & NFS4_ACE_INHERIT_ONLY_ACE))
			process_one_v4_ace(&effective_acl_state, ace);
	}
	/* Fold the accumulated allow/deny state into the two posix ACLs.
	 * On any failure both output pointers are left NULL and both
	 * state arrays are freed via the goto ladder below. */
	*pacl = posix_state_to_acl(&effective_acl_state, flags);
	if (IS_ERR(*pacl)) {
		ret = PTR_ERR(*pacl);
		*pacl = NULL;
		goto out_dstate;
	}
	*dpacl = posix_state_to_acl(&default_acl_state,
				flags | NFS4_ACL_TYPE_DEFAULT);
	if (IS_ERR(*dpacl)) {
		ret = PTR_ERR(*dpacl);
		*dpacl = NULL;
		posix_acl_release(*pacl);
		*pacl = NULL;
		goto out_dstate;
	}
	/* posix_acl_valid() requires named user/group entries ordered by id */
	sort_pacl(*pacl);
	sort_pacl(*dpacl);
	ret = 0;
out_dstate:
	free_state(&default_acl_state);
out_estate:
	free_state(&effective_acl_state);
	return ret;
}

/*
 * Map an NFSv4 ACE "who" to the corresponding POSIX ACL tag.
 * For named entries the IDENTIFIER_GROUP flag distinguishes
 * ACL_GROUP from ACL_USER.  BUG()s on an unknown whotype.
 */
static short
ace2type(struct nfs4_ace *ace)
{
	switch (ace->whotype) {
		case NFS4_ACL_WHO_NAMED:
			return (ace->flag & NFS4_ACE_IDENTIFIER_GROUP ?
					ACL_GROUP : ACL_USER);
		case NFS4_ACL_WHO_OWNER:
			return ACL_USER_OBJ;
		case NFS4_ACL_WHO_GROUP:
			return ACL_GROUP_OBJ;
		case NFS4_ACL_WHO_EVERYONE:
			return ACL_OTHER;
	}
	BUG();
	return -1;
}

EXPORT_SYMBOL(nfs4_acl_posix_to_nfsv4);
EXPORT_SYMBOL(nfs4_acl_nfsv4_to_posix);

/*
 * Allocate an nfs4_acl with room for @n ACEs in its trailing array.
 * naces starts at 0; callers append and bump it themselves.
 * NOTE(review): n*sizeof(struct nfs4_ace) is not checked for overflow
 * here — presumably callers bound n; verify at call sites.
 */
struct nfs4_acl *
nfs4_acl_new(int n)
{
	struct nfs4_acl *acl;

	acl = kmalloc(sizeof(*acl) + n*sizeof(struct nfs4_ace), GFP_KERNEL);
	if (acl == NULL)
		return NULL;
	acl->naces = 0;
	return acl;
}

/* Table mapping the RFC special who-strings to internal whotype codes. */
static struct {
	char *string;
	int   stringlen;
	int type;
} s2t_map[] = {
	{
		.string    = "OWNER@",
		.stringlen = sizeof("OWNER@") - 1,
		.type      = NFS4_ACL_WHO_OWNER,
	},
	{
		.string    = "GROUP@",
		.stringlen = sizeof("GROUP@") - 1,
		.type      = NFS4_ACL_WHO_GROUP,
	},
	{
		.string    = "EVERYONE@",
		.stringlen = sizeof("EVERYONE@") - 1,
		.type      = NFS4_ACL_WHO_EVERYONE,
	},
};

/*
 * Classify an on-the-wire who-string: returns one of the special
 * whotype codes from s2t_map, or NFS4_ACL_WHO_NAMED for anything else
 * (i.e. a named user/group).
 */
int
nfs4_acl_get_whotype(char *p, u32 len)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(s2t_map); i++) {
		if (s2t_map[i].stringlen == len &&
				0 == memcmp(s2t_map[i].string, p, len))
			return s2t_map[i].type;
	}
	return NFS4_ACL_WHO_NAMED;
}

/*
 * Write the special who-string for @who into @p (no NUL terminator)
 * and return its length.  BUG()s if @who is not one of the special
 * whotypes — callers must handle named entries separately.
 */
int
nfs4_acl_write_who(int who, char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(s2t_map); i++) {
		if (s2t_map[i].type == who) {
			memcpy(p, s2t_map[i].string, s2t_map[i].stringlen);
			return s2t_map[i].stringlen;
		}
	}
	BUG();
	return -1;
}

EXPORT_SYMBOL(nfs4_acl_new);
EXPORT_SYMBOL(nfs4_acl_get_whotype);
EXPORT_SYMBOL(nfs4_acl_write_who);
gpl-2.0
sigmabeta/dolphin
Source/Core/DolphinWX/MainToolBar.cpp
1
8586
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.

#include "DolphinWX/MainToolBar.h"

#include <array>
#include <utility>

#include "Core/Core.h"
#include "Core/HW/CPU.h"
#include "DolphinWX/Globals.h"
#include "DolphinWX/WxUtils.h"

// Custom event used to request that the toolbar reload its bitmaps
// (handled by OnReloadBitmaps, bound in BindEvents below).
wxDEFINE_EVENT(DOLPHIN_EVT_RELOAD_TOOLBAR_BITMAPS, wxCommandEvent);

// Builds the toolbar: loads bitmaps, adds the buttons (debugger buttons
// first when constructed as a debug toolbar), then wires up UI-update events.
MainToolBar::MainToolBar(ToolBarType type, wxWindow* parent, wxWindowID id, const wxPoint& pos,
                         const wxSize& size, long style, const wxString& name)
    : wxToolBar{parent, id, pos, size, style, name}, m_type{type}
{
  wxToolBar::SetToolBitmapSize(FromDIP(wxSize{32, 32}));
  InitializeBitmaps();
  AddToolBarButtons();
  wxToolBar::Realize();
  BindEvents();
}

// Hooks up the bitmap-reload event and per-button enable/disable handlers.
void MainToolBar::BindEvents()
{
  Bind(DOLPHIN_EVT_RELOAD_TOOLBAR_BITMAPS, &MainToolBar::OnReloadBitmaps, this);
  BindMainButtonEvents();
  if (m_type == ToolBarType::Debug)
    BindDebuggerButtonEvents();
}

// Open/Refresh are only usable while no core is running; Stop, fullscreen
// and screenshot require a running (or paused) core.
void MainToolBar::BindMainButtonEvents()
{
  Bind(wxEVT_UPDATE_UI, &MainToolBar::OnUpdateIfCoreNotRunning, this, wxID_OPEN);
  Bind(wxEVT_UPDATE_UI, &MainToolBar::OnUpdateIfCoreNotRunning, this, wxID_REFRESH);
  Bind(wxEVT_UPDATE_UI, &MainToolBar::OnUpdateIfCoreRunningOrPaused, this, IDM_STOP);
  Bind(wxEVT_UPDATE_UI, &MainToolBar::OnUpdateIfCoreRunningOrPaused, this, IDM_TOGGLE_FULLSCREEN);
  Bind(wxEVT_UPDATE_UI, &MainToolBar::OnUpdateIfCoreRunningOrPaused, this, IDM_SCREENSHOT);
}

// Stepping buttons require the CPU to be stepping; Set PC only needs a pause.
void MainToolBar::BindDebuggerButtonEvents()
{
  Bind(wxEVT_UPDATE_UI, &MainToolBar::OnUpdateIfCPUCanStep, this, IDM_STEP);
  Bind(wxEVT_UPDATE_UI, &MainToolBar::OnUpdateIfCPUCanStep, this, IDM_STEPOVER);
  Bind(wxEVT_UPDATE_UI, &MainToolBar::OnUpdateIfCPUCanStep, this, IDM_STEPOUT);
  Bind(wxEVT_UPDATE_UI, &MainToolBar::OnUpdateIfCPUCanStep, this, IDM_SKIP);
  Bind(wxEVT_UPDATE_UI, &MainToolBar::OnUpdateIfCorePaused, this, IDM_SETPC);
}

// Enables the tool only while no emulation core is running.
void MainToolBar::OnUpdateIfCoreNotRunning(wxUpdateUIEvent& event)
{
  event.Enable(!Core::IsRunning());
}

// Enables the tool only while the core is paused.
void MainToolBar::OnUpdateIfCorePaused(wxUpdateUIEvent& event)
{
  event.Enable(Core::GetState() == Core::CORE_PAUSE);
}

// Enables the tool while the core is either running or paused.
void MainToolBar::OnUpdateIfCoreRunningOrPaused(wxUpdateUIEvent& event)
{
  const auto state = Core::GetState();
  event.Enable(state == Core::CORE_RUN || state == Core::CORE_PAUSE);
}

// Enables the tool only when the CPU is in single-stepping mode.
void MainToolBar::OnUpdateIfCPUCanStep(wxUpdateUIEvent& event)
{
  event.Enable(Core::IsRunning() && CPU::IsStepping());
}

// Drops all cached bitmaps and rebuilds them (e.g. after a theme change),
// freezing the window to avoid flicker while bitmaps are swapped.
void MainToolBar::OnReloadBitmaps(wxCommandEvent& WXUNUSED(event))
{
  Freeze();

  m_icon_bitmaps.clear();
  InitializeBitmaps();
  ApplyThemeBitmaps();

  if (m_type == ToolBarType::Debug)
    ApplyDebuggerBitmaps();

  Thaw();
}

// Repaints the toolbar and re-syncs the dual-state Play/Pause button.
void MainToolBar::Refresh(bool erase_background, const wxRect* rect)
{
  wxToolBar::Refresh(erase_background, rect);

  RefreshPlayButton();
}

// Loads theme bitmaps, plus the debugger set when this is a debug toolbar.
void MainToolBar::InitializeBitmaps()
{
  InitializeThemeBitmaps();

  if (m_type == ToolBarType::Debug)
    InitializeDebuggerBitmaps();
}

// Caches the theme-provided bitmaps for the main toolbar buttons.
void MainToolBar::InitializeThemeBitmaps()
{
  m_icon_bitmaps.insert({{TOOLBAR_FILEOPEN, CreateBitmap("open")},
                         {TOOLBAR_REFRESH, CreateBitmap("refresh")},
                         {TOOLBAR_PLAY, CreateBitmap("play")},
                         {TOOLBAR_STOP, CreateBitmap("stop")},
                         {TOOLBAR_PAUSE, CreateBitmap("pause")},
                         {TOOLBAR_SCREENSHOT, CreateBitmap("screenshot")},
                         {TOOLBAR_FULLSCREEN, CreateBitmap("fullscreen")},
                         {TOOLBAR_CONFIGMAIN, CreateBitmap("config")},
                         {TOOLBAR_CONFIGGFX, CreateBitmap("graphics")},
                         {TOOLBAR_CONTROLLER, CreateBitmap("classic")}});
}

// Caches the embedded-resource bitmaps for the debugger buttons.
void MainToolBar::InitializeDebuggerBitmaps()
{
  m_icon_bitmaps.insert(
      {{TOOLBAR_DEBUG_STEP, CreateDebuggerBitmap("toolbar_debugger_step")},
       {TOOLBAR_DEBUG_STEPOVER, CreateDebuggerBitmap("toolbar_debugger_step_over")},
       {TOOLBAR_DEBUG_STEPOUT, CreateDebuggerBitmap("toolbar_debugger_step_out")},
       {TOOLBAR_DEBUG_SKIP, CreateDebuggerBitmap("toolbar_debugger_skip")},
       {TOOLBAR_DEBUG_GOTOPC, CreateDebuggerBitmap("toolbar_debugger_goto_pc")},
       {TOOLBAR_DEBUG_SETPC, CreateDebuggerBitmap("toolbar_debugger_set_pc")}});
}

// Loads a single theme bitmap scaled to the current tool bitmap size.
wxBitmap MainToolBar::CreateBitmap(const std::string& name) const
{
  return WxUtils::LoadScaledThemeBitmap(name, this, GetToolBitmapSize());
}

// Loads a single debugger resource bitmap, scaled down and centered if needed.
wxBitmap MainToolBar::CreateDebuggerBitmap(const std::string& name) const
{
  constexpr auto scale_flags = WxUtils::LSI_SCALE_DOWN | WxUtils::LSI_ALIGN_CENTER;

  return WxUtils::LoadScaledResourceBitmap(name, this, GetToolBitmapSize(), wxDefaultSize,
                                           scale_flags);
}

// Re-applies cached theme bitmaps to all fixed-bitmap main buttons.
void MainToolBar::ApplyThemeBitmaps()
{
  constexpr std::array<std::pair<int, ToolBarBitmapID>, 8> bitmap_entries{
      {{wxID_OPEN, TOOLBAR_FILEOPEN},
       {wxID_REFRESH, TOOLBAR_REFRESH},
       {IDM_STOP, TOOLBAR_STOP},
       {IDM_TOGGLE_FULLSCREEN, TOOLBAR_FULLSCREEN},
       {IDM_SCREENSHOT, TOOLBAR_SCREENSHOT},
       {wxID_PREFERENCES, TOOLBAR_CONFIGMAIN},
       {IDM_CONFIG_GFX_BACKEND, TOOLBAR_CONFIGGFX},
       {IDM_CONFIG_CONTROLLERS, TOOLBAR_CONTROLLER}}};

  for (const auto& entry : bitmap_entries)
    ApplyBitmap(entry.first, entry.second);

  // Separate, as the play button is dual-state and doesn't have a fixed bitmap.
  RefreshPlayButton();
}

// Re-applies cached debugger bitmaps to all debugger buttons.
void MainToolBar::ApplyDebuggerBitmaps()
{
  constexpr std::array<std::pair<int, ToolBarBitmapID>, 6> bitmap_entries{
      {{IDM_STEP, TOOLBAR_DEBUG_STEP},
       {IDM_STEPOVER, TOOLBAR_DEBUG_STEPOVER},
       {IDM_STEPOUT, TOOLBAR_DEBUG_STEPOUT},
       {IDM_SKIP, TOOLBAR_DEBUG_SKIP},
       {IDM_GOTOPC, TOOLBAR_DEBUG_GOTOPC},
       {IDM_SETPC, TOOLBAR_DEBUG_SETPC}}};

  for (const auto& entry : bitmap_entries)
    ApplyBitmap(entry.first, entry.second);
}

// Sets both the normal and auto-generated disabled bitmap for one tool.
void MainToolBar::ApplyBitmap(int tool_id, ToolBarBitmapID bitmap_id)
{
  const auto& bitmap = m_icon_bitmaps[bitmap_id];

  SetToolDisabledBitmap(tool_id, WxUtils::CreateDisabledButtonBitmap(bitmap));
  SetToolNormalBitmap(tool_id, bitmap);
}

// Adds all buttons; on a debug toolbar the debugger group comes first.
void MainToolBar::AddToolBarButtons()
{
  if (m_type == ToolBarType::Debug)
  {
    AddDebuggerToolBarButtons();
    AddSeparator();
  }

  AddMainToolBarButtons();
}

// Adds the standard emulator buttons (open/refresh, playback, config groups).
void MainToolBar::AddMainToolBarButtons()
{
  AddToolBarButton(wxID_OPEN, TOOLBAR_FILEOPEN, _("Open"), _("Open file..."));
  AddToolBarButton(wxID_REFRESH, TOOLBAR_REFRESH, _("Refresh"), _("Refresh game list"));
  AddSeparator();
  AddToolBarButton(IDM_PLAY, TOOLBAR_PLAY, _("Play"), _("Play"));
  AddToolBarButton(IDM_STOP, TOOLBAR_STOP, _("Stop"), _("Stop"));
  AddToolBarButton(IDM_TOGGLE_FULLSCREEN, TOOLBAR_FULLSCREEN, _("FullScr"),
                   _("Toggle fullscreen"));
  AddToolBarButton(IDM_SCREENSHOT, TOOLBAR_SCREENSHOT, _("ScrShot"), _("Take screenshot"));
  AddSeparator();
  AddToolBarButton(wxID_PREFERENCES, TOOLBAR_CONFIGMAIN, _("Config"), _("Configure..."));
  AddToolBarButton(IDM_CONFIG_GFX_BACKEND, TOOLBAR_CONFIGGFX, _("Graphics"),
                   _("Graphics settings"));
  AddToolBarButton(IDM_CONFIG_CONTROLLERS, TOOLBAR_CONTROLLER, _("Controllers"),
                   _("Controller settings"));
}

// Adds the debugger stepping and PC navigation buttons.
void MainToolBar::AddDebuggerToolBarButtons()
{
  AddToolBarButton(IDM_STEP, TOOLBAR_DEBUG_STEP, _("Step"), _("Step into the next instruction"));
  AddToolBarButton(IDM_STEPOVER, TOOLBAR_DEBUG_STEPOVER, _("Step Over"),
                   _("Step over the next instruction"));
  AddToolBarButton(IDM_STEPOUT, TOOLBAR_DEBUG_STEPOUT, _("Step Out"),
                   _("Step out of the current function"));
  AddToolBarButton(IDM_SKIP, TOOLBAR_DEBUG_SKIP, _("Skip"),
                   _("Skips the next instruction completely"));
  AddSeparator();
  AddToolBarButton(IDM_GOTOPC, TOOLBAR_DEBUG_GOTOPC, _("Show PC"),
                   _("Go to the current instruction"));
  AddToolBarButton(IDM_SETPC, TOOLBAR_DEBUG_SETPC, _("Set PC"), _("Set the current instruction"));
}

// Helper: adds one button with its cached bitmap and tooltip text.
void MainToolBar::AddToolBarButton(int tool_id, ToolBarBitmapID bitmap_id, const wxString& label,
                                   const wxString& short_help)
{
  WxUtils::AddToolbarButton(this, tool_id, label, m_icon_bitmaps[bitmap_id], short_help);
}

// Syncs the dual-state Play/Pause button's label, tooltip and bitmap
// with the current core state (running => show Pause, else show Play).
void MainToolBar::RefreshPlayButton()
{
  ToolBarBitmapID bitmap_id;
  wxString label;

  if (Core::GetState() == Core::CORE_RUN)
  {
    bitmap_id = TOOLBAR_PAUSE;
    label = _("Pause");
  }
  else
  {
    bitmap_id = TOOLBAR_PLAY;
    label = _("Play");
  }

  FindById(IDM_PLAY)->SetLabel(label);
  SetToolShortHelp(IDM_PLAY, label);
  ApplyBitmap(IDM_PLAY, bitmap_id);
}
gpl-2.0
jeffegg/beaglebone
arch/arm/mach-exynos4/init.c
1
1083
/* linux/arch/arm/mach-exynos4/init.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/serial_core.h> #include <plat/cpu.h> #include <plat/devs.h> #include <plat/regs-serial.h> static struct s3c24xx_uart_clksrc exynos4_serial_clocks[] = { [0] = { .name = "uclk1", .divisor = 1, .min_baud = 0, .max_baud = 0, }, }; /* uart registration process */ void __init exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no) { struct s3c2410_uartcfg *tcfg = cfg; u32 ucnt; for (ucnt = 0; ucnt < no; ucnt++, tcfg++) { if (!tcfg->clocks) { tcfg->has_fracval = 1; tcfg->clocks = exynos4_serial_clocks; tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks); } tcfg->flags |= NO_NEED_CHECK_CLKSRC; } s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no); }
gpl-2.0
visitor83/fighting_stm32
rtgui/common/image_hdc.c
1
5900
#include <rtthread.h>
#include <rtgui/dc_hw.h>
#include <rtgui/image.h>
#include <rtgui/rtgui_system.h>
#include <rtgui/image_hdc.h>

#define HDC_MAGIC_LEN 4

/* Private per-image state for a file-backed HDC image.
 * Either the whole pixel buffer is loaded up-front (pixels != NULL,
 * filerw closed) or pixels are streamed from filerw starting at
 * pixel_offset on every blit. */
struct rtgui_image_hdc
{
    rt_bool_t is_loaded;

    /* hdc image information */
    rt_uint16_t byte_per_pixel;
    rt_uint16_t pitch;               /* bytes per scanline: w * byte_per_pixel */

    rt_size_t   pixel_offset;        /* file offset where pixel data begins */
    rt_uint8_t *pixels;              /* whole-image buffer, or NULL if streaming */
    struct rtgui_filerw* filerw;     /* open file when streaming, else NULL */

    const struct rtgui_graphic_driver* hw_driver;
};

static rt_bool_t rtgui_image_hdc_check(struct rtgui_filerw* file);
static rt_bool_t rtgui_image_hdc_load(struct rtgui_image* image, struct rtgui_filerw* file, rt_bool_t load);
static void rtgui_image_hdc_unload(struct rtgui_image* image);
static void rtgui_image_hdc_blit(struct rtgui_image* image, struct rtgui_dc* dc, struct rtgui_rect* rect);

static void rtgui_image_hdcmm_blit(struct rtgui_image* image, struct rtgui_dc* dc, struct rtgui_rect* dst_rect);

/* Engine for file-backed HDC images. */
struct rtgui_image_engine rtgui_image_hdc_engine =
{
    "hdc",
    { RT_NULL },
    rtgui_image_hdc_check,
    rtgui_image_hdc_load,
    rtgui_image_hdc_unload,
    rtgui_image_hdc_blit
};

/* Engine for in-memory (compiled-in) HDC images: blit only, no file I/O. */
const struct rtgui_image_engine rtgui_image_hdcmm_engine =
{
    "hdcmm",
    {RT_NULL},
    RT_NULL,
    RT_NULL,
    RT_NULL,
    rtgui_image_hdcmm_blit
};

/* Return RT_TRUE if the file starts with the "HDC\0" magic bytes.
 * The file position is restored before returning. */
static rt_bool_t rtgui_image_hdc_check(struct rtgui_filerw* file)
{
    int start;
    rt_bool_t is_HDC;
    rt_uint8_t magic[4];

    if (!file) return RT_FALSE;

    start = rtgui_filerw_tell(file);

    /* move to the beginning of file */
    rtgui_filerw_seek(file, 0, RTGUI_FILE_SEEK_SET);

    is_HDC = RT_FALSE;
    if (rtgui_filerw_read(file, magic, 1, sizeof(magic)) == sizeof(magic))
    {
        if (magic[0] == 'H' &&
            magic[1] == 'D' &&
            magic[2] == 'C' &&
            magic[3] == '\0')
        {
            is_HDC = RT_TRUE;
        }
    }

    rtgui_filerw_seek(file, start, RTGUI_FILE_SEEK_SET);

    return is_HDC;
}

/* Parse the HDC header from 'file' and populate 'image'.
 * When 'load' is RT_TRUE the whole pixel buffer is read into memory and the
 * file is closed; otherwise the file stays open for on-demand streaming.
 * Returns RT_TRUE on success, RT_FALSE on allocation/driver failure. */
static rt_bool_t rtgui_image_hdc_load(struct rtgui_image* image, struct rtgui_filerw* file, rt_bool_t load)
{
    rt_uint32_t header[5];
    struct rtgui_image_hdc* hdc;

    hdc = (struct rtgui_image_hdc*) rtgui_malloc(sizeof(struct rtgui_image_hdc));
    if (hdc == RT_NULL) return RT_FALSE;

    hdc->hw_driver = rtgui_graphic_driver_get_default();
    if (hdc->hw_driver == RT_NULL)
    {
        rtgui_free(hdc);
        return RT_FALSE;
    }

    /* NOTE(review): the read result is unchecked — a truncated header would
     * leave header[] partly uninitialized. TODO: verify against callers
     * whether a short read should fail the load. */
    rtgui_filerw_read(file, (char*)&header, 1, sizeof(header));

    /* set image information (width/height come from header words 1 and 2) */
    image->w = (rt_uint16_t)header[1];
    image->h = (rt_uint16_t)header[2];
    image->engine = &rtgui_image_hdc_engine;
    image->data = hdc;
    hdc->filerw = file;
    hdc->byte_per_pixel = hdc->hw_driver->byte_per_pixel;
    hdc->pitch = image->w * hdc->byte_per_pixel;
    hdc->pixel_offset = rtgui_filerw_tell(file);

    if (load == RT_TRUE)
    {
        /* load all pixels */
        hdc->pixels = rtgui_malloc(image->h * hdc->pitch);
        if (hdc->pixels == RT_NULL)
        {
            /* release data */
            rtgui_free(hdc);
            return RT_FALSE;
        }

        rtgui_filerw_read(hdc->filerw, hdc->pixels, 1, image->h * hdc->pitch);
        rtgui_filerw_close(hdc->filerw);
        hdc->filerw = RT_NULL;
        hdc->pixel_offset = 0;
    }
    else
    {
        hdc->pixels = RT_NULL;
    }

    return RT_TRUE;
}

/* Release everything owned by the image: the pixel buffer, the open file
 * (if still streaming), and the private state itself. */
static void rtgui_image_hdc_unload(struct rtgui_image* image)
{
    struct rtgui_image_hdc* hdc;

    if (image != RT_NULL)
    {
        hdc = (struct rtgui_image_hdc*) image->data;

        if (hdc->pixels != RT_NULL) rtgui_free(hdc->pixels);
        if (hdc->filerw != RT_NULL)
        {
            rtgui_filerw_close(hdc->filerw);
            hdc->filerw = RT_NULL;
        }

        /* release data */
        rtgui_free(hdc);
    }
}

/* Draw the image into 'dc' at dst_rect, clipped to the smaller of the image
 * size and the destination rectangle. Pixels are taken from the in-memory
 * buffer when present, otherwise streamed from the file one scanline at a
 * time. Only hardware DCs are supported. */
static void rtgui_image_hdc_blit(struct rtgui_image* image, struct rtgui_dc* dc, struct rtgui_rect* dst_rect)
{
    rt_uint16_t y, w, h;
    struct rtgui_image_hdc* hdc;

    /* FIX: was '||', which only fired when ALL arguments were null;
     * the precondition is that every argument is non-null. */
    RT_ASSERT(image != RT_NULL && dc != RT_NULL && dst_rect != RT_NULL);

    /* this dc is not visible */
    if (rtgui_dc_get_visible(dc) != RT_TRUE) return;

    hdc = (struct rtgui_image_hdc*) image->data;
    RT_ASSERT(hdc != RT_NULL);

    if (dc->type != RTGUI_DC_HW) return;

    /* the minimum rect */
    if (image->w < rtgui_rect_width(*dst_rect)) w = image->w;
    else w = rtgui_rect_width(*dst_rect);
    if (image->h < rtgui_rect_height(*dst_rect)) h = image->h;
    else h = rtgui_rect_height(*dst_rect);

    if (hdc->pixels != RT_NULL)
    {
        rt_uint8_t* ptr;

        /* get pixel pointer */
        ptr = hdc->pixels;

        for (y = 0; y < h; y ++)
        {
            rtgui_dc_hw_draw_raw_hline((struct rtgui_dc_hw*)dc, ptr,
                dst_rect->x1, dst_rect->x1 + w, dst_rect->y1 + y);
            ptr += hdc->pitch;
        }
    }
    else
    {
        rt_uint8_t* ptr;
        ptr = rtgui_malloc(hdc->pitch);
        if (ptr == RT_NULL) return; /* no memory */

        /* seek to the begin of pixel data */
        rtgui_filerw_seek(hdc->filerw, hdc->pixel_offset, RTGUI_FILE_SEEK_SET);

        for (y = 0; y < h; y ++)
        {
            /* read pixel data */
            if (rtgui_filerw_read(hdc->filerw, ptr, 1, hdc->pitch) != hdc->pitch)
                break; /* read data failed */

            rtgui_dc_hw_draw_raw_hline((struct rtgui_dc_hw*)dc, ptr,
                dst_rect->x1, dst_rect->x1 + w, dst_rect->y1 + y);
        }

        rtgui_free(ptr);
    }
}

/* Blit an in-memory (compiled-in) HDC image; the rtgui_image itself is the
 * rtgui_image_hdcmm container, so no separate private state exists. */
static void rtgui_image_hdcmm_blit(struct rtgui_image* image, struct rtgui_dc* dc, struct rtgui_rect* dst_rect)
{
    rt_uint8_t* ptr;
    rt_uint16_t y, w, h;
    struct rtgui_image_hdcmm* hdc;

    /* FIX: was '||' — see rtgui_image_hdc_blit. */
    RT_ASSERT(image != RT_NULL && dc != RT_NULL && dst_rect != RT_NULL);

    /* this dc is not visible */
    if (rtgui_dc_get_visible(dc) != RT_TRUE || (dc->type != RTGUI_DC_HW))
        return;

    hdc = (struct rtgui_image_hdcmm*) image;
    RT_ASSERT(hdc != RT_NULL);

    /* the minimum rect */
    if (image->w < rtgui_rect_width(*dst_rect)) w = image->w;
    else w = rtgui_rect_width(*dst_rect);
    if (image->h < rtgui_rect_height(*dst_rect)) h = image->h;
    else h = rtgui_rect_height(*dst_rect);

    /* get pixel pointer */
    ptr = hdc->pixels;

    for (y = 0; y < h; y ++)
    {
        rtgui_dc_hw_draw_raw_hline((struct rtgui_dc_hw*)dc, ptr,
            dst_rect->x1, dst_rect->x1 + w, dst_rect->y1 + y);
        ptr += hdc->pitch;
    }
}

/* Register the HDC engine with the image subsystem. */
void rtgui_image_hdc_init()
{
    /* register hdc on image system */
    rtgui_image_register_engine(&rtgui_image_hdc_engine);
}
gpl-2.0
vingarzan/kamailio
msg_translator.c
1
84883
/* * Copyright (C) 2001-2003 FhG Fokus * * This file is part of Kamailio, a free SIP server. * * Kamailio is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version * * Kamailio is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * * */ /** Via special params: * requests: * - if the address in via is different from the src_ip or an existing * received=something is found, received=src_ip is added (and any preexisting * received is deleted). received is added as the first via parameter if no * receive is previously present or over the old receive. * - if the original via contains rport / rport=something or msg->msg_flags * FL_FORCE_RPORT is set (e.g. script force_rport() cmd) rport=src_port * is added (over previous rport / as first via param or after received * if no received was present and received is added too) * local replies: * (see also sl_send_reply) * - rport and received are added in mostly the same way as for requests, but * in the reverse order (first rport and then received). See also * limitations. * - if reply_to_via is set (default off) the local reply will be sent to * the address in via (received is ignored since it was not set by us). The * destination port is either the message source port if via contains rport * or the FL_FORCE_RPORT flag is set or the port from the via. If either * port or rport are present a normal dns lookup (instead of a srv lookup) * is performed on the address. 
If no port is present and a srv lookup is * performed the port is taken from the srv lookup. If the srv lookup failed * or it was not performed, the port is set to the default sip port (5060). * - if reply_to_via is off (default) the local reply is sent to the message * source ip address. The destination port is set to the source port if * rport is present or FL_FORCE_RPORT flag is set, to the via port or to * the default sip port (5060) if neither rport nor the via port is present. * "normal" replies: * - if received is present the message is sent to the received address else * if no port is present (neither a normal via port nor rport) a dns srv * lookup is performed on the host part and the reply is sent to the * resulting ip. If a port is present or the host part is an ip address * the dns lookup will be a "normal" one (A or AAAA). * - if rport is present, its value will be used as the destination port * (and this will also disable srv lookups) * - if no port is present the destination port will be taken from the srv * lookup. If the srv lookup fails or is not performed (e.g. ip address * in host) the destination port will be set to the default sip port (5060). * * Known limitations: * - when locally replying to a message, rport and received will be appended to * the via header parameters (for forwarded requests they are inserted at the * beginning). * - a locally generated reply might get two received via parameters if a * received is already present in the original message (this should not * happen though, but ...) * *--andrei */ /*!
* \file * \brief Kamailio core :: Message translations * \ingroup core * Module: \ref core */ #include <sys/types.h> #include <sys/socket.h> #include <netdb.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include "comp_defs.h" #include "msg_translator.h" #include "globals.h" #include "error.h" #include "mem/mem.h" #include "dprint.h" #include "config.h" #include "md5utils.h" #include "data_lump.h" #include "data_lump_rpl.h" #include "ip_addr.h" #include "resolve.h" #include "ut.h" #include "pt.h" #include "cfg/cfg.h" #include "parser/parse_to.h" #include "parser/parse_param.h" #include "forward.h" #include "str_list.h" #include "rand/kam_rand.h" #define append_str_trans(_dest,_src,_len,_msg) \ append_str( (_dest), (_src), (_len) ); extern char version[]; extern int version_len; /** per process fixup function for global_req_flags. * It should be called from the configuration framework. */ void fix_global_req_flags(str* gname, str* name) { global_req_flags=0; switch(cfg_get(core, core_cfg, udp_mtu_try_proto)){ case PROTO_NONE: case PROTO_UDP: /* do nothing */ break; case PROTO_TCP: global_req_flags|=FL_MTU_TCP_FB; break; case PROTO_TLS: global_req_flags|=FL_MTU_TLS_FB; break; case PROTO_SCTP: global_req_flags|=FL_MTU_SCTP_FB; break; } if (cfg_get(core, core_cfg, force_rport)) global_req_flags|=FL_FORCE_RPORT; } /* checks if ip is in host(name) and ?host(ip)=name? * ip must be in network byte order! * resolver = DO_DNS | DO_REV_DNS; if 0 no dns check is made * return 0 if equal */ static int check_via_address(struct ip_addr* ip, str *name, unsigned short port, int resolver) { struct hostent* he; int i; char* s; int len; char lproto; /* maybe we are lucky and name it's an ip */ s=ip_addr2a(ip); if (s){ LM_DBG("(%s, %.*s, %d)\n", s, name->len, name->s, resolver); len=strlen(s); /* check if name->s is an ipv6 address or an ipv6 address ref. 
*/ if ((ip->af==AF_INET6) && ( ((len==name->len)&&(strncasecmp(name->s, s, name->len)==0)) || ((len==(name->len-2))&&(name->s[0]=='[')&& (name->s[name->len-1]==']')&& (strncasecmp(name->s+1, s, len)==0)) ) ) return 0; else if (strncmp(name->s, s, name->len)==0) return 0; }else{ LM_CRIT("could not convert ip address\n"); return -1; } if (port==0) port=SIP_PORT; if (resolver&DO_DNS){ LM_DBG("doing dns lookup\n"); /* try all names ips */ lproto = PROTO_NONE; he=sip_resolvehost(name, &port, &lproto); /* don't use naptr */ if (he && ip->af==he->h_addrtype){ for(i=0;he && he->h_addr_list[i];i++){ if ( memcmp(&he->h_addr_list[i], ip->u.addr, ip->len)==0) return 0; } } } if (resolver&DO_REV_DNS){ LM_DBG("doing rev. dns lookup\n"); /* try reverse dns */ he=rev_resolvehost(ip); if (he && (strncmp(he->h_name, name->s, name->len)==0)) return 0; for (i=0; he && he->h_aliases[i];i++){ if (strncmp(he->h_aliases[i],name->s, name->len)==0) return 0; } } return -1; } /* check if IP address in Via != source IP address of signaling, * or the sender requires adding rport or received values */ int received_test( struct sip_msg *msg ) { int rcvd; rcvd=msg->via1->received || msg->via1->rport || check_via_address(&msg->rcv.src_ip, &msg->via1->host, msg->via1->port, received_dns); return rcvd; } /* check if IP address in Via != source IP address of signaling */ int received_via_test( struct sip_msg *msg ) { int rcvd; rcvd = (check_via_address(&msg->rcv.src_ip, &msg->via1->host, msg->via1->port, received_dns)!=0); return rcvd; } static char * warning_builder( struct sip_msg *msg, unsigned int *returned_len) { static char buf[MAX_WARNING_LEN]; str *foo; int print_len, l; int clen; char* t; #define str_print(string, string_len) \ do{ \ l=(string_len); \ if ((clen+l)>MAX_WARNING_LEN) \ goto error_overflow; \ memcpy(buf+clen, (string), l); \ clen+=l; \ }while(0) #define str_lenpair_print(string, string_len, string2, string2_len) \ do{ \ str_print(string, string_len); \ str_print(string2, 
string2_len);\ }while(0) #define str_pair_print( string, string2, string2_len) \ str_lenpair_print((string), strlen((string)), (string2), (string2_len)) #define str_int_print(string, intval)\ do{\ t=int2str((intval), &print_len); \ str_pair_print(string, t, print_len);\ } while(0) #define str_su_print(sockaddr)\ do{\ t=su2a(&sockaddr, sizeof(sockaddr)); \ print_len=strlen(t); \ str_print(t, print_len); \ } while(0) #define str_ipaddr_print(string, ipaddr_val)\ do{\ t=ip_addr2a((ipaddr_val)); \ print_len=strlen(t); \ str_pair_print(string, t, print_len);\ } while(0) clen=0; str_print(WARNING, WARNING_LEN); str_su_print(msg->rcv.bind_address->su); str_print(WARNING_PHRASE, WARNING_PHRASE_LEN); /*adding out_uri*/ if (msg->new_uri.s) foo=&(msg->new_uri); else foo=&(msg->first_line.u.request.uri); /* pid= */ str_int_print(" pid=", my_pid()); /* req_src_ip= */ str_ipaddr_print(" req_src_ip=", &msg->rcv.src_ip); str_int_print(" req_src_port=", msg->rcv.src_port); str_pair_print(" in_uri=", msg->first_line.u.request.uri.s, msg->first_line.u.request.uri.len); str_pair_print(" out_uri=", foo->s, foo->len); str_pair_print(" via_cnt", (msg->parsed_flag & HDR_EOH_F)==HDR_EOH_F ? 
"=" : ">", 1); str_int_print("=", via_cnt); if (clen<MAX_WARNING_LEN){ buf[clen]='"'; clen++; } else goto error_overflow; *returned_len=clen; return buf; error_overflow: LM_NOTICE("buffer size exceeded (probably too long URI)\n"); *returned_len=0; return 0; } char* received_builder(struct sip_msg *msg, unsigned int *received_len) { char *buf; int len; struct ip_addr *source_ip; char *tmp; int tmp_len; source_ip=&msg->rcv.src_ip; buf=pkg_malloc(sizeof(char)*MAX_RECEIVED_SIZE); if (buf==0){ ser_error=E_OUT_OF_MEM; LM_ERR("out of memory\n"); return 0; } memcpy(buf, RECEIVED, RECEIVED_LEN); if ( (tmp=ip_addr2a(source_ip))==0) { pkg_free(buf); return 0; /* error*/ } tmp_len=strlen(tmp); len=RECEIVED_LEN+tmp_len; memcpy(buf+RECEIVED_LEN, tmp, tmp_len); buf[len]=0; /*null terminate it */ *received_len = len; return buf; } char* rport_builder(struct sip_msg *msg, unsigned int *rport_len) { char* buf; char* tmp; int tmp_len; int len; tmp_len=0; tmp=int2str(msg->rcv.src_port, &tmp_len); len=RPORT_LEN+tmp_len; buf=pkg_malloc(sizeof(char)*(len+1));/* space for null term */ if (buf==0){ ser_error=E_OUT_OF_MEM; LM_ERR("out of memory\n"); return 0; } memcpy(buf, RPORT, RPORT_LEN); memcpy(buf+RPORT_LEN, tmp, tmp_len); buf[len]=0; /*null terminate it*/ *rport_len=len; return buf; } char* id_builder(struct sip_msg* msg, unsigned int *id_len) { char* buf; int len, value_len; char revhex[sizeof(int)*2]; char* p; int size; size=sizeof(int)*2; p=&revhex[0]; if (int2reverse_hex(&p, &size, msg->rcv.proto_reserved1)==-1){ LM_CRIT("not enough space for id\n"); return 0; } value_len=p-&revhex[0]; len=ID_PARAM_LEN+value_len; buf=pkg_malloc(sizeof(char)*(len+1));/* place for ending \0 */ if (buf==0){ ser_error=E_OUT_OF_MEM; LM_ERR("out of memory\n"); return 0; } memcpy(buf, ID_PARAM, ID_PARAM_LEN); memcpy(buf+ID_PARAM_LEN, revhex, value_len); buf[len]=0; /* null terminate it */ *id_len=len; return buf; } char* clen_builder( struct sip_msg* msg, int *clen_len, int diff, int body_only) { char* 
buf; int len; int value; char* value_s; int value_len; char* body; body=get_body(msg); if (body==0){ ser_error=E_BAD_REQ; LM_ERR("no message body found (missing crlf?)"); return 0; } value=msg->len-(int)(body-msg->buf)+diff; value_s=int2str(value, &value_len); LM_DBG("content-length: %d (%s)\n", value, value_s); if (body_only) { len=value_len; } else { len=CONTENT_LENGTH_LEN+value_len+CRLF_LEN; } buf=pkg_malloc(sizeof(char)*(len+1)); if (buf==0){ ser_error=E_OUT_OF_MEM; LM_ERR("out of memory\n"); return 0; } if (body_only) { memcpy(buf, value_s, value_len); } else { memcpy(buf, CONTENT_LENGTH, CONTENT_LENGTH_LEN); memcpy(buf+CONTENT_LENGTH_LEN, value_s, value_len); memcpy(buf+CONTENT_LENGTH_LEN+value_len, CRLF, CRLF_LEN); } buf[len]=0; /* null terminate it */ *clen_len=len; return buf; } /* checks if a lump opt condition * returns 1 if cond is true, 0 if false */ static inline int lump_check_opt( struct lump *l, struct sip_msg* msg, struct dest_info* snd_i ) { struct ip_addr* ip; unsigned short port; int proto; #define get_ip_port_proto \ if ((snd_i==0) || (snd_i->send_sock==0)){ \ LM_CRIT("null send socket\n"); \ return 1; /* we presume they are different :-) */ \ } \ if (msg->rcv.bind_address){ \ ip=&msg->rcv.bind_address->address; \ port=msg->rcv.bind_address->port_no; \ proto=msg->rcv.bind_address->proto; \ }else{ \ ip=&msg->rcv.dst_ip; \ port=msg->rcv.dst_port; \ proto=msg->rcv.proto; \ } \ switch(l->u.cond){ case COND_FALSE: return 0; case COND_TRUE: LUMP_SET_COND_TRUE(l); return 1; case COND_IF_DIFF_REALMS: get_ip_port_proto; /* faster tests first */ if ((port==snd_i->send_sock->port_no) && (proto==snd_i->send_sock->proto) && #ifdef USE_COMP (msg->rcv.comp==snd_i->comp) && #endif (ip_addr_cmp(ip, &snd_i->send_sock->address))) return 0; else { LUMP_SET_COND_TRUE(l); return 1; } case COND_IF_DIFF_AF: get_ip_port_proto; if (ip->af!=snd_i->send_sock->address.af) { LUMP_SET_COND_TRUE(l); return 1; } else return 0; case COND_IF_DIFF_PROTO: get_ip_port_proto; if 
(proto!=snd_i->send_sock->proto) { LUMP_SET_COND_TRUE(l); return 1; } else return 0; case COND_IF_DIFF_PORT: get_ip_port_proto; if (port!=snd_i->send_sock->port_no) { LUMP_SET_COND_TRUE(l); return 1; } else return 0; case COND_IF_DIFF_IP: get_ip_port_proto; if (ip_addr_cmp(ip, &snd_i->send_sock->address)) return 0; else { LUMP_SET_COND_TRUE(l); return 1; } case COND_IF_RAND: if(kam_rand()>=KAM_RAND_MAX/2) { LUMP_SET_COND_TRUE(l); return 1; } else return 0; default: LM_CRIT("unknown lump condition %d\n", l->u.cond); } return 0; /* false */ } /* computes the "unpacked" len of a lump list, code moved from build_req_from_req */ static inline int lumps_len(struct sip_msg* msg, struct lump* lumps, struct dest_info* send_info) { int s_offset; int new_len; struct lump* t; struct lump* r; str* send_address_str; str* send_port_str; str* recv_address_str = NULL; str* recv_port_str = NULL; int recv_port_no = 0; struct socket_info* send_sock; #ifdef USE_COMP #define RCVCOMP_LUMP_LEN \ /* add;comp=xxx */ \ switch(msg->rcv.comp){ \ case COMP_NONE: \ break; \ case COMP_SIGCOMP: \ new_len+=COMP_PARAM_LEN+SIGCOMP_NAME_LEN; \ break; \ case COMP_SERGZ: \ new_len+=COMP_PARAM_LEN+SERGZ_NAME_LEN ; \ break; \ default: \ LM_CRIT("unknown comp %d\n", msg->rcv.comp); \ } #define SENDCOMP_LUMP_LEN \ /* add;comp=xxx */ \ switch(send_info->comp){ \ case COMP_NONE: \ break; \ case COMP_SIGCOMP: \ new_len+=COMP_PARAM_LEN+SIGCOMP_NAME_LEN; \ break; \ case COMP_SERGZ: \ new_len+=COMP_PARAM_LEN+SERGZ_NAME_LEN ; \ break; \ default: \ LM_CRIT("unknown comp %d\n", send_info->comp); \ } #else #define RCVCOMP_LUMP_LEN #define SENDCOMP_LUMP_LEN #endif /*USE_COMP */ #define SUBST_LUMP_LEN(subst_l) \ switch((subst_l)->u.subst){ \ case SUBST_RCV_IP: \ if (msg->rcv.bind_address){ \ new_len+=recv_address_str->len; \ if (msg->rcv.bind_address->address.af!=AF_INET) \ new_len+=2; \ }else{ \ /* FIXME */ \ LM_CRIT("FIXME: null bind_address\n"); \ }; \ break; \ case SUBST_RCV_PORT: \ if (msg->rcv.bind_address){ \ 
new_len+=recv_port_str->len; \ }else{ \ /* FIXME */ \ LM_CRIT("FIXME: null bind_address\n"); \ }; \ break; \ case SUBST_RCV_PROTO: \ if (msg->rcv.bind_address){ \ switch(msg->rcv.bind_address->proto){ \ case PROTO_NONE: \ case PROTO_UDP: \ new_len+=3; \ break; \ case PROTO_TCP: \ case PROTO_TLS: \ switch(msg->rcv.proto){ \ case PROTO_WS: \ case PROTO_WSS: \ new_len+=2; \ break; \ default: \ new_len+=3; \ break; \ } \ break; \ case PROTO_SCTP: \ new_len+=4; \ break; \ default: \ LM_CRIT("unknown proto %d\n", msg->rcv.bind_address->proto); \ }\ }else{ \ /* FIXME */ \ LM_CRIT("FIXME: null bind_address\n"); \ }; \ break; \ case SUBST_RCV_ALL: \ if (msg->rcv.bind_address){ \ new_len+=recv_address_str->len; \ if (msg->rcv.bind_address->address.af!=AF_INET) \ new_len+=2; \ if (recv_port_no!=SIP_PORT){ \ /* add :port_no */ \ new_len+=1+recv_port_str->len; \ }\ /*add;transport=xxx*/ \ switch(msg->rcv.bind_address->proto){ \ case PROTO_NONE: \ case PROTO_UDP: \ break; /* udp is the default */ \ case PROTO_TCP: \ case PROTO_TLS: \ switch(msg->rcv.proto){ \ case PROTO_WS: \ case PROTO_WSS: \ new_len+=TRANSPORT_PARAM_LEN+2; \ break; \ default: \ new_len+=TRANSPORT_PARAM_LEN+3; \ break; \ } \ break; \ case PROTO_SCTP: \ new_len+=TRANSPORT_PARAM_LEN+4; \ break; \ default: \ LM_CRIT("unknown proto %d\n", \ msg->rcv.bind_address->proto); \ }\ RCVCOMP_LUMP_LEN \ }else{ \ /* FIXME */ \ LM_CRIT("FIXME: null bind_address\n"); \ }; \ break; \ case SUBST_SND_IP: \ if (send_sock){ \ new_len+=send_address_str->len; \ if (send_sock->address.af!=AF_INET && \ send_address_str==&(send_sock->address_str)) \ new_len+=2; \ }else{ \ LM_CRIT("FIXME: null send_sock\n"); \ }; \ break; \ case SUBST_SND_PORT: \ if (send_sock){ \ new_len+=send_port_str->len; \ }else{ \ LM_CRIT("FIXME: null send_sock\n"); \ }; \ break; \ case SUBST_SND_PROTO: \ if (send_sock){ \ switch(send_sock->proto){ \ case PROTO_NONE: \ case PROTO_UDP: \ new_len+=3; \ break; \ case PROTO_TCP: \ case PROTO_TLS: \ 
switch(send_info->proto){ \ case PROTO_WS: \ case PROTO_WSS: \ new_len+=2; \ break; \ default: \ new_len+=3; \ break; \ } \ break; \ case PROTO_SCTP: \ new_len+=4; \ break; \ default: \ LM_CRIT("unknown proto %d\n", send_sock->proto); \ }\ }else{ \ LM_CRIT("FIXME: null send_sock\n"); \ }; \ break; \ case SUBST_SND_ALL: \ if (send_sock){ \ new_len+=send_address_str->len; \ if ((send_sock->address.af!=AF_INET) && \ (send_address_str==&(send_sock->address_str))) \ new_len+=2; \ if ((send_sock->port_no!=SIP_PORT) || \ (send_port_str!=&(send_sock->port_no_str))){ \ /* add :port_no */ \ new_len+=1+send_port_str->len; \ }\ /*add;transport=xxx*/ \ switch(send_sock->proto){ \ case PROTO_NONE: \ case PROTO_UDP: \ break; /* udp is the default */ \ case PROTO_TCP: \ case PROTO_TLS: \ switch(send_info->proto){ \ case PROTO_WS: \ case PROTO_WSS: \ new_len+=TRANSPORT_PARAM_LEN+2; \ break; \ default: \ new_len+=TRANSPORT_PARAM_LEN+3; \ break; \ } \ break; \ case PROTO_SCTP: \ new_len+=TRANSPORT_PARAM_LEN+4; \ break; \ default: \ LM_CRIT("unknown proto %d\n", send_sock->proto); \ }\ SENDCOMP_LUMP_LEN \ }else{ \ /* FIXME */ \ LM_CRIT("FIXME: null send_sock\n"); \ }; \ break; \ case SUBST_NOP: /* do nothing */ \ break; \ default: \ LM_CRIT("unknown subst type %d\n", (subst_l)->u.subst); \ } if (send_info){ send_sock=send_info->send_sock; }else{ send_sock=0; }; s_offset=0; new_len=0; /* init send_address_str & send_port_str */ if(send_sock && send_sock->useinfo.name.len>0) send_address_str=&(send_sock->useinfo.name); else if (msg->set_global_address.len) send_address_str=&(msg->set_global_address); else send_address_str=&(send_sock->address_str); if(send_sock && send_sock->useinfo.port_no>0) send_port_str=&(send_sock->useinfo.port_no_str); else if (msg->set_global_port.len) send_port_str=&(msg->set_global_port); else send_port_str=&(send_sock->port_no_str); /* init recv_address_str, recv_port_str & recv_port_no */ if(msg->rcv.bind_address) { 
if(msg->rcv.bind_address->useinfo.name.len>0) recv_address_str=&(msg->rcv.bind_address->useinfo.name); else recv_address_str=&(msg->rcv.bind_address->address_str); if(msg->rcv.bind_address->useinfo.port_no>0) { recv_port_str=&(msg->rcv.bind_address->useinfo.port_no_str); recv_port_no = msg->rcv.bind_address->useinfo.port_no; } else { recv_port_str=&(msg->rcv.bind_address->port_no_str); recv_port_no = msg->rcv.bind_address->port_no; } } for(t=lumps;t;t=t->next){ /* skip if this is an OPT lump and the condition is not satisfied */ if ((t->op==LUMP_ADD_OPT)&& !lump_check_opt(t, msg, send_info)) continue; for(r=t->before;r;r=r->before){ switch(r->op){ case LUMP_ADD: new_len+=r->len; break; case LUMP_ADD_SUBST: SUBST_LUMP_LEN(r); break; case LUMP_ADD_OPT: /* skip if this is an OPT lump and the condition is * not satisfied */ if (!lump_check_opt(r, msg, send_info)) goto skip_before; break; default: /* only ADD allowed for before/after */ LM_CRIT("invalid op for data lump (%x)\n", r->op); } } skip_before: switch(t->op){ case LUMP_ADD: new_len+=t->len; break; case LUMP_ADD_SUBST: SUBST_LUMP_LEN(t); break; case LUMP_ADD_OPT: /* we don't do anything here, it's only a condition for * before & after */ break; case LUMP_DEL: /* fix overlapping deleted zones */ if (t->u.offset < s_offset){ /* change len */ if (t->len>s_offset-t->u.offset) t->len-=s_offset-t->u.offset; else t->len=0; t->u.offset=s_offset; } s_offset=t->u.offset+t->len; new_len-=t->len; break; case LUMP_NOP: /* fix offset if overlapping on a deleted zone */ if (t->u.offset < s_offset){ t->u.offset=s_offset; }else s_offset=t->u.offset; /* do nothing */ break; default: LM_CRIT("invalid op for data lump (%x)\n", r->op); } for (r=t->after;r;r=r->after){ switch(r->op){ case LUMP_ADD: new_len+=r->len; break; case LUMP_ADD_SUBST: SUBST_LUMP_LEN(r); break; case LUMP_ADD_OPT: /* skip if this is an OPT lump and the condition is * not satisfied */ if (!lump_check_opt(r, msg, send_info)) goto skip_after; break; default: /* 
only ADD allowed for before/after */ LM_CRIT("invalid op for data lump (%x)\n", r->op); } } skip_after: ; /* to make gcc 3.* happy */ } return new_len; #undef RCVCOMP_LUMP_LEN #undef SENDCOMP_LUMP_LEN } /* another helper functions, adds/Removes the lump, code moved form build_req_from_req */ void process_lumps( struct sip_msg* msg, struct lump* lumps, char* new_buf, unsigned int* new_buf_offs, unsigned int* orig_offs, struct dest_info* send_info, int flag) { struct lump *t; struct lump *r; char* orig; int size; int offset; int s_offset; str* send_address_str; str* send_port_str; str* recv_address_str = NULL; str* recv_port_str = NULL; int recv_port_no = 0; struct socket_info* send_sock; #ifdef USE_COMP #define RCVCOMP_PARAM_ADD \ /* add ;comp=xxxx */ \ switch(msg->rcv.comp){ \ case COMP_NONE: \ break; \ case COMP_SIGCOMP: \ memcpy(new_buf+offset, COMP_PARAM, COMP_PARAM_LEN);\ offset+=COMP_PARAM_LEN; \ memcpy(new_buf+offset, SIGCOMP_NAME, \ SIGCOMP_NAME_LEN); \ offset+=SIGCOMP_NAME_LEN; \ break; \ case COMP_SERGZ: \ memcpy(new_buf+offset, COMP_PARAM, COMP_PARAM_LEN);\ offset+=COMP_PARAM_LEN; \ memcpy(new_buf+offset, SERGZ_NAME, SERGZ_NAME_LEN); \ offset+=SERGZ_NAME_LEN; \ break;\ default:\ LM_CRIT("unknown comp %d\n", msg->rcv.comp); \ } #define SENDCOMP_PARAM_ADD \ /* add ;comp=xxxx */ \ switch(send_info->comp){ \ case COMP_NONE: \ break; \ case COMP_SIGCOMP: \ memcpy(new_buf+offset, COMP_PARAM, COMP_PARAM_LEN);\ offset+=COMP_PARAM_LEN; \ memcpy(new_buf+offset, SIGCOMP_NAME, \ SIGCOMP_NAME_LEN); \ offset+=SIGCOMP_NAME_LEN; \ break; \ case COMP_SERGZ: \ memcpy(new_buf+offset, COMP_PARAM, COMP_PARAM_LEN);\ offset+=COMP_PARAM_LEN; \ memcpy(new_buf+offset, SERGZ_NAME, SERGZ_NAME_LEN); \ offset+=SERGZ_NAME_LEN; \ break;\ default:\ LM_CRIT("unknown comp %d\n", msg->rcv.comp); \ } #else #define RCVCOMP_PARAM_ADD #define SENDCOMP_PARAM_ADD #endif /* USE_COMP */ #define SUBST_LUMP(subst_l) \ switch((subst_l)->u.subst){ \ case SUBST_RCV_IP: \ if (msg->rcv.bind_address){ \ if 
(msg->rcv.bind_address->address.af!=AF_INET){\ new_buf[offset]='['; offset++; \ }\ memcpy(new_buf+offset, recv_address_str->s, \ recv_address_str->len); \ offset+=recv_address_str->len; \ if (msg->rcv.bind_address->address.af!=AF_INET){\ new_buf[offset]=']'; offset++; \ }\ }else{ \ /*FIXME*/ \ LM_CRIT("FIXME: null bind_address\n"); \ }; \ break; \ case SUBST_RCV_PORT: \ if (msg->rcv.bind_address){ \ memcpy(new_buf+offset, recv_port_str->s, \ recv_port_str->len); \ offset+=recv_port_str->len; \ }else{ \ /*FIXME*/ \ LM_CRIT("FIXME: null bind_address\n"); \ }; \ break; \ case SUBST_RCV_ALL: \ if (msg->rcv.bind_address){ \ /* address */ \ if (msg->rcv.bind_address->address.af!=AF_INET){\ new_buf[offset]='['; offset++; \ }\ memcpy(new_buf+offset, recv_address_str->s, \ recv_address_str->len); \ offset+=recv_address_str->len; \ if (msg->rcv.bind_address->address.af!=AF_INET){\ new_buf[offset]=']'; offset++; \ }\ /* :port */ \ if (recv_port_no!=SIP_PORT){ \ new_buf[offset]=':'; offset++; \ memcpy(new_buf+offset, \ recv_port_str->s, \ recv_port_str->len); \ offset+=recv_port_str->len; \ }\ switch(msg->rcv.bind_address->proto){ \ case PROTO_NONE: \ case PROTO_UDP: \ break; /* nothing to do, udp is default*/ \ case PROTO_TCP: \ memcpy(new_buf+offset, TRANSPORT_PARAM, \ TRANSPORT_PARAM_LEN); \ offset+=TRANSPORT_PARAM_LEN; \ if (msg->rcv.proto == PROTO_WS) { \ memcpy(new_buf+offset, "ws", 2); \ offset+=2; \ } else { \ memcpy(new_buf+offset, "tcp", 3); \ offset+=3; \ } \ break; \ case PROTO_TLS: \ memcpy(new_buf+offset, TRANSPORT_PARAM, \ TRANSPORT_PARAM_LEN); \ offset+=TRANSPORT_PARAM_LEN; \ if (msg->rcv.proto == PROTO_WS || msg->rcv.proto == PROTO_WSS) { \ memcpy(new_buf+offset, "ws", 2); \ offset+=2; \ } else { \ memcpy(new_buf+offset, "tls", 3); \ offset+=3; \ } \ break; \ case PROTO_SCTP: \ memcpy(new_buf+offset, TRANSPORT_PARAM, \ TRANSPORT_PARAM_LEN); \ offset+=TRANSPORT_PARAM_LEN; \ memcpy(new_buf+offset, "sctp", 4); \ offset+=4; \ break; \ default: \ LM_CRIT("unknown 
proto %d\n", msg->rcv.bind_address->proto); \ } \ RCVCOMP_PARAM_ADD \ }else{ \ /*FIXME*/ \ LM_CRIT("FIXME: null bind_address\n"); \ }; \ break; \ case SUBST_SND_IP: \ if (send_sock){ \ if ((send_sock->address.af!=AF_INET) && \ (send_address_str==&(send_sock->address_str))){\ new_buf[offset]='['; offset++; \ }\ memcpy(new_buf+offset, send_address_str->s, \ send_address_str->len); \ offset+=send_address_str->len; \ if ((send_sock->address.af!=AF_INET) && \ (send_address_str==&(send_sock->address_str))){\ new_buf[offset]=']'; offset++; \ }\ }else{ \ /*FIXME*/ \ LM_CRIT("FIXME: null send_sock\n"); \ }; \ break; \ case SUBST_SND_PORT: \ if (send_sock){ \ memcpy(new_buf+offset, send_port_str->s, \ send_port_str->len); \ offset+=send_port_str->len; \ }else{ \ /*FIXME*/ \ LM_CRIT("FIXME: null send_sock\n"); \ }; \ break; \ case SUBST_SND_ALL: \ if (send_sock){ \ /* address */ \ if ((send_sock->address.af!=AF_INET) && \ (send_address_str==&(send_sock->address_str))){\ new_buf[offset]='['; offset++; \ }\ memcpy(new_buf+offset, send_address_str->s, \ send_address_str->len); \ offset+=send_address_str->len; \ if ((send_sock->address.af!=AF_INET) && \ (send_address_str==&(send_sock->address_str))){\ new_buf[offset]=']'; offset++; \ }\ /* :port */ \ if ((send_sock->port_no!=SIP_PORT) || \ (send_port_str!=&(send_sock->port_no_str))){ \ new_buf[offset]=':'; offset++; \ memcpy(new_buf+offset, send_port_str->s, \ send_port_str->len); \ offset+=send_port_str->len; \ }\ switch(send_sock->proto){ \ case PROTO_NONE: \ case PROTO_UDP: \ break; /* nothing to do, udp is default*/ \ case PROTO_TCP: \ memcpy(new_buf+offset, TRANSPORT_PARAM, \ TRANSPORT_PARAM_LEN); \ offset+=TRANSPORT_PARAM_LEN; \ if (send_info->proto == PROTO_WS) { \ memcpy(new_buf+offset, "ws", 2); \ offset+=2; \ } else { \ memcpy(new_buf+offset, "tcp", 3); \ offset+=3; \ } \ break; \ case PROTO_TLS: \ memcpy(new_buf+offset, TRANSPORT_PARAM, \ TRANSPORT_PARAM_LEN); \ offset+=TRANSPORT_PARAM_LEN; \ if (send_info->proto == 
PROTO_WS || send_info->proto == PROTO_WSS) { \ memcpy(new_buf+offset, "ws", 2); \ offset+=2; \ } else { \ memcpy(new_buf+offset, "tls", 3); \ offset+=3; \ } \ break; \ case PROTO_SCTP: \ memcpy(new_buf+offset, TRANSPORT_PARAM, \ TRANSPORT_PARAM_LEN); \ offset+=TRANSPORT_PARAM_LEN; \ memcpy(new_buf+offset, "sctp", 4); \ offset+=4; \ break; \ default: \ LM_CRIT("unknown proto %d\n", send_sock->proto); \ } \ SENDCOMP_PARAM_ADD \ }else{ \ /*FIXME*/ \ LM_CRIT("FIXME: null bind_address\n"); \ }; \ break; \ case SUBST_RCV_PROTO: \ if (msg->rcv.bind_address){ \ switch(msg->rcv.bind_address->proto){ \ case PROTO_NONE: \ case PROTO_UDP: \ memcpy(new_buf+offset, "udp", 3); \ offset+=3; \ break; \ case PROTO_TCP: \ if (msg->rcv.proto == PROTO_WS) { \ memcpy(new_buf+offset, "ws", 2); \ offset+=2; \ } else { \ memcpy(new_buf+offset, "tcp", 3); \ offset+=3; \ } \ break; \ case PROTO_TLS: \ if (msg->rcv.proto == PROTO_WS || msg->rcv.proto == PROTO_WSS) { \ memcpy(new_buf+offset, "ws", 2); \ offset+=2; \ } else { \ memcpy(new_buf+offset, "tls", 3); \ offset+=3; \ } \ break; \ case PROTO_SCTP: \ memcpy(new_buf+offset, "sctp", 4); \ offset+=4; \ break; \ default: \ LM_CRIT("unknown proto %d\n", msg->rcv.bind_address->proto); \ } \ }else{ \ /*FIXME*/ \ LM_CRIT("FIXME: null send_sock \n"); \ }; \ break; \ case SUBST_SND_PROTO: \ if (send_sock){ \ switch(send_sock->proto){ \ case PROTO_NONE: \ case PROTO_UDP: \ memcpy(new_buf+offset, "udp", 3); \ offset+=3; \ break; \ case PROTO_TCP: \ if (send_info->proto == PROTO_WS) { \ memcpy(new_buf+offset, "ws", 2); \ offset+=2; \ } else { \ memcpy(new_buf+offset, "tcp", 3); \ offset+=3; \ } \ break; \ case PROTO_TLS: \ if (send_info->proto == PROTO_WS || send_info->proto == PROTO_WSS) { \ memcpy(new_buf+offset, "ws", 2); \ offset+=2; \ } else { \ memcpy(new_buf+offset, "tls", 3); \ offset+=3; \ } \ break; \ case PROTO_SCTP: \ memcpy(new_buf+offset, "sctp", 4); \ offset+=4; \ break; \ default: \ LM_CRIT("unknown proto %d\n", send_sock->proto); \ } 
\ }else{ \ /*FIXME*/ \ LM_CRIT("FIXME: null send_sock \n"); \ }; \ break; \ default: \ LM_CRIT("unknown subst type %d\n", (subst_l)->u.subst); \ } if (send_info){ send_sock=send_info->send_sock; }else{ send_sock=0; } /* init send_address_str & send_port_str */ if (msg->set_global_address.len) send_address_str=&(msg->set_global_address); else send_address_str=&(send_sock->address_str); if (msg->set_global_port.len) send_port_str=&(msg->set_global_port); else send_port_str=&(send_sock->port_no_str); /* init send_address_str & send_port_str */ if(send_sock && send_sock->useinfo.name.len>0) send_address_str=&(send_sock->useinfo.name); else if (msg->set_global_address.len) send_address_str=&(msg->set_global_address); else send_address_str=&(send_sock->address_str); if(send_sock && send_sock->useinfo.port_no>0) send_port_str=&(send_sock->useinfo.port_no_str); else if (msg->set_global_port.len) send_port_str=&(msg->set_global_port); else send_port_str=&(send_sock->port_no_str); /* init recv_address_str, recv_port_str & recv_port_no */ if(msg->rcv.bind_address) { if(msg->rcv.bind_address->useinfo.name.len>0) recv_address_str=&(msg->rcv.bind_address->useinfo.name); else recv_address_str=&(msg->rcv.bind_address->address_str); if(msg->rcv.bind_address->useinfo.port_no>0) { recv_port_str=&(msg->rcv.bind_address->useinfo.port_no_str); recv_port_no = msg->rcv.bind_address->useinfo.port_no; } else { recv_port_str=&(msg->rcv.bind_address->port_no_str); recv_port_no = msg->rcv.bind_address->port_no; } } orig=msg->buf; offset=*new_buf_offs; s_offset=*orig_offs; for (t=lumps;t;t=t->next){ switch(t->op){ case LUMP_ADD: case LUMP_ADD_SUBST: case LUMP_ADD_OPT: /* skip if this is an OPT lump and the condition is * not satisfied */ if ((t->op==LUMP_ADD_OPT) && (!lump_check_opt(t, msg, send_info))) continue; /* just add it here! 
*/ /* process before */ for(r=t->before;r;r=r->before){ switch (r->op){ case LUMP_ADD: /*just add it here*/ memcpy(new_buf+offset, r->u.value, r->len); offset+=r->len; break; case LUMP_ADD_SUBST: SUBST_LUMP(r); break; case LUMP_ADD_OPT: /* skip if this is an OPT lump and the condition is * not satisfied */ if (!lump_check_opt(r, msg, send_info)) goto skip_before; break; default: /* only ADD allowed for before/after */ LM_CRIT("invalid op for data lump (%x)\n", r->op); } } skip_before: /* copy "main" part */ switch(t->op){ case LUMP_ADD: memcpy(new_buf+offset, t->u.value, t->len); offset+=t->len; break; case LUMP_ADD_SUBST: SUBST_LUMP(t); break; case LUMP_ADD_OPT: /* do nothing, it's only a condition */ break; default: /* should not ever get here */ LM_CRIT("unhandled data lump op %d\n", t->op); } /* process after */ for(r=t->after;r;r=r->after){ switch (r->op){ case LUMP_ADD: /*just add it here*/ memcpy(new_buf+offset, r->u.value, r->len); offset+=r->len; break; case LUMP_ADD_SUBST: SUBST_LUMP(r); break; case LUMP_ADD_OPT: /* skip if this is an OPT lump and the condition is * not satisfied */ if (!lump_check_opt(r, msg, send_info)) goto skip_after; break; default: /* only ADD allowed for before/after */ LM_CRIT("invalid op for data lump (%x)\n", r->op); } } skip_after: break; case LUMP_NOP: case LUMP_DEL: /* copy till offset */ if (s_offset>t->u.offset){ LM_DBG("WARNING: (%d) overlapped lumps offsets," " ignoring(%x, %x)\n", t->op, s_offset,t->u.offset); /* this should've been fixed above (when computing len) */ /* just ignore it*/ break; } size=t->u.offset-s_offset; if (size > 0 && flag == FLAG_MSG_ALL){ memcpy(new_buf+offset, orig+s_offset,size); offset+=size; s_offset+=size; } else if (flag == FLAG_MSG_LUMPS_ONLY) { /* do not copy the whole message, jump to the lumps offs */ s_offset+=size; } /* the LUMP_DELs are printed with "- " before them */ if (t->op==LUMP_DEL && flag == FLAG_MSG_LUMPS_ONLY) { new_buf[offset++] = '-'; new_buf[offset++] = ' '; } /* process 
before */
			/* (tail of process_lumps(); the function head and the
			 * SUBST_LUMP macro it uses are defined above this chunk) */
			for(r=t->before;r;r=r->before){
				switch (r->op){
					case LUMP_ADD:
						/*just add it here*/
						memcpy(new_buf+offset, r->u.value, r->len);
						offset+=r->len;
						break;
					case LUMP_ADD_SUBST:
						SUBST_LUMP(r);
						break;
					case LUMP_ADD_OPT:
						/* skip if this is an OPT lump and the condition is
						 * not satisfied */
						if (!lump_check_opt(r, msg, send_info))
							goto skip_nop_before;
						break;
					default:
						/* only ADD allowed for before/after */
						LM_CRIT("invalid op for data lump (%x)\n",r->op);
				}
			}
skip_nop_before:
			/* process main (del only) */
			if (t->op==LUMP_DEL && flag == FLAG_MSG_ALL){
				/* skip len bytes from orig msg */
				s_offset+=t->len;
			} else if (t->op==LUMP_DEL && flag == FLAG_MSG_LUMPS_ONLY) {
				/* copy lump value and indent as necessarely */
				memcpy(new_buf+offset, orig + t->u.offset, t->len);
				offset+=t->len;
				/* make sure each printed lump ends on its own line */
				if (new_buf[offset-1] != '\n') {
					new_buf[offset] = '\n';
					offset+=1;
				}
				/* skip len bytes from orig msg */
				s_offset+=t->len;
			}
			/* process after */
			for(r=t->after;r;r=r->after){
				switch (r->op){
					case LUMP_ADD:
						/*just add it here*/
						memcpy(new_buf+offset, r->u.value, r->len);
						offset+=r->len;
						break;
					case LUMP_ADD_SUBST:
						SUBST_LUMP(r);
						break;
					case LUMP_ADD_OPT:
						/* skip if this is an OPT lump and the condition is
						 * not satisfied */
						if (!lump_check_opt(r, msg, send_info))
							goto skip_nop_after;
						break;
					default:
						/* only ADD allowed for before/after */
						LM_CRIT("invalid op for data lump (%x)\n", r->op);
				}
			}
skip_nop_after:
			break;
		default:
			LM_CRIT("unknown op (%x)\n", t->op);
	}
}
/* report back how far we got in both buffers */
*new_buf_offs=offset;
*orig_offs=s_offset;
/* add '\0' to char* lump list to print it smoothly */
if (flag == FLAG_MSG_LUMPS_ONLY) {
	new_buf[offset] = '\0';
}
#undef RCVCOMP_PARAM_ADD
#undef SENDCOMP_PARAM_ADD
}


/*
 * Adjust/insert Content-Length if necessary.
 *
 * @param msg        - message whose body lumps have already been computed
 * @param body_delta - length difference caused by body-modifying lumps;
 *                     0 means the body was not modified
 * @param proto      - transport the message will be sent over; for
 *                     stream transports (TCP/TLS) a Content-Length is
 *                     always enforced, for UDP only when the body changed
 * @return 0 on success, -1 on error (parse/anchor/alloc failure)
 *
 * Works by lumps only: it either anchors a brand new Content-Length
 * (clen_builder output) or deletes the existing header body and inserts
 * the corrected value after it.
 */
static inline int adjust_clen(struct sip_msg* msg, int body_delta, int proto)
{
	struct lump* anchor;
	char* clen_buf;
	int clen_len, body_only;
#ifdef USE_TCP
	char* body;
	int comp_clen;
#endif /* USE_TCP */

	/* Calculate message length difference caused by lumps modifying message
	 * body, from this point on the message body must not be modified. Zero
	 * value indicates that the body hasn't been modified */

	clen_buf = 0;
	anchor=0;
	body_only=1;
	/* check to see if we need to add clen */
#ifdef USE_TCP
	if (proto == PROTO_TCP
#ifdef USE_TLS
			|| proto == PROTO_TLS
#endif
			) {
		if (parse_headers(msg, HDR_CONTENTLENGTH_F, 0)==-1){
			LM_ERR("error parsing content-length\n");
			goto error;
		}
		if (unlikely(msg->content_length==0)){
			/* not present, we need to add it */
			/* msg->unparsed should point just before the final crlf
			 * - whole message was parsed by the above parse_headers
			 * which did not find content-length */
			anchor=anchor_lump(msg, msg->unparsed-msg->buf, 0,
								HDR_CONTENTLENGTH_T);
			if (anchor==0){
				LM_ERR("cannot set clen anchor\n");
				goto error;
			}
			body_only=0;
		}else{
			/* compute current content length and compare it with the
			 * value advertised in the message */
			body=get_body(msg);
			if (unlikely(body==0)){
				ser_error=E_BAD_REQ;
				LM_ERR("no message body found (missing crlf?)");
				goto error;
			}
			comp_clen=msg->len-(int)(body-msg->buf)+body_delta;
			if (comp_clen!=(int)(long)msg->content_length->parsed){
				/* note: we don't distinguish here between received with
				   wrong content-length and content-length changed, we just
				   fix it automatically in both cases (the reason being that
				   an error message telling we have received a msg with wrong
				   content-length is of very little use) */
				anchor = del_lump(msg, msg->content_length->body.s-msg->buf,
									msg->content_length->body.len,
									HDR_CONTENTLENGTH_T);
				if (anchor==0) {
					LM_ERR("Can't remove original Content-Length\n");
					goto error;
				}
				body_only=1;
			}
		}
	}else
#endif /* USE_TCP */
	if (body_delta){
		if (parse_headers(msg, HDR_CONTENTLENGTH_F, 0) == -1) {
			LM_ERR("Error parsing Content-Length\n");
			goto error;
		}
		/* The body has been changed, try to find
		 * existing Content-Length */
		/* no need for Content-Length if it's and UDP packet and
		 * it hasn't Content-Length already */
		if (msg->content_length==0){
			/* content-length doesn't exist, append it */
			/* msg->unparsed should point just before the final crlf
			 * - whole message was parsed by the above parse_headers
			 * which did not find content-length */
			if (proto!=PROTO_UDP){
				anchor=anchor_lump(msg, msg->unparsed-msg->buf, 0,
									HDR_CONTENTLENGTH_T);
				if (anchor==0){
					LM_ERR("cannot set clen anchor\n");
					goto error;
				}
				body_only=0;
			} /* else
				LM_DBG("UDP packet with no clen => not adding one \n"); */
		}else{
			/* Content-Length has been found, remove it */
			anchor = del_lump(msg, msg->content_length->body.s - msg->buf,
								msg->content_length->body.len,
								HDR_CONTENTLENGTH_T);
			if (anchor==0) {
				LM_ERR("Can't remove original Content-Length\n");
				goto error;
			}
		}
	}

	/* a non-null anchor means a (new) Content-Length value is needed */
	if (anchor){
		clen_buf = clen_builder(msg, &clen_len, body_delta, body_only);
		if (!clen_buf) goto error;
		if (insert_new_lump_after(anchor, clen_buf, clen_len,
					HDR_CONTENTLENGTH_T) == 0)
			goto error;
	}

	return 0;
error:
	if (clen_buf) pkg_free(clen_buf);
	return -1;
}


/*
 * Scan *buf (of length *buf_len) line by line (CRLF-terminated) looking
 * for a line that starts with the first text_len bytes of text.
 * On success updates *buf/*buf_len to point at the matching line and
 * returns 1; returns 0 when not found or when a bare CR (no LF) is seen.
 */
static inline int find_line_start(char *text, unsigned int text_len,
		char **buf, unsigned int *buf_len)
{
	char *ch, *start;
	unsigned int len;

	start = *buf;
	len = *buf_len;

	while (text_len <= len) {
		if (strncmp(text, start, text_len) == 0) {
			*buf = start;
			*buf_len = len;
			return 1;
		}
		/* advance to the next line: find CR, expect LF right after
		 * (searching len-1 bytes keeps ch+1 inside the buffer) */
		if ((ch = memchr(start, 13, len - 1))) {
			if (*(ch + 1) != 10) {
				LM_ERR("No LF after CR\n");
				return 0;
			}
			len = len - (ch - start + 2);
			start = ch + 2;
		} else {
			LM_ERR("No CRLF found\n");
			return 0;
		}
	}
	return 0;
}


/*
 * Length of the first line of s including its CRLF terminator;
 * returns s.len when no CR is present, 0 when a CR is not followed by LF.
 * NOTE(review): when the CR is the very last byte of s, *(ch + 1) reads
 * one byte past the buffer — confirm callers always pass CRLF-terminated
 * data.
 */
static inline int get_line(str s)
{
	char *ch;

	if ((ch = memchr(s.s, 13, s.len))) {
		if (*(ch + 1) != 10) {
			LM_ERR("No LF after CR\n");
			return 0;
		}
		return ch - s.s + 2;
	} else {
		LM_ERR("No CRLF found\n");
		return s.len;
	}
	return 0;
}


/*
 * Replace the whole message body with txt, using lumps: any non-shm body
 * lumps are dropped, the current body is deleted and a pkg-allocated copy
 * of txt is inserted at the body anchor.
 * Returns 1 on success, 0 on failure.
 */
int replace_body(struct sip_msg *msg, str txt)
{
	struct lump *anchor;
	char *buf;
	str body = {0,0};

	body.s = get_body(msg);
	if(body.s==0) {
		LM_ERR("malformed sip message\n");
		return 0;
	}
	body.len = msg->len -(int)(body.s-msg->buf);
	LM_DBG("old size body[%d] actual[%d]\n", body.len, txt.len);
	if(body.s+body.len>msg->buf+msg->len) {
		LM_ERR("invalid content length: %d\n", body.len);
		return 0;
	}
	/* discard pending (non-shm) body lumps before replacing the body */
	del_nonshm_lump( &(msg->body_lumps) );
	msg->body_lumps = NULL;
	if(del_lump(msg, body.s-msg->buf, body.len, 0) == 0) {
		LM_ERR("cannot delete existing body");
		return 0;
	}
	anchor = anchor_lump(msg, body.s - msg->buf, 0, 0);
	if(anchor==0) {
		LM_ERR("failed to get anchor\n");
		return 0;
	}
	buf=pkg_malloc(sizeof(char)*txt.len);
	if(buf==0) {
		PKG_MEM_ERROR;
		return 0;
	}
	memcpy(buf, txt.s, txt.len);
	if(insert_new_lump_after(anchor, buf, txt.len, 0)==0) {
		LM_ERR("failed to insert body lump\n");
		pkg_free(buf);
		return 0;
	}
	return 1;
}


/**
 * returns the boundary defined by the Content-Type
 * header
 *
 * On success returns 0 and fills *boundary with a pkg-allocated copy of
 * the boundary parameter, prefixed with "--"; the caller must pkg_free()
 * it. Returns -1 on parse/alloc error. Note: also returns 0 (with
 * boundary->s left NULL) when the params contain no "boundary" entry.
 */
int get_boundary(struct sip_msg* msg, str* boundary)
{
	str params;
	param_t *p, *list;
	param_hooks_t hooks;

	params.s = memchr(msg->content_type->body.s, ';',
		msg->content_type->body.len);
	if (params.s == NULL) {
		LM_INFO("Content-Type hdr has no params <%.*s>\n",
				msg->content_type->body.len, msg->content_type->body.s);
		return -1;
	}
	params.len = msg->content_type->body.len -
		(params.s - msg->content_type->body.s);
	if (parse_params(&params, CLASS_ANY, &hooks, &list) < 0) {
		LM_ERR("while parsing Content-Type params\n");
		return -1;
	}
	boundary->s = NULL;
	boundary->len = 0;
	for (p = list; p; p = p->next) {
		if ((p->name.len == 8)
			&& (strncasecmp(p->name.s, "boundary", 8) == 0)) {
			/* prepend the "--" delimiter prefix per MIME multipart */
			boundary->s = pkg_malloc(p->body.len + 2);
			if (boundary->s == NULL) {
				free_params(list);
				LM_ERR("no memory for boundary string\n");
				return -1;
			}
			*(boundary->s) = '-';
			*(boundary->s + 1) = '-';
			memcpy(boundary->s + 2, p->body.s, p->body.len);
			boundary->len = 2 + p->body.len;
			LM_DBG("boundary is <%.*s>\n", boundary->len, boundary->s);
			break;
		}
	}
	free_params(list);
	return 0;
}


/*
 * Check (and repair) the multipart boundaries of a message whose body was
 * built with FL_BODY_MULTIPART set: rebuilds the body so every part is
 * delimited by boundary+CRLF and the final boundary carries the trailing
 * "--". Returns 0 when the flag is not set (nothing to do), 1 on success
 * (body replaced, flag cleared), -1 on error.
 */
int check_boundaries(struct sip_msg *msg, struct dest_info *send_info)
{
	str b = {0,0};
	str fb = {0,0};
	str ob = {0,0};
	str bsuffix = {"\r\n", 2};
	str fsuffix = {"--\r\n", 4};
	str body = {0,0};
	str buf = {0,0};
	str tmp = {0,0};
	struct str_list* lb = NULL;
	struct str_list* lb_t = NULL;
	int lb_found = 0;
	int t,
ret, lb_size;
	char *pb;

	if(!(msg->msg_flags&FL_BODY_MULTIPART)) return 0;
	else
	{
		buf.s = build_body(msg, (unsigned int *)&buf.len, &ret, send_info);
		if(ret) {
			LM_ERR("Can't get body\n");
			return -1;
		}

		tmp.s = buf.s;
		t = tmp.len = buf.len;

		/* ob = raw boundary ("--..."), b = boundary + CRLF,
		 * fb = boundary + "--" + CRLF (final form) */
		if(get_boundary(msg, &ob)!=0) {
			if(tmp.s) pkg_free(tmp.s);
			return -1;
		}
		if(str_append(&ob, &bsuffix, &b)!=0) {
			LM_ERR("Can't append suffix to boundary\n");
			goto error;
		}
		if(str_append(&ob, &fsuffix,&fb)!=0) {
			LM_ERR("Can't append suffix to final boundary\n");
			goto error;
		}
		/* match only the bare boundary (without its CRLF) */
		ret = b.len-2;
		/* collect the start of every boundary line into list lb */
		while(t>0)
		{
			if(find_line_start(b.s, ret, &tmp.s, (unsigned int *)&tmp.len))
			{
				/*LM_DBG("found t[%d] tmp.len[%d]:[%.*s]\n",
						t, tmp.len, tmp.len, tmp.s);*/
				if(!lb)
				{
					lb = pkg_malloc(sizeof(struct str_list));
					if (!lb) {
						PKG_MEM_ERROR;
						goto error;
					}
					lb->s.s = tmp.s;
					lb->s.len = tmp.len;
					lb->next = 0;
					lb_t = lb;
				}
				else {
					lb_t = append_str_list(tmp.s, tmp.len, &lb_t, &lb_size);
				}
				lb_found = lb_found + 1;
				tmp.s = tmp.s + ret;
				t = t - ret;
				tmp.len = tmp.len - ret;
			}
			else { t=0; }
		}

		/* a well-formed multipart body has at least an opening and a
		 * closing boundary */
		if(lb_found<2)
		{
			LM_ERR("found[%d] wrong number of boundaries\n", lb_found);
			goto error;
		}
		/* adding 2 chars in advance */
		body.len = buf.len + 2;
		body.s = pkg_malloc(sizeof(char)*body.len);
		if (!body.s) {
			PKG_MEM_ERROR;
			goto error;
		}
		pb = body.s;
		body.len = 0;
		lb_t = lb;
		/* rewrite each chunk, normalizing malformed boundary lines */
		while(lb_t)
		{
			tmp.s = lb_t->s.s;
			tmp.len = lb_t->s.len;
			tmp.len = get_line(lb_t->s);
			if(tmp.len!=b.len || strncmp(b.s, tmp.s, b.len)!=0)
			{
				LM_DBG("malformed bondary in the middle\n");
				memcpy(pb, b.s, b.len);
				body.len = body.len + b.len;
				pb = pb + b.len;
				/* NOTE(review): this computes -tmp.len (always negative)
				 * and then passes it to memcpy as the size — looks wrong;
				 * confirm against upstream before touching */
				t = lb_t->s.s - (lb_t->s.s + tmp.len);
				memcpy(pb, lb_t->s.s+tmp.len, t);
				pb = pb + t;
				/*LM_DBG("new chunk[%d][%.*s]\n", t, t, pb-t);*/
			}
			else {
				/* NOTE(review): lb_t->next is dereferenced here and in the
				 * loop-termination check below without a NULL test on lb_t;
				 * this relies on lb_found>=2 (checked above) — confirm */
				t = lb_t->next->s.s - lb_t->s.s;
				memcpy(pb, lb_t->s.s, t);
				/*LM_DBG("copy[%d][%.*s]\n", t, t, pb);*/
				pb = pb + t;
			}
			body.len = body.len + t;
			/*LM_DBG("body[%d][%.*s]\n", body.len, body.len, body.s);*/
			lb_t = lb_t->next;
			if(!lb_t->next) lb_t = NULL;
		}

		/* last boundary: make sure it ends with "--" */
		tmp.s = lb->s.s;
		tmp.len = lb->s.len;
		tmp.len = get_line(lb->s);
		if(tmp.len!=fb.len || strncmp(fb.s, tmp.s, fb.len)!=0)
		{
			LM_DBG("last bondary without -- at the end\n");
			memcpy(pb, fb.s, fb.len);
			/*LM_DBG("new chunk[%d][%.*s]\n", fb.len, fb.len, pb);*/
			pb = pb + fb.len;
			body.len = body.len + fb.len;
		}
		else {
			memcpy(pb, lb->s.s, lb->s.len);
			pb = pb + lb->s.len;
			body.len = body.len + lb->s.len;
			/*LM_DBG("copy[%d][%.*s]\n",
				lb->s.len, lb->s.len, pb - lb->s.len);*/
		}
		/*LM_DBG("body[%d][%.*s] expected[%ld]\n",
			body.len, body.len, body.s, pb-body.s); */
		if(!replace_body(msg, body))
		{
			LM_ERR("Can't replace body\n");
			goto error;
		}
		msg->msg_flags &= ~FL_BODY_MULTIPART;
		ret = 1;
		goto clean;
	}
error:
	ret = -1;
clean:
	/* free all work buffers and the boundary list */
	if(ob.s) pkg_free(ob.s);
	if(b.s) pkg_free(b.s);
	if(fb.s) pkg_free(fb.s);
	if(body.s) pkg_free(body.s);
	if(buf.s) pkg_free(buf.s);
	while(lb)
	{
		lb_t = lb->next;
		pkg_free(lb);
		lb = lb_t;
	}
	return ret;
}


/** builds a request in memory from another sip request.
 *
 * Side-effects: - it adds lumps to the msg which are _not_ cleaned.
 * The added lumps are HDR_VIA_T (almost always added), HDR_CONTENLENGTH_T
 * and HDR_ROUTE_T (when a Route: header is added as a result of a non-null
 * msg->path_vec).
 * - it might change send_info->proto and send_info->send_socket
 * if proto fallback is enabled (see below).
 *
 * Uses also global_req_flags ( OR'ed with msg->msg_flags, see send_info
 * below).
 *
 * @param msg - sip message structure, complete with lumps
 * @param returned_len - result length (filled in)
 * @param send_info - dest_info structure (value/result), contains where the
 *                    packet will be sent to (it's needed for building a
 *                    correct via, fill RR lumps a.s.o.). If MTU based
 *                    protocol fall-back is enabled (see flags below),
 *                    send_info->proto might be updated with the new
 *                    protocol.
 *                    msg->msg_flags used:
 *                     - FL_TCP_MTU_FB, FL_TLS_MTU_FB and FL_SCTP_MTU_FB -
 *                        fallback to the corresp. proto if the built
 *                        message > mtu and send_info->proto==PROTO_UDP.
 *                        It will also update send_info->proto.
 *                     - FL_FORCE_RPORT: add rport to via
 * @param mode - flags for building the message, can be a combination of:
 *                * BUILD_NO_LOCAL_VIA - don't add a local via
 *                * BUILD_NO_VIA1_UPDATE - don't update first via (rport,
 *                   received a.s.o)
 *                * BUILD_NO_PATH - don't add a Route: header with the
 *                   msg->path_vec content.
 *                * BUILD_IN_SHM - build the result in shm memory
 *
 * @return pointer to the new request (pkg_malloc'ed or shm_malloc'ed,
 * depending on the presence of the BUILD_IN_SHM flag, needs freeing when
 * done) and sets returned_len or 0 on error.
 */
char * build_req_buf_from_sip_req( struct sip_msg* msg,
								unsigned int *returned_len,
								struct dest_info* send_info,
								unsigned int mode)
{
	unsigned int len, new_len, received_len, rport_len, uri_len, via_len,
					body_delta;
	char* line_buf;
	char* received_buf;
	char* rport_buf;
	char* new_buf;
	char* buf;
	str path_buf;
	unsigned int offset, s_offset, size;
	struct lump* via_anchor;
	struct lump* via_lump;
	struct lump* via_insert_param;
	struct lump* path_anchor;
	struct lump* path_lump;
	str branch;
	unsigned int flags;
	unsigned int udp_mtu;
	struct dest_info di;

	/* init everything so the error path can free safely */
	via_insert_param=0;
	uri_len=0;
	buf=msg->buf;
	len=msg->len;
	received_len=0;
	rport_len=0;
	new_buf=0;
	received_buf=0;
	rport_buf=0;
	via_anchor=0;
	line_buf=0;
	via_len=0;
	path_buf.s=0;
	path_buf.len=0;

	flags=msg->msg_flags|global_req_flags;
	/* normalize multipart boundaries first (best effort, warn only) */
	if(check_boundaries(msg, send_info)<0){
		LM_WARN("check_boundaries error\n");
	}

	/* Calculate message body difference and adjust Content-Length */
	body_delta = lumps_len(msg, msg->body_lumps, send_info);
	if (adjust_clen(msg, body_delta, send_info->proto) < 0) {
		LM_ERR("Error while adjusting Content-Length\n");
		goto error00;
	}

	if(unlikely(mode&BUILD_NO_LOCAL_VIA))
		goto after_local_via;

	/* create the via header */
	branch.s=msg->add_to_branch_s;
	branch.len=msg->add_to_branch_len;

	via_anchor=anchor_lump(msg, msg->via1->hdr.s-buf, 0, HDR_VIA_T);
	if (unlikely(via_anchor==0))
		goto error00;
	line_buf = create_via_hf( &via_len, msg, send_info, &branch);
	if (unlikely(!line_buf)){
		LM_ERR("could not create Via header\n");
		goto error00;
	}
after_local_via:
	if(unlikely(mode&BUILD_NO_VIA1_UPDATE))
		goto after_update_via1;
	/* check if received needs to be added */
	if ( received_test(msg) ) {
		if ((received_buf=received_builder(msg,&received_len))==0){
			LM_ERR("received_builder failed\n");
			goto error01;  /* free also line_buf */
		}
	}

	/* check if rport needs to be updated:
	 *  - if FL_FORCE_RPORT is set add it (and del. any previous version)
	 *  - if via already contains an rport add it and overwrite the previous
	 *  rport value if present (if you don't want to overwrite the previous
	 *  version remove the comments) */
	if ((flags&FL_FORCE_RPORT)||
			(msg->via1->rport /*&& msg->via1->rport->value.s==0*/)){
		if ((rport_buf=rport_builder(msg, &rport_len))==0){
			LM_ERR("rport_builder failed\n");
			goto error01; /* free everything */
		}
	}

	/* find out where the offset of the first parameter that should be added
	 * (after host:port), needed by add receive & maybe rport */
	if (msg->via1->params.s){
		size= msg->via1->params.s-msg->via1->hdr.s-1; /*compensate for ';' */
	}else{
		size= msg->via1->host.s-msg->via1->hdr.s+msg->via1->host.len;
		if (msg->via1->port!=0){
			/*size+=strlen(msg->via1->hdr.s+size+1)+1;*/
			size += msg->via1->port_str.len + 1; /* +1 for ':'*/
		}
#if 0
		/* no longer necessary, now hots.s contains [] */
		if(send_sock->address.af==AF_INET6) size+=1; /* +1 for ']'*/
#endif
	}
	/* if received needs to be added, add anchor after host and add it, or
	 * overwrite the previous one if already present */
	if (received_len){
		if (msg->via1->received){ /* received already present => overwrite it*/
			via_insert_param=del_lump(msg,
								msg->via1->received->start-buf-1, /*;*/
								msg->via1->received->size+1, /*;*/
								HDR_VIA_T);
		}else if (via_insert_param==0){ /* receive not present, ok */
			via_insert_param=anchor_lump(msg,
								msg->via1->hdr.s-buf+size, 0, HDR_VIA_T);
		}
		if (via_insert_param==0) goto error02; /* free received_buf */
		if (insert_new_lump_after(via_insert_param,
					received_buf, received_len, HDR_VIA_T) ==0 )
			goto error02; /* free received_buf */
	}
	/* if rport needs to be updated, delete it if present and add it's value */
	if (rport_len){
		if (msg->via1->rport){ /* rport already present */
			via_insert_param=del_lump(msg,
								msg->via1->rport->start-buf-1, /*';'*/
								msg->via1->rport->size+1 /* ; */,
								HDR_VIA_T);
		}else if (via_insert_param==0){ /*force rport, no rport present */
			/* no rport, add it */
			via_insert_param=anchor_lump(msg,
								msg->via1->hdr.s-buf+size, 0, HDR_VIA_T);
		}
		if (via_insert_param==0) goto error03; /* free rport_buf */
		if (insert_new_lump_after(via_insert_param, rport_buf, rport_len,
									HDR_VIA_T) ==0 )
			goto error03; /* free rport_buf */
	}

after_update_via1:
	/* add route with path content */
	if(unlikely(!(mode&BUILD_NO_PATH) && msg->path_vec.s &&
					msg->path_vec.len)){
		path_buf.len=ROUTE_PREFIX_LEN+msg->path_vec.len+CRLF_LEN;
		path_buf.s=pkg_malloc(path_buf.len+1);
		if (unlikely(path_buf.s==0)){
			LM_ERR("out of memory\n");
			ser_error=E_OUT_OF_MEM;
			goto error00;
		}
		memcpy(path_buf.s, ROUTE_PREFIX, ROUTE_PREFIX_LEN);
		memcpy(path_buf.s+ROUTE_PREFIX_LEN, msg->path_vec.s,
					msg->path_vec.len);
		memcpy(path_buf.s+ROUTE_PREFIX_LEN+msg->path_vec.len, CRLF, CRLF_LEN);
		path_buf.s[path_buf.len]=0;
		/* insert Route header either before the other routes
		   (if present & parsed), after the local via or after in front of
		   the first via if we don't add a local via */
		if (msg->route){
			path_anchor=anchor_lump(msg, msg->route->name.s-buf, 0,
									HDR_ROUTE_T);
		}else if (likely(via_anchor)){
			path_anchor=via_anchor;
		}else if (likely(msg->via1)){
			path_anchor=anchor_lump(msg, msg->via1->hdr.s-buf, 0,
									HDR_ROUTE_T);
		}else{
			/* if no via1 (theoretically possible for non-sip messages,
			   e.g. http xmlrpc) */
			path_anchor=anchor_lump(msg, msg->headers->name.s-buf, 0,
									HDR_ROUTE_T);
		}
		if (unlikely(path_anchor==0))
			goto error05;
		if (unlikely((path_lump=insert_new_lump_after(path_anchor, path_buf.s,
														path_buf.len,
														HDR_ROUTE_T))==0))
			goto error05;
	}
	/* compute new msg len and fix overlapping zones*/
	new_len=len+body_delta+lumps_len(msg, msg->add_rm, send_info)+via_len;
#ifdef XL_DEBUG
	LM_ERR("new_len(%d)=len(%d)+lumps_len\n", new_len, len);
#endif
	udp_mtu=cfg_get(core, core_cfg, udp_mtu);
	di.proto=PROTO_NONE;
	/* MTU-based protocol fallback: if the built UDP message exceeds the
	 * configured mtu and a fallback flag is set, retry with TCP/TLS/SCTP */
	if (unlikely((send_info->proto==PROTO_UDP) && udp_mtu &&
					(flags & FL_MTU_FB_MASK) && (new_len>udp_mtu)
					&& (!(mode&BUILD_NO_LOCAL_VIA)))){

		di=*send_info; /* copy whole struct - will be used in the Via builder */
		di.proto=PROTO_NONE; /* except the proto */
#ifdef USE_TCP
		if (!tcp_disable && (flags & FL_MTU_TCP_FB) &&
				(di.send_sock=get_send_socket(msg, &send_info->to,
												PROTO_TCP))){
			di.proto=PROTO_TCP;
		}
#ifdef USE_TLS
		else if (!tls_disable && (flags & FL_MTU_TLS_FB) &&
				(di.send_sock=get_send_socket(msg, &send_info->to,
												PROTO_TLS))){
			di.proto=PROTO_TLS;
		}
#endif /* USE_TLS */
#endif /* USE_TCP */
#ifdef USE_SCTP
#ifdef USE_TCP
		else
#endif /* USE_TCP */
		if (!sctp_disable && (flags & FL_MTU_SCTP_FB) &&
				(di.send_sock=get_send_socket(msg, &send_info->to,
												PROTO_SCTP))){
			di.proto=PROTO_SCTP;
		}
#endif /* USE_SCTP */

		if (di.proto!=PROTO_NONE){
			/* rebuild the local Via for the fallback protocol */
			new_len-=via_len;
			if(likely(line_buf)) pkg_free(line_buf);
			line_buf = create_via_hf( &via_len, msg, &di, &branch);
			if (!line_buf){
				LM_ERR("memory allocation failure!\n");
				goto error00;
			}
			new_len+=via_len;
		}
	}
	/* add via header to the list */
	/* try to add it before msg.
 1st via */
	/* add first via, as an anchor for second via*/
	if(likely(line_buf)) {
		if ((via_lump=insert_new_lump_before(via_anchor, line_buf, via_len,
											HDR_VIA_T))==0)
			goto error04;
	}
	if (msg->new_uri.s){
		uri_len=msg->new_uri.len;
		new_len=new_len-msg->first_line.u.request.uri.len+uri_len;
	}
	if(unlikely(mode&BUILD_IN_SHM))
		new_buf=(char*)shm_malloc(new_len+1);
	else
		new_buf=(char*)pkg_malloc(new_len+1);
	if (new_buf==0){
		ser_error=E_OUT_OF_MEM;
		LM_ERR("out of memory\n");
		goto error00;
	}

	offset=s_offset=0;
	if (msg->new_uri.s){
		/* copy message up to uri */
		size=msg->first_line.u.request.uri.s-buf;
		memcpy(new_buf, buf, size);
		offset+=size;
		s_offset+=size;
		/* add our uri */
		memcpy(new_buf+offset, msg->new_uri.s, uri_len);
		offset+=uri_len;
		s_offset+=msg->first_line.u.request.uri.len; /* skip original uri */
	}
	new_buf[new_len]=0;
	/* copy msg adding/removing lumps */
	process_lumps(msg, msg->add_rm, new_buf, &offset, &s_offset, send_info,
					FLAG_MSG_ALL);
	process_lumps(msg, msg->body_lumps, new_buf, &offset, &s_offset,send_info,
					FLAG_MSG_ALL);
	/* copy the rest of the message */
	memcpy(new_buf+offset, buf+s_offset, len-s_offset);
	new_buf[new_len]=0;

	/* update the send_info if udp_mtu affected */
	if (di.proto!=PROTO_NONE) {
		send_info->proto=di.proto;
		send_info->send_sock=di.send_sock;
	}

#ifdef DBG_MSG_QA
	if (new_buf[new_len-1]==0) {
		LM_ERR("0 in the end\n");
		abort();
	}
#endif

	*returned_len=new_len;
	return new_buf;

/* cascading cleanup: each label frees what was allocated up to that point */
error01:
error02:
	if (received_buf) pkg_free(received_buf);
error03:
	if (rport_buf) pkg_free(rport_buf);
error04:
	if (line_buf) pkg_free(line_buf);
error05:
	if (path_buf.s) pkg_free(path_buf.s);
error00:
	*returned_len=0;
	return 0;
}


/*
 * Build the forwardable buffer for a received reply: removes our own
 * (first) Via unless BUILD_NO_VIA1_UPDATE is set, adjusts Content-Length
 * and applies all add_rm/body lumps.
 * Returns a pkg_malloc'ed buffer (caller frees) and fills *returned_len,
 * or 0 on error.
 */
char * generate_res_buf_from_sip_res( struct sip_msg* msg,
				unsigned int *returned_len, unsigned int mode)
{
	unsigned int new_len, via_len, body_delta;
	char* new_buf;
	unsigned offset, s_offset, via_offset;
	char* buf;
	unsigned int len;

	buf=msg->buf;
	len=msg->len;
	new_buf=0;
	if(unlikely(mode&BUILD_NO_VIA1_UPDATE)) {
		via_len = 0;
		via_offset = 0;
	}
	else {
		/* we must remove the first via */
		if (msg->via1->next) {
			/* more Vias in the same header: drop only the first value */
			via_len=msg->via1->bsize;
			via_offset=msg->h_via1->body.s-buf;
		} else {
			/* only one Via value: drop the whole header field */
			via_len=msg->h_via1->len;
			via_offset=msg->h_via1->name.s-buf;
		}
	}

	/* Calculate message body difference and adjust
	 * Content-Length */
	body_delta = lumps_len(msg, msg->body_lumps, 0);
	if (adjust_clen(msg, body_delta,
				(msg->via2? msg->via2->proto:PROTO_UDP)) < 0) {
		LM_ERR("error while adjusting Content-Length\n");
		goto error;
	}

	if(likely(!(mode&BUILD_NO_VIA1_UPDATE))) {
		/* remove the first via*/
		if (del_lump( msg, via_offset, via_len, HDR_VIA_T)==0){
			LM_ERR("error trying to remove first via\n");
			goto error;
		}
	}

	new_len=len+body_delta+lumps_len(msg, msg->add_rm, 0); /*FIXME: we don't
														know the send sock */

	LM_DBG("old size: %d, new size: %d\n", len, new_len);
	new_buf=(char*)pkg_malloc(new_len+1); /* +1 is for debugging
											(\0 to print it )*/
	if (new_buf==0){
		LM_ERR("out of mem\n");
		goto error;
	}
	new_buf[new_len]=0; /* debug: print the message */
	offset=s_offset=0;
	/*FIXME: no send sock*/
	process_lumps(msg, msg->add_rm, new_buf, &offset, &s_offset, 0,
					FLAG_MSG_ALL);/*FIXME:*/
	process_lumps(msg, msg->body_lumps, new_buf, &offset, &s_offset, 0,
					FLAG_MSG_ALL);
	/* copy the rest of the message */
	memcpy(new_buf+offset, buf+s_offset, len-s_offset);
	/* send it!
	 */
	LM_DBG("copied size: orig:%d, new: %d, rest: %d msg=\n%s\n",
			s_offset, offset, len-s_offset, new_buf);

	*returned_len=new_len;
	return new_buf;
error:
	*returned_len=0;
	return 0;
}


/* Convenience wrapper around generate_res_buf_from_sip_res() with
 * default mode (first Via is removed). */
char * build_res_buf_from_sip_res( struct sip_msg* msg,
				unsigned int *returned_len)
{
	return generate_res_buf_from_sip_res(msg, returned_len, 0);
}


/*
 * Build a local reply (status `code`, reason `text`) from a received
 * request, copying Via/To/From/CSeq/Call-ID (and Record-Route for
 * 1xx/2xx) from the request. (Continues past this chunk.)
 */
char * build_res_buf_from_sip_req( unsigned int code, str *text ,str *new_tag,
		struct sip_msg* msg, unsigned int *returned_len,
		struct bookmark *bmark)
{
	char *buf, *p;
	unsigned int len,foo;
	struct hdr_field *hdr;
	struct lump_rpl *lump;
	struct lump_rpl *body;
	int i;
	char* received_buf;
	unsigned int received_len;
	char* rport_buf;
	unsigned int rport_len;
	char* warning_buf;
	unsigned int warning_len;
	char* content_len_buf;
	unsigned int content_len_len;
	char *after_body;
	str to_tag;
	char *totags;
	int httpreq;
	char *pvia;

	body = 0;
	buf=0;
	received_buf=rport_buf=warning_buf=content_len_buf=0;
	received_len=rport_len=warning_len=content_len_len=0;

	to_tag.s=0; /* fixes gcc 4.0 warning */
	to_tag.len=0;

	/* force parsing all headers -- we want to return all
	   Via's in the reply and they may be scattered down to the
	   end of header (non-block Vias are a really poor property
	   of SIP :( ) */
	if (parse_headers( msg, HDR_EOH_F, 0 )==-1) {
		LM_ERR("alas, parse_headers failed\n");
		goto error00;
	}

	/*computes the length of the new response buffer*/
	len = 0;

	httpreq = IS_HTTP(msg);

	/* check if received needs to be added */
	if (received_test(msg)) {
		if ((received_buf=received_builder(msg,&received_len))==0) {
			LM_ERR("alas, received_builder failed\n");
			goto error00;
		}
	}
	/* check if rport needs to be updated */
	if ( ((msg->msg_flags|global_req_flags)&FL_FORCE_RPORT)||
		(msg->via1->rport /*&& msg->via1->rport->value.s==0*/)){
		if ((rport_buf=rport_builder(msg, &rport_len))==0){
			LM_ERR("rport_builder failed\n");
			goto error01; /* free everything */
		}
		if (msg->via1->rport)
			len -= msg->via1->rport->size+1; /* include ';' */
	}

	/* first line */
	len +=
msg->first_line.u.request.version.len + 1/*space*/ + 3/*code*/ + 1/*space*/ + text->len + CRLF_LEN/*new line*/; /*headers that will be copied (TO, FROM, CSEQ,CALLID,VIA)*/ for ( hdr=msg->headers ; hdr ; hdr=hdr->next ) { switch (hdr->type) { case HDR_TO_T: if (new_tag && new_tag->len) { to_tag=get_to(msg)->tag_value; if ( to_tag.len || to_tag.s ) len+=new_tag->len-to_tag.len; else len+=new_tag->len+TOTAG_TOKEN_LEN/*";tag="*/; } len += hdr->len; break; case HDR_VIA_T: /* we always add CRLF to via*/ len+=(hdr->body.s+hdr->body.len)-hdr->name.s+CRLF_LEN; if (hdr==msg->h_via1) len += received_len+rport_len; break; case HDR_RECORDROUTE_T: /* RR only for 1xx and 2xx replies */ if (code<180 || code>=300) break; case HDR_FROM_T: case HDR_CALLID_T: case HDR_CSEQ_T: /* we keep the original termination for these headers*/ len += hdr->len; break; default: /* do nothing, we are interested only in the above headers */ ; } } /* lumps length */ for(lump=msg->reply_lump;lump;lump=lump->next) { len += lump->text.len; if (lump->flags&LUMP_RPL_BODY) body = lump; } /* server header */ if (server_signature && server_hdr.len) len += server_hdr.len + CRLF_LEN; /* warning hdr */ if (sip_warning) { warning_buf = warning_builder(msg,&warning_len); if (warning_buf) len += warning_len + CRLF_LEN; else LM_WARN("warning skipped -- too big\n"); } /* content length hdr */ if (body) { content_len_buf = int2str(body->text.len, (int*)&content_len_len); len += CONTENT_LENGTH_LEN + content_len_len + CRLF_LEN; } else { len += CONTENT_LENGTH_LEN + 1/*0*/ + CRLF_LEN; } /* end of message */ len += CRLF_LEN; /*new line*/ /*allocating mem*/ buf = (char*) pkg_malloc( len+1 ); if (!buf) { LM_ERR("out of memory; needs %d\n",len); goto error01; } /* filling the buffer*/ p=buf; /* first line */ memcpy( p , msg->first_line.u.request.version.s , msg->first_line.u.request.version.len); p += msg->first_line.u.request.version.len; *(p++) = ' ' ; /*code*/ for ( i=2 , foo = code ; i>=0 ; i-- , foo=foo/10 ) *(p+i) = '0' 
+ foo - ( foo/10 )*10; p += 3; *(p++) = ' ' ; memcpy( p , text->s , text->len ); p += text->len; memcpy( p, CRLF, CRLF_LEN ); p+=CRLF_LEN; /* headers*/ for ( hdr=msg->headers ; hdr ; hdr=hdr->next ) { switch (hdr->type) { case HDR_VIA_T: /* if is HTTP, backup start of Via header in response */ if(unlikely(httpreq)) pvia = p; if (hdr==msg->h_via1){ if (rport_buf){ if (msg->via1->rport){ /* delete the old one */ /* copy until rport */ append_str_trans( p, hdr->name.s , msg->via1->rport->start-hdr->name.s-1,msg); /* copy new rport */ append_str(p, rport_buf, rport_len); /* copy the rest of the via */ append_str_trans(p, msg->via1->rport->start+ msg->via1->rport->size, hdr->body.s+hdr->body.len- msg->via1->rport->start- msg->via1->rport->size, msg); }else{ /* just append the new one */ /* normal whole via copy */ append_str_trans( p, hdr->name.s , (hdr->body.s+hdr->body.len)-hdr->name.s, msg); append_str(p, rport_buf, rport_len); } }else{ /* normal whole via copy */ append_str_trans( p, hdr->name.s , (hdr->body.s+hdr->body.len)-hdr->name.s, msg); } if (received_buf) append_str( p, received_buf, received_len); }else{ /* normal whole via copy */ append_str_trans( p, hdr->name.s, (hdr->body.s+hdr->body.len)-hdr->name.s, msg); } append_str( p, CRLF,CRLF_LEN); /* if is HTTP, replace Via with Sia * - HTTP Via format is different than SIP Via */ if(unlikely(httpreq)) *pvia = 'S'; break; case HDR_RECORDROUTE_T: /* RR only for 1xx and 2xx replies */ if (code<180 || code>=300) break; append_str(p, hdr->name.s, hdr->len); break; case HDR_TO_T: if (new_tag && new_tag->len){ if (to_tag.s ) { /* replacement */ /* before to-tag */ append_str( p, hdr->name.s, to_tag.s-hdr->name.s); /* to tag replacement */ bmark->to_tag_val.s=p; bmark->to_tag_val.len=new_tag->len; append_str( p, new_tag->s,new_tag->len); /* the rest after to-tag */ append_str( p, to_tag.s+to_tag.len, hdr->name.s+hdr->len-(to_tag.s+to_tag.len)); }else{ /* adding a new to-tag */ after_body=hdr->body.s+hdr->body.len; 
append_str( p, hdr->name.s, after_body-hdr->name.s); append_str(p, TOTAG_TOKEN, TOTAG_TOKEN_LEN); bmark->to_tag_val.s=p; bmark->to_tag_val.len=new_tag->len; append_str( p, new_tag->s,new_tag->len); append_str( p, after_body, hdr->name.s+hdr->len-after_body); } break; } /* no new to-tag -- proceed to 1:1 copying */ totags=((struct to_body*)(hdr->parsed))->tag_value.s; if (totags) { bmark->to_tag_val.s=p+(totags-hdr->name.s); bmark->to_tag_val.len= ((struct to_body*)(hdr->parsed))->tag_value.len; } else { bmark->to_tag_val.len = 0; bmark->to_tag_val.s = p+(hdr->body.s+hdr->body.len-hdr->name.s); } /* no break */ case HDR_FROM_T: case HDR_CALLID_T: case HDR_CSEQ_T: append_str(p, hdr->name.s, hdr->len); break; default: /* do nothing, we are interested only in the above headers */ ; } /* end switch */ } /* end for */ /* lumps */ for(lump=msg->reply_lump;lump;lump=lump->next) if (lump->flags&LUMP_RPL_HDR){ memcpy(p,lump->text.s,lump->text.len); p += lump->text.len; } /* server header */ if (server_signature && server_hdr.len>0) { memcpy( p, server_hdr.s, server_hdr.len ); p+=server_hdr.len; memcpy( p, CRLF, CRLF_LEN ); p+=CRLF_LEN; } /* content_length hdr */ if (content_len_len) { append_str( p, CONTENT_LENGTH, CONTENT_LENGTH_LEN); append_str( p, content_len_buf, content_len_len ); append_str( p, CRLF, CRLF_LEN ); } else { append_str( p, CONTENT_LENGTH"0"CRLF,CONTENT_LENGTH_LEN+1+CRLF_LEN); } /* warning header */ if (warning_buf) { memcpy( p, warning_buf, warning_len); p+=warning_len; memcpy( p, CRLF, CRLF_LEN); p+=CRLF_LEN; } /*end of message*/ memcpy( p, CRLF, CRLF_LEN ); p+=CRLF_LEN; /* body */ if (body) { memcpy ( p, body->text.s, body->text.len ); p+=body->text.len; } if (len!=p-buf) LM_CRIT("diff len=%d p-buf=%d\n", len, (int)(p-buf)); *(p) = 0; *returned_len = len; /* in req2reply, received_buf is not introduced to lumps and needs to be deleted here */ if (received_buf) pkg_free(received_buf); if (rport_buf) pkg_free(rport_buf); return buf; error01: if 
(received_buf) pkg_free(received_buf); if (rport_buf) pkg_free(rport_buf); error00: *returned_len=0; return 0; } /* return number of chars printed or 0 if space exceeded; assumes buffer size of at least MAX_BRANCH_PARAM_LEN */ int branch_builder( unsigned int hash_index, /* only either parameter useful */ unsigned int label, char * char_v, int branch, char *branch_str, int *len ) { char *begin; int size; /* hash id provided ... start with it */ size=MAX_BRANCH_PARAM_LEN; begin=branch_str; *len=0; memcpy(begin, MCOOKIE, MCOOKIE_LEN ); size-=MCOOKIE_LEN;begin+=MCOOKIE_LEN; if (int2reverse_hex( &begin, &size, hash_index)==-1) return 0; if (size) { *begin=BRANCH_SEPARATOR; begin++; size--; } else return 0; /* string with request's characteristic value ... use it ... */ if (char_v) { if (memcpy(begin,char_v,MD5_LEN)) { begin+=MD5_LEN; size-=MD5_LEN; } else return 0; } else { /* ... use the "label" value otherwise */ if (int2reverse_hex( &begin, &size, label )==-1) return 0; } if (size) { *begin=BRANCH_SEPARATOR; begin++; size--; } else return 0; if (int2reverse_hex( &begin, &size, branch)==-1) return 0; *len=MAX_BRANCH_PARAM_LEN-size; return size; } /* uses only the send_info->send_socket, send_info->proto and * send_info->comp (so that a send_info used for sending can be passed * to this function w/o changes and the correct via will be built) */ char* via_builder( unsigned int *len, struct dest_info* send_info /* where to send the reply */, str* branch, str* extra_params, struct hostport* hp) { unsigned int via_len, extra_len; char *line_buf; int max_len; int via_prefix_len; str* address_str; /* address displayed in via */ str* port_str; /* port no displayed in via */ struct socket_info* send_sock; int comp_len, comp_name_len; #ifdef USE_COMP char* comp_name; #endif /* USE_COMP */ int port; struct ip_addr ip; union sockaddr_union *from = NULL; union sockaddr_union local_addr; struct tcp_connection *con = NULL; send_sock=send_info->send_sock; /* use pre-set address in 
via, the outbound socket alias or address one */ if (hp && hp->host->len) address_str=hp->host; else if(send_sock->useinfo.name.len>0) address_str=&(send_sock->useinfo.name); else address_str=&(send_sock->address_str); if (hp && hp->port->len) port_str=hp->port; else if(send_sock->useinfo.port_no>0) port_str=&(send_sock->useinfo.port_no_str); else port_str=&(send_sock->port_no_str); comp_len=comp_name_len=0; #ifdef USE_COMP comp_name=0; switch(send_info->comp){ case COMP_NONE: break; case COMP_SIGCOMP: comp_len=COMP_PARAM_LEN; comp_name_len=SIGCOMP_NAME_LEN; comp_name=SIGCOMP_NAME; break; case COMP_SERGZ: comp_len=COMP_PARAM_LEN; comp_name_len=SERGZ_NAME_LEN; comp_name=SERGZ_NAME; break; default: LM_CRIT("unknown comp %d\n", send_info->comp); /* continue, we'll just ignore comp */ } #endif /* USE_COMP */ via_prefix_len=MY_VIA_LEN+(send_info->proto==PROTO_SCTP); max_len=via_prefix_len +address_str->len /* space in MY_VIA */ +2 /* just in case it is a v6 address ... [ ] */ +1 /*':'*/+port_str->len +(branch?(MY_BRANCH_LEN+branch->len):0) +(extra_params?extra_params->len:0) +comp_len+comp_name_len +CRLF_LEN+1; line_buf=pkg_malloc( max_len ); if (line_buf==0){ ser_error=E_OUT_OF_MEM; LM_ERR("out of memory\n"); return 0; } extra_len=0; via_len=via_prefix_len+address_str->len; /*space included in MY_VIA*/ memcpy(line_buf, MY_VIA, MY_VIA_LEN); if (send_info->proto==PROTO_UDP){ /* do nothing */ }else if (send_info->proto==PROTO_TCP){ memcpy(line_buf+MY_VIA_LEN-4, "TCP ", 4); }else if (send_info->proto==PROTO_TLS){ memcpy(line_buf+MY_VIA_LEN-4, "TLS ", 4); }else if (send_info->proto==PROTO_SCTP){ memcpy(line_buf+MY_VIA_LEN-4, "SCTP ", 5); }else if (send_info->proto==PROTO_WS){ if (unlikely(send_info->send_flags.f & SND_F_FORCE_SOCKET && send_info->send_sock)) { local_addr = send_info->send_sock->su; su_setport(&local_addr, 0); /* any local port will do */ from = &local_addr; } port = su_getport(&send_info->to); if (likely(port)) { su2ip_addr(&ip, &send_info->to); con = 
tcpconn_get(send_info->id, &ip, port, from, 0); } else if (likely(send_info->id)) con = tcpconn_get(send_info->id, 0, 0, 0, 0); else { LM_CRIT("null_id & to\n"); pkg_free(line_buf); return 0; } if (con == NULL) { LM_WARN("TCP/TLS connection (id: %d) for WebSocket could not be found\n", send_info->id); pkg_free(line_buf); return 0; } if (con->rcv.proto==PROTO_WS) { memcpy(line_buf+MY_VIA_LEN-4, "WS ", 3); } else if (con->rcv.proto==PROTO_WSS) { memcpy(line_buf+MY_VIA_LEN-4, "WSS ", 4); } else { tcpconn_put(con); LM_CRIT("unknown proto %d\n", con->rcv.proto); pkg_free(line_buf); return 0; } tcpconn_put(con); }else if (send_info->proto==PROTO_WSS){ memcpy(line_buf+MY_VIA_LEN-4, "WSS ", 4); }else{ LM_CRIT("unknown proto %d\n", send_info->proto); pkg_free(line_buf); return 0; } /* add [] only if ipv6 and outbound socket address is used; * if using pre-set no check is made */ if ((send_sock->address.af==AF_INET6) && (address_str==&(send_sock->address_str))) { line_buf[via_prefix_len]='['; line_buf[via_prefix_len+1+address_str->len]=']'; extra_len=1; via_len+=2; /* [ ]*/ } memcpy(line_buf+via_prefix_len+extra_len, address_str->s, address_str->len); if ((send_sock->port_no!=SIP_PORT) || (port_str!=&send_sock->port_no_str)){ line_buf[via_len]=':'; via_len++; memcpy(line_buf+via_len, port_str->s, port_str->len); via_len+=port_str->len; } /* branch parameter */ if (branch){ memcpy(line_buf+via_len, MY_BRANCH, MY_BRANCH_LEN ); via_len+=MY_BRANCH_LEN; memcpy(line_buf+via_len, branch->s, branch->len ); via_len+=branch->len; } /* extra params */ if (extra_params){ memcpy(line_buf+via_len, extra_params->s, extra_params->len); via_len+=extra_params->len; } #ifdef USE_COMP /* comp */ if (comp_len){ memcpy(line_buf+via_len, COMP_PARAM, COMP_PARAM_LEN); via_len+=COMP_PARAM_LEN; memcpy(line_buf+via_len, comp_name, comp_name_len); via_len+=comp_name_len; } #endif memcpy(line_buf+via_len, CRLF, CRLF_LEN); via_len+=CRLF_LEN; line_buf[via_len]=0; /* null terminate the string*/ *len = 
via_len; return line_buf; } /* creates a via header honoring the protocol of the incomming socket * msg is an optional parameter */ char* create_via_hf( unsigned int *len, struct sip_msg *msg, struct dest_info* send_info /* where to send the reply */, str* branch) { char* via; str extra_params; struct hostport hp; #if defined USE_TCP || defined USE_SCTP char* id_buf; unsigned int id_len; id_buf=0; id_len=0; #endif extra_params.len=0; extra_params.s=0; #if defined USE_TCP || defined USE_SCTP /* add id if tcp */ if (msg && ( #ifdef USE_TCP (msg->rcv.proto==PROTO_TCP) #ifdef USE_TLS || (msg->rcv.proto==PROTO_TLS) #endif #ifdef USE_SCTP || #endif /* USE_SCTP */ #endif /* USE_TCP */ #ifdef USE_SCTP (msg->rcv.proto==PROTO_SCTP) #endif /* USE_SCTP */ )){ if ((id_buf=id_builder(msg, &id_len))==0){ LM_ERR("id_builder failed\n"); return 0; /* we don't need to free anything, nothing alloc'ed yet*/ } LM_DBG("id added: <%.*s>, rcv proto=%d\n", (int)id_len, id_buf, msg->rcv.proto); extra_params.s=id_buf; extra_params.len=id_len; } #endif /* USE_TCP || USE_SCTP */ /* test and add rport parameter to local via - rfc3581 */ if(msg && msg->msg_flags&FL_ADD_LOCAL_RPORT) { /* params so far + ';rport' + '\0' */ via = (char*)pkg_malloc(extra_params.len+RPORT_LEN); if(via==0) { LM_ERR("building local rport via param failed\n"); if (extra_params.s) pkg_free(extra_params.s); return 0; } if(extra_params.len!=0) { memcpy(via, extra_params.s, extra_params.len); pkg_free(extra_params.s); } memcpy(via + extra_params.len, RPORT, RPORT_LEN-1); extra_params.s = via; extra_params.len += RPORT_LEN-1; extra_params.s[extra_params.len]='\0'; } set_hostport(&hp, msg); via = via_builder( len, send_info, branch, extra_params.len?&extra_params:0, &hp); /* we do not need extra_params any more, already in the new via header */ if (extra_params.s) pkg_free(extra_params.s); return via; } /* builds a char* buffer from message headers without body * first line is excluded in case of skip_first_line=1 * error is 
set -1 if the memory allocation failes */ char * build_only_headers( struct sip_msg* msg, int skip_first_line, unsigned int *returned_len, int *error, struct dest_info* send_info) { char *buf, *new_buf; unsigned int offset, s_offset, len, new_len; *error = 0; buf = msg->buf; if (skip_first_line) s_offset = msg->headers->name.s - buf; else s_offset = 0; /* original length without body, and without final \r\n */ len = msg->unparsed - buf; /* new msg length */ new_len = len - /* original length */ s_offset + /* skipped first line */ lumps_len(msg, msg->add_rm, send_info); /* lumps */ if (new_len == 0) { *returned_len = 0; return 0; } new_buf = (char *)pkg_malloc(new_len+1); if (!new_buf) { LM_ERR("Not enough memory\n"); *error = -1; return 0; } new_buf[0] = 0; offset = 0; /* copy message lumps */ process_lumps(msg, msg->add_rm, new_buf, &offset, &s_offset, send_info, FLAG_MSG_ALL); /* copy the rest of the message without body */ if (len > s_offset) { memcpy(new_buf+offset, buf+s_offset, len-s_offset); offset += (len-s_offset); } new_buf[offset] = 0; *returned_len = offset; return new_buf; } /* builds a char* buffer from message body * error is set -1 if the memory allocation failes */ char * build_body( struct sip_msg* msg, unsigned int *returned_len, int *error, struct dest_info* send_info) { char *buf, *new_buf, *body; unsigned int offset, s_offset, len, new_len; *error = 0; body = get_body(msg); if (!body || (body[0] == 0)) { *returned_len = 0; return 0; } buf = msg->buf; s_offset = body - buf; /* original length of msg with body */ len = msg->len; /* new body length */ new_len = len - /* original length */ s_offset + /* msg without body */ lumps_len(msg, msg->body_lumps, send_info); /* lumps */ new_buf = (char *)pkg_malloc(new_len+1); if (!new_buf) { LM_ERR("Not enough memory\n"); *error = -1; return 0; } new_buf[0] = 0; offset = 0; /* copy body lumps */ process_lumps(msg, msg->body_lumps, new_buf, &offset, &s_offset, send_info, FLAG_MSG_ALL); /* copy the rest of 
the message without body */ if (len > s_offset) { memcpy(new_buf+offset, buf+s_offset, len-s_offset); offset += (len-s_offset); } new_buf[offset] = 0; *returned_len = offset; return new_buf; } /* builds a char* buffer from SIP message including body * The function adjusts the Content-Length HF according * to body lumps in case of touch_clen=1. */ char * build_all( struct sip_msg* msg, int touch_clen, unsigned int *returned_len, int *error, struct dest_info* send_info) { char *buf, *new_buf; unsigned int offset, s_offset, len, new_len; unsigned int body_delta; *error = 0; /* Calculate message body difference */ body_delta = lumps_len(msg, msg->body_lumps, send_info); if (touch_clen) { /* adjust Content-Length */ if (adjust_clen(msg, body_delta, send_info->proto) < 0) { LM_ERR("Error while adjusting Content-Length\n"); *error = -1; return 0; } } buf = msg->buf; /* original msg length */ len = msg->len; /* new msg length */ new_len = len + /* original length */ lumps_len(msg, msg->add_rm, send_info) + /* hdr lumps */ body_delta; /* body lumps */ if (new_len == 0) { returned_len = 0; return 0; } new_buf = (char *)pkg_malloc(new_len+1); if (!new_buf) { LM_ERR("Not enough memory\n"); *error = -1; return 0; } new_buf[0] = 0; offset = s_offset = 0; /* copy message lumps */ process_lumps(msg, msg->add_rm, new_buf, &offset, &s_offset, send_info, FLAG_MSG_ALL); /* copy body lumps */ process_lumps(msg, msg->body_lumps, new_buf, &offset, &s_offset, send_info, FLAG_MSG_ALL); /* copy the rest of the message */ memcpy(new_buf+offset, buf+s_offset, len-s_offset); offset += (len-s_offset); new_buf[offset] = 0; *returned_len = offset; return new_buf; } /** * parse buf in msg and fill several fields */ int build_sip_msg_from_buf(struct sip_msg *msg, char *buf, int len, unsigned int id) { if(msg==0 || buf==0) return -1; memset(msg, 0, sizeof(sip_msg_t)); msg->id = id; msg->buf = buf; msg->len = len; if (parse_msg(buf, len, msg)!=0) { LM_ERR("parsing failed"); return -1; } 
msg->set_global_address=default_global_address; msg->set_global_port=default_global_port; return 0; }
gpl-2.0
rex-xxx/mt6572_x201
frameworks/compile/libbcc/runtime/test/Unit/fixunsdfdi_test.c
1
3139
//===-- fixunsdfdi_test.c - Test __fixunsdfdi -----------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file tests __fixunsdfdi for the compiler_rt library. // //===----------------------------------------------------------------------===// #include "int_lib.h" #include <stdio.h> // Returns: convert a to a unsigned long long, rounding toward zero. // Negative values all become zero. // Assumption: double is a IEEE 64 bit floating point type // du_int is a 64 bit integral type // value in double is representable in du_int or is negative // (no range checking performed) // seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm du_int __fixunsdfdi(double a); int test__fixunsdfdi(double a, du_int expected) { du_int x = __fixunsdfdi(a); if (x != expected) printf("error in __fixunsdfdi(%A) = %llX, expected %llX\n", a, x, expected); return x != expected; } char assumption_1[sizeof(du_int) == 2*sizeof(su_int)] = {0}; char assumption_2[sizeof(su_int)*CHAR_BIT == 32] = {0}; char assumption_3[sizeof(double)*CHAR_BIT == 64] = {0}; int main() { if (test__fixunsdfdi(0.0, 0)) return 1; if (test__fixunsdfdi(0.5, 0)) return 1; if (test__fixunsdfdi(0.99, 0)) return 1; if (test__fixunsdfdi(1.0, 1)) return 1; if (test__fixunsdfdi(1.5, 1)) return 1; if (test__fixunsdfdi(1.99, 1)) return 1; if (test__fixunsdfdi(2.0, 2)) return 1; if (test__fixunsdfdi(2.01, 2)) return 1; if (test__fixunsdfdi(-0.5, 0)) return 1; if (test__fixunsdfdi(-0.99, 0)) return 1; #if !TARGET_LIBGCC if (test__fixunsdfdi(-1.0, 0)) // libgcc ignores "returns 0 for negative input" spec return 1; if (test__fixunsdfdi(-1.5, 0)) return 1; if (test__fixunsdfdi(-1.99, 0)) return 1; if (test__fixunsdfdi(-2.0, 0)) return 1; if (test__fixunsdfdi(-2.01, 0)) return 1; #endif if 
(test__fixunsdfdi(0x1.FFFFFEp+62, 0x7FFFFF8000000000LL)) return 1; if (test__fixunsdfdi(0x1.FFFFFCp+62, 0x7FFFFF0000000000LL)) return 1; #if !TARGET_LIBGCC if (test__fixunsdfdi(-0x1.FFFFFEp+62, 0)) return 1; if (test__fixunsdfdi(-0x1.FFFFFCp+62, 0)) return 1; #endif if (test__fixunsdfdi(0x1.FFFFFFFFFFFFFp+63, 0xFFFFFFFFFFFFF800LL)) return 1; if (test__fixunsdfdi(0x1.0000000000000p+63, 0x8000000000000000LL)) return 1; if (test__fixunsdfdi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFC00LL)) return 1; if (test__fixunsdfdi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFF800LL)) return 1; #if !TARGET_LIBGCC if (test__fixunsdfdi(-0x1.FFFFFFFFFFFFFp+62, 0)) return 1; if (test__fixunsdfdi(-0x1.FFFFFFFFFFFFEp+62, 0)) return 1; #endif return 0; }
gpl-2.0
stephenjschaefer/MMCd
mmcd.c
1
69592
/* * $Id: mmcd.c,v 1.16 2003/07/03 10:29:50 dmitry Exp $ * * Copyright (c) 2003, Dmitry Yurtaev <dm1try@umail.ru> * * This is free software; you can redistribute it and/or modify it under the * terms of the GNU General Public License as published by the Free Software * Foundation; either version 2, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. */ #include <PalmOS.h> #include <HsExt.h> #include "mmcd.h" #include "format.h" #include "graph.h" #include "panel.h" #include "StringMgr.h" // UInt32 screenWidth, screenHeight, screenDepth; Int16 screenDensity; FontPtr bigFont; // global state vars Boolean colorMode = false; UInt32 ticksPerSecond; UInt16 portId; UInt16 autoOff; Boolean talkingECU = false; Boolean portOpen = false; Boolean newLog = false; // Identify first time log file is written UInt8 currentSensor = 0; UInt8 currentBank = 0; UInt8 currentMode = 0; UInt8 scanError = 0; Int8 lastSensorReturned = 0; #define REVIEW_MODE 0 #define MONITOR_MODE 1 UInt32 graphSensors = 0; UInt32 captureSensors = 0; UInt8 selectedSensor = 0; GraphSample currentSample; FileHand stream; //Char streamName[32]; Char streamName[24]; // cbhack Looking for bytes UInt16 streamAttr; // Char editFormTitle[32]; Char editFormTitle[16]; // cbhack Need a little more space... 
Int32 alarmFreq[5] = { 0, 4000, 1000, 300, 50 }; // custom widgets GraphType graph; PanelType panel; Coord graphX, graphY, graphW, graphH; //#define DELAY_DEBUG// Code to debug serial processing delay //#define ECU_DEBUG // Enable debugging mode for simulation // Consider enhancing to enable simulation when serial port not available #ifndef ECU_DEBUG #define ECU_BAUDRATE 1920 #define SIMULATE 0 #else #define ECU_BAUDRATE 9600 #define SIMULATE 1 #endif static const char egrtInput[6][12] = { "3.0 BAR", "2.5 BAR", "2.0 BAR", "Wideband O2", "EGRT", "0-5V" }; static const char egrtSlug[6][4] = { "MAP", "MAP", "MAP", "WBO2", "EGRT", "0-5V" }; //typedef struct Prefs { // This was declaring a static varible Prefs of type Prefs typedef struct { // Just declare the struct. UInt32 exists; UInt32 capture; UInt32 graph; UInt8 currentBank; UInt8 currentMode; UInt8 addr[SENSOR_COUNT]; UInt8 metricUnit; UInt8 mapScaling; UInt8 vehicleSelect; UInt32 baseTiming; UInt32 serialSpeed; UInt32 ecuSpeed; UInt8 overClock; //Insert SS 4/3/05 Char slug[SENSOR_COUNT][4]; UInt16 topVisible; UInt8 selectedSensor; UInt32 polarity; UInt8 threshold[SENSOR_COUNT]; UInt8 sound[SENSOR_COUNT]; UInt8 trigger[SENSOR_COUNT]; // Added persistant storage for triggers Boolean hideUnused; Boolean numericMode; Boolean audibleAlarms; Boolean autologEnabled; Boolean o2NonLinEnabled; UInt8 bigNumSelect[8]; } PrefsType; // Changed name to make it more obvious what was going on. //PrefsType *Prefs; // I hate type == var declarations... this one was unintentional.. Prefs vcar not even used UInt16 topVisible; UInt32 serialSpeed; UInt32 ecuSpeed; UInt8 overClock; // Insert SS 4/3/05 UInt8 metricUnit; UInt8 mapScaling; UInt8 vehicleSelect; UInt32 baseTiming; UInt8 audibleAlarms; UInt8 autologEnabled; // Why is this a UInt8 instead of bool ?? 
UInt8 o2NonLinEnabled; UInt8 numericMode; // show sensor numeric value in Monitor mode Boolean waitingAnswer; // Moved these declarations to top of source UInt32 receiveTimeout = 0; GraphSample scratchSample; // Original code GraphSample peakSample; // Max value GraphSample minSample; // Min value GraphSample resetSample; // Simple way to clear struct to null UInt32 lastGraphUpdate; // Keeps track of last time low was written UInt32 lastTimeTicks; // Used to sync Seconds + Ticks UInt32 lastTimeSeconds; // to create mS resolution Boolean peakEnabled; // Boolean logging; // Moved logging status to global UInt16 postLog = 30; // Number of seconds in postlog - consider making user defined UInt32 autologTimer; // Counts down autolog time remaining Boolean userLogging; // User requesting logging (has priority over autolog) extern UInt8 bigNumSelect[8]; // Used in graph to select senors for display /* Insert SP 06/01/03 - Hide Unused preference */ Boolean fHideUnused; /* End Insert */ UInt8 sensorPosition[SENSOR_COUNT] = { // 17, 14, 19, 9, 18, 3, 10, 22, // 4, 11, 21, 13, 12, 23, 20, 0, 17, 14, 20, 9, 18, 3, 10, 22, // Swapped INJD / INJP 4, 11, 21, 13, 12, 23, 19, 0, 5, 15, 6, 16, 7, 1, 8, 2, 24, 25, 26, 27, 28, 29, 30, 31, }; /* Insert SP 06/01/03 - default sensor order */ const UInt8 defSensorPosition[SENSOR_COUNT] = { // 17, 14, 19, 9, 18, 3, 10, 22, // 4, 11, 21, 13, 12, 23, 20, 0, 17, 14, 20, 9, 18, 3, 10, 22, 4, 11, 21, 13, 12, 23, 19, 0, 5, 15, 6, 16, 7, 1, 8, 2, 24, 25, 26, 27, 28, 29, 30, 31, }; /* End Insert */ void AllocateMem() // Called during Main init { // Prefs = MemPtrNew(sizeof(Prefs)); // Prefs var wasn't even being used } void ReleaseMem() // Called just before exiting Main { // MemPtrFree(Prefs); } // UInt8 GetNextSensorOriginal(UInt8 index) // { // return (index+1) % 32; //Original logic // } UInt8 GetNextSensor(UInt8 currentsensor) { UInt8 index = 0; // Find current sensor and return next in row/column order while( sensorPosition[index++] != 
currentsensor); index = index % 32; if (sensorPosition[index] == lastSensorReturned){ // Stuck at the end of the hide list index = 0; // Return first sensor in list } lastSensorReturned = sensorPosition[index]; return sensorPosition[index]; } // calculate injector duty cycle from RPM and injector pulse width void computeDerivatives(GraphSample* sample) { Int32 v; if(sample->dataPresent & 1L << IDX_RPM && // Modified to include INJD as a qualifier for display. sample->dataPresent & 1L << IDX_INJP && captureSensors & 1L << IDX_INJD) { // Only update if INJD is enabled v = sample->data[IDX_INJP] * sample->data[IDX_RPM] / 117L; sample->data[IDX_INJD] = MIN(v, 255); sample->dataPresent |= 1L << IDX_INJD; // Mark data as being available for display } // Update vehicle speed if(sample->dataPresent & 1L << IDX_VSPD) { if (sample->data[IDX_INJD] == 0) // Not sure if runtime library will handle divide by zero case v = 255; else v = sample->data[IDX_VSPD] * 100L / ( 28L * sample->data[IDX_INJD] ); // MPH / GPH = MPG sample->data[IDX_MPG] = MIN(v, 255); sample->dataPresent |= 1L << IDX_MPG; } } /* transpose function - row to column orientation */ #define XPOSE(i) ((i&0x18)|(i<<1&6)|(i>>2&1)) /* Insert SP 06/01/03 - hide/unhide unused sensors */ void HideUnused(void) { UInt8 i, j = 0; if(currentMode == REVIEW_MODE) { for(i = 0; i < SENSOR_COUNT; i++) { UInt8 xi = XPOSE(i); SensorType *sensor = &_pnlSensor[defSensorPosition[xi]]; if(sensor->exists && (currentSample.dataPresent & (1L << defSensorPosition[xi]))) { sensorPosition[XPOSE(j)] = defSensorPosition[xi]; j++; } } } else if(currentMode == MONITOR_MODE) { for(i = 0; i < SENSOR_COUNT; i++) { UInt8 xi = XPOSE(i); SensorType *sensor = &_pnlSensor[defSensorPosition[xi]]; if(sensor->exists && (sensor->capture || sensor->graph)) { sensorPosition[XPOSE(j)] = defSensorPosition[xi]; j++; } } } else { return; } /* fill remaining slots with blanks */ for(; j < SENSOR_COUNT; j++) { sensorPosition[XPOSE(j)] = 0; } } void 
UnhideUnused(void) { UInt8 i; for(i = 0; i < SENSOR_COUNT; i++) sensorPosition[i] = defSensorPosition[i]; } void RefreshPanel(void) { if(fHideUnused) HideUnused(); PnlSetBank(&panel, &sensorPosition[currentBank * 8]); } /* End Insert */ /* * return pointer to object in form by its identifier, if found * NULL otherwise */ void *FrmGetObjectPtrById(FormType *form, UInt16 id) { UInt16 index = FrmGetObjectIndex(form, id); return index == frmInvalidObjectId ? NULL : FrmGetObjectPtr(form, index); } /* * graph-callback stream reader */ Int32 streamReader(UInt16 id, Int32 streamOffset, GraphSample *pBuffer, Int32 sampleCount) { Int32 samplesRead = 0, i; Err err; // stream length request if(pBuffer == NULL) { FileTell(stream, &samplesRead, &err); return samplesRead / sizeof(GraphSample); } if(sampleCount == 0) return 0; FileSeek(stream, streamOffset * sizeof(GraphSample), fileOriginBeginning); samplesRead = FileRead(stream, pBuffer, sizeof(GraphSample), sampleCount, &err); if(samplesRead < sampleCount) MemSet(&pBuffer[samplesRead], (sampleCount - samplesRead) * sizeof(GraphSample), 0); // for compatibility with old logs for(i = 0; i < samplesRead; i++) { computeDerivatives(pBuffer + i); } return samplesRead; } Int32 streamWriter(GraphSample *sample) { Int32 samplesWritten = 0; Err err; FileSeek(stream, 0, fileOriginEnd); samplesWritten = FileWrite(stream, sample, sizeof(GraphSample), 1, &err); return samplesWritten; } void preLog(UInt8 store, GraphSample *sample) { static GraphSample preLogSamples[21]; // Prelog sample storage; static UInt8 head,tail; static UInt8 maxPreLog=20; // Var so it can be changed programatically if (store){ // Save data to pre-log circular queue head = ++head % maxPreLog; // wrap around to beginning of queue preLogSamples[head]=*sample; // write sample to prelog if (tail == head) tail = ++tail % maxPreLog; } else{ // copy data from pre-log queue to log // streamWriter(&preLogSamples[head]); // For testing just write one most recent record while 
(head != tail){ streamWriter(&preLogSamples[tail]); tail = ++tail % maxPreLog; } } } /* * open serial port, leaving handle in portId global var */ Err openPort(UInt32 baudRate, Boolean enableFlowControl) { Err err; if(!portOpen) { err = SrmOpen(serPortCradleRS232Port, baudRate, &portId); if(err != errNone) err = SrmOpen(serPortCradlePort, baudRate, &portId); if(err != errNone) { ErrAlert(err); return err; } if(!enableFlowControl) { UInt16 paramSize; UInt32 flags = srmSettingsFlagBitsPerChar8 | srmSettingsFlagStopBits1; paramSize = sizeof(flags); err = SrmControl(portId, srmCtlSetFlags, &flags, &paramSize); } portOpen = true; // disable auto-sleep when talking to ECU autoOff = SysSetAutoOffTime(0); currentSensor = 0; waitingAnswer = false; scratchSample.dataPresent = 0; // Reset flags before starting scan } return 0; } Err closePort() { // Err err; if(portOpen) { portOpen = false; SrmClose(portId); } if(autoOff) { SysSetAutoOffTime(autoOff); autoOff = 0; } return 0; } Boolean lastSensor() { return currentSensor == (SENSOR_COUNT - 1) || captureSensors < (1L << (currentSensor + 1)); } Boolean nextSensor() { UInt32 scanSensorList; // CB - New scan list for INJD if(captureSensors == 0) { currentSensor = 0; scratchSample.dataPresent = 0; return false; } for(;;) { currentSensor = (currentSensor + 1) % SENSOR_COUNT; // if (currentSensor == IDX_INJD) currentSensor++; // Skip INJD sensor // Disabled in 1.6p - was causing scan to be disabled // when INJD was last sensor in scan list // if (currentSensor == IDX_INJD) // currentSensor = (currentSensor + 1) % SENSOR_COUNT; // Added back in V1.7d with new logic in derived calc // Dummy scan was tossing bogus value in MAX // if(captureSensors & (1L << currentSensor)) break; // Original code if (captureSensors & (1L << IDX_INJD)) // INJD Enabled so force INJP / RPM scanSensorList = captureSensors | (1L << IDX_INJP) | (1L << IDX_RPM); else scanSensorList = captureSensors; // Scan RPM / INJP if(scanSensorList & (1L << currentSensor)) 
break; // Use the new list } return true; } Boolean switchMode(UInt8 newMode); //Resolve forward references - should be in headers void haltScanning(); void WriteGraphGap(void) { // Add gap in log file UInt8 blanks; for (blanks=0; blanks<10 ; blanks++) streamWriter(&resetSample); } Boolean queryECU(GraphSample *sample) { Err err; Boolean sampleComplete = false; UInt16 index; //static UInt8 pendingSensor; // Initialized to get rid of warning //UInt8 tempSensor; // Temp storage for sensor processing FormType *form = FrmGetActiveForm(); if(!portOpen) return false; ErrTry { if(waitingAnswer) { UInt32 bytesAvailable; UInt8 buf[2]; // any data available? err = SrmReceiveCheck(portId, &bytesAvailable); if(err != errNone) ErrThrow(0); // got the answer? // Assumes half duplex operation with xmit command byte in rec buffer buf[0] if(bytesAvailable >= 2) { Int32 bytesReceived = SrmReceive(portId, &buf, 2, 1, &err); if(err != errNone || bytesReceived != 2) ErrThrow(0); // ASSERT(buf[0] == _pnlSensor[currentSensor].addr); scratchSample.dataPresent |= 1L << currentSensor; // Mark data available scratchSample.data[currentSensor] = buf[1]; // Xfer data to scratch sample switch (currentSensor){ // Calculate peak/min V1.7e case IDX_COOL: // Peak logic reversed for coolant case IDX_AIRT: // and Air Temp if (buf[1] < peakSample.data[currentSensor]) peakSample.data[currentSensor]= buf[1]; if (buf[1] > minSample.data[currentSensor]) minSample.data[currentSensor]= buf[1]; break; case IDX_INJD: // Do nothing for now scratchSample.data[currentSensor]=0; // cb Hack - Might solve the peak problem break; default: // Normal logic for other sensors if (buf[1] > peakSample.data[currentSensor]) peakSample.data[currentSensor]= buf[1]; if (buf[1] < minSample.data[currentSensor]) minSample.data[currentSensor]= buf[1]; break; } waitingAnswer = false; // Got a valid response scanError = 0; } // not yet - half-second timeout expired? 
else if(TimGetTicks() - receiveTimeout > ticksPerSecond / 2) {
                // No answer within half a second: count the failure and,
                // after several in a row, stop scanning entirely.
                // if (scanError++ > 10) //pause to allow powerdown
                if (scanError++ > 5) // No need to wait so long V1.7d
                {
                    //switchMode(REVIEW_MODE);
                    haltScanning();
                    peakSample=resetSample; // cbnewcode Reset peak values corrupted when ECU is off
                    scanError = 0;
                }
                ErrThrow(0);
            }
            else if ((SIMULATE) || (serialSpeed==1200)) {
                // pretend that we got an anwer (simulation mode: 1200 baud
                // is used as the "simulate" flag elsewhere in this file)
                scratchSample.dataPresent |= 1L << currentSensor;
                scratchSample.data[currentSensor] = SysRandom(0);
                // Update peak values for synthesized values
                // if (buf[1] > peakSample.data[currentSensor])
                if (scratchSample.data[currentSensor] > peakSample.data[currentSensor]) // Fixed V1.7e
                    peakSample.data[currentSensor]= scratchSample.data[currentSensor];
                // if (buf[1] < minSample.data[currentSensor])
                if (scratchSample.data[currentSensor] < minSample.data[currentSensor]) // Fixed V1.7e
                    minSample.data[currentSensor]= scratchSample.data[currentSensor];
                waitingAnswer = false;
            }
            else
                return false; // Still waiting for data receipt pending so just return
        } // End of conditional block receive processing logic
        // Got data so send request for next data while processing results
        // CB - Moved this code from end of loop to improve efficiency
        /*
        tempSensor=currentSensor; // Save a copy of the currentSensor
        if(nextSensor()) {
            SrmSend(portId, &_pnlSensor[currentSensor].addr, 1, &err);
            receiveTimeout = TimGetTicks();
            waitingAnswer = true;
        }
        pendingSensor = currentSensor; // Save pendingSensor - Swap after processing
        currentSensor = tempSensor; // Replace currentSensor value for alarm processing
        */
        // Process alarms
        // NOTE(review): sensors 10 and 22 are additionally gated on
        // scratchSample.data[14] > 200 -- the meaning of these magic
        // indices is not visible here; confirm against the sensor table.
        if( (_pnlSensor[currentSensor].trigger || _pnlSensor[currentSensor].sound) &&
            (((currentSensor == 10 || currentSensor == 22)&& (scratchSample.data[14] > 200)) ||
             (currentSensor != 10 && currentSensor != 22))) {
            _pnlSensor[IDX_COOL].polarity = ! _pnlSensor[IDX_COOL].polarity; // Toggle for alarm processing
            _pnlSensor[IDX_AIRT].polarity = !
_pnlSensor[IDX_AIRT].polarity; // because the raw data is inverted
            // Alarm fires when the value crosses the threshold in the
            // direction selected by the sensor's polarity flag.
            if((_pnlSensor[currentSensor].polarity == 0 &&
                scratchSample.data[currentSensor] > _pnlSensor [currentSensor].threshold) ||
               (_pnlSensor[currentSensor].polarity == 1 &&
                scratchSample.data[currentSensor] < _pnlSensor [currentSensor].threshold)) {
                if (audibleAlarms){
                    // Process audible alarm
                    SndCommandType sndCmd = { sndCmdFrqOn, 0, alarmFreq[_pnlSensor[currentSensor].sound], 100, sndMaxAmp };
                    SndDoCmd(NULL, &sndCmd, true);
                }
                if (autologEnabled && _pnlSensor[currentSensor].trigger){
                    // Start autolog
                    autologTimer = TimGetSeconds()+ postLog; // Any alarm with trigger starts autologging
                    logging = true;
                    index = FrmGetObjectIndex(form, logCheckId);
                    FrmSetControlValue(form, index, 1);
                }
            }
            _pnlSensor[IDX_COOL].polarity = ! _pnlSensor[IDX_COOL].polarity; // Toggle back to original
            _pnlSensor[IDX_AIRT].polarity = ! _pnlSensor[IDX_AIRT].polarity;
        }
        // }
    }
    ErrCatch(exception __attribute__ ((unused))) {
        // Any serial error above throws to here: clear the port error,
        // flush the receive queue, and restart the poll cycle cleanly.
        SrmClearErr(portId);
        SrmReceiveFlush(portId, 0);
        waitingAnswer = false;
    } ErrEndCatch
    // ASSERT(waitingAnswer == false);
    if(lastSensor() && scratchSample.dataPresent) {
        // before starting over return collected sample to the caller
        *sample = scratchSample;
        sample->time = TimGetSeconds(); // Add timestamp to log record
        // CBB - Hack to synthesize mS resolution
        // Consider creating new function to return elapsed time + mS
        //
        if (sample->time != lastTimeSeconds){ // Seconds have rolled over so reset mS
            lastTimeSeconds = sample->time;
            lastTimeTicks = TimGetTicks(); // Reset starting timer for centiseconds
            sample->data[23] = 0 ; // Write 00 centiseconds to unused log entry CBB
        }
        else
            sample->data[23] = (100 * (TimGetTicks() - lastTimeTicks)) / ticksPerSecond; // Centiseconds
        sampleComplete = true;
        // CB - Deleted - done after calculating derivatives
        // peakSample.dataPresent = scratchSample.dataPresent; // Update data available in peakSample
        // minSample.dataPresent = scratchSample.dataPresent; // Update data available in peakSample
        scratchSample.dataPresent = 0; // This is being reset before returning?????
    }
    if(nextSensor()) {
        // Send the one-byte request for the next sensor; its answer is
        // consumed on a later call to this function.
        SrmSend(portId, &_pnlSensor[currentSensor].addr, 1, &err);
        receiveTimeout = TimGetTicks();
        waitingAnswer = true;
    }
    // currentSensor = pendingSensor; // Update currentSensor to point to sensor that is already pending
    return sampleComplete;
}

/*
 * review mode-specific event handler
 */
Int32 scrollerScale = 1;

// Sync the review-mode scroll bar with the graph; 'value' is the current
// graph position in samples. scrollerScale compresses long logs so the
// position fits in the scrollbar's 16-bit range.
void _setScroller(Int32 value)
{
    ScrollBarType *bar = FrmGetObjectPtrById(FrmGetActiveForm(), scrollBarId);
    scrollerScale = graph.length / 32000 + 1;
    SclSetScrollBar(bar, value / scrollerScale, 0,
        graph.length > graph.width ? (graph.length - graph.width) / scrollerScale : 0,
        graph.width / scrollerScale );
}

// Display the current sample's date/time in the review form's time label,
// with centiseconds synthesized from data[23] (see queryECU).
void updateSampleTime()
{
    static Char s[32];
    DateTimeType date;
    TimSecondsToDateTime(currentSample.time, &date);
    // StrPrintF(s, "%02d:%02d:%02d", date.hour, date.minute, date.second);
    // Add month/day/year to review display
    StrPrintF(s, "%02d/%02d/%02d %02d:%02d:%02d.%03d", date.month, date.day, date.year,
        date.hour, date.minute, date.second,currentSample.data[23] * 10);
    FrmCopyLabel(FrmGetActiveForm(), timeLabelId, s);
}

// Event handler used while in review mode: graph scrolling, cursor moves
// via hardware keys 1/2, and the Hide-Unused toggle.
Boolean reviewHandleEvent(EventType* e)
{
    /* Insert SP 06/03/03 */
    FormType *form = FrmGetActiveForm();
    /* End Insert */
    if(GrfHandleEvent(&graph, e))
        return true;
    switch(e->eType) {
    case ctlSelectEvent:
        switch(e->data.ctlSelect.controlID) {
        case hideUnusedButtonId:
            fHideUnused = !fHideUnused;
            if (fHideUnused) {
                /* highlight "H" button */
                FrmSetControlValue(form, FrmGetObjectIndex(form, hideUnusedButtonId), 1);
                // fHideUnused = true; // fix hide mode in Review
            }
            else {
                /* deselect "H" button */
                FrmSetControlValue(FrmGetActiveForm(), FrmGetObjectIndex(form, hideUnusedButtonId), 0);
                // fHideUnused = false; // fix hide mode in Review
                UnhideUnused();
            }
            RefreshPanel();
            return true;
        }
        break;
    case sclRepeatEvent:
        // user moved scroller - update graph
        if(e->data.sclRepeat.newValue != e->data.sclRepeat.value)
            GrfSetGraph(&graph,
graph.length, e->data.sclRepeat.newValue * scrollerScale,
                graph.cursor, graph.dataMask);
        return false;
    case grfChangeEvent:
        // graph has been changed by user interaction
        {
            GraphEventType *event = (GraphEventType *)e;
            // position changed? update scroller, then
            if(event->data.grfChange.positionChanged || event->data.grfChange.lengthChanged) {
                _setScroller(event->data.grfChange.position);
            }
            // cursor moved? update values and currectSample
            if(event->data.grfChange.cursorChanged) {
                currentSample = graph._buffer[graph.cursor - graph.position];
                computeDerivatives(&currentSample);
                /* Insert SP 06/01/03 - update hidden sensors */
                RefreshPanel();
                /* End Insert */
                PnlUpdate(&panel, &currentSample);
                updateSampleTime();
            }
        }
        return true;
    /*
     * removed, pageUp and pageDown now scroll thru banks
     */
    case keyDownEvent:
        // move cursor 1 sample left if not at the beginning of graph
        if(e->data.keyDown.chr == hard1Chr && graph.cursor > 0) {
            Int32 newPosition = graph.position;
            Int32 newCursor = graph.cursor - 1;
            SndPlaySystemSound(sndClick);
            // update graph and scroller positions
            // if moved out of visible area
            if(newCursor < newPosition) {
                _setScroller(newPosition);
                newPosition = newCursor;
            }
            // update the graph
            GrfSetGraph(&graph, graph.length, newPosition, newCursor, graph.dataMask);
            // update values
            currentSample = graph._buffer[graph.cursor - graph.position];
            computeDerivatives(&currentSample);
            // Insert SP 06/01/03 - update hidden sensors
            RefreshPanel();
            // End Insert
            PnlUpdate(&panel, &currentSample);
            updateSampleTime();
            return true;
        }
        // move cursor 1 sample right if not at the end of graph
        if(e->data.keyDown.chr == hard2Chr && graph.cursor < graph.length - 1) {
            Int32 newPosition = graph.position;
            Int32 newCursor = graph.cursor + 1;
            SndPlaySystemSound(sndClick);
            // update graph and scroller positions
            // if moved out of visible area
            if(newCursor >= newPosition + graph.width) {
                newPosition = newCursor - graph.width + 1;
                _setScroller(newPosition);
            }
            // update the graph
GrfSetGraph(&graph, graph.length, newPosition, newCursor, graph.dataMask);
            // update values
            currentSample = graph._buffer[graph.cursor - graph.position];
            computeDerivatives(&currentSample);
            // Insert SP 06/01/03 - update hidden sensors
            RefreshPanel();
            // End Insert
            PnlUpdate(&panel, &currentSample);
            updateSampleTime();
            return true;
        }
        break;
    default:
        break;
    }
    return false;
}

/*
 * monitor mode-specific event handler
 */
// Programmatically toggle the Pause checkbox and, when newly paused, close
// the serial port, show Review/Done, hide Peak, and terminate the graph
// trace. Called from queryECU when the ECU stops answering.
void haltScanning()
{
    FormType *form = FrmGetActiveForm();
    SndPlaySystemSound(sndClick);
    {
        UInt16 index = FrmGetObjectIndex(form, pauseCheckId);
        Int16 newValue = FrmGetControlValue(form, index) ? 0 : 1;
        FrmSetControlValue(form, index, newValue);
        if(newValue) {
            RectangleType r = { { 1, 147 }, { 50, 13 } };
            WinEraseRectangle(&r, 0);
            FrmShowObject(form, FrmGetObjectIndex(form, reviewButtonId));
            FrmShowObject(form, FrmGetObjectIndex(form, doneButtonId));
            FrmHideObject(form, FrmGetObjectIndex(form, peakButtonId));
            closePort();
            talkingECU = false;
            GrfAppendSample(&graph, NULL);
        }
    }
}

Boolean monitorHandleEvent(EventType* e) // Main loop for scanning
{
    FormType *form = FrmGetActiveForm();
    static UInt32 startTicks;      // tick count when scanning (re)started
    static UInt32 cycles;          // completed samples since startTicks (for Hz display)
    static UInt32 lastUpdateTime;  // last panel refresh, in ticks
    static Boolean needsUpdate;
    static GraphSample sample;
    // static GraphSample scaledGraphSample;
    // static Boolean logging;
    // UInt8 blanks;
    Boolean sampleComplete;
    UInt32 keepScanning;
    // ControlType *newtest; // Trying to figure out how to get a ptr to graph control
    GrfHandleEvent(&graph, e);
    // Hardware buttons 1-4 act as shortcuts for the on-screen controls.
    if(e->eType == keyDownEvent && !(e->data.keyDown.modifiers & autoRepeatKeyMask)) {
        switch(e->data.keyDown.chr) {
        case hard1Chr:
            // change sensor for numeric display
            SndPlaySystemSound(sndClick);
            PnlSetSelection(&panel, selectedSensor = GrfSetSensor(&graph, 0xff));
            return true;
        case hard2Chr:
            // toggle Graph checkbox
            SndPlaySystemSound(sndClick);
            // newtest = FrmGetObjectPtr(form, graphCheckId);
            // CtlHitControl(newtest);
            // CtlHitControl(FrmGetObjectPtr(form, graphCheckId)); // CB - Get rid of dedundant code
//* { UInt16 index = FrmGetObjectIndex(form, graphCheckId); Int16 newValue = FrmGetControlValue(form, index) ? 0 : 1; FrmSetControlValue(form, index, newValue); GrfSetMode(&graph, numericMode = !newValue); GrfAppendSample(&graph, NULL); // if (numericMode) // cbTest panel hide/unhide // PnlHidePanel(&panel); // else // PnlShowPanel(&panel); PnlSetMode(&panel, numericMode ? PANEL_SINGLESELECT : PANEL_MULTISELECT); } //*/ return true; case hard3Chr: // toggle Log checkbox SndPlaySystemSound(sndClick); { UInt16 index = FrmGetObjectIndex(form, logCheckId); Int16 newValue = FrmGetControlValue(form, index) ? 0 : 1; FrmSetControlValue(form, index, newValue); logging = newValue; if (!logging){ // Disable logging autologTimer =0; // Stop autologging - ready for next autolog trigger userLogging=false; // Reset flag for user controlled logging } else{ userLogging=true; // User logging overrides autologging } } return true; case hard4Chr: // toggle Pause checkbox SndPlaySystemSound(sndClick); { UInt16 index = FrmGetObjectIndex(form, pauseCheckId); Int16 newValue = FrmGetControlValue(form, index) ? 
0 : 1;
                FrmSetControlValue(form, index, newValue);
                if(newValue) {
                    // Paused: stop talking to the ECU, expose Review/Done.
                    RectangleType r = { { 1, 147 }, { 50, 13 } };
                    WinEraseRectangle(&r, 0);
                    FrmShowObject(form, FrmGetObjectIndex(form, reviewButtonId));
                    FrmShowObject(form, FrmGetObjectIndex(form, doneButtonId));
                    FrmHideObject(form, FrmGetObjectIndex(form, peakButtonId));
                    closePort();
                    talkingECU = false;
                    GrfAppendSample(&graph, NULL);
                }
                else {
                    // Resumed: hide Review/Done and reopen the port.
                    FrmHideObject(form, FrmGetObjectIndex(form, reviewButtonId));
                    FrmHideObject(form, FrmGetObjectIndex(form, doneButtonId));
                    // update current mode pushbutton as its frame
                    // gets erased by neighbour hiding buttons
                    FrmShowObject(form, FrmGetObjectIndex(form, monitorButtonId));
                    FrmShowObject(form, FrmGetObjectIndex(form, peakButtonId));
                    // FrmShowObject(form, FrmGetObjectIndex(form, minmaxButtonId)); // New button not implemented yet
                    openPort(serialSpeed, false);
                    talkingECU = true;
                    startTicks = TimGetTicks();
                    cycles = 0;
                }
            }
            return true;
        }
    }
    /* Modified SP 06/01/03 - change conditional structure */
    if (e->eType == ctlSelectEvent) {
        switch (e->data.ctlSelect.controlID) {
        case pauseCheckId:
            // disable mode-switching when serial communication
            // is in progress
            // (same pause/resume sequence as the hard4Chr handler above)
            if(e->data.ctlSelect.on) {
                RectangleType r = { { 1, 147 }, { 50, 13 } };
                WinEraseRectangle(&r, 0);
                FrmShowObject(form, FrmGetObjectIndex(form, reviewButtonId));
                FrmShowObject(form, FrmGetObjectIndex(form, doneButtonId));
                FrmHideObject(form, FrmGetObjectIndex(form, peakButtonId));
                // FrmShowObject(form, FrmGetObjectIndex(form, peakButtonId));
                closePort();
                talkingECU = false;
                GrfAppendSample(&graph, NULL);
            }
            else {
                FrmHideObject(form, FrmGetObjectIndex(form, reviewButtonId));
                FrmHideObject(form, FrmGetObjectIndex(form, doneButtonId));
                // update current mode pushbutton as its frame
                // gets erased by neighbour hiding buttons
                FrmShowObject(form, FrmGetObjectIndex(form, monitorButtonId));
                FrmShowObject(form, FrmGetObjectIndex(form, peakButtonId));
                openPort(serialSpeed, false);
                talkingECU = true;
                startTicks = TimGetTicks();
                cycles = 0;
            }
break;
        case logCheckId:
            logging = e->data.ctlSelect.on;
            if (!logging){
                autologTimer =0; // Stop logging - ready for next autolog trigger
                userLogging=false;
            }
            else {
                userLogging=true; // Flag for manual logging
                // WriteGraphGap();
            }
            break;
        case peakCheckId:
            peakEnabled = e->data.ctlSelect.on;
            break;
        case graphCheckId:
            GrfSetMode(&graph, numericMode = !e->data.ctlSelect.on);
            GrfAppendSample(&graph, NULL);
            PnlSetMode(&panel, numericMode ? PANEL_SINGLESELECT : PANEL_MULTISELECT);
            break;
        /* Insert SP 06/01/03 - handle Hide Unused button */
        case hideUnusedButtonId:
            if (e->data.ctlSelect.on) {
                /* Hide */
                fHideUnused = true;
            }
            else {
                /* Do not hide */
                UnhideUnused();
                fHideUnused = false;
            }
            RefreshPanel();
            break;
        /* End Insert - handle Hide Unused button */
        }
    }
    /* End Modified - change conditional structure */
    // if(queryECU(&sample)) { // Modified to read multiple samples per event main loo
    // Poll the ECU repeatedly for up to 4 ticks per event so slow event
    // traffic does not throttle the sample rate.
    sampleComplete=false;
    keepScanning = TimGetTicks()+4; // Originall set to 2
    while (TimGetTicks() < keepScanning){
        if (queryECU(&sample)){
            sampleComplete=true;
            break;
        }
    }
    if (sampleComplete) {
        computeDerivatives(&sample);
        // Update fields in peak structures
        peakSample.dataPresent = sample.dataPresent;
        minSample.dataPresent = sample.dataPresent;
        // Update min/max values for INJD
        if (sample.data[IDX_INJD] > peakSample.data[IDX_INJD])
            peakSample.data[IDX_INJD] = sample.data[IDX_INJD];
        if (sample.data[IDX_INJD] < minSample.data[IDX_INJD])
            minSample.data[IDX_INJD] = sample.data[IDX_INJD];
        // show it
        GrfAppendSample(&graph, &sample);
        needsUpdate = true;
        cycles++;
        // if log checkbox is checked - write the sample to log file
        if ((logging) ){
            if ((sample.time-lastGraphUpdate) > 2){
                if (!newLog)
                    WriteGraphGap(); // Insert blank gap in log file
                else
                    newLog = false; // Don't put gap at start of log file
                preLog(0,&sample ); // Copy prelog data to log
                // Consider passing argument of seconds to preLog()
                // streamWriter(&resetSample); // Insert blank record for testing
            }
            streamWriter(&sample); // Write the sample to the log
            // Check if autolog has timed out
            if (!userLogging && (autologTimer < TimGetSeconds())){
                UInt16 index = FrmGetObjectIndex(form, logCheckId);
                FrmSetControlValue(form, index, 0);
                logging = false;
            }
            lastGraphUpdate = sample.time;
        }
        else
            preLog(1,&sample); // Write sample to prelog buffer
    }
    // Refresh the numeric panel and the Hz/sensor status line about six
    // times per second.
    if(TimGetTicks() > lastUpdateTime + ticksPerSecond / 6) {
        lastUpdateTime = TimGetTicks();
        if (peakEnabled)
            PnlUpdate(&panel, &peakSample);
        else
            PnlUpdate(&panel, &sample);
        // show sample rate and current sensor
        if(portOpen) {
            static Char str[32];
            Int16 strLen, labelWidth;
            RectangleType r;
            // strLen = StrPrintF(str, "%ldHz %s", cycles * ticksPerSecond / (lastUpdateTime - startTicks + 1), _pnlSensor[currentSensor].slug);
            strLen = StrPrintF(str, "%ldHz %s",
                cycles * ticksPerSecond / (lastUpdateTime - startTicks + 1),
                _pnlSensor[currentSensor].slug);
            labelWidth = FntCharsWidth(str, strLen);
            r.topLeft.x = 1 + labelWidth;
            r.topLeft.y = 147;
            // r.extent.x = 50 - labelWidth;
            // r.extent.x = 40 - labelWidth; // Make narrower to allow for reset button
            r.extent.x = 46 - labelWidth; // Make wider to clean up for long tags
            r.extent.y = 13;
            WinDrawChars(str, strLen, 1, 147);
            WinEraseRectangle(&r, 0);
        }
    }
    return false;
}

// Show the modal About box.
void doAboutDialog()
{
    FormType *form = FrmInitForm(aboutDialogId);
    FrmDoDialog(form);
    FrmDeleteForm(form);
}

/*
 * p r e f s
 */
// Replace a field's text with up to 4 characters of 'source'.
// NOTE(review): the handle is a fixed 5 bytes, so longer strings are
// silently truncated by StrNCopy.
void FldInsertText(FieldType *field, const Char *source)
{
    MemHandle textHandle;
    Char *text;
    textHandle = FldGetTextHandle(field);
    if(textHandle != NULL) {
        // release the field's previous text before installing new storage
        FldSetTextHandle(field, NULL);
        MemHandleFree(textHandle);
    }
    textHandle = MemHandleNew(5);
    text = MemHandleLock(textHandle);
    StrNCopy(text, source, 5);
    MemHandleUnlock(textHandle);
    FldSetTextHandle (field, textHandle);
}

// Reset all preferences and per-sensor capture/graph selections to their
// factory defaults, then rebuild the captureSensors/graphSensors masks.
void setDefaults()
{
    UInt16 i;
    graphSensors = 0;
    captureSensors = 0;
    currentBank = 0;
    currentMode = 1; // cb - Start off in Monitor Mode
    topVisible = 0;
    metricUnit = PREFS_UNIT_METRIC;
    serialSpeed = ECU_BAUDRATE;
    ecuSpeed = 100;
    overClock = 0; //Insert SS 4/3/05
mapScaling = 0;
    vehicleSelect = 0;
    baseTiming = 5; //Modified SS 4/3/05 - changed default to 5 degrees base timing
    numericMode = 0;
    selectedSensor = 0;
    fHideUnused = false;
    audibleAlarms = true; // Default to on
    autologEnabled = true; // Default to on
    o2NonLinEnabled = true; // Default to on
    for(i = 0; i < SENSOR_COUNT; i++) {
        if (i>16 && i<=20) { // cb 1/4/05 Turn on several sensor for scanning/graphing by default
            _pnlSensor[i].capture=1;
            if (i!=18)
                _pnlSensor[i].graph=1; // Make one sensor not selected for graphing
        }
        // NOTE(review): these three fixed-index writes run on EVERY loop
        // iteration; they only need to run once.
        _pnlSensor[19].capture=0; // Disable INJP
        _pnlSensor[19].graph=0; // Disable INJP graphing
        _pnlSensor[18].graph=0; // Make one sensor not selected for graphing
        if(_pnlSensor[i].capture)
            captureSensors |= (1L << i);
        if(_pnlSensor[i].graph)
            graphSensors |= (1L << i);
    }
}

// Run the modal Preferences dialog: populate the trigger labels and
// checkboxes from the current globals, then on OK parse the (possibly
// changed) labels back into the globals; on Default restore factory
// settings.
void doPrefsDialog()
{
    FormType *form = FrmInitForm(prefsDialogId);
    UInt16 i, index;
    Char *s, str[16],str2[8], str3[8];
    ControlType* ctl;
    ctl = FrmGetObjectPtrById(form, speedTriggerId); // serial speed
    StrPrintF(str, "%ld bps", serialSpeed);
    if (serialSpeed==1200)
        CtlSetLabel(ctl, "Simulate");
    else
        CtlSetLabel(ctl, str);
    ctl = FrmGetObjectPtrById(form, unitTriggerId); // unit
    switch(metricUnit) {
    case PREFS_UNIT_ENGLISH:
        CtlSetLabel(ctl, "English");
        break;
    case PREFS_UNIT_NUMERIC:
        CtlSetLabel(ctl, "Numeric");
        break;
    default:
        CtlSetLabel(ctl, "Metric");
        break;
    }
    ctl = FrmGetObjectPtrById(form, egtinputTriggerId); // EGT input - cb Modified to use egrtInput const
    CtlSetLabel(ctl, egrtInput[mapScaling]);
    StrNCopy(_pnlSensor[9].slug, egrtSlug[mapScaling], 4); // Works but does not change slugs immediately
    /*
    switch(mapScaling) {
    case 0:
        CtlSetLabel(ctl, "3.0 BAR");
        // StrNCopy(_pnlSensor[9].slug, "MAP ", 4); // Works but does not change slugs immediately
        break;
    case 1:
        CtlSetLabel(ctl, "2.5 BAR");
        // StrNCopy(_pnlSensor[9].slug, "MAP ", 4);
        break;
    case 2:
        CtlSetLabel(ctl, "2.0 BAR");
        // StrNCopy(_pnlSensor[9].slug, "MAP ", 4);
        break;
    case 3:
        CtlSetLabel(ctl, "Wideband O2");
        // StrNCopy(_pnlSensor[9].slug, "WB02", 4);
        break;
    case 4:
        CtlSetLabel(ctl, "EGRT");
        // StrNCopy(_pnlSensor[9].slug, "EGRT", 4);
        break;
    case 5:
        CtlSetLabel(ctl, "0-5V");
        // StrNCopy(_pnlSensor[9].slug, "0-5v ", 4);
        break;
    }
    */
    ctl = FrmGetObjectPtrById(form, vehicleTriggerId); // Vehicle Select
    switch(vehicleSelect) {
    case 0:
        CtlSetLabel(ctl, "3/S");
        break;
    case 1:
        CtlSetLabel(ctl, "DSM");
        break;
    }
    ctl = FrmGetObjectPtrById(form, baseTriggerId); // Base Timing
    StrPrintF(str2, "%ld°" , baseTiming); // CB Much simpler version
    CtlSetLabel(ctl, str2);
    ctl = FrmGetObjectPtrById(form, clockTriggerId); // ECU Speed
    //StrPrintF(str3, "%ld\%", ecuSpeed); //Escape char causing crash?
    StrPrintF(str3, "%ld%%", ecuSpeed);
    CtlSetLabel(ctl, str3);
    switch (ecuSpeed) { //Overclock Airflow and RPM scaling functionality established here
    case 100: overClock = 0; break;
    case 110: overClock = 1; break;
    case 112: overClock = 2; break;
    case 115: overClock = 3; break;
    default: overClock = 0;
    }
    index = FrmGetObjectIndex(form, alarmsCheckId);
    FrmSetControlValue(form, index, audibleAlarms);
    index = FrmGetObjectIndex(form, o2NonLinCheckId);
    FrmSetControlValue(form, index, o2NonLinEnabled);
    index = FrmGetObjectIndex(form, autologCheckId);
    FrmSetControlValue(form, index, autologEnabled);
    i = FrmDoDialog(form);
    if(i == defaultButtonId) {
        MemMove(_pnlSensor, _pnlSensorDefault, sizeof(_pnlSensor));
        setDefaults();
        PnlUpdate(&panel, &currentSample);
    }
    else if(i == okButtonId) {
        ctl = FrmGetObjectPtrById(form, speedTriggerId); // serial speed
        serialSpeed = StrAToI(CtlGetLabel(ctl));
        if (serialSpeed==0)
            serialSpeed = 1200; // Use 1200 baud as flag for simulation
        ctl = FrmGetObjectPtrById(form, unitTriggerId);
        s = (Char*)CtlGetLabel(ctl);
        if(0 == StrCompare(s, "English")) {
            metricUnit = PREFS_UNIT_ENGLISH;
        }
        else if(0 == StrCompare(s, "Numeric")) {
            metricUnit = PREFS_UNIT_NUMERIC;
        }
        else {
            metricUnit = PREFS_UNIT_METRIC;
        }
        //ctl = FrmGetObjectPtrById(form, egtinputTriggerId); // EGT Input
        //mapScaling = CtlGetValue(ctl);
        ctl = FrmGetObjectPtrById(form, egtinputTriggerId); // EGT Input
        s = (Char*)CtlGetLabel(ctl);
        for (mapScaling=0; mapScaling < 6; mapScaling++) // cd - refactored to use egrtInput constants
            if (0==StrCompare(s,egrtInput[mapScaling])) // If it matches we're
                break;
        /*
        // cd - refactored to use egrtInput constants
        if(0 == StrCompare(s, "3.0 BAR")) {
            mapScaling = 0;
        }
        else if(0 == StrCompare(s, "2.5 BAR")) {
            mapScaling = 1;
        }
        else if(0 == StrCompare(s, "2.0 BAR")) {
            mapScaling = 2;
        }
        else if(0 == StrCompare(s, "Wideband O2")) {
            mapScaling = 3;
        }
        else if(0 == StrCompare(s, "EGRT")) {
            mapScaling = 4;
        }
        else {
            mapScaling = 5; // 0-5V back to EGT Scaling
        }
        */
        ctl = FrmGetObjectPtrById(form, vehicleTriggerId); // Insert SS 1/3/05 Vehicle Select
        s = (Char*)CtlGetLabel(ctl);
        if(0 == StrCompare(s, "3/S")) {
            vehicleSelect = 0;
        }
        else if(0 == StrCompare(s, "DSM")) {
            vehicleSelect = 1;
        }
        ctl = FrmGetObjectPtrById(form, baseTriggerId); // Insert SS 1/3/05 Base Timing
        baseTiming = StrAToI(CtlGetLabel(ctl));
        ctl = FrmGetObjectPtrById(form, clockTriggerId); // Insert SS 4/4/05 ECU Speed
        ecuSpeed = StrAToI(CtlGetLabel(ctl));
        index = FrmGetObjectIndex(form, alarmsCheckId);
        audibleAlarms = FrmGetControlValue(form, index) ? true : false;
        index = FrmGetObjectIndex(form, o2NonLinCheckId);
        o2NonLinEnabled = FrmGetControlValue(form, index) ? true : false;
        index = FrmGetObjectIndex(form, autologCheckId);
        autologEnabled = FrmGetControlValue(form, index) ?
true : false;
    }
    FrmDeleteForm(form);
}

/*
 * a l a r m s
 */
// Backing storage for the alarm-list popup: 32 slug pointers into a flat
// string pool (6 bytes per slot), plus the sensor index behind each row.
static Char* alarmListSlugs[32];
static Char alarmListStrings[32*6];
static Char alarmListSensor[32];

// Format sensor si's threshold with its own formatter and show it in the
// alarm form's threshold label.
void updateAlarmThreshold(FormType* form, UInt16 si)
{
    static Char str[16] = "\0";
    _pnlSensor[si].format(_pnlSensor[si].threshold, str);
    FrmCopyLabel(form, threshFieldId, str);
}

// Load all alarm-form controls (slug trigger, sound group, threshold
// slider, polarity sign, trigger checkbox) from sensor si's settings.
void updateAlarmForm(FormType* form, UInt16 si)
{
    UInt16 index;
    ControlType* control;
    UInt16 v[4] = { 0, 255, 1, _pnlSensor[si].threshold }; // min, max, page, value
    index = FrmGetObjectIndex(form, alarmTriggerId);
    control = FrmGetObjectPtr(form, index);
    CtlSetLabel(control, _pnlSensor[si].slug);
    FrmSetControlGroupSelection(form, soundGroupId, sound0ButtonId + _pnlSensor[si].sound);
    index = FrmGetObjectIndex(form, threshSliderId);
    control = FrmGetObjectPtr(form, index);
    CtlSetSliderValues(control, &v[0], &v[1], &v[2], &v[3]);
    index = FrmGetObjectIndex(form, threshSignButtonId);
    control = FrmGetObjectPtr(form, index);
    CtlSetLabel(control, _pnlSensor[si].polarity ? "<" : ">");
    index = FrmGetObjectIndex(form, triggerCheckId);
    FrmSetControlValue(form, index, _pnlSensor[si].trigger);
    updateAlarmThreshold(form, si);
}

// Variant of updateAlarmForm for the Sensor Setup form: identical except it
// does not touch the trigger checkbox (that form has none).
void newupdateAlarmForm(FormType* form, UInt16 si)
{
    UInt16 index;
    ControlType* control;
    UInt16 v[4] = { 0, 255, 1, _pnlSensor[si].threshold };
    index = FrmGetObjectIndex(form, alarmTriggerId);
    control = FrmGetObjectPtr(form, index);
    CtlSetLabel(control, _pnlSensor[si].slug);
    //FrmCopyLabel(form, sensorTagId, alarmTriggerId);
    //index = FrmGetObjectIndex(form, sensorTagId);
    //control = FrmGetObjectPtr(form, index);
    //CtlSetLabel(control, _pnlSensor[si].slug);
    FrmSetControlGroupSelection(form, soundGroupId, sound0ButtonId + _pnlSensor[si].sound);
    index = FrmGetObjectIndex(form, threshSliderId);
    control = FrmGetObjectPtr(form, index);
    CtlSetSliderValues(control, &v[0], &v[1], &v[2], &v[3]);
    index = FrmGetObjectIndex(form, threshSignButtonId);
    control = FrmGetObjectPtr(form, index);
    CtlSetLabel(control, _pnlSensor[si].polarity ?
"<" : ">"); updateAlarmThreshold(form, si); } void updateAlarmList(FormType* form) { Int16 i, last = 0; ListType* list; /* for(i = 0; i < SENSOR_COUNT; i++) { if(_pnlSensor[sensorPosition[i]].exists) { alarmListSensor[last] = sensorPosition[i]; StrNCopy(alarmListSlugs[last], _pnlSensor[sensorPosition[i]].slug, 5); if(_pnlSensor[sensorPosition[i]].sound) StrNCat(alarmListSlugs[last], "#", 6); last++; } } */ for(i = 0; i < SENSOR_COUNT; i++) { if(_pnlSensor[i].exists) { alarmListSensor[last] = i; StrNCopy(alarmListSlugs[last], _pnlSensor[i].slug, 5); if(_pnlSensor[i].sound || _pnlSensor[i].trigger) StrNCat(alarmListSlugs[last], "#", 6); last++; } } list = FrmGetObjectPtr(form, FrmGetObjectIndex(form, alarmListId)); LstSetListChoices(list, alarmListSlugs, last); } Boolean alarmsFormHandleEvent(EventType *e) { FormType *form = FrmGetActiveForm(); SndCommandType sndCmd = { sndCmdFrqOn, 0, 1000, 200, sndMaxAmp }; static int si; Int32 i; Err err; if(SysHandleEvent(e) || MenuHandleEvent(NULL, e, &err)) return true; switch(e->eType) { case frmOpenEvent: for(i = 0; i < SENSOR_COUNT; i++) alarmListSlugs[i] = alarmListStrings + i * 6; si = selectedSensor & 0x1f; for(i = 0; i < SENSOR_COUNT; i++) { if(!_pnlSensor[si].exists) si = (si + 1) % SENSOR_COUNT; else break; } FrmDrawForm(form); updateAlarmList(form); updateAlarmForm(form, si); return true; case popSelectEvent: si = alarmListSensor[e->data.popSelect.selection]; updateAlarmForm(form, si); return true; case frmCloseEvent: // if user switcher application when this form is open // post frmCloseEvent to caller form as it will get it // a chance to free resources FrmReturnToForm(0); return true; case ctlRepeatEvent: switch(e->data.ctlRepeat.controlID) { case threshSliderId: _pnlSensor[si].threshold = e->data.ctlRepeat.value; updateAlarmThreshold(form, si); return false; } break; case ctlSelectEvent: switch(e->data.ctlSelect.controlID) { case okButtonId: FrmReturnToForm(0); return true; case sound0ButtonId: case 
sound1ButtonId:
        case sound2ButtonId:
        case sound3ButtonId:
        case sound4ButtonId:
            // sound buttons are contiguous IDs; preview the chosen tone
            _pnlSensor[si].sound = e->data.ctlSelect.controlID - sound0ButtonId;
            updateAlarmList(form);
            if((sndCmd.param1 = alarmFreq[_pnlSensor[si].sound]))
                SndDoCmd(NULL, &sndCmd, true);
            return true;
        case threshSignButtonId:
            {
                // flip alarm polarity and the on-screen </> label together
                UInt16 index = FrmGetObjectIndex(form, threshSignButtonId);
                ControlType* control = FrmGetObjectPtr(form, index);
                const Char *label = CtlGetLabel(control);
                _pnlSensor[si].polarity = *label == '<' ? 0 : 1;
                CtlSetLabel(control, *label == '<' ? ">" : "<");
            }
            return true;
        case triggerCheckId:
            {
                UInt16 index = FrmGetObjectIndex(form,triggerCheckId);
                _pnlSensor[si].trigger=FrmGetControlValue(form, index);
                // FrmSetControlValue(form, index, newValue);
            }
            return true;
        }
        return false;
    default:
        break;
    }
    return false;
}

/*
 * System Configuration
 */
// Minimal event handler for the System Configuration form: draw on open,
// dismiss on Done.
Boolean systemFormHandleEvent(EventType *e)
{
    FormType *form = FrmGetActiveForm();
    Err err;
    if(SysHandleEvent(e) || MenuHandleEvent(NULL, e, &err))
        return true;
    switch(e->eType) {
    case frmOpenEvent:
        FrmDrawForm(form);
        return true;
    case ctlSelectEvent:
        switch(e->data.ctlSelect.controlID) {
        case doneButtonId:
            FrmReturnToForm(0);
            return true;
        }
        return false;
    default:
        break;
    }
    return false;
}

/*
 * Sensor Setup
 */
// Event handler for the Sensor Setup form. Mirrors alarmsFormHandleEvent
// but uses newupdateAlarmForm (no trigger checkbox on this form).
Boolean sensorFormHandleEvent(EventType *e)
{
    FormType *form = FrmGetActiveForm();
    SndCommandType sndCmd = { sndCmdFrqOn, 0, 1000, 200, sndMaxAmp };
    static int si; // sensor index currently being edited
    Int32 i;
    Err err;
    if(SysHandleEvent(e) || MenuHandleEvent(NULL, e, &err))
        return true;
    switch(e->eType) {
    case frmOpenEvent:
        for(i = 0; i < SENSOR_COUNT; i++)
            alarmListSlugs[i] = alarmListStrings + i * 6;
        si = selectedSensor & 0x1f;
        for(i = 0; i < SENSOR_COUNT; i++) {
            if(!_pnlSensor[si].exists)
                si = (si + 1) % SENSOR_COUNT;
            else
                break;
        }
        FrmDrawForm(form);
        updateAlarmList(form);
        newupdateAlarmForm(form, si);
        return true;
    case popSelectEvent:
        si = alarmListSensor[e->data.popSelect.selection];
        newupdateAlarmForm(form, si);
        return true;
    case frmCloseEvent:
        // if user switcher application when this form is open
        // post frmCloseEvent to caller form as it will get it
        // a chance to free resources
        FrmReturnToForm(0);
        return true;
    case ctlRepeatEvent:
        switch(e->data.ctlRepeat.controlID) {
        case threshSliderId:
            _pnlSensor[si].threshold = e->data.ctlRepeat.value;
            updateAlarmThreshold(form, si);
            return false;
        }
        break;
    case ctlSelectEvent:
        switch(e->data.ctlSelect.controlID) {
        case doneButtonId:
            FrmReturnToForm(0);
            // RefreshPanel(); //bug
            return true;
        case sound0ButtonId:
        case sound1ButtonId:
        case sound2ButtonId:
        case sound3ButtonId:
        case sound4ButtonId:
            _pnlSensor[si].sound = e->data.ctlSelect.controlID - sound0ButtonId;
            updateAlarmList(form);
            if((sndCmd.param1 = alarmFreq[_pnlSensor[si].sound]))
                SndDoCmd(NULL, &sndCmd, true);
            return true;
        case threshSignButtonId:
            {
                UInt16 index = FrmGetObjectIndex(form, threshSignButtonId);
                ControlType* control = FrmGetObjectPtr(form, index);
                const Char *label = CtlGetLabel(control);
                _pnlSensor[si].polarity = *label == '<' ? 0 : 1;
                CtlSetLabel(control, *label == '<' ?
">" : "<"); } return true; } return false; default: break; } return false; } /* * Custom Sensor */ void doCustomDialog() { FormType *form = FrmInitForm(customFormId); UInt16 i, index; FieldType *field; Char *s, str[16]; for(i = 0; i < 8; i++) { // SensorType *sensor = &_pnlSensor[sensorPosition[currentBank * 8 + i]]; SensorType *sensor = &_pnlSensor[sensorPosition[3 * 8 + i]]; index = FrmGetObjectIndex(form, customSensorId+i*3); // checkbox FrmSetControlValue(form, index, sensor->exists); index = FrmGetObjectIndex(form, customSensorId+i*3+1); // addr field = FrmGetObjectPtr(form, index); StrIToH(str, sensor->addr); FldInsertText(field, str + 6); index = FrmGetObjectIndex(form, customSensorId+i*3+2); // slug field = FrmGetObjectPtr(form, index); s = sensor->slug; FldInsertText(field, s); } i = FrmDoDialog(form); if(i == doneButtonId) { for(i = 0; i < 8; i++) { SensorType *sensor = &_pnlSensor[sensorPosition[3 * 8 + i]]; index = FrmGetObjectIndex(form, customSensorId+i*3); // checkbox sensor->exists = FrmGetControlValue(form, index) ? 1 : 0; index = FrmGetObjectIndex(form, customSensorId+i*3+1); // addr field = FrmGetObjectPtr(form, index); s = FldGetTextPtr(field); while(s && *s) { index <<= 4; if(*s >= '0' && *s <= '9') index += *s - '0'; else if(*s >= 'a' && *s <= 'f') index += *s - 'a' + 10; else if(*s >= 'A' && *s <= 'F') index += *s - 'A' + 10; s++; } sensor->addr = index; index = FrmGetObjectIndex(form, customSensorId+i*3+2); // slug field = FrmGetObjectPtr(form, index); s = FldGetTextPtr(field); StrNCopy(sensor->slug, s ? 
s : "", 5); } } FrmDeleteForm (form); } /* * e d i t */ /* * update form according to current mode */ Boolean switchMode(UInt8 newMode) { FormType *form = FrmGetActiveForm(); UInt16 pauseCheckIndex = FrmGetObjectIndex(form, pauseCheckId); UInt16 logCheckIndex = FrmGetObjectIndex(form, logCheckId); UInt16 graphCheckIndex = FrmGetObjectIndex(form, graphCheckId); UInt16 peakCheckIndex = FrmGetObjectIndex(form, peakCheckId); UInt16 timeLabelIndex = FrmGetObjectIndex(form, timeLabelId); UInt16 scrollBarIndex = FrmGetObjectIndex(form, scrollBarId); if(newMode == currentMode) return false; closePort(); { // erase graph RectangleType r = { { 0, graphY - 14}, { 160, 1 } }; WinEraseRectangle(&r, 0); r.topLeft.y = graphY - 12; r.extent.y = 11; WinEraseRectangle(&r, 0); } RefreshPanel(); PnlUpdate(&panel, &currentSample); switch(newMode) { case REVIEW_MODE: currentMode = REVIEW_MODE; PnlSetMode(&panel, PANEL_MULTISELECT); PnlShowPanel(&panel); // hide Graph, Log and Pause checkboxen FrmHideObject(form, pauseCheckIndex); FrmHideObject(form, logCheckIndex); FrmHideObject(form, graphCheckIndex); FrmHideObject(form, peakCheckIndex); FrmShowObject(form, timeLabelIndex); // show graph GrfSetMode(&graph, 0); GrfShowCursor(&graph, 1); GrfUpdateGraph(&graph); // show graph scroller _setScroller(graph.position); FrmShowObject(form, scrollBarIndex); break; case MONITOR_MODE: currentMode = MONITOR_MODE; PnlSetMode(&panel, numericMode ? 
PANEL_SINGLESELECT : PANEL_MULTISELECT); PnlShowPanel(&panel); FrmHideObject(form, timeLabelIndex); FrmHideObject(form, scrollBarIndex); // set and show Pause checkbox FrmSetControlValue(form, pauseCheckIndex, 1); FrmSetControlValue(form, graphCheckIndex, !numericMode); FrmShowObject(form, pauseCheckIndex); FrmShowObject(form, graphCheckIndex); FrmShowObject(form, logCheckIndex); FrmShowObject(form, peakCheckIndex); // hide graph GrfSetMode(&graph, !FrmGetControlValue(form, graphCheckIndex)); GrfEraseGraph(&graph); GrfShowCursor(&graph, 0); GrfAppendSample(&graph, 0); break; } return true; } Boolean editFormHandleEvent(EventType *e) { Err err; FormType *form = FrmGetActiveForm(); // give a chance to mode-specific handlers first if(currentMode == REVIEW_MODE && reviewHandleEvent(e)) return true; if(currentMode == MONITOR_MODE && monitorHandleEvent(e)) return true; // if serial comm in progress - block all system events // otherwise just block hardware buttons 1..4 if(!talkingECU && !( e->eType == keyDownEvent && ( e->data.keyDown.chr == hard1Chr || e->data.keyDown.chr == hard2Chr || e->data.keyDown.chr == hard3Chr || e->data.keyDown.chr == hard4Chr ) ) && (SysHandleEvent(e) || MenuHandleEvent(NULL, e, &err))) return true; // should be the last as it interfere with MenuHandleEvent if(PnlHandleEvent(&panel, e)) return true; switch(e->eType) { case frmOpenEvent: { FrmSetTitle(form, editFormTitle); FrmDrawForm(form); // TODO: move to resource some day graphX = 0; graphY = 70; graphW = 160; graphH = 64; // streamName was filled by main form stream = FileOpen(0, streamName, 0, CRID, fileModeUpdate, NULL); /* Insert SP 06/01/03 - default Hide Unused setting if off */ FrmSetControlValue(form, FrmGetObjectIndex(form, hideUnusedButtonId), fHideUnused); /* End Insert */ // panel PnlCreatePanel(&panel, panelId, NULL, &sensorPosition[currentBank * 8], selectedSensor, 0, 16, 160, 10 * 4); // create and paint the graph GrfCreateGraph(&graph, graphId, graphX, graphY, graphW, 
graphH, streamReader); // TODO: set graph without painting it GrfSetGraph(&graph, 0, 0, 0, graphSensors); GrfSetNumericMask(&graph, captureSensors); GrfSetSensor(&graph, selectedSensor); // hilite current bank FrmSetControlGroupSelection(form, bankGroupId, bank1ButtonId + currentBank); // hilite current mode FrmSetControlGroupSelection(form, modeGroupId, reviewButtonId + currentMode); WinDrawGrayLine(graphX, graphY - 13, graphX + graphW - 1, graphY - 13); WinDrawGrayLine(graphX, graphY - 1, graphX + graphW - 1, graphY - 1); WinDrawGrayLine(graphX, graphY + graphH, graphX + graphW - 1, graphY + graphH); // paint the form according to current mode switchMode(currentMode++); } return true; case frmCloseEvent: numericMode = graph._numericMode; GrfDestroyGraph(&graph); PnlDestroyPanel(&panel); { Int32 streamSize; Err err; FileTell(stream, &streamSize, &err); FileClose(stream); // delete log if empty if(streamSize == 0) FileDelete(0, streamName); } return false; case pnlSelectEvent: { PanelEventType *event = (PanelEventType *)e; if(graphSensors != event->data.pnlSelect.graph) GrfSetMask(&graph, event->data.pnlSelect.graph); if(captureSensors != event->data.pnlSelect.capture) GrfSetNumericMask(&graph, event->data.pnlSelect.capture); if(selectedSensor != event->data.pnlSelect.selection); GrfSetSensor(&graph, event->data.pnlSelect.selection); graphSensors = event->data.pnlSelect.graph; captureSensors = event->data.pnlSelect.capture; selectedSensor = event->data.pnlSelect.selection; } return true; case keyDownEvent: if(e->data.keyDown.modifiers & autoRepeatKeyMask) break; // decrement current bank if(e->data.keyDown.chr == pageUpChr) { // SndPlaySystemSound(sndClick); currentBank = (currentBank - 1) & 3; FrmSetControlGroupSelection(form, bankGroupId, bank1ButtonId + currentBank); RefreshPanel(); return true; } // cincrement current bank if(e->data.keyDown.chr == pageDownChr) { // SndPlaySystemSound(sndClick); currentBank = (currentBank + 1) & 3; 
FrmSetControlGroupSelection(form, bankGroupId, bank1ButtonId + currentBank); RefreshPanel(); return true; } break; case ctlSelectEvent: switch(e->data.ctlSelect.controlID) { case doneButtonId: FrmGotoForm(mainFormId); return true; case peakButtonId: // peakSample=resetSample; peakSample=scratchSample; // Solves problem with Coolant sensor where MAX T = 0 count minSample = scratchSample; return true; case bank1ButtonId: case bank2ButtonId: case bank3ButtonId: case bank4ButtonId: currentBank = (FrmGetControlGroupSelection(form, bankGroupId) - 1) & 3; /* Insert SP 06/01/03 */ RefreshPanel(); /* End Insert */ return true; case reviewButtonId: switchMode(REVIEW_MODE); return true; case monitorButtonId: switchMode(MONITOR_MODE); return true; } break; case menuEvent: switch (e->data.menu.itemID) { case alarmsMenuId: FrmPopupForm(alarmsFormId); return true; case prefsMenuId: doPrefsDialog(); PnlSetBank(&panel, &sensorPosition[currentBank * 8]); panel.graph = graphSensors; panel.capture = captureSensors; GrfSetMask(&graph, panel.graph); GrfSetNumericMask(&graph, panel.capture); return true; case customMenuId: doCustomDialog(); return true; case aboutMenuId: doAboutDialog(); return true; case systemMenuId: FrmPopupForm(systemFormId); return true; case sensorMenuId: FrmPopupForm(sensorFormId); return true; case renameLogMenuId: { FormType *dialog = FrmInitForm(renameLogFormId); UInt16 fieldIndex = FrmGetObjectIndex(dialog, newNameFieldId); Int16 checkIndex = FrmGetObjectIndex(dialog, backupLogCheckId); FieldType *field = FrmGetObjectPtr(dialog, fieldIndex); // paste current log name into the field FldInsert(field, streamName, StrLen(streamName)); // select field contents, so it would be deleted if user starts typing FldSetSelection(field, 0, StrLen(streamName)); FrmSetFocus(dialog, fieldIndex); FrmSetControlValue(dialog, checkIndex, (streamAttr & dmHdrAttrBackup) ? 
1 : 0); // show dialog if(FrmDoDialog(dialog) == okButtonId) { const Char *newName = FldGetTextPtr(field); Boolean streamBackup = FrmGetControlValue(dialog, checkIndex); // file can't have no name - don't even try if(StrLen(newName) <= 0) newName = streamName; { DmOpenRef dbRef = NULL; LocalID dbId = 0; Int32 size = sizeof(DmOpenRef); UInt16 newAttr = dmHdrAttrStream | (streamBackup ? dmHdrAttrBackup : 0); (void)(0 || FileControl(fileOpGetOpenDbRef, stream, &dbRef, &size) || DmOpenDatabaseInfo(dbRef, &dbId, NULL, NULL, NULL, NULL) || DmSetDatabaseInfo(0, dbId, StrCompare(streamName, newName) ? newName : NULL, &newAttr, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) || StrCopy(streamName, newName) || (streamAttr = newAttr) ); } } FrmDeleteForm(dialog); } return true; case newLogMenuId: { if(FrmAlert(newLogAlertId) == 0) { } } return true; case deleteLogMenuId: if(FrmAlert(deleteLogAlertId) == 0) { // just truncate the log and switch to main // form. frmCloseEvent handler will delete // the file... FileSeek(stream, 0, fileOriginBeginning); FileTruncate(stream, 0); FrmGotoForm(mainFormId); } return true; case truncateLogMenuId: if(FrmAlert(truncateLogAlertId) == 0) { FileSeek(stream, 0, fileOriginBeginning); FileTruncate(stream, 0); GrfUpdateGraph(&graph); _setScroller(graph.position); } return true; default: /* let the system handle it */ break; } break; default: break; } return false; } /* * m a i n */ struct DirEntry { Char name[32]; UInt16 attr; }; struct DirEntry *dir; Int16 dirSize; #define ITEM_HEIGHT 11 #define ITEM_COUNT 11 Int16 compare(void *p1, void *p2, Int32 length) { Char* s1 = ((struct DirEntry *)p1)->name; Char* s2 = ((struct DirEntry *)p2)->name; Int16 r; while(*s2) if((r = *s1++ - *s2++)) return r; return *s1; // couldn't believe it but PalmOS prior to 4.0 have no // plain strcmp! StrCompare is doing something obscure... 
/* return StrCompare((Char *)p1, (Char *)p2); */ } void loadDirectory() { Boolean newSearch = true; DmSearchStateType state; UInt16 cardNo; LocalID dbID; Int16 i; dirSize = 16; dir = MemPtrNew(dirSize * sizeof(struct DirEntry)); i = 0; while(errNone == DmGetNextDatabaseByTypeCreator(newSearch, &state, sysFileTFileStream, CRID, false, &cardNo, &dbID)) { if(errNone == DmDatabaseInfo(cardNo, dbID, dir[i].name, &dir[i].attr /* attributesP */, NULL /* versionP */, NULL /* cdDateP */, NULL /* modDateP */, NULL /*bckUpDateP */, NULL /* modNumP */, NULL /* appInfoIDP */, NULL /* sortInfoIDP */, NULL /* *typeP */, NULL /* creatorP */)) { if(++i >= dirSize) { void *p = MemPtrNew((dirSize + 16) * sizeof(struct DirEntry)); MemMove(p, dir, dirSize * sizeof(struct DirEntry)); MemPtrFree(dir); dir = p; dirSize += 16; } } newSearch = false; } dirSize = i; SysInsertionSort(dir, dirSize, sizeof(struct DirEntry), compare, 32); } void mainDrawItem(UInt16 i, Boolean highlight) { if(i < topVisible + ITEM_HEIGHT) { UInt16 strWidth, strLen; Char str[32]; RectangleType clip, rect = { { 0, 18 + i * ITEM_HEIGHT }, { 151, 11 } }; WinPushDrawState(); if(highlight) { WinSetBackColor(UIColorGetTableEntryIndex(UIObjectSelectedFill)); WinSetTextColor(UIColorGetTableEntryIndex(UIObjectSelectedForeground)); } else { WinSetBackColor(UIColorGetTableEntryIndex(UIObjectFill)); WinSetTextColor(UIColorGetTableEntryIndex(UIObjectForeground)); } WinEraseRectangle(&rect, 0); WinGetClip(&clip); WinSetClip(&rect); strLen = StrPrintF(str, "%d.", topVisible + i + 1); strWidth = FntCharsWidth(str, strLen); WinDrawChars(str, strLen, 14 - strWidth, 18 + i * ITEM_HEIGHT); if(dir[topVisible + i].attr & dmHdrAttrBackup) WinDrawChars("\225", 1, 146, 18 + i * ITEM_HEIGHT); rect.extent.x -= 6; WinSetClip(&rect); WinDrawChars(dir[topVisible + i].name, StrLen(dir[topVisible + i].name), 18, 18 + i * ITEM_HEIGHT); WinSetClip(&clip); WinPopDrawState(); } } Boolean mainFormHandleEvent(EventType *e) { FormType *form = 
FrmGetActiveForm(); static Int16 hilitedItem; Int16 i; Err err; ScrollBarType* bar = FrmGetObjectPtr(form, FrmGetObjectIndex(form, mainScrollbarId)); if(SysHandleEvent(e) || MenuHandleEvent(NULL, e, &err)) return true; switch(e->eType) { case frmOpenEvent: closePort(); FrmDrawForm(form); hilitedItem = -1; loadDirectory(); if(topVisible + ITEM_COUNT > dirSize) topVisible = MAX(0, dirSize - ITEM_COUNT); for(i = 0; i < MIN(dirSize, ITEM_COUNT); i++) mainDrawItem(i, false); SclSetScrollBar(bar, topVisible, 0, MAX(0, dirSize - ITEM_COUNT), ITEM_COUNT); return true; case frmCloseEvent: MemPtrFree(dir); return false; case sclRepeatEvent: topVisible = e->data.sclRepeat.newValue; for(i = 0; i < MIN(dirSize, ITEM_COUNT); i++) mainDrawItem(i, false); return false; case penDownEvent: if(e->screenX < 150 && e->screenY >= 18 && e->screenY <= 18 + ITEM_COUNT * ITEM_HEIGHT) { i = (e->screenY - 18) / ITEM_HEIGHT; if(i >= 0 && i < dirSize) { mainDrawItem(i, true); hilitedItem = i; return true; } } return false; case penMoveEvent: i = (e->screenY - 18) / ITEM_HEIGHT; if(hilitedItem >= 0 && i != hilitedItem) { mainDrawItem(hilitedItem, false); hilitedItem = -1; return true; } return false; case penUpEvent: i = (e->screenY - 18) / ITEM_HEIGHT; if(hilitedItem >= 0 && i == hilitedItem) { StrCopy(streamName, dir[topVisible + i].name); streamAttr = dir[topVisible + i].attr; // StrPrintF(editFormTitle, "MMCd Log %d of %d", i + 1 + topVisible, dirSize); StrPrintF(editFormTitle, "MMCd Log %d", i + 1 + topVisible); FrmGotoForm(editFormId); return true; } else if(hilitedItem >= 0) { mainDrawItem(hilitedItem, false); hilitedItem = -1; } return false; case ctlSelectEvent: if(e->data.ctlSelect.controlID == newButtonId) { DateTimeType date; TimSecondsToDateTime(TimGetSeconds(), &date); StrPrintF(streamName, "%04d-%02d-%02d %02d:%02d:%02d", date.year, date.month, date.day, date.hour, date.minute, date.second); streamAttr = dmHdrAttrStream; StrPrintF(editFormTitle, "MMCd Log"); 
FrmGotoForm(editFormId); newLog=true; // Set when creating new log file return true; } return false; case menuEvent: switch (e->data.menu.itemID) { case alarmsMenuId: FrmPopupForm(alarmsFormId); return true; case aboutMenuId: doAboutDialog(); return true; case testMenuId: FrmGotoForm(testFormId); return true; case debugMenuId: FrmGotoForm(debugFormId); return true; case prefsMenuId: doPrefsDialog(); // PnlSetBank(&panel, &sensorPosition[currentBank * 8]); // Test to see if this updates panel slugs return true; case customMenuId: doCustomDialog(); return true; case systemMenuId: FrmPopupForm(systemFormId); return true; case sensorMenuId: FrmPopupForm(sensorFormId); return true; default: break; } default: break; } return false; } Boolean appHandleEvent(EventType *e) { if(e->eType == frmLoadEvent) { // Load the form resource. UInt16 formID = e->data.frmLoad.formID; FormPtr form = FrmInitForm(formID); FrmSetActiveForm(form); switch(formID) { case mainFormId: FrmSetEventHandler(form, mainFormHandleEvent); break; case editFormId: FrmSetEventHandler(form, editFormHandleEvent); break; case testFormId: FrmSetEventHandler(form, testFormHandleEvent); break; case debugFormId: FrmSetEventHandler(form, debugFormHandleEvent); break; case alarmsFormId: FrmSetEventHandler(form, alarmsFormHandleEvent); break; case systemFormId: FrmSetEventHandler (form, systemFormHandleEvent); break; case sensorFormId: FrmSetEventHandler (form, sensorFormHandleEvent); break; default: ErrFatalDisplayIf(1, "Unknown form!"); break; } } return false; } void loadPrefs() { // Prefs prefs = { 0, 0, 0, 0 }; PrefsType prefs = { 0, 0, 0, 0 }; UInt16 size = sizeof(PrefsType); UInt16 chksize = 0; UInt32 i; Err err = !errNone; // initialize sensors MemMove(_pnlSensor, _pnlSensorDefault, sizeof(_pnlSensor)); // check size of preferences before reading PrefGetAppPreferences(CRID, 0, NULL, &chksize, false); if (chksize == size) err = PrefGetAppPreferences(CRID, 0, &prefs, &size, false); if (err != errNone) { // 
Prefs struct has changed size so initialize PrefSetAppPreferences(CRID, 0, 0, NULL, 0, false); setDefaults(); } else { for(i = 0; i < SENSOR_COUNT; i++) { _pnlSensor[i].capture = !!(prefs.capture & (1L << i)); _pnlSensor[i].graph = !!(prefs.graph & (1L << i)); _pnlSensor[i].exists = !!(prefs.exists & (1L << i)); _pnlSensor[i].polarity = !!(prefs.polarity & (1L << i)); _pnlSensor[i].addr = prefs.addr[i]; _pnlSensor[i].threshold = prefs.threshold[i]; _pnlSensor[i].trigger = prefs.trigger[i]; _pnlSensor[i].sound = prefs.sound[i]; StrNCopy(_pnlSensor[i].slug, prefs.slug[i], 4); } graphSensors = prefs.graph; captureSensors = prefs.capture; currentBank = prefs.currentBank; currentMode = prefs.currentMode; topVisible = prefs.topVisible; serialSpeed = prefs.serialSpeed; ecuSpeed = prefs.ecuSpeed; overClock = prefs.overClock; //Insert SS 4/3/05 metricUnit = prefs.metricUnit; mapScaling = prefs.mapScaling; vehicleSelect = prefs.vehicleSelect; baseTiming = prefs.baseTiming; audibleAlarms = prefs.audibleAlarms; selectedSensor = prefs.selectedSensor; fHideUnused = prefs.hideUnused; numericMode = prefs.numericMode; audibleAlarms = prefs.audibleAlarms; autologEnabled = prefs.autologEnabled; o2NonLinEnabled = prefs.o2NonLinEnabled; MemMove(bigNumSelect, prefs.bigNumSelect, sizeof(bigNumSelect)); // V1.7d } } void savePrefs() { UInt16 i; // Prefs prefs; PrefsType prefs; prefs.capture = captureSensors; prefs.graph = graphSensors; prefs.currentBank = currentBank; prefs.currentMode = currentMode; prefs.topVisible = topVisible; prefs.serialSpeed = serialSpeed; prefs.ecuSpeed = ecuSpeed; prefs.overClock = overClock; // Insert SS 4/3/05 prefs.metricUnit = metricUnit; prefs.mapScaling = mapScaling; prefs.vehicleSelect = vehicleSelect; prefs.baseTiming = baseTiming; prefs.selectedSensor = selectedSensor; prefs.hideUnused = fHideUnused; prefs.numericMode = numericMode; prefs.audibleAlarms = audibleAlarms; prefs.autologEnabled = autologEnabled; prefs.o2NonLinEnabled = o2NonLinEnabled; 
MemMove(prefs.bigNumSelect, bigNumSelect, sizeof(bigNumSelect)); // V1.7d prefs.exists = 0; prefs.polarity = 0; for(i = 0; i < SENSOR_COUNT; i++) { if(_pnlSensor[i].exists) prefs.exists |= (1L << i); prefs.addr[i] = _pnlSensor[i].addr; MemMove(prefs.slug[i], _pnlSensor[i].slug, 4); prefs.threshold[i] = _pnlSensor[i].threshold; prefs.trigger[i] = _pnlSensor[i].trigger; if(_pnlSensor[i].polarity) prefs.polarity |= (1L << i); prefs.sound[i] = _pnlSensor[i].sound; } PrefSetAppPreferences(CRID, 0, 0, &prefs, sizeof(prefs), false); } UInt32 PilotMain(UInt16 cmd, void *pbp, UInt16 flags __attribute__ ((unused))) { switch(cmd) { case sysAppLaunchCmdNormalLaunch: { EventType event; Boolean screenEnableColor; UInt32 version; // check if PalmOS version is 3.5 or newer if(errNone != FtrGet(sysFtrCreator, sysFtrNumROMVersion, &version) || version < 0x03503000) { FrmAlert(versionAlertId); return 0; } colorMode = false; screenDensity = 1; if(errNone == WinScreenMode(winScreenModeGet, &screenWidth, &screenHeight, &screenDepth, &screenEnableColor)) { if(screenDepth >= 8) colorMode = true; } if(errNone == FtrGet(sysFtrCreator, sysFtrNumWinVersion, &version) && version >= 4) { UInt32 attr; WinScreenGetAttribute(winScreenDensity, &attr); switch(attr) { case kDensityDouble: screenDensity = 2; break; case kDensityTriple: screenDensity = 3; break; case kDensityQuadruple: screenDensity = 4; break; } } // If on Handspring device, disable the keyboard // thread before opening the serial library. 
if(!FtrGet('hsEx', 0, &version)) HsExtKeyboardEnable(false); AllocateMem(); ticksPerSecond = SysTicksPerSecond(); loadPrefs(); FrmGotoForm(mainFormId); bigFont = MemHandleLock(DmGetResource('NFNT', bigFontId)); FntDefineFont(fntAppFontCustomBase, bigFont); talkingECU = false; portOpen = false; do { /* Modified SP 06/03/03 - Wait up to 1 second for events */ /* reduced timeout to 1/50 sec, because null events are used by graph widget to scroll diplay when pen is dragged to the edge of the screen -/dmitry */ EvtGetEvent(&event, talkingECU ? 0 : ticksPerSecond / 50); /* End Modified */ (void) ( appHandleEvent(&event) || FrmDispatchEvent(&event) ); } while(event.eType != appStopEvent); MemHandleUnlock(DmGetResource('NFNT', bigFontId)); //Added SS 11/23/04 to avoid chunk over-locked error FrmCloseAllForms(); closePort(); // just in case savePrefs(); ReleaseMem(); } break; case sysAppLaunchCmdSystemReset: break; case sysAppLaunchCmdAlarmTriggered: { SysAlarmTriggeredParamType *param = pbp; param->purgeAlarm = true; } break; } return 0; }
gpl-2.0
sstjohn/wireshark
epan/dissectors/packet-db-lsp.c
1
7779
/* packet-db-lsp.c * Routines for Dropbox LAN sync Protocol * * Copyright 2010, Stig Bjorlykke <stig@bjorlykke.org> * * $Id$ * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * Copyright 1998 Gerald Combs * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include <glib.h> #include <epan/packet.h> #include <epan/asn1.h> #include <epan/prefs.h> #include <epan/expert.h> #include "packet-tcp.h" #include "packet-x509af.h" #define PNAME "Dropbox LAN sync Protocol" #define PSNAME "DB-LSP" #define PFNAME "db-lsp" #define PNAME_DISC "Dropbox LAN sync Discovery Protocol" #define PSNAME_DISC "DB-LSP-DISC" #define PFNAME_DISC "db-lsp-disc" #define DB_LSP_PORT 17500 static int proto_db_lsp = -1; static int proto_db_lsp_disc = -1; static int hf_type = -1; static int hf_magic = -1; static int hf_length = -1; static int hf_opvalue = -1; static int hf_data = -1; static int hf_value = -1; static int hf_text = -1; static gint ett_db_lsp = -1; /* desegmentation of tcp payload */ static gboolean db_lsp_desegment = TRUE; #define TYPE_CONFIG 0x16 #define TYPE_DATA 0x17 static const value_string type_vals[] = { { TYPE_CONFIG, "Configuration" }, { TYPE_DATA, "Data" }, { 0, NULL } }; #define OP_CERT 0x0B static const value_string op_vals[] = { { OP_CERT, "Certificate" }, { 0, NULL } }; static 
/*
 * Dissect one reassembled DB-LSP PDU.
 *
 * Wire layout (after an optional 0x80-type prefix with two extra bytes):
 *   1 byte  type   (TYPE_CONFIG / TYPE_DATA / other)
 *   2 bytes magic  (expected 0x0301; anything else is treated as unknown)
 *   2 bytes length (payload byte count)
 *   length bytes of payload
 */
void dissect_db_lsp_pdu (tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree)
{
  proto_tree *db_lsp_tree;
  proto_item *db_lsp_item;
  gint offset = 0;
  guint8 type, opvalue;
  guint16 magic, length;

  col_set_str (pinfo->cinfo, COL_PROTOCOL, PSNAME);
  col_set_str (pinfo->cinfo, COL_INFO, PNAME);

  db_lsp_item = proto_tree_add_item (tree, proto_db_lsp, tvb, offset, -1, ENC_NA);
  db_lsp_tree = proto_item_add_subtree (db_lsp_item, ett_db_lsp);

  type = tvb_get_guint8 (tvb, offset);
  proto_tree_add_item (db_lsp_tree, hf_type, tvb, offset, 1, ENC_BIG_ENDIAN);
  offset += 1;

  if (type == 0x80) {
    /* Two unknown bytes */
    offset += 2;
  }

  magic = tvb_get_ntohs (tvb, offset);
  proto_tree_add_item (db_lsp_tree, hf_magic, tvb, offset, 2, ENC_BIG_ENDIAN);
  offset += 2;

  length = tvb_get_ntohs (tvb, offset);
  proto_tree_add_item (db_lsp_tree, hf_length, tvb, offset, 2, ENC_BIG_ENDIAN);
  offset += 2;

  /* Bail out early on unknown magic or a length that overruns the buffer. */
  if (magic != 0x0301 || length > tvb_length_remaining (tvb, offset)) {
    /* Probably an unknown packet */
    /* expert_add_info_format (pinfo, db_lsp_item, PI_UNDECODED, PI_WARN, "Unknown packet"); */
    return;
  }

  if (type == TYPE_CONFIG) {
    opvalue = tvb_get_guint8 (tvb, offset);
    proto_tree_add_item (db_lsp_tree, hf_opvalue, tvb, offset, 1, ENC_BIG_ENDIAN);

    if (opvalue == OP_CERT) {
      /* X509 Certificate.
       * NOTE(review): the fixed 10-byte skip assumes a constant header in
       * front of the certificate; if length < 10 the subtraction wraps the
       * guint16 — confirm callers guarantee length >= 10 here. */
      tvbuff_t *cert_tvb = tvb_new_subset (tvb, offset+10, length-10, length-10);
      dissect_x509af_Certificate_PDU (cert_tvb, pinfo, db_lsp_tree);
    } else {
      proto_tree_add_item (db_lsp_tree, hf_value, tvb, offset, length, ENC_NA);
    }
  } else if (type == TYPE_DATA) {
    proto_tree_add_item (db_lsp_tree, hf_data, tvb, offset, length, ENC_NA);
  } else {
    proto_tree_add_item (db_lsp_tree, hf_value, tvb, offset, length, ENC_NA);
  }
  /*offset += length;*/

  proto_item_append_text (db_lsp_item, ", Type: %d, Length: %d", type, length);
  /* 5 = 1 (type) + 2 (magic) + 2 (length) header bytes */
  proto_item_set_len (db_lsp_item, length + 5);
}

/*
 * tcp_dissect_pdus() callback: return the total size (header + payload) of
 * the PDU starting at `offset`, peeking at the magic/length fields.
 */
static guint get_db_lsp_pdu_len (packet_info *pinfo _U_, tvbuff_t *tvb, int offset)
{
  if (tvb_get_ntohs (tvb, offset + 1) != 0x0301) {
    /* Unknown data, eat
remaining data for this frame */ return tvb_length_remaining (tvb, offset); } return tvb_get_ntohs (tvb, offset + 3) + 5; } static void dissect_db_lsp_tcp (tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree) { tcp_dissect_pdus (tvb, pinfo, tree, db_lsp_desegment, 5, get_db_lsp_pdu_len, dissect_db_lsp_pdu); } static void dissect_db_lsp_disc (tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree) { proto_tree *db_lsp_tree; proto_item *db_lsp_item; gint offset = 0; col_set_str (pinfo->cinfo, COL_PROTOCOL, PSNAME_DISC); col_set_str (pinfo->cinfo, COL_INFO, PNAME_DISC); db_lsp_item = proto_tree_add_item (tree, proto_db_lsp_disc, tvb, offset, -1, ENC_NA); db_lsp_tree = proto_item_add_subtree (db_lsp_item, ett_db_lsp); proto_tree_add_item (db_lsp_tree, hf_text, tvb, offset, -1, ENC_ASCII|ENC_NA); } void proto_register_db_lsp (void) { static hf_register_info hf[] = { { &hf_type, { "Type", "db-lsp.type", FT_UINT8, BASE_DEC_HEX, VALS(type_vals), 0x0, NULL, HFILL } }, { &hf_magic, { "Magic", "db-lsp.magic", FT_UINT16, BASE_DEC_HEX, NULL, 0x0, "Magic number", HFILL } }, { &hf_length, { "Length", "db-lsp.length", FT_UINT16, BASE_DEC_HEX, NULL, 0x0, "Length in bytes", HFILL } }, { &hf_opvalue, { "OP Value", "db-lsp.op", FT_UINT8, BASE_DEC_HEX, VALS(op_vals), 0x0, NULL, HFILL } }, { &hf_value, { "Value", "db-lsp.value", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_data, { "Data", "db-lsp.data", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_text, { "Text", "db-lsp.text", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL } }, }; static gint *ett[] = { &ett_db_lsp, }; module_t *db_lsp_module; proto_db_lsp = proto_register_protocol (PNAME, PSNAME, PFNAME); proto_db_lsp_disc = proto_register_protocol (PNAME_DISC, PSNAME_DISC, PFNAME_DISC); register_dissector ("db-lsp.tcp", dissect_db_lsp_tcp, proto_db_lsp); register_dissector ("db-lsp.udp", dissect_db_lsp_disc, proto_db_lsp_disc); proto_register_field_array (proto_db_lsp, hf, array_length (hf)); 
proto_register_subtree_array (ett, array_length (ett)); /* Register our configuration options */ db_lsp_module = prefs_register_protocol (proto_db_lsp, NULL); prefs_register_bool_preference (db_lsp_module, "desegment_pdus", "Reassemble PDUs spanning multiple TCP segments", "Whether the LAN sync dissector should reassemble PDUs" " spanning multiple TCP segments." " To use this option, you must also enable \"Allow subdissectors" " to reassemble TCP streams\" in the TCP protocol settings.", &db_lsp_desegment); } void proto_reg_handoff_db_lsp (void) { dissector_handle_t db_lsp_tcp_handle; dissector_handle_t db_lsp_udp_handle; db_lsp_tcp_handle = find_dissector ("db-lsp.tcp"); db_lsp_udp_handle = find_dissector ("db-lsp.udp"); dissector_add_uint ("tcp.port", DB_LSP_PORT, db_lsp_tcp_handle); dissector_add_uint ("udp.port", DB_LSP_PORT, db_lsp_udp_handle); } /* * Editor modelines * * Local Variables: * c-basic-offset: 2 * tab-width: 8 * indent-tabs-mode: nil * End: * * ex: set shiftwidth=2 tabstop=8 expandtab: * :indentSize=2:tabSize=8:noTabs=true: */
gpl-2.0
vishesh/kde-baseapps
konqueror/settings/performance/konqueror.cpp
1
6997
/* * Copyright (c) 2003 Lubos Lunak <l.lunak@kde.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "konqueror.h" #include <kconfig.h> #include <QtDBus/QtDBus> #include <QtGui/QRadioButton> #include <QtGui/QLabel> #include <QtGui/QCheckBox> #include <klocale.h> namespace KCMPerformance { Konqueror::Konqueror( QWidget* parent_P ) : Konqueror_ui( parent_P ) { rb_never_reuse->setWhatsThis( i18n( "Disables the minimization of memory usage and allows you " "to make each browsing activity independent from the others" )); rb_file_browsing_reuse->setWhatsThis( i18n( "<p>With this option activated, only one instance of Konqueror " "used for file browsing will exist in the memory of your computer " "at any moment, " "no matter how many file browsing windows you open, " "thus reducing resource requirements.</p>" "<p>Be aware that this also means that, if something goes wrong, " "all your file browsing windows will be closed simultaneously</p>" )); rb_always_reuse->setWhatsThis( i18n( "<p>With this option activated, only one instance of Konqueror " "will exist in the memory of your computer at any moment, " "no matter how many browsing windows you open, " "thus reducing resource requirements.</p>" "<p>Be aware that this also means that, if something goes wrong, " "all your browsing windows will be closed 
simultaneously.</p>" )); connect( rb_never_reuse, SIGNAL( toggled(bool)), SIGNAL( changed())); connect( rb_file_browsing_reuse, SIGNAL( toggled(bool)), SIGNAL( changed())); connect( rb_always_reuse, SIGNAL( toggled(bool)), SIGNAL( changed())); rb_file_browsing_reuse->setChecked( true ); QString tmp = i18n( "<p>If non-zero, this option allows keeping Konqueror instances " "in memory after all their windows have been closed, up to the " "number specified in this option.</p>" "<p>When a new Konqueror instance is needed, one of these preloaded " "instances will be reused instead, improving responsiveness at " "the expense of the memory required by the preloaded instances.</p>" ); sb_preload_count->setWhatsThis( tmp ); lb_preload_count->setWhatsThis( tmp ); cb_preload_on_startup->setWhatsThis( i18n( "<p>If enabled, an instance of Konqueror will be preloaded after the ordinary KDE " "startup sequence.</p>" "<p>This will make the first Konqueror window open faster, but " "at the expense of longer KDE startup times (but you will be able to work " "while it is loading, so you may not even notice that it is taking longer).</p>" )); cb_always_have_preloaded->setWhatsThis( i18n( "<p>If enabled, KDE will always try to have one preloaded Konqueror instance ready; " "preloading a new instance in the background whenever there is not one available, " "so that windows will always open quickly.</p>" "<p><b>Warning:</b> In some cases, it is actually possible that this will " "reduce perceived performance.</p>" )); connect( sb_preload_count, SIGNAL( valueChanged( int )), SLOT( preload_count_changed( int ))); connect( sb_preload_count, SIGNAL( valueChanged( int )), SIGNAL( changed())); connect( cb_preload_on_startup, SIGNAL( toggled(bool)), SIGNAL( changed())); connect( cb_always_have_preloaded, SIGNAL( toggled(bool)), SIGNAL( changed())); defaults(); } void Konqueror::preload_count_changed( int count ) { cb_preload_on_startup->setEnabled( count >= 1 ); // forcing preloading with count 
== 1 can often do more harm than good, because // if there's one konqy preloaded, and the user requests "starting" new konqueror, // the preloaded instance will be used, new one will be preloaded, and if the user soon // "quits" konqueror, one of the instances will have to be terminated cb_always_have_preloaded->setEnabled( count >= 2 ); } void Konqueror::load() { KConfig _cfg( "konquerorrc" ); KConfigGroup cfg(&_cfg, "Reusing" ); allowed_parts = cfg.readEntry( "SafeParts", "SAFE" ); if( allowed_parts == "ALL" ) rb_always_reuse->setChecked( true ); else if( allowed_parts.isEmpty()) rb_never_reuse->setChecked( true ); else rb_file_browsing_reuse->setChecked( true ); sb_preload_count->setValue( cfg.readEntry( "MaxPreloadCount", 1 )); cb_always_have_preloaded->setChecked( cfg.readEntry( "AlwaysHavePreloaded", false)); cb_preload_on_startup->setChecked( cfg.readEntry( "PreloadOnStartup", false)); } void Konqueror::save() { KConfig _cfg( "konquerorrc" ); KConfigGroup cfg(&_cfg, "Reusing" ); if( rb_always_reuse->isChecked()) allowed_parts = "ALL"; else if( rb_never_reuse->isChecked()) allowed_parts = ""; else { if( allowed_parts.isEmpty() || allowed_parts == "ALL" ) allowed_parts = "SAFE"; // else - keep allowed_parts as read from the file, as the user may have modified the list there } cfg.writeEntry( "SafeParts", allowed_parts ); int count = sb_preload_count->value(); cfg.writeEntry( "MaxPreloadCount", count ); cfg.writeEntry( "PreloadOnStartup", cb_preload_on_startup->isChecked() && count >= 1 ); cfg.writeEntry( "AlwaysHavePreloaded", cb_always_have_preloaded->isChecked() && count >= 2 ); cfg.sync(); QDBusMessage message = QDBusMessage::createSignal("/KonqMain", "org.kde.Konqueror.Main", "reparseConfiguration"); QDBusConnection::sessionBus().send(message); QDBusInterface kded("org.kde.kded", "/modules/konqy_preloader", "org.kde.konqueror.Preloader"); kded.call( "reconfigure" ); } void Konqueror::defaults() { rb_file_browsing_reuse->setChecked( true ); allowed_parts = 
"SAFE"; sb_preload_count->setValue( 1 ); cb_preload_on_startup->setChecked( false ); cb_always_have_preloaded->setChecked( false ); preload_count_changed( sb_preload_count->value()); } } // namespace #include "konqueror.moc"
gpl-2.0
boyska/chdkripto
platform/s90/sub/101c/lib.c
1
3067
#include "platform.h" #include "stdlib.h" #include "lolevel.h" /* ********************* G11 ********************** note sensor size for camera.h is from @ FFB09A68 0xEE9200 = 15634944 15634944/12 * 8 = 10423296 --- | @ FFB09690 | 0xEA0 and 0xAE0 = | 3744 * 2784 = 10423296 --- ROM:FFB025A8 LDR R1, =0xEE9200 ROM:FFB025AC ADR R0, aCrawBuffSizeP ; "CRAW BUFF SIZE %p" ROM:FF933B98 MOV R2, #0xEA0 ROM:FF933B9C MOV R3, #0xAE0 */ /* G11: IMG VRAM BUFF = 0x41B4AE44 //0x4161CFC4 THUM VRAM BUFF= 0x40A85C30 //0x40A1C030 CRAW BUFF = 0x4213A6EC //0x41C0F460 CRAW BUFF SIZE= 0xEE9200 JPEG BUFF = 0x43026600 //0x42DEAC00 (0x1215400) */ /* LED c0220130 dp green 1 1 = red 0 1= orange 34 dp orange 2c 3c = poweroff */ char *hook_raw_image_addr() { return (char*)0x4213A6EC; // search for aCrwaddressLxCr " CrwAddress %lx, CrwSize H %ld V %ld\r" 0x41c0f460 0xEA0 0xAE0 // or for aCrawBuffP DCB "CRAW BUFF %p",0 } long hook_raw_size() { return 0xEE9200;// Search for "aCrawBuffSizeP" 0xEE9200 } void *vid_get_viewport_live_fb() { void **fb=(void **)0x2240; //100c, 101a, 101c @ff84e0b4; sub_ff84d748 unsigned char buff = *((unsigned char*)0x2084); //100c, 101a, 101c @ff84de2c; sub_ff84d748 if (buff == 0) { buff = 2; } else { buff--; } return fb[buff]; } void *vid_get_bitmap_fb() { return (void*)0x40471000; // G11 OK @FF858728 (at end of function DispCon_ShowBlackChart } void *vid_get_viewport_fb() { return (void*)0x407E8A00; /* ROM:FFAFF2DC LDR R1, =0x407E8A00 ROM:FFAFF2E0 LDR R0, =0x7E900 ROM:FFAFF2E4 STR R1, [R4] ROM:FFAFF2E8 STR R0, [R4,#4] ROM:FFAFF2EC ADR R0, aVramAddressP ; "VRAM Address : %p\r" ROM:FFAFF2F0 BL sub_FF93500C ROM:FFAFF2F4 LDR R1, [R4,#4] ROM:FFAFF2F8 ADR R0, aVramSize0xX ; "VRAM Size : 0x%x\r" */ } // Histo etc. when in play mode maybe ? 
void *vid_get_viewport_fb_d() { return (void*)(*(int*)(0x2A20+0x58)); // G11 // S90: 0x58 @FF86FA30 0x2A50 @FF86F9F8 (Search for aImageplayer_c) } /* ERR99: ToDO: Check if this is also ok for G11 (taken from SX200IS port) */ void JogDial_CW(void){ _PostLogicalEventForNotPowerType(0x874, 1); // RotateJogDialRight } void JogDial_CCW(void){ _PostLogicalEventForNotPowerType(0x875, 1); // RotateJogDialLeft } char *camera_jpeg_count_str() { return (char*)0x9792C;// S90 OK /* Search for a9999 ; "9999" */ } void *vid_get_bitmap_active_palette() { return (void *)*(unsigned int*)(0x5CFC+0x2C); // sub_FF9152C4, via sub_FFAE56B0 two refs to "Palette Class." } void *vid_get_bitmap_active_buffer() { return (void*)(*(int*)(0x5CFC+0x18)); //sub_FF9152C4 via "<GetBmpVramInfo> Add: %p Width : %ld Hight : %ld" }
gpl-2.0
loginab/esxdrivers
vmkdrivers/src26/drivers/net/ixgbe/ixgbe_82598.c
1
27976
/******************************************************************************* Intel 10 Gigabit PCI Express Linux driver Copyright(c) 1999 - 2008 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include "ixgbe_type.h" #include "ixgbe_api.h" #include "ixgbe_common.h" #include "ixgbe_phy.h" s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw); s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg); s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg); enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num); s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw); s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete); s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete); s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw); s32 
ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete); s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw); s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index); /** * ixgbe_init_ops_82598 - Inits func ptrs and MAC type * @hw: pointer to hardware structure * * Initialize the function pointers and assign the MAC type for 82598. * Does not touch the hardware. **/ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val; ret_val = ixgbe_init_phy_ops_generic(hw); ret_val = ixgbe_init_ops_generic(hw); /* MAC */ mac->ops.reset_hw = &ixgbe_reset_hw_82598; mac->ops.get_media_type = &ixgbe_get_media_type_82598; /* LEDs */ mac->ops.blink_led_start = &ixgbe_blink_led_start_82598; mac->ops.blink_led_stop = &ixgbe_blink_led_stop_82598; /* RAR, Multicast, VLAN */ mac->ops.set_vmdq = &ixgbe_set_vmdq_82598; mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598; mac->ops.set_vfta = &ixgbe_set_vfta_82598; mac->ops.clear_vfta = &ixgbe_clear_vfta_82598; /* Flow Control */ mac->ops.setup_fc = &ixgbe_setup_fc_82598; /* Call PHY identify routine to get the phy type */ phy->ops.identify(hw); /* PHY Init */ switch (hw->phy.type) { case ixgbe_phy_tn: phy->ops.check_link = &ixgbe_check_phy_link_tnx; phy->ops.get_firmware_version = &ixgbe_get_phy_firmware_version_tnx; break; case ixgbe_phy_nl: phy->ops.reset = &ixgbe_reset_phy_nl; break; default: break; } /* Link */ mac->ops.check_link = &ixgbe_check_mac_link_82598; if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { mac->ops.setup_link = 
&ixgbe_setup_copper_link_82598; mac->ops.setup_link_speed = &ixgbe_setup_copper_link_speed_82598; mac->ops.get_link_capabilities = &ixgbe_get_copper_link_capabilities_82598; } else { mac->ops.setup_link = &ixgbe_setup_mac_link_82598; mac->ops.setup_link_speed = &ixgbe_setup_mac_link_speed_82598; mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598; } mac->mcft_size = 128; mac->vft_size = 128; mac->num_rar_entries = 16; mac->max_tx_queues = 32; mac->max_rx_queues = 64; return IXGBE_SUCCESS; } /** * ixgbe_get_link_capabilities_82598 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed * @autoneg: boolean auto-negotiation value * * Determines the link capabilities by reading the AUTOC register. **/ s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { s32 status = IXGBE_SUCCESS; s32 autoc_reg; autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); if (hw->mac.link_settings_loaded) { autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE; autoc_reg &= ~IXGBE_AUTOC_LMS_MASK; autoc_reg |= hw->mac.link_attach_type; autoc_reg |= hw->mac.link_mode_select; } switch (autoc_reg & IXGBE_AUTOC_LMS_MASK) { case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; *autoneg = false; break; case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_10GB_FULL; *autoneg = false; break; case IXGBE_AUTOC_LMS_1G_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; *autoneg = true; break; case IXGBE_AUTOC_LMS_KX4_AN: case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: *speed = IXGBE_LINK_SPEED_UNKNOWN; if (autoc_reg & IXGBE_AUTOC_KX4_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc_reg & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; *autoneg = true; break; default: status = IXGBE_ERR_LINK_SETUP; break; } return status; } /** * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed * @autoneg: boolean 
auto-negotiation value * * Determines the link capabilities by reading the AUTOC register. **/ s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { s32 status = IXGBE_ERR_LINK_SETUP; u16 speed_ability; *speed = 0; *autoneg = true; status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &speed_ability); if (status == IXGBE_SUCCESS) { if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) *speed |= IXGBE_LINK_SPEED_1GB_FULL; } return status; } /** * ixgbe_get_media_type_82598 - Determines media type * @hw: pointer to hardware structure * * Returns the media type (fiber, copper, backplane) **/ enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) { enum ixgbe_media_type media_type; /* Media type for I82598 is based on device ID */ switch (hw->device_id) { case IXGBE_DEV_ID_82598AF_DUAL_PORT: case IXGBE_DEV_ID_82598AF_SINGLE_PORT: case IXGBE_DEV_ID_82598EB_CX4: case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: case IXGBE_DEV_ID_82598_DA_DUAL_PORT: case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: case IXGBE_DEV_ID_82598EB_XF_LR: media_type = ixgbe_media_type_fiber; break; case IXGBE_DEV_ID_82598AT: media_type = ixgbe_media_type_copper; break; default: media_type = ixgbe_media_type_unknown; break; } return media_type; } /** * ixgbe_setup_fc_82598 - Configure flow control settings * @hw: pointer to hardware structure * @packetbuf_num: packet buffer number (0-7) * * Configures the flow control settings based on SW configuration. This * function is used for 802.3x flow control configuration only. 
**/ s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num) { u32 frctl_reg; u32 rmcs_reg; if (packetbuf_num < 0 || packetbuf_num > 7) { DEBUGOUT1("Invalid packet buffer number [%d], expected range is" " 0-7\n", packetbuf_num); ASSERT(0); } frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); /* * 10 gig parts do not have a word in the EEPROM to determine the * default flow control setting, so we explicitly set it to full. */ if (hw->fc.type == ixgbe_fc_default) hw->fc.type = ixgbe_fc_full; /* * We want to save off the original Flow Control configuration just in * case we get disconnected and then reconnected into a different hub * or switch with different Flow Control capabilities. */ hw->fc.original_type = hw->fc.type; /* * The possible values of the "flow_control" parameter are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames but not * send pause frames). * 2: Tx flow control is enabled (we can send pause frames but we do not * support receiving pause frames) * 3: Both Rx and Tx flow control (symmetric) are enabled. * other: Invalid. */ switch (hw->fc.type) { case ixgbe_fc_none: break; case ixgbe_fc_rx_pause: /* * Rx Flow control is enabled, * and Tx Flow control is disabled. */ frctl_reg |= IXGBE_FCTRL_RFCE; break; case ixgbe_fc_tx_pause: /* * Tx Flow control is enabled, and Rx Flow control is disabled, * by a software over-ride. */ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; break; case ixgbe_fc_full: /* * Flow control (both Rx and Tx) is enabled by a software * over-ride. */ frctl_reg |= IXGBE_FCTRL_RFCE; rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; break; default: /* We should never get here. The value should be 0-3. */ DEBUGOUT("Flow control param set incorrectly\n"); ASSERT(0); break; } /* Enable 802.3x based flow control settings. 
*/ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg); IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); /* * Check for invalid software configuration, zeros are completely * invalid for all parameters used past this point, and if we enable * flow control with zero water marks, we blast flow control packets. */ if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { DEBUGOUT("Flow control structure initialized incorrectly\n"); return IXGBE_ERR_INVALID_LINK_SETTINGS; } /* * We need to set up the Receive Threshold high and low water * marks as well as (optionally) enabling the transmission of * XON frames. */ if (hw->fc.type & ixgbe_fc_tx_pause) { if (hw->fc.send_xon) { IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), (hw->fc.low_water | IXGBE_FCRTL_XONE)); } else { IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), hw->fc.low_water); } IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), (hw->fc.high_water)|IXGBE_FCRTH_FCEN); } IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time); IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); return IXGBE_SUCCESS; } /** * ixgbe_setup_mac_link_82598 - Configures MAC link settings * @hw: pointer to hardware structure * * Configures link settings based on values in the ixgbe_hw struct. * Restarts the link. Performs autonegotiation if needed. 
**/ s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw) { u32 autoc_reg; u32 links_reg; u32 i; s32 status = IXGBE_SUCCESS; autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); if (hw->mac.link_settings_loaded) { autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE; autoc_reg &= ~IXGBE_AUTOC_LMS_MASK; autoc_reg |= hw->mac.link_attach_type; autoc_reg |= hw->mac.link_mode_select; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); IXGBE_WRITE_FLUSH(hw); msleep(50); } /* Restart link */ autoc_reg |= IXGBE_AUTOC_AN_RESTART; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); /* Only poll for autoneg to complete if specified to do so */ if (hw->phy.autoneg_wait_to_complete) { if (hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN || hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { links_reg = 0; /* Just in case Autoneg time = 0 */ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_reg & IXGBE_LINKS_KX_AN_COMP) break; msleep(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; DEBUGOUT("Autonegotiation did not complete.\n"); } } } /* * We want to save off the original Flow Control configuration just in * case we get disconnected and then reconnected into a different hub * or switch with different Flow Control capabilities. 
*/ hw->fc.original_type = hw->fc.type; ixgbe_setup_fc_82598(hw, 0); /* Add delay to filter out noises during initial link setup */ msleep(50); return status; } /** * ixgbe_check_mac_link_82598 - Get link/speed status * @hw: pointer to hardware structure * @speed: pointer to link speed * @link_up: true is link is up, false otherwise * @link_up_wait_to_complete: bool used to wait for link up or not * * Reads the links register to determine if link is up and the current speed **/ s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) { u32 links_reg; u32 i; u16 link_reg, adapt_comp_reg; /* * SERDES PHY requires us to read link status from undocumented * register 0xC79F. Bit 0 set indicates link is up/ready; clear * indicates link down. OxC00C is read to check that the XAUI lanes * are active. Bit 0 clear indicates active; set indicates inactive. */ if (hw->phy.type == ixgbe_phy_nl) { hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, &adapt_comp_reg); if (link_up_wait_to_complete) { for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) { *link_up = true; break; } else { *link_up = false; } msleep(100); hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, &adapt_comp_reg); } } else { if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) *link_up = true; else *link_up = false; } if (*link_up == false) goto out; } links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (link_up_wait_to_complete) { for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { if (links_reg & IXGBE_LINKS_UP) { *link_up = true; break; } else { *link_up = false; } msleep(100); links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); } } else { if (links_reg & IXGBE_LINKS_UP) *link_up = true; else *link_up = false; } if (links_reg & 
IXGBE_LINKS_SPEED) *speed = IXGBE_LINK_SPEED_10GB_FULL; else *speed = IXGBE_LINK_SPEED_1GB_FULL; out: return IXGBE_SUCCESS; } /** * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed * @autoneg: true if auto-negotiation enabled * @autoneg_wait_to_complete: true if waiting is needed to complete * * Set the link speed in the AUTOC register and restarts link. **/ s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete) { s32 status = IXGBE_SUCCESS; /* If speed is 10G, then check for CX4 or XAUI. */ if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) { hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; } else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) { hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN; } else if (autoneg) { /* BX mode - Autonegotiate 1G */ if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD)) hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN; else /* KX/KX4 mode */ hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN_1G_AN; } else { status = IXGBE_ERR_LINK_SETUP; } if (status == IXGBE_SUCCESS) { hw->phy.autoneg_wait_to_complete = autoneg_wait_to_complete; hw->mac.link_settings_loaded = true; /* * Setup and restart the link based on the new values in * ixgbe_hw This will write the AUTOC register based on the new * stored values */ ixgbe_setup_mac_link_82598(hw); } return status; } /** * ixgbe_setup_copper_link_82598 - Setup copper link settings * @hw: pointer to hardware structure * * Configures link settings based on values in the ixgbe_hw struct. * Restarts the link. Performs autonegotiation if needed. Restart * phy and wait for autonegotiate to finish. Then synchronize the * MAC and PHY. 
**/ s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw) { s32 status; /* Restart autonegotiation on PHY */ status = hw->phy.ops.setup_link(hw); /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */ hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX); hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; /* Set up MAC */ ixgbe_setup_mac_link_82598(hw); return status; } /** * ixgbe_setup_copper_link_speed_82598 - Set the PHY autoneg advertised field * @hw: pointer to hardware structure * @speed: new link speed * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true if waiting is needed to complete * * Sets the link speed in the AUTOC register in the MAC and restarts link. **/ s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete) { s32 status; /* Setup the PHY according to input speed */ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, autoneg_wait_to_complete); /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */ hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX); hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; /* Set up MAC */ ixgbe_setup_mac_link_82598(hw); return status; } /** * ixgbe_reset_hw_82598 - Performs hardware reset * @hw: pointer to hardware structure * * Resets the hardware by resetting the transmit and receive units, masks and * clears all interrupts, performing a PHY reset, and performing a link (MAC) * reset. **/ s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; u32 ctrl; u32 gheccr; u32 i; u32 autoc; u8 analog_val; /* Call adapter stop to disable tx/rx and clear interrupts */ hw->mac.ops.stop_adapter(hw); /* * Power up the Atlas Tx lanes if they are currently powered down. * Atlas Tx lanes are powered down for MAC loopback tests, but * they are not automatically restored on reset. 
*/ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { /* Enable Tx Atlas so packets can be transmitted again */ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val); hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val); analog_val &= ~ IXGBE_ATLAS_PDN_TX_10G_QL_ALL; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val); hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val); hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val); } /* Reset PHY */ if (hw->phy.reset_disable == false) hw->phy.ops.reset(hw); /* * Prevent the PCI-E bus from from hanging by disabling PCI-E master * access and verify no pending requests before reset */ if (ixgbe_disable_pcie_master(hw) != IXGBE_SUCCESS) { status = IXGBE_ERR_MASTER_REQUESTS_PENDING; DEBUGOUT("PCI-E Master disable polling has failed.\n"); } /* * Issue global reset to the MAC. This needs to be a SW reset. 
* If link reset is used, it might reset the MAC when mng is using it */ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); IXGBE_WRITE_FLUSH(hw); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST)) break; } if (ctrl & IXGBE_CTRL_RST) { status = IXGBE_ERR_RESET_FAILED; DEBUGOUT("Reset polling failed to complete.\n"); } msleep(50); gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); /* * AUTOC register which stores link settings gets cleared * and reloaded from EEPROM after reset. We need to restore * our stored value from init in case SW changed the attach * type or speed. If this is the first time and link settings * have not been stored, store default settings from AUTOC. */ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); if (hw->mac.link_settings_loaded) { autoc &= ~(IXGBE_AUTOC_LMS_ATTACH_TYPE); autoc &= ~(IXGBE_AUTOC_LMS_MASK); autoc |= hw->mac.link_attach_type; autoc |= hw->mac.link_mode_select; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); } else { hw->mac.link_attach_type = (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE); hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK); hw->mac.link_settings_loaded = true; } /* Store the permanent mac address */ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); return status; } /** * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address * @hw: pointer to hardware struct * @rar: receive address register index to associate with a VMDq index * @vmdq: VMDq set index **/ s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 rar_high; rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); rar_high &= ~IXGBE_RAH_VIND_MASK; rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); return IXGBE_SUCCESS; } /** * 
ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address * @hw: pointer to hardware struct * @rar: receive address register index to associate with a VMDq index * @vmdq: VMDq clear index (not used in 82598, but elsewhere) **/ s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; if (rar < rar_entries) { rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); if (rar_high & IXGBE_RAH_VIND_MASK) { rar_high &= ~IXGBE_RAH_VIND_MASK; IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); } } else { DEBUGOUT1("RAR index %d is out of range.\n", rar); } return IXGBE_SUCCESS; } /** * ixgbe_set_vfta_82598 - Set VLAN filter table * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VFTA * @vlan_on: boolean flag to turn on/off VLAN in VFTA * * Turn on/off specified VLAN in the VLAN filter table. **/ s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) { u32 regindex; u32 bitindex; u32 bits; u32 vftabyte; if (vlan > 4095) return IXGBE_ERR_PARAM; /* Determine 32-bit word position in array */ regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ /* Determine the location of the (VMD) queue index */ vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ /* Set the nibble for VMD queue index */ bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); bits &= (~(0x0F << bitindex)); bits |= (vind << bitindex); IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); /* Determine the location of the bit for this VLAN id */ bitindex = vlan & 0x1F; /* lower five bits */ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); if (vlan_on) /* Turn on this VLAN id */ bits |= (1 << bitindex); else /* Turn off this VLAN id */ bits &= ~(1 << bitindex); IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); return IXGBE_SUCCESS; } /** * 
ixgbe_clear_vfta_82598 - Clear VLAN filter table * @hw: pointer to hardware structure * * Clears the VLAN filer table, and the VMDq index associated with the filter **/ s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) { u32 offset; u32 vlanbyte; for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), 0); return IXGBE_SUCCESS; } /** * ixgbe_blink_led_start_82598 - Blink LED based on index. * @hw: pointer to hardware structure * @index: led number to blink **/ s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index) { ixgbe_link_speed speed = 0; bool link_up = 0; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); /* * Link must be up to auto-blink the LEDs on the 82598EB MAC; * force it if link is down. */ hw->mac.ops.check_link(hw, &speed, &link_up, false); if (!link_up) { autoc_reg |= IXGBE_AUTOC_FLU; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); msleep(10); } led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg |= IXGBE_LED_BLINK(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); return IXGBE_SUCCESS; } /** * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index. * @hw: pointer to hardware structure * @index: led number to stop blinking **/ s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index) { u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); autoc_reg &= ~IXGBE_AUTOC_FLU; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg &= ~IXGBE_LED_BLINK(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); return IXGBE_SUCCESS; }
gpl-2.0
vinni-au/vega-strike
vegastrike/src/networking/netclient_login.cpp
1
28692
#include "networking/lowlevel/vsnet_debug.h" #include "cmd/unit_generic.h" #include "vs_globals.h" #include "vsfilesystem.h" #include "networking/netclient.h" #include "savegame.h" #include "main_loop.h" #include "networking/lowlevel/netbuffer.h" #include "networking/lowlevel/packet.h" #include "lin_time.h" #include "networking/lowlevel/vsnet_notify.h" #include "networking/lowlevel/vsnet_sockethttp.h" #include "networking/lowlevel/vsnet_dloadmgr.h" #include "networking/lowlevel/netui.h" #include "networking/client.h" #include "networking/fileutil.h" #include "vs_random.h" //For random ping time. std::string global_username; std::string global_password; /* ************************************************************ **** Authenticate the client *** ************************************************************ */ int NetClient::authenticate() { COUT<<" enter "<<__PRETTY_FUNCTION__<<endl; Packet packet2; string str_callsign, str_passwd; NetBuffer netbuf; //Get the name and password from vegastrike.config //Maybe someday use a default Guest account if no callsign or password is provided thus allowing //Player to wander but not interact with the universe this->callsign = str_callsign = vs_config->getVariable( "player", "callsign", "" ); this->password = str_passwd = vs_config->getVariable( "player", "password", "" ); if ( global_username.length() ) this->callsign = global_username; if ( global_password.length() ) this->password = global_password; if ( str_callsign.length() && str_passwd.length() ) { netbuf.addString( str_callsign ); netbuf.addString( str_passwd ); packet2.send( CMD_LOGIN, 0, netbuf.getData(), netbuf.getDataLength(), SENDRELIABLE, NULL, *this->clt_tcp_sock, __FILE__, PSEUDO__LINE__( 165 ) ); COUT<<"Send login for player <"<<str_callsign<<">:<*******" "> - buffer length : "<<packet2.getDataLength() <<" (+"<<packet2.getHeaderLength()<<" header len"<<endl; } else { cerr<<"Callsign and/or password not specified in vegastrike.config, please check 
this."<<endl<<endl; return -1; } return 0; } /* ************************************************************ **** Login loop : waiting for game server to respond *** ************************************************************ */ int NetClient::loginAuth( string str_callsign, string str_passwd, string &error ) { COUT<<"enter "<<"NetClient::loginLoop"<<endl; lastsave.clear(); ship_select_list.clear(); Packet packet2; NetBuffer netbuf; //memset( buffer, 0, tmplen+1); netbuf.addString( str_callsign ); netbuf.addString( str_passwd ); packet2.send( CMD_LOGIN, 0, netbuf.getData(), netbuf.getDataLength(), SENDRELIABLE, NULL, *this->clt_tcp_sock, __FILE__, PSEUDO__LINE__( 316 ) ); COUT<<"Sent login for player <"<<str_callsign<<">:<*******" <<">"<<endl <<" - buffer length : "<<packet2.getDataLength()<<endl; this->callsign = str_callsign; this->password = str_passwd; //Now the loop return loginLoop( error ); } int NetClient::loginLoop( string &error ) { int timeout = 0, recv = 0; //int ret=0; Packet packet; string login_tostr = vs_config->getVariable( "network", "logintimeout", "100" ); timeval tv = {atoi( login_tostr.c_str() ), 0}; while (!timeout) { recv = this->recvMsg( &packet, &tv ); if (recv == 0) { error = "NETWORK ERROR : Login procedeure timed out."; timeout = 1; } else if (recv < 0) { char str[127]; sprintf( str, "NETWORK ERROR in recieving login data (error number %d)!!!", #ifdef _WIN32 WSAGetLastError() #else errno #endif ); error = (str); timeout = 1; } else { break; } } COUT<<"End of login loop"<<endl; if (lastsave.empty() || lastsave[0] == "") { if ( ship_select_list.empty() ) error = "Login failure!"; return ship_select_list.size(); } //cout<<"GLOBALSAVES[0] : " //cout<<"GLOBALSAVES[1] : "<<globalsaves[1]<<endl; return 1; } /* ************************************************************ **** Login loop : waiting for account server to respond *** ************************************************************ */ vector< string >& NetClient::loginAcctLoop( string 
str_callsign, string str_passwd ) { COUT<<"enter "<<"NetClient::loginAcctLoop"<<endl; this->error_message = string(); std::string netbuf; addSimpleChar( netbuf, ACCT_LOGIN_DATA ); //memset( buffer, 0, tmplen+1); addSimpleString( netbuf, str_callsign ); addSimpleString( netbuf, str_passwd ); COUT<<"Buffering to send with LOGIN_DATA for "<<str_callsign<<endl; //PacketMem m( netbuf.getData(), netbuf.getDataLength(), PacketMem::LeaveOwnership ); //m.dump( cerr, 3 ); acct_sock->sendstr( netbuf ); /* * packet2.send( LOGIN_DATA, 0, * netbuf.getData(), netbuf.getDataLength(), * SENDRELIABLE, NULL, this->acct_sock, * __FILE__, PSEUDO__LINE__(378) ); */ //Now the loop int timeout = 0, recv = 0; //int ret=0; Packet packet; double initial = queryTime(); double newtime = 0; double elapsed = 0; string login_tostr = vs_config->getVariable( "network", "logintimeout", "20" ); int login_to = atoi( login_tostr.c_str() ); while (!timeout && !recv) { //If we have no response in "login_to" seconds -> fails newtime = queryTime(); elapsed = newtime-initial; //COUT<<elapsed<<" seconds since login request"<<endl; if (elapsed > login_to) { //lastsave.push_back( ""); COUT<<"!!! 
NETWORK ERROR : Connection to account server timed out !!!"<<endl; timeout = 1; break; } recv = checkAcctMsg(); _sock_set.waste_time( 0, 40000 ); } COUT<<"End of loginAcct loop"<<endl; //globalsaves should be empty otherwise we filled it with an empty string followed by the error message if (lastsave.empty() || lastsave[0] != "") { //this->callsign = str_callsign; //savefiles = globalsaves; COUT<<"Trying to connect to game server..."<<endl <<"\tIP="<<_serverip<<":"<<_serverport<<endl; } return lastsave; } void NetClient::loginChooseShip( Packet &p1 ) { NetBuffer netbuf( p1.getData(), p1.getDataLength() ); ship_select_list.clear(); unsigned short numShips = netbuf.getShort(); ship_select_list.reserve( numShips ); for (int i = 0; i < numShips; i++) ship_select_list.push_back( netbuf.getString() ); } void NetClient::loginAccept( Packet &p1 ) { using namespace VSFileSystem; NetBuffer netbuf( p1.getData(), p1.getDataLength() ); Packet pckt; this->serial = p1.getSerial(); COUT<<">>> LOGIN ACCEPTED =( serial #"<<serial<<" )= --------------------------------------"<<endl; { char msg[100]; sprintf( msg, "#cc66ffNETWORK: Login Accepted. Serial number is %d. Downloading system file...", serial ); bootstrap_draw( msg, NULL ); } //Should receive player's data (savegame) from server if there is a save localSerials.push_back( serial ); string datestr = netbuf.getString(); _Universe->current_stardate.InitTrek( datestr ); cerr<<"Stardate initialized"<<endl; cerr<<"WE ARE ON STARDATE "<<datestr<<" - converted : " <<_Universe->current_stardate.GetFullTrekDate()<<endl; lastsave.push_back( netbuf.getString() ); lastsave.push_back( netbuf.getString() ); unsigned short digest_length; /* * // Get universe file... not too useful. * // But this is a good example of using VsnetDownload download manager. * * // Get the galaxy file from buffer with relative path to datadir ! 
* digest_length = netbuf.getShort(); * string univfile = netbuf.getString(); * if (digest_length) { * digest = netbuf.getBuffer( digest_length ); * #ifdef CRYPTO * cerr<<"Initial system = "<<VSFileSystem::datadir+univfile<<" - File Hash = "<<digest<<endl; * // Compare to local hash and ask for the good file if we don't have it or bad version * if( !FileUtil::HashCompare( univfile, digest, UniverseFile)) * { * VsnetDownload::Client::NoteFile f( this->clt_tcp_sock, univfile, VSFileSystem::UniverseFile); * _downloadManagerClient->addItem( &f); * timeval timeout={10,0}; * while( !f.done()) * { * if (recvMsg( NULL, &timeout )<=0) { * //NETFIXME: What to do if the download times out? * break; * } * } * } * #endif * } */ //Get the initial system file... string sysfile = netbuf.getString(); bool downloadsystem = true; bool autogen; string fullsys = VSFileSystem::GetCorrectStarSysPath( sysfile, autogen ); if ( fullsys.empty() ) fullsys = sysfile; digest_length = netbuf.getShort(); COUT<<"Initial system = "<<fullsys; if (digest_length) { #ifdef CRYPTO unsigned char *digest = netbuf.getBuffer( digest_length ); cerr<<" - File Hash = "<<digest; if ( FileUtil::HashCompare( fullsys, digest, SystemFile ) ) downloadsystem = false; #else netbuf.getBuffer( digest_length ); #endif } //Set the zone number this->zone = netbuf.getShort(); //Did the hash compare fail? if (downloadsystem) { cerr<<": Downloading system from server..."<<endl; VsnetDownload::Client::NoteFile f( fullsys, *this->clt_tcp_sock, sysfile, VSFileSystem::SystemFile ); _downloadManagerClient->addItem( &f ); timeval timeout = {10, 0}; while ( !f.done() ) if (recvMsg( NULL, &timeout ) <= 0) //NETFIXME: what to do if timeout elapses... break; } cout<<endl; } void NetClient::respawnRequest() { Packet packet2; NetBuffer netbuf; //No data. 
packet2.send( CMD_RESPAWN, 0, netbuf.getData(), netbuf.getDataLength(), SENDRELIABLE, NULL, *this->clt_tcp_sock, __FILE__, PSEUDO__LINE__( 165 ) ); } void NetClient::textMessage( const std::string &data ) { Packet packet2; NetBuffer netbuf; netbuf.addString( data ); //No data. packet2.send( CMD_TXTMESSAGE, 0, netbuf.getData(), netbuf.getDataLength(), SENDRELIABLE, NULL, *this->clt_tcp_sock, __FILE__, PSEUDO__LINE__( 165 ) ); } void NetClient::GetCurrentServerAddress( string &addr, unsigned short &port ) { addr = this->_serverip; port = this->_serverport; } void NetClient::SetCurrentServerAddress( string addr, unsigned short port ) { this->_serverip = addr; this->_serverport = port; } void NetClient::SetConfigServerAddress( string &addr, unsigned short &port ) { bool use_acctserver = XMLSupport::parse_bool( vs_config->getVariable( "network", "use_account_server", "false" ) ); if (use_acctserver) { this->_serverport = port = 0; this->_serverip = addr = vs_config->getVariable( "network", "account_server_url", "http://localhost/cgi-bin/accountserver.py" ); cout<<endl<<"Account Server URL : "<<addr<<endl<<endl; return; } int port_tmp; string srvport = vs_config->getVariable( "network", "server_port", "6777" ); port_tmp = atoi( srvport.c_str() ); if (port_tmp > 65535 || port_tmp < 0) port_tmp = 0; port = (unsigned short) port_tmp; addr = vs_config->getVariable( "network", "server_ip", "" ); this->_serverip = addr; this->_serverport = port; cout<<endl<<"Server IP : "<<addr<<" - port : "<<srvport<<endl<<endl; } /* ************************************************************ **** Initialize the client network to account server *** ************************************************************ */ VsnetHTTPSocket* NetClient::init_acct( const std::string &addr ) { COUT<<" enter "<<__PRETTY_FUNCTION__ <<" with "<<addr<<endl; _sock_set.start(); cout<<"Initializing connection to account server..."<<endl; acct_sock = new VsnetHTTPSocket( addr, _sock_set ); COUT<<"accountserver on 
socket "<<acct_sock<<" done."<<endl; return acct_sock; } /* ************************************************************ **** Initialize the client network *** ************************************************************ */ SOCKETALT NetClient::init( const char *addr, unsigned short port, std::string &error ) { if ( clt_tcp_sock && clt_tcp_sock->valid() ) clt_tcp_sock->disconnect( "NC_init_tcp" ); if ( clt_udp_sock && clt_udp_sock->valid() ) NetUIUDP::disconnectSaveUDP( *clt_udp_sock ); lastsave.clear(); netversion = 0; if (addr == NULL) { addr = _serverip.c_str(); port = _serverport; } COUT<<" enter "<<__PRETTY_FUNCTION__ <<" with "<<addr<<":"<<port<<endl; _sock_set.start(); string strnetatom; strnetatom = vs_config->getVariable( "network", "network_atom", "" ); if (strnetatom == "") NETWORK_ATOM = 0.2; else NETWORK_ATOM = (double) atof( strnetatom.c_str() ); *this->clt_tcp_sock = NetUITCP::createSocket( addr, port, _sock_set ); this->lossy_socket = this->clt_tcp_sock; if ( !clt_tcp_sock->valid() ) return *this->clt_tcp_sock; COUT<<"created TCP socket ("<<addr<<","<<port<<") -> "<<this->clt_tcp_sock<<endl; /* * if( this->authenticate() == -1) * { * perror( "Error login in "); * return -1; * } */ Packet join; join.send( CMD_CONNECT, CLIENT_NETVERSION, "", 0, SENDRELIABLE, NULL, *this->clt_tcp_sock, __FILE__, PSEUDO__LINE__( 407 ) ); this->enabled = 1; string login_tostr = vs_config->getVariable( "network", "connecttimeout", "10" ); timeval tv = {atoi( login_tostr.c_str() ), 0}; int timeout = 0; Packet packet; while (!timeout) { int recvd = this->recvMsg( &packet, &tv ); if (recvd == 0) { error = "Connection to game server timed out!"; timeout = 1; } else if (recvd < 0) { char str[127]; sprintf( str, "NETWORK ERROR in recieving socket (error number %d)!!!", #ifdef _WIN32 WSAGetLastError() #else errno #endif ); error = str; timeout = 1; } else if (this->netversion) { break; } } if (!this->netversion) { if ( error.empty() ) error = "Unable to receive a valid version 
from this server."; timeout = 1; } if (timeout) clt_tcp_sock->disconnect( "NCinit_timedout" ); return *this->clt_tcp_sock; } /* ************************************************************ **** Synchronize server time and client time *** ************************************************************ **** This function creates the UDP socket and determines *** **** whether to use the UDP or the TCP socket for lossy *** **** packet data. *** ************************************************************ */ //NETFIXME: Correctly obtain ping time. void NetClient::synchronizeTime( SOCKETALT *udpsock ) { int i = 0; int timeout = 0; int recv; timeval tv = {1, 0}; //Timeout after 1 second, request send again. double ping; //use deltaTime? double pingavg = 0.; double timeavg = 0.; std::map< double, double >times; //sorted container. double initialTime = queryTime(); static int NUM_TIMES = XMLSupport::parse_int( vs_config->getVariable( "network", "servertime_calibration", "10" ) ); static int UDP_TIMEOUT = XMLSupport::parse_int( vs_config->getVariable( "network", "udp_timeout", "1" ) ); static int clt_port_read = XMLSupport::parse_int( vs_config->getVariable( "network", "udp_listen_port", "6778" ) ); if (clt_port_read > 65535 || clt_port_read <= 0) clt_port_read = 0; static int clt_port_read_max = XMLSupport::parse_int( vs_config->getVariable( "network", "udp_listen_port_max", "6778" ) ); if (clt_port_read_max > 65535 || clt_port_read_max <= 0) clt_port_read_max = clt_port_read; unsigned short clt_port = (unsigned short) clt_port_read; unsigned short clt_port_max = (unsigned short) clt_port_read_max; if (clt_port_max < clt_port) clt_port_max = clt_port; static string nettransport = vs_config->getVariable( "network", "transport", "udp" ); //std::string addr; unsigned short port = this->_serverport; //getConfigServerAddress(addr, port); if ( !( udpsock != NULL && udpsock->setRemoteAddress( NetUIBase::lookupHost( this->_serverip.c_str(), port ) ) ) ) { do *this->clt_udp_sock = 
NetUIUDP::createSocket( this->_serverip.c_str(), port, clt_port, _sock_set ); while ( ( !this->clt_udp_sock->valid() ) && (clt_port++) ); } else { this->clt_udp_sock = udpsock; } COUT<<"created UDP socket ("<<this->_serverip<<","<<port<<", listen on "<<clt_port<<") -> "<<this->clt_udp_sock<<endl; if (nettransport == "udp") { //NETFIXME: Keep trying ports until a connection is established. COUT<<"Default lossy transport configured to UDP."<<endl; this->lossy_socket = clt_udp_sock; } else { COUT<<"Default lossy transport configured to TCP (behind firewall)."<<endl; this->lossy_socket = clt_tcp_sock; clt_port = 0; } this->clt_tcp_sock->set_block(); this->clt_udp_sock->set_block(); //Wait for NUM_TIMES (10) successful tries, or 10 consecutive 1-second timeouts //(we use UDP on the response (SENDANDFORGET) to improve timing accuracy). while (i < NUM_TIMES && timeout < UDP_TIMEOUT) { Packet packet; NetBuffer outData; outData.addShort( clt_port ); packet.send( CMD_SERVERTIME, 0, outData.getData(), outData.getDataLength(), //No data. SENDRELIABLE, NULL, *this->clt_tcp_sock, __FILE__, PSEUDO__LINE__( 343 ) ); recv = this->recvMsg( &packet, &tv ); //If we have no response. if (recv <= 0) { COUT<<"synchronizeTime() Timed out"<<endl; ++timeout; if (timeout >= UDP_TIMEOUT) { if (this->lossy_socket->isTcp() == false) { if (clt_port < clt_port_max && !udpsock) { NetUIUDP::disconnectSaveUDP( *this->clt_udp_sock ); *this->clt_udp_sock = NetUIUDP::createSocket( this->_serverip.c_str(), port, clt_port, _sock_set ); clt_port++; COUT<<"Trying UDP port "<<clt_port<<"."<<endl; } else { //no UDP requests made it, fallback to TCP. this->lossy_socket = this->clt_tcp_sock; clt_port = 0; COUT<<"Setting default lossy transport to TCP (UDP timeout)."<<endl; } timeout = 0; } } } else if (packet.getCommand() == CMD_SERVERTIME) { //NETFIXME: obtain actual ping time //ping = getPingTime( &tv ); ping = exp( vsrandom.uniformInc( -10, 0 ) ); if (ping > 0 && ping < 1.) 
{ ++i; NetBuffer data( packet.getData(), packet.getDataLength() ); double serverTime = data.getDouble(); double currentTime = queryTime(); serverTime += initialTime-currentTime; times.insert( std::multimap< double, double >::value_type( ping, serverTime-ping ) ); timeout = 0; } else { ++timeout; } } } this->clt_tcp_sock->set_nonblock(); this->clt_udp_sock->set_nonblock(); //std::sort(times[0], times[i]); if (i >= NUM_TIMES) { int mid = i/2; double median = 0.; double tot = 0.; int location = 0; std::map< double, double >::const_iterator iter; for (iter = times.begin(); iter != times.end(); ++iter) { if (location == mid) { median = iter->first; if (i%2 == 1) { ++iter; median += iter->first; } break; } ++location; } if (i%2 == 1) median /= 2; for (iter = times.begin(); iter != times.end(); ++iter) { double wdiff = exp( -10*(median-iter->first)*(median-iter->first) ); pingavg += wdiff*iter->first; timeavg += wdiff*iter->second; tot += wdiff; } pingavg /= tot; timeavg /= tot; } else { COUT<<"Error in time synchronization: connection ended or timed out."; } this->deltatime = pingavg; double newTime = timeavg+queryTime()-initialTime; COUT<<"Setting time to: New time: "<<newTime<<endl; setNewTime( newTime ); for (unsigned int cpnum = 0; cpnum < _Universe->numPlayers(); cpnum++) //Seems like a bad idea... shouldn't this rely on SIMULATION_ATOM? _Universe->AccessCockpit( cpnum )->TimeOfLastCollision = -200; cur_time = newTime; } /* ************************************************************ **** Receive that start locations *** ************************************************************ */ //Receives possible start locations (first a short representing number of locations) //Then for each number, a desc // This function does absolutely nothing. why does it exist? 
void NetClient::receiveLocations( const Packet* ) { // unsigned char cmd; #ifdef __DEBUG__ COUT<<"Nb start locations : "<<nblocs<<endl; #endif //Choose starting location here //Send the chosen location to the server // cmd = CMD_ADDCLIENT; } /* ************************************************************ **** Create a new character *** ************************************************************ */ bool NetClient::selectShip( unsigned int ship ) { if (lastsave.empty() || lastsave[0] == "") { NetBuffer netbuf; string shipname; netbuf.addShort( (unsigned short) ship ); if ( ship < ship_select_list.size() ) shipname = ship_select_list[ship]; netbuf.addString( shipname ); Packet p; p.send( CMD_CHOOSESHIP, 0, netbuf.getData(), netbuf.getDataLength(), SENDRELIABLE, NULL, *clt_tcp_sock, __FILE__, PSEUDO__LINE__( 628 ) ); string err; int ret = loginLoop( err ); if (ret != 1 || lastsave.size() < 2 || lastsave[0] == "") { cout<<"Error in CHOOSEHIP: "<<err <<"choice="<<ship<<"("<<shipname<<"), max="<<ret<<endl; return false; } } return true; } void NetClient::createChar() {} int NetClient::connectLoad( string username, string passwd, string &error ) { localSerials.resize( 0 ); bootstrap_draw( "#cc66ffNETWORK: Initializing...", NULL ); cout<<"NETWORK: Initializing..."<<endl; string srvipadr; unsigned short port; bool ret = false; //Are we using the directly account server to identify us ? GetCurrentServerAddress( srvipadr, port ); if (!port) { //using account server. string srvipadr = vs_config->getVariable( "network", "account_server_url", "http://localhost/cgi-bin/accountserver.py" ); bootstrap_draw( "#cc66ffNETWORK: Connecting to account server.", NULL ); cout<<"NETWORK: Connecting to account server."<<endl; init_acct( srvipadr ); vector< string > &savetmp = loginAcctLoop( username, passwd ); //We don't expect a saved game... if ( savetmp.size() >= 2 && savetmp[0].empty() ) { //But this is the way the acctserver code indicates an error. 
error = savetmp[1]; return 0; } bootstrap_draw( "#cc66ffNETWORK: Connecting to VegaServer.", NULL ); cout<<"NETWORK: Connecting to VegaServer."<<endl; ret = init( NULL, 0, error ).valid(); } else { //Or are we going through a game server to do so ? bootstrap_draw( "#cc66ffNETWORK: Connecting to VegaServer.", NULL ); cout<<"NETWORK: Connecting to VegaServer."<<endl; ret = init( srvipadr.c_str(), port, error ).valid(); } if (ret == false) { //If network initialization fails, exit if ( error.empty() ) error = "Network connection error"; if ( !this->error_message.empty() ) error += "\n"+this->error_message; cout<<"Error: "<<error<<endl; return 0; } cout<<"Successfully connected!"; //sleep( 3); cout<<"Waiting for player "<<username<<": login response..."<<endl; bootstrap_draw( "#cc66ffNETWORK: Successful connection! Waiting to log in.", NULL ); int loggedin = loginAuth( username, passwd, error ); if ( !this->error_message.empty() ) { cout<<"Warning: "<<this->error_message<<endl; if ( !error.empty() ) error += "\n"; error += this->error_message; } return loggedin; } vector< string >* NetClient::loginSavedGame( int ship ) { if ( !selectShip( ship ) ) return NULL; /************* NETWORK PART ***************/ return &lastsave; } void NetClient::startGame() { vector< string >savedships; QVector pos; //useless. 
string mysystem; string savefiles; bool setplayerXloc = false; float credits = 0.0; vector< StarSystem* >ss; vector< QVector > playerNloc; vector< string >playersaveunit; vector< vector< string > >vecstr; bootstrap_draw( "#cc66ffNETWORK: Checking for UDP connection.", NULL ); cout<<"NETWORK: Checking for UDP connection."<<endl; synchronizeTime( NULL ); cout<<" logged in !"<<endl; bootstrap_draw( "#cc66ffNETWORK: Loading player ship.", NULL ); cout<<"NETWORK: Loading player ship."<<endl; if (_Universe->numPlayers() == 0) _Universe->createCockpit( callsign ); _Universe->clearAllSystems(); _Universe->AccessCockpit( 0 )->savegame->SetStarSystem( string() ); _Universe->AccessCockpit( 0 )->savegame->ParseSaveGame( "", mysystem, "", pos, setplayerXloc, credits, savedships, 0, lastsave[0], false ); ss.push_back( _Universe->Init( mysystem, Vector( 0, 0, 0 ), string() ) ); CopySavedShips( callsign, 0, savedships, true ); playersaveunit.push_back( savedships[0] ); if (setplayerXloc) playerNloc.push_back( pos ); else playerNloc.push_back( QVector( FLT_MAX, FLT_MAX, FLT_MAX ) ); vecstr.push_back( lastsave ); createObjects( playersaveunit, ss, playerNloc, vecstr ); bootstrap_draw( "#cc66ffNETWORK: Loading system.", NULL ); cout<<"NETWORK: Loading system."<<endl; inGame(); //PacketLoop(CMD_ADDEDYOU); // Wait for the command before stopping. }
gpl-2.0
drdaeman/accel-ppp
accel-pppd/ctrl/pppoe/pppoe.c
1
43574
#include <unistd.h> #include <stdlib.h> #include <stdio.h> #include <stdarg.h> #include <errno.h> #include <string.h> #include <pthread.h> #include <fcntl.h> #include <sys/socket.h> #include <sys/ioctl.h> #include <net/ethernet.h> #include <netpacket/packet.h> #include <arpa/inet.h> #include <printf.h> #include <ctype.h> #include "crypto.h" #include "events.h" #include "triton.h" #include "log.h" #include "ppp.h" #include "mempool.h" #include "cli.h" #ifdef RADIUS #include "radius.h" #endif #include "iputils.h" #include "connlimit.h" #include "pppoe.h" #include "memdebug.h" struct pppoe_conn_t { struct list_head entry; struct triton_context_t ctx; struct pppoe_serv_t *serv; int disc_sock; uint16_t sid; uint8_t addr[ETH_ALEN]; int ppp_started:1; struct pppoe_tag *relay_sid; struct pppoe_tag *host_uniq; struct pppoe_tag *service_name; struct pppoe_tag *tr101; uint8_t cookie[COOKIE_LENGTH]; struct ppp_ctrl_t ctrl; struct ppp_t ppp; #ifdef RADIUS struct rad_plugin_t radius; #endif }; struct delayed_pado_t { struct list_head entry; struct triton_timer_t timer; struct pppoe_serv_t *serv; uint8_t addr[ETH_ALEN]; struct pppoe_tag *host_uniq; struct pppoe_tag *relay_sid; struct pppoe_tag *service_name; }; struct padi_t { struct list_head entry; struct timespec ts; uint8_t addr[ETH_ALEN]; }; struct iplink_arg { pcre *re; const char *opt; void *cli; }; int conf_verbose; char *conf_ac_name; int conf_ifname_in_sid; char *conf_pado_delay; int conf_tr101 = 1; int conf_padi_limit = 0; int conf_mppe = MPPE_UNSET; static char *conf_ip_pool; int conf_reply_exact_service = 0; char *conf_service_names[MAX_SERVICE_NAMES]; static mempool_t conn_pool; static mempool_t pado_pool; static mempool_t padi_pool; unsigned int stat_starting; unsigned int stat_active; unsigned int stat_delayed_pado; unsigned long stat_PADI_recv; unsigned long stat_PADI_drop; unsigned long stat_PADO_sent; unsigned long stat_PADR_recv; unsigned long stat_PADR_dup_recv; unsigned long stat_PADS_sent; unsigned int 
total_padi_cnt; pthread_rwlock_t serv_lock = PTHREAD_RWLOCK_INITIALIZER; LIST_HEAD(serv_list); static uint8_t bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; static void pppoe_send_PADT(struct pppoe_conn_t *conn); static void _server_stop(struct pppoe_serv_t *serv); void pppoe_server_free(struct pppoe_serv_t *serv); static int init_secret(struct pppoe_serv_t *serv); static void __pppoe_server_start(const char *ifname, const char *opt, void *cli); static void disconnect(struct pppoe_conn_t *conn) { if (conn->ppp_started) { dpado_check_prev(__sync_fetch_and_sub(&stat_active, 1)); conn->ppp_started = 0; ppp_terminate(&conn->ppp, TERM_USER_REQUEST, 1); } pppoe_send_PADT(conn); close(conn->disc_sock); triton_event_fire(EV_CTRL_FINISHED, &conn->ppp); log_ppp_info1("disconnected\n"); pthread_mutex_lock(&conn->serv->lock); conn->serv->conn[conn->sid] = NULL; list_del(&conn->entry); conn->serv->conn_cnt--; if (conn->serv->stopping && conn->serv->conn_cnt == 0) { pthread_mutex_unlock(&conn->serv->lock); pppoe_server_free(conn->serv); } else pthread_mutex_unlock(&conn->serv->lock); _free(conn->ctrl.calling_station_id); _free(conn->ctrl.called_station_id); _free(conn->service_name); if (conn->host_uniq) _free(conn->host_uniq); if (conn->relay_sid) _free(conn->relay_sid); if (conn->tr101) _free(conn->tr101); triton_context_unregister(&conn->ctx); mempool_free(conn); } static void ppp_started(struct ppp_t *ppp) { log_ppp_debug("pppoe: ppp started\n"); } static void ppp_finished(struct ppp_t *ppp) { struct pppoe_conn_t *conn = container_of(ppp, typeof(*conn), ppp); log_ppp_debug("pppoe: ppp finished\n"); if (conn->ppp_started) { dpado_check_prev(__sync_fetch_and_sub(&stat_active, 1)); conn->ppp_started = 0; triton_context_call(&conn->ctx, (triton_event_func)disconnect, conn); } } static void pppoe_conn_close(struct triton_context_t *ctx) { struct pppoe_conn_t *conn = container_of(ctx, typeof(*conn), ctx); if (conn->ppp_started) ppp_terminate(&conn->ppp, 
TERM_ADMIN_RESET, 0); else disconnect(conn); } #ifdef RADIUS static int pppoe_rad_send_access_request(struct rad_plugin_t *rad, struct rad_packet_t *pack) { struct pppoe_conn_t *conn = container_of(rad, typeof(*conn), radius); if (conn->tr101) return tr101_send_access_request(conn->tr101, pack); return 0; } static int pppoe_rad_send_accounting_request(struct rad_plugin_t *rad, struct rad_packet_t *pack) { struct pppoe_conn_t *conn = container_of(rad, typeof(*conn), radius); if (conn->tr101) return tr101_send_accounting_request(conn->tr101, pack); return 0; } #endif static struct pppoe_conn_t *allocate_channel(struct pppoe_serv_t *serv, const uint8_t *addr, const struct pppoe_tag *host_uniq, const struct pppoe_tag *relay_sid, const struct pppoe_tag *service_name, const struct pppoe_tag *tr101, const uint8_t *cookie) { struct pppoe_conn_t *conn; int sid; conn = mempool_alloc(conn_pool); if (!conn) { log_emerg("pppoe: out of memory\n"); return NULL; } memset(conn, 0, sizeof(*conn)); pthread_mutex_lock(&serv->lock); for (sid = serv->sid + 1; sid != serv->sid; sid++) { if (sid == MAX_SID) sid = 1; if (!serv->conn[sid]) { conn->sid = sid; serv->sid = sid; serv->conn[sid] = conn; list_add_tail(&conn->entry, &serv->conn_list); serv->conn_cnt++; break; } } pthread_mutex_unlock(&serv->lock); if (!conn->sid) { log_warn("pppoe: no free sid available\n"); mempool_free(conn); return NULL; } conn->serv = serv; memcpy(conn->addr, addr, ETH_ALEN); if (host_uniq) { conn->host_uniq = _malloc(sizeof(*host_uniq) + ntohs(host_uniq->tag_len)); memcpy(conn->host_uniq, host_uniq, sizeof(*host_uniq) + ntohs(host_uniq->tag_len)); } if (relay_sid) { conn->relay_sid = _malloc(sizeof(*relay_sid) + ntohs(relay_sid->tag_len)); memcpy(conn->relay_sid, relay_sid, sizeof(*relay_sid) + ntohs(relay_sid->tag_len)); } if (tr101) { conn->tr101 = _malloc(sizeof(*tr101) + ntohs(tr101->tag_len)); memcpy(conn->tr101, tr101, sizeof(*tr101) + ntohs(tr101->tag_len)); } conn->service_name = 
_malloc(sizeof(*service_name) + ntohs(service_name->tag_len)); memcpy(conn->service_name, service_name, sizeof(*service_name) + ntohs(service_name->tag_len)); memcpy(conn->cookie, cookie, COOKIE_LENGTH); conn->ctx.before_switch = log_switch; conn->ctx.close = pppoe_conn_close; conn->ctrl.ctx = &conn->ctx; conn->ctrl.started = ppp_started; conn->ctrl.finished = ppp_finished; conn->ctrl.max_mtu = MAX_PPPOE_MTU; conn->ctrl.type = CTRL_TYPE_PPPOE; conn->ctrl.name = "pppoe"; conn->ctrl.mppe = conf_mppe; conn->ctrl.def_pool = conf_ip_pool; conn->ctrl.calling_station_id = _malloc(IFNAMSIZ + 19); conn->ctrl.called_station_id = _malloc(IFNAMSIZ + 19); if (conf_ifname_in_sid == 1 || conf_ifname_in_sid == 3) sprintf(conn->ctrl.calling_station_id, "%s:%02x:%02x:%02x:%02x:%02x:%02x", serv->ifname, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); else sprintf(conn->ctrl.calling_station_id, "%02x:%02x:%02x:%02x:%02x:%02x", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); if (conf_ifname_in_sid == 2 || conf_ifname_in_sid == 3) sprintf(conn->ctrl.called_station_id, "%s:%02x:%02x:%02x:%02x:%02x:%02x", serv->ifname, serv->hwaddr[0], serv->hwaddr[1], serv->hwaddr[2], serv->hwaddr[3], serv->hwaddr[4], serv->hwaddr[5]); else sprintf(conn->ctrl.called_station_id, "%02x:%02x:%02x:%02x:%02x:%02x", serv->hwaddr[0], serv->hwaddr[1], serv->hwaddr[2], serv->hwaddr[3], serv->hwaddr[4], serv->hwaddr[5]); ppp_init(&conn->ppp); conn->ppp.ctrl = &conn->ctrl; conn->ppp.chan_name = conn->ctrl.calling_station_id; triton_context_register(&conn->ctx, &conn->ppp); triton_context_wakeup(&conn->ctx); triton_event_fire(EV_CTRL_STARTING, &conn->ppp); triton_event_fire(EV_CTRL_STARTED, &conn->ppp); conn->disc_sock = dup(serv->hnd.fd); return conn; } static void connect_channel(struct pppoe_conn_t *conn) { int sock; struct sockaddr_pppox sp; sock = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_OE); if (!sock) { log_error("pppoe: socket(PPPOX): %s\n", strerror(errno)); goto out_err; } fcntl(sock, F_SETFD, 
fcntl(sock, F_GETFD) | FD_CLOEXEC); memset(&sp, 0, sizeof(sp)); sp.sa_family = AF_PPPOX; sp.sa_protocol = PX_PROTO_OE; sp.sa_addr.pppoe.sid = htons(conn->sid); strcpy(sp.sa_addr.pppoe.dev, conn->serv->ifname); memcpy(sp.sa_addr.pppoe.remote, conn->addr, ETH_ALEN); if (connect(sock, (struct sockaddr *)&sp, sizeof(sp))) { log_error("pppoe: connect: %s\n", strerror(errno)); goto out_err_close; } conn->ppp.fd = sock; if (establish_ppp(&conn->ppp)) goto out_err_close; #ifdef RADIUS if (conn->tr101 && triton_module_loaded("radius")) { conn->radius.send_access_request = pppoe_rad_send_access_request; conn->radius.send_accounting_request = pppoe_rad_send_accounting_request; rad_register_plugin(&conn->ppp, &conn->radius); } #endif conn->ppp_started = 1; dpado_check_next(__sync_add_and_fetch(&stat_active, 1)); return; out_err_close: close(sock); out_err: disconnect(conn); } static struct pppoe_conn_t *find_channel(struct pppoe_serv_t *serv, const uint8_t *cookie) { struct pppoe_conn_t *conn; list_for_each_entry(conn, &serv->conn_list, entry) if (!memcmp(conn->cookie, cookie, COOKIE_LENGTH)) return conn; return NULL; } static void print_tag_string(struct pppoe_tag *tag) { int i; for (i = 0; i < ntohs(tag->tag_len); i++) log_info2("%c", tag->tag_data[i]); } static void print_tag_octets(struct pppoe_tag *tag) { int i; for (i = 0; i < ntohs(tag->tag_len); i++) log_info2("%02x", (uint8_t)tag->tag_data[i]); } static void print_packet(uint8_t *pack) { struct ethhdr *ethhdr = (struct ethhdr *)pack; struct pppoe_hdr *hdr = (struct pppoe_hdr *)(pack + ETH_HLEN); struct pppoe_tag *tag; int n; log_info2("[PPPoE "); switch (hdr->code) { case CODE_PADI: log_info2("PADI"); break; case CODE_PADO: log_info2("PADO"); break; case CODE_PADR: log_info2("PADR"); break; case CODE_PADS: log_info2("PADS"); break; case CODE_PADT: log_info2("PADT"); break; } log_info2(" %02x:%02x:%02x:%02x:%02x:%02x => %02x:%02x:%02x:%02x:%02x:%02x", ethhdr->h_source[0], ethhdr->h_source[1], ethhdr->h_source[2], 
ethhdr->h_source[3], ethhdr->h_source[4], ethhdr->h_source[5], ethhdr->h_dest[0], ethhdr->h_dest[1], ethhdr->h_dest[2], ethhdr->h_dest[3], ethhdr->h_dest[4], ethhdr->h_dest[5]); log_info2(" sid=%04x", ntohs(hdr->sid)); for (n = 0; n < ntohs(hdr->length); n += sizeof(*tag) + ntohs(tag->tag_len)) { tag = (struct pppoe_tag *)(pack + ETH_HLEN + sizeof(*hdr) + n); switch (ntohs(tag->tag_type)) { case TAG_END_OF_LIST: log_info2(" <End-Of-List>"); break; case TAG_SERVICE_NAME: log_info2(" <Service-Name "); print_tag_string(tag); log_info2(">"); break; case TAG_AC_NAME: log_info2(" <AC-Name "); print_tag_string(tag); log_info2(">"); break; case TAG_HOST_UNIQ: log_info2(" <Host-Uniq "); print_tag_octets(tag); log_info2(">"); break; case TAG_AC_COOKIE: log_info2(" <AC-Cookie "); print_tag_octets(tag); log_info2(">"); break; case TAG_VENDOR_SPECIFIC: if (ntohs(tag->tag_len) < 4) log_info2(" <Vendor-Specific invalid>"); else log_info2(" <Vendor-Specific %x>", ntohl(*(uint32_t *)tag->tag_data)); break; case TAG_RELAY_SESSION_ID: log_info2(" <Relay-Session-Id"); print_tag_octets(tag); log_info2(">"); break; case TAG_SERVICE_NAME_ERROR: log_info2(" <Service-Name-Error>"); break; case TAG_AC_SYSTEM_ERROR: log_info2(" <AC-System-Error>"); break; case TAG_GENERIC_ERROR: log_info2(" <Generic-Error>"); break; default: log_info2(" <Unknown (%x)>", ntohs(tag->tag_type)); break; } } log_info2("]\n"); } static void generate_cookie(struct pppoe_serv_t *serv, const uint8_t *src, uint8_t *cookie) { MD5_CTX ctx; DES_cblock key; DES_key_schedule ks; int i; union { DES_cblock b[3]; uint8_t raw[24]; } u1, u2; DES_random_key(&key); DES_set_key(&key, &ks); MD5_Init(&ctx); MD5_Update(&ctx, serv->secret, SECRET_LENGTH); MD5_Update(&ctx, serv->hwaddr, ETH_ALEN); MD5_Update(&ctx, src, ETH_ALEN); MD5_Update(&ctx, &key, 8); MD5_Final(u1.raw, &ctx); for (i = 0; i < 2; i++) DES_ecb_encrypt(&u1.b[i], &u2.b[i], &ks, DES_ENCRYPT); memcpy(u2.b[2], &key, 8); for (i = 0; i < 3; i++) DES_ecb_encrypt(&u2.b[i], 
&u1.b[i], &serv->des_ks, DES_ENCRYPT); memcpy(cookie, u1.raw, 24); } static int check_cookie(struct pppoe_serv_t *serv, const uint8_t *src, const uint8_t *cookie) { MD5_CTX ctx; DES_key_schedule ks; int i; union { DES_cblock b[3]; uint8_t raw[24]; } u1, u2; memcpy(u1.raw, cookie, 24); for (i = 0; i < 3; i++) DES_ecb_encrypt(&u1.b[i], &u2.b[i], &serv->des_ks, DES_DECRYPT); if (DES_set_key_checked(&u2.b[2], &ks)) return -1; for (i = 0; i < 2; i++) DES_ecb_encrypt(&u2.b[i], &u1.b[i], &ks, DES_DECRYPT); MD5_Init(&ctx); MD5_Update(&ctx, serv->secret, SECRET_LENGTH); MD5_Update(&ctx, serv->hwaddr, ETH_ALEN); MD5_Update(&ctx, src, ETH_ALEN); MD5_Update(&ctx, u2.b[2], 8); MD5_Final(u2.raw, &ctx); return memcmp(u1.raw, u2.raw, 16); } static void setup_header(uint8_t *pack, const uint8_t *src, const uint8_t *dst, int code, uint16_t sid) { struct ethhdr *ethhdr = (struct ethhdr *)pack; struct pppoe_hdr *hdr = (struct pppoe_hdr *)(pack + ETH_HLEN); memcpy(ethhdr->h_source, src, ETH_ALEN); memcpy(ethhdr->h_dest, dst, ETH_ALEN); ethhdr->h_proto = htons(ETH_P_PPP_DISC); hdr->ver = 1; hdr->type = 1; hdr->code = code; hdr->sid = htons(sid); hdr->length = 0; } static void add_tag(uint8_t *pack, int type, const uint8_t *data, int len) { struct pppoe_hdr *hdr = (struct pppoe_hdr *)(pack + ETH_HLEN); struct pppoe_tag *tag = (struct pppoe_tag *)(pack + ETH_HLEN + sizeof(*hdr) + ntohs(hdr->length)); tag->tag_type = htons(type); tag->tag_len = htons(len); memcpy(tag->tag_data, data, len); hdr->length = htons(ntohs(hdr->length) + sizeof(*tag) + len); } static void add_tag2(uint8_t *pack, const struct pppoe_tag *t) { struct pppoe_hdr *hdr = (struct pppoe_hdr *)(pack + ETH_HLEN); struct pppoe_tag *tag = (struct pppoe_tag *)(pack + ETH_HLEN + sizeof(*hdr) + ntohs(hdr->length)); memcpy(tag, t, sizeof(*t) + ntohs(t->tag_len)); hdr->length = htons(ntohs(hdr->length) + sizeof(*tag) + ntohs(t->tag_len)); } static void pppoe_send(int fd, const uint8_t *pack) { struct pppoe_hdr *hdr = (struct 
pppoe_hdr *)(pack + ETH_HLEN); int n, s; s = ETH_HLEN + sizeof(*hdr) + ntohs(hdr->length); n = write(fd, pack, s); if (n < 0 ) log_error("pppoe: write: %s\n", strerror(errno)); else if (n != s) { log_warn("pppoe: short write %i/%i\n", n,s); } } static void pppoe_send_PADO(struct pppoe_serv_t *serv, const uint8_t *addr, const struct pppoe_tag *host_uniq, const struct pppoe_tag *relay_sid, const struct pppoe_tag *service_name) { uint8_t pack[ETHER_MAX_LEN]; uint8_t cookie[COOKIE_LENGTH]; char **service_names = NULL; int i; setup_header(pack, serv->hwaddr, addr, CODE_PADO, 0); add_tag(pack, TAG_AC_NAME, (uint8_t *)conf_ac_name, strlen(conf_ac_name)); if (service_name) add_tag2(pack, service_name); if (!service_name || !conf_reply_exact_service) { if (serv->service_names[0]) service_names = serv->service_names; else if (conf_service_names[0]) service_names = conf_service_names; if (service_names) for (i = 0; i < MAX_SERVICE_NAMES && service_names[i]; i++) add_tag(pack, TAG_SERVICE_NAME, (uint8_t *)service_names[i], strlen(service_names[i])); } generate_cookie(serv, addr, cookie); add_tag(pack, TAG_AC_COOKIE, cookie, COOKIE_LENGTH); if (host_uniq) add_tag2(pack, host_uniq); if (relay_sid) add_tag2(pack, relay_sid); if (conf_verbose) { log_info2("send "); print_packet(pack); } __sync_add_and_fetch(&stat_PADO_sent, 1); pppoe_send(serv->hnd.fd, pack); } static void pppoe_send_err(struct pppoe_serv_t *serv, const uint8_t *addr, const struct pppoe_tag *host_uniq, const struct pppoe_tag *relay_sid, int code, int tag_type) { uint8_t pack[ETHER_MAX_LEN]; setup_header(pack, serv->hwaddr, addr, code, 0); add_tag(pack, TAG_AC_NAME, (uint8_t *)conf_ac_name, strlen(conf_ac_name)); add_tag(pack, tag_type, NULL, 0); if (host_uniq) add_tag2(pack, host_uniq); if (relay_sid) add_tag2(pack, relay_sid); if (conf_verbose) { log_info2("send "); print_packet(pack); } pppoe_send(serv->hnd.fd, pack); } static void pppoe_send_PADS(struct pppoe_conn_t *conn) { uint8_t pack[ETHER_MAX_LEN]; 
setup_header(pack, conn->serv->hwaddr, conn->addr, CODE_PADS, conn->sid); add_tag(pack, TAG_AC_NAME, (uint8_t *)conf_ac_name, strlen(conf_ac_name)); add_tag2(pack, conn->service_name); if (conn->host_uniq) add_tag2(pack, conn->host_uniq); if (conn->relay_sid) add_tag2(pack, conn->relay_sid); if (conf_verbose) { log_info2("send "); print_packet(pack); } __sync_add_and_fetch(&stat_PADS_sent, 1); pppoe_send(conn->disc_sock, pack); } static void pppoe_send_PADT(struct pppoe_conn_t *conn) { uint8_t pack[ETHER_MAX_LEN]; setup_header(pack, conn->serv->hwaddr, conn->addr, CODE_PADT, conn->sid); add_tag(pack, TAG_AC_NAME, (uint8_t *)conf_ac_name, strlen(conf_ac_name)); add_tag2(pack, conn->service_name); if (conn->host_uniq) add_tag2(pack, conn->host_uniq); if (conn->relay_sid) add_tag2(pack, conn->relay_sid); if (conf_verbose) { log_info2("send "); print_packet(pack); } pppoe_send(conn->disc_sock, pack); } static void free_delayed_pado(struct delayed_pado_t *pado) { triton_timer_del(&pado->timer); __sync_sub_and_fetch(&stat_delayed_pado, 1); list_del(&pado->entry); if (pado->host_uniq) _free(pado->host_uniq); if (pado->relay_sid) _free(pado->relay_sid); if (pado->service_name) _free(pado->service_name); mempool_free(pado); } static void pado_timer(struct triton_timer_t *t) { struct delayed_pado_t *pado = container_of(t, typeof(*pado), timer); if (!ppp_shutdown) pppoe_send_PADO(pado->serv, pado->addr, pado->host_uniq, pado->relay_sid, pado->service_name); free_delayed_pado(pado); } static int check_padi_limit(struct pppoe_serv_t *serv, uint8_t *addr) { struct padi_t *padi; struct timespec ts; if (serv->padi_limit == 0) goto connlimit_check; clock_gettime(CLOCK_MONOTONIC, &ts); while (!list_empty(&serv->padi_list)) { padi = list_entry(serv->padi_list.next, typeof(*padi), entry); if ((ts.tv_sec - padi->ts.tv_sec) * 1000 + (ts.tv_nsec - padi->ts.tv_nsec) / 1000000 > 1000) { list_del(&padi->entry); mempool_free(padi); serv->padi_cnt--; __sync_sub_and_fetch(&total_padi_cnt, 1); 
} else break; } if (serv->padi_cnt == serv->padi_limit) return -1; if (conf_padi_limit && total_padi_cnt >= conf_padi_limit) return -1; list_for_each_entry(padi, &serv->padi_list, entry) { if (memcmp(padi->addr, addr, ETH_ALEN) == 0) return -1; } padi = mempool_alloc(padi_pool); if (!padi) return -1; padi->ts = ts; memcpy(padi->addr, addr, ETH_ALEN); list_add_tail(&padi->entry, &serv->padi_list); serv->padi_cnt++; __sync_add_and_fetch(&total_padi_cnt, 1); connlimit_check: if (triton_module_loaded("connlimit") && connlimit_check(cl_key_from_mac(addr))) return -1; return 0; } static void pppoe_recv_PADI(struct pppoe_serv_t *serv, uint8_t *pack, int size) { struct ethhdr *ethhdr = (struct ethhdr *)pack; struct pppoe_hdr *hdr = (struct pppoe_hdr *)(pack + ETH_HLEN); struct pppoe_tag *tag; struct pppoe_tag *host_uniq_tag = NULL; struct pppoe_tag *relay_sid_tag = NULL; struct pppoe_tag *service_name_tag = NULL; int n, i, service_match = 0; struct delayed_pado_t *pado; char **service_names = NULL; struct timespec ts; int len; __sync_add_and_fetch(&stat_PADI_recv, 1); if (ppp_shutdown || pado_delay == -1) return; if (check_padi_limit(serv, ethhdr->h_source)) { __sync_add_and_fetch(&stat_PADI_drop, 1); if (conf_verbose) { clock_gettime(CLOCK_MONOTONIC, &ts); if (ts.tv_sec - 60 >= serv->last_padi_limit_warn) { log_warn("pppoe: discarding overlimit PADI packets on interface %s\n", serv->ifname); serv->last_padi_limit_warn = ts.tv_sec; } } return; } if (hdr->sid) return; if (conf_verbose) { log_info2("recv "); print_packet(pack); } if (serv->service_names[0]) service_names = serv->service_names; else if (conf_service_names[0]) service_names = conf_service_names; len = ntohs(hdr->length); for (n = 0; n < len; n += sizeof(*tag) + ntohs(tag->tag_len)) { tag = (struct pppoe_tag *)(pack + ETH_HLEN + sizeof(*hdr) + n); if (n + sizeof(*tag) + ntohs(tag->tag_len) > len) return; switch (ntohs(tag->tag_type)) { case TAG_END_OF_LIST: break; case TAG_SERVICE_NAME: if (service_names && 
tag->tag_len) { for (i = 0; i < MAX_SERVICE_NAMES && service_names[i]; i++) { if (ntohs(tag->tag_len) != strlen(service_names[i])) continue; if (memcmp(tag->tag_data, service_names[i], ntohs(tag->tag_len))) continue; if (conf_reply_exact_service) service_name_tag = tag; service_match = 1; break; } } else if (!serv->require_service_name) { service_name_tag = tag; service_match = 1; } break; case TAG_HOST_UNIQ: host_uniq_tag = tag; break; case TAG_RELAY_SESSION_ID: relay_sid_tag = tag; break; } } if (conf_verbose) { log_info2("recv "); print_packet(pack); } if (!service_match) { if (conf_verbose) log_warn("pppoe: discarding PADI packet (Service-Name mismatch)\n"); return; } if (pado_delay) { list_for_each_entry(pado, &serv->pado_list, entry) { if (memcmp(pado->addr, ethhdr->h_source, ETH_ALEN)) continue; if (conf_verbose) log_warn("pppoe: discarding PADI packet (already queued)\n"); return; } pado = mempool_alloc(pado_pool); memset(pado, 0, sizeof(*pado)); pado->serv = serv; memcpy(pado->addr, ethhdr->h_source, ETH_ALEN); if (host_uniq_tag) { pado->host_uniq = _malloc(sizeof(*host_uniq_tag) + ntohs(host_uniq_tag->tag_len)); memcpy(pado->host_uniq, host_uniq_tag, sizeof(*host_uniq_tag) + ntohs(host_uniq_tag->tag_len)); } if (relay_sid_tag) { pado->relay_sid = _malloc(sizeof(*relay_sid_tag) + ntohs(relay_sid_tag->tag_len)); memcpy(pado->relay_sid, relay_sid_tag, sizeof(*relay_sid_tag) + ntohs(relay_sid_tag->tag_len)); } if (service_name_tag) { pado->service_name = _malloc(sizeof(*service_name_tag) + ntohs(service_name_tag->tag_len)); memcpy(pado->service_name, service_name_tag, sizeof(*service_name_tag) + ntohs(service_name_tag->tag_len)); } pado->timer.expire = pado_timer; pado->timer.period = pado_delay; triton_timer_add(&serv->ctx, &pado->timer, 0); list_add_tail(&pado->entry, &serv->pado_list); __sync_add_and_fetch(&stat_delayed_pado, 1); } else pppoe_send_PADO(serv, ethhdr->h_source, host_uniq_tag, relay_sid_tag, service_name_tag); } static void 
pppoe_recv_PADR(struct pppoe_serv_t *serv, uint8_t *pack, int size) { struct ethhdr *ethhdr = (struct ethhdr *)pack; struct pppoe_hdr *hdr = (struct pppoe_hdr *)(pack + ETH_HLEN); struct pppoe_tag *tag; struct pppoe_tag *host_uniq_tag = NULL; struct pppoe_tag *relay_sid_tag = NULL; struct pppoe_tag *ac_cookie_tag = NULL; struct pppoe_tag *service_name_tag = NULL; struct pppoe_tag *tr101_tag = NULL; int n, i, service_match = 0; struct pppoe_conn_t *conn; int vendor_id; char **service_names = NULL; __sync_add_and_fetch(&stat_PADR_recv, 1); if (ppp_shutdown) return; if (!memcmp(ethhdr->h_dest, bc_addr, ETH_ALEN)) { if (conf_verbose) log_warn("pppoe: discard PADR (destination address is broadcast)\n"); return; } if (hdr->sid) { if (conf_verbose) log_warn("pppoe: discarding PADR packet (sid is not zero)\n"); return; } if (conf_verbose) { log_info2("recv "); print_packet(pack); } if (serv->service_names[0]) service_names = serv->service_names; else if (conf_service_names[0]) service_names = conf_service_names; for (n = 0; n < ntohs(hdr->length); n += sizeof(*tag) + ntohs(tag->tag_len)) { tag = (struct pppoe_tag *)(pack + ETH_HLEN + sizeof(*hdr) + n); if (n + sizeof(*tag) > ntohs(hdr->length)) { if (conf_verbose) log_warn("pppoe: discard PADR packet (truncated tag)\n"); return; } if (n + sizeof(*tag) + ntohs(tag->tag_len) > ntohs(hdr->length)) { if (conf_verbose) log_warn("pppoe: discard PADR packet (invalid tag length)\n"); return; } switch (ntohs(tag->tag_type)) { case TAG_END_OF_LIST: break; case TAG_SERVICE_NAME: service_name_tag = tag; if (tag->tag_len == 0) service_match = 1; else if (service_names) { for (i = 0; i < MAX_SERVICE_NAMES && service_names[i]; i++) { if (ntohs(tag->tag_len) != strlen(service_names[i])) continue; if (memcmp(tag->tag_data, service_names[i], ntohs(tag->tag_len))) continue; service_match = 1; break; } } else { service_match = 1; } break; case TAG_HOST_UNIQ: host_uniq_tag = tag; break; case TAG_AC_COOKIE: ac_cookie_tag = tag; break; case 
TAG_RELAY_SESSION_ID: relay_sid_tag = tag; break; case TAG_VENDOR_SPECIFIC: if (ntohs(tag->tag_len) < 4) continue; vendor_id = ntohl(*(uint32_t *)tag->tag_data); if (vendor_id == VENDOR_ADSL_FORUM) if (conf_tr101) tr101_tag = tag; break; } } if (!ac_cookie_tag) { if (conf_verbose) log_warn("pppoe: discard PADR packet (no AC-Cookie tag present)\n"); return; } if (ntohs(ac_cookie_tag->tag_len) != COOKIE_LENGTH) { if (conf_verbose) log_warn("pppoe: discard PADR packet (incorrect AC-Cookie tag length)\n"); return; } if (check_cookie(serv, ethhdr->h_source, (uint8_t *)ac_cookie_tag->tag_data)) { if (conf_verbose) log_warn("pppoe: discard PADR packet (incorrect AC-Cookie)\n"); return; } if (!service_match) { if (conf_verbose) log_warn("pppoe: Service-Name mismatch\n"); pppoe_send_err(serv, ethhdr->h_source, host_uniq_tag, relay_sid_tag, CODE_PADS, TAG_SERVICE_NAME_ERROR); return; } pthread_mutex_lock(&serv->lock); conn = find_channel(serv, (uint8_t *)ac_cookie_tag->tag_data); if (conn && !conn->ppp.username) { __sync_add_and_fetch(&stat_PADR_dup_recv, 1); pppoe_send_PADS(conn); } pthread_mutex_unlock(&serv->lock); if (conn) return; conn = allocate_channel(serv, ethhdr->h_source, host_uniq_tag, relay_sid_tag, service_name_tag, tr101_tag, (uint8_t *)ac_cookie_tag->tag_data); if (!conn) pppoe_send_err(serv, ethhdr->h_source, host_uniq_tag, relay_sid_tag, CODE_PADS, TAG_AC_SYSTEM_ERROR); else { pppoe_send_PADS(conn); triton_context_call(&conn->ctx, (triton_event_func)connect_channel, conn); } } static void pppoe_recv_PADT(struct pppoe_serv_t *serv, uint8_t *pack) { struct ethhdr *ethhdr = (struct ethhdr *)pack; struct pppoe_hdr *hdr = (struct pppoe_hdr *)(pack + ETH_HLEN); struct pppoe_conn_t *conn; if (!memcmp(ethhdr->h_dest, bc_addr, ETH_ALEN)) { if (conf_verbose) log_warn("pppoe: discard PADT (destination address is broadcast)\n"); return; } if (conf_verbose) { log_info2("recv "); print_packet(pack); } pthread_mutex_lock(&serv->lock); conn = serv->conn[ntohs(hdr->sid)]; 
if (conn && !memcmp(conn->addr, ethhdr->h_source, ETH_ALEN)) triton_context_call(&conn->ctx, (void (*)(void *))disconnect, conn); pthread_mutex_unlock(&serv->lock); } static int pppoe_serv_read(struct triton_md_handler_t *h) { struct pppoe_serv_t *serv = container_of(h, typeof(*serv), hnd); uint8_t pack[ETHER_MAX_LEN]; struct ethhdr *ethhdr = (struct ethhdr *)pack; struct pppoe_hdr *hdr = (struct pppoe_hdr *)(pack + ETH_HLEN); int n; while (1) { n = read(h->fd, pack, sizeof(pack)); if (n < 0) { if (errno == EAGAIN) break; log_error("pppoe: read: %s\n", strerror(errno)); return 0; } if (n < ETH_HLEN + sizeof(*hdr)) { if (conf_verbose) log_warn("pppoe: short packet received (%i)\n", n); continue; } if (mac_filter_check(ethhdr->h_source)) continue; if (memcmp(ethhdr->h_dest, bc_addr, ETH_ALEN) && memcmp(ethhdr->h_dest, serv->hwaddr, ETH_ALEN)) continue; if (!memcmp(ethhdr->h_source, bc_addr, ETH_ALEN)) { if (conf_verbose) log_warn("pppoe: discarding packet (host address is broadcast)\n"); continue; } if ((ethhdr->h_source[0] & 1) != 0) { if (conf_verbose) log_warn("pppoe: discarding packet (host address is not unicast)\n"); continue; } if (n < ETH_HLEN + sizeof(*hdr) + ntohs(hdr->length)) { if (conf_verbose) log_warn("pppoe: short packet received\n"); continue; } if (hdr->ver != 1) { if (conf_verbose) log_warn("pppoe: discarding packet (unsupported version %i)\n", hdr->ver); continue; } if (hdr->type != 1) { if (conf_verbose) log_warn("pppoe: discarding packet (unsupported type %i)\n", hdr->type); } switch (hdr->code) { case CODE_PADI: pppoe_recv_PADI(serv, pack, n); break; case CODE_PADR: pppoe_recv_PADR(serv, pack, n); break; case CODE_PADT: pppoe_recv_PADT(serv, pack); break; } } return 0; } static void pppoe_serv_close(struct triton_context_t *ctx) { struct pppoe_serv_t *serv = container_of(ctx, typeof(*serv), ctx); triton_md_disable_handler(&serv->hnd, MD_MODE_READ | MD_MODE_WRITE); serv->stopping = 1; pthread_mutex_lock(&serv->lock); if (!serv->conn_cnt) { 
pthread_mutex_unlock(&serv->lock); pppoe_server_free(serv); return; } pthread_mutex_unlock(&serv->lock); } int pppoe_add_service_name(char **list, const char *item) { int i; for (i = 0; i < MAX_SERVICE_NAMES; i++) { if (!list[i]) { list[i] = _strdup(item); return 0; } } return -1; } int pppoe_del_service_name(char **list, const char *item) { int i, found = -1; for (i = 0; i < MAX_SERVICE_NAMES; i++) { if (list[i] && !strcmp(list[i], item)) found = i; if (found >= 0 && (!list[i] || i == MAX_SERVICE_NAMES-1)) { _free(list[found]); if (found != i) list[found] = list[i]; list[i] = NULL; return 0; } } return -1; } static int parse_interface_set_option(struct pppoe_serv_t *serv, char *property, char *value, char *errbuf) { if (!strcmp(property, "padi-limit")) { serv->padi_limit = atol(value); if (serv->padi_limit < 0) { sprintf(errbuf, "Invalid padi-limit value %d", serv->padi_limit); return 0; } } else if (!strcmp(property, "require-service-name") || !strcmp(property, "require-sn")) { serv->require_service_name = !!atoi(value); } else if (!strcmp(property, "service-name")) { if (pppoe_add_service_name(serv->service_names, value)) { sprintf(errbuf, "Cannot add Service-Name '%s'", value); return 0; } } else { sprintf(errbuf, "Unknown option: '%s'", property); return 0; } return -1; } enum parse_ifopt_state { PIS_Property = 0, PIS_AnyValue, PIS_QuotedValue, PIS_UnquotedValue, PIS_ExpectComma }; static int parse_interface_options(const char *ifopt, struct pppoe_serv_t *serv, char **errmsg) { enum parse_ifopt_state state = PIS_Property; char *str = _strdup(ifopt); char *cur, *start, *property = NULL; char error[1280]; char c; int running = -1; *error = 0; start = cur = str; while (running) { c = *cur; switch (state) { case PIS_Property: if (!c) { if (!property && cur != start) property = start; if (property && strlen(property) > 0) parse_interface_set_option(serv, property, "1", error); running = 0; } else if (c == '=') { property = start; *cur = 0; state = PIS_AnyValue; } 
else if (c == ',') { property = start; *cur = 0; if (property && strlen(property) > 0) running = parse_interface_set_option(serv, property, "1", error); start = cur + 1; } else if (!(isalpha(c) || isdigit(c) || c == '-')) { sprintf(error, "Invalid character 0x%02x in property name at offset %ld", c, cur - str); running = 0; } break; case PIS_AnyValue: if (!c || c == ',') { running = parse_interface_set_option(serv, property, "", error); if (!c) { running = 0; } } else if (c == '"') { start = cur + 1; state = PIS_QuotedValue; } else { start = cur; state = PIS_UnquotedValue; } break; case PIS_QuotedValue: if (!c) { sprintf(error, "Unexpected end-of-string while parsing value for '%s'", property); running = 0; } else if (c == '"') { *cur = 0; running = parse_interface_set_option(serv, property, start, error); state = PIS_ExpectComma; } break; case PIS_UnquotedValue: if (!c || c == ',') { *cur = 0; running = parse_interface_set_option(serv, property, start, error); if (!c) { running = 0; } start = cur + 1; state = PIS_Property; } break; case PIS_ExpectComma: if (!c || c == ',') { start = cur + 1; state = PIS_Property; if (!c) { running = 0; } } else { sprintf(error, "Expected comma or end-of-string but got 0x%02x at offset %ld", c, cur - str); running = 0; } break; default: sprintf(error, "Bug in parse_interface_options: parser ran into unknown state %d", state); running = 0; } if (running) { cur++; } } _free(str); if (*error) { *errmsg = _strdup(error); return -1; } return 0; } static int __pppoe_add_interface_re(int index, int flags, const char *name, struct iplink_arg *arg) { if (pcre_exec(arg->re, NULL, name, strlen(name), 0, 0, NULL, 0) < 0) return 0; __pppoe_server_start(name, arg->opt, arg->cli); return 0; } static void pppoe_add_interface_re(const char *opt, void *cli) { pcre *re = NULL; const char *pcre_err; char *pattern; const char *ptr; int pcre_offset; struct iplink_arg arg; for (ptr = opt; *ptr && *ptr != ','; ptr++); pattern = _malloc(ptr - (opt + 3) + 
1); memcpy(pattern, opt + 3, ptr - (opt + 3)); pattern[ptr - (opt + 3)] = 0; re = pcre_compile2(pattern, 0, NULL, &pcre_err, &pcre_offset, NULL); if (!re) { log_error("pppoe: %s at %i\r\n", pcre_err, pcre_offset); return; } arg.re = re; arg.opt = ptr; arg.cli = cli; iplink_list((iplink_list_func)__pppoe_add_interface_re, &arg); pcre_free(re); _free(pattern); } void pppoe_server_start(const char *opt, void *cli) { char name[IFNAMSIZ]; const char *ptr; if (strlen(opt) > 3 && memcmp(opt, "re:", 3) == 0) { pppoe_add_interface_re(opt, cli); return; } ptr = strchr(opt, ','); if (ptr) { memcpy(name, opt, ptr - opt); name[ptr - opt] = 0; __pppoe_server_start(name, ptr, cli); } else __pppoe_server_start(opt, opt, cli); } static void __pppoe_server_start(const char *ifname, const char *opt, void *cli) { struct pppoe_serv_t *serv; int sock; int f = 1; struct ifreq ifr; struct sockaddr_ll sa; char *ifopt, *errmsg; pthread_rwlock_rdlock(&serv_lock); list_for_each_entry(serv, &serv_list, entry) { if (!strcmp(serv->ifname, ifname)) { if (cli) cli_send(cli, "error: already exists\r\n"); pthread_rwlock_unlock(&serv_lock); return; } } pthread_rwlock_unlock(&serv_lock); serv = _malloc(sizeof(*serv)); memset(serv, 0, sizeof(*serv)); if (init_secret(serv)) { if (cli) cli_sendv(cli, "init secret failed\r\n"); _free(serv); return; } sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_PPP_DISC)); if (sock < 0) { if (cli) cli_sendv(cli, "socket: %s\r\n", strerror(errno)); log_emerg("pppoe: socket: %s\n", strerror(errno)); _free(serv); return; } fcntl(sock, F_SETFD, fcntl(sock, F_GETFD) | FD_CLOEXEC); if (setsockopt(sock, SOL_SOCKET, SO_BROADCAST, &f, sizeof(f))) { if (cli) cli_sendv(cli, "setsockopt(SO_BROADCAST): %s\r\n", strerror(errno)); log_emerg("pppoe: setsockopt(SO_BROADCAST): %s\n", strerror(errno)); goto out_err; } strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); if (ioctl(sock, SIOCGIFHWADDR, &ifr)) { if (cli) cli_sendv(cli, "ioctl(SIOCGIFHWADDR): %s\r\n", strerror(errno)); 
log_emerg("pppoe: ioctl(SIOCGIFHWADDR): %s\n", strerror(errno)); goto out_err; } #ifdef ARPHDR_ETHER if (ifr.ifr_hwaddr.sa_family != ARPHDR_ETHER) { log_emerg("pppoe: interface %s is not ethernet\n", ifname); goto out_err; } #endif if ((ifr.ifr_hwaddr.sa_data[0] & 1) != 0) { if (cli) cli_sendv(cli, "interface %s has not unicast address\r\n", ifname); log_emerg("pppoe: interface %s has not unicast address\n", ifname); goto out_err; } memcpy(serv->hwaddr, ifr.ifr_hwaddr.sa_data, ETH_ALEN); if (ioctl(sock, SIOCGIFMTU, &ifr)) { if (cli) cli_sendv(cli, "ioctl(SIOCGIFMTU): %s\r\n", strerror(errno)); log_emerg("pppoe: ioctl(SIOCGIFMTU): %s\n", strerror(errno)); goto out_err; } if (ifr.ifr_mtu < ETH_DATA_LEN) { if (cli) cli_sendv(cli, "interface %s has MTU of %i, should be %i\r\n", ifname, ifr.ifr_mtu, ETH_DATA_LEN); log_emerg("pppoe: interface %s has MTU of %i, should be %i\n", ifname, ifr.ifr_mtu, ETH_DATA_LEN); } if (ioctl(sock, SIOCGIFINDEX, &ifr)) { if (cli) cli_sendv(cli, "ioctl(SIOCGIFINDEX): %s\r\n", strerror(errno)); log_emerg("pppoe: ioctl(SIOCGIFINDEX): %s\n", strerror(errno)); goto out_err; } memset(&sa, 0, sizeof(sa)); sa.sll_family = AF_PACKET; sa.sll_protocol = htons(ETH_P_PPP_DISC); sa.sll_ifindex = ifr.ifr_ifindex; if (bind(sock, (struct sockaddr *)&sa, sizeof(sa))) { if (cli) cli_sendv(cli, "bind: %s\n", strerror(errno)); log_emerg("pppoe: bind: %s\n", strerror(errno)); goto out_err; } if (fcntl(sock, F_SETFL, O_NONBLOCK)) { if (cli) cli_sendv(cli, "failed to set nonblocking mode: %s\n", strerror(errno)); log_emerg("pppoe: failed to set nonblocking mode: %s\n", strerror(errno)); goto out_err; } serv->padi_limit = conf_padi_limit; ifopt = strchr(opt, ','); if (ifopt) ifopt++; /* point after comma, not to it */ if (ifopt && parse_interface_options(ifopt, serv, &errmsg)) { if (cli) cli_sendv(cli, "%s\r\n", errmsg); else log_error("pppoe: %s\r\n", errmsg); _free(errmsg); goto out_err; } serv->ctx.close = pppoe_serv_close; serv->ctx.before_switch = log_switch; 
serv->hnd.fd = sock; serv->hnd.read = pppoe_serv_read; serv->ifname = _strdup(ifname); pthread_mutex_init(&serv->lock, NULL); INIT_LIST_HEAD(&serv->conn_list); INIT_LIST_HEAD(&serv->pado_list); INIT_LIST_HEAD(&serv->padi_list); triton_context_register(&serv->ctx, NULL); triton_md_register_handler(&serv->ctx, &serv->hnd); triton_md_enable_handler(&serv->hnd, MD_MODE_READ); triton_context_wakeup(&serv->ctx); pthread_rwlock_wrlock(&serv_lock); list_add_tail(&serv->entry, &serv_list); pthread_rwlock_unlock(&serv_lock); return; out_err: close(sock); _free(serv); } static void _conn_stop(struct pppoe_conn_t *conn) { ppp_terminate(&conn->ppp, TERM_ADMIN_RESET, 0); } static void _server_stop(struct pppoe_serv_t *serv) { struct pppoe_conn_t *conn; if (serv->stopping) return; serv->stopping = 1; triton_md_disable_handler(&serv->hnd, MD_MODE_READ | MD_MODE_WRITE); pthread_mutex_lock(&serv->lock); if (!serv->conn_cnt) { pthread_mutex_unlock(&serv->lock); pppoe_server_free(serv); return; } list_for_each_entry(conn, &serv->conn_list, entry) triton_context_call(&conn->ctx, (triton_event_func)_conn_stop, conn); pthread_mutex_unlock(&serv->lock); } void pppoe_server_free(struct pppoe_serv_t *serv) { struct delayed_pado_t *pado; int i; pthread_rwlock_wrlock(&serv_lock); list_del(&serv->entry); pthread_rwlock_unlock(&serv_lock); while (!list_empty(&serv->pado_list)) { pado = list_entry(serv->pado_list.next, typeof(*pado), entry); free_delayed_pado(pado); } triton_md_unregister_handler(&serv->hnd); close(serv->hnd.fd); triton_context_unregister(&serv->ctx); for (i = 0; i < MAX_SERVICE_NAMES; i++) { if (serv->service_names[i]) { _free(serv->service_names[i]); serv->service_names[i] = NULL; } } _free(serv->ifname); _free(serv); } void pppoe_server_stop(const char *ifname) { struct pppoe_serv_t *serv; pthread_rwlock_rdlock(&serv_lock); list_for_each_entry(serv, &serv_list, entry) { if (strcmp(serv->ifname, ifname)) continue; triton_context_call(&serv->ctx, 
(triton_event_func)_server_stop, serv); break; } pthread_rwlock_unlock(&serv_lock); } void __export pppoe_get_stat(unsigned int **starting, unsigned int **active) { *starting = &stat_starting; *active = &stat_active; } static int init_secret(struct pppoe_serv_t *serv) { DES_cblock key; if (read(urandom_fd, serv->secret, SECRET_LENGTH) < 0) { log_emerg("pppoe: failed to read /dev/urandom: %s\n", strerror(errno)); return -1; } memset(key, 0, sizeof(key)); DES_random_key(&key); DES_set_key(&key, &serv->des_ks); return 0; } static void load_config(void) { char *opt; opt = conf_get_opt("pppoe", "verbose"); if (opt) conf_verbose = atoi(opt); opt = conf_get_opt("pppoe", "ac-name"); if (!opt) opt = conf_get_opt("pppoe", "AC-Name"); if (opt) { if (conf_ac_name) _free(conf_ac_name); conf_ac_name = _strdup(opt); } else conf_ac_name = _strdup("accel-ppp"); opt = conf_get_opt("pppoe", "reply-exact-service"); if (!opt) opt = conf_get_opt("pppoe", "Reply-Exact-Service"); if (opt) { conf_reply_exact_service = !!atoi(opt); } opt = conf_get_opt("pppoe", "ifname-in-sid"); if (opt) { if (!strcmp(opt, "calling-sid")) conf_ifname_in_sid = 1; else if (!strcmp(opt, "called-sid")) conf_ifname_in_sid = 2; else if (!strcmp(opt, "both")) conf_ifname_in_sid = 3; else if (atoi(opt) >= 0) conf_ifname_in_sid = atoi(opt); } opt = conf_get_opt("pppoe", "pado-delay"); if (!opt) opt = conf_get_opt("pppoe", "PADO-Delay"); if (opt) dpado_parse(opt); opt = conf_get_opt("pppoe", "tr101"); if (opt) conf_tr101 = atoi(opt); opt = conf_get_opt("pppoe", "padi-limit"); if (opt) conf_padi_limit = atoi(opt); conf_mppe = MPPE_UNSET; opt = conf_get_opt("l2tp", "mppe"); if (opt) { if (strcmp(opt, "deny") == 0) conf_mppe = MPPE_DENY; else if (strcmp(opt, "allow") == 0) conf_mppe = MPPE_ALLOW; else if (strcmp(opt, "prefer") == 0) conf_mppe = MPPE_PREFER; else if (strcmp(opt, "require") == 0) conf_mppe = MPPE_REQUIRE; } opt = conf_get_opt("pppoe", "ip-pool"); if (opt) { if (!conf_ip_pool || strcmp(conf_ip_pool, opt)) 
conf_ip_pool = _strdup(opt); } else conf_ip_pool = NULL; } static void pppoe_init(void) { struct conf_sect_t *s = conf_get_section("pppoe"); struct conf_option_t *opt; int fd; fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OE); if (fd >= 0) close(fd); else if (system("modprobe -q pppoe")) log_warn("failed to load pppoe kernel module\n"); conn_pool = mempool_create(sizeof(struct pppoe_conn_t)); pado_pool = mempool_create(sizeof(struct delayed_pado_t)); padi_pool = mempool_create(sizeof(struct padi_t)); if (!s) { log_emerg("pppoe: no configuration, disabled...\n"); return; } list_for_each_entry(opt, &s->items, entry) { if (opt->val) { if (!strcmp(opt->name, "interface")) { pppoe_server_start(opt->val, NULL); } else if (!strcmp(opt->name, "service-name") || !strcmp(opt->name, "Service-Name")) { pppoe_add_service_name(conf_service_names, opt->val); } } } load_config(); triton_event_register_handler(EV_CONFIG_RELOAD, (triton_event_func)load_config); } DEFINE_INIT(21, pppoe_init);
gpl-2.0
brymaster5000/Lunar_Max
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
513
209656
/******************************************************************************* Intel 10 Gigabit PCI Express Linux driver Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include <linux/types.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/string.h> #include <linux/in.h> #include <linux/interrupt.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/sctp.h> #include <linux/pkt_sched.h> #include <linux/ipv6.h> #include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/ethtool.h> #include <linux/if.h> #include <linux/if_vlan.h> #include <linux/prefetch.h> #include <scsi/fc/fc_fcoe.h> #include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; #ifdef IXGBE_FCOE char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #else 
static char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#endif

/* driver version is built as "MAJ.MIN.BUILD-k" */
#define MAJ 3
#define MIN 8
#define BUILD 21
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
	__stringify(BUILD) "-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
				"Copyright (c) 1999-2012 Intel Corporation.";

/* per-board init info, indexed by the board_* enum carried in the PCI table */
static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
	[board_X540] = &ixgbe_X540_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
/* notifier for Direct Cache Access (DCA) provider add/remove events */
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next = NULL,
	.priority = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/*
 * ixgbe_service_event_schedule - queue the service task
 * @adapter: driver private struct
 *
 * Schedules service_task unless the adapter is going down or the task is
 * already scheduled; test_and_set_bit() makes the "already scheduled"
 * check and the claim atomic.
 */
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

/* ixgbe_service_event_complete - release the SERVICE_SCHED claim taken above */
static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED,
			 &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_clear_bit();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

/* maps a register offset to a printable name for the debug dump below */
struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{}
};

/*
 * ixgbe_regdump - register printout routine
 *
 * For the per-queue registers above, all 64 instances are read and printed
 * eight per line; any other offset is printed as a single value.
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i =
		     0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		/* not a per-queue register; print the single value */
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	/* print the 64 collected values, eight per line */
	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	/* dump is gated on the hw message-level flag */
	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name state "
			"trans_start last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(tx_buffer, dma),
			dma_unmap_len(tx_buffer, len),
			tx_buffer->next_to_watch,
			(u64)tx_buffer->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 * +--------------------------------------------------------------+
	 * 0 | Buffer Address [63:0] |
	 * +--------------------------------------------------------------+
	 * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
	 * +--------------------------------------------------------------+
	 * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc] [address 63:0 ] "
			"[PlPOIdStDDt Ln] [bi->dma ] "
			"leng ntw timestamp bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC(tx_ring, i);
			tx_buffer = &tx_ring->tx_buffer_info[i];
			/* view the descriptor as two raw little-endian u64s */
			u0 = (struct my_u0 *)tx_desc;
			pr_info("T [0x%03X] %016llX %016llX %016llX"
				" %04X %p %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(tx_buffer, dma),
				dma_unmap_len(tx_buffer, len),
				tx_buffer->next_to_watch,
				(u64)tx_buffer->time_stamp,
				tx_buffer->skb);
			/* annotate next-to-use / next-to-clean positions */
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				pr_cont(" NTC/U\n");
			else if (i == tx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == tx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

			if (netif_msg_pktdata(adapter) &&
			    dma_unmap_len(tx_buffer, len) != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS, 16, 1,
					phys_to_virt(dma_unmap_addr(tx_buffer,
								    dma)),
					dma_unmap_len(tx_buffer, len),
					true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 * 63 1 0
	 * +-----------------------------------------------------+
	 * 0 | Packet Buffer Address [63:1] |A0/NSE|
	 * +----------------------------------------------+------+
	 * 8 | Header Buffer Address [63:1] | DD |
	 * +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 * 63 48 47 32 31 30 21 20 16 15 4 3 0
	 * +------------------------------------------------------+
	 * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
	 * | Checksum Ident | | | | Type | Type |
	 * +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 * +------------------------------------------------------+
	 * 63 48 47 32 31 20 19 0
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R [desc] [ PktBuf A0] "
			"[ HeadBuf DD] [bi->dma ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		pr_info("RWB[desc] [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC(rx_ring, i);
			/* view descriptor as two raw little-endian u64s */
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X] %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R [0x%03X] %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   phys_to_virt(rx_buffer_info->dma),
					   ixgbe_rx_bufsz(rx_ring), true);
				}
			}

			/* annotate next-to-use / next-to-clean positions */
			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

		}
	}

exit:
	return;
}

/* ixgbe_release_hw_control - clear DRV_LOAD so firmware controls the h/w */
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

/* ixgbe_get_hw_control - set DRV_LOAD so firmware yields control to driver */
static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598: four 8-bit entries per IVAR register; the index
		 * math below selects the register (& 0x1F) and the byte
		 * lane (queue & 0x3) within it.
		 */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index
			      = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

/*
 * ixgbe_irq_rearm_queues - trigger interrupts for the queues in @qmask
 * by writing the EICS (interrupt cause set) register(s).
 * 82599/X540 split the 64-bit queue mask across two EICS_EX registers.
 */
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

/*
 * ixgbe_unmap_and_free_tx_resource - free skb and DMA mapping of a tx buffer.
 * An skb-bearing buffer holds a single-mapped header; a buffer without an
 * skb holds page-mapped frag data, hence the unmap_single/unmap_page split.
 */
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
				      struct ixgbe_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/*
 * ixgbe_update_xoff_received - update flow-control pause (XOFF) statistics
 * and disarm hang detection on queues that were paused by received XOFF
 * frames (link-level or per-priority PFC).
 */
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 data = 0;
	u32 xoff[8] = {0};
	int i;

	if ((hw->fc.current_mode == ixgbe_fc_full) ||
	    (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
			break;
		default:
			data =
			       IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		}
		hwstats->lxoffrxc += data;

		/* refill credits (no tx hang) if we received xoff */
		if (!data)
			return;

		/* link-level pause was seen: disarm hang check on all queues */
		for (i = 0; i < adapter->num_tx_queues; i++)
			clear_bit(__IXGBE_HANG_CHECK_ARMED,
				  &adapter->tx_ring[i]->state);
		return;
	} else if (!(adapter->dcb_cfg.pfc_mode_enable))
		return;

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += xoff[i];
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
		u8 tc = tx_ring->dcb_tc;

		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}
}

/* total packets completed on this ring (used as hang-detection progress) */
static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->stats.packets;
}

/*
 * ixgbe_get_tx_pending - number of descriptors the hardware has not yet
 * processed, derived from the head/tail ring registers (accounts for
 * wraparound when head has passed tail).
 */
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

/*
 * ixgbe_check_tx_hang - returns true only when no completions have been
 * observed for two consecutive checks while work is still pending.
 */
static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/*
	 * Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames.
	 By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}

/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{

	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
		ixgbe_service_event_schedule(adapter);
	}
}

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 *
 * Walks completed descriptors up to the work-limit budget, freeing skbs
 * and DMA mappings; returns true when the ring was fully cleaned.
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	/* bias the index negative so wraparound is detected when i hits 0 */
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				/* wrapped: rewind to the start of the ring */
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	/* undo the negative bias applied before the loop */
	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct ixgbe_hw *hw = &adapter->hw;
		e_err(drv, "Detected Tx Unit Hang\n"
			" Tx Queue <%d>\n"
			" TDH, TDT <%x>, <%x>\n"
			" next_to_use <%x>\n"
			" next_to_clean <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			" time_stamp <%lx>\n"
			" jiffies <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, i,
			tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
			adapter->tx_timeout_count + 1, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbe_tx_timeout_reset(adapter);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index)
		    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

#ifdef CONFIG_IXGBE_DCA
/*
 * ixgbe_update_tx_dca - point this tx ring's DCA tag at the given CPU so
 * descriptor writes are steered to that CPU's cache. Register layout
 * differs between 82598 and 82599/X540.
 */
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
	u16 reg_offset;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
		txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		/* for unknown hardware do not write register */
		return;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled. This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
		  IXGBE_DCA_TXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}

/* ixgbe_update_rx_dca - rx-side counterpart of ixgbe_update_tx_dca */
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
	u8 reg_idx = rx_ring->reg_idx;

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		break;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled. This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_RXCTRL_DATA_DCA_EN |
		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

/*
 * ixgbe_update_dca - retarget DCA for all rings of a q_vector when the
 * vector has migrated to a different CPU; no-op if the CPU is unchanged.
 */
static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	ixgbe_for_each_ring(ring, q_vector->tx)
		ixgbe_update_tx_dca(adapter, ring, cpu);

	ixgbe_for_each_ring(ring, q_vector->rx)
		ixgbe_update_rx_dca(adapter, ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

/* ixgbe_setup_dca - enable DCA and (re)target every q_vector */
static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int num_q_vectors;
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_q_vectors = 1;

	for (i = 0; i < num_q_vectors; i++) {
		/* force an update by invalidating the cached CPU */
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

/* __ixgbe_notify_dca - handle DCA provider add/remove notifications */
static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
		return 0;

	switch (event)
	{
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}
#endif /* CONFIG_IXGBE_DCA */

/* copy the hardware RSS hash into the skb when the netdev asked for it */
static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
				 union ixgbe_adv_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}

#ifdef IXGBE_FCOE
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
				    union ixgbe_adv_rx_desc *rx_desc)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

	/* comparison is done in wire (little-endian) byte order */
	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}

#endif /* IXGBE_FCOE */
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbe_test_staterr(rx_desc,
				IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
		    test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
			return;

		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/*
 * ixgbe_release_rx_desc - publish new descriptors to the hardware by
 * advancing next_to_use/next_to_alloc and bumping the ring tail register.
 */
static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/*
 * ixgbe_alloc_mapped_page - ensure the rx buffer has a DMA-mapped page.
 * Pages are recycled, so most calls return immediately with the existing
 * mapping; otherwise a page is allocated and mapped for device writes.
 */
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
				    struct ixgbe_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma = bi->dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(dma))
		return true;

	/* alloc new page for storage */
	if (likely(!page)) {
		page = alloc_pages(GFP_ATOMIC | __GFP_COLD,
				   ixgbe_rx_pg_order(rx_ring));
		if (unlikely(!page)) {
			rx_ring->rx_stats.alloc_rx_page_failed++;
			return false;
		}
		bi->page = page;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);

	/*
	 * if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		put_page(page);
		bi->page = NULL;

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	/* flip between the two half-page buffer offsets */
	bi->page_offset ^= ixgbe_rx_bufsz(rx_ring);

	return true;
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void ixgbe_alloc_rx_buffers(struct
			    ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	/* bias the index negative so wraparound is detected when i hits 0 */
	i -= rx_ring->count;

	do {
		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
			break;

		/*
		 * Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	/* undo the negative bias applied before the loop */
	i += rx_ring->count;

	if (rx_ring->next_to_use != i)
		ixgbe_release_rx_desc(rx_ring, i);
}

/**
 * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @max_len: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int ixgbe_get_headlen(unsigned char *data,
				      unsigned int max_len)
{
	/* all members alias the same cursor into the frame */
	union {
		unsigned char *network;
		/* l2 headers */
		struct ethhdr *eth;
		struct vlan_hdr *vlan;
		/* l3 headers */
		struct iphdr *ipv4;
	} hdr;
	__be16 protocol;
	u8 nexthdr = 0;	/* default to not TCP */
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_len < ETH_HLEN)
		return max_len;

	/* initialize network frame pointer */
	hdr.network = data;

	/* set first protocol and move network header forward */
	protocol = hdr.eth->h_proto;
	hdr.network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (protocol == __constant_htons(ETH_P_8021Q)) {
		if ((hdr.network - data) > (max_len - VLAN_HLEN))
			return max_len;

		protocol = hdr.vlan->h_vlan_encapsulated_proto;
		hdr.network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (protocol == __constant_htons(ETH_P_IP)) {
		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
			return max_len;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return hdr.network - data;

		/* record next protocol */
		nexthdr = hdr.ipv4->protocol;
		hdr.network += hlen;
#ifdef IXGBE_FCOE
	} else if (protocol == __constant_htons(ETH_P_FCOE)) {
		if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
			return max_len;
		hdr.network += FCOE_HEADER_LEN;
#endif
	} else {
		/* unrecognized L3 protocol: headers end at the L2 boundary */
		return hdr.network - data;
	}

	/* finally sort out TCP */
	if (nexthdr == IPPROTO_TCP) {
		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
			return max_len;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return hdr.network - data;

		hdr.network += hlen;
	}

	/*
	 * If everything has gone correctly hdr.network should be the
	 * data section of the packet and will be the end of the header.
* If not then it probably represents the end of the last recognized * header. */ if ((hdr.network - data) < max_len) return hdr.network - data; else return max_len; } static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { __le32 rsc_enabled; u32 rsc_cnt; if (!ring_is_rsc_enabled(rx_ring)) return; rsc_enabled = rx_desc->wb.lower.lo_dword.data & cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); /* If this is an RSC frame rsc_cnt should be non-zero */ if (!rsc_enabled) return; rsc_cnt = le32_to_cpu(rsc_enabled); rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; } static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, struct sk_buff *skb) { u16 hdr_len = skb_headlen(skb); /* set gso_size to avoid messing up TCP MSS */ skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), IXGBE_CB(skb)->append_cnt); } static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { /* if append_cnt is 0 then frame is not RSC */ if (!IXGBE_CB(skb)->append_cnt) return; rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; rx_ring->rx_stats.rsc_flush++; ixgbe_set_rsc_gso_size(rx_ring, skb); /* gso_size is computed using append_cnt so always clear it last */ IXGBE_CB(skb)->append_cnt = 0; } /** * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated * * This function checks the ring, descriptor, and packet information in * order to populate the hash, checksum, VLAN, timestamp, protocol, and * other fields within the skb. 
**/
static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	/* RSC stats first: it consumes and clears append_cnt */
	ixgbe_update_rsc_stats(rx_ring, skb);

	ixgbe_rx_hash(rx_ring, rx_desc, skb);

	ixgbe_rx_checksum(rx_ring, rx_desc, skb);

	/* VP bit set means hardware stripped a VLAN tag for us */
	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		__vlan_hwaccel_put_tag(skb, vid);
	}

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/*
 * ixgbe_rx_skb - hand a completed skb up to the network stack
 *
 * Uses GRO normally; falls back to netif_rx() while in netpoll context.
 */
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
			 struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
			     union ixgbe_adv_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean (with ring wrap) */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBE_RX_DESC(rx_ring, ntc));

	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	/* append_cnt indicates packet is RSC, if so fetch nextp:
	 * for RSC the next buffer index comes from the descriptor's
	 * NEXTP field rather than being the sequential one */
	if (IXGBE_CB(skb)->append_cnt) {
		ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
		ntc &= IXGBE_RXDADV_NEXTP_MASK;
		ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
	}

	/* place skb in next buffer to be received */
	rx_ring->rx_buffer_info[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
**/
static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
				  union ixgbe_adv_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	struct net_device *netdev = rx_ring->netdev;
	unsigned char *va;
	unsigned int pull_len;

	/* if the page was released unmap it, else just sync our portion */
	if (unlikely(IXGBE_CB(skb)->page_released)) {
		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
		IXGBE_CB(skb)->page_released = false;
	} else {
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      frag->page_offset,
					      ixgbe_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);
	}
	IXGBE_CB(skb)->dma = 0;

	/* verify that the packet does not have any known errors;
	 * with RXALL set the stack wants errored frames anyway */
	if (unlikely(ixgbe_test_staterr(rx_desc,
					IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
	    !(netdev->features & NETIF_F_RXALL))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/*
	 * it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lomem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/*
	 * we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = skb_frag_size(frag);
	if (pull_len > 256)
		pull_len = ixgbe_get_headlen(va, pull_len);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;

	/*
	 * if we sucked the frag empty then we should free it,
	 * if there are other frags here something is screwed up in hardware
	 */
	if (skb_frag_size(frag) == 0) {
		BUG_ON(skb_shinfo(skb)->nr_frags != 1);
		skb_shinfo(skb)->nr_frags = 0;
		__skb_frag_unref(frag);
		skb->truesize -= ixgbe_rx_bufsz(rx_ring);
	}

#ifdef IXGBE_FCOE
	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
		return false;

#endif
	/* if skb_pad returns an error the skb was freed */
	if (unlikely(skb->len < 60)) {
		int pad_len = 60 - skb->len;

		if (skb_pad(skb, pad_len))
			return true;
		__skb_put(skb, pad_len);
	}

	return false;
}

/**
 * ixgbe_can_reuse_page - determine if we can reuse a page
 * @rx_buffer: pointer to rx_buffer containing the page we want to reuse
 *
 * Returns true if page can be reused in another Rx buffer
 **/
static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
{
	struct page *page = rx_buffer->page;

	/* if we are only owner of page and it is local we can reuse it */
	return likely(page_count(page) == 1) &&
	       likely(page_to_nid(page) == numa_node_id());
}

/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Syncronizes page for reuse by the adapter
 **/
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
				struct ixgbe_rx_buffer *old_buff)
{
	struct ixgbe_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;
	u16 bufsz = ixgbe_rx_bufsz(rx_ring);

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc (with ring wrap) */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;

	/* flip page offset to other buffer and store to new_buff:
	 * XOR with the buffer size toggles between the two halves */
	new_buff->page_offset = old_buff->page_offset ^ bufsz;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
					 new_buff->page_offset, bufsz,
					 DMA_FROM_DEVICE);

	/* bump ref count on page before it is given to the stack */
	get_page(new_buff->page);
}

/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function is based on skb_add_rx_frag.  I would have used that
 * function however it doesn't handle the truesize case correctly since we
 * are allocating more memory than might be used for a single receive.
 **/
static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
			      struct ixgbe_rx_buffer *rx_buffer,
			      struct sk_buff *skb, int size)
{
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			   rx_buffer->page, rx_buffer->page_offset,
			   size);
	skb->len += size;
	skb->data_len += size;
	/* truesize accounts for the whole buffer, not just bytes used */
	skb->truesize += ixgbe_rx_bufsz(rx_ring);
}

/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
*
 * Returns true if all work is completed without reaching budget
 **/
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *rx_ring,
			       int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int ddp_bytes = 0;
#endif /* IXGBE_FCOE */
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);

	do {
		struct ixgbe_rx_buffer *rx_buffer;
		union ixgbe_adv_rx_desc *rx_desc;
		struct sk_buff *skb;
		struct page *page;
		u16 ntc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		ntc = rx_ring->next_to_clean;
		rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
		rx_buffer = &rx_ring->rx_buffer_info[ntc];

		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
			break;

		/*
		 * This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		page = rx_buffer->page;
		prefetchw(page);

		skb = rx_buffer->skb;

		if (likely(!skb)) {
			/* first buffer of a frame: build a new skb */
			void *page_addr = page_address(page) +
					  rx_buffer->page_offset;

			/* prefetch first cache line of first page */
			prefetch(page_addr);
#if L1_CACHE_BYTES < 128
			prefetch(page_addr + L1_CACHE_BYTES);
#endif

			/* allocate a skb to store the frags */
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							IXGBE_RX_HDR_SIZE);
			if (unlikely(!skb)) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				break;
			}

			/*
			 * we will be copying header into skb->data in
			 * pskb_may_pull so it is in our interest to prefetch
			 * it now to avoid a possible cache miss
			 */
			prefetchw(skb->data);

			/*
			 * Delay unmapping of the first packet. It carries the
			 * header information, HW may still access the header
			 * after the writeback.  Only unmap it when EOP is
			 * reached
			 */
			IXGBE_CB(skb)->dma = rx_buffer->dma;
		} else {
			/* we are reusing so sync this buffer for CPU use */
			dma_sync_single_range_for_cpu(rx_ring->dev,
						      rx_buffer->dma,
						      rx_buffer->page_offset,
						      ixgbe_rx_bufsz(rx_ring),
						      DMA_FROM_DEVICE);
		}

		/* pull page into skb */
		ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
				  le16_to_cpu(rx_desc->wb.upper.length));

		if (ixgbe_can_reuse_page(rx_buffer)) {
			/* hand second half of page back to the ring */
			ixgbe_reuse_rx_page(rx_ring, rx_buffer);
		} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
			/* the page has been released from the ring */
			IXGBE_CB(skb)->page_released = true;
		} else {
			/* we are not reusing the buffer so unmap it */
			dma_unmap_page(rx_ring->dev, rx_buffer->dma,
				       ixgbe_rx_pg_size(rx_ring),
				       DMA_FROM_DEVICE);
		}

		/* clear contents of buffer_info */
		rx_buffer->skb = NULL;
		rx_buffer->dma = 0;
		rx_buffer->page = NULL;

		ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);

		cleaned_count++;

		/* place incomplete frames back on ring for completion */
		if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		/* verify the packet layout is correct */
		if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
			continue;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* populate checksum, timestamp, VLAN, and protocol */
		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);

#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			if (!ddp_bytes) {
				dev_kfree_skb_any(skb);
				continue;
			}
		}

#endif /* IXGBE_FCOE */
		ixgbe_rx_skb(q_vector, skb);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

#ifdef IXGBE_FCOE
	/* include DDPed FCoE data */
	if (ddp_bytes > 0) {
		unsigned int mss;

		mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
			sizeof(struct fc_frame_header) -
			sizeof(struct fcoe_crc_eof);
		if (mss > 512)
			mss &= ~511;
		total_rx_bytes += ddp_bytes;
		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
	}

#endif /* IXGBE_FCOE */
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);

	/* non-zero budget means we stopped because we ran out of work */
	return !!budget;
}

/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int q_vectors, v_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Populate MSIX to EITR Select */
	if (adapter->num_vfs > 32) {
		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
	}

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbe_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbe_for_each_ring(ring, q_vector->rx)
			ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbe_for_each_ring(ring, q_vector->tx)
			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		ixgbe_write_eitr(q_vector);
	}

	/* route the "other causes" interrupt to the last vector */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
			       v_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
		break;
	default:
		break;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER |
		  IXGBE_EIMS_MAILBOX |
		  IXGBE_EIMS_LSC);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
* this functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
			     struct ixgbe_ring_container *ring_container)
{
	int byte_count = ring_container->total_bytes;
	int pkt_count = ring_container->total_packets;
	u32 usecs_elapsed;
	u64 rate;
	u8 new_setting = ring_container->itr;

	/* nothing arrived since the last interrupt: leave the setting alone */
	if (pkt_count == 0)
		return;

	/* simple throttlerate management
	 *    0-10MB/s   lowest (100000 ints/s)
	 *   10-20MB/s   low    (20000 ints/s)
	 *   20-1249MB/s bulk   (8000 ints/s)
	 */
	/* approximate the last interrupt timeslice in usecs from the
	 * stored ITR value, then compute the byte rate over it */
	usecs_elapsed = q_vector->itr >> 2;
	rate = byte_count / usecs_elapsed; /* bytes/usec */

	/* walk the latency classes with simple hysteresis */
	if (new_setting == lowest_latency) {
		if (rate > 10)
			new_setting = low_latency;
	} else if (new_setting == low_latency) {
		if (rate > 20)
			new_setting = bulk_latency;
		else if (rate <= 10)
			new_setting = lowest_latency;
	} else if (new_setting == bulk_latency) {
		if (rate <= 20)
			new_setting = low_latency;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = new_setting;
}

/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
*/
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
		break;
	default:
		break;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

/*
 * ixgbe_set_itr - recompute and program the vector's interrupt rate
 *
 * Updates the tx/rx ring-container ITR classes, picks the slower
 * (higher-latency) of the two, smooths toward it and writes EITR.
 */
static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbe_update_itr(q_vector, &q_vector->tx);
	ixgbe_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IXGBE_8K_ITR;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbe_write_eitr(q_vector);
	}
}

/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/
static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = adapter->interrupt_event;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
	    !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_T3_LOM:
		/*
		 * Since the warning interrupt is for both ports
		 * we don't have to check if:
		 *  - This interrupt wasn't for our port.
		 *  - We may have missed the interrupt so always have to
		 *    check if we got a LSC
		 */
		if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
		    !(eicr & IXGBE_EICR_LSC))
			return;

		if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
			u32 autoneg;
			bool link_up = false;

			hw->mac.ops.check_link(hw, &autoneg, &link_up, false);

			/* link still up means the SDP0 event was not a
			 * real overtemp shutdown */
			if (link_up)
				return;
		}

		/* Check if this is not due to overtemp */
		if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
			return;

		break;
	default:
		if (!(eicr & IXGBE_EICR_GPI_SDP0))
			return;
		break;
	}
	e_crit(drv,
	       "Network adapter has been stopped because it has over heated. "
	       "Restart the computer. If the problem persists, "
	       "power off the system and replace the adapter\n");

	adapter->interrupt_event = 0;
}

/*
 * ixgbe_check_fan_failure - report and clear a fan-failure interrupt
 */
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		e_crit(probe, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}

/*
 * ixgbe_check_overtemp_event - dispatch thermal-sensor interrupt handling
 *
 * On 82599 the check needs link state, so it is deferred to the service
 * task; on X540 the TS bit is conclusive and we warn immediately.
 */
static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
		return;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		/*
		 * Need to check link state so complete overtemp check
		 * on service task
		 */
		if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
		    (!test_bit(__IXGBE_DOWN, &adapter->state))) {
			adapter->interrupt_event = eicr;
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
			ixgbe_service_event_schedule(adapter);
			return;
		}
		return;
	case ixgbe_mac_X540:
		if (!(eicr & IXGBE_EICR_TS))
			return;
		break;
	default:
		return;
	}

	e_crit(drv,
	       "Network adapter has been stopped because it has over heated. "
	       "Restart the computer. If the problem persists, "
	       "power off the system and replace the adapter\n");
}

/*
 * ixgbe_check_sfp_event - handle SFP+ module plug / link config interrupts
 *
 * SDP2 signals a module insertion (needs reset), SDP1 a link
 * configuration request; both are cleared here and deferred to the
 * service task.
 */
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
			ixgbe_service_event_schedule(adapter);
		}
	}

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
			ixgbe_service_event_schedule(adapter);
		}
	}
}

/*
 * ixgbe_check_lsc - note a link status change and kick the service task
 *
 * Masks further LSC interrupts until the service task has processed
 * the link update.
 */
static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
		ixgbe_service_event_schedule(adapter);
	}
}

/*
 * ixgbe_irq_enable_queues - unmask the queue interrupts named in qmask
 */
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
					   u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* 64 queue bits are split across two EIMS_EX registers */
		mask = (qmask & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (qmask >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
		break;
	default:
		break;
	}
	/* skip the flush */
}

/*
 * ixgbe_irq_disable_queues - mask the queue interrupts named in qmask
 */
static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
					    u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* 64 queue bits are split across two EIMC_EX registers */
		mask = (qmask & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (qmask >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
		break;
	default:
		break;
	}
	/* skip the flush */
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
				    bool flush)
{
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* don't reenable LSC while waiting for link */
	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
		mask &= ~IXGBE_EIMS_LSC;

	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			mask |= IXGBE_EIMS_GPI_SDP0;
			break;
		case ixgbe_mac_X540:
			mask |= IXGBE_EIMS_TS;
			break;
		default:
			break;
		}
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		mask |= IXGBE_EIMS_GPI_SDP1;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		/* fall through */
	case ixgbe_mac_X540:
		mask |= IXGBE_EIMS_ECC;
		mask |= IXGBE_EIMS_MAILBOX;
		break;
	default:
		break;
	}
	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
	    !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	if (queues)
		ixgbe_irq_enable_queues(adapter, ~0);
	if (flush)
		IXGBE_WRITE_FLUSH(&adapter->hw);
}

/*
 * ixgbe_msix_other - handler for the non-queue ("other causes") vector
 */
static irqreturn_t ixgbe_msix_other(int irq, void *data)
{
	struct ixgbe_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/*
	 * Workaround for Silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which later be done
	 * with the write to EICR.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (eicr & IXGBE_EICR_MAILBOX)
		ixgbe_msg_task(adapter);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (eicr & IXGBE_EICR_ECC)
			e_info(link, "Received unrecoverable ECC Err, please "
			       "reboot\n");
		/* Handle Flow Director Full threshold interrupt */
		if (eicr & IXGBE_EICR_FLOW_DIR) {
			int reinit_count = 0;
			int i;
			for (i = 0; i < adapter->num_tx_queues; i++) {
				struct ixgbe_ring *ring = adapter->tx_ring[i];
				if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
						       &ring->state))
					reinit_count++;
			}
			if (reinit_count) {
				/* no more flow director interrupts until after init */
				IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
				adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
				ixgbe_service_event_schedule(adapter);
			}
		}
		ixgbe_check_sfp_event(adapter, eicr);
		ixgbe_check_overtemp_event(adapter, eicr);
		break;
	default:
		break;
	}

	ixgbe_check_fan_failure(adapter, eicr);

	/* re-enable the original interrupt state, no lsc, no queues */
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter, false, false);

	return IRQ_HANDLED;
}

/*
 * ixgbe_msix_clean_rings - per-queue MSI-X handler, just schedules NAPI
 */
static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_poll - NAPI Rx polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * This function is used for legacy and MSI, NAPI mode
 **/
int ixgbe_poll(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
				container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_dca(q_vector);
#endif

	ixgbe_for_each_ring(ring, q_vector->tx)
		clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbe_for_each_ring(ring, q_vector->rx)
		clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
						     per_ring_budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbe_set_itr(q_vector);
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));

	return 0;
}

/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
**/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		/* name each vector after the kind of rings it services */
		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			e_err(probe, "request_irq failed for MSIX interrupt "
			      "Error: %d\n", err);
			goto free_queue_irqs;
		}
		/* If Flow Director is enabled, set interrupt affinity */
		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
			/* assign the mask for this irq */
			irq_set_affinity_hint(entry->vector,
					      &q_vector->affinity_mask);
		}
	}

	/* last vector handles the non-queue ("other") causes */
	err = request_irq(adapter->msix_entries[vector].vector,
			  ixgbe_msix_other, 0, netdev->name, adapter);
	if (err) {
		e_err(probe, "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	/* unwind only the vectors requested so far */
	while (vector) {
		vector--;
		irq_set_affinity_hint(adapter->msix_entries[vector].vector,
				      NULL);
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct ixgbe_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u32 eicr;

	/*
	 * Workaround for silicon errata #26 on 82598.  Mask the interrupt
	 * before the read of EICR.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explicit interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr) {
		/*
		 * shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM
		 * finish the workaround of silicon errata on 82598.  Unmask
		 * the interrupt that we masked before the EICR read.
		 */
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable(adapter, true, true);
		return IRQ_NONE;	/* Not our interrupt */
	}

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		ixgbe_check_sfp_event(adapter, eicr);
		/* Fall through */
	case ixgbe_mac_X540:
		if (eicr & IXGBE_EICR_ECC)
			e_info(link, "Received unrecoverable ECC err, please "
				     "reboot\n");
		ixgbe_check_overtemp_event(adapter, eicr);
		break;
	default:
		break;
	}

	ixgbe_check_fan_failure(adapter, eicr);

	/* would disable interrupts here but EIAM disabled it */
	napi_schedule(&q_vector->napi);

	/*
	 * re-enable link(maybe) and non-queue interrupts, no flush.
	 * ixgbe_poll will re-enable the queue interrupts
	 */
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter, false, false);

	return IRQ_HANDLED;
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
**/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	/* preference order: MSI-X, then MSI, then shared legacy INTx */
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		err = ixgbe_request_msix_irqs(adapter);
	else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
				  netdev->name, adapter);
	else
		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
				  netdev->name, adapter);

	if (err)
		e_err(probe, "request_irq failed, Error %d\n", err);

	return err;
}

/*
 * ixgbe_free_irq - release the irqs acquired by ixgbe_request_irq
 */
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i, q_vectors;

		q_vectors = adapter->num_msix_vectors;

		/* the last vector is the "other causes" irq owned by the
		 * adapter itself, not a q_vector */
		i = q_vectors - 1;
		free_irq(adapter->msix_entries[i].vector, adapter);
		i--;

		for (; i >= 0; i--) {
			/* free only the irqs that were actually requested */
			if (!adapter->q_vector[i]->rx.ring &&
			    !adapter->q_vector[i]->tx.ring)
				continue;

			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(adapter->msix_entries[i].vector,
					      NULL);

			free_irq(adapter->msix_entries[i].vector,
				 adapter->q_vector[i]);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
		break;
	default:
		break;
	}
	IXGBE_WRITE_FLUSH(&adapter->hw);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i;
		/* wait out any handlers already running on other CPUs */
		for (i = 0; i < adapter->num_msix_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...)
and MSI interrupts * **/ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) { struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; /* rx/tx vector */ if (adapter->rx_itr_setting == 1) q_vector->itr = IXGBE_20K_ITR; else q_vector->itr = adapter->rx_itr_setting; ixgbe_write_eitr(q_vector); ixgbe_set_ivar(adapter, 0, 0, 0); ixgbe_set_ivar(adapter, 1, 0, 0); e_info(hw, "Legacy interrupt IVAR setup done\n"); } /** * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset * @adapter: board private structure * @ring: structure containing ring specific data * * Configure the Tx descriptor ring after a reset. **/ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; u64 tdba = ring->dma; int wait_loop = 10; u32 txdctl = IXGBE_TXDCTL_ENABLE; u8 reg_idx = ring->reg_idx; /* disable queue to avoid issues while updating state */ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0); IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), (tdba & DMA_BIT_MASK(32))); IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), ring->count * sizeof(union ixgbe_adv_tx_desc)); IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx); /* * set WTHRESH to encourage burst writeback, it should not be set * higher than 1 when ITR is 0 as it could cause false TX hangs * * In order to avoid issues WTHRESH + PTHRESH should always be equal * to or less than the number of on chip descriptors, which is * currently 40. 
*/ if (!ring->q_vector || (ring->q_vector->itr < 8)) txdctl |= (1 << 16); /* WTHRESH = 1 */ else txdctl |= (8 << 16); /* WTHRESH = 8 */ /* * Setting PTHRESH to 32 both improves performance * and avoids a TX hang with DFP enabled */ txdctl |= (1 << 8) | /* HTHRESH = 1 */ 32; /* PTHRESH = 32 */ /* reinitialize flowdirector state */ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && adapter->atr_sample_rate) { ring->atr_sample_rate = adapter->atr_sample_rate; ring->atr_count = 0; set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); } else { ring->atr_sample_rate = 0; } clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); /* enable queue */ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ if (hw->mac.type == ixgbe_mac_82598EB && !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) return; /* poll to verify queue is enabled */ do { usleep_range(1000, 2000); txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); } static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 rttdcs; u32 reg; u8 tcs = netdev_get_num_tc(adapter->netdev); if (hw->mac.type == ixgbe_mac_82598EB) return; /* disable the arbiter while setting MTQC */ rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); rttdcs |= IXGBE_RTTDCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); /* set transmit pool layout */ switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { case (IXGBE_FLAG_SRIOV_ENABLED): IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); break; default: if (!tcs) reg = IXGBE_MTQC_64Q_1PB; else if (tcs <= 4) reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; else reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); /* Enable Security TX Buffer IFG for multiple pb */ if (tcs) { reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); reg |= 
IXGBE_SECTX_DCB; IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); } break; } /* re-enable the arbiter */ rttdcs &= ~IXGBE_RTTDCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); } /** * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset * @adapter: board private structure * * Configure the Tx unit of the MAC after a reset. **/ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 dmatxctl; u32 i; ixgbe_setup_mtqc(adapter); if (hw->mac.type != ixgbe_mac_82598EB) { /* DMATXCTL.EN must be before Tx queues are enabled */ dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); dmatxctl |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); } /* Setup the HW Tx Head and Tail descriptor pointers */ for (i = 0; i < adapter->num_tx_queues; i++) ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); } #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring) { u32 srrctl; u8 reg_idx = rx_ring->reg_idx; switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: { struct ixgbe_ring_feature *feature = adapter->ring_feature; const int mask = feature[RING_F_RSS].mask; reg_idx = reg_idx & mask; } break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: default: break; } srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx)); srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; if (adapter->num_vfs) srrctl |= IXGBE_SRRCTL_DROP_EN; srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & IXGBE_SRRCTL_BSIZEHDR_MASK; #if PAGE_SIZE > IXGBE_MAX_RXBUFFER srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; #else srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; #endif srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); } static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; static const u32 
seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, 0x6A3E67EA, 0x14364D17, 0x3BED200D}; u32 mrqc = 0, reta = 0; u32 rxcsum; int i, j; u8 tcs = netdev_get_num_tc(adapter->netdev); int maxq = adapter->ring_feature[RING_F_RSS].indices; if (tcs) maxq = min(maxq, adapter->num_tx_queues / tcs); /* Fill out hash function seeds */ for (i = 0; i < 10; i++) IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); /* Fill out redirection table */ for (i = 0, j = 0; i < 128; i++, j++) { if (j == maxq) j = 0; /* reta = 4-byte sliding window of * 0x00..(indices-1)(indices-1)00..etc. */ reta = (reta << 8) | (j * 0x11); if ((i & 3) == 3) IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); } /* Disable indicating checksum in descriptor, enables RSS hash */ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); rxcsum |= IXGBE_RXCSUM_PCSD; IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); if (adapter->hw.mac.type == ixgbe_mac_82598EB && (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { mrqc = IXGBE_MRQC_RSSEN; } else { int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_SRIOV_ENABLED); switch (mask) { case (IXGBE_FLAG_RSS_ENABLED): if (!tcs) mrqc = IXGBE_MRQC_RSSEN; else if (tcs <= 4) mrqc = IXGBE_MRQC_RTRSS4TCEN; else mrqc = IXGBE_MRQC_RTRSS8TCEN; break; case (IXGBE_FLAG_SRIOV_ENABLED): mrqc = IXGBE_MRQC_VMDQEN; break; default: break; } } /* Perform hash on these packet types */ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP | IXGBE_MRQC_RSS_FIELD_IPV6 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } /** * ixgbe_configure_rscctl - enable RSC for the indicated ring * @adapter: address of board private structure * @index: index of ring to set **/ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { 
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rscctrl;
	u8 reg_idx = ring->reg_idx;

	if (!ring_is_rsc_enabled(ring))
		return;

	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
	rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	 * we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65536
	 */
#if (PAGE_SIZE <= 8192)
	rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (PAGE_SIZE <= 16384)
	rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
#else
	rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
#endif
	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}

/**
 * ixgbe_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82599 hardware and newer */
	if (hw->mac.type < ixgbe_mac_82599EB)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return;

	for (i = 0; i < 128; i++)
		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
}

#define IXGBE_MAX_RX_DESC_POLL 10

/**
 * ixgbe_rx_desc_queue_enable - poll until RXDCTL.ENABLE reads back set
 * @adapter: board private structure
 * @ring: ring that was just enabled
 *
 * Warns (but does not fail) if the queue does not report enabled within
 * the polling window.
 **/
static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop) {
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
		      "the polling period\n", reg_idx);
	}
}

/**
 * ixgbe_disable_rx_queue - disable an Rx queue and wait for it to quiesce
 * @adapter: board private structure
 * @ring: ring to disable
 **/
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop) {
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
		      "the polling period\n", reg_idx);
	}
}

/**
 * ixgbe_configure_rx_ring - Configure an Rx descriptor ring after reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Disables the queue, programs base/length/head/tail, configures SRRCTL
 * and RSC, re-enables the queue and refills it with buffers.
 **/
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	ixgbe_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));
	IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
	ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);

	ixgbe_configure_srrctl(adapter, ring);
	ixgbe_configure_rscctl(adapter, ring);

	/* If operating in IOV mode set RLPML for X540 */
	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
	    hw->mac.type == ixgbe_mac_X540) {
		rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
		rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
			    ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
	}

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * enable cache line friendly hardware writes:
		 * PTHRESH=32 descriptors (half the internal cache),
		 * this also removes ugly rx_no_buffer_count increment
		 * HTHRESH=4 descriptors (to minimize latency on fetch)
		 * WTHRESH=8 burst writeback up to two cache lines
		 */
		rxdctl &= ~0x3FFFFF;
		rxdctl |= 0x080420;
	}

	/* enable receive descriptor ring */
	rxdctl |= IXGBE_RXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

	ixgbe_rx_desc_queue_enable(adapter, ring);
	ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
}

/**
 * ixgbe_setup_psrtype - program packet split receive type registers
 * @adapter: board private structure
 *
 * No-op on 82598; on newer parts every Rx pool gets the same PSRTYPE.
 **/
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int p;

	/* PSRTYPE must be initialized in non 82598 adapters */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
		      IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR |
		      IXGBE_PSRTYPE_L2HDR |
		      IXGBE_PSRTYPE_IPV6HDR;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
		psrtype |= (adapter->num_rx_queues_per_pool << 29);

	for (p = 0; p < adapter->num_rx_pools; p++)
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
				psrtype);
}

/**
 * ixgbe_configure_virtualization - set up SR-IOV pools and VF offsets
 * @adapter: board private structure
 *
 * Enables VMDq, restricts Tx/Rx to the PF's pool, maps the PF MAC into
 * RAR0, selects the 64-VF VT mode, enables Tx loopback for PF/VF traffic
 * and MAC anti-spoofing.  No-op when SR-IOV is not enabled.
 **/
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gcr_ext;
	u32 vt_reg_bits;
	u32 reg_offset, vf_shift;
	u32 vmdctl;
	int i;

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return;

	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
	vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);

	/* the PF's pool is the one following all the VFs */
	vf_shift = adapter->num_vfs % 32;
	reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;

	/* Enable only the PF's pool for Tx/Rx */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
	hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);

	/*
	 * Set up VF register offsets for selected VT Mode,
	 * i.e. 32 or 64 VFs for SR-IOV
	 */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);

	/* enable Tx loopback for VF/PF communication */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* Enable MAC Anti-Spoofing */
	hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
					  adapter->num_vfs);
	/* For VFs that have spoof checking turned off */
	for (i = 0; i < adapter->num_vfs; i++) {
		if (!adapter->vfinfo[i].spoofchk_enabled)
			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
	}
}

/**
 * ixgbe_set_rx_buffer_len - program MHADD max frame size and per-ring RSC
 * @adapter: board private structure
 *
 * Computes the max frame from the MTU (grown for FCoE baby-jumbo when
 * enabled), programs MHADD.MFS, always enables jumbo in HLREG0, and sets
 * or clears the RSC flag on every Rx ring.
 **/
static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	struct ixgbe_ring *rx_ring;
	int i;
	u32 mhadd, hlreg0;

#ifdef IXGBE_FCOE
	/* adjust max frame to be able to do baby jumbo for FCoE */
	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
	    (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;

#endif /* IXGBE_FCOE */
	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;

		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* MHADD will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
	hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_ring = adapter->rx_ring[i];
		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
			set_ring_rsc_enabled(rx_ring);
		else
			clear_ring_rsc_enabled(rx_ring);
	}
}

/**
 * ixgbe_setup_rdrxctl - program the receive DMA control register
 * @adapter: board private structure
 *
 * Per-MAC quirks: 82598 needs MVMEN for VMDq/DCA; 82599/X540 disable RSC
 * for ACK packets, set required default bits and strip the CRC.
 **/
static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * For VMDq support of different descriptor types or
		 * buffer sizes through the use of multiple SRRCTL
		 * registers, RDRXCTL.MVMEN must be set to 1
		 *
		 * also, the manual doesn't mention it clearly but DCA hints
		 * will only use queue 0's tags unless this bit is set. Side
		 * effects of setting this bit are only that SRRCTL must be
		 * fully programmed [0..15]
		 */
		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* Disable RSC for ACK packets */
		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
			(IXGBE_RSCDBU_RSCACKDIS |
			 IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
		/* hardware requires some bits to be set by default */
		rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
		break;
	default:
		/* We should do nothing since we don't know this hardware */
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}

/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 rxctrl;

	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	ixgbe_setup_psrtype(adapter);
	ixgbe_setup_rdrxctl(adapter);

	/* Program registers for the distribution of queues */
	ixgbe_setup_mrqc(adapter);

	ixgbe_set_uta(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbe_set_rx_buffer_len(adapter);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);

	/* disable drop enable for 82598 parts */
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;

	/* enable all receives */
	rxctrl |= IXGBE_RXCTRL_RXEN;
	hw->mac.ops.enable_rx_dma(hw, rxctrl);
}

/**
 * ixgbe_vlan_rx_add_vid - add a VLAN id to the hardware filter table
 * @netdev: network interface device structure
 * @vid: VLAN id to add
 **/
static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int pool_ndx = adapter->num_vfs;

	/* add VID to filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
	set_bit(vid, adapter->active_vlans);

	return 0;
}

/**
 * ixgbe_vlan_rx_kill_vid - remove a VLAN id from the hardware filter table
 * @netdev: network interface device structure
 * @vid: VLAN id to remove
 **/
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int pool_ndx = adapter->num_vfs;

	/* remove VID from filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
	clear_bit(vid, adapter->active_vlans);

	return 0;
}

/**
 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
 * @adapter: driver data
 */
static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;

	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}

/**
 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
 * @adapter: driver data
 */
static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;

	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl |= IXGBE_VLNCTRL_VFE;
	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}

/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 */
static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;
	int i, j;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 strips globally via VLNCTRL */
		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		vlnctrl &= ~IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* newer parts strip per Rx queue via RXDCTL */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			j = adapter->rx_ring[i]->reg_idx;
			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
			vlnctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 */
static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;
	int i, j;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		vlnctrl |= IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		for (i = 0; i < adapter->num_rx_queues; i++) {
			j = adapter->rx_ring[i]->reg_idx;
			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
			vlnctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_restore_vlan - re-program all active VLAN filters after a reset
 * @adapter: board private structure
 **/
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	u16 vid;

	ixgbe_vlan_rx_add_vid(adapter->netdev, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
}

/**
 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *                0 on no addresses written
 *                X on writing X addresses to the RAR table
 **/
static int ixgbe_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->num_vfs;
	unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;
		/* return error if we do not support writing to RAR table */
		if (!hw->mac.ops.set_rar)
			return -ENOMEM;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
					    vfn, IXGBE_RAH_AV);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--)
		hw->mac.ops.clear_rar(hw, rar_entries);

	return count;
}

/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
void ixgbe_set_rx_mode(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	/* set all bits that we expect to always be set */
	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	fctrl |= IXGBE_FCTRL_PMCF;

	/* clear the bits we are changing the status of */
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	if (netdev->flags & IFF_PROMISC) {
		hw->addr_ctrl.user_set_promisc = true;
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
		/* don't hardware filter vlans in promisc mode */
		ixgbe_vlan_filter_disable(adapter);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			fctrl |= IXGBE_FCTRL_MPE;
			vmolr |= IXGBE_VMOLR_MPE;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			hw->mac.ops.update_mc_addr_list(hw, netdev);
			vmolr |= IXGBE_VMOLR_ROMPE;
		}
		ixgbe_vlan_filter_enable(adapter);
		hw->addr_ctrl.user_set_promisc = false;
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = ixgbe_write_uc_addr_list(netdev);
		if (count < 0) {
			fctrl |= IXGBE_FCTRL_UPE;
			vmolr |= IXGBE_VMOLR_ROPE;
		}
	}

	if (adapter->num_vfs) {
		ixgbe_restore_vf_multicasts(adapter);
		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
			   IXGBE_VMOLR_ROPE);
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000e_set_rx_mode */
		fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
			  IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
			  IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */

		fctrl &= ~(IXGBE_FCTRL_DPF);
		/* NOTE:  VLAN filtering is disabled by setting PROMISC */
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	if (netdev->features & NETIF_F_HW_VLAN_RX)
		ixgbe_vlan_strip_enable(adapter);
	else
		ixgbe_vlan_strip_disable(adapter);
}

/**
 * ixgbe_napi_enable_all - enable NAPI on every active q_vector
 * @adapter: board private structure
 **/
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

/**
 * ixgbe_napi_disable_all - disable NAPI on every active q_vector
 * @adapter: board private structure
 **/
static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

#ifdef CONFIG_IXGBE_DCB
/*
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * This is called by the driver on open to configure the DCB hardware.
 * This is also called by the gennetlink interface when reconfiguring
 * the DCB state.
*/ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { if (hw->mac.type == ixgbe_mac_82598EB) netif_set_gso_max_size(adapter->netdev, 65536); return; } if (hw->mac.type == ixgbe_mac_82598EB) netif_set_gso_max_size(adapter->netdev, 32768); /* Enable VLAN tag insert/strip */ adapter->netdev->features |= NETIF_F_HW_VLAN_RX; hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); #ifdef IXGBE_FCOE if (adapter->netdev->features & NETIF_F_FCOE_MTU) max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); #endif /* reconfigure the hardware */ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, DCB_TX_CONFIG); ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, DCB_RX_CONFIG); ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) { ixgbe_dcb_hw_ets(&adapter->hw, adapter->ixgbe_ieee_ets, max_frame); ixgbe_dcb_hw_pfc_config(&adapter->hw, adapter->ixgbe_ieee_pfc->pfc_en, adapter->ixgbe_ieee_ets->prio_tc); } /* Enable RSS Hash per TC */ if (hw->mac.type != ixgbe_mac_82598EB) { int i; u32 reg = 0; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { u8 msb = 0; u8 cnt = adapter->netdev->tc_to_txq[i].count; while (cnt >>= 1) msb++; reg |= msb << IXGBE_RQTC_SHIFT_TC(i); } IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg); } } #endif /* Additional bittime to account for IXGBE framing */ #define IXGBE_ETH_FRAMING 20 /* * ixgbe_hpbthresh - calculate high water mark for flow control * * @adapter: board private structure to calculate for * @pb - packet buffer to calculate */ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) { struct ixgbe_hw *hw = &adapter->hw; struct net_device *dev = adapter->netdev; int link, tc, kb, marker; u32 dv_id, rx_pba; /* Calculate max LAN frame size */ tc = link = dev->mtu + ETH_HLEN + 
ETH_FCS_LEN + IXGBE_ETH_FRAMING; #ifdef IXGBE_FCOE /* FCoE traffic class uses FCOE jumbo frames */ if (dev->features & NETIF_F_FCOE_MTU) { int fcoe_pb = 0; #ifdef CONFIG_IXGBE_DCB fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up); #endif if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; } #endif /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: dv_id = IXGBE_DV_X540(link, tc); break; default: dv_id = IXGBE_DV(link, tc); break; } /* Loopback switch introduces additional latency */ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) dv_id += IXGBE_B2BT(tc); /* Delay value is calculated in bit times convert to KB */ kb = IXGBE_BT2KB(dv_id); rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10; marker = rx_pba - kb; /* It is possible that the packet buffer is not large enough * to provide required headroom. In this case throw an error * to user and a do the best we can. */ if (marker < 0) { e_warn(drv, "Packet Buffer(%i) can not provide enough" "headroom to support flow control." 
"Decrease MTU or number of traffic classes\n", pb); marker = tc + 1; } return marker; } /* * ixgbe_lpbthresh - calculate low water mark for for flow control * * @adapter: board private structure to calculate for * @pb - packet buffer to calculate */ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct net_device *dev = adapter->netdev; int tc; u32 dv_id; /* Calculate max LAN frame size */ tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: dv_id = IXGBE_LOW_DV_X540(tc); break; default: dv_id = IXGBE_LOW_DV(tc); break; } /* Delay value is calculated in bit times convert to KB */ return IXGBE_BT2KB(dv_id); } /* * ixgbe_pbthresh_setup - calculate and setup high low water marks */ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int num_tc = netdev_get_num_tc(adapter->netdev); int i; if (!num_tc) num_tc = 1; hw->fc.low_water = ixgbe_lpbthresh(adapter); for (i = 0; i < num_tc; i++) { hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); /* Low water marks must not be larger than high water marks */ if (hw->fc.low_water > hw->fc.high_water[i]) hw->fc.low_water = 0; } } static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int hdrm; u8 tc = netdev_get_num_tc(adapter->netdev); if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) hdrm = 32 << adapter->fdir_pballoc; else hdrm = 0; hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); ixgbe_pbthresh_setup(adapter); } static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node, *node2; struct ixgbe_fdir_filter *filter; spin_lock(&adapter->fdir_perfect_lock); if (!hlist_empty(&adapter->fdir_filter_list)) ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); 
hlist_for_each_entry_safe(filter, node, node2, &adapter->fdir_filter_list, fdir_node) { ixgbe_fdir_write_perfect_filter_82599(hw, &filter->filter, filter->sw_idx, (filter->action == IXGBE_FDIR_DROP_QUEUE) ? IXGBE_FDIR_DROP_QUEUE : adapter->rx_ring[filter->action]->reg_idx); } spin_unlock(&adapter->fdir_perfect_lock); } static void ixgbe_configure(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; ixgbe_configure_pb(adapter); #ifdef CONFIG_IXGBE_DCB ixgbe_configure_dcb(adapter); #endif ixgbe_set_rx_mode(adapter->netdev); ixgbe_restore_vlan(adapter); #ifdef IXGBE_FCOE if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) ixgbe_configure_fcoe(adapter); #endif /* IXGBE_FCOE */ switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: hw->mac.ops.disable_rx_buff(hw); break; default: break; } if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { ixgbe_init_fdir_signature_82599(&adapter->hw, adapter->fdir_pballoc); } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { ixgbe_init_fdir_perfect_82599(&adapter->hw, adapter->fdir_pballoc); ixgbe_fdir_filter_restore(adapter); } switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: hw->mac.ops.enable_rx_buff(hw); break; default: break; } ixgbe_configure_virtualization(adapter); ixgbe_configure_tx(adapter); ixgbe_configure_rx(adapter); } static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) { switch (hw->phy.type) { case ixgbe_phy_sfp_avago: case ixgbe_phy_sfp_ftl: case ixgbe_phy_sfp_intel: case ixgbe_phy_sfp_unknown: case ixgbe_phy_sfp_passive_tyco: case ixgbe_phy_sfp_passive_unknown: case ixgbe_phy_sfp_active_unknown: case ixgbe_phy_sfp_ftl_active: return true; case ixgbe_phy_nl: if (hw->mac.type == ixgbe_mac_82598EB) return true; default: return false; } } /** * ixgbe_sfp_link_config - set up SFP+ link * @adapter: pointer to private adapter struct **/ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) { /* * We are assuming the worst case scenario here, and that * is 
that an SFP was inserted/removed after the reset
	 * but before SFP detection was enabled.  As such the best
	 * solution is to just start searching as soon as we start
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;

	adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
}

/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
	u32 autoneg;
	bool negotiation, link_up = false;
	u32 ret = IXGBE_ERR_LINK_SETUP;

	if (hw->mac.ops.check_link)
		ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);

	if (ret)
		goto link_cfg_out;

	autoneg = hw->phy.autoneg_advertised;
	/* nothing advertised yet - query the MAC for its capabilities */
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
							&negotiation);
	if (ret)
		goto link_cfg_out;

	if (hw->mac.ops.setup_link)
		ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
link_cfg_out:
	return ret;
}

/*
 * ixgbe_setup_gpie - program the General Purpose Interrupt Enable
 * register for MSI-X or legacy interrupt mode, plus thermal-sensor,
 * fan-failure and SDP interrupt sources.
 */
static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie = 0;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
		       IXGBE_GPIE_OCD;
		gpie |= IXGBE_GPIE_EIAME;
		/*
		 * use EIAM to auto-mask when MSI-X interrupt is asserted
		 * this saves a register write for every interrupt
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		default:
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
			break;
		}
	} else {
		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
		 * specifically only auto mask tx and rx interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	/* XXX: to interrupt immediately for EICS writes, enable this */
	/* gpie |= IXGBE_GPIE_EIMEN; */

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
gpie &= ~IXGBE_GPIE_VTMODE_MASK;
		gpie |= IXGBE_GPIE_VTMODE_64;
	}

	/* Enable Thermal over heat sensor interrupt */
	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			gpie |= IXGBE_SDP0_GPIEN;
			break;
		case ixgbe_mac_X540:
			gpie |= IXGBE_EIMS_TS;
			break;
		default:
			break;
		}
	}

	/* Enable fan failure interrupt */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		gpie |= IXGBE_SDP1_GPIEN;

	if (hw->mac.type == ixgbe_mac_82599EB) {
		gpie |= IXGBE_SDP1_GPIEN;
		gpie |= IXGBE_SDP2_GPIEN;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
}

/*
 * ixgbe_up_complete - finish bringing the interface up: interrupts,
 * optics, NAPI, link configuration, transmit queues and the service
 * timer.  Called with the hardware already configured.
 */
static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;
	u32 ctrl_ext;

	ixgbe_get_hw_control(adapter);
	ixgbe_setup_gpie(adapter);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		ixgbe_configure_msix(adapter);
	else
		ixgbe_configure_msi_and_legacy(adapter);

	/* enable the optics for both multi-speed fiber and 82599 SFP+ fiber */
	if (hw->mac.ops.enable_tx_laser &&
	    ((hw->phy.multispeed_fiber) ||
	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	      (hw->mac.type == ixgbe_mac_82599EB))))
		hw->mac.ops.enable_tx_laser(hw);

	clear_bit(__IXGBE_DOWN, &adapter->state);
	ixgbe_napi_enable_all(adapter);

	if (ixgbe_is_sfp(hw)) {
		ixgbe_sfp_link_config(adapter);
	} else {
		err = ixgbe_non_sfp_link_config(hw);
		if (err)
			e_err(probe, "link_config FAILED %d\n", err);
	}

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_EICR);
	ixgbe_irq_enable(adapter, true, true);

	/*
	 * If this adapter has a fan, check to see if we had a failure
	 * before we enabled the interrupt.
*/
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(drv, "Fan has stopped, replace the adapter\n");
	}

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->service_timer, jiffies);

	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
}

/*
 * ixgbe_reinit_locked - take the interface down and back up while
 * holding the __IXGBE_RESETTING bit so it cannot race with another
 * reset.  Must not be called from interrupt context (sleeps).
 */
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
	WARN_ON(in_interrupt());
	/* put off any impending NetWatchDogTimeout */
	adapter->netdev->trans_start = jiffies;

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	ixgbe_down(adapter);
	/*
	 * If SR-IOV enabled then wait a bit before bringing the adapter
	 * back up to give the VFs time to respond to the reset.  The
	 * two second wait is based upon the watchdog timer cycle in
	 * the VF driver.
*/
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		msleep(2000);
	ixgbe_up(adapter);
	clear_bit(__IXGBE_RESETTING, &adapter->state);
}

/* ixgbe_up - configure the hardware and finish bringing the link up */
void ixgbe_up(struct ixgbe_adapter *adapter)
{
	/* hardware has been reset, we need to reload some things */
	ixgbe_configure(adapter);

	ixgbe_up_complete(adapter);
}

/*
 * ixgbe_reset - reset the hardware via mac.ops.init_hw and restore
 * RAR[0].  Serialized against the watchdog through __IXGBE_IN_SFP_INIT.
 */
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* lock SFP init bit to prevent race conditions with the watchdog */
	while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		usleep_range(1000, 2000);

	/* clear all SFP and link config related flags while holding SFP_INIT */
	adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
			     IXGBE_FLAG2_SFP_NEEDS_RESET);
	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;

	err = hw->mac.ops.init_hw(hw);
	switch (err) {
	case 0:
	case IXGBE_ERR_SFP_NOT_PRESENT:
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		/* expected conditions - nothing to report */
		break;
	case IXGBE_ERR_MASTER_REQUESTS_PENDING:
		e_dev_err("master disable timed out\n");
		break;
	case IXGBE_ERR_EEPROM_VERSION:
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated with "
			   "your hardware. If you are experiencing problems "
			   "please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
		break;
	default:
		e_dev_err("Hardware Error: %d\n", err);
	}

	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
			    IXGBE_RAH_AV);
}

/**
 * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
 * @rx_ring: ring to setup
 *
 * On many IA platforms the L1 cache has a critical stride of 4K, this
 * results in each receive buffer starting in the same cache set.  To help
 * reduce the pressure on this cache set we can interleave the offsets so
 * that only every other buffer will be in the same cache set.
**/
static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
{
	struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
	u16 i;

	/* alternate offsets: 0, bufsz, 0, bufsz, ... */
	for (i = 0; i < rx_ring->count; i += 2) {
		rx_buffer[0].page_offset = 0;
		rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
		rx_buffer = &rx_buffer[2];
	}
}

/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_rx_buffer *rx_buffer;

		rx_buffer = &rx_ring->rx_buffer_info[i];
		if (rx_buffer->skb) {
			struct sk_buff *skb = rx_buffer->skb;
			/* unmap any page the skb still references */
			if (IXGBE_CB(skb)->page_released) {
				dma_unmap_page(dev,
					       IXGBE_CB(skb)->dma,
					       ixgbe_rx_bufsz(rx_ring),
					       DMA_FROM_DEVICE);
				IXGBE_CB(skb)->page_released = false;
			}
			dev_kfree_skb(skb);
		}
		rx_buffer->skb = NULL;
		if (rx_buffer->dma)
			dma_unmap_page(dev, rx_buffer->dma,
				       ixgbe_rx_pg_size(rx_ring),
				       DMA_FROM_DEVICE);
		rx_buffer->dma = 0;
		if (rx_buffer->page)
			put_page(rx_buffer->page);
		rx_buffer->page = NULL;
	}

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* restore the interleaved page offsets cleared by the memset */
	ixgbe_init_rx_page_offset(rx_ring);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}
netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_clean_tx_ring(adapter->tx_ring[i]);
}

/*
 * ixgbe_fdir_filter_exit - free all software Flow Director perfect
 * filter entries under the fdir_perfect_lock.
 */
static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
{
	struct hlist_node *node, *node2;
	struct ixgbe_fdir_filter *filter;

	spin_lock(&adapter->fdir_perfect_lock);

	hlist_for_each_entry_safe(filter, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}
	adapter->fdir_filter_count = 0;

	spin_unlock(&adapter->fdir_perfect_lock);
}

/*
 * ixgbe_down - quiesce the device: stop receives, transmits, NAPI and
 * interrupts, then reset the hardware and clean the rings.
 */
void ixgbe_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl;
	int i;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBE_DOWN, &adapter->state);

	/* disable receives */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/* disable all enabled rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		/* this call also flushes the previous write */
		ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);

	usleep_range(10000, 20000);

	netif_tx_stop_all_queues(netdev);

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	ixgbe_irq_disable(adapter);

	ixgbe_napi_disable_all(adapter);
adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
			     IXGBE_FLAG2_RESET_REQUESTED);
	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;

	del_timer_sync(&adapter->service_timer);

	if (adapter->num_vfs) {
		/* Clear EITR Select mapping */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);

		/* Mark all the VFs as inactive */
		for (i = 0 ; i < adapter->num_vfs; i++)
			adapter->vfinfo[i].clear_to_send = false;

		/* ping all the active vfs to let them know we are going down */
		ixgbe_ping_all_vfs(adapter);

		/* Disable all VFTE/VFRE TX/RX */
		ixgbe_disable_tx_rx(adapter);
	}

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	}

	/* Disable the Tx DMA engine on 82599 and X540 */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
				 ~IXGBE_DMATXCTL_TE));
		break;
	default:
		break;
	}

	if (!pci_channel_offline(adapter->pdev))
		ixgbe_reset(adapter);

	/* power down the optics for multispeed fiber and 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser &&
	    ((hw->phy.multispeed_fiber) ||
	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	      (hw->mac.type == ixgbe_mac_82599EB))))
		hw->mac.ops.disable_tx_laser(hw);

	ixgbe_clean_all_tx_rings(adapter);
	ixgbe_clean_all_rx_rings(adapter);

#ifdef CONFIG_IXGBE_DCA
	/* since we reset the hardware DCA settings were cleared */
	ixgbe_setup_dca(adapter);
#endif
}

/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbe_tx_timeout(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	ixgbe_tx_timeout_reset(adapter);
}

/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init
initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
	int j;
	struct tc_configuration *tc;
#endif

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Set capability flags */
	rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (hw->device_id == IXGBE_DEV_ID_82598AT)
			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
		break;
	case ixgbe_mac_X540:
		adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
		/* fall through - X540 shares the 82599 capabilities below */
	case ixgbe_mac_82599EB:
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
		/* Flow Director hash filters enabled */
		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->atr_sample_rate = 20;
		adapter->ring_feature[RING_F_FDIR].indices =
							 IXGBE_MAX_FDIR_INDICES;
		adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef IXGBE_FCOE
		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
		adapter->ring_feature[RING_F_FCOE].indices = 0;
#ifdef CONFIG_IXGBE_DCB
		/* Default traffic class to use for FCoE */
		adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif
#endif /* IXGBE_FCOE */
		break;
	default:
		break;
	}

	/* n-tuple support exists, always init our spinlock */
	spin_lock_init(&adapter->fdir_perfect_lock);

#ifdef CONFIG_IXGBE_DCB
switch (hw->mac.type) {
	case ixgbe_mac_X540:
		adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
		adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
		break;
	default:
		adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
		adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
		break;
	}

	/* Configure DCB traffic classes */
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
		tc = &adapter->dcb_cfg.tc_config[j];
		tc->path[DCB_TX_CONFIG].bwg_id = 0;
		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->path[DCB_RX_CONFIG].bwg_id = 0;
		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->dcb_pfc = pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &adapter->dcb_cfg.tc_config[0];
	tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;

	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
	adapter->dcb_cfg.pfc_mode_enable = false;
	adapter->dcb_set_bitmap = 0x00;
	adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
	memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
	       sizeof(adapter->temp_dcb_cfg));

#endif

	/* default flow control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
#ifdef CONFIG_DCB
	adapter->last_lfc_mode = hw->fc.current_mode;
#endif
	ixgbe_pbthresh_setup(adapter);
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;
	hw->fc.disable_fc_autoneg = false;

	/* enable itr by default in dynamic mode */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* set default work limits */
	adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
		e_dev_err("EEPROM initialization failed\n");
		return -EIO;
	}

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}

/**
 *
ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring:    tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int orig_node = dev_to_node(dev);
	int numa_node = -1;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;

	if (tx_ring->q_vector)
		numa_node = tx_ring->q_vector->numa_node;

	/* try the q_vector's node first, then fall back to any node */
	tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	/* same node-local-then-any strategy for the descriptor memory */
	set_dev_node(dev, numa_node);
	tx_ring->desc = dma_alloc_coherent(dev,
					   tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!tx_ring->desc)
		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
						   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
*
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring:    rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int orig_node = dev_to_node(dev);
	int numa_node = -1;
	int size;

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;

	if (rx_ring->q_vector)
		numa_node = rx_ring->q_vector->numa_node;

	/* try the q_vector's node first, then fall back to any node */
	rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	/* same node-local-then-any strategy for the descriptor memory */
	set_dev_node(dev, numa_node);
	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!rx_ring->desc)
		rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
						   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	ixgbe_init_rx_page_offset(rx_ring);

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
*
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
{
	ixgbe_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbe_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
	ixgbe_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbe_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ixgbe_change_mtu -
Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	/*
	 * For 82599EB we cannot allow PF to change MTU greater than 1500
	 * in SR-IOV mode as it may cause buffer overruns in guest VFs that
	 * don't allocate and chain buffers correctly.
	 */
	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
	    (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
	    (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
		return -EINVAL;

	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);

	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}

/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
**/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbe_up_complete(adapter);

	return 0;

err_req_irq:
err_setup_rx:
	ixgbe_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbe_free_all_tx_resources(adapter);
	ixgbe_reset(adapter);

	return err;
}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_fdir_filter_exit(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}

#ifdef CONFIG_PM
/* ixgbe_resume - restore PCI state and re-open the interface after suspend */
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
*/
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		e_dev_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_wake_from_d3(pdev, false);

	rtnl_lock();
	err = ixgbe_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		e_dev_err("Cannot initialize interrupts for device\n");
		return err;
	}

	ixgbe_reset(adapter);

	/* clear Wake Up Status to discard any stale wake events */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

	if (netif_running(netdev)) {
		err = ixgbe_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}

#endif /* CONFIG_PM */
/*
 * __ixgbe_shutdown - common suspend/shutdown path: bring the interface
 * down, free resources, and program Wake-on-LAN filters if configured.
 * Sets *enable_wake when WoL is armed so the caller can set the PCI
 * wake state accordingly.
 */
static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, fctrl;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
		ixgbe_free_all_tx_resources(adapter);
		ixgbe_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbe_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (wufc) {
		ixgbe_set_rx_mode(netdev);

		/*
		 * enable the optics for both multi-speed fiber and
		 * 82599 SFP+ fiber as we can WoL.
*/
		if (hw->mac.ops.enable_tx_laser &&
		    (hw->phy.multispeed_fiber ||
		     (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
		      hw->mac.type == ixgbe_mac_82599EB)))
			hw->mac.ops.enable_tx_laser(hw);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IXGBE_WUFC_MC) {
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_MPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}

		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		ctrl |= IXGBE_CTRL_GIO_DIS;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);

		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
	} else {
		/* WoL disabled - clear the wake-up control registers */
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		pci_wake_from_d3(pdev, false);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		pci_wake_from_d3(pdev, !!wufc);
		break;
	default:
		break;
	}

	*enable_wake = !!wufc;

	ixgbe_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
/* ixgbe_suspend - PM suspend entry point */
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __ixgbe_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM */

/* ixgbe_shutdown - PCI shutdown entry point (reboot/power-off) */
static void ixgbe_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__ixgbe_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
* @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
	u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
#ifdef IXGBE_FCOE
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	unsigned int cpu;
	u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0;
#endif /* IXGBE_FCOE */

	/* don't read the registers while the device is down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		u64 rsc_count = 0;
		u64 rsc_flush = 0;
		for (i = 0; i < 16; i++)
			adapter->hw_rx_no_dma_resources +=
				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		for (i = 0; i < adapter->num_rx_queues; i++) {
			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
		}
		adapter->rsc_total_count = rsc_count;
		adapter->rsc_total_flush = rsc_flush;
	}

	/* aggregate the per-ring Rx software statistics */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
		bytes += rx_ring->stats.bytes;
		packets += rx_ring->stats.packets;
	}
	adapter->non_eop_descs = non_eop_descs;
	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
	adapter->hw_csum_rx_error = hw_csum_rx_error;
	netdev->stats.rx_bytes = bytes;
	netdev->stats.rx_packets = packets;

	bytes = 0;
	packets = 0;
	/* gather some stats to the adapter struct that are per queue */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		bytes += tx_ring->stats.bytes;
		packets += tx_ring->stats.packets;
	}
	adapter->restart_queue = restart_queue;
	adapter->tx_busy = tx_busy;
	netdev->stats.tx_bytes = bytes;
	netdev->stats.tx_packets = packets;

	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);

	/* 8 register reads */
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		hwstats->mpc[i] += mpc;
		total_mpc += hwstats->mpc[i];
		hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			break;
		default:
			break;
		}
	}

	/*16 register reads */
	for (i = 0; i < 16; i++) {
		hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		if ((hw->mac.type == ixgbe_mac_82599EB) ||
		    (hw->mac.type == ixgbe_mac_X540)) {
			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
		}
	}
	hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	hwstats->gprc -= missed_rx;

	ixgbe_update_xoff_received(adapter);

	/* 82598 hardware only has a 32 bit counter in the high register */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); break; case ixgbe_mac_X540: /* OS2BMC stats are X540 only*/ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); case ixgbe_mac_82599EB: hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); #ifdef IXGBE_FCOE hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); /* Add up per cpu counters for total ddp aloc fail */ if (fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) { for_each_possible_cpu(cpu) { fcoe_noddp_counts_sum += *per_cpu_ptr(fcoe->pcpu_noddp, cpu); fcoe_noddp_ext_buff_counts_sum += *per_cpu_ptr(fcoe-> pcpu_noddp_ext_buff, cpu); } } hwstats->fcoe_noddp = fcoe_noddp_counts_sum; hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum; #endif /* IXGBE_FCOE */ break; default: break; } bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); hwstats->bprc += bprc; hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); if (hw->mac.type == ixgbe_mac_82598EB) hwstats->mprc -= bprc; hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); hwstats->prc511 += 
IXGBE_READ_REG(hw, IXGBE_PRC511); hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); hwstats->lxontxc += lxon; lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); hwstats->lxofftxc += lxoff; hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); /* * 82598 errata - tx of flow control packets is included in tx counters */ xon_off_tot = lxon + lxoff; hwstats->gptc -= xon_off_tot; hwstats->mptc -= xon_off_tot; hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); hwstats->ptc64 -= xon_off_tot; hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); /* Fill out the OS statistics structure */ netdev->stats.multicast = hwstats->mprc; /* Rx Errors */ netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; netdev->stats.rx_dropped = 0; netdev->stats.rx_length_errors = hwstats->rlec; netdev->stats.rx_crc_errors = hwstats->crcerrs; netdev->stats.rx_missed_errors = total_mpc; } /** * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table * @adapter - pointer to the device adapter structure **/ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int i; if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) return; adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; /* if interface is down do nothing */ if 
(test_bit(__IXGBE_DOWN, &adapter->state)) return; /* do nothing if we are not using signature filters */ if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) return; adapter->fdir_overflow++; if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { for (i = 0; i < adapter->num_tx_queues; i++) set_bit(__IXGBE_TX_FDIR_INIT_DONE, &(adapter->tx_ring[i]->state)); /* re-enable flow director interrupts */ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); } else { e_err(probe, "failed to finish FDIR re-initialization, " "ignored adding FDIR ATR filters\n"); } } /** * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts * @adapter - pointer to the device adapter structure * * This function serves two purposes. First it strobes the interrupt lines * in order to make certain interrupts are occurring. Secondly it sets the * bits needed to check for TX hangs. As a result we should immediately * determine if a hang has occurred. */ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u64 eics = 0; int i; /* If we're down or resetting, just bail */ if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) return; /* Force detection of hung controller */ if (netif_carrier_ok(adapter->netdev)) { for (i = 0; i < adapter->num_tx_queues; i++) set_check_for_tx_hang(adapter->tx_ring[i]); } if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { /* * for legacy and MSI interrupts don't set any bits * that are enabled for EIAM, because this operation * would set *both* EIMS and EICS for any bit in EIAM */ IXGBE_WRITE_REG(hw, IXGBE_EICS, (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); } else { /* get one bit for every active tx/rx interrupt vector */ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { struct ixgbe_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring) eics |= ((u64)1 << i); } } /* Cause software interrupt to ensure rings are cleaned */ 
	ixgbe_irq_rearm_queues(adapter, eics);
}

/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter - pointer to the device adapter structure
 *
 * Reads the current link state from the MAC (when a check_link op is
 * available) and stores the result back into @adapter->link_up and
 * @adapter->link_speed.
 **/
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	int i;

	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
		return;

	if (hw->mac.ops.check_link) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	} else {
		/* always assume link is up, if no check link function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
		link_up = true;
	}
	if (link_up) {
		/* re-enable flow control now that link is established */
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
				hw->mac.ops.fc_enable(hw, i);
		} else {
			hw->mac.ops.fc_enable(hw, 0);
		}
	}

	if (link_up ||
	    time_after(jiffies, (adapter->link_check_timeout +
				 IXGBE_TRY_LINK_TIMEOUT))) {
		adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
	}

	adapter->link_up = link_up;
	adapter->link_speed = link_speed;
}

/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print link up message
 * @adapter - pointer to the device adapter structure
 **/
static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool flow_rx, flow_tx;

	/* only continue if link was previously down */
	if (netif_carrier_ok(netdev))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;

	/* read back the negotiated flow control configuration */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB: {
		u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
		flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
		flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
	}
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB: {
		u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
		u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
		flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
		flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
	}
		break;
	default:
		flow_tx = false;
		flow_rx = false;
		break;
	}
	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
	       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
	       "10 Gbps" :
	       (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
	       "1 Gbps" :
	       (link_speed == IXGBE_LINK_SPEED_100_FULL ?
	       "100 Mbps" :
	       "unknown speed"))),
	       ((flow_rx && flow_tx) ? "RX/TX" :
	       (flow_rx ? "RX" :
	       (flow_tx ? "TX" : "None"))));

	netif_carrier_on(netdev);
	ixgbe_check_vf_rate_limit(adapter);
}

/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print link down message
 * @adapter - pointer to the adapter structure
 **/
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->link_up = false;
	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	/* poll for SFP+ cable when link is down */
	if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;

	e_info(drv, "NIC Link is Down\n");
	netif_carrier_off(netdev);
}

/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter - pointer to the device adapter structure
 **/
static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
{
	int i;
	int some_tx_pending = 0;

	if (!netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
			if (tx_ring->next_to_use != tx_ring->next_to_clean) {
				some_tx_pending = 1;
				break;
			}
		}

		if (some_tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
		}
	}
}

/* Warn if the hardware reports spoofed packets from a VF (82599/X540 only) */
static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
{
	u32 ssvpc;

	/* Do not perform spoof check for 82598 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		return;

	ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);

	/*
	 * ssvpc register is cleared on read, if zero then no
	 * spoofed packets in the last interval.
	 */
	if (!ssvpc)
		return;

	e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
}

/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter - pointer to the device adapter structure
 **/
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	ixgbe_watchdog_update_link(adapter);

	if (adapter->link_up)
		ixgbe_watchdog_link_is_up(adapter);
	else
		ixgbe_watchdog_link_is_down(adapter);

	ixgbe_spoof_check(adapter);
	ixgbe_update_stats(adapter);

	ixgbe_watchdog_flush_tx(adapter);
}

/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter - the ixgbe adapter structure
 **/
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	s32 err;

	/* not searching for SFP so there is nothing to do here */
	if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
		return;

	/* someone else is in init, wait until next service event */
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		return;

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto sfp_out;

	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		/* If no cable is present, then we need to reset
		 * the next time we find a good cable. */
		adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
	}

	/* exit on error */
	if (err)
		goto sfp_out;

	/* exit if reset not needed */
	if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
		goto sfp_out;

	adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;

	/*
	 * A module may be identified correctly, but the EEPROM may not have
	 * support for that module.  setup_sfp() will fail in that case, so
	 * we should not allow that module to load.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto sfp_out;

	adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
	e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);

sfp_out:
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);

	if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
	    (adapter->netdev->reg_state == NETREG_REGISTERED)) {
		e_dev_err("failed to initialize because an unsupported "
			  "SFP+ module type was detected.\n");
		e_dev_err("Reload the driver after installing a "
			  "supported module.\n");
		unregister_netdev(adapter->netdev);
	}
}

/**
 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
 * @adapter - the ixgbe adapter structure
 **/
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiation;

	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
		return;

	/* someone else is in init, wait until next service event */
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		return;

	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;

	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, negotiation, true);

	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
}

#ifdef CONFIG_PCI_IOV
static void ixgbe_check_for_bad_vf(struct
ixgbe_adapter *adapter)
{
	int vf;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 gpc;
	u32 ciaa, ciad;

	gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
	if (gpc) /* If incrementing then no need for the check below */
		return;
	/*
	 * Check to see if a bad DMA write target from an errant or
	 * malicious VF has caused a PCIe error.  If so then we can
	 * issue a VFLR to the offending VF(s) and then resume without
	 * requesting a full slot reset.
	 */

	for (vf = 0; vf < adapter->num_vfs; vf++) {
		ciaa = (vf << 16) | 0x80000000;
		/* 32 bit read so align, we really want status at offset 6 */
		ciaa |= PCI_COMMAND;
		IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
		ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599);
		ciaa &= 0x7FFFFFFF;
		/* disable debug mode asap after reading data */
		IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
		/* Get the upper 16 bits which will be the PCI status reg */
		ciad >>= 16;
		if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
			netdev_err(netdev, "VF %d Hung DMA\n", vf);
			/* Issue VFLR */
			ciaa = (vf << 16) | 0x80000000;
			ciaa |= 0xA8;
			IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
			ciad = 0x00008000;  /* VFLR */
			IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad);
			ciaa &= 0x7FFFFFFF;
			IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
		}
	}
}

#endif
/**
 * ixgbe_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_service_timer(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	unsigned long next_event_offset;
	bool ready = true;

	/* poll faster when waiting for link */
	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
		next_event_offset = HZ / 10;
	else
		next_event_offset = HZ * 2;

#ifdef CONFIG_PCI_IOV
	/*
	 * don't bother with SR-IOV VF DMA hang check if there are
	 * no VFs or the link is down
	 */
	if (!adapter->num_vfs ||
	    (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
		goto normal_timer_service;

	/* If we have VFs allocated then we must check for DMA hangs */
	ixgbe_check_for_bad_vf(adapter);
	next_event_offset = HZ / 50;
	adapter->timer_event_accumulator++;

	if (adapter->timer_event_accumulator >= 100)
		adapter->timer_event_accumulator = 0;
	else
		ready = false;

normal_timer_service:
#endif
	/* Reset the timer */
	mod_timer(&adapter->service_timer, next_event_offset + jiffies);

	if (ready)
		ixgbe_service_event_schedule(adapter);
}

/* Perform a full adapter reset if one was requested by another subtask */
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
{
	if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	ixgbe_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	adapter->tx_timeout_count++;

	ixgbe_reinit_locked(adapter);
}

/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_service_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     service_task);

	ixgbe_reset_subtask(adapter);
	ixgbe_sfp_detection_subtask(adapter);
	ixgbe_sfp_link_config_subtask(adapter);
	ixgbe_check_overtemp_subtask(adapter);
	ixgbe_watchdog_subtask(adapter);
	ixgbe_fdir_reinit_subtask(adapter);
	ixgbe_check_hang_subtask(adapter);

	ixgbe_service_event_complete(adapter);
}

/*
 * ixgbe_tso - set up a TSO context descriptor for the given packet.
 * Returns 1 if a TSO context was written, 0 if the skb is not GSO,
 * or a negative errno on failure.
 */
static int ixgbe_tso(struct ixgbe_ring *tx_ring,
		     struct ixgbe_tx_buffer *first,
		     u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (first->protocol == __constant_htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		/* zero the length/checksum fields; hardware fills them in */
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
			  mss_l4len_idx);

	return 1;
}

/*
 * ixgbe_tx_csum - set up a context descriptor for Tx checksum offload
 * (non-TSO packets).  No-op when the stack did not request checksum
 * offload, unless a context descriptor is still needed for VLAN/Tx-switch.
 */
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
			  struct ixgbe_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
		    !(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
			return;
	} else {
		u8 l4_hdr = 0;
		switch (first->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
				 "partial checksum but proto=%x!\n",
				 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx =
				tcp_hdrlen(skb) << IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
				 "partial checksum but l4 proto=%x!\n",
				 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
			  type_tucmd, mss_l4len_idx);
}

/* Build the cmd_type_len field common to all data descriptors of a packet */
static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_IFCS |
				      IXGBE_ADVTXD_DCMD_DEXT);

	/* set HW vlan bit if vlan is present */
	if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
#ifdef IXGBE_FCOE
	if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
#else
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
#endif
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

	return cmd_type;
}

/* Build the olinfo_status field for the first data descriptor of a packet */
static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				   u32 tx_flags, unsigned int paylen)
{
	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* enable L4 checksum for TSO and TX checksum offload */
	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

	/* use index 1 context for TSO/FSO/FCOE */
#ifdef IXGBE_FCOE
	if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FCOE))
#else
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
#endif
		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
#ifdef IXGBE_FCOE
	if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE))
#else
	if (tx_flags & IXGBE_TX_FLAGS_TXSW)
#endif
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = olinfo_status;
}

#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
		       IXGBE_TXD_CMD_RS)

/*
 * ixgbe_tx_map - map the skb (head + frags) for DMA and post data
 * descriptors to the ring.  On DMA mapping failure all mappings made so
 * far are unwound and the descriptors are abandoned.
 */
static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
			 struct ixgbe_tx_buffer *first,
			 const u8 hdr_len)
{
	dma_addr_t dma;
	struct sk_buff *skb = first->skb;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	u32 tx_flags = first->tx_flags;
	__le32 cmd_type;
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBE_TX_DESC(tx_ring, i);

	ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
	cmd_type = ixgbe_tx_cmd_type(tx_flags);

#ifdef IXGBE_FCOE
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		/* the FCoE CRC/EOF trailer is not part of the payload */
		if (data_len < sizeof(struct fcoe_crc_eof)) {
			size -= sizeof(struct fcoe_crc_eof) - data_len;
			data_len = 0;
		} else {
			data_len -= sizeof(struct fcoe_crc_eof);
		}
	}

#endif
	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	dma_unmap_len_set(first, len, size);
	dma_unmap_addr_set(first, dma, dma);

	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		/* split buffers larger than the per-descriptor maximum */
		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
			tx_desc->read.olinfo_status = 0;
		}

		if (likely(!data_len))
			break;

		if (unlikely(skb->no_fcs))
			cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS));
		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			i = 0;
		}

#ifdef IXGBE_FCOE
		size = min_t(unsigned int, data_len, skb_frag_size(frag));
#else
		size = skb_frag_size(frag);
#endif
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer = &tx_ring->tx_buffer_info[i];
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);
		tx_desc->read.olinfo_status = 0;

		frag++;
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
	tx_desc->read.cmd_type_len = cmd_type;

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	writel(i, tx_ring->tail);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/*
 * ixgbe_atr - sample this Tx packet and program an ATR (Application
 * Targeted Routing) flow director signature filter so that receive
 * traffic of the same flow is steered to the same queue.
 */
static void ixgbe_atr(struct ixgbe_ring *ring, struct ixgbe_tx_buffer *first)
{
	struct ixgbe_q_vector *q_vector = ring->q_vector;
	union ixgbe_atr_hash_dword input = { .dword = 0 };
	union ixgbe_atr_hash_dword common = { .dword = 0 };
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	__be16 vlan_id;

	/* if ring doesn't have a interrupt vector, cannot perform ATR */
	if (!q_vector)
		return;

	/* do nothing if sampling is disabled */
	if (!ring->atr_sample_rate)
		return;

	ring->atr_count++;

	/* snag network header to get L4 type and address */
	hdr.network = skb_network_header(first->skb);

	/* Currently only IPv4/IPv6 with TCP is supported */
	if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
	     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
	    (first->protocol != __constant_htons(ETH_P_IP) ||
	     hdr.ipv4->protocol != IPPROTO_TCP))
		return;

	th = tcp_hdr(first->skb);

	/* skip this packet since it is invalid or the socket is closing */
	if (!th || th->fin)
		return;

	/* sample on all syn packets or once every atr sample count */
	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
		return;

	/* reset sample count */
	ring->atr_count = 0;

	vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);

	/*
	 * src and dst are inverted, think how the receiver sees them
	 *
	 * The input is broken into two sections, a non-compressed section
	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
	 * is XORed together and stored in the compressed dword.
	 */
	input.formatted.vlan_id = vlan_id;

	/*
	 * since src port and flex bytes occupy the same word XOR them together
	 * and write the value to source port portion of compressed dword
	 */
	if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
		common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
	else
		common.port.src ^= th->dest ^ first->protocol;
	common.port.dst ^= th->source;

	if (first->protocol == __constant_htons(ETH_P_IP)) {
		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
	} else {
		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
			     hdr.ipv6->saddr.s6_addr32[1] ^
			     hdr.ipv6->saddr.s6_addr32[2] ^
			     hdr.ipv6->saddr.s6_addr32[3] ^
			     hdr.ipv6->daddr.s6_addr32[0] ^
			     hdr.ipv6->daddr.s6_addr32[1] ^
			     hdr.ipv6->daddr.s6_addr32[2] ^
			     hdr.ipv6->daddr.s6_addr32[3];
	}

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
					      input, common, ring->queue_index);
}

/*
 * Slow path of ixgbe_maybe_stop_tx: stop the subqueue, then re-check for
 * room in case another CPU freed descriptors meanwhile.
 * Returns 0 if the queue was restarted, -EBUSY if it remains stopped.
 */
static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbe_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve!
- use start_queue because it doesn't call schedule */ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; return 0; } static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) { if (likely(ixgbe_desc_unused(tx_ring) >= size)) return 0; return __ixgbe_maybe_stop_tx(tx_ring, size); } static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) { struct ixgbe_adapter *adapter = netdev_priv(dev); int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : smp_processor_id(); #ifdef IXGBE_FCOE __be16 protocol = vlan_get_protocol(skb); if (((protocol == htons(ETH_P_FCOE)) || (protocol == htons(ETH_P_FIP))) && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); txq += adapter->ring_feature[RING_F_FCOE].mask; return txq; } #endif if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { while (unlikely(txq >= dev->real_num_tx_queues)) txq -= dev->real_num_tx_queues; return txq; } return skb_tx_hash(dev, skb); } netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) { struct ixgbe_tx_buffer *first; int tso; u32 tx_flags = 0; #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD unsigned short f; #endif u16 count = TXD_USE_COUNT(skb_headlen(skb)); __be16 protocol = skb->protocol; u8 hdr_len = 0; /* * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, * + 1 desc for context descriptor, * otherwise try next time */ #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); #else count += skb_shinfo(skb)->nr_frags; #endif if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } /* record the location of the first descriptor for this packet */ first = 
&tx_ring->tx_buffer_info[tx_ring->next_to_use]; first->skb = skb; first->bytecount = skb->len; first->gso_segs = 1; /* if we have a HW VLAN tag being added default to the HW one */ if (vlan_tx_tag_present(skb)) { tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; /* else if it is a SW VLAN check the next protocol and store the tag */ } else if (protocol == __constant_htons(ETH_P_8021Q)) { struct vlan_hdr *vhdr, _vhdr; vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); if (!vhdr) goto out_drop; protocol = vhdr->h_vlan_encapsulated_proto; tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; } #ifdef CONFIG_PCI_IOV /* * Use the l2switch_enable flag - would be false if the DMA * Tx switch had been disabled. */ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) tx_flags |= IXGBE_TX_FLAGS_TXSW; #endif /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */ if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) || (skb->priority != TC_PRIO_CONTROL))) { tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; tx_flags |= (skb->priority & 0x7) << IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { struct vlan_ethhdr *vhdr; if (skb_header_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) goto out_drop; vhdr = (struct vlan_ethhdr *)skb->data; vhdr->h_vlan_TCI = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); } else { tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; } } /* record initial flags and protocol */ first->tx_flags = tx_flags; first->protocol = protocol; #ifdef IXGBE_FCOE /* setup tx offload for FCoE */ if ((protocol == __constant_htons(ETH_P_FCOE)) && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { tso = ixgbe_fso(tx_ring, first, &hdr_len); if (tso < 0) goto out_drop; goto xmit_fcoe; } #endif /* IXGBE_FCOE */ tso = ixgbe_tso(tx_ring, first, &hdr_len); if (tso < 0) goto out_drop; else if (!tso) 
ixgbe_tx_csum(tx_ring, first);

	/* add the ATR filter if ATR is on */
	if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
		ixgbe_atr(tx_ring, first);

#ifdef IXGBE_FCOE
xmit_fcoe:
#endif /* IXGBE_FCOE */
	ixgbe_tx_map(tx_ring, first, hdr_len);

	/* re-check headroom so the next caller sees a stopped queue early */
	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	/* drop path: free the skb and report OK so the stack won't requeue */
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

/*
 * ndo_start_xmit entry point: validates/pads the skb, resolves the
 * ring from the skb's queue mapping and hands off to
 * ixgbe_xmit_frame_ring().
 */
static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];
	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* program the new address into receive address register 0 */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
			    IXGBE_RAH_AV);

	return 0;
}

/*
 * MDIO read callback for the mdio45 framework; on success the register
 * value itself is returned (it fits in a non-negative int).
 */
static int ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad,
			   u16 addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 value;
	int rc;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
	if (!rc)
		rc = value;
	return rc;
}

/* MDIO write callback: only the PHY's own port address is accepted. */
static int ixgbe_mdio_write(struct net_device *netdev, int prtad,
			    int devad, u16 addr, u16
value)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	return hw->phy.ops.write_reg(hw, addr, devad, value);
}

/* ioctl handler: forwards MII requests to the generic mdio helper. */
static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}

/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @netdev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		/* dev_addr_add requires RTNL held */
		rtnl_lock();
		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @netdev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		/* dev_addr_del requires RTNL held */
		rtnl_lock();
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		/* manually service every queue vector's rings */
		for (i = 0; i < num_q_vectors; i++) {
			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
			ixgbe_msix_clean_rings(0, q_vector);
		}
	} else {
		ixgbe_intr(adapter->pdev->irq, netdev);
	}
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
}

#endif
/*
 * ndo_get_stats64: aggregate per-ring packet/byte counters under RCU,
 * retrying each ring's u64 seqcount until a consistent snapshot is read.
 * Error counters come from netdev->stats (updated by the watchdog task).
 */
static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_bh(&ring->syncp);
				packets = ring->stats.packets;
				bytes   = ring->stats.bytes;
			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
			stats->rx_packets += packets;
			stats->rx_bytes   += bytes;
		}
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_bh(&ring->syncp);
				packets = ring->stats.packets;
				bytes   = ring->stats.bytes;
			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
			stats->tx_packets += packets;
			stats->tx_bytes   += bytes;
		}
	}
	rcu_read_unlock();
	/* following stats updated by ixgbe_watchdog_task() */
	stats->multicast	= netdev->stats.multicast;
	stats->rx_errors	= netdev->stats.rx_errors;
	stats->rx_length_errors	= netdev->stats.rx_length_errors;
	stats->rx_crc_errors	= netdev->stats.rx_crc_errors;
	stats->rx_missed_errors	= netdev->stats.rx_missed_errors;
	return stats;
}

#ifdef CONFIG_IXGBE_DCB
/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is
valid.
 * #adapter: pointer to ixgbe_adapter
 * @tc: number of traffic classes currently enabled
 *
 * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
 * 802.1Q priority maps to a packet buffer that exists.
 */
static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg, rsave;
	int i;

	/* 82598 have a static priority to TC mapping that can not
	 * be changed so no validation is needed.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	rsave = reg;

	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);

		/* If up2tc is out of bounds default to zero */
		if (up2tc > tc)
			reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
	}

	/* only touch the register if a mapping was corrected */
	if (reg != rsave)
		IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

	return;
}

/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
 * classes.
 *
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 */
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* Multiple traffic classes requires multiple queues */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		e_err(drv, "Enable failed, needs MSI-X\n");
		return -EINVAL;
	}

	/* Hardware supports up to 8 traffic classes */
	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
	    (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
		return -EINVAL;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbe_close(dev);
	ixgbe_clear_interrupt_scheme(adapter);

	if (tc) {
		netdev_set_num_tc(dev, tc);
		/* remember current flow control so it can be restored later */
		adapter->last_lfc_mode = adapter->hw.fc.current_mode;
		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = ixgbe_fc_none;
	} else {
		netdev_reset_tc(dev);
		adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;

		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	ixgbe_init_interrupt_scheme(adapter);
	ixgbe_validate_rtr(adapter, tc);
	if (netif_running(dev))
		ixgbe_open(dev);

	return 0;
}

#endif /* CONFIG_IXGBE_DCB */
/* Reset the adapter, using the locked re-init path when the
 * interface is up so traffic is quiesced first.
 */
void ixgbe_do_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);
	else
		ixgbe_reset(adapter);
}

/*
 * ndo_fix_features: mask out feature bits the hardware/configuration
 * cannot honour before the stack commits them.
 */
static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

#ifdef CONFIG_DCB
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		features &= ~NETIF_F_HW_VLAN_RX;
#endif

	/* return error if RXHASH is being enabled when RSS is not supported */
	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		features &= ~NETIF_F_RXHASH;

	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	/* Turn off LRO if not RSC capable */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
		features &= ~NETIF_F_LRO;

	return features;
}

/*
 * ndo_set_features: apply changed feature bits, resetting the adapter
 * when RSC, Flow Director or RXALL state actually changes.
 */
static int ixgbe_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;
	bool need_reset = false;

	/* Make sure RSC matches LRO, reset if change */
	if (!(features & NETIF_F_LRO)) {
		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
need_reset = true;
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
	} else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
		   !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
		/* RSC needs a sufficiently large interrupt throttle rate */
		if (adapter->rx_itr_setting == 1 ||
		    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			need_reset = true;
		} else if ((changed ^ features) & NETIF_F_LRO) {
			e_info(probe, "rx-usecs set too low, "
			       "disabling RSC\n");
		}
	}

	/*
	 * Check if Flow Director n-tuple support was enabled or disabled.  If
	 * the state changed, we need to reset.
	 */
	if (!(features & NETIF_F_NTUPLE)) {
		if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
			/* turn off Flow Director, set ATR and reset */
			if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
			    !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
				adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
			need_reset = true;
		}
		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
	} else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
		/* turn off ATR, enable perfect filters and reset */
		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
		need_reset = true;
	}

	if (changed & NETIF_F_RXALL)
		need_reset = true;

	netdev->features = features;
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}

/* net_device_ops table wiring the stack's callbacks to this driver. */
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_select_queue	= ixgbe_select_queue,
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw,
	.ndo_set_vf_spoofchk	= ixgbe_ndo_set_vf_spoofchk,
	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
	.ndo_get_stats64	=
ixgbe_get_stats64,
#ifdef CONFIG_IXGBE_DCB
	.ndo_setup_tc		= ixgbe_setup_tc,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
	.ndo_fcoe_get_wwn	= ixgbe_fcoe_get_wwn,
	.ndo_fcoe_get_hbainfo	= ixgbe_fcoe_get_hbainfo,
#endif /* IXGBE_FCOE */
	.ndo_set_features	= ixgbe_set_features,
	.ndo_fix_features	= ixgbe_fix_features,
};

/* Enable SR-IOV virtual functions when supported; a no-op on 82598
 * or without CONFIG_PCI_IOV.
 */
static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
				     const struct ixgbe_info *ii)
{
#ifdef CONFIG_PCI_IOV
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* The 82599 supports up to 64 VFs per physical function
	 * but this implementation limits allocation to 63 so that
	 * basic networking resources are still available to the
	 * physical function
	 */
	adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
	ixgbe_enable_sriov(adapter, ii);
#endif /* CONFIG_PCI_IOV */
}

/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac;
	u8 part_str[IXGBE_PBANUM_LENGTH];
	unsigned int indices = num_possible_cpus();
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 eec;
	u16 wol_cap;

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* prefer 64-bit DMA, fall back to 32-bit */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	/* size the Tx/Rx queue count from CPUs, then clamp per MAC limits */
#ifdef CONFIG_IXGBE_DCB
	indices *= MAX_TRAFFIC_CLASS;
#endif

	if (ii->mac == ixgbe_mac_82598EB)
		indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
	else
		indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);

#ifdef IXGBE_FCOE
	indices += min_t(unsigned int, num_possible_cpus(),
			 IXGBE_MAX_FCOE_INDICES);
#endif
	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	/* NOTE(review): this loop only skips empty BARs and has no body
	 * side effects -- looks vestigial; confirm against upstream history.
	 */
	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type  = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;
	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* Make it possible the adapter to be woken up via WOL */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* clear any pending wake-up status bits */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		break;
	default:
		break;
	}

	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(probe, "Fan has stopped, replace the adapter\n");
	}

	if (allow_unsupported_sfp)
		hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* reset_hw fills in the perm_addr as well */
	hw->phy.reset_if_overtemp = true;
	err = hw->mac.ops.reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		/* missing SFP is not fatal on 82598 */
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to load because an unsupported SFP+ "
			  "module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported "
			  "module.\n");
		goto err_sw_init;
	} else if (err) {
		e_dev_err("HW Init failed: %d\n", err);
		goto err_sw_init;
	}

	ixgbe_probe_vf(adapter, ii);

	/* advertise the baseline offload feature set */
	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM;

	netdev->hw_features = netdev->features;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		netdev->features |= NETIF_F_SCTP_CSUM;
		netdev->hw_features |= NETIF_F_SCTP_CSUM |
				       NETIF_F_NTUPLE;
		break;
	default:
		break;
	}

	netdev->hw_features |= NETIF_F_RXALL;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;

	/* SR-IOV and RSS/DCB are mutually exclusive here */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
				    IXGBE_FLAG_DCB_ENABLED);

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &=
~IXGBE_FLAG_FCOE_CAPABLE;
		}
	}
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		netdev->vlan_features |= NETIF_F_FCOE_CRC;
		netdev->vlan_features |= NETIF_F_FSO;
		netdev->vlan_features |= NETIF_F_FCOE_MTU;
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
		netdev->hw_features |= NETIF_F_LRO;
	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		e_dev_err("The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_sw_init;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
		e_dev_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	setup_timer(&adapter->service_timer, &ixgbe_service_timer,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->service_task, ixgbe_service_task);
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
		netdev->hw_features &= ~NETIF_F_RXHASH;
		netdev->features &= ~NETIF_F_RXHASH;
	}

	/* WOL not supported for all but the following */
	adapter->wol = 0;
	switch (pdev->device) {
	case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevice supports WOL */
		switch (pdev->subsystem_device) {
		case IXGBE_SUBDEV_ID_82599_560FLR:
			/* only support first port */
			if (hw->bus.func != 0)
				break;
			/* NOTE(review): fallthrough into the SFP case looks
			 * intentional here (560FLR port 0 gets magic-packet
			 * WOL like the plain SFP subdevice).
			 */
		case IXGBE_SUBDEV_ID_82599_SFP:
			adapter->wol = IXGBE_WUFC_MAG;
			break;
		}
		break;
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WOL */
		if (pdev->subsystem_device !=
		    IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
			adapter->wol = IXGBE_WUFC_MAG;
		break;
	case IXGBE_DEV_ID_82599_KX4:
		adapter->wol = IXGBE_WUFC_MAG;
		break;
	case IXGBE_DEV_ID_X540T:
		/* Check eeprom to see if it is enabled */
		hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
		wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
		     (hw->bus.func == 0)))
			adapter->wol = IXGBE_WUFC_MAG;
		break;
	}
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	/* print bus type/speed/width info */
	e_dev_info("(PCI Express:%s:%s) %pM\n",
		   (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
		    "Unknown"),
		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
		    "Unknown"),
		   netdev->dev_addr);

	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
	if (err)
		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			   part_str);
	else
		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, part_str);

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		e_dev_warn("PCI-Express bandwidth available for this card is "
			   "not sufficient for optimal performance.\n");
		e_dev_warn("For optimal performance a x8 PCI-Express slot "
			   "is required.\n");
	}

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);
	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. 
If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* power down the optics for multispeed fiber and 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser &&
	    ((hw->phy.multispeed_fiber) ||
	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	      (hw->mac.type == ixgbe_mac_82599EB))))
		hw->mac.ops.disable_tx_laser(hw);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* firmware requires driver version to be 0xFFFFFFFF
	 * since os does not support feature
	 */
	if (hw->mac.ops.set_fw_drv_ver)
		hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
					   0xFF);

	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	e_dev_info("%s\n", ixgbe_default_device_descr);
	cards_found++;
	return 0;

	/* error unwind: release in reverse order of acquisition */
err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);
	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  The could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* stop the service task before tearing anything down */
	set_bit(__IXGBE_DOWN, &adapter->state);
	cancel_work_sync(&adapter->service_task);

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_cleanup_fcoe(adapter);

#endif /* IXGBE_FCOE */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		/* leave VFs allocated if they are still assigned to guests */
		if (!(ixgbe_check_vf_assignment(adapter)))
			ixgbe_disable_sriov(adapter);
		else
			e_dev_warn("Unloading driver while VFs are assigned "
				   "- VFs will not be deallocated\n");
	}

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

#ifdef CONFIG_DCB
	kfree(adapter->ixgbe_ieee_pfc);
	kfree(adapter->ixgbe_ieee_ets);

#endif
	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));

	e_dev_info("complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	struct pci_dev *bdev, *vfdev;
	u32 dw0, dw1, dw2, dw3;
	int vf, pos;
	u16 req_id, pf_func;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		goto skip_bad_vf_detection;

	/* walk up to the root port to read the AER header log */
	bdev = pdev->bus->self;
	while (bdev && (bdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT))
		bdev = bdev->bus->self;

	if (!bdev)
		goto skip_bad_vf_detection;

	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		goto skip_bad_vf_detection;

	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);

	req_id = dw1 >> 16;
	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
	if (!(req_id & 0x0080))
		goto skip_bad_vf_detection;

	pf_func = req_id & 0x01;
	if ((pf_func & 1) == (pdev->devfn & 1)) {
		unsigned int device_id;

		vf = (req_id & 0x7F) >> 1;
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
				"%8.8x\tdw3: %8.8x\n",
		dw0, dw1, dw2, dw3);
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			device_id = IXGBE_82599_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X540:
			device_id = IXGBE_X540_VF_DEVICE_ID;
			break;
		default:
			device_id = 0;
			break;
		}

		/* Find the pci device of the offending VF */
		vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
		while (vfdev) {
			if (vfdev->devfn == (req_id & 0xFF))
				break;
			vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
					       device_id, vfdev);
		}
		/*
		 * There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR.  Just clean up the AER in that case.
		 */
		if (vfdev) {
			e_dev_err("Issuing VFLR to VF %d\n", vf);
			/* 0xA8 is the VF's device-control2 offset; setting
			 * bit 15 triggers function-level reset (VFLR).
			 * NOTE(review): magic offsets inherited from the
			 * original driver -- confirm against the 82599 spec.
			 */
			pci_write_config_dword(vfdev, 0xA8, 0x00008000);
		}

		pci_cleanup_aer_uncorrect_error_status(pdev);
	}

	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
	adapter->vferr_refcount++;

	return PCI_ERS_RESULT_RECOVERED;

skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	if (adapter->vferr_refcount) {
		/* VF-induced error: just drop the reference, no restart */
		e_info(drv, "Resuming after VF err\n");
		adapter->vferr_refcount--;
		return;
	}

#endif
	if (netif_running(netdev))
		ixgbe_up(adapter);

	netif_device_attach(netdev);
}

/* PCI error-recovery callbacks registered with the PCI core. */
static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

#ifdef CONFIG_IXGBE_DCA
/* DCA notifier callback: broadcast the event to every bound device. */
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ?
NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */
gpl-2.0
shianyow/kernel-android-galaxy-s2-t989
fs/ocfs2/export.c
1025
6700
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * export.c * * Functions to facilitate NFS exporting * * Copyright (C) 2002, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/types.h> #define MLOG_MASK_PREFIX ML_EXPORT #include <cluster/masklog.h> #include "ocfs2.h" #include "alloc.h" #include "dir.h" #include "dlmglue.h" #include "dcache.h" #include "export.h" #include "inode.h" #include "buffer_head_io.h" #include "suballoc.h" struct ocfs2_inode_handle { u64 ih_blkno; u32 ih_generation; }; static struct dentry *ocfs2_get_dentry(struct super_block *sb, struct ocfs2_inode_handle *handle) { struct inode *inode; struct ocfs2_super *osb = OCFS2_SB(sb); u64 blkno = handle->ih_blkno; int status, set; struct dentry *result; mlog_entry("(0x%p, 0x%p)\n", sb, handle); if (blkno == 0) { mlog(0, "nfs wants inode with blkno: 0\n"); result = ERR_PTR(-ESTALE); goto bail; } inode = ocfs2_ilookup(sb, blkno); /* * If the inode exists in memory, we only need to check it's * generation number */ if (inode) goto check_gen; /* * This will synchronize us against ocfs2_delete_inode() on * all nodes */ status = ocfs2_nfs_sync_lock(osb, 1); if (status < 0) { mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status); goto check_err; } 
	/*
	 * Under the nfs sync lock, ask the suballocator whether blkno
	 * is currently allocated as an inode at all.
	 */
	status = ocfs2_test_inode_bit(osb, blkno, &set);
	if (status < 0) {
		if (status == -EINVAL) {
			/*
			 * The blkno NFS gave us doesn't even show up
			 * as an inode, we return -ESTALE to be
			 * nice
			 */
			mlog(0, "test inode bit failed %d\n", status);
			status = -ESTALE;
		} else {
			mlog(ML_ERROR, "test inode bit failed %d\n", status);
		}
		goto unlock_nfs_sync;
	}

	/* If the inode allocator bit is clear, this inode must be stale */
	if (!set) {
		mlog(0, "inode %llu suballoc bit is clear\n",
				(unsigned long long)blkno);
		status = -ESTALE;
		goto unlock_nfs_sync;
	}

	inode = ocfs2_iget(osb, blkno, 0, 0);

unlock_nfs_sync:
	ocfs2_nfs_sync_unlock(osb, 1);

check_err:
	if (status < 0) {
		if (status == -ESTALE) {
			mlog(0, "stale inode ino: %llu generation: %u\n",
					(unsigned long long)blkno,
					handle->ih_generation);
		}
		result = ERR_PTR(status);
		goto bail;
	}

	if (IS_ERR(inode)) {
		mlog_errno(PTR_ERR(inode));
		result = (void *)inode;
		goto bail;
	}

check_gen:
	/*
	 * The block is live but may have been recycled for a new
	 * inode since the handle was issued; the generation number
	 * disambiguates. Mismatch means the handle is stale.
	 */
	if (handle->ih_generation != inode->i_generation) {
		iput(inode);
		mlog(0, "stale inode ino: %llu generation: %u\n",
				(unsigned long long)blkno,
				handle->ih_generation);
		result = ERR_PTR(-ESTALE);
		goto bail;
	}

	/* d_obtain_alias() consumes our inode reference on both paths. */
	result = d_obtain_alias(inode);
	if (!IS_ERR(result))
		result->d_op = &ocfs2_dentry_ops;
	else
		mlog_errno(PTR_ERR(result));

bail:
	mlog_exit_ptr(result);
	return result;
}

/*
 * export_operations.get_parent: look up ".." in @child's directory
 * inode (under a shared cluster inode lock) and return a dentry for
 * the parent. Returns an ERR_PTR() on lock or lookup failure.
 */
static struct dentry *ocfs2_get_parent(struct dentry *child)
{
	int status;
	u64 blkno;
	struct dentry *parent;
	struct inode *dir = child->d_inode;

	mlog_entry("(0x%p, '%.*s')\n", child,
			child->d_name.len, child->d_name.name);

	mlog(0, "find parent of directory %llu\n",
			(unsigned long long)OCFS2_I(dir)->ip_blkno);

	/* Shared (read) cluster lock so ".." can't change under us. */
	status = ocfs2_inode_lock(dir, NULL, 0);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		parent = ERR_PTR(status);
		goto bail;
	}

	status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno);
	if (status < 0) {
		parent = ERR_PTR(-ENOENT);
		goto bail_unlock;
	}

	parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
	if (!IS_ERR(parent))
		parent->d_op = &ocfs2_dentry_ops;
bail_unlock:
	ocfs2_inode_unlock(dir, 0);

bail:
	mlog_exit_ptr(parent);
	return parent;
}

/*
 * export_operations.encode_fh: pack the inode's identity into the
 * NFS file handle buffer.
 *
 * Wire layout (32-bit little-endian words):
 *   fh[0..1] = inode blkno (high word, low word)
 *   fh[2]    = inode generation
 *   fh[3..5] = same triple for the parent (only when @connectable and
 *              the inode is not itself a directory)
 *
 * Returns the fh type: 1 for a 3-word self-only handle, 2 for a
 * 6-word handle with parent info, or 255 when the caller's buffer
 * (*max_len words) is too small. *max_len is updated to the number
 * of words actually written.
 */
static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
		int connectable)
{
	struct inode *inode = dentry->d_inode;
	int len = *max_len;
	int type = 1;
	u64 blkno;
	u32 generation;
	__le32 *fh = (__force __le32 *) fh_in;

	mlog_entry("(0x%p, '%.*s', 0x%p, %d, %d)\n", dentry,
			dentry->d_name.len, dentry->d_name.name,
			fh, len, connectable);

	if (len < 3 || (connectable && len < 6)) {
		mlog(ML_ERROR, "fh buffer is too small for encoding\n");
		type = 255;
		goto bail;
	}

	blkno = OCFS2_I(inode)->ip_blkno;
	generation = inode->i_generation;

	mlog(0, "Encoding fh: blkno: %llu, generation: %u\n",
			(unsigned long long)blkno, generation);

	len = 3;
	fh[0] = cpu_to_le32((u32)(blkno >> 32));
	fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff));
	fh[2] = cpu_to_le32(generation);

	if (connectable && !S_ISDIR(inode->i_mode)) {
		struct inode *parent;

		/* d_lock keeps d_parent stable while we read it. */
		spin_lock(&dentry->d_lock);

		parent = dentry->d_parent->d_inode;
		blkno = OCFS2_I(parent)->ip_blkno;
		generation = parent->i_generation;

		fh[3] = cpu_to_le32((u32)(blkno >> 32));
		fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff));
		fh[5] = cpu_to_le32(generation);

		spin_unlock(&dentry->d_lock);

		len = 6;
		type = 2;

		mlog(0, "Encoding parent: blkno: %llu, generation: %u\n",
				(unsigned long long)blkno, generation);
	}

	*max_len = len;

bail:
	mlog_exit(type);
	return type;
}

/*
 * export_operations.fh_to_dentry: decode the self triple (fh words
 * 0-2, see ocfs2_encode_fh()) and resolve it. Returns NULL for a
 * handle that is too short or of an unknown type.
 */
static struct dentry *ocfs2_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct ocfs2_inode_handle handle;

	if (fh_len < 3 || fh_type > 2)
		return NULL;

	handle.ih_blkno = (u64)le32_to_cpu(fid->raw[0]) << 32;
	handle.ih_blkno |= (u64)le32_to_cpu(fid->raw[1]);
	handle.ih_generation = le32_to_cpu(fid->raw[2]);
	return ocfs2_get_dentry(sb, &handle);
}

/*
 * export_operations.fh_to_parent: decode the parent triple (fh words
 * 3-5) from a type-2 handle and resolve it. Returns NULL unless the
 * handle is type 2 and long enough to carry parent info.
 */
static struct dentry *ocfs2_fh_to_parent(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct ocfs2_inode_handle parent;

	if (fh_type != 2 || fh_len < 6)
		return NULL;

	parent.ih_blkno = (u64)le32_to_cpu(fid->raw[3]) << 32;
	parent.ih_blkno |= (u64)le32_to_cpu(fid->raw[4]);
	parent.ih_generation = le32_to_cpu(fid->raw[5]);
	return ocfs2_get_dentry(sb, &parent);
}

/* NFS export hooks registered for ocfs2 superblocks. */
const struct export_operations ocfs2_export_ops = {
	.encode_fh	= ocfs2_encode_fh,
	.fh_to_dentry	= ocfs2_fh_to_dentry,
	.fh_to_parent	= ocfs2_fh_to_parent,
	.get_parent	= ocfs2_get_parent,
};
gpl-2.0