code
stringlengths
3
1.01M
repo_name
stringlengths
5
116
path
stringlengths
3
311
language
stringclasses
30 values
license
stringclasses
15 values
size
int64
3
1.01M
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ui/views/status_icons/status_tray_linux.h" #include "build/build_config.h" #if !defined(OS_CHROMEOS) #include "chrome/browser/ui/views/status_icons/status_icon_linux_wrapper.h" #include "ui/views/linux_ui/linux_ui.h" StatusTrayLinux::StatusTrayLinux() { } StatusTrayLinux::~StatusTrayLinux() { } StatusIcon* StatusTrayLinux::CreatePlatformStatusIcon( StatusIconType type, const gfx::ImageSkia& image, const base::string16& tool_tip) { return StatusIconLinuxWrapper::CreateWrappedStatusIcon(image, tool_tip); } StatusTray* StatusTray::Create() { const views::LinuxUI* linux_ui = views::LinuxUI::instance(); // Only create a status tray if we can actually create status icons. if (linux_ui && linux_ui->IsStatusIconSupported()) return new StatusTrayLinux(); return NULL; } #else // defined(OS_CHROMEOS) StatusTray* StatusTray::Create() { return NULL; } #endif
XiaosongWei/chromium-crosswalk
chrome/browser/ui/views/status_icons/status_tray_linux.cc
C++
bsd-3-clause
1,089
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html> <head> <meta http-equiv="Content-type" content="text/html; charset=utf-8"> <title>body</title> <style type="text/css" media="screen"> body { margin: 1px; padding: 5px; } #marker { position: absolute; border: 2px solid #000; width: 50px; height: 50px; background: #ccc; } </style> <script src="../include_js.php"></script> <script type="text/javascript" charset="utf-8"> jQuery(function($) { $('body').click(function() { $('#marker').css( $(this).offset() ); return false; }); }); </script> </head> <body> <div id="marker"></div> </body> </html>
daejunpark/jsaf
tests/clone_detector_tests/jquery/test/data/offset/body.html
HTML
bsd-3-clause
725
// Copyright 2009 the Sputnik authors. All rights reserved. /** * The production x >>>= y is the same as x = x >>> y * * @path ch11/11.13/11.13.2/S11.13.2_A4.8_T1.3.js * @description Type(x) and Type(y) vary between primitive string and String object */ //CHECK#1 x = "1"; x >>>= "1"; if (x !== 0) { $ERROR('#1: x = "1"; x >>>= "1"; x === 0. Actual: ' + (x)); } //CHECK#2 x = new String("1"); x >>>= "1"; if (x !== 0) { $ERROR('#2: x = new String("1"); x >>>= "1"; x === 0. Actual: ' + (x)); } //CHECK#3 x = "1"; x >>>= new String("1"); if (x !== 0) { $ERROR('#3: x = "1"; x >>>= new String("1"); x === 0. Actual: ' + (x)); } //CHECK#4 x = new String("1"); x >>>= new String("1"); if (x !== 0) { $ERROR('#4: x = new String("1"); x >>>= new String("1"); x === 0. Actual: ' + (x)); } //CHECK#5 x = "x"; x >>>= "1"; if (x !== 0) { $ERROR('#5: x = "x"; x >>>= "1"; x === 0. Actual: ' + (x)); } //CHECK#6 x = "1"; x >>>= "x"; if (x !== 1) { $ERROR('#6: x = "1"; x >>>= "x"; x === 1. Actual: ' + (x)); }
Oceanswave/NiL.JS
Tests/tests/sputnik/ch11/11.13/11.13.2/S11.13.2_A4.8_T1.3.js
JavaScript
bsd-3-clause
1,023
/// Copyright (c) 2012 Ecma International. All rights reserved. /** * @path ch11/11.4/11.4.1/11.4.1-5-a-27-s.js * @description Strict Mode - TypeError is thrown after deleting a property, calling preventExtensions, and attempting to reassign the property * @onlyStrict */ function testcase() { "use strict"; var a = {x:0, get y() { return 0;}}; delete a.x; Object.preventExtensions(a); try { a.x = 1; return false; } catch (e) { return e instanceof TypeError; } } runTestCase(testcase);
Oceanswave/NiL.JS
Tests/tests/sputnik/ch11/11.4/11.4.1/11.4.1-5-a-27-s.js
JavaScript
bsd-3-clause
587
/*! Select2 4.0.0 | https://github.com/select2/select2/blob/master/LICENSE.md */ (function(){if(jQuery&&jQuery.fn&&jQuery.fn.select2&&jQuery.fn.select2.amd)var e=jQuery.fn.select2.amd;return e.define("select2/i18n/tr",[],function(){return{inputTooLong:function(e){var t=e.input.length-e.maximum,n=t+" karakter daha girmelisiniz";return n},inputTooShort:function(e){var t=e.minimum-e.input.length,n="En az "+t+" karakter daha girmelisiniz";return n},loadingMore:function(){return"Daha fazla…"},maximumSelected:function(e){var t="Sadece "+e.maximum+" seçim yapabilirsiniz";return t},noResults:function(){return"Sonuç bulunamadı"},searching:function(){return"Aranıyor…"}}}),{define:e.define,require:e.require}})();
AndrewListat/paydoc
web/template_admin/plugins/select2/i18n/tr.js
JavaScript
bsd-3-clause
722
<!-- Any copyright is dedicated to the Public Domain. http://creativecommons.org/publicdomain/zero/1.0/ --> <!DOCTYPE html> <title>CSS Test: Test declaring a variable that consists of a comma-separated font family list with the comma coming from a variable reference.</title> <link rel="author" title="Cameron McCormack" href="mailto:cam@mcc.id.au"> <link rel="help" href="http://www.w3.org/TR/css-variables-1/#syntax"> <link rel="match" href="variable-declaration-18-ref.html"> <meta name="flags" content="ahem"> <link rel="stylesheet" href="/fonts/ahem.css" type="text/css"> <style> body { font-family: serif; } p { font-family: monospace; --a: Ahem var(--b) sans-serif; --b: ,; font-family: var(--a); } </style> <p>This text must be in Ahem.</p>
scheib/chromium
third_party/blink/web_tests/external/wpt/css/css-variables/variable-declaration-18.html
HTML
bsd-3-clause
769
/// Copyright (c) 2012 Ecma International. All rights reserved. /** * @path ch15/15.2/15.2.3/15.2.3.5/15.2.3.5-4-258.js * @description Object.create - 'get' property of one property in 'Properties' is the primitive value null (8.10.5 step 7.b) */ function testcase() { try { Object.create({}, { prop: { get: null } }); return false; } catch (e) { return (e instanceof TypeError); } } runTestCase(testcase);
Oceanswave/NiL.JS
Tests/tests/sputnik/ch15/15.2/15.2.3/15.2.3.5/15.2.3.5-4-258.js
JavaScript
bsd-3-clause
543
module.exports = require('./lib/socket.io');
thetomcraig/redwood
web/node_modules/socket.io/index.js
JavaScript
isc
44
var _complement = require('./internal/_complement'); var _curry2 = require('./internal/_curry2'); var filter = require('./filter'); /** * Similar to `filter`, except that it keeps only values for which the given predicate * function returns falsy. The predicate function is passed one argument: *(value)*. * * Acts as a transducer if a transformer is given in list position. * @see R.transduce * * @func * @memberOf R * @category List * @sig (a -> Boolean) -> [a] -> [a] * @param {Function} fn The function called per iteration. * @param {Array} list The collection to iterate over. * @return {Array} The new filtered array. * @see R.filter * @example * * var isOdd = function(n) { * return n % 2 === 1; * }; * R.reject(isOdd, [1, 2, 3, 4]); //=> [2, 4] */ module.exports = _curry2(function reject(fn, list) { return filter(_complement(fn), list); });
concertcoder/leaky-ionic-app
www/lib/ramda/src/reject.js
JavaScript
mit
899
/************************************************************* * * MathJax/jax/output/HTML-CSS/fonts/Asana-Math/Size1/Regular/Main.js * * Copyright (c) 2013-2014 The MathJax Consortium * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ MathJax.OutputJax['HTML-CSS'].FONTDATA.FONTS['AsanaMathJax_Size1'] = { directory: 'Size1/Regular', family: 'AsanaMathJax_Size1', testString: '\u0302\u0303\u0305\u0306\u030C\u0332\u0333\u033F\u2016\u2044\u2045\u2046\u20D6\u20D7\u220F', 0x20: [0,0,249,0,0], 0x28: [981,490,399,84,360], 0x29: [981,490,399,40,316], 0x5B: [984,492,350,84,321], 0x5D: [984,492,350,84,321], 0x7B: [981,490,362,84,328], 0x7C: [908,367,241,86,156], 0x7D: [981,490,362,84,328], 0x302: [783,-627,453,0,453], 0x303: [763,-654,700,0,701], 0x305: [587,-542,510,0,511], 0x306: [664,-506,383,0,384], 0x30C: [783,-627,736,0,737], 0x332: [-130,175,510,0,511], 0x333: [-130,283,510,0,511], 0x33F: [695,-542,510,0,511], 0x2016: [908,367,436,86,351], 0x2044: [742,463,382,-69,383], 0x2045: [943,401,353,64,303], 0x2046: [943,401,358,30,269], 0x20D6: [790,-519,807,0,807], 0x20D7: [790,-519,807,0,807], 0x220F: [901,448,1431,78,1355], 0x2210: [901,448,1431,78,1355], 0x2211: [893,446,1224,89,1135], 0x221A: [1280,0,770,63,803], 0x2229: [1039,520,1292,124,1169], 0x222B: [1310,654,1000,54,1001], 0x222C: [1310,654,1659,54,1540], 0x222D: [1310,654,2198,54,2079], 0x222E: [1310,654,1120,54,1001], 0x222F: [1310,654,1659,54,1540], 0x2230: [1310,654,2198,54,2079], 0x2231: [1310,654,1120,54,1001], 0x2232: 
[1310,654,1146,80,1027], 0x2233: [1310,654,1120,54,1001], 0x22C0: [1040,519,1217,85,1132], 0x22C1: [1040,519,1217,85,1132], 0x22C2: [1039,520,1292,124,1169], 0x22C3: [1039,520,1292,124,1169], 0x2308: [980,490,390,84,346], 0x2309: [980,490,390,84,346], 0x230A: [980,490,390,84,346], 0x230B: [980,490,390,84,346], 0x23B4: [755,-518,977,0,978], 0x23B5: [-238,475,977,0,978], 0x23DC: [821,-545,972,0,973], 0x23DD: [-545,821,972,0,973], 0x23DE: [789,-545,1572,51,1522], 0x23DF: [-545,789,1572,51,1522], 0x23E0: [755,-545,1359,0,1360], 0x23E1: [-545,755,1359,0,1360], 0x27C5: [781,240,450,53,397], 0x27C6: [781,240,450,53,397], 0x27E6: [684,341,502,84,473], 0x27E7: [684,341,502,84,473], 0x27E8: [681,340,422,53,371], 0x27E9: [681,340,422,53,371], 0x27EA: [681,340,605,53,554], 0x27EB: [681,340,605,53,554], 0x29FC: [915,457,518,50,469], 0x29FD: [915,457,518,49,469], 0x2A00: [1100,550,1901,124,1778], 0x2A01: [1100,550,1901,124,1778], 0x2A02: [1100,550,1901,124,1778], 0x2A03: [1039,520,1292,124,1169], 0x2A04: [1039,520,1292,124,1169], 0x2A05: [1024,513,1292,124,1169], 0x2A06: [1024,513,1292,124,1169], 0x2A07: [1039,520,1415,86,1330], 0x2A08: [1039,520,1415,86,1330], 0x2A09: [888,445,1581,124,1459], 0x2A0C: [1310,654,2736,54,2617], 0x2A0D: [1310,654,1120,54,1001], 0x2A0E: [1310,654,1120,54,1001], 0x2A0F: [1310,654,1120,54,1001], 0x2A10: [1310,654,1120,54,1001], 0x2A11: [1310,654,1182,54,1063], 0x2A12: [1310,654,1120,54,1001], 0x2A13: [1310,654,1120,54,1001], 0x2A14: [1310,654,1120,54,1001], 0x2A15: [1310,654,1120,54,1001], 0x2A16: [1310,654,1120,54,1001], 0x2A17: [1310,654,1431,54,1362], 0x2A18: [1310,654,1120,54,1001], 0x2A19: [1310,654,1120,54,1001], 0x2A1A: [1310,654,1120,54,1001], 0x2A1B: [1471,654,1130,54,1011], 0x2A1C: [1471,654,1156,80,1037] }; MathJax.Callback.Queue( ["initFont",MathJax.OutputJax["HTML-CSS"],"AsanaMathJax_Size1"], ["loadComplete",MathJax.Ajax,MathJax.OutputJax["HTML-CSS"].fontDir+"/Size1/Regular/Main.js"] );
uva/mathjax-rails-assets
vendor/assets/javascripts/jax/output/HTML-CSS/fonts/Asana-Math/Size1/Regular/Main.js
JavaScript
mit
4,163
# Usage Comparison Between dojox/grid and dgrid ## Simple programmatic usage Given the following programmatic example using `dojox/grid`... ```js require(["dojox/grid/DataGrid", "dojo/store/Memory", "dojo/data/ObjectStore", "dojo/domReady!"], function(DataGrid, Memory, ObjectStore){ var memoryStore = new Memory({data: [ // data here... ]}); var objectStore = new ObjectStore({ objectStore: memoryStore }); var grid = new DataGrid({ structure: [ { field: "id", name: "ID", width: "10%" }, { field: "name", name: "Name", width: "20%" }, { field: "description", name: "Description", width: "70%" } ], store: objectStore }, "grid"); grid.startup(); }); ``` A result similar to the above example could be achieved using dgrid with the following styles... ```css #dgrid .field-id { width: 10%; } #dgrid .field-name { width: 20%; } #dgrid .field-description { width: 70%; } ``` ...and the following JavaScript... ```js require(["dgrid/OnDemandGrid", "dgrid/Keyboard", "dgrid/Selection", "dojo/_base/declare", "dojo/store/Memory", "dojo/domReady!"], function(OnDemandGrid, Keyboard, Selection, declare, Memory){ var memoryStore = new Memory({data: [ // data here... ]}); var grid = new declare([OnDemandGrid, Keyboard, Selection])({ columns: { id: { label: "ID" }, name: { label: "Name" }, description: { label: "Description" } }, store: memoryStore }, "grid"); // dgrid will call startup for you if the node appears to be in flow }); ``` There are a few key differences worth pointing out: * Whereas `dojox/grid` expects styles to be specified within the column definition to be eventually applied inline to all cells in the column, dgrid lets CSS do the talking whenever possible for purposes of layout and appearance. This allows for better separation between visual and functional concerns. 
* `dojox/grid` operates with stores implementing the earlier `dojo/data` APIs; in order to use it with a store instance implementing the `dojo/store` APIs, the store must first be wrapped using the `dojo/data/ObjectStore` module. On the other hand, dgrid communicates with `dojo/store` APIs out of the box. (Conversely, however, if you *do* need to work with a `dojo/data` store, you would then have to pass it through the `dojo/store/DataStore` wrapper in order for dgrid to work with it.) * Note that in the dgrid version of the example, the Selection and Keyboard modules are required and mixed into the constructor to be instantiated, in order to enable those pieces of functionality which are baked-in by default in `dojox/grid` components. * Also note that the dgrid example's structure is a bit more concise, taking advantage of the ability to provide simple column arrangements via an object hash instead of an array, in which case the object's keys double as the columns' `field` values (i.e., which store item properties the columns represent). ## Programmatic usage, with sub-rows Assuming the same context as the examples in the previous section, here is a contrived example demonstrating use of sub-rows in `dojox/grid`... ```js var grid = new DataGrid({ structure: [ [ { field: "id", name: "ID", width: "10%" }, { field: "name", name: "Name", width: "20%" } ], [ { field: "description", name: "Description", width: "70%", colSpan: 2 } ] ], store: objectStore }, "grid"); grid.startup(); ``` ...and the equivalent, using dgrid... (again assuming the same context as the previous example) ```js var grid = new declare([OnDemandGrid, Keyboard, Selection])({ subRows: [ [ { field: "id", label: "ID" }, { field: "name", label: "Name" } ], [ { field: "description", label: "Description", colSpan: 2 } ] ], store: memoryStore }, "grid"); ``` Notice that `subRows` is now defined instead of `columns`. 
The `columns` property of dgrid components is usable *only* for simple cases involving a single sub-row. Also notice that each item in the top-level `subRows` array is itself another array, containing an object for each column. In this case, `field` must be specified in each column definition object, since there is no longer an object hash in order to infer field names from keys. ## Using views / columnsets The `dojox/grid` components implement a concept known as "views", which are represented as separate horizontal regions within a single grid. This feature is generally useful for situations where many fields are to be shown, and some should remain visible while others are able to scroll horizontally. This capability is also available in dgrid, via the ColumnSet mixin. For instance, continuing in the vein of the examples in the previous two sections, the following `dojox/grid` structure with multiple views... ```js var grid = new DataGrid({ structure: [ { // first view width: "10%", cells: [ { field: "id", name: "ID", width: "auto" } ] }, [ // second view [ { field: "name", name: "Name", width: "20%" }, { field: "description", name: "Description", width: "80%" } ] ] ], store: objectStore }, "grid"); grid.startup(); ``` ...could be represented in dgrid, using the following CSS... ```css #dgrid .dgrid-column-set-0 { width: 10%; } #dgrid .field-name { width: 20%; } #dgrid .field-description { width: 80%; } ``` ...and the following JavaScript... (require call included, to demonstrate additional dependency) ```js require(["dgrid/OnDemandGrid", "dgrid/ColumnSet", "dgrid/Keyboard", "dgrid/Selection", "dojo/_base/declare", "dojo/store/Memory", "dojo/domReady!"], function(OnDemandGrid, ColumnSet, Keyboard, Selection, declare, Memory){ // ... create memoryStore here ... 
var grid = new declare([OnDemandGrid, ColumnSet, Keyboard, Selection])({ columnSets: [ [ // first columnSet [ { field: "id", label: "ID" } ] ], [ // second columnSet [ { field: "name", label: "Name" }, { field: "description", label: "Description" } ] ] ], store: memoryStore }, "grid"); }); ``` ## Specifying column layout via HTML While programmatic creation of grids is highly encouraged, dgrid does allow for declarative specification of grid layouts via a `table` element, somewhat along the same lines of `dojox/grid`. In the case of dgrid, this ability is not baked in by default, but is instead exposed primarily by the GridFromHtml module, which adds table-scanning capabilities atop the OnDemandGrid constructor. Note that unlike `dojox/grid`, which is *only* capable of reading declarative layouts through the use of `dojo/parser`, dgrid is also capable of creating instances programmatically while referencing a `table` node from which to read a declarative layout. For the purposes of the examples below, use of parser will be assumed, in order to allow comparison between `dojox/grid` and dgrid usage. For instance, the following declarative `dojox/grid` layout... ```html <table id="grid" data-dojo-type="dojox.grid.DataGrid" data-dojo-props="store: objectStore"> <thead> <tr> <th field="id" width="10%">ID</th> <th field="name" width="20%">Name</th> <th field="description" width="70%">Description</th> </tr> </thead> </table> ``` ...could be achieved declaratively using dgrid as follows... ```html <table id="grid" data-dojo-type="dgrid.CustomGrid" data-dojo-props="store: memoryStore"> <thead> <tr> <th data-dgrid-column="{ field: 'id' }">ID</th> <th data-dgrid-column="{ field: 'name' }">Name</th> <th data-dgrid-column="{ field: 'description' }">Description</th> </tr> </thead> </table> ``` ...provided the following script is used... 
```js require(["dgrid/GridFromHtml", "dgrid/Keyboard", "dgrid/Selection", "dojo/store/Memory", "dojo/_base/declare", "dojo/parser", "dojo/domReady!"], function(GridFromHtml, Keyboard, Selection, Memory, declare, parser){ var memoryStore = window.memoryStore = new Memory({data: [ // ... data here ... ]}); // Globally expose a Grid constructor including the mixins we want. window.dgrid = { CustomGrid: declare([GridFromHtml, Keyboard, Selection]) }; // Parse the markup after exposing the global. parser.parse(); }); ``` Notice that rather than specifying individual non-standard attributes inside the `th` elements, declarative layout specification with dgrid centers primarily around a data-attribute named `data-dgrid-column`. This attribute receives a string representation of a JavaScript object, which will ultimately become the basis for the column definition. (It operates much like `data-dojo-props`, except that the surrounding curly braces must be included.) Note that some properties which have standard equivalents, such as `colspan` and `rowspan`, can be specified directly as HTML attributes in the element instead. Additionally, the innerHTML of the `th` becomes the column's label. The script block above demonstrates the main catch to using dgrid declaratively with `dojo/parser`: since the modules in the dgrid package are written to be pure AMD, they do not expose globals, which means whatever constructors are to be used need to be exposed manually. Furthermore, rather than simply exposing the GridFromHtml constructor, the above example exposes a custom-declared constructor which mixes in desired functionality. Note that if column plugins are to be employed, these will also need to be similarly globally exposed. 
Column plugins may be specified in the column definitions of declarative grid layouts within the `data-dgrid-column` attribute; for example: ```html <th data-dgrid-column="dgrid.editor({ field: 'name', editOn: 'dblclick' })">Name</th> ``` ### Column Layout via HTML with views / columnsets While both `dojox/grid` and dgrid also enable declarative creation of grids with multiple views/columnsets, in dgrid's case this is again separated to its own module, GridWithColumnSetsFromHtml. This separation exists due to the significant amount of additional code necessary to resolve columnsets from the representative markup, combined with the relative rarity of cases calling for the additional functionality. As a quick example, here is what a simple declarative grid with two views could look like with `dojox/grid`... ```html <table id="grid" data-dojo-type="dojox.grid.DataGrid" data-dojo-props="store: objectStore"> <colgroup span="1" width="10%"></colgroup> <colgroup span="2"></colgroup> <thead> <tr> <th field="id" width="auto">ID</th> <th field="name" width="20%">Name</th> <th field="description" width="80%">Description</th> </tr> </thead> </table> ``` ...and here is the equivalent, using dgrid... (this assumes the same styles are in play as the earlier programmatic ColumnSet example) ```html <table id="grid" data-dojo-type="dgrid.CustomGrid" data-dojo-props="store: memoryStore"> <colgroup span="1"></colgroup> <colgroup span="2"></colgroup> <thead> <tr> <th data-dgrid-column="{ field: 'id' }">ID</th> <th data-dgrid-column="{ field: 'name' }">Name</th> <th data-dgrid-column="{ field: 'description' }">Description</th> </tr> </thead> </table> ``` ## Events `dojox/grid` and dgrid take significantly different approaches to handling events. `dojox/grid` provides a wide selection of stub methods which can be connected to in order to react to many common events on rows or cells in the header or body. 
The [Working with Grids](http://dojotoolkit.org/documentation/tutorials/1.7/working_grid/) tutorial gives an idea of what kinds of events are supported by `dojox/grid`. On the other hand, dgrid leaves it up to the developer as to which events are at all worth listening for. This results in generally far less overhead, since listeners are hooked up only for events of interest; at the same time, it still allows for the same range of event listeners as `dojox/grid`. `dojox/grid` components generally attach any useful information directly to the event object received by the handler callback. While dgrid does this to a certain degree for custom events, the most commonly-sought information is retrievable using the `row` and `cell` methods. See [Working with Events](../usage/Working-with-Events.md) for more information. As a quick side-by-side comparison, here is an example logging the name property of an item whose row was clicked, using `dojox/grid`... ```js grid.connect(grid, "onRowClick", function(evt){ var item = grid.getItem(evt.rowIndex); // don't forget to use store.getValue, since dojox/grid uses a dojo/data store console.log("Clicked item with name: " + grid.store.getValue(item, "name")); }); ``` ...and using dgrid... ```js grid.on(".dgrid-row:click", function(evt){ var item = grid.row(evt).data; console.log("Clicked item with name: " + item.name); }); ```
khangiskhan/ScalableCrud
web-app/js/lib/dgrid/doc/migrating/Usage-Comparison.md
Markdown
mit
13,593
/************************************************************* * * MathJax/jax/output/HTML-CSS/fonts/Neo-Euler/Variants/Regular/Main.js * * Copyright (c) 2013-2014 The MathJax Consortium * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ MathJax.OutputJax['HTML-CSS'].FONTDATA.FONTS['NeoEulerMathJax_Variants'] = { directory: 'Variants/Regular', family: 'NeoEulerMathJax_Variants', testString: '\u00A0\u2032\u2033\u2034\u2035\u2036\u2037\u2057\uE200\uE201\uE202\uE203\uE204\uE205\uE206', 0x20: [0,0,333,0,0], 0xA0: [0,0,333,0,0], 0x2032: [559,-41,329,48,299], 0x2033: [559,-41,640,48,610], 0x2034: [559,-41,950,48,920], 0x2035: [559,-41,329,48,299], 0x2036: [559,-41,640,48,610], 0x2037: [559,-41,950,48,919], 0x2057: [559,-41,1260,48,1230], 0xE200: [493,13,501,41,456], 0xE201: [469,1,501,46,460], 0xE202: [474,-1,501,59,485], 0xE203: [474,182,501,38,430], 0xE204: [476,192,501,10,482], 0xE205: [458,184,501,47,441], 0xE206: [700,13,501,45,471], 0xE207: [468,181,501,37,498], 0xE208: [706,10,501,40,461], 0xE209: [470,182,501,27,468] }; MathJax.Callback.Queue( ["initFont",MathJax.OutputJax["HTML-CSS"],"NeoEulerMathJax_Variants"], ["loadComplete",MathJax.Ajax,MathJax.OutputJax["HTML-CSS"].fontDir+"/Variants/Regular/Main.js"] );
uva/mathjax-rails-assets
vendor/assets/javascripts/jax/output/HTML-CSS/fonts/Neo-Euler/Variants/Regular/Main.js
JavaScript
mit
1,809
/** * ag-grid - Advanced Data Grid / Data Table supporting Javascript / React / AngularJS / Web Components * @version v9.0.3 * @link http://www.ag-grid.com/ * @license MIT */ "use strict"; var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; Object.defineProperty(exports, "__esModule", { value: true }); var context_1 = require("./context/context"); var LINE_SEPARATOR = '\r\n'; var XmlFactory = (function () { function XmlFactory() { } XmlFactory.prototype.createXml = function (xmlElement, booleanTransformer) { var _this = this; var props = ""; if (xmlElement.properties) { if (xmlElement.properties.prefixedAttributes) { xmlElement.properties.prefixedAttributes.forEach(function (prefixedSet) { Object.keys(prefixedSet.map).forEach(function (key) { props += _this.returnAttributeIfPopulated(prefixedSet.prefix + key, prefixedSet.map[key], booleanTransformer); }); }); } if (xmlElement.properties.rawMap) { Object.keys(xmlElement.properties.rawMap).forEach(function (key) { props += _this.returnAttributeIfPopulated(key, xmlElement.properties.rawMap[key], booleanTransformer); }); } } var result = "<" + xmlElement.name + props; if (!xmlElement.children && !xmlElement.textNode) { return result + "/>" + LINE_SEPARATOR; } if (xmlElement.textNode) { return result + ">" + xmlElement.textNode + "</" + xmlElement.name + ">" + LINE_SEPARATOR; } result += ">" + LINE_SEPARATOR; xmlElement.children.forEach(function (it) { result += _this.createXml(it, booleanTransformer); }); return result + 
"</" + xmlElement.name + ">" + LINE_SEPARATOR; }; XmlFactory.prototype.returnAttributeIfPopulated = function (key, value, booleanTransformer) { if (!value) { return ""; } var xmlValue = value; if ((typeof (value) === 'boolean')) { if (booleanTransformer) { xmlValue = booleanTransformer(value); } } xmlValue = '"' + xmlValue + '"'; return " " + key + "=" + xmlValue; }; return XmlFactory; }()); XmlFactory = __decorate([ context_1.Bean('xmlFactory') ], XmlFactory); exports.XmlFactory = XmlFactory;
extend1994/cdnjs
ajax/libs/ag-grid/9.0.4/lib/xmlFactory.js
JavaScript
mit
2,992
/** * High performant way to check whether an element with a specific class name is in the given document * Optimized for being heavily executed * Unleashes the power of live node lists * * @param {Object} doc The document object of the context where to check * @param {String} tagName Upper cased tag name * @example * wysihtml5.dom.hasElementWithClassName(document, "foobar"); */ (function(wysihtml5) { var LIVE_CACHE = {}, DOCUMENT_IDENTIFIER = 1; function _getDocumentIdentifier(doc) { return doc._wysihtml5_identifier || (doc._wysihtml5_identifier = DOCUMENT_IDENTIFIER++); } wysihtml5.dom.hasElementWithClassName = function(doc, className) { // getElementsByClassName is not supported by IE<9 // but is sometimes mocked via library code (which then doesn't return live node lists) if (!wysihtml5.browser.supportsNativeGetElementsByClassName()) { return !!doc.querySelector("." + className); } var key = _getDocumentIdentifier(doc) + ":" + className, cacheEntry = LIVE_CACHE[key]; if (!cacheEntry) { cacheEntry = LIVE_CACHE[key] = doc.getElementsByClassName(className); } return cacheEntry.length > 0; }; })(wysihtml5);
StepicOrg/wysihtml5
src/dom/has_element_with_class_name.js
JavaScript
mit
1,231
// $Id: UTF32_Encoding_Converter.cpp 80826 2008-03-04 14:51:23Z wotte $

// ======================================================================
//
// The actual conversion methods are covered by the copyright information
// below.  It is not the actual code provided by Unicode, Inc. but is an
// ACE-ified and only slightly modified version.
//
// Chad Elliott 4/28/2005
//
// Copyright 2001-2004 Unicode, Inc.
//
// Limitations on Rights to Redistribute This Code
//
// Unicode, Inc. hereby grants the right to freely use the information
// supplied in this file in the creation of products supporting the
// Unicode Standard, and to make copies of this file in any form
// for internal or external distribution as long as this notice
// remains attached.
//
// ======================================================================

#include "ace/UTF32_Encoding_Converter.h"

#if defined (ACE_USES_WCHAR)
#include "ace/OS_NS_stdio.h"
#include "ace/OS_Memory.h"
#include "ace/Min_Max.h"

ACE_BEGIN_VERSIONED_NAMESPACE_DECL

// Largest code point legal in UTF-32 (end of Plane 17, U+10FFFF).
static const ACE_UINT32 UNI_MAX_LEGAL_UTF32 = 0x0010FFFF;

// Ctor: delegate to the UTF-16 base converter; 'swap' selects byte-swapped
// (opposite-endian) input handling via the inherited swap_ flag.
ACE_UTF32_Encoding_Converter::ACE_UTF32_Encoding_Converter (bool swap)
 : ACE_UTF16_Encoding_Converter (swap)
{
}

ACE_UTF32_Encoding_Converter::~ACE_UTF32_Encoding_Converter (void)
{
}

// Convert a UTF-32 buffer (source_size is in BYTES) to UTF-8 in 'target'
// (target_size bytes).  With 'strict', surrogate code points in the input
// are rejected as SOURCE_ILLEGAL.  Returns a Result status; on failure the
// buffers are left partially processed.
ACE_UTF32_Encoding_Converter::Result
ACE_UTF32_Encoding_Converter::to_utf8 (const void* source,
                                       size_t source_size,
                                       ACE_Byte* target,
                                       size_t target_size,
                                       bool strict)
{
  // UTF-8 continuation-byte mask/marker (10xxxxxx).
  static const ACE_UINT32 byteMask = 0xBF;
  static const ACE_UINT32 byteMark = 0x80;
  static const ACE_UINT32 UNI_SUR_HIGH_START = get_UNI_SUR_HIGH_START ();
  static const ACE_UINT32 UNI_SUR_LOW_END    = get_UNI_SUR_LOW_END ();
  static const ACE_Byte* firstByteMark       = get_first_byte_mark ();

  Result result = CONVERSION_OK;
  ACE_Byte* targetEnd = target + target_size;
  const ACE_UINT32* sourceStart = static_cast<const ACE_UINT32*> (source);
  const ACE_UINT32* sourceEnd   = sourceStart +
                                  (source_size / sizeof (ACE_UINT32));

  while (sourceStart < sourceEnd)
    {
      ACE_UINT32 nw = *sourceStart++;
      // Byte-swap each code unit when the source is opposite-endian.
      ACE_UINT32 ch = (this->swap_ ? ACE_SWAP_LONG (nw) : nw);
      unsigned short bytesToWrite = 0;

      if (strict)
        {
          // UTF-16 surrogate values are illegal in UTF-32
          if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END)
            {
              result = SOURCE_ILLEGAL;
              break;
            }
        }

      // Figure out how many bytes the result will require. Turn any
      // illegally large ACE_UINT32 things (> Plane 17) into replacement
      // chars.
      if (ch < 0x80)
        {
          bytesToWrite = 1;
        }
      else if (ch < 0x800)
        {
          bytesToWrite = 2;
        }
      else if (ch < 0x10000)
        {
          bytesToWrite = 3;
        }
      else if (ch <= UNI_MAX_LEGAL_UTF32)
        {
          bytesToWrite = 4;
        }
      else
        {
          result = SOURCE_ILLEGAL;
          break;
        }

      // Reserve the output bytes up front; back out on overflow.
      target += bytesToWrite;
      if (target > targetEnd)
        {
          result = TARGET_EXHAUSTED;
          break;
        }

      // Emit the UTF-8 sequence back-to-front (least significant 6 bits
      // first), which is why target was advanced above and is decremented
      // here.  NOTE: everything falls through.
      switch (bytesToWrite)
        {
        case 4:
          *--target = (ACE_Byte)((ch | byteMark) & byteMask);
          ch >>= 6;
        case 3:
          *--target = (ACE_Byte)((ch | byteMark) & byteMask);
          ch >>= 6;
        case 2:
          *--target = (ACE_Byte)((ch | byteMark) & byteMask);
          ch >>= 6;
        case 1:
          *--target = (ACE_Byte) (ch | firstByteMark[bytesToWrite]);
        }
      // Re-advance past the bytes just written.
      target += bytesToWrite;
    }

  return result;
}

// Convert a UTF-8 buffer to UTF-32 code units in 'target'.
// NOTE(review): target_size appears to be counted in ACE_UINT32 units here
// (targetEnd = targetStart + target_size) — confirm against the header docs.
ACE_UTF32_Encoding_Converter::Result
ACE_UTF32_Encoding_Converter::from_utf8 (const ACE_Byte* source,
                                         size_t source_size,
                                         void* target,
                                         size_t target_size,
                                         bool strict)
{
  static const ACE_UINT32 UNI_SUR_HIGH_START   = get_UNI_SUR_HIGH_START ();
  static const ACE_UINT32 UNI_SUR_LOW_END      = get_UNI_SUR_LOW_END ();
  static const ACE_UINT32 UNI_REPLACEMENT_CHAR = get_UNI_REPLACEMENT_CHAR ();
  static const ACE_Byte* trailingBytesForUTF8  = get_trailing_bytes_for_utf8 ();
  static const ACE_UINT32* offsetsFromUTF8     = get_offsets_from_utf8 ();

  Result result = CONVERSION_OK;
  const ACE_Byte* sourceEnd = source + source_size;
  ACE_UINT32* targetStart = static_cast<ACE_UINT32*> (target);
  ACE_UINT32* targetEnd   = targetStart + target_size;

  while (source < sourceEnd)
    {
      ACE_UINT32 ch = 0;
      // Lead byte tells us how many continuation bytes follow.
      unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
      if (source + extraBytesToRead >= sourceEnd)
        {
          result = SOURCE_EXHAUSTED;
          break;
        }

      // Do this check whether lenient or strict
      if (!this->is_legal_utf8 (source, extraBytesToRead + 1))
        {
          result = SOURCE_ILLEGAL;
          break;
        }

      // Accumulate the sequence 6 bits at a time.
      // The cases all fall through. See "Note A" below.
      switch (extraBytesToRead)
        {
        case 5: ch += *source++; ch <<= 6;
        case 4: ch += *source++; ch <<= 6;
        case 3: ch += *source++; ch <<= 6;
        case 2: ch += *source++; ch <<= 6;
        case 1: ch += *source++; ch <<= 6;
        case 0: ch += *source++;
        }
      // Remove the accumulated lead-byte/continuation markers in one step.
      ch -= offsetsFromUTF8[extraBytesToRead];

      if (targetStart >= targetEnd)
        {
          result = TARGET_EXHAUSTED;
          break;
        }

      if (ch <= UNI_MAX_LEGAL_UTF32)
        {
          // UTF-16 surrogate values are illegal in UTF-32, and anything
          // over Plane 17 (> 0x10FFFF) is illegal.
          if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END)
            {
              if (strict)
                {
                  result = SOURCE_ILLEGAL;
                  break;
                }
              else
                {
                  // Lenient mode: substitute U+FFFD and continue.
                  *targetStart++ = UNI_REPLACEMENT_CHAR;
                }
            }
          else
            {
              *targetStart++ = ch;
            }
        }
      else
        {
          result = SOURCE_ILLEGAL;
          break;
        }
    }

  return result;
}

// Heuristic factory: probe the first few bytes of 'source' by attempting a
// native-endian UTF-32 -> UTF-8 conversion.  Returns a new converter on
// success (caller owns it), or 0 if the data does not look like UTF-32.
ACE_UTF32_Encoding_Converter*
ACE_UTF32_Encoding_Converter::encoded (const ACE_Byte* source,
                                       size_t source_size)
{
  static const size_t begin = 16;
  static const size_t converted = begin * 4;
  ACE_Byte target[converted];

  ACE_UTF32_Encoding_Converter* converter = 0;
  ACE_NEW_RETURN (converter,
                  ACE_UTF32_Encoding_Converter (false),
                  0);

  if (converter->to_utf8 (source,
                          ACE_MIN (begin, source_size),
                          target,
                          converted) == CONVERSION_OK)
    {
      return converter;
    }
  else
    {
      delete converter;
    }

  return 0;
}

ACE_END_VERSIONED_NAMESPACE_DECL

#endif /* ACE_USES_WCHAR */
drakeos/Dracore
dep/acelite/ace/UTF32_Encoding_Converter.cpp
C++
gpl-2.0
7,464
#pragma once
/*
 *      Copyright (C) 2010 Team XBMC
 *      http://www.xbmc.org
 *
 *  This Program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This Program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with XBMC; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *  http://www.gnu.org/copyleft/gpl.html
 *
 */

#if defined(HAVE_VIDEOTOOLBOXDECODER)

#include <queue>

#include "DVDVideoCodec.h"
#include <CoreVideo/CoreVideo.h>
#include <CoreMedia/CoreMedia.h>

// tracks a frame in and output queue in display order
typedef struct frame_queue {
  double              dts;                  // decode timestamp
  double              pts;                  // presentation timestamp
  int                 width;
  int                 height;
  double              sort_time;            // key used to keep the queue in display order
  FourCharCode        pixel_buffer_format;  // pixel format of pixel_buffer_ref
  CVPixelBufferRef    pixel_buffer_ref;     // decoded frame owned by this node
  struct frame_queue  *nextframe;           // singly-linked list, head = next to display
} frame_queue;

// Forward declarations; the concrete dll wrappers are only needed in the .cpp.
class DllAvUtil;
class DllAvFormat;

// Hardware-accelerated video decoder backed by Apple's VideoToolbox.
// Decoded frames are collected into m_display_queue in display order and
// handed out via GetPicture().
class CDVDVideoCodecVideoToolBox : public CDVDVideoCodec
{
public:
  CDVDVideoCodecVideoToolBox();
  virtual ~CDVDVideoCodecVideoToolBox();

  // Required overrides
  virtual bool Open(CDVDStreamInfo &hints, CDVDCodecOptions &options);
  virtual void Dispose(void);
  virtual int  Decode(BYTE *pData, int iSize, double dts, double pts);
  virtual void Reset(void);
  virtual bool GetPicture(DVDVideoPicture *pDvdVideoPicture);
  virtual bool ClearPicture(DVDVideoPicture* pDvdVideoPicture);
  virtual void SetDropState(bool bDrop);
  virtual const char* GetName(void) { return (const char*)m_pFormatName; }

protected:
  // Pop (and release) the head of the display-order queue.
  void DisplayQueuePop(void);
  // Create/destroy the underlying VideoToolbox decompression session.
  void CreateVTSession(int width, int height, CMFormatDescriptionRef fmt_desc);
  void DestroyVTSession(void);
  // Called by VideoToolbox on each decoded frame; 'refcon' is 'this'.
  static void VTDecoderCallback(
    void *refcon, CFDictionaryRef frameInfo,
    OSStatus status, UInt32 infoFlags, CVBufferRef imageBuffer);

  void              *m_vt_session;    // opaque videotoolbox session
  CMFormatDescriptionRef m_fmt_desc;  // format description for the VT session
  const char        *m_pFormatName;   // human-readable codec name returned by GetName()
  bool              m_DropPictures;   // set via SetDropState()
  DVDVideoPicture   m_videobuffer;    // scratch picture returned by GetPicture()
  double            m_sort_time_offset;

  pthread_mutex_t   m_queue_mutex;    // mutex protecting queue manipulation
  frame_queue       *m_display_queue; // display-order queue - next display frame is always at the queue head
  int32_t           m_queue_depth;    // we will try to keep the queue depth at m_max_ref_frames
  int32_t           m_max_ref_frames;

  // Bitstream conversion flags for H.264 (Annex B vs. avcC NAL sizing).
  bool              m_convert_bytestream;
  bool              m_convert_3byteTo4byteNALSize;

  DllAvUtil         *m_dllAvUtil;
  DllAvFormat       *m_dllAvFormat;
};

#endif
opdenkamp/xbmc
xbmc/cores/dvdplayer/DVDCodecs/Video/DVDVideoCodecVideoToolBox.h
C
gpl-2.0
3,041
/*
 * Copyright (C) 2005-2009 Junjiro R. Okajima
 *
 * This program, aufs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

/*
 * inode functions
 */

#include "aufs.h"

/*
 * Grab an extra reference on @inode (like igrab(), but the caller must
 * already hold a reference — AuDebugOn checks i_count is non-zero).
 * NULL is passed through unchanged.
 */
struct inode *au_igrab(struct inode *inode)
{
	if (inode) {
		AuDebugOn(!atomic_read(&inode->i_count));
		atomic_inc_return(&inode->i_count);
	}
	return inode;
}

/*
 * Re-copy attributes from the lower inodes and refresh the iinfo
 * generation; bump i_version when @do_version is set.
 */
static void au_refresh_hinode_attr(struct inode *inode, int do_version)
{
	au_cpup_attr_all(inode, /*force*/0);
	au_update_iigen(inode);
	if (do_version)
		inode->i_version++;
}

/*
 * Re-map the lower (hidden) inodes of @inode after a branch
 * add/delete/reorder, using the stored branch ids to find each one's new
 * branch index.  Caller must hold the iinfo write lock.
 * Returns 0 or a negative errno from au_ii_realloc().
 */
int au_refresh_hinode_self(struct inode *inode, int do_attr)
{
	int err;
	aufs_bindex_t bindex, new_bindex;
	unsigned char update;
	struct inode *first;
	struct au_hinode *p, *q, tmp;
	struct super_block *sb;
	struct au_iinfo *iinfo;

	IiMustWriteLock(inode);

	update = 0;
	sb = inode->i_sb;
	iinfo = au_ii(inode);
	err = au_ii_realloc(iinfo, au_sbend(sb) + 1);
	if (unlikely(err))
		goto out;

	p = iinfo->ii_hinode + iinfo->ii_bstart;
	/* NOTE(review): 'first' is assigned but never read here — presumably
	 * leftover from an older revision; confirm before removing. */
	first = p->hi_inode;
	err = 0;
	for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend;
	     bindex++, p++) {
		if (!p->hi_inode)
			continue;

		new_bindex = au_br_index(sb, p->hi_id);
		if (new_bindex == bindex)
			continue;

		/* the branch was removed: drop this lower inode */
		if (new_bindex < 0) {
			update++;
			au_hiput(p);
			p->hi_inode = NULL;
			continue;
		}

		if (new_bindex < iinfo->ii_bstart)
			iinfo->ii_bstart = new_bindex;
		if (iinfo->ii_bend < new_bindex)
			iinfo->ii_bend = new_bindex;
		/* swap two lower inode, and loop again */
		q = iinfo->ii_hinode + new_bindex;
		tmp = *q;
		*q = *p;
		*p = tmp;
		/* if we swapped in a live entry, revisit this slot */
		if (tmp.hi_inode) {
			bindex--;
			p--;
		}
	}
	au_update_brange(inode, /*do_put_zero*/0);
	if (do_attr)
		au_refresh_hinode_attr(inode, update && S_ISDIR(inode->i_mode));

out:
	return err;
}

/*
 * Full refresh: first remap the existing lower inodes (above), then pick
 * up any lower inodes newly visible through @dentry.  Returns -EIO when a
 * slot already holds a different lower inode than the dentry reports.
 */
int au_refresh_hinode(struct inode *inode, struct dentry *dentry)
{
	int err, update;
	unsigned int flags;
	aufs_bindex_t bindex, bend;
	unsigned char isdir;
	struct inode *first;
	struct au_hinode *p;
	struct au_iinfo *iinfo;

	err = au_refresh_hinode_self(inode, /*do_attr*/0);
	if (unlikely(err))
		goto out;

	update = 0;
	iinfo = au_ii(inode);
	p = iinfo->ii_hinode + iinfo->ii_bstart;
	/* NOTE(review): 'first' unused here as well — see above. */
	first = p->hi_inode;
	isdir = S_ISDIR(inode->i_mode);
	flags = au_hi_flags(inode, isdir);
	bend = au_dbend(dentry);
	for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) {
		struct inode *h_i;
		struct dentry *h_d;

		h_d = au_h_dptr(dentry, bindex);
		if (!h_d || !h_d->d_inode)
			continue;

		if (iinfo->ii_bstart <= bindex && bindex <= iinfo->ii_bend) {
			h_i = au_h_iptr(inode, bindex);
			if (h_i) {
				if (h_i == h_d->d_inode)
					continue;
				/* mismatch between iinfo and dentry */
				err = -EIO;
				break;
			}
		}
		if (bindex < iinfo->ii_bstart)
			iinfo->ii_bstart = bindex;
		if (iinfo->ii_bend < bindex)
			iinfo->ii_bend = bindex;
		au_set_h_iptr(inode, bindex, au_igrab(h_d->d_inode), flags);
		update = 1;
	}
	au_update_brange(inode, /*do_put_zero*/0);

	if (unlikely(err))
		goto out;

	au_refresh_hinode_attr(inode, update && isdir);

out:
	AuTraceErr(err);
	return err;
}

/*
 * Initialize a freshly created aufs inode from @dentry: choose the inode/
 * file operations by file type, attach the lower inodes for every branch
 * the dentry covers, and copy up the attributes.  Caller holds the iinfo
 * write lock.  Returns 0 or -EIO for an unknown file type.
 */
static int set_inode(struct inode *inode, struct dentry *dentry)
{
	int err;
	unsigned int flags;
	umode_t mode;
	aufs_bindex_t bindex, bstart, btail;
	unsigned char isdir;
	struct dentry *h_dentry;
	struct inode *h_inode;
	struct au_iinfo *iinfo;

	IiMustWriteLock(inode);

	err = 0;
	isdir = 0;
	bstart = au_dbstart(dentry);
	h_inode = au_h_dptr(dentry, bstart)->d_inode;
	mode = h_inode->i_mode;
	switch (mode & S_IFMT) {
	case S_IFREG:
		btail = au_dbtail(dentry);
		inode->i_op = &aufs_iop;
		inode->i_fop = &aufs_file_fop;
		inode->i_mapping->a_ops = &aufs_aop;
		break;
	case S_IFDIR:
		isdir = 1;
		btail = au_dbtaildir(dentry);
		inode->i_op = &aufs_dir_iop;
		inode->i_fop = &aufs_dir_fop;
		break;
	case S_IFLNK:
		btail = au_dbtail(dentry);
		inode->i_op = &aufs_symlink_iop;
		break;
	case S_IFBLK:
	case S_IFCHR:
	case S_IFIFO:
	case S_IFSOCK:
		btail = au_dbtail(dentry);
		inode->i_op = &aufs_iop;
		init_special_inode(inode, mode, h_inode->i_rdev);
		break;
	default:
		AuIOErr("Unknown file type 0%o\n", mode);
		err = -EIO;
		goto out;
	}

	/* do not set inotify for whiteouted dirs (SHWH mode) */
	flags = au_hi_flags(inode, isdir);
	if (au_opt_test(au_mntflags(dentry->d_sb), SHWH)
	    && au_ftest_hi(flags, HINOTIFY)
	    && dentry->d_name.len > AUFS_WH_PFX_LEN
	    && !memcmp(dentry->d_name.name, AUFS_WH_PFX, AUFS_WH_PFX_LEN))
		au_fclr_hi(flags, HINOTIFY);
	iinfo = au_ii(inode);
	iinfo->ii_bstart = bstart;
	iinfo->ii_bend = btail;
	for (bindex = bstart; bindex <= btail; bindex++) {
		h_dentry = au_h_dptr(dentry, bindex);
		if (h_dentry)
			au_set_h_iptr(inode, bindex,
				      au_igrab(h_dentry->d_inode), flags);
	}
	au_cpup_attr_all(inode, /*force*/1);

out:
	return err;
}

/*
 * Re-validate an inode found in the icache against @dentry.
 * *@matched is set when one of the lower inodes equals the dentry's lower
 * inode.  successful returns with iinfo write_locked.
 */
static int reval_inode(struct inode *inode, struct dentry *dentry,
		       int *matched)
{
	int err;
	aufs_bindex_t bindex, bend;
	struct inode *h_inode, *h_dinode;

	*matched = 0;

	/*
	 * before this function, if aufs got any iinfo lock, it must be only
	 * one, the parent dir.
	 * it can happen by UDBA and the obsoleted inode number.
	 */
	err = -EIO;
	if (unlikely(inode->i_ino == parent_ino(dentry)))
		goto out;

	err = 0;
	ii_write_lock_new_child(inode);
	h_dinode = au_h_dptr(dentry, au_dbstart(dentry))->d_inode;
	bend = au_ibend(inode);
	for (bindex = au_ibstart(inode); bindex <= bend; bindex++) {
		h_inode = au_h_iptr(inode, bindex);
		if (h_inode && h_inode == h_dinode) {
			*matched = 1;
			err = 0;
			/* stale generation: rebuild the lower mapping */
			if (au_iigen(inode) != au_digen(dentry))
				err = au_refresh_hinode(inode, dentry);
			break;
		}
	}

	if (unlikely(err))
		ii_write_unlock(inode);
out:
	return err;
}

/*
 * Translate a lower inode number @h_ino on branch @bindex to the aufs
 * inode number via the xino table, allocating a new one if none exists.
 * Non-directories are serialized with the per-branch nondir mutex to
 * prevent two hardlinks racing to allocate different numbers.
 */
int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
	   unsigned int d_type, ino_t *ino)
{
	int err;
	struct mutex *mtx;
	const int isdir = (d_type == DT_DIR);

	/* prevent hardlinks from race condition */
	mtx = NULL;
	if (!isdir) {
		mtx = &au_sbr(sb, bindex)->br_xino.xi_nondir_mtx;
		mutex_lock(mtx);
	}
	err = au_xino_read(sb, bindex, h_ino, ino);
	if (unlikely(err))
		goto out;

	if (!*ino) {
		err = -EIO;
		*ino = au_xino_new_ino(sb);
		if (unlikely(!*ino))
			goto out;
		err = au_xino_write(sb, bindex, h_ino, *ino);
		if (unlikely(err))
			goto out;
	}

out:
	if (!isdir)
		mutex_unlock(mtx);
	return err;
}

/* successful returns with iinfo write_locked */
/* todo: return with unlocked? */
/*
 * Look up or create the aufs inode for @dentry.  When the cached inode
 * does not match (obsoleted inode number, e.g. after an un-notified
 * rename under UDBA), the xino entry is reset and a new inode number is
 * allocated.  Returns the inode or an ERR_PTR.
 */
struct inode *au_new_inode(struct dentry *dentry, int must_new)
{
	struct inode *inode;
	struct dentry *h_dentry;
	struct super_block *sb;
	ino_t h_ino, ino;
	int err, match;
	aufs_bindex_t bstart;

	sb = dentry->d_sb;
	bstart = au_dbstart(dentry);
	h_dentry = au_h_dptr(dentry, bstart);
	h_ino = h_dentry->d_inode->i_ino;
	err = au_xino_read(sb, bstart, h_ino, &ino);
	inode = ERR_PTR(err);
	if (unlikely(err))
		goto out;
new_ino:
	if (!ino) {
		ino = au_xino_new_ino(sb);
		if (unlikely(!ino)) {
			inode = ERR_PTR(-EIO);
			goto out;
		}
	}

	AuDbg("i%lu\n", (unsigned long)ino);
	inode = au_iget_locked(sb, ino);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out;

	AuDbg("%lx, new %d\n", inode->i_state, !!(inode->i_state & I_NEW));
	if (inode->i_state & I_NEW) {
		ii_write_lock_new_child(inode);
		err = set_inode(inode, dentry);
		unlock_new_inode(inode);
		if (!err)
			goto out; /* success */

		iget_failed(inode);
		ii_write_unlock(inode);
		goto out_iput;
	} else if (!must_new) {
		err = reval_inode(inode, dentry, &match);
		if (!err)
			goto out; /* success */
		else if (match)
			goto out_iput;
	}

	/* an existing, non-matching inode: retry with a fresh number */
	if (unlikely(au_test_fs_unique_ino(h_dentry->d_inode)))
		AuWarn1("Warning: Un-notified UDBA or repeatedly renamed dir,"
			" b%d, %s, %.*s, hi%lu, i%lu.\n",
			bstart, au_sbtype(h_dentry->d_sb), AuDLNPair(dentry),
			(unsigned long)h_ino, (unsigned long)ino);
	ino = 0;
	err = au_xino_write(sb, bstart, h_ino, /*ino*/0);
	if (!err) {
		iput(inode);
		goto new_ino;
	}

out_iput:
	iput(inode);
	inode = ERR_PTR(err);
out:
	return inode;
}

/* ---------------------------------------------------------------------- */

/*
 * Return non-zero (errno) when branch @bindex is effectively read-only
 * for @inode: the branch itself is read-only, or the lower inode at that
 * index is immutable (-EROFS).
 */
int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
	       struct inode *inode)
{
	int err;

	err = au_br_rdonly(au_sbr(sb, bindex));

	/* pseudo-link after flushed may happen out of bounds */
	if (!err
	    && inode
	    && au_ibstart(inode) <= bindex
	    && bindex <= au_ibend(inode)) {
		/*
		 * permission check is unnecessary since vfsub routine
		 * will be called later
		 */
		struct inode *hi = au_h_iptr(inode, bindex);
		if (hi)
			err = IS_IMMUTABLE(hi) ? -EROFS : 0;
	}

	return err;
}

/* permission check on the lower inode; root (fsuid 0) bypasses it */
int au_test_h_perm(struct inode *h_inode, int mask)
{
	if (!current_fsuid())
		return 0;
	return inode_permission(h_inode, mask);
}

/*
 * As au_test_h_perm(), but for NFS branches a write check on a directory
 * also forces a read check (NFS quirk).
 */
int au_test_h_perm_sio(struct inode *h_inode, int mask)
{
	if (au_test_nfs(h_inode->i_sb)
	    && (mask & MAY_WRITE)
	    && S_ISDIR(h_inode->i_mode))
		mask |= MAY_READ; /* force permission check */
	return au_test_h_perm(h_inode, mask);
}
ArthySundaram/firstrepo
ubuntu/aufs/inode.c
C
gpl-2.0
9,534
<?php

/*
Plugin Name: MediaFire API
Description: Check links to files hosted on MediaFire.
Version: 1.0
Author: Janis Elsts

ModuleID: mediafire-checker
ModuleCategory: checker
ModuleContext: on-demand
ModuleLazyInit: true
ModuleClassName: blcMediaFireChecker
ModulePriority: 100
ModuleCheckerUrlPattern: @^http://(?:www\.)?mediafire\.com/(?:(?:download\.php)?\?|download/)([0-9a-zA-Z]{11,20})(?:$|[^0-9a-zA-Z])@
*/

/**
 * MediaFire link checker.
 *
 * Since MediaFire has no public status API, file state is divined from the
 * response to a non-following HEAD request: a 200 or a redirect to
 * "/download/" means the file is OK, while redirects to error.php carry an
 * undocumented "errno" code that maps to specific failure states.
 *
 * @package Broken Link Checker
 * @author Janis Elsts
 * @access public
 */
class blcMediaFireChecker extends blcChecker {

	/**
	 * Determine if the checker can parse a specific URL.
	 * Always returns true because the ModuleCheckerUrlPattern header constitutes sufficient verification.
	 *
	 * @param string $url
	 * @param array $parsed
	 * @return bool True.
	 */
	function can_check($url, $parsed){
		return true;
	}

	/**
	 * Check a MediaFire link.
	 *
	 * @param string $url
	 * @return array Result array: final_url, redirect_count, timeout, broken,
	 *               log, http_code, result_hash, request_duration and, for
	 *               recognized errors, status_code / status_text.
	 */
	function check($url){
		$result = array(
			'final_url' => $url,
			'redirect_count' => 0,
			'timeout' => false,
			'broken' => false,
			'log' => "<em>(Using MediaFire checker module)</em>\n\n",
			'http_code' => 0,
			'result_hash' => '',
		);

		//URLs like http://www.mediafire.com/download.php?03mj0mwmnnm are technically valid,
		//but they introduce unnecessary redirects.
		$url = str_replace('download.php','', $url);

		//Since MediaFire doesn't have an API, we just send a HEAD request
		//and try do divine the file state from the response headers.
		$start = microtime_float();
		$rez = $this->head($url);
		$result['request_duration'] = microtime_float() - $start;

		if ( is_wp_error($rez) ){
			//An unexpected error.
			$result['broken'] = true;
			$result['log'] .= "Error : " . $rez->get_error_message();

			if ( $data = $rez->get_error_data() ){
				$result['log'] .= "\n\nError data : " . print_r($data, true);
			}
		} else {
			$result['http_code'] = intval($rez['response']['code']);

			if ( $result['http_code'] == 200 ){
				//200 - OK
				$result['broken'] = false;
				$result['log'] .= "File OK";
			} elseif ( isset($rez['headers']['location']) ) {
				//Redirect = either an error or a redirect to the full file URL.
				//For errors, the redirect URL is structured like this : '/error.php?errno=320'.
				//The 'errno' argument contains an (undocumented) error code.
				$result['broken'] = true;

				if ( strpos($rez['headers']['location'], '/download/') !== false ) {
					//Redirected straight to the file - the link works.
					$result['broken'] = false;
					$result['http_code'] = 200;
					$result['log'] .= "File OK";
					$result['log'] .= "\nFull URL: " . $rez['headers']['location'];
				} elseif ( strpos($rez['headers']['location'], 'errno=320') !== false ){
					$result['status_code'] = BLC_LINK_STATUS_ERROR;
					$result['status_text'] = __('Not Found', 'broken-link-checker');
					$result['http_code'] = 0;
					$result['log'] .= "The file is invalid or has been removed.";
				} elseif ( strpos($rez['headers']['location'], 'errno=378') !== false ) {
					$result['status_code'] = BLC_LINK_STATUS_ERROR;
					$result['status_text'] = __('Not Found', 'broken-link-checker');
					$result['http_code'] = 0;
					$result['log'] .= "The file has been removed due to a violation of MediaFire ToS.";
				} elseif ( strpos($rez['headers']['location'], 'errno=388') !== false ) {
					//Rate limiting - treated as a warning, not a broken link.
					$result['status_code'] = BLC_LINK_STATUS_WARNING;
					$result['status_text'] = __('Permission Denied', 'broken-link-checker');
					$result['http_code'] = 0;
					$result['log'] .= "Permission denied. Most likely the plugin sent too many requests too quickly. Try again later.";
				} else {
					//Unrecognized errno: record the headers for diagnosis.
					$result['status_code'] = BLC_LINK_STATUS_INFO;
					$result['status_text'] = __('Unknown Error', 'broken-link-checker');
					$result['log'] .= "Unknown error.\n\n";
					foreach($rez['headers'] as $name => $value){
						$result['log'] .= sprintf("%s: %s\n", $name, $value);
					}
				}
			} else {
				//Neither 200 nor a redirect - dump the headers into the log.
				$result['log'] .= "Unknown error.\n\n" . implode("\n",$rez['headers']);
			}
		}

		//Generate the result hash (used for detecting false positives)
		$result['result_hash'] = implode('|', array(
			'mediafire',
			$result['http_code'],
			$result['broken']?'broken':'0',
			$result['timeout']?'timeout':'0'
		));

		return $result;
	}

	/**
	 * Perform a HEAD request to the specified URL.
	 *
	 * Note :
	 *
	 * Since the MediaFire checker works by parsing the "Location" header, redirect following
	 * _must_ be disabled. This can become a problem on servers where WP is forced to fall back
	 * on using WP_Http_Fopen which ignores the 'redirection' flag. WP_Http_Fsockopen would work,
	 * but it has the lowest priority of all transports.
	 *
	 * Alas, there is no way to reliably influence which transport is chosen - the WP_Http::_getTransport
	 * function caches the available choices, so plugins can disable individual transports only during
	 * its first run. Therefore, we must pick the best transport manually.
	 *
	 * @param string $url
	 * @return array|WP_Error
	 */
	function head($url){
		$conf = blc_get_configuration();
		$args = array(
			'timeout' => $conf->options['timeout'],
			'redirection' => 0,
			'_redirection' => 0, //Internal flag that turns off redirect handling. See WP_Http::handle_redirects()
		);
		return wp_remote_head($url, $args);
	}

}
SayenkoDesign/playground
wp-content/plugins/broken-link-checker/modules/extras/mediafire.php
PHP
gpl-2.0
5,392
// SPDX-License-Identifier: GPL-2.0 /* * USB4 specific functionality * * Copyright (C) 2019, Intel Corporation * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> * Rajmohan Mani <rajmohan.mani@intel.com> */ #include <linux/delay.h> #include <linux/ktime.h> #include "tb.h" #define USB4_DATA_DWORDS 16 #define USB4_DATA_RETRIES 3 enum usb4_switch_op { USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10, USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12, USB4_SWITCH_OP_NVM_WRITE = 0x20, USB4_SWITCH_OP_NVM_AUTH = 0x21, USB4_SWITCH_OP_NVM_READ = 0x22, USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23, USB4_SWITCH_OP_DROM_READ = 0x24, USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25, }; #define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2) #define USB4_NVM_READ_OFFSET_SHIFT 2 #define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24) #define USB4_NVM_READ_LENGTH_SHIFT 24 #define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK #define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT #define USB4_DROM_ADDRESS_MASK GENMASK(14, 2) #define USB4_DROM_ADDRESS_SHIFT 2 #define USB4_DROM_SIZE_MASK GENMASK(19, 15) #define USB4_DROM_SIZE_SHIFT 15 #define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0) typedef int (*read_block_fn)(struct tb_switch *, unsigned int, void *, size_t); typedef int (*write_block_fn)(struct tb_switch *, const void *, size_t); static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, u32 value, int timeout_msec) { ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); do { u32 val; int ret; ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); if (ret) return ret; if ((val & bit) == value) return 0; usleep_range(50, 100); } while (ktime_before(ktime_get(), timeout)); return -ETIMEDOUT; } static int usb4_switch_op_read_data(struct tb_switch *sw, void *data, size_t dwords) { if (dwords > USB4_DATA_DWORDS) return -EINVAL; return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords); } static int usb4_switch_op_write_data(struct tb_switch 
*sw, const void *data, size_t dwords) { if (dwords > USB4_DATA_DWORDS) return -EINVAL; return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords); } static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata) { return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); } static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata) { return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); } static int usb4_switch_do_read_data(struct tb_switch *sw, u16 address, void *buf, size_t size, read_block_fn read_block) { unsigned int retries = USB4_DATA_RETRIES; unsigned int offset; offset = address & 3; address = address & ~3; do { size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4); unsigned int dwaddress, dwords; u8 data[USB4_DATA_DWORDS * 4]; int ret; dwaddress = address / 4; dwords = ALIGN(nbytes, 4) / 4; ret = read_block(sw, dwaddress, data, dwords); if (ret) { if (ret == -ETIMEDOUT) { if (retries--) continue; ret = -EIO; } return ret; } memcpy(buf, data + offset, nbytes); size -= nbytes; address += nbytes; buf += nbytes; } while (size > 0); return 0; } static int usb4_switch_do_write_data(struct tb_switch *sw, u16 address, const void *buf, size_t size, write_block_fn write_next_block) { unsigned int retries = USB4_DATA_RETRIES; unsigned int offset; offset = address & 3; address = address & ~3; do { u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4); u8 data[USB4_DATA_DWORDS * 4]; int ret; memcpy(data + offset, buf, nbytes); ret = write_next_block(sw, data, nbytes / 4); if (ret) { if (ret == -ETIMEDOUT) { if (retries--) continue; ret = -EIO; } return ret; } size -= nbytes; address += nbytes; buf += nbytes; } while (size > 0); return 0; } static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status) { u32 val; int ret; val = opcode | ROUTER_CS_26_OV; ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); if (ret) return ret; ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 
500); if (ret) return ret; ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); if (val & ROUTER_CS_26_ONS) return -EOPNOTSUPP; *status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT; return 0; } /** * usb4_switch_setup() - Additional setup for USB4 device * @sw: USB4 router to setup * * USB4 routers need additional settings in order to enable all the * tunneling. This function enables USB and PCIe tunneling if it can be * enabled (e.g the parent switch also supports them). If USB tunneling * is not available for some reason (like that there is Thunderbolt 3 * switch upstream) then the internal xHCI controller is enabled * instead. */ int usb4_switch_setup(struct tb_switch *sw) { struct tb_switch *parent; bool tbt3, xhci; u32 val = 0; int ret; if (!tb_route(sw)) return 0; ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1); if (ret) return ret; xhci = val & ROUTER_CS_6_HCI; tbt3 = !(val & ROUTER_CS_6_TNS); tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n", tbt3 ? "yes" : "no", xhci ? "yes" : "no"); ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); if (ret) return ret; parent = tb_switch_parent(sw); if (tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) { val |= ROUTER_CS_5_UTO; xhci = false; } /* Only enable PCIe tunneling if the parent router supports it */ if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) { val |= ROUTER_CS_5_PTO; /* * xHCI can be enabled if PCIe tunneling is supported * and the parent does not have any USB3 dowstream * adapters (so we cannot do USB 3.x tunneling). */ if (xhci) val |= ROUTER_CS_5_HCO; } /* TBT3 supported by the CM */ val |= ROUTER_CS_5_C3S; /* Tunneling configuration is ready now */ val |= ROUTER_CS_5_CV; ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); if (ret) return ret; return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR, ROUTER_CS_6_CR, 50); } /** * usb4_switch_read_uid() - Read UID from USB4 router * @sw: USB4 router * * Reads 64-bit UID from USB4 router config space. 
*/ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid) { return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2); } static int usb4_switch_drom_read_block(struct tb_switch *sw, unsigned int dwaddress, void *buf, size_t dwords) { u8 status = 0; u32 metadata; int ret; metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK; metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) & USB4_DROM_ADDRESS_MASK; ret = usb4_switch_op_write_metadata(sw, metadata); if (ret) return ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status); if (ret) return ret; if (status) return -EIO; return usb4_switch_op_read_data(sw, buf, dwords); } /** * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM * @sw: USB4 router * * Uses USB4 router operations to read router DROM. For devices this * should always work but for hosts it may return %-EOPNOTSUPP in which * case the host router does not have DROM. */ int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size) { return usb4_switch_do_read_data(sw, address, buf, size, usb4_switch_drom_read_block); } static int usb4_set_port_configured(struct tb_port *port, bool configured) { int ret; u32 val; ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_19, 1); if (ret) return ret; if (configured) val |= PORT_CS_19_PC; else val &= ~PORT_CS_19_PC; return tb_port_write(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_19, 1); } /** * usb4_switch_configure_link() - Set upstream USB4 link configured * @sw: USB4 router * * Sets the upstream USB4 link to be configured for power management * purposes. */ int usb4_switch_configure_link(struct tb_switch *sw) { struct tb_port *up; if (!tb_route(sw)) return 0; up = tb_upstream_port(sw); return usb4_set_port_configured(up, true); } /** * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration * @sw: USB4 router * * Reverse of usb4_switch_configure_link(). 
*/ void usb4_switch_unconfigure_link(struct tb_switch *sw) { struct tb_port *up; if (sw->is_unplugged || !tb_route(sw)) return; up = tb_upstream_port(sw); usb4_set_port_configured(up, false); } /** * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding * @sw: USB4 router * * Checks whether conditions are met so that lane bonding can be * established with the upstream router. Call only for device routers. */ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw) { struct tb_port *up; int ret; u32 val; up = tb_upstream_port(sw); ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1); if (ret) return false; return !!(val & PORT_CS_18_BE); } /** * usb4_switch_set_sleep() - Prepare the router to enter sleep * @sw: USB4 router * * Enables wakes and sets sleep bit for the router. Returns when the * router sleep ready bit has been asserted. */ int usb4_switch_set_sleep(struct tb_switch *sw) { int ret; u32 val; /* Set sleep bit and wait for sleep ready to be asserted */ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); if (ret) return ret; val |= ROUTER_CS_5_SLP; ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); if (ret) return ret; return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR, ROUTER_CS_6_SLPR, 500); } /** * usb4_switch_nvm_sector_size() - Return router NVM sector size * @sw: USB4 router * * If the router supports NVM operations this function returns the NVM * sector size in bytes. If NVM operations are not supported returns * %-EOPNOTSUPP. */ int usb4_switch_nvm_sector_size(struct tb_switch *sw) { u32 metadata; u8 status; int ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status); if (ret) return ret; if (status) return status == 0x2 ? 
-EOPNOTSUPP : -EIO; ret = usb4_switch_op_read_metadata(sw, &metadata); if (ret) return ret; return metadata & USB4_NVM_SECTOR_SIZE_MASK; } static int usb4_switch_nvm_read_block(struct tb_switch *sw, unsigned int dwaddress, void *buf, size_t dwords) { u8 status = 0; u32 metadata; int ret; metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) & USB4_NVM_READ_LENGTH_MASK; metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) & USB4_NVM_READ_OFFSET_MASK; ret = usb4_switch_op_write_metadata(sw, metadata); if (ret) return ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status); if (ret) return ret; if (status) return -EIO; return usb4_switch_op_read_data(sw, buf, dwords); } /** * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM * @sw: USB4 router * @address: Starting address in bytes * @buf: Read data is placed here * @size: How many bytes to read * * Reads NVM contents of the router. If NVM is not supported returns * %-EOPNOTSUPP. */ int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size) { return usb4_switch_do_read_data(sw, address, buf, size, usb4_switch_nvm_read_block); } static int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address) { u32 metadata, dwaddress; u8 status = 0; int ret; dwaddress = address / 4; metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) & USB4_NVM_SET_OFFSET_MASK; ret = usb4_switch_op_write_metadata(sw, metadata); if (ret) return ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status); if (ret) return ret; return status ? -EIO : 0; } static int usb4_switch_nvm_write_next_block(struct tb_switch *sw, const void *buf, size_t dwords) { u8 status; int ret; ret = usb4_switch_op_write_data(sw, buf, dwords); if (ret) return ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status); if (ret) return ret; return status ? 
-EIO : 0; } /** * usb4_switch_nvm_write() - Write to the router NVM * @sw: USB4 router * @address: Start address where to write in bytes * @buf: Pointer to the data to write * @size: Size of @buf in bytes * * Writes @buf to the router NVM using USB4 router operations. If NVM * write is not supported returns %-EOPNOTSUPP. */ int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, const void *buf, size_t size) { int ret; ret = usb4_switch_nvm_set_offset(sw, address); if (ret) return ret; return usb4_switch_do_write_data(sw, address, buf, size, usb4_switch_nvm_write_next_block); } /** * usb4_switch_nvm_authenticate() - Authenticate new NVM * @sw: USB4 router * * After the new NVM has been written via usb4_switch_nvm_write(), this * function triggers NVM authentication process. If the authentication * is successful the router is power cycled and the new NVM starts * running. In case of failure returns negative errno. */ int usb4_switch_nvm_authenticate(struct tb_switch *sw) { u8 status = 0; int ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status); if (ret) return ret; switch (status) { case 0x0: tb_sw_dbg(sw, "NVM authentication successful\n"); return 0; case 0x1: return -EINVAL; case 0x2: return -EAGAIN; case 0x3: return -EOPNOTSUPP; default: return -EIO; } } /** * usb4_switch_query_dp_resource() - Query availability of DP IN resource * @sw: USB4 router * @in: DP IN adapter * * For DP tunneling this function can be used to query availability of * DP IN resource. Returns true if the resource is available for DP * tunneling, false otherwise. */ bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) { u8 status; int ret; ret = usb4_switch_op_write_metadata(sw, in->port); if (ret) return false; ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status); /* * If DP resource allocation is not supported assume it is * always available. 
*/ if (ret == -EOPNOTSUPP) return true; else if (ret) return false; return !status; } /** * usb4_switch_alloc_dp_resource() - Allocate DP IN resource * @sw: USB4 router * @in: DP IN adapter * * Allocates DP IN resource for DP tunneling using USB4 router * operations. If the resource was allocated returns %0. Otherwise * returns negative errno, in particular %-EBUSY if the resource is * already allocated. */ int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) { u8 status; int ret; ret = usb4_switch_op_write_metadata(sw, in->port); if (ret) return ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status); if (ret == -EOPNOTSUPP) return 0; else if (ret) return ret; return status ? -EBUSY : 0; } /** * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource * @sw: USB4 router * @in: DP IN adapter * * Releases the previously allocated DP IN resource. */ int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) { u8 status; int ret; ret = usb4_switch_op_write_metadata(sw, in->port); if (ret) return ret; ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status); if (ret == -EOPNOTSUPP) return 0; else if (ret) return ret; return status ? -EIO : 0; } static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port) { struct tb_port *p; int usb4_idx = 0; /* Assume port is primary */ tb_switch_for_each_port(sw, p) { if (!tb_port_is_null(p)) continue; if (tb_is_upstream_port(p)) continue; if (!p->link_nr) { if (p == port) break; usb4_idx++; } } return usb4_idx; } /** * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter * @sw: USB4 router * @port: USB4 port * * USB4 routers have direct mapping between USB4 ports and PCIe * downstream adapters where the PCIe topology is extended. This * function returns the corresponding downstream PCIe adapter or %NULL * if no such mapping was possible. 
*/ struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, const struct tb_port *port) { int usb4_idx = usb4_port_idx(sw, port); struct tb_port *p; int pcie_idx = 0; /* Find PCIe down port matching usb4_port */ tb_switch_for_each_port(sw, p) { if (!tb_port_is_pcie_down(p)) continue; if (pcie_idx == usb4_idx && !tb_pci_port_is_enabled(p)) return p; pcie_idx++; } return NULL; } /** * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter * @sw: USB4 router * @port: USB4 port * * USB4 routers have direct mapping between USB4 ports and USB 3.x * downstream adapters where the USB 3.x topology is extended. This * function returns the corresponding downstream USB 3.x adapter or * %NULL if no such mapping was possible. */ struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, const struct tb_port *port) { int usb4_idx = usb4_port_idx(sw, port); struct tb_port *p; int usb_idx = 0; /* Find USB3 down port matching usb4_port */ tb_switch_for_each_port(sw, p) { if (!tb_port_is_usb3_down(p)) continue; if (usb_idx == usb4_idx && !tb_usb3_port_is_enabled(p)) return p; usb_idx++; } return NULL; } /** * usb4_port_unlock() - Unlock USB4 downstream port * @port: USB4 port to unlock * * Unlocks USB4 downstream port so that the connection manager can * access the router below this port. */ int usb4_port_unlock(struct tb_port *port) { int ret; u32 val; ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1); if (ret) return ret; val &= ~ADP_CS_4_LCK; return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1); }
c0d3z3r0/linux-rockchip
drivers/thunderbolt/usb4.c
C
gpl-2.0
17,878
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1996, 99, 2003 by Ralf Baechle */ #ifndef _ASM_BYTEORDER_H #define _ASM_BYTEORDER_H #include <linux/compiler.h> #include <asm/types.h> #if defined(__MIPSEB__) # define __BIG_ENDIAN #elif defined(__MIPSEL__) # define __LITTLE_ENDIAN #else # error "MIPS, but neither __MIPSEB__, nor __MIPSEL__???" #endif #define __SWAB_64_THRU_32__ #ifdef CONFIG_CPU_MIPSR2 static inline __attribute_const__ __u16 __arch_swab16(__u16 x) { __asm__( " wsbh %0, %1 \n" : "=r" (x) : "r" (x)); return x; } #define __arch_swab16 __arch_swab16 static inline __attribute_const__ __u32 __arch_swab32(__u32 x) { __asm__( " wsbh %0, %1 \n" " rotr %0, %0, 16 \n" : "=r" (x) : "r" (x)); return x; } #define __arch_swab32 __arch_swab32 #ifdef CONFIG_CPU_MIPS64_R2 static inline __attribute_const__ __u64 __arch_swab64(__u64 x) { __asm__( " dsbh %0, %1\n" " dshd %0, %0" : "=r" (x) : "r" (x)); return x; } #define __arch_swab64 __arch_swab64 #endif /* CONFIG_CPU_MIPS64_R2 */ #endif /* CONFIG_CPU_MIPSR2 */ #include <linux/byteorder.h> #endif /* _ASM_BYTEORDER_H */
kzlin129/tt-gpl
go12/linux-2.6.28.10/arch/mips/include/asm/byteorder.h
C
gpl-2.0
1,265
/* Simple DirectMedia Layer Copyright (C) 1997-2018 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "../../SDL_internal.h" #if SDL_AUDIO_DRIVER_PSP #include <stdio.h> #include <string.h> #include <stdlib.h> #include <malloc.h> #include "SDL_audio.h" #include "SDL_error.h" #include "SDL_timer.h" #include "../SDL_audio_c.h" #include "../SDL_audiodev_c.h" #include "../SDL_sysaudio.h" #include "SDL_pspaudio.h" #include <pspaudio.h> #include <pspthreadman.h> /* The tag name used by PSP audio */ #define PSPAUDIO_DRIVER_NAME "psp" static int PSPAUDIO_OpenDevice(_THIS, void *handle, const char *devname, int iscapture) { int format, mixlen, i; this->hidden = (struct SDL_PrivateAudioData *) SDL_malloc(sizeof(*this->hidden)); if (this->hidden == NULL) { return SDL_OutOfMemory(); } SDL_zerop(this->hidden); switch (this->spec.format & 0xff) { case 8: case 16: this->spec.format = AUDIO_S16LSB; break; default: return SDL_SetError("Unsupported audio format"); } /* The sample count must be a multiple of 64. */ this->spec.samples = PSP_AUDIO_SAMPLE_ALIGN(this->spec.samples); this->spec.freq = 44100; /* Update the fragment size as size in bytes. 
*/ SDL_CalculateAudioSpec(&this->spec); /* Allocate the mixing buffer. Its size and starting address must be a multiple of 64 bytes. Our sample count is already a multiple of 64, so spec->size should be a multiple of 64 as well. */ mixlen = this->spec.size * NUM_BUFFERS; this->hidden->rawbuf = (Uint8 *) memalign(64, mixlen); if (this->hidden->rawbuf == NULL) { return SDL_SetError("Couldn't allocate mixing buffer"); } /* Setup the hardware channel. */ if (this->spec.channels == 1) { format = PSP_AUDIO_FORMAT_MONO; } else { this->spec.channels = 2; format = PSP_AUDIO_FORMAT_STEREO; } this->hidden->channel = sceAudioChReserve(PSP_AUDIO_NEXT_CHANNEL, this->spec.samples, format); if (this->hidden->channel < 0) { free(this->hidden->rawbuf); this->hidden->rawbuf = NULL; return SDL_SetError("Couldn't reserve hardware channel"); } memset(this->hidden->rawbuf, 0, mixlen); for (i = 0; i < NUM_BUFFERS; i++) { this->hidden->mixbufs[i] = &this->hidden->rawbuf[i * this->spec.size]; } this->hidden->next_buffer = 0; return 0; } static void PSPAUDIO_PlayDevice(_THIS) { Uint8 *mixbuf = this->hidden->mixbufs[this->hidden->next_buffer]; if (this->spec.channels == 1) { sceAudioOutputBlocking(this->hidden->channel, PSP_AUDIO_VOLUME_MAX, mixbuf); } else { sceAudioOutputPannedBlocking(this->hidden->channel, PSP_AUDIO_VOLUME_MAX, PSP_AUDIO_VOLUME_MAX, mixbuf); } this->hidden->next_buffer = (this->hidden->next_buffer + 1) % NUM_BUFFERS; } /* This function waits until it is possible to write a full sound buffer */ static void PSPAUDIO_WaitDevice(_THIS) { /* Because we block when sending audio, there's no need for this function to do anything. */ } static Uint8 *PSPAUDIO_GetDeviceBuf(_THIS) { return this->hidden->mixbufs[this->hidden->next_buffer]; } static void PSPAUDIO_CloseDevice(_THIS) { if (this->hidden->channel >= 0) { sceAudioChRelease(this->hidden->channel); } free(this->hidden->rawbuf); /* this uses memalign(), not SDL_malloc(). 
*/ SDL_free(this->hidden); } static void PSPAUDIO_ThreadInit(_THIS) { /* Increase the priority of this audio thread by 1 to put it ahead of other SDL threads. */ SceUID thid; SceKernelThreadInfo status; thid = sceKernelGetThreadId(); status.size = sizeof(SceKernelThreadInfo); if (sceKernelReferThreadStatus(thid, &status) == 0) { sceKernelChangeThreadPriority(thid, status.currentPriority - 1); } } static int PSPAUDIO_Init(SDL_AudioDriverImpl * impl) { /* Set the function pointers */ impl->OpenDevice = PSPAUDIO_OpenDevice; impl->PlayDevice = PSPAUDIO_PlayDevice; impl->WaitDevice = PSPAUDIO_WaitDevice; impl->GetDeviceBuf = PSPAUDIO_GetDeviceBuf; impl->CloseDevice = PSPAUDIO_CloseDevice; impl->ThreadInit = PSPAUDIO_ThreadInit; /* PSP audio device */ impl->OnlyHasDefaultOutputDevice = 1; /* impl->HasCaptureSupport = 1; impl->OnlyHasDefaultCaptureDevice = 1; */ /* impl->DetectDevices = DSOUND_DetectDevices; impl->Deinitialize = DSOUND_Deinitialize; */ return 1; /* this audio target is available. */ } AudioBootStrap PSPAUDIO_bootstrap = { "psp", "PSP audio driver", PSPAUDIO_Init, 0 }; /* SDL_AUDI */ #endif /* SDL_AUDIO_DRIVER_PSP */ /* vi: set ts=4 sw=4 expandtab: */
joncampbell123/dosbox-rewrite
vs2015/sdl2/src/audio/psp/SDL_pspaudio.c
C
gpl-2.0
5,604
/* * Digital Audio (PCM) abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; only version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/mm.h> #include <linux/module.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/pm_qos.h> #include <linux/uio.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/control.h> #include <sound/compress_offload.h> #include <sound/info.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/timer.h> #include <sound/minors.h> #include <asm/io.h> #if defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT) #include <dma-coherence.h> #endif //htc audio ++ #include <sound/soc.h> #undef pr_info #undef pr_err #define pr_info(fmt, ...) pr_aud_info(fmt, ##__VA_ARGS__) #define pr_err(fmt, ...) 
pr_aud_err(fmt, ##__VA_ARGS__) //htc audio -- /* * Compatibility */ struct snd_pcm_hw_params_old { unsigned int flags; unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT - SNDRV_PCM_HW_PARAM_ACCESS + 1]; struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME - SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1]; unsigned int rmask; unsigned int cmask; unsigned int info; unsigned int msbits; unsigned int rate_num; unsigned int rate_den; snd_pcm_uframes_t fifo_size; unsigned char reserved[64]; }; #ifdef CONFIG_SND_SUPPORT_OLD_API #define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old) #define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old) static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params_old __user * _oparams); static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params_old __user * _oparams); #endif static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream); /* * */ DEFINE_RWLOCK(snd_pcm_link_rwlock); EXPORT_SYMBOL(snd_pcm_link_rwlock); static DECLARE_RWSEM(snd_pcm_link_rwsem); static inline mm_segment_t snd_enter_user(void) { mm_segment_t fs = get_fs(); set_fs(get_ds()); return fs; } static inline void snd_leave_user(mm_segment_t fs) { set_fs(fs); } int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info) { struct snd_pcm_runtime *runtime; struct snd_pcm *pcm = substream->pcm; struct snd_pcm_str *pstr = substream->pstr; memset(info, 0, sizeof(*info)); info->card = pcm->card->number; info->device = pcm->device; info->stream = substream->stream; info->subdevice = substream->number; strlcpy(info->id, pcm->id, sizeof(info->id)); strlcpy(info->name, pcm->name, sizeof(info->name)); info->dev_class = pcm->dev_class; info->dev_subclass = pcm->dev_subclass; info->subdevices_count = pstr->substream_count; info->subdevices_avail = pstr->substream_count - pstr->substream_opened; strlcpy(info->subname, 
substream->name, sizeof(info->subname)); runtime = substream->runtime; /* AB: FIXME!!! This is definitely nonsense */ if (runtime) { info->sync = runtime->sync; substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_INFO, info); } return 0; } int snd_pcm_info_user(struct snd_pcm_substream *substream, struct snd_pcm_info __user * _info) { struct snd_pcm_info *info; int err; info = kmalloc(sizeof(*info), GFP_KERNEL); if (! info) return -ENOMEM; err = snd_pcm_info(substream, info); if (err >= 0) { if (copy_to_user(_info, info, sizeof(*info))) err = -EFAULT; } kfree(info); return err; } #undef RULES_DEBUG #if 1 //htc audio #define HW_PARAM(v) [SNDRV_PCM_HW_PARAM_##v] = #v static const char * const snd_pcm_hw_param_names[] = { HW_PARAM(ACCESS), HW_PARAM(FORMAT), HW_PARAM(SUBFORMAT), HW_PARAM(SAMPLE_BITS), HW_PARAM(FRAME_BITS), HW_PARAM(CHANNELS), HW_PARAM(RATE), HW_PARAM(PERIOD_TIME), HW_PARAM(PERIOD_SIZE), HW_PARAM(PERIOD_BYTES), HW_PARAM(PERIODS), HW_PARAM(BUFFER_TIME), HW_PARAM(BUFFER_SIZE), HW_PARAM(BUFFER_BYTES), HW_PARAM(TICK_TIME), }; #endif int snd_pcm_hw_refine(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { unsigned int k; struct snd_pcm_hardware *hw; struct snd_interval *i = NULL; struct snd_mask *m = NULL; struct snd_pcm_hw_constraints *constrs = &substream->runtime->hw_constraints; unsigned int rstamps[constrs->rules_num]; unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1]; unsigned int stamp = 2; int changed, again; params->info = 0; params->fifo_size = 0; if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS)) params->msbits = 0; if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) { params->rate_num = 0; params->rate_den = 0; } for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) { m = hw_param_mask(params, k); if (snd_mask_empty(m)) return -EINVAL; if (!(params->rmask & (1 << k))) continue; #ifdef RULES_DEBUG printk(KERN_DEBUG "%s = ", snd_pcm_hw_param_names[k]); printk("%04x%04x%04x%04x -> ", 
m->bits[3], m->bits[2], m->bits[1], m->bits[0]); #endif changed = snd_mask_refine(m, constrs_mask(constrs, k)); #ifdef RULES_DEBUG printk("%04x%04x%04x%04x\n", m->bits[3], m->bits[2], m->bits[1], m->bits[0]); #endif if (changed) params->cmask |= 1 << k; if (changed < 0) { //htc audio ++ pr_info("refine mask %s \n",snd_pcm_hw_param_names[k]); pr_info("fail mask 0x%x 0x%x 0x%x 0x%x\n", m->bits[3], m->bits[2], m->bits[1], m->bits[0]); //htc audio -- return changed; } } for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) { i = hw_param_interval(params, k); if (snd_interval_empty(i)) return -EINVAL; if (!(params->rmask & (1 << k))) continue; #ifdef RULES_DEBUG printk(KERN_DEBUG "%s = ", snd_pcm_hw_param_names[k]); if (i->empty) printk("empty"); else printk("%c%u %u%c", i->openmin ? '(' : '[', i->min, i->max, i->openmax ? ')' : ']'); printk(" -> "); #endif changed = snd_interval_refine(i, constrs_interval(constrs, k)); #ifdef RULES_DEBUG if (i->empty) printk("empty\n"); else printk("%c%u %u%c\n", i->openmin ? '(' : '[', i->min, i->max, i->openmax ? ')' : ']'); #endif if (changed) params->cmask |= 1 << k; if (changed < 0) { //htc audio ++ pr_info("refine interval %s fail\n",snd_pcm_hw_param_names[k]); pr_info("fail max %u min %u\n",i->max,i->min); //htc audio -- return changed; } } for (k = 0; k < constrs->rules_num; k++) rstamps[k] = 0; for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) vstamps[k] = (params->rmask & (1 << k)) ? 
1 : 0; do { again = 0; for (k = 0; k < constrs->rules_num; k++) { struct snd_pcm_hw_rule *r = &constrs->rules[k]; unsigned int d; int doit = 0; if (r->cond && !(r->cond & params->flags)) continue; for (d = 0; r->deps[d] >= 0; d++) { if (vstamps[r->deps[d]] > rstamps[k]) { doit = 1; break; } } if (!doit) continue; #ifdef RULES_DEBUG printk(KERN_DEBUG "Rule %d [%p]: ", k, r->func); if (r->var >= 0) { printk("%s = ", snd_pcm_hw_param_names[r->var]); if (hw_is_mask(r->var)) { m = hw_param_mask(params, r->var); printk("%x", *m->bits); } else { i = hw_param_interval(params, r->var); if (i->empty) printk("empty"); else printk("%c%u %u%c", i->openmin ? '(' : '[', i->min, i->max, i->openmax ? ')' : ']'); } } #endif changed = r->func(params, r); #ifdef RULES_DEBUG if (r->var >= 0) { printk(" -> "); if (hw_is_mask(r->var)) printk("%x", *m->bits); else { if (i->empty) printk("empty"); else printk("%c%u %u%c", i->openmin ? '(' : '[', i->min, i->max, i->openmax ? ')' : ']'); } } printk("\n"); #endif rstamps[k] = stamp; if (changed && r->var >= 0) { params->cmask |= (1 << r->var); vstamps[r->var] = stamp; again = 1; } if (changed < 0) { //htc audio ++ pr_info("refine rule %s fail",snd_pcm_hw_param_names[r->var]); if (hw_is_mask(r->var)) { m = hw_param_mask(params, r->var); pr_info("fail rule mask %x", *m->bits); } else if (r->var >= SNDRV_PCM_HW_PARAM_FIRST_INTERVAL && r->var <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL) { i = hw_param_interval(params, r->var); if (i->empty) pr_info("empty"); else pr_info("fail rule max %u min %u",i->max,i->min); } //htc audio -- return changed; } stamp++; } } while (again); if (!params->msbits) { i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS); if (snd_interval_single(i)) params->msbits = snd_interval_value(i); } if (!params->rate_den) { i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); if (snd_interval_single(i)) { params->rate_num = snd_interval_value(i); params->rate_den = 1; } } hw = &substream->runtime->hw; if (!params->info) 
/* tail of snd_pcm_hw_refine(): fill in the remaining derived fields */
	params->info = hw->info & ~SNDRV_PCM_INFO_FIFO_IN_FRAMES;
	if (!params->fifo_size) {
		m = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
		i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
		if (snd_mask_min(m) == snd_mask_max(m) &&
		    snd_interval_min(i) == snd_interval_max(i)) {
			/* format/channels fully determined: ask the driver for the FIFO size */
			changed = substream->ops->ioctl(substream,
					SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
			if (changed < 0)
				return changed;
		}
	}
	params->rmask = 0;
	return 0;
}

EXPORT_SYMBOL(snd_pcm_hw_refine);

/*
 * ioctl wrapper for snd_pcm_hw_refine(): copies the hw_params blob from
 * user space, refines it and copies the (possibly narrowed) result back.
 */
static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params __user * _params)
{
	struct snd_pcm_hw_params *params;
	int err;

	pr_info("%s: ++\n",__func__);
	params = memdup_user(_params, sizeof(*params));
	if (IS_ERR(params))
		return PTR_ERR(params);

	err = snd_pcm_hw_refine(substream, params);
	if (copy_to_user(_params, params, sizeof(*params))) {
		if (!err)
			err = -EFAULT;
	}

	pr_info("%s: --\n",__func__);
	kfree(params);
	return err;
}

/*
 * Convert one period to microseconds, used below as the PM QoS latency
 * request.  Returns -1 when the rate is not yet known.
 */
static int period_to_usecs(struct snd_pcm_runtime *runtime)
{
	int usecs;

	if (! runtime->rate)
		return -1; /* invalid */

	/* take 75% of period time as the deadline */
	usecs = (750000 / runtime->rate) * runtime->period_size;
	usecs += ((750000 % runtime->rate) * runtime->period_size) /
		runtime->rate;

	return usecs;
}

/*
 * HW_PARAMS ioctl body: refine and choose the final hardware configuration,
 * program the driver, and initialize the runtime and default sw params.
 */
static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime;
	int err, usecs;
	unsigned int bits;
	snd_pcm_uframes_t frames;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	pr_info("%s ++", __func__);
	snd_pcm_stream_lock_irq(substream);
	/* only OPEN/SETUP/PREPARED streams may be (re)configured */
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		break;
	default:
		pr_info("%s --", __func__);
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	pr_info("%s --", __func__);
	snd_pcm_stream_unlock_irq(substream);
#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
	if (!substream->oss.oss)
#endif
		if (atomic_read(&substream->mmap_count))
			return -EBADFD;

	params->rmask = ~0U;
	err = snd_pcm_hw_refine(substream, params);
	if (err < 0)
		goto _error;

	err = snd_pcm_hw_params_choose(substream, params);
	if (err < 0)
		goto _error;

	if (substream->ops->hw_params != NULL) {
		err = substream->ops->hw_params(substream, params);
		if (err < 0)
			goto _error;
	}

	/* cache the chosen configuration in the runtime */
	runtime->access = params_access(params);
	runtime->format = params_format(params);
	runtime->subformat = params_subformat(params);
	runtime->channels = params_channels(params);
	runtime->rate = params_rate(params);
	runtime->period_size = params_period_size(params);
	runtime->periods = params_periods(params);
	runtime->buffer_size = params_buffer_size(params);
	runtime->info = params->info;
	runtime->rate_num = params->rate_num;
	runtime->rate_den = params->rate_den;
	runtime->no_period_wakeup =
			(params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
			(params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);

	bits = snd_pcm_format_physical_width(runtime->format);
	runtime->sample_bits = bits;
	bits *=
/* continuation of snd_pcm_hw_params(): derive frame/alignment geometry */
	runtime->channels;
	runtime->frame_bits = bits;
	frames = 1;
	while (bits % 8 != 0) {
		bits *= 2;
		frames *= 2;
	}
	runtime->byte_align = bits / 8;
	runtime->min_align = frames;

	/* Default sw params */
	runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
	runtime->period_step = 1;
	runtime->control->avail_min = runtime->period_size;
	runtime->start_threshold = 1;
	runtime->stop_threshold = runtime->buffer_size;
	runtime->silence_threshold = 0;
	runtime->silence_size = 0;
	runtime->boundary = runtime->buffer_size;
	/* grow the wrap boundary as far as possible without exceeding LONG_MAX */
	while (runtime->boundary * 2 * runtime->channels <= LONG_MAX - runtime->buffer_size)
		runtime->boundary *= 2;

	snd_pcm_timer_resolution_change(substream);
	runtime->status->state = SNDRV_PCM_STATE_SETUP;

	/* refresh the CPU DMA latency request for the new period time */
	if (pm_qos_request_active(&substream->latency_pm_qos_req))
		pm_qos_remove_request(&substream->latency_pm_qos_req);
	if ((usecs = period_to_usecs(runtime)) >= 0)
		pm_qos_add_request(&substream->latency_pm_qos_req,
				   PM_QOS_CPU_DMA_LATENCY, usecs);
	return 0;
 _error:
	/* hardware might be unusable from this time,
	   so we force application to retry to set
	   the correct hardware parameter settings */
	runtime->status->state = SNDRV_PCM_STATE_OPEN;
	if (substream->ops->hw_free != NULL)
		substream->ops->hw_free(substream);
	return err;
}

/* HW_PARAMS ioctl entry: user-space copy wrapper around snd_pcm_hw_params() */
static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params __user * _params)
{
	struct snd_pcm_hw_params *params;
	int err;

	pr_info("%s ++\n",__func__);
	params = memdup_user(_params, sizeof(*params));
	if (IS_ERR(params))
		return PTR_ERR(params);

	err = snd_pcm_hw_params(substream, params);
	if (copy_to_user(_params, params, sizeof(*params))) {
		if (!err)
			err = -EFAULT;
	}

	pr_info("%s --\n",__func__);
	kfree(params);
	return err;
}

/* Release the hardware configuration and fall back to OPEN state. */
static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	int result = 0;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	pr_info("%s ++", __func__);
	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		break;
	default:
		pr_info("%s --", __func__);
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	pr_info("%s --", __func__);
	snd_pcm_stream_unlock_irq(substream);
	if (atomic_read(&substream->mmap_count))
		return -EBADFD;
	if (substream->ops->hw_free)
		result = substream->ops->hw_free(substream);
	runtime->status->state = SNDRV_PCM_STATE_OPEN;
	pm_qos_remove_request(&substream->latency_pm_qos_req);
	return result;
}

/* Validate and apply software parameters (thresholds, tstamp mode, ...). */
static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_sw_params *params)
{
	struct snd_pcm_runtime *runtime;
	int err;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	pr_info("%s ++", __func__);
	snd_pcm_stream_lock_irq(substream);
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
		pr_info("%s --", __func__);
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	pr_info("%s --", __func__);
	snd_pcm_stream_unlock_irq(substream);

	if (params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
		return -EINVAL;
	if (params->avail_min == 0)
		return -EINVAL;
	if (params->silence_size >= runtime->boundary) {
		if (params->silence_threshold != 0)
			return -EINVAL;
	} else {
		if (params->silence_size > params->silence_threshold)
			return -EINVAL;
		if (params->silence_threshold > runtime->buffer_size)
			return -EINVAL;
	}
	err = 0;
	pr_info("%s +++", __func__);
	snd_pcm_stream_lock_irq(substream);
	runtime->tstamp_mode = params->tstamp_mode;
	runtime->period_step = params->period_step;
	runtime->control->avail_min = params->avail_min;
	runtime->start_threshold = params->start_threshold;
	runtime->stop_threshold = params->stop_threshold;
	runtime->silence_threshold = params->silence_threshold;
	runtime->silence_size = params->silence_size;
	params->boundary = runtime->boundary;
	if (snd_pcm_running(substream)) {
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
		    runtime->silence_size > 0)
			snd_pcm_playback_silence(substream, ULONG_MAX);
		err = snd_pcm_update_state(substream, runtime);
	}
	pr_info("%s ---", __func__);
	snd_pcm_stream_unlock_irq(substream);
	return err;
}

/* SW_PARAMS ioctl entry: user-space copy wrapper around snd_pcm_sw_params() */
static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_sw_params __user * _params)
{
	struct snd_pcm_sw_params params;
	int err;

	if (copy_from_user(&params, _params, sizeof(params)))
		return -EFAULT;
	err = snd_pcm_sw_params(substream, &params);
	if (copy_to_user(_params, &params, sizeof(params)))
		return -EFAULT;
	return err;
}

/* Fill *status with a consistent snapshot of the stream's current state. */
int snd_pcm_status(struct snd_pcm_substream *substream,
		   struct snd_pcm_status *status)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	pr_info("%s ++", __func__);
	snd_pcm_stream_lock_irq(substream);
	status->state = runtime->status->state;
	status->suspended_state = runtime->status->suspended_state;
	if (status->state == SNDRV_PCM_STATE_OPEN)
		goto _end;
	status->trigger_tstamp = runtime->trigger_tstamp;
	if (snd_pcm_running(substream)) {
		snd_pcm_update_hw_ptr(substream);
		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
			status->tstamp = runtime->status->tstamp;
			goto _tstamp_end;
		}
	}
	snd_pcm_gettime(runtime, &status->tstamp);
 _tstamp_end:
	status->appl_ptr = runtime->control->appl_ptr;
	status->hw_ptr = runtime->status->hw_ptr;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		status->avail = snd_pcm_playback_avail(runtime);
		if (runtime->status->state == SNDRV_PCM_STATE_RUNNING ||
		    runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
			status->delay = runtime->buffer_size - status->avail;
			status->delay += runtime->delay;
		} else
			status->delay = 0;
	} else {
		status->avail = snd_pcm_capture_avail(runtime);
		if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
			status->delay = status->avail + runtime->delay;
		else
			status->delay = 0;
	}
	status->avail_max = runtime->avail_max;
	status->overrange = runtime->overrange;
	/* avail_max/overrange are read-and-reset counters */
	runtime->avail_max = 0;
	runtime->overrange = 0;
 _end:
	pr_info("%s --", __func__);
	snd_pcm_stream_unlock_irq(substream);
	return 0;
}

static int snd_pcm_status_user(struct snd_pcm_substream *substream,
			       struct snd_pcm_status __user * _status)
{
	struct snd_pcm_status status;
	int res;

	memset(&status, 0, sizeof(status));
	res = snd_pcm_status(substream, &status);
	if (res < 0)
		return res;
	if (copy_to_user(_status, &status, sizeof(status)))
		return -EFAULT;
	return 0;
}

/* Query per-channel buffer layout via the driver's CHANNEL_INFO ioctl. */
static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
				struct snd_pcm_channel_info * info)
{
	struct snd_pcm_runtime *runtime;
	unsigned int channel;

	channel = info->channel;
	runtime = substream->runtime;
	pr_info("%s ++", __func__);
	snd_pcm_stream_lock_irq(substream);
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
		pr_info("%s --", __func__);
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	pr_info("%s --", __func__);
	snd_pcm_stream_unlock_irq(substream);
	if (channel >= runtime->channels)
		return -EINVAL;
	memset(info, 0, sizeof(*info));
	info->channel = channel;
	return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
}

static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
				     struct snd_pcm_channel_info __user * _info)
{
	struct snd_pcm_channel_info info;
	int res;

	if (copy_from_user(&info, _info, sizeof(info)))
		return -EFAULT;
	res = snd_pcm_channel_info(substream, &info);
	if (res < 0)
		return res;
	if (copy_to_user(_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/* Record the trigger timestamp, propagating it from the group master. */
static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime->trigger_master == NULL)
		return;
	if (runtime->trigger_master == substream) {
		snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
	} else {
		snd_pcm_trigger_tstamp(runtime->trigger_master);
		runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
	}
	runtime->trigger_master = NULL;
}

/* pre/do/undo/post hooks for one state-change action (start, stop, ...) */
struct action_ops {
	int (*pre_action)(struct snd_pcm_substream *substream, int state);
	int (*do_action)(struct snd_pcm_substream *substream, int state);
	void (*undo_action)(struct snd_pcm_substream *substream, int state);
	void (*post_action)(struct snd_pcm_substream *substream, int state);
};

/*
 * this functions is core for handling of linked stream
 * Note: the stream state might be changed also on failure
 * Note2: call with calling stream lock + link lock
 */
static int snd_pcm_action_group(struct action_ops *ops,
				struct snd_pcm_substream *substream,
				int state, int do_lock)
{
	struct snd_pcm_substream *s = NULL;
	struct snd_pcm_substream *s1;
	int res = 0;

	snd_pcm_group_for_each_entry(s, substream) {
		if (do_lock && s != substream)
			spin_lock_nested(&s->self_group.lock,
					 SINGLE_DEPTH_NESTING);
		res = ops->pre_action(s, state);
		if (res < 0)
			goto _unlock;
	}
	snd_pcm_group_for_each_entry(s, substream) {
		res = ops->do_action(s, state);
		if (res < 0) {
			if (ops->undo_action) {
				/* roll back every stream that already succeeded */
				snd_pcm_group_for_each_entry(s1, substream) {
					if (s1 == s) /* failed stream */
						break;
					ops->undo_action(s1, state);
				}
			}
			s = NULL; /* unlock all */
			goto _unlock;
		}
	}
	snd_pcm_group_for_each_entry(s, substream) {
		ops->post_action(s, state);
	}
 _unlock:
	if (do_lock) {
		/* unlock streams */
		snd_pcm_group_for_each_entry(s1, substream) {
			if (s1 != substream)
				spin_unlock(&s1->self_group.lock);
			if (s1 == s)	/* end */
				break;
		}
	}
	return res;
}

/*
 * Note: call with stream lock
 */
static int snd_pcm_action_single(struct action_ops *ops,
				 struct snd_pcm_substream *substream,
				 int state)
{
	int res;

	res = ops->pre_action(substream, state);
	if (res < 0)
		return res;
	res = ops->do_action(substream, state);
	if (res == 0)
		ops->post_action(substream, state);
	else if (ops->undo_action)
		ops->undo_action(substream, state);
	return res;
}

/*
 * Note: call with stream lock
 */
static int snd_pcm_action(struct action_ops *ops,
			  struct snd_pcm_substream *substream,
			  int state)
{
	int res;

	if (snd_pcm_stream_linked(substream)) {
		/* re-acquire in group-then-self order to keep lock ordering stable */
		if (!spin_trylock(&substream->group->lock)) {
			spin_unlock(&substream->self_group.lock);
			spin_lock(&substream->group->lock);
			spin_lock(&substream->self_group.lock);
		}
		res = snd_pcm_action_group(ops, substream, state, 1);
		spin_unlock(&substream->group->lock);
	} else {
		res = snd_pcm_action_single(ops,
substream, state);
	}
	return res;
}

/*
 * Note: don't use any locks before
 */
static int snd_pcm_action_lock_irq(struct action_ops *ops,
				   struct snd_pcm_substream *substream,
				   int state)
{
	int res;

	read_lock_irq(&snd_pcm_link_rwlock);
	if (snd_pcm_stream_linked(substream)) {
		spin_lock(&substream->group->lock);
		spin_lock(&substream->self_group.lock);
		res = snd_pcm_action_group(ops, substream, state, 1);
		spin_unlock(&substream->self_group.lock);
		spin_unlock(&substream->group->lock);
	} else {
		spin_lock(&substream->self_group.lock);
		res = snd_pcm_action_single(ops, substream, state);
		spin_unlock(&substream->self_group.lock);
	}
	read_unlock_irq(&snd_pcm_link_rwlock);
	return res;
}

/* Run an action without the stream spinlocks held. */
static int snd_pcm_action_nonatomic(struct action_ops *ops,
				    struct snd_pcm_substream *substream,
				    int state)
{
	int res;

	down_read(&snd_pcm_link_rwsem);
	if (snd_pcm_stream_linked(substream))
		res = snd_pcm_action_group(ops, substream, state, 0);
	else
		res = snd_pcm_action_single(ops, substream, state);
	up_read(&snd_pcm_link_rwsem);
	return res;
}

/*
 * start callbacks
 */
static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
		return -EBADFD;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    !substream->hw_no_buffer && !snd_pcm_playback_data(substream))
		return -EPIPE;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
{
	/* only the group master actually issues the trigger */
	if (substream->runtime->trigger_master != substream)
		return 0;
	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
}

static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master == substream)
		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
}

static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_trigger_tstamp(substream);
	runtime->hw_ptr_jiffies = jiffies;
	runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
							    runtime->rate;
	runtime->status->state = state;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, ULONG_MAX);
	if (substream->timer)
		snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSTART,
				 &runtime->trigger_tstamp);
}

static struct action_ops snd_pcm_action_start = {
	.pre_action = snd_pcm_pre_start,
	.do_action = snd_pcm_do_start,
	.undo_action = snd_pcm_undo_start,
	.post_action = snd_pcm_post_start
};

/**
 * snd_pcm_start - start all linked streams
 * @substream: the PCM substream instance
 */
int snd_pcm_start(struct snd_pcm_substream *substream)
{
	return snd_pcm_action(&snd_pcm_action_start, substream,
			      SNDRV_PCM_STATE_RUNNING);
}

/*
 * stop callbacks
 */
static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master == substream &&
	    snd_pcm_running(substream))
		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
	return 0; /* unconditonally stop all substreams */
}

static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state != state) {
		snd_pcm_trigger_tstamp(substream);
		if (substream->timer)
			snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSTOP,
					 &runtime->trigger_tstamp);
		runtime->status->state = state;
	}
	/* wake any readers/writers blocked on this stream */
	wake_up(&runtime->sleep);
	wake_up(&runtime->tsleep);
}

static struct action_ops snd_pcm_action_stop = {
	.pre_action = snd_pcm_pre_stop,
	.do_action = snd_pcm_do_stop,
	.post_action = snd_pcm_post_stop
};

/**
 * snd_pcm_stop - try to stop all running streams in the
/**
 * snd_pcm_stop - try to stop all running streams in the substream group
 * @substream: the PCM substream instance
 * @state: PCM state after stopping the stream
 *
 * The state of each stream is then changed to the given state unconditionally.
 */
int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
{
	return snd_pcm_action(&snd_pcm_action_stop, substream, state);
}

EXPORT_SYMBOL(snd_pcm_stop);

/**
 * snd_pcm_drain_done - stop the DMA only when the given stream is playback
 * @substream: the PCM substream
 *
 * After stopping, the state is changed to SETUP.
 * Unlike snd_pcm_stop(), this affects only the given stream.
 */
int snd_pcm_drain_done(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_single(&snd_pcm_action_stop, substream,
				     SNDRV_PCM_STATE_SETUP);
}

/*
 * pause callbacks
 */
static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
		return -ENOSYS;
	/* push==1 pauses a RUNNING stream; push==0 releases a PAUSED one */
	if (push) {
		if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
			return -EBADFD;
	} else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
		return -EBADFD;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
{
	if (substream->runtime->trigger_master != substream)
		return 0;
	/* some drivers might use hw_ptr to recover from the pause -
	   update the hw_ptr now */
	if (push)
		snd_pcm_update_hw_ptr(substream);
	/* The jiffies check in snd_pcm_update_hw_ptr*() is done by
	 * a delta between the current jiffies, this gives a large enough
	 * delta, effectively to skip the check once.
	 */
	substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
	return substream->ops->trigger(substream,
				       push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
					      SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
}

static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
{
	if (substream->runtime->trigger_master == substream)
		substream->ops->trigger(substream,
					push ?
snd_pcm_running(substream))
		return 0;
	substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
	return 0; /* suspend unconditionally */
}

static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_trigger_tstamp(substream);
	if (substream->timer)
		snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSUSPEND,
				 &runtime->trigger_tstamp);
	/* remember the previous state so resume can restore it */
	runtime->status->suspended_state = runtime->status->state;
	runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
	wake_up(&runtime->sleep);
	wake_up(&runtime->tsleep);
}

static struct action_ops snd_pcm_action_suspend = {
	.pre_action = snd_pcm_pre_suspend,
	.do_action = snd_pcm_do_suspend,
	.post_action = snd_pcm_post_suspend
};

/**
 * snd_pcm_suspend - trigger SUSPEND to all linked streams
 * @substream: the PCM substream
 *
 * After this call, all streams are changed to SUSPENDED state.
 */
int snd_pcm_suspend(struct snd_pcm_substream *substream)
{
	int err;
	unsigned long flags;

	if (! substream)
		return 0;

	snd_pcm_stream_lock_irqsave(substream, flags);
	err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
	snd_pcm_stream_unlock_irqrestore(substream, flags);
	return err;
}

EXPORT_SYMBOL(snd_pcm_suspend);

/**
 * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
 * @pcm: the PCM instance
 *
 * After this call, all streams are changed to SUSPENDED state.
 */
int snd_pcm_suspend_all(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	int stream, err = 0;

	if (! pcm)
		return 0;

	/* walk both the playback and the capture substream lists */
	for (stream = 0; stream < 2; stream++) {
		for (substream = pcm->streams[stream].substream;
		     substream; substream = substream->next) {
			/* FIXME: the open/close code should lock this as well */
			if (substream->runtime == NULL)
				continue;
			err = snd_pcm_suspend(substream);
			if (err < 0 && err != -EBUSY)
				return err;
		}
	}
	return 0;
}

EXPORT_SYMBOL(snd_pcm_suspend_all);

/* resume */

static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
		return -ENOSYS;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->trigger_master != substream)
		return 0;
	/* DMA not running previously? */
	if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING &&
	    (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING ||
	     substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
		return 0;
	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
}

static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master == substream &&
	    snd_pcm_running(substream))
		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
}

static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_trigger_tstamp(substream);
	if (substream->timer)
		snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MRESUME,
				 &runtime->trigger_tstamp);
	runtime->status->state = runtime->status->suspended_state;
}

static struct action_ops snd_pcm_action_resume = {
	.pre_action = snd_pcm_pre_resume,
	.do_action = snd_pcm_do_resume,
	.undo_action = snd_pcm_undo_resume,
	.post_action = snd_pcm_post_resume
};

static int snd_pcm_resume(struct snd_pcm_substream *substream)
{
	struct snd_card *card = substream->pcm->card;
	int res;
/* continuation of snd_pcm_resume(): wait for D0 power, then run the action */
	snd_power_lock(card);
	if ((res = snd_power_wait(card, SNDRV_CTL_POWER_D0)) >= 0)
		res = snd_pcm_action_lock_irq(&snd_pcm_action_resume,
					      substream, 0);
	snd_power_unlock(card);
	return res;
}

#else

static int snd_pcm_resume(struct snd_pcm_substream *substream)
{
	return -ENOSYS;
}

#endif /* CONFIG_PM */

/*
 * xrun ioctl
 *
 * Change the RUNNING stream(s) to XRUN state.
 */
static int snd_pcm_xrun(struct snd_pcm_substream *substream)
{
	struct snd_card *card = substream->pcm->card;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int result;

	snd_power_lock(card);
	if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
		result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
		if (result < 0)
			goto _unlock;
	}

	pr_info("%s ++", __func__);
	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_XRUN:
		result = 0;	/* already there */
		break;
	case SNDRV_PCM_STATE_RUNNING:
		result = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
		break;
	default:
		result = -EBADFD;
	}
	pr_info("%s --", __func__);
	snd_pcm_stream_unlock_irq(substream);
 _unlock:
	snd_power_unlock(card);
	return result;
}

/*
 * reset ioctl
 */
static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
	case SNDRV_PCM_STATE_SUSPENDED:
		return 0;
	default:
		return -EBADFD;
	}
}

static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
	if (err < 0)
		return err;
	runtime->hw_ptr_base = 0;
	/* snap the interrupt pointer back to a period boundary */
	runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
		runtime->status->hw_ptr % runtime->period_size;
	runtime->silence_start = runtime->status->hw_ptr;
	runtime->silence_filled = 0;
	return 0;
}

static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	runtime->control->appl_ptr = runtime->status->hw_ptr;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, ULONG_MAX);
}

static struct action_ops snd_pcm_action_reset = {
	.pre_action = snd_pcm_pre_reset,
	.do_action = snd_pcm_do_reset,
	.post_action = snd_pcm_post_reset
};

static int snd_pcm_reset(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
}

/*
 * prepare ioctl
 */
/* we use the second argument for updating f_flags */
static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
			       int f_flags)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
		return -EBADFD;
	if (snd_pcm_running(substream))
		return -EBUSY;
	substream->f_flags = f_flags;
	return 0;
}

static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
{
	int err;
	err = substream->ops->prepare(substream);
	if (err < 0)
		return err;
	/* a successful prepare also resets all stream pointers */
	return snd_pcm_do_reset(substream, 0);
}

static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	runtime->control->appl_ptr = runtime->status->hw_ptr;
	runtime->status->state = SNDRV_PCM_STATE_PREPARED;
}

static struct action_ops snd_pcm_action_prepare = {
	.pre_action = snd_pcm_pre_prepare,
	.do_action = snd_pcm_do_prepare,
	.post_action = snd_pcm_post_prepare
};

/**
 * snd_pcm_prepare - prepare the PCM substream to be triggerable
 * @substream: the PCM substream instance
 * @file: file to refer f_flags
 */
static int snd_pcm_prepare(struct snd_pcm_substream *substream,
			   struct file *file)
{
	int res;
	struct snd_card *card = substream->pcm->card;
	int f_flags;

	if (file)
		f_flags = file->f_flags;
	else
		f_flags = substream->f_flags;

	snd_power_lock(card);
	if ((res = snd_power_wait(card, SNDRV_CTL_POWER_D0)) >= 0)
		res =
snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
					 substream, f_flags);
	snd_power_unlock(card);
	return res;
}

/*
 * drain ioctl
 */

static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
{
	substream->runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		switch (runtime->status->state) {
		case SNDRV_PCM_STATE_PREPARED:
			/* start playback stream if possible */
			if (! snd_pcm_playback_empty(substream)) {
				snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
				snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
			}
			break;
		case SNDRV_PCM_STATE_RUNNING:
			runtime->status->state = SNDRV_PCM_STATE_DRAINING;
			break;
		default:
			break;
		}
	} else {
		/* stop running stream */
		if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
			/* capture stays DRAINING while data is still readable */
			int new_state = snd_pcm_capture_avail(runtime) > 0 ?
				SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
			snd_pcm_do_stop(substream, new_state);
			snd_pcm_post_stop(substream, new_state);
		}
	}
	return 0;
}

static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
{
}

static struct action_ops snd_pcm_action_drain_init = {
	.pre_action = snd_pcm_pre_drain_init,
	.do_action = snd_pcm_do_drain_init,
	.post_action = snd_pcm_post_drain_init
};

static int snd_pcm_drop(struct snd_pcm_substream *substream);

/*
 * Drain the stream(s).
 * When the substream is linked, sync until the draining of all playback streams
 * is finished.
 * After this call, all streams are supposed to be either SETUP or DRAINING
 * (capture only) state.
 */
static int snd_pcm_drain(struct snd_pcm_substream *substream,
			 struct file *file)
{
	struct snd_card *card;
	struct snd_pcm_runtime *runtime;
	struct snd_pcm_substream *s;
	wait_queue_t wait;
	int result = 0;
	int nonblock = 0;

	card = substream->pcm->card;
	runtime = substream->runtime;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;

	snd_power_lock(card);
	if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
		result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
		if (result < 0) {
			snd_power_unlock(card);
			return result;
		}
	}

	if (file) {
		if (file->f_flags & O_NONBLOCK)
			nonblock = 1;
	} else if (substream->f_flags & O_NONBLOCK)
		nonblock = 1;

	down_read(&snd_pcm_link_rwsem);
	pr_info("%s ++", __func__);
	snd_pcm_stream_lock_irq(substream);
	/* resume pause */
	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
		snd_pcm_pause(substream, 0);

	/* pre-start/stop - all running streams are changed to DRAINING state */
	result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
	if (result < 0)
		goto unlock;
	/* in non-blocking, we don't wait in ioctl but let caller poll */
	if (nonblock) {
		result = -EAGAIN;
		goto unlock;
	}

	for (;;) {
		long tout;
		struct snd_pcm_runtime *to_check;
		if (signal_pending(current)) {
			result = -ERESTARTSYS;
			break;
		}
		/* find a substream to drain */
		to_check = NULL;
		snd_pcm_group_for_each_entry(s, substream) {
			if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
				continue;
			runtime = s->runtime;
			if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
				to_check = runtime;
				break;
			}
		}
		if (!to_check)
			break; /* all drained */
		init_waitqueue_entry(&wait, current);
		add_wait_queue(&to_check->sleep, &wait);
		/* drop every lock while we sleep waiting for the period wakeup */
		pr_info("%s --", __func__);
		snd_pcm_stream_unlock_irq(substream);
		up_read(&snd_pcm_link_rwsem);
		snd_power_unlock(card);
		if (runtime->no_period_wakeup)
			tout = MAX_SCHEDULE_TIMEOUT;
		else {
			tout = 10;
			if (runtime->rate) {
				long t = runtime->period_size * 2 / runtime->rate;
				tout = max(t, tout);
			}
			tout = msecs_to_jiffies(tout * 1000);
		}
		tout =
schedule_timeout_interruptible(tout);
		snd_power_lock(card);
		down_read(&snd_pcm_link_rwsem);
		pr_info("%s ++", __func__);
		snd_pcm_stream_lock_irq(substream);
		remove_wait_queue(&to_check->sleep, &wait);
		if (tout == 0) {
			/* timed out without progress: give up on the drain */
			if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
				result = -ESTRPIPE;
			else {
				snd_printd("playback drain error (DMA or IRQ trouble?)\n");
				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
				result = -EIO;
			}
			break;
		}
	}

 unlock:
	pr_info("%s --", __func__);
	snd_pcm_stream_unlock_irq(substream);
	up_read(&snd_pcm_link_rwsem);
	snd_power_unlock(card);

	return result;
}

/* Forward a compressed-audio ioctl straight to the driver's ioctl op. */
static int snd_compressed_ioctl(struct snd_pcm_substream *substream,
			  unsigned int cmd, void __user *arg)
{
	struct snd_pcm_runtime *runtime;
	int err = 0;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	pr_debug("%s called with cmd = %d\n", __func__, cmd);
	err = substream->ops->ioctl(substream, cmd, arg);
	return err;
}

/* Forward a vendor/user-defined ioctl straight to the driver's ioctl op. */
static int snd_user_ioctl(struct snd_pcm_substream *substream,
			  unsigned int cmd, void __user *arg)
{
	struct snd_pcm_runtime *runtime;
	int err = 0;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	err = substream->ops->ioctl(substream, cmd, arg);
	return err;
}

/*
 * drop ioctl
 *
 * Immediately put all linked substreams into SETUP state.
 */
static int snd_pcm_drop(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	int result = 0;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED ||
	    runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
		return -EBADFD;

	pr_info("%s ++", __func__);
	snd_pcm_stream_lock_irq(substream);
	/* resume pause */
	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
		snd_pcm_pause(substream, 0);

	snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
	/* runtime->control->appl_ptr = runtime->status->hw_ptr; */
	pr_info("%s --", __func__);
	snd_pcm_stream_unlock_irq(substream);

	return result;
}


/* WARNING: Don't forget to fput back the file */
static struct file *snd_pcm_file_fd(int fd)
{
	struct file *file;
	struct inode *inode;
	unsigned int minor;

	file = fget(fd);
	if (!file)
		return NULL;
	inode = file_inode(file);
	if (!S_ISCHR(inode->i_mode) ||
	    imajor(inode) != snd_major) {
		fput(file);
		return NULL;
	}
	minor = iminor(inode);
	/* the fd must refer to a real PCM playback or capture device node */
	if (!snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK) &&
	    !snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE)) {
		fput(file);
		return NULL;
	}
	return file;
}

/*
 * PCM link handling
 */
static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
{
	int res = 0;
	struct file *file;
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream1;
	struct snd_pcm_group *group;

	file = snd_pcm_file_fd(fd);
	if (!file)
		return -EBADFD;
	pcm_file = file->private_data;
	substream1 = pcm_file->substream;
	/* allocate the group up front, before taking any locks */
	group = kmalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		res = -ENOMEM;
		goto _nolock;
	}
	down_write(&snd_pcm_link_rwsem);
	write_lock_irq(&snd_pcm_link_rwlock);
	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
	    substream->runtime->status->state != substream1->runtime->status->state) {
		res = -EBADFD;
		goto _end;
	}
	if (snd_pcm_stream_linked(substream1)) {
		res = -EALREADY;
		goto _end;
	}
	if
(!snd_pcm_stream_linked(substream)) { substream->group = group; spin_lock_init(&substream->group->lock); INIT_LIST_HEAD(&substream->group->substreams); list_add_tail(&substream->link_list, &substream->group->substreams); substream->group->count = 1; } list_add_tail(&substream1->link_list, &substream->group->substreams); substream->group->count++; substream1->group = substream->group; _end: write_unlock_irq(&snd_pcm_link_rwlock); up_write(&snd_pcm_link_rwsem); _nolock: fput(file); if (res < 0) kfree(group); return res; } static void relink_to_local(struct snd_pcm_substream *substream) { substream->group = &substream->self_group; INIT_LIST_HEAD(&substream->self_group.substreams); list_add_tail(&substream->link_list, &substream->self_group.substreams); } static int snd_pcm_unlink(struct snd_pcm_substream *substream) { struct snd_pcm_substream *s; int res = 0; down_write(&snd_pcm_link_rwsem); write_lock_irq(&snd_pcm_link_rwlock); if (!snd_pcm_stream_linked(substream)) { res = -EALREADY; goto _end; } list_del(&substream->link_list); substream->group->count--; if (substream->group->count == 1) { /* detach the last stream, too */ snd_pcm_group_for_each_entry(s, substream) { relink_to_local(s); break; } kfree(substream->group); } relink_to_local(substream); _end: write_unlock_irq(&snd_pcm_link_rwlock); up_write(&snd_pcm_link_rwsem); return res; } /* * hw configurator */ static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_interval t; snd_interval_mul(hw_param_interval_c(params, rule->deps[0]), hw_param_interval_c(params, rule->deps[1]), &t); return snd_interval_refine(hw_param_interval(params, rule->var), &t); } static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_interval t; snd_interval_div(hw_param_interval_c(params, rule->deps[0]), hw_param_interval_c(params, rule->deps[1]), &t); return snd_interval_refine(hw_param_interval(params, rule->var), &t); } static int 
snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
                        struct snd_pcm_hw_rule *rule)
{
    /* Constraint rule: target = deps[0] * deps[1] / k (k in private). */
    struct snd_interval t;
    snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
                         hw_param_interval_c(params, rule->deps[1]),
                         (unsigned long) rule->private, &t);
    return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

/* Constraint rule: target = deps[0] * k / deps[1] (k in private). */
static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
                                   struct snd_pcm_hw_rule *rule)
{
    struct snd_interval t;
    snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
                         (unsigned long) rule->private,
                         hw_param_interval_c(params, rule->deps[1]), &t);
    return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

/* Drop formats whose physical width lies outside the sample-bits interval. */
static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
                                  struct snd_pcm_hw_rule *rule)
{
    unsigned int k;
    struct snd_interval *i = hw_param_interval(params, rule->deps[0]);
    struct snd_mask m;
    struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
    snd_mask_any(&m);
    for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
        int bits;
        if (!snd_mask_test(mask, k))
            continue;
        bits = snd_pcm_format_physical_width(k);
        if (bits <= 0)
            continue; /* ignore invalid formats */
        if ((unsigned)bits < i->min || (unsigned)bits > i->max)
            snd_mask_reset(&m, k);
    }
    return snd_mask_refine(mask, &m);
}

/* Narrow the sample-bits interval to the widths of the allowed formats. */
static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
                                       struct snd_pcm_hw_rule *rule)
{
    struct snd_interval t;
    unsigned int k;
    t.min = UINT_MAX;
    t.max = 0;
    t.openmin = 0;
    t.openmax = 0;
    for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
        int bits;
        if (!snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
            continue;
        bits = snd_pcm_format_physical_width(k);
        if (bits <= 0)
            continue; /* ignore invalid formats */
        if (t.min > (unsigned)bits)
            t.min = bits;
        if (t.max < (unsigned)bits)
            t.max = bits;
    }
    t.integer = 1;
    return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

#if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12
#error "Change this table"
#endif

/* Table index must match the SNDRV_PCM_RATE_* bit positions (see above). */
static unsigned int rates[] = { 5512, 8000, 11025, 16000, 22050, 32000, 44100,
                                48000, 64000, 88200, 96000, 176400, 192000 };

const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
    .count = ARRAY_SIZE(rates),
    .list = rates,
};

/* Restrict the rate interval to the discrete rates the hardware supports. */
static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
                                struct snd_pcm_hw_rule *rule)
{
    struct snd_pcm_hardware *hw = rule->private;
    return snd_interval_list(hw_param_interval(params, rule->var),
                             snd_pcm_known_rates.count,
                             snd_pcm_known_rates.list, hw->rates);
}

/* Cap buffer bytes at the substream's static maximum. */
static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
                                            struct snd_pcm_hw_rule *rule)
{
    struct snd_interval t;
    struct snd_pcm_substream *substream = rule->private;
    t.min = 0;
    t.max = substream->buffer_bytes_max;
    t.openmin = 0;
    t.openmax = 0;
    t.integer = 1;
    return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
{
    /*
     * Set up the generic hw_params constraint space: widen every mask
     * and interval to "any", force integer-valued params, then register
     * the arithmetic rules that tie the parameters together (frame bits
     * = sample bits * channels, buffer = period * periods, time/size
     * conversions via the rate, etc.).
     */
    struct snd_pcm_runtime *runtime = substream->runtime;
    struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
    int k, err;

    for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
        snd_mask_any(constrs_mask(constrs, k));
    }

    for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
        snd_interval_any(constrs_interval(constrs, k));
    }

    snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
    snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
    snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
    snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
    snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));

    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
                              snd_pcm_hw_rule_format, NULL,
                              SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
                              snd_pcm_hw_rule_sample_bits, NULL,
                              SNDRV_PCM_HW_PARAM_FORMAT,
                              SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
                              snd_pcm_hw_rule_div, NULL,
                              SNDRV_PCM_HW_PARAM_FRAME_BITS,
                              SNDRV_PCM_HW_PARAM_CHANNELS, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
                              snd_pcm_hw_rule_mul, NULL,
                              SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
                              SNDRV_PCM_HW_PARAM_CHANNELS, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
                              snd_pcm_hw_rule_mulkdiv, (void*) 8,
                              SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                              SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
                              snd_pcm_hw_rule_mulkdiv, (void*) 8,
                              SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
                              SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
                              snd_pcm_hw_rule_div, NULL,
                              SNDRV_PCM_HW_PARAM_FRAME_BITS,
                              SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
                              snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
                              SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                              SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
                              snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
                              SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                              SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
                              snd_pcm_hw_rule_div, NULL,
                              SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                              SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                              snd_pcm_hw_rule_div, NULL,
                              SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                              SNDRV_PCM_HW_PARAM_PERIODS, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                              snd_pcm_hw_rule_mulkdiv, (void*) 8,
                              SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                              SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                              snd_pcm_hw_rule_muldivk, (void*) 1000000,
                              SNDRV_PCM_HW_PARAM_PERIOD_TIME,
                              SNDRV_PCM_HW_PARAM_RATE, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                              snd_pcm_hw_rule_mul, NULL,
                              SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                              SNDRV_PCM_HW_PARAM_PERIODS, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                              snd_pcm_hw_rule_mulkdiv, (void*) 8,
                              SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
                              SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                              snd_pcm_hw_rule_muldivk, (void*) 1000000,
                              SNDRV_PCM_HW_PARAM_BUFFER_TIME,
                              SNDRV_PCM_HW_PARAM_RATE, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                              snd_pcm_hw_rule_muldivk, (void*) 8,
                              SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                              SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
                              snd_pcm_hw_rule_muldivk, (void*) 8,
                              SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                              SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
                              snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
                              SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                              SNDRV_PCM_HW_PARAM_RATE, -1);
    if (err < 0)
        return err;
    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
                              snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
                              SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                              SNDRV_PCM_HW_PARAM_RATE, -1);
    if (err < 0)
        return err;
    return 0;
}

int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
{
    /*
     * Apply the hardware-specific limits (from runtime->hw, filled in
     * by the driver's open callback) on top of the generic constraint
     * space built by snd_pcm_hw_constraints_init().
     */
    struct snd_pcm_runtime *runtime = substream->runtime;
    struct snd_pcm_hardware *hw = &runtime->hw;
    int err;
    unsigned int mask = 0;

    /* build the access mask from the driver's info flags */
    if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
        mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
    if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
        mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
    if (hw->info & SNDRV_PCM_INFO_MMAP) {
        if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
            mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
        if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
            mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
        if (hw->info & SNDRV_PCM_INFO_COMPLEX)
            mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX;
    }
    err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
    if (err < 0)
        return err;

    err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
    if (err < 0)
        return err;

    err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD);
    if (err < 0)
        return err;

    err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
                                       hw->channels_min, hw->channels_max);
    if (err < 0)
        return err;

    err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
                                       hw->rate_min, hw->rate_max);
    if (err < 0)
        return err;

    err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                                       hw->period_bytes_min, hw->period_bytes_max);
    if (err < 0)
        return err;

    err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
                                       hw->periods_min, hw->periods_max);
    if (err < 0)
        return err;

    err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
                                       hw->period_bytes_min, hw->buffer_bytes_max);
    if (err < 0)
        return err;

    err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
                              snd_pcm_hw_rule_buffer_bytes_max, substream,
                              SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
    if (err < 0)
        return err;

    /* FIXME: remove */
    if (runtime->dma_bytes) {
        err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
        if (err < 0)
            return -EINVAL;
    }

    if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
                                  snd_pcm_hw_rule_rate, hw,
                                  SNDRV_PCM_HW_PARAM_RATE, -1);
        if (err < 0)
            return err;
    }

    /* FIXME: this belong to lowlevel */
    snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);

    return 0;
}

/* Default pcm_release hook: just unlink from any link group. */
static void pcm_release_private(struct snd_pcm_substream *substream)
{
    snd_pcm_unlink(substream);
}

void snd_pcm_release_substream(struct snd_pcm_substream *substream)
{
    /* Drop one reference; tear down hardware state on the last one. */
    substream->ref_count--;
    if (substream->ref_count > 0)
        return;

    snd_pcm_drop(substream);
    if (substream->hw_opened) {
        if (substream->ops->hw_free != NULL)
            substream->ops->hw_free(substream);
        substream->ops->close(substream);
        substream->hw_opened = 0;
    }
    if (pm_qos_request_active(&substream->latency_pm_qos_req))
        pm_qos_remove_request(&substream->latency_pm_qos_req);
    if (substream->pcm_release) {
        substream->pcm_release(substream);
        substream->pcm_release = NULL;
    }
    snd_pcm_detach_substream(substream);
}

EXPORT_SYMBOL(snd_pcm_release_substream);

int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
                           struct file *file,
                           struct snd_pcm_substream **rsubstream)
{
    /*
     * Attach and fully initialize a substream; a re-open of an already
     * attached substream (ref_count > 1) skips re-initialization.
     */
    struct snd_pcm_substream *substream;
    int err;

    err = snd_pcm_attach_substream(pcm, stream, file, &substream);
    if (err < 0)
        return err;
    if (substream->ref_count > 1) {
        *rsubstream = substream;
        return 0;
    }

    err = snd_pcm_hw_constraints_init(substream);
    if (err < 0) {
        snd_printd("snd_pcm_hw_constraints_init failed\n");
        goto error;
    }

    if (substream->ops == NULL) {
        snd_printd("cannot open back end PCMs directly\n");
        err = -ENODEV;
        goto error;
    }

    if ((err = substream->ops->open(substream)) < 0)
        goto error;

    substream->hw_opened = 1;

    err = snd_pcm_hw_constraints_complete(substream);
    if (err < 0) {
        snd_printd("snd_pcm_hw_constraints_complete failed\n");
        goto error;
    }

    *rsubstream = substream;
    return 0;

 error:
    snd_pcm_release_substream(substream);
    return err;
}

EXPORT_SYMBOL(snd_pcm_open_substream);

static int snd_pcm_open_file(struct file *file,
                             struct snd_pcm *pcm,
                             int stream)
{
    /* Open a substream and bind it to the file via a snd_pcm_file. */
    struct snd_pcm_file *pcm_file;
    struct snd_pcm_substream *substream;
    int err;

    err = snd_pcm_open_substream(pcm, stream, file, &substream);
    if (err < 0)
        return err;

    pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
    if (pcm_file == NULL) {
        snd_pcm_release_substream(substream);
        return -ENOMEM;
    }
    pcm_file->substream = substream;
    if (substream->ref_count == 1) {
        substream->file = pcm_file;
        substream->pcm_release = pcm_release_private;
    }
    file->private_data = pcm_file;

    return 0;
}

static int snd_pcm_playback_open(struct inode *inode, struct file *file)
{
    struct snd_pcm *pcm;
    int err = nonseekable_open(inode, file);
    if (err < 0)
        return err;
    pcm = snd_lookup_minor_data(iminor(inode),
                                SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
    return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
}

static int snd_pcm_capture_open(struct inode *inode, struct file *file)
{
    struct snd_pcm *pcm;
    int err = nonseekable_open(inode, file);
    if (err < 0)
        return err;
    pcm = snd_lookup_minor_data(iminor(inode),
                                SNDRV_DEVICE_TYPE_PCM_CAPTURE);
    return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
}

static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
{
    /*
     * Common open path: register the file with the card, then retry
     * snd_pcm_open_file() on -EAGAIN (device busy), sleeping on the
     * open wait queue between attempts unless O_NONBLOCK is set.
     */
    int err;
    wait_queue_t wait;

    if (pcm == NULL) {
        err = -ENODEV;
        goto __error1;
    }
    err = snd_card_file_add(pcm->card, file);
    if (err < 0)
        goto __error1;
    if (!try_module_get(pcm->card->module)) {
        err = -EFAULT;
        goto __error2;
    }
    init_waitqueue_entry(&wait, current);
    add_wait_queue(&pcm->open_wait, &wait);
    mutex_lock(&pcm->open_mutex);
    while (1) {
        err = snd_pcm_open_file(file, pcm, stream);
        if (err >= 0)
            break;
        if (err == -EAGAIN) {
            if (file->f_flags & O_NONBLOCK) {
                err = -EBUSY;
                break;
            }
        } else
            break;
        set_current_state(TASK_INTERRUPTIBLE);
        mutex_unlock(&pcm->open_mutex);
        schedule();
        mutex_lock(&pcm->open_mutex);
        if (signal_pending(current)) {
            err = -ERESTARTSYS;
            break;
        }
    }
    remove_wait_queue(&pcm->open_wait, &wait);
    mutex_unlock(&pcm->open_mutex);
    if (err < 0)
        goto __error;
    return err;

 __error:
    module_put(pcm->card->module);
 __error2:
    snd_card_file_remove(pcm->card, file);
 __error1:
    return err;
}

static int snd_pcm_release(struct inode *inode, struct file *file)
{
    /* Release the file's substream reference and wake any waiting opener. */
    struct snd_pcm *pcm;
    struct snd_pcm_substream *substream;
    struct snd_pcm_file *pcm_file;

    pcm_file = file->private_data;
    substream = pcm_file->substream;
    if (snd_BUG_ON(!substream))
        return -ENXIO;
    pcm = substream->pcm;
    mutex_lock(&pcm->open_mutex);
    snd_pcm_release_substream(substream);
    kfree(pcm_file);
    mutex_unlock(&pcm->open_mutex);
    wake_up(&pcm->open_wait);
    module_put(pcm->card->module);
    snd_card_file_remove(pcm->card, file);
    return 0;
}

static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,
                                                 snd_pcm_uframes_t frames)
{
    /*
     * Move the application pointer backwards by up to @frames,
     * clamped to the data still queued (hw_avail); returns the number
     * of frames actually rewound or a negative error.
     */
    struct snd_pcm_runtime *runtime = substream->runtime;
    snd_pcm_sframes_t appl_ptr;
    snd_pcm_sframes_t ret;
    snd_pcm_sframes_t hw_avail;

    if (frames == 0)
        return 0;

    pr_info("%s ++", __func__);
    snd_pcm_stream_lock_irq(substream);
    switch (runtime->status->state) {
    case SNDRV_PCM_STATE_PREPARED:
        break;
    case SNDRV_PCM_STATE_DRAINING:
    case SNDRV_PCM_STATE_RUNNING:
        if (snd_pcm_update_hw_ptr(substream) >= 0)
            break;
        /* Fall through */
    case SNDRV_PCM_STATE_XRUN:
        ret = -EPIPE;
        goto __end;
    case SNDRV_PCM_STATE_SUSPENDED:
        ret = -ESTRPIPE;
        goto __end;
    default:
        ret = -EBADFD;
        goto __end;
    }

    hw_avail = snd_pcm_playback_hw_avail(runtime);
    if (hw_avail <= 0) {
        ret = 0;
        goto __end;
    }
    if (frames > (snd_pcm_uframes_t)hw_avail)
        frames = hw_avail;
    appl_ptr = runtime->control->appl_ptr - frames;
    if (appl_ptr < 0)
        appl_ptr += runtime->boundary;   /* wrap around the boundary */
    runtime->control->appl_ptr = appl_ptr;
    ret = frames;
 __end:
    pr_info("%s --", __func__);
    snd_pcm_stream_unlock_irq(substream);
    return ret;
}

static snd_pcm_sframes_t snd_pcm_capture_rewind(struct snd_pcm_substream *substream,
                                                snd_pcm_uframes_t frames)
{
    /* Capture analogue of the playback rewind (uses capture hw_avail). */
    struct snd_pcm_runtime *runtime = substream->runtime;
    snd_pcm_sframes_t appl_ptr;
    snd_pcm_sframes_t ret;
    snd_pcm_sframes_t hw_avail;

    if (frames == 0)
        return 0;

    pr_info("%s ++", __func__);
    snd_pcm_stream_lock_irq(substream);
    switch (runtime->status->state) {
    case SNDRV_PCM_STATE_PREPARED:
    case SNDRV_PCM_STATE_DRAINING:
        break;
    case SNDRV_PCM_STATE_RUNNING:
        if (snd_pcm_update_hw_ptr(substream) >= 0)
            break;
        /* Fall through */
    case SNDRV_PCM_STATE_XRUN:
        ret = -EPIPE;
        goto __end;
    case SNDRV_PCM_STATE_SUSPENDED:
        ret = -ESTRPIPE;
        goto __end;
    default:
        ret = -EBADFD;
        goto __end;
    }

    hw_avail = snd_pcm_capture_hw_avail(runtime);
    if (hw_avail <= 0) {
        ret = 0;
        goto __end;
    }
    if (frames > (snd_pcm_uframes_t)hw_avail)
        frames = hw_avail;
    appl_ptr = runtime->control->appl_ptr - frames;
    if (appl_ptr < 0)
        appl_ptr += runtime->boundary;
    runtime->control->appl_ptr = appl_ptr;
    ret = frames;
 __end:
    pr_info("%s --", __func__);
    snd_pcm_stream_unlock_irq(substream);
    return ret;
}

static snd_pcm_sframes_t snd_pcm_playback_forward(struct snd_pcm_substream *substream,
                                                  snd_pcm_uframes_t frames)
{
    /* Skip the application pointer forward, clamped to writable space. */
    struct snd_pcm_runtime *runtime = substream->runtime;
    snd_pcm_sframes_t appl_ptr;
    snd_pcm_sframes_t ret;
    snd_pcm_sframes_t avail;

    if (frames == 0)
        return 0;

    pr_info("%s ++", __func__);
    snd_pcm_stream_lock_irq(substream);
    switch (runtime->status->state) {
    case SNDRV_PCM_STATE_PREPARED:
    case SNDRV_PCM_STATE_PAUSED:
        break;
    case SNDRV_PCM_STATE_DRAINING:
    case SNDRV_PCM_STATE_RUNNING:
        if (snd_pcm_update_hw_ptr(substream) >= 0)
            break;
        /* Fall through */
    case SNDRV_PCM_STATE_XRUN:
        ret = -EPIPE;
        goto __end;
    case SNDRV_PCM_STATE_SUSPENDED:
        ret = -ESTRPIPE;
        goto __end;
    default:
        ret = -EBADFD;
        goto __end;
    }

    avail = snd_pcm_playback_avail(runtime);
    if (avail <= 0) {
        ret = 0;
        goto __end;
    }
    if (frames > (snd_pcm_uframes_t)avail)
        frames = avail;
    appl_ptr = runtime->control->appl_ptr + frames;
    if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
        appl_ptr -= runtime->boundary;
snd_pcm_runtime *runtime = substream->runtime;
    int err;
    snd_pcm_sframes_t n = 0;

    pr_info("%s ++", __func__);
    snd_pcm_stream_lock_irq(substream);
    switch (runtime->status->state) {
    case SNDRV_PCM_STATE_DRAINING:
        /* draining is only meaningful for playback delay queries */
        if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
            goto __badfd;
    case SNDRV_PCM_STATE_RUNNING:
        if ((err = snd_pcm_update_hw_ptr(substream)) < 0)
            break;
        /* Fall through */
    case SNDRV_PCM_STATE_PREPARED:
    case SNDRV_PCM_STATE_SUSPENDED:
        err = 0;
        /* playback: frames still queued; capture: frames available */
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
            n = snd_pcm_playback_hw_avail(runtime);
        else
            n = snd_pcm_capture_avail(runtime);
        n += runtime->delay;
        break;
    case SNDRV_PCM_STATE_XRUN:
        err = -EPIPE;
        break;
    default:
    __badfd:
        err = -EBADFD;
        break;
    }
    pr_info("%s --", __func__);
    snd_pcm_stream_unlock_irq(substream);
    if (!err)
        if (put_user(n, res))
            err = -EFAULT;
    return err;
}

static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
                            struct snd_pcm_sync_ptr __user *_sync_ptr)
{
    /*
     * Exchange the mmap status/control records with user space: apply
     * the user's appl_ptr/avail_min unless the corresponding
     * SNDRV_PCM_SYNC_PTR_* flag says otherwise, and copy the kernel's
     * current status back.
     */
    struct snd_pcm_runtime *runtime = substream->runtime;
    struct snd_pcm_sync_ptr sync_ptr;
    volatile struct snd_pcm_mmap_status *status;
    volatile struct snd_pcm_mmap_control *control;
    int err;
    snd_pcm_uframes_t hw_avail;

    memset(&sync_ptr, 0, sizeof(sync_ptr));
    if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
        return -EFAULT;
    if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control)))
        return -EFAULT;
    status = runtime->status;
    control = runtime->control;
    if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
        err = snd_pcm_hwsync(substream);
        if (err < 0)
            return err;
    }
    snd_pcm_stream_lock_irq(substream);
    if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL))
        control->appl_ptr = sync_ptr.c.control.appl_ptr;
    else
        sync_ptr.c.control.appl_ptr = control->appl_ptr;
    if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
        control->avail_min = sync_ptr.c.control.avail_min;
    else
        sync_ptr.c.control.avail_min = control->avail_min;
    /*
     * NOTE(review): vendor extension — in non-DMA render mode, kick a
     * stopped renderer once enough data is queued; presumably set by
     * the low-level driver, confirm against the platform code.
     */
    if (runtime->render_flag & SNDRV_NON_DMA_MODE) {
        hw_avail = snd_pcm_playback_hw_avail(runtime);
        if ((hw_avail >= runtime->start_threshold) &&
            (runtime->render_flag & SNDRV_RENDER_STOPPED)) {
            if (substream->ops->restart)
                substream->ops->restart(substream);
        }
    }
    sync_ptr.s.status.state = status->state;
    sync_ptr.s.status.hw_ptr = status->hw_ptr;
    sync_ptr.s.status.tstamp = status->tstamp;
    sync_ptr.s.status.suspended_state = status->suspended_state;
    snd_pcm_stream_unlock_irq(substream);
    if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
        return -EFAULT;
    return 0;
}

static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
{
    /* Select the timestamp clock; anything non-MONOTONIC falls back to
     * gettimeofday. */
    struct snd_pcm_runtime *runtime = substream->runtime;
    int arg;

    if (get_user(arg, _arg))
        return -EFAULT;
    if (arg < 0 || arg > SNDRV_PCM_TSTAMP_TYPE_LAST)
        return -EINVAL;
    runtime->tstamp_type = SNDRV_PCM_TSTAMP_TYPE_GETTIMEOFDAY;
    if (arg == SNDRV_PCM_TSTAMP_TYPE_MONOTONIC)
        runtime->tstamp_type = SNDRV_PCM_TSTAMP_TYPE_MONOTONIC;
    return 0;
}

//htc audio ++
/* Vendor hook: pass the effect-enable request to the low-level driver. */
static int snd_pcm_enable_effect(struct snd_pcm_substream *substream, int __user *_arg)
{
    /* if substream is NULL, return error. */
    if (PCM_RUNTIME_CHECK(substream))
        return -ENXIO;
    pr_info("%s: is called\n", __func__);
    return substream->ops->ioctl(substream,
                                 SNDRV_PCM_IOCTL1_ENABLE_EFFECT, _arg);
}
//htc audio --

static int snd_pcm_common_ioctl1(struct file *file,
                                 struct snd_pcm_substream *substream,
                                 unsigned int cmd, void __user *arg)
{
    /* Dispatch ioctls common to playback and capture streams. */
    switch (cmd) {
    case SNDRV_PCM_IOCTL_PVERSION:
        return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
    case SNDRV_PCM_IOCTL_INFO:
        return snd_pcm_info_user(substream, arg);
    case SNDRV_PCM_IOCTL_TSTAMP:    /* just for compatibility */
        return 0;
    case SNDRV_PCM_IOCTL_TTSTAMP:
        return snd_pcm_tstamp(substream, arg);
    case SNDRV_PCM_IOCTL_HW_REFINE:
        return snd_pcm_hw_refine_user(substream, arg);
    case SNDRV_PCM_IOCTL_HW_PARAMS:
        return snd_pcm_hw_params_user(substream, arg);
    case SNDRV_PCM_IOCTL_HW_FREE:
        return snd_pcm_hw_free(substream);
    case SNDRV_PCM_IOCTL_SW_PARAMS:
        return snd_pcm_sw_params_user(substream, arg);
    case SNDRV_PCM_IOCTL_STATUS:
        return snd_pcm_status_user(substream, arg);
    case SNDRV_PCM_IOCTL_CHANNEL_INFO:
        return snd_pcm_channel_info_user(substream, arg);
    case SNDRV_PCM_IOCTL_PREPARE:
        return snd_pcm_prepare(substream, file);
    case SNDRV_PCM_IOCTL_RESET:
        return snd_pcm_reset(substream);
    case SNDRV_PCM_IOCTL_START:
        return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream, SNDRV_PCM_STATE_RUNNING);
    case SNDRV_PCM_IOCTL_LINK:
        return snd_pcm_link(substream, (int)(unsigned long) arg);
    case SNDRV_PCM_IOCTL_UNLINK:
        return snd_pcm_unlink(substream);
    case SNDRV_PCM_IOCTL_RESUME:
        return snd_pcm_resume(substream);
    case SNDRV_PCM_IOCTL_XRUN:
        return snd_pcm_xrun(substream);
    case SNDRV_PCM_IOCTL_HWSYNC:
        return snd_pcm_hwsync(substream);
    case SNDRV_PCM_IOCTL_DELAY:
        return snd_pcm_delay(substream, arg);
    case SNDRV_PCM_IOCTL_SYNC_PTR:
        return snd_pcm_sync_ptr(substream, arg);
#ifdef CONFIG_SND_SUPPORT_OLD_API
    case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
        return snd_pcm_hw_refine_old_user(substream, arg);
    case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
        return snd_pcm_hw_params_old_user(substream, arg);
#endif
    case SNDRV_PCM_IOCTL_DRAIN:
        return snd_pcm_drain(substream, file);
    case SNDRV_PCM_IOCTL_DROP:
        return snd_pcm_drop(substream);
    case SNDRV_PCM_IOCTL_PAUSE:
    {
        int res;
        pr_info("%s ++", __func__);
        snd_pcm_stream_lock_irq(substream);
        res = snd_pcm_pause(substream, (int)(unsigned long)arg);
        pr_info("%s --", __func__);
        snd_pcm_stream_unlock_irq(substream);
        return res;
    }
//htc audio ++
    case SNDRV_PCM_IOCTL_ENABLE_EFFECT:
        return snd_pcm_enable_effect(substream, arg);
//htc audio --
    /* compressed-audio ioctls are forwarded to the low-level driver */
    case SNDRV_COMPRESS_GET_CAPS:
    case SNDRV_COMPRESS_GET_CODEC_CAPS:
    case SNDRV_COMPRESS_SET_PARAMS:
    case SNDRV_COMPRESS_GET_PARAMS:
    case SNDRV_COMPRESS_TSTAMP:
    case SNDRV_COMPRESS_DRAIN:
    case SNDRV_COMPRESS_METADATA_MODE:
        return snd_compressed_ioctl(substream, cmd, arg);
    default:
        /* 'U'-magic ioctls are user/vendor defined */
        if (((cmd >> 8) & 0xff) == 'U')
            return snd_user_ioctl(substream, cmd, arg);
    }
    snd_printd("unknown ioctl = 0x%x\n", cmd);
    return -ENOTTY;
}

static int snd_pcm_playback_ioctl1(struct file *file,
                                   struct snd_pcm_substream *substream,
                                   unsigned int cmd, void __user *arg)
{
    /* Playback-specific ioctls; everything else goes to the common path. */
    if (snd_BUG_ON(!substream))
        return -ENXIO;
    if (snd_BUG_ON(substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
        return -EINVAL;
    switch (cmd) {
    case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
    {
        struct snd_xferi xferi;
        struct snd_xferi __user *_xferi = arg;
        struct snd_pcm_runtime *runtime = substream->runtime;
        snd_pcm_sframes_t result;
        if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
            return -EBADFD;
        if (put_user(0, &_xferi->result))
            return -EFAULT;
        if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
            return -EFAULT;
        result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
        __put_user(result, &_xferi->result);
        return result < 0 ? result : 0;
    }
    case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
    {
        struct snd_xfern xfern;
        struct snd_xfern __user *_xfern = arg;
        struct snd_pcm_runtime *runtime = substream->runtime;
        void __user **bufs;
        snd_pcm_sframes_t result;
        if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
            return -EBADFD;
        if (runtime->channels > 128)
            return -EINVAL;
        if (put_user(0, &_xfern->result))
            return -EFAULT;
        if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
            return -EFAULT;

        /* copy the per-channel buffer-pointer array from user space */
        bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
        if (IS_ERR(bufs))
            return PTR_ERR(bufs);
        result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
        kfree(bufs);
        __put_user(result, &_xfern->result);
        return result < 0 ? result : 0;
    }
    case SNDRV_PCM_IOCTL_REWIND:
    {
        snd_pcm_uframes_t frames;
        snd_pcm_uframes_t __user *_frames = arg;
        snd_pcm_sframes_t result;
        if (get_user(frames, _frames))
            return -EFAULT;
        if (put_user(0, _frames))
            return -EFAULT;
        result = snd_pcm_playback_rewind(substream, frames);
        __put_user(result, _frames);
        return result < 0 ? result : 0;
    }
    case SNDRV_PCM_IOCTL_FORWARD:
    {
        snd_pcm_uframes_t frames;
        snd_pcm_uframes_t __user *_frames = arg;
        snd_pcm_sframes_t result;
        if (get_user(frames, _frames))
            return -EFAULT;
        if (put_user(0, _frames))
            return -EFAULT;
        result = snd_pcm_playback_forward(substream, frames);
        __put_user(result, _frames);
        return result < 0 ? result : 0;
    }
    }
    return snd_pcm_common_ioctl1(file, substream, cmd, arg);
}

static int snd_pcm_capture_ioctl1(struct file *file,
                                  struct snd_pcm_substream *substream,
                                  unsigned int cmd, void __user *arg)
{
    /* Capture-specific ioctls; everything else goes to the common path. */
    if (snd_BUG_ON(!substream))
        return -ENXIO;
    if (snd_BUG_ON(substream->stream != SNDRV_PCM_STREAM_CAPTURE))
        return -EINVAL;
    switch (cmd) {
    case SNDRV_PCM_IOCTL_READI_FRAMES:
    {
        struct snd_xferi xferi;
        struct snd_xferi __user *_xferi = arg;
        struct snd_pcm_runtime *runtime = substream->runtime;
        snd_pcm_sframes_t result;
        if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
            return -EBADFD;
        if (put_user(0, &_xferi->result))
            return -EFAULT;
        if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
            return -EFAULT;
        result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
        __put_user(result, &_xferi->result);
        return result < 0 ? result : 0;
    }
    case SNDRV_PCM_IOCTL_READN_FRAMES:
    {
        struct snd_xfern xfern;
        struct snd_xfern __user *_xfern = arg;
        struct snd_pcm_runtime *runtime = substream->runtime;
        void *bufs;
        snd_pcm_sframes_t result;
        if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
            return -EBADFD;
        if (runtime->channels > 128)
            return -EINVAL;
        if (put_user(0, &_xfern->result))
            return -EFAULT;
        if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
            return -EFAULT;

        bufs = memdup_user(xfern.bufs, sizeof(void *) * runtime->channels);
        if (IS_ERR(bufs))
            return PTR_ERR(bufs);
        result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
        kfree(bufs);
        __put_user(result, &_xfern->result);
        return result < 0 ? result : 0;
    }
    case SNDRV_PCM_IOCTL_REWIND:
    {
        snd_pcm_uframes_t frames;
        snd_pcm_uframes_t __user *_frames = arg;
        snd_pcm_sframes_t result;
        if (get_user(frames, _frames))
            return -EFAULT;
        if (put_user(0, _frames))
            return -EFAULT;
        result = snd_pcm_capture_rewind(substream, frames);
        __put_user(result, _frames);
        return result < 0 ? result : 0;
    }
    case SNDRV_PCM_IOCTL_FORWARD:
    {
        snd_pcm_uframes_t frames;
        snd_pcm_uframes_t __user *_frames = arg;
        snd_pcm_sframes_t result;
        if (get_user(frames, _frames))
            return -EFAULT;
        if (put_user(0, _frames))
            return -EFAULT;
        result = snd_pcm_capture_forward(substream, frames);
        __put_user(result, _frames);
        return result < 0 ?
result : 0;
    }
    }
    return snd_pcm_common_ioctl1(file, substream, cmd, arg);
}

static long snd_pcm_playback_ioctl(struct file *file, unsigned int cmd,
                                   unsigned long arg)
{
    /* file_operations entry: accept ALSA ('A') and compress ('C') magics. */
    struct snd_pcm_file *pcm_file;

    pcm_file = file->private_data;

    if ((((cmd >> 8) & 0xff) != 'A') &&
            (((cmd >> 8) & 0xff) != 'C'))
        return -ENOTTY;

    return snd_pcm_playback_ioctl1(file, pcm_file->substream, cmd,
                                   (void __user *)arg);
}

static long snd_pcm_capture_ioctl(struct file *file, unsigned int cmd,
                                  unsigned long arg)
{
    /* Capture entry additionally allows user-defined ('U') ioctls. */
    struct snd_pcm_file *pcm_file;
    unsigned char ioctl_magic;

    pcm_file = file->private_data;
    ioctl_magic = ((cmd >> 8) & 0xff);

    if (ioctl_magic != 'A' && ioctl_magic != 'C' && ioctl_magic != 'U')
        return -ENOTTY;

    return snd_pcm_capture_ioctl1(file, pcm_file->substream, cmd,
                                  (void __user *)arg);
}

int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
                         unsigned int cmd, void *arg)
{
    /* In-kernel ioctl entry: temporarily lift the user-space address
     * limit so the __user-typed paths accept kernel pointers. */
    mm_segment_t fs;
    int result;

    fs = snd_enter_user();
    switch (substream->stream) {
    case SNDRV_PCM_STREAM_PLAYBACK:
        result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
                                         (void __user *)arg);
        break;
    case SNDRV_PCM_STREAM_CAPTURE:
        result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
                                        (void __user *)arg);
        break;
    default:
        result = -EINVAL;
        break;
    }
    snd_leave_user(fs);
    return result;
}

EXPORT_SYMBOL(snd_pcm_kernel_ioctl);

static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
                            loff_t * offset)
{
    /* read(2): byte count must be frame-aligned; returns bytes read. */
    struct snd_pcm_file *pcm_file;
    struct snd_pcm_substream *substream;
    struct snd_pcm_runtime *runtime;
    snd_pcm_sframes_t result;

    pcm_file = file->private_data;
    substream = pcm_file->substream;
    if (PCM_RUNTIME_CHECK(substream))
        return -ENXIO;
    runtime = substream->runtime;
    if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
        return -EBADFD;
    if (!frame_aligned(runtime, count))
        return -EINVAL;
    count = bytes_to_frames(runtime, count);
    result = snd_pcm_lib_read(substream, buf, count);
    if (result > 0)
        result = frames_to_bytes(runtime, result);
    return result;
}

static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
                             size_t count, loff_t * offset)
{
    /* write(2): byte count must be frame-aligned; returns bytes written. */
    struct snd_pcm_file *pcm_file;
    struct snd_pcm_substream *substream;
    struct snd_pcm_runtime *runtime;
    snd_pcm_sframes_t result;

    pcm_file = file->private_data;
    substream = pcm_file->substream;
    if (PCM_RUNTIME_CHECK(substream))
        return -ENXIO;
    runtime = substream->runtime;
    if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
        return -EBADFD;
    if (!frame_aligned(runtime, count))
        return -EINVAL;
    count = bytes_to_frames(runtime, count);
    result = snd_pcm_lib_write(substream, buf, count);
    if (result > 0)
        result = frames_to_bytes(runtime, result);
    return result;
}

static ssize_t snd_pcm_aio_read(struct kiocb *iocb, const struct iovec *iov,
                                unsigned long nr_segs, loff_t pos)
{
    /* Vectored read: one iovec per channel (non-interleaved). */
    struct snd_pcm_file *pcm_file;
    struct snd_pcm_substream *substream;
    struct snd_pcm_runtime *runtime;
    snd_pcm_sframes_t result;
    unsigned long i;
    void __user **bufs;
    snd_pcm_uframes_t frames;

    pcm_file = iocb->ki_filp->private_data;
    substream = pcm_file->substream;
    if (PCM_RUNTIME_CHECK(substream))
        return -ENXIO;
    runtime = substream->runtime;
    if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
        return -EBADFD;
    if (nr_segs > 1024 || nr_segs != runtime->channels)
        return -EINVAL;
    if (!frame_aligned(runtime, iov->iov_len))
        return -EINVAL;
    frames = bytes_to_samples(runtime, iov->iov_len);
    bufs = kmalloc(sizeof(void *) * nr_segs, GFP_KERNEL);
    if (bufs == NULL)
        return -ENOMEM;
    for (i = 0; i < nr_segs; ++i)
        bufs[i] = iov[i].iov_base;
    result = snd_pcm_lib_readv(substream, bufs, frames);
    if (result > 0)
        result = frames_to_bytes(runtime, result);
    kfree(bufs);
    return result;
}

static ssize_t snd_pcm_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                 unsigned long nr_segs, loff_t pos)
{
    /* Vectored write: one iovec per channel (non-interleaved). */
    struct snd_pcm_file *pcm_file;
    struct snd_pcm_substream *substream;
    struct snd_pcm_runtime *runtime;
    snd_pcm_sframes_t result;
    unsigned long i;
    void __user **bufs;
    snd_pcm_uframes_t frames;

    pcm_file = iocb->ki_filp->private_data;
    substream = pcm_file->substream;
    if (PCM_RUNTIME_CHECK(substream))
        return -ENXIO;
    runtime = substream->runtime;
    if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
        return -EBADFD;
    if (nr_segs > 128 || nr_segs != runtime->channels ||
        !frame_aligned(runtime, iov->iov_len))
        return -EINVAL;
    frames = bytes_to_samples(runtime, iov->iov_len);
    bufs = kmalloc(sizeof(void *) * nr_segs, GFP_KERNEL);
    if (bufs == NULL)
        return -ENOMEM;
    for (i = 0; i < nr_segs; ++i)
        bufs[i] = iov[i].iov_base;
    result = snd_pcm_lib_writev(substream, bufs, frames);
    if (result > 0)
        result = frames_to_bytes(runtime, result);
    kfree(bufs);
    return result;
}

static unsigned int snd_pcm_playback_poll(struct file *file, poll_table * wait)
{
    /* poll(2): writable when at least avail_min frames of space exist. */
    struct snd_pcm_file *pcm_file;
    struct snd_pcm_substream *substream;
    struct snd_pcm_runtime *runtime;
    unsigned int mask;
    snd_pcm_uframes_t avail;

    pcm_file = file->private_data;

    substream = pcm_file->substream;
    if (PCM_RUNTIME_CHECK(substream))
        return -ENXIO;
    runtime = substream->runtime;

    poll_wait(file, &runtime->sleep, wait);

    snd_pcm_stream_lock_irq(substream);
    avail = snd_pcm_playback_avail(runtime);
    switch (runtime->status->state) {
    case SNDRV_PCM_STATE_RUNNING:
    case SNDRV_PCM_STATE_PREPARED:
    case SNDRV_PCM_STATE_PAUSED:
        if (avail >= runtime->control->avail_min) {
            mask = POLLOUT | POLLWRNORM;
            break;
        }
        /* Fall through */
    case SNDRV_PCM_STATE_DRAINING:
        mask = 0;
        break;
    default:
        mask = POLLOUT | POLLWRNORM | POLLERR;
        break;
    }
    snd_pcm_stream_unlock_irq(substream);
    return mask;
}

static unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait)
{
    /* poll(2): readable when at least avail_min frames are captured. */
    struct snd_pcm_file *pcm_file;
    struct snd_pcm_substream *substream;
    struct snd_pcm_runtime *runtime;
    unsigned int mask;
    snd_pcm_uframes_t avail;

    pcm_file = file->private_data;

    substream = pcm_file->substream;
    if (PCM_RUNTIME_CHECK(substream))
        return -ENXIO;
    runtime = substream->runtime;

    poll_wait(file, &runtime->sleep, wait);

    snd_pcm_stream_lock_irq(substream);
    avail = snd_pcm_capture_avail(runtime);
    switch (runtime->status->state) {
    case
SNDRV_PCM_STATE_RUNNING: case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_PAUSED: if (avail >= runtime->control->avail_min) { mask = POLLIN | POLLRDNORM; break; } mask = 0; break; case SNDRV_PCM_STATE_DRAINING: if (avail > 0) { mask = POLLIN | POLLRDNORM; break; } /* Fall through */ default: mask = POLLIN | POLLRDNORM | POLLERR; break; } snd_pcm_stream_unlock_irq(substream); return mask; } /* * mmap support */ /* * Only on coherent architectures, we can mmap the status and the control records * for effcient data transfer. On others, we have to use HWSYNC ioctl... */ #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA) /* * mmap status record */ static int snd_pcm_mmap_status_fault(struct vm_area_struct *area, struct vm_fault *vmf) { struct snd_pcm_substream *substream = area->vm_private_data; struct snd_pcm_runtime *runtime; if (substream == NULL) return VM_FAULT_SIGBUS; runtime = substream->runtime; vmf->page = virt_to_page(runtime->status); get_page(vmf->page); return 0; } static const struct vm_operations_struct snd_pcm_vm_ops_status = { .fault = snd_pcm_mmap_status_fault, }; static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { long size; if (!(area->vm_flags & VM_READ)) return -EINVAL; size = area->vm_end - area->vm_start; if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status))) return -EINVAL; area->vm_ops = &snd_pcm_vm_ops_status; area->vm_private_data = substream; area->vm_flags |= VM_RESERVED; return 0; } /* * mmap control record */ static int snd_pcm_mmap_control_fault(struct vm_area_struct *area, struct vm_fault *vmf) { struct snd_pcm_substream *substream = area->vm_private_data; struct snd_pcm_runtime *runtime; if (substream == NULL) return VM_FAULT_SIGBUS; runtime = substream->runtime; vmf->page = virt_to_page(runtime->control); get_page(vmf->page); return 0; } static const struct vm_operations_struct snd_pcm_vm_ops_control = { .fault = snd_pcm_mmap_control_fault, 
}; static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { long size; if (!(area->vm_flags & VM_READ)) return -EINVAL; size = area->vm_end - area->vm_start; if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control))) return -EINVAL; area->vm_ops = &snd_pcm_vm_ops_control; area->vm_private_data = substream; area->vm_flags |= VM_RESERVED; return 0; } #else /* ! coherent mmap */ /* * don't support mmap for status and control records. */ static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { return -ENXIO; } static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { return -ENXIO; } #endif /* coherent mmap */ static inline struct page * snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs) { void *vaddr = substream->runtime->dma_area + ofs; #if defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT) if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) return virt_to_page(CAC_ADDR(vaddr)); #endif #if defined(CONFIG_PPC32) && defined(CONFIG_NOT_COHERENT_CACHE) if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) { dma_addr_t addr = substream->runtime->dma_addr + ofs; addr -= get_dma_offset(substream->dma_buffer.dev.dev); /* assume dma_handle set via pfn_to_phys() in * mm/dma-noncoherent.c */ return pfn_to_page(addr >> PAGE_SHIFT); } #endif return virt_to_page(vaddr); } /* * fault callback for mmapping a RAM page */ static int snd_pcm_mmap_data_fault(struct vm_area_struct *area, struct vm_fault *vmf) { struct snd_pcm_substream *substream = area->vm_private_data; struct snd_pcm_runtime *runtime; unsigned long offset; struct page * page; size_t dma_bytes; if (substream == NULL) return VM_FAULT_SIGBUS; runtime = substream->runtime; offset = vmf->pgoff << PAGE_SHIFT; dma_bytes = PAGE_ALIGN(runtime->dma_bytes); if (offset > dma_bytes - PAGE_SIZE) return 
VM_FAULT_SIGBUS; if (substream->ops->page) page = substream->ops->page(substream, offset); else page = snd_pcm_default_page_ops(substream, offset); if (!page) return VM_FAULT_SIGBUS; get_page(page); vmf->page = page; return 0; } static const struct vm_operations_struct snd_pcm_vm_ops_data = { .open = snd_pcm_mmap_data_open, .close = snd_pcm_mmap_data_close, }; static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = { .open = snd_pcm_mmap_data_open, .close = snd_pcm_mmap_data_close, .fault = snd_pcm_mmap_data_fault, }; #ifndef ARCH_HAS_DMA_MMAP_COHERENT /* This should be defined / handled globally! */ #ifdef CONFIG_ARM #define ARCH_HAS_DMA_MMAP_COHERENT #endif #endif /* * mmap the DMA buffer on RAM */ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *area) { area->vm_flags |= VM_RESERVED; #ifdef ARCH_HAS_DMA_MMAP_COHERENT if (!substream->ops->page && substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) return dma_mmap_coherent(substream->dma_buffer.dev.dev, area, substream->runtime->dma_area, substream->runtime->dma_addr, area->vm_end - area->vm_start); #elif defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT) if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV && !plat_device_is_coherent(substream->dma_buffer.dev.dev)) area->vm_page_prot = pgprot_noncached(area->vm_page_prot); #endif /* ARCH_HAS_DMA_MMAP_COHERENT */ /* mmap with fault handler */ area->vm_ops = &snd_pcm_vm_ops_data_fault; return 0; } EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap); /* * mmap the DMA buffer on I/O memory area */ #if SNDRV_PCM_INFO_MMAP_IOMEM int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_struct *area) { long size; unsigned long offset; area->vm_page_prot = pgprot_noncached(area->vm_page_prot); area->vm_flags |= VM_IO; size = area->vm_end - area->vm_start; offset = area->vm_pgoff << PAGE_SHIFT; if (io_remap_pfn_range(area, area->vm_start, (substream->runtime->dma_addr + offset) >> PAGE_SHIFT, 
size, area->vm_page_prot)) return -EAGAIN; return 0; } EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem); #endif /* SNDRV_PCM_INFO_MMAP */ /* * mmap DMA buffer */ int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area) { struct snd_pcm_runtime *runtime; long size; unsigned long offset; size_t dma_bytes; int err; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { if (!(area->vm_flags & (VM_WRITE|VM_READ))) return -EINVAL; } else { if (!(area->vm_flags & VM_READ)) return -EINVAL; } runtime = substream->runtime; if (runtime->status->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) return -ENXIO; if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED || runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) return -EINVAL; size = area->vm_end - area->vm_start; offset = area->vm_pgoff << PAGE_SHIFT; dma_bytes = PAGE_ALIGN(runtime->dma_bytes); if ((size_t)size > dma_bytes) return -EINVAL; if (offset > dma_bytes - size) return -EINVAL; area->vm_ops = &snd_pcm_vm_ops_data; area->vm_private_data = substream; if (substream->ops->mmap) err = substream->ops->mmap(substream, area); else err = snd_pcm_lib_default_mmap(substream, area); if (!err) atomic_inc(&substream->mmap_count); return err; } EXPORT_SYMBOL(snd_pcm_mmap_data); static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area) { struct snd_pcm_file * pcm_file; struct snd_pcm_substream *substream; unsigned long offset; pcm_file = file->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; offset = area->vm_pgoff << PAGE_SHIFT; switch (offset) { case SNDRV_PCM_MMAP_OFFSET_STATUS: if (pcm_file->no_compat_mmap) return -ENXIO; return snd_pcm_mmap_status(substream, file, area); case SNDRV_PCM_MMAP_OFFSET_CONTROL: if (pcm_file->no_compat_mmap) return -ENXIO; return snd_pcm_mmap_control(substream, file, area); default: return snd_pcm_mmap_data(substream, file, area); } return 0; } static int 
snd_pcm_fasync(int fd, struct file * file, int on) { struct snd_pcm_file * pcm_file; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; pcm_file = file->private_data; substream = pcm_file->substream; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; return fasync_helper(fd, file, on, &runtime->fasync); } /* * ioctl32 compat */ #ifdef CONFIG_COMPAT #include "pcm_compat.c" #else #define snd_pcm_ioctl_compat NULL #endif /* * To be removed helpers to keep binary compatibility */ #ifdef CONFIG_SND_SUPPORT_OLD_API #define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5)) #define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5)) static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params, struct snd_pcm_hw_params_old *oparams) { unsigned int i; memset(params, 0, sizeof(*params)); params->flags = oparams->flags; for (i = 0; i < ARRAY_SIZE(oparams->masks); i++) params->masks[i].bits[0] = oparams->masks[i]; memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals)); params->rmask = __OLD_TO_NEW_MASK(oparams->rmask); params->cmask = __OLD_TO_NEW_MASK(oparams->cmask); params->info = oparams->info; params->msbits = oparams->msbits; params->rate_num = oparams->rate_num; params->rate_den = oparams->rate_den; params->fifo_size = oparams->fifo_size; } static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams, struct snd_pcm_hw_params *params) { unsigned int i; memset(oparams, 0, sizeof(*oparams)); oparams->flags = params->flags; for (i = 0; i < ARRAY_SIZE(oparams->masks); i++) oparams->masks[i] = params->masks[i].bits[0]; memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals)); oparams->rmask = __NEW_TO_OLD_MASK(params->rmask); oparams->cmask = __NEW_TO_OLD_MASK(params->cmask); oparams->info = params->info; oparams->msbits = params->msbits; oparams->rate_num = params->rate_num; oparams->rate_den = params->rate_den; oparams->fifo_size = params->fifo_size; } 
static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params_old __user * _oparams) { struct snd_pcm_hw_params *params; struct snd_pcm_hw_params_old *oparams = NULL; int err; params = kmalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; oparams = memdup_user(_oparams, sizeof(*oparams)); if (IS_ERR(oparams)) { err = PTR_ERR(oparams); goto out; } snd_pcm_hw_convert_from_old_params(params, oparams); err = snd_pcm_hw_refine(substream, params); snd_pcm_hw_convert_to_old_params(oparams, params); if (copy_to_user(_oparams, oparams, sizeof(*oparams))) { if (!err) err = -EFAULT; } kfree(oparams); out: kfree(params); return err; } static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream, struct snd_pcm_hw_params_old __user * _oparams) { struct snd_pcm_hw_params *params; struct snd_pcm_hw_params_old *oparams = NULL; int err; params = kmalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; oparams = memdup_user(_oparams, sizeof(*oparams)); if (IS_ERR(oparams)) { err = PTR_ERR(oparams); goto out; } snd_pcm_hw_convert_from_old_params(params, oparams); err = snd_pcm_hw_params(substream, params); snd_pcm_hw_convert_to_old_params(oparams, params); if (copy_to_user(_oparams, oparams, sizeof(*oparams))) { if (!err) err = -EFAULT; } kfree(oparams); out: kfree(params); return err; } #endif /* CONFIG_SND_SUPPORT_OLD_API */ #ifndef CONFIG_MMU static unsigned long snd_pcm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct snd_pcm_file *pcm_file = file->private_data; struct snd_pcm_substream *substream = pcm_file->substream; struct snd_pcm_runtime *runtime = substream->runtime; unsigned long offset = pgoff << PAGE_SHIFT; switch (offset) { case SNDRV_PCM_MMAP_OFFSET_STATUS: return (unsigned long)runtime->status; case SNDRV_PCM_MMAP_OFFSET_CONTROL: return (unsigned long)runtime->control; default: return (unsigned long)runtime->dma_area + 
offset; } } #else # define snd_pcm_get_unmapped_area NULL #endif /* * Register section */ const struct file_operations snd_pcm_f_ops[2] = { { .owner = THIS_MODULE, .write = snd_pcm_write, .aio_write = snd_pcm_aio_write, .open = snd_pcm_playback_open, .release = snd_pcm_release, .llseek = no_llseek, .poll = snd_pcm_playback_poll, .unlocked_ioctl = snd_pcm_playback_ioctl, .compat_ioctl = snd_pcm_ioctl_compat, .mmap = snd_pcm_mmap, .fasync = snd_pcm_fasync, .get_unmapped_area = snd_pcm_get_unmapped_area, }, { .owner = THIS_MODULE, .read = snd_pcm_read, .aio_read = snd_pcm_aio_read, .open = snd_pcm_capture_open, .release = snd_pcm_release, .llseek = no_llseek, .poll = snd_pcm_capture_poll, .unlocked_ioctl = snd_pcm_capture_ioctl, .compat_ioctl = snd_pcm_ioctl_compat, .mmap = snd_pcm_mmap, .fasync = snd_pcm_fasync, .get_unmapped_area = snd_pcm_get_unmapped_area, } };
alexey6600/M8_Sense_7.00
sound/core/pcm_native.c
C
gpl-2.0
100,040
source: API/PortGroups.md ### `get_graph_by_portgroup` Get the graph based on the group type. Route: `/api/v0/devices/portgroups/:group` - group is the type of port group graph you want, I.e Transit, Peering, etc. You can specify multiple types comma separated. Input: - from: This is the date you would like the graph to start - See http://oss.oetiker.ch/rrdtool/doc/rrdgraph.en.html for more information. - to: This is the date you would like the graph to end - See http://oss.oetiker.ch/rrdtool/doc/rrdgraph.en.html for more information. - width: The graph width, defaults to 1075. - height: The graph height, defaults to 300. Example: ```curl curl -H 'X-Auth-Token: YOURAPITOKENHERE' https://librenms.org/api/v0/portgroups/transit,peering ``` Output: Output is an image. ### `get_graph_by_portgroup_multiport_bits` Get the graph based on the multiple port id separated by commas `,`. Route: `/api/v0/devices/portgroups/multiport/bits/:id` - id is a comma separated list of port ids you want, I.e 1,2,3,4, etc. You can specify multiple IDs comma separated. Input: - from: This is the date you would like the graph to start - See http://oss.oetiker.ch/rrdtool/doc/rrdgraph.en.html for more information. - to: This is the date you would like the graph to end - See http://oss.oetiker.ch/rrdtool/doc/rrdgraph.en.html for more information. - width: The graph width, defaults to 1075. - height: The graph height, defaults to 300. Example: ```curl curl -H 'X-Auth-Token: YOURAPITOKENHERE' https://librenms.org/api/v0/portgroups/multiport/bits/1,2,3 ``` Output: Output is an image.
wiad/librenms
doc/API/PortGroups.md
Markdown
gpl-3.0
1,616
/* -*- c++ -*- */ /* * Copyright 2002,2012 Free Software Foundation, Inc. * * This file is part of GNU Radio * * GNU Radio is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3, or (at your option) * any later version. * * GNU Radio is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU Radio; see the file COPYING. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, * Boston, MA 02110-1301, USA. */ #ifndef INCLUDED_IIR_FILTER_H #define INCLUDED_IIR_FILTER_H #include <gnuradio/filter/api.h> #include <gnuradio/gr_complex.h> #include <vector> #include <stdexcept> namespace gr { namespace filter { namespace kernel { /*! * \brief Base class template for Infinite Impulse Response filter (IIR) * * \details * * This class provides a templated kernel for IIR filters. These * iir_filters can be instantiated with a set of feed-forward * and feed-back taps in the constructor. We then call the * iir_filter::filter function to add a new sample to the * filter, or iir_filter::filter_n to add a vector of samples to * be filtered. * * Instantiating a filter means defining the templates for the * data types being processed by the filter. There are four templates: * * \li i_type the data type of the input data (i.e., float). * \li o_type the data type of the output data (i.e., float). * \li tap_type the data type of the filter taps (i.e., double). * \li acc_type the data type of the internal accumulator (i.e., double). * * The acc_type is specified to control how data is handled * internally in the filter. This should always be the highest * precision data type of any of the first three. 
Often, IIR * filters require double-precision values in the taps for * stability, and so the internal accumulator should also be * double precision. * * Example: * * \code * gr::filter::kernel::iir_filter<float,float,double,double> iir_filt(fftaps, fbtaps); * ... * float y = iir_filt.filter(x); * * <or> * * iir_filt.filter(y, x, N); // y and x are float arrays * \endcode * * Another example for handling complex samples with * double-precision taps (see filter::iir_filter_ccz): * * \code * gr:;filter::kernel::iir_filter<gr_complex, gr_complex, gr_complexd, gr_complexd> iir_filt(fftaps, fbtaps); * \endcode */ template<class i_type, class o_type, class tap_type, class acc_type> class FILTER_API iir_filter { public: /*! * \brief Construct an IIR with the given taps. * * This filter uses the Direct Form I implementation, where * \p fftaps contains the feed-forward taps, and \p fbtaps the feedback ones. * * \p fftaps and \p fbtaps must have equal numbers of taps * * \p oldstyle: The old style of the IIR filter uses feedback * taps that are negative of what most definitions use (scipy * and Matlab among them). This parameter keeps using the old * GNU Radio style and is set to TRUE by default. When taps * generated from scipy, Matlab, or gr_filter_design, use the * new style by setting this to FALSE. * * The input and output satisfy a difference equation of the form \f[ y[n] \pm \sum_{k=1}^{M} a_k y[n-k] = \sum_{k=0}^{N} b_k x[n-k] \f] * with the corresponding rational system function \f[ H(z) = \frac{\sum_{k=0}^{N} b_k z^{-k}}{1 \pm \sum_{k=1}^{M} a_k z^{-k}} \f] */ iir_filter(const std::vector<tap_type>& fftaps, const std::vector<tap_type>& fbtaps, bool oldstyle=true) throw (std::invalid_argument) { d_oldstyle = oldstyle; set_taps(fftaps, fbtaps); } iir_filter() : d_latest_n(0),d_latest_m(0) { } ~iir_filter() {} /*! * \brief compute a single output value. * \returns the filtered input value. */ o_type filter(const i_type input); /*! 
* \brief compute an array of N output values. * \p input must have N valid entries. */ void filter_n(o_type output[], const i_type input[], long n); /*! * \return number of taps in filter. */ unsigned ntaps_ff() const { return d_fftaps.size(); } unsigned ntaps_fb() const { return d_fbtaps.size(); } /*! * \brief install new taps. */ void set_taps(const std::vector<tap_type> &fftaps, const std::vector<tap_type> &fbtaps) throw (std::invalid_argument) { d_latest_n = 0; d_latest_m = 0; d_fftaps = fftaps; if(d_oldstyle) { d_fbtaps = fbtaps; } else { // New style negates taps a[1:N-1] to fit with most IIR // tap generating programs. d_fbtaps.resize(fbtaps.size()); d_fbtaps[0] = fbtaps[0]; for(size_t i = 1; i < fbtaps.size(); i++) { d_fbtaps[i] = -fbtaps[i]; } } int n = fftaps.size(); int m = fbtaps.size(); d_prev_input.clear(); d_prev_output.clear(); d_prev_input.resize(2 * n, 0); d_prev_output.resize(2 * m, 0); } protected: bool d_oldstyle; std::vector<tap_type> d_fftaps; std::vector<tap_type> d_fbtaps; int d_latest_n; int d_latest_m; std::vector<acc_type> d_prev_output; std::vector<i_type> d_prev_input; }; // // general case. 
We may want to specialize this // template<class i_type, class o_type, class tap_type, class acc_type> o_type iir_filter<i_type, o_type, tap_type, acc_type>::filter(const i_type input) { acc_type acc; unsigned i = 0; unsigned n = ntaps_ff(); unsigned m = ntaps_fb(); if(n == 0) return (o_type)0; int latest_n = d_latest_n; int latest_m = d_latest_m; acc = d_fftaps[0] * input; for(i = 1; i < n; i ++) acc += (d_fftaps[i] * d_prev_input[latest_n + i]); for(i = 1; i < m; i ++) acc += (d_fbtaps[i] * d_prev_output[latest_m + i]); // store the values twice to avoid having to handle wrap-around in the loop d_prev_output[latest_m] = acc; d_prev_output[latest_m+m] = acc; d_prev_input[latest_n] = input; d_prev_input[latest_n+n] = input; latest_n--; latest_m--; if(latest_n < 0) latest_n += n; if(latest_m < 0) latest_m += m; d_latest_m = latest_m; d_latest_n = latest_n; return (o_type)acc; } template<class i_type, class o_type, class tap_type, class acc_type> void iir_filter<i_type, o_type, tap_type, acc_type>::filter_n(o_type output[], const i_type input[], long n) { for(int i = 0; i < n; i++) output[i] = filter(input[i]); } template<> gr_complex iir_filter<gr_complex, gr_complex, float, gr_complex>::filter(const gr_complex input); template<> gr_complex iir_filter<gr_complex, gr_complex, double, gr_complexd>::filter(const gr_complex input); template<> gr_complex iir_filter<gr_complex, gr_complex, gr_complexd, gr_complexd>::filter(const gr_complex input); } /* namespace kernel */ } /* namespace filter */ } /* namespace gr */ #endif /* INCLUDED_IIR_FILTER_H */
analogdevicesinc/gnuradio
gr-filter/include/gnuradio/filter/iir_filter.h
C
gpl-3.0
7,511
#ifndef __LWP_MUTEX_H__ #define __LWP_MUTEX_H__ #include <gctypes.h> #include <lwp_threadq.h> #define LWP_MUTEX_LOCKED 0 #define LWP_MUTEX_UNLOCKED 1 #define LWP_MUTEX_NEST_ACQUIRE 0 #define LWP_MUTEX_NEST_ERROR 1 #define LWP_MUTEX_NEST_BLOCK 2 #define LWP_MUTEX_FIFO 0 #define LWP_MUTEX_PRIORITY 1 #define LWP_MUTEX_INHERITPRIO 2 #define LWP_MUTEX_PRIORITYCEIL 3 #define LWP_MUTEX_SUCCESSFUL 0 #define LWP_MUTEX_UNSATISFIED_NOWAIT 1 #define LWP_MUTEX_NEST_NOTALLOWED 2 #define LWP_MUTEX_NOTOWNER 3 #define LWP_MUTEX_DELETED 4 #define LWP_MUTEX_TIMEOUT 5 #define LWP_MUTEX_CEILINGVIOL 6 #ifdef __cplusplus extern "C" { #endif typedef struct _lwpmutexattr { u32 mode; u32 nest_behavior; u8 prioceil,onlyownerrelease; } lwp_mutex_attr; typedef struct _lwpmutex { lwp_thrqueue wait_queue; lwp_mutex_attr atrrs; u32 lock,nest_cnt,blocked_cnt; lwp_cntrl *holder; } lwp_mutex; void __lwp_mutex_initialize(lwp_mutex *mutex,lwp_mutex_attr *attrs,u32 init_lock); u32 __lwp_mutex_surrender(lwp_mutex *mutex); void __lwp_mutex_seize_irq_blocking(lwp_mutex *mutex,u64 timeout); void __lwp_mutex_flush(lwp_mutex *mutex,u32 status); static __inline__ u32 __lwp_mutex_seize_irq_trylock(lwp_mutex *mutex,u32 *isr_level); #define __lwp_mutex_seize(_mutex_t,_id,_wait,_timeout,_level) \ do { \ if(__lwp_mutex_seize_irq_trylock(_mutex_t,&_level)) { \ if(!_wait) { \ _CPU_ISR_Restore(_level); \ _thr_executing->wait.ret_code = LWP_MUTEX_UNSATISFIED_NOWAIT; \ } else { \ __lwp_threadqueue_csenter(&(_mutex_t)->wait_queue); \ _thr_executing->wait.queue = &(_mutex_t)->wait_queue; \ _thr_executing->wait.id = _id; \ __lwp_thread_dispatchdisable(); \ _CPU_ISR_Restore(_level); \ __lwp_mutex_seize_irq_blocking(_mutex_t,(u64)_timeout); \ } \ } \ } while(0) #ifdef LIBOGC_INTERNAL #include <libogc/lwp_mutex.inl> #endif #ifdef __cplusplus } #endif #endif
Monroe88/RetroArch
wii/libogc/include/ogc/lwp_mutex.h
C
gpl-3.0
1,935
#!/usr/bin/env python import vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create a scene with one of each cell type # Voxel voxelPoints = vtk.vtkPoints() voxelPoints.SetNumberOfPoints(8) voxelPoints.InsertPoint(0,0,0,0) voxelPoints.InsertPoint(1,1,0,0) voxelPoints.InsertPoint(2,0,1,0) voxelPoints.InsertPoint(3,1,1,0) voxelPoints.InsertPoint(4,0,0,1) voxelPoints.InsertPoint(5,1,0,1) voxelPoints.InsertPoint(6,0,1,1) voxelPoints.InsertPoint(7,1,1,1) aVoxel = vtk.vtkVoxel() aVoxel.GetPointIds().SetId(0,0) aVoxel.GetPointIds().SetId(1,1) aVoxel.GetPointIds().SetId(2,2) aVoxel.GetPointIds().SetId(3,3) aVoxel.GetPointIds().SetId(4,4) aVoxel.GetPointIds().SetId(5,5) aVoxel.GetPointIds().SetId(6,6) aVoxel.GetPointIds().SetId(7,7) aVoxelGrid = vtk.vtkUnstructuredGrid() aVoxelGrid.Allocate(1,1) aVoxelGrid.InsertNextCell(aVoxel.GetCellType(),aVoxel.GetPointIds()) aVoxelGrid.SetPoints(voxelPoints) aVoxelMapper = vtk.vtkDataSetMapper() aVoxelMapper.SetInputData(aVoxelGrid) aVoxelActor = vtk.vtkActor() aVoxelActor.SetMapper(aVoxelMapper) aVoxelActor.GetProperty().BackfaceCullingOn() # Hexahedron hexahedronPoints = vtk.vtkPoints() hexahedronPoints.SetNumberOfPoints(8) hexahedronPoints.InsertPoint(0,0,0,0) hexahedronPoints.InsertPoint(1,1,0,0) hexahedronPoints.InsertPoint(2,1,1,0) hexahedronPoints.InsertPoint(3,0,1,0) hexahedronPoints.InsertPoint(4,0,0,1) hexahedronPoints.InsertPoint(5,1,0,1) hexahedronPoints.InsertPoint(6,1,1,1) hexahedronPoints.InsertPoint(7,0,1,1) aHexahedron = vtk.vtkHexahedron() aHexahedron.GetPointIds().SetId(0,0) aHexahedron.GetPointIds().SetId(1,1) aHexahedron.GetPointIds().SetId(2,2) aHexahedron.GetPointIds().SetId(3,3) aHexahedron.GetPointIds().SetId(4,4) aHexahedron.GetPointIds().SetId(5,5) aHexahedron.GetPointIds().SetId(6,6) 
aHexahedron.GetPointIds().SetId(7,7) aHexahedronGrid = vtk.vtkUnstructuredGrid() aHexahedronGrid.Allocate(1,1) aHexahedronGrid.InsertNextCell(aHexahedron.GetCellType(),aHexahedron.GetPointIds()) aHexahedronGrid.SetPoints(hexahedronPoints) aHexahedronMapper = vtk.vtkDataSetMapper() aHexahedronMapper.SetInputData(aHexahedronGrid) aHexahedronActor = vtk.vtkActor() aHexahedronActor.SetMapper(aHexahedronMapper) aHexahedronActor.AddPosition(2,0,0) aHexahedronActor.GetProperty().BackfaceCullingOn() # Tetra tetraPoints = vtk.vtkPoints() tetraPoints.SetNumberOfPoints(4) tetraPoints.InsertPoint(0,0,0,0) tetraPoints.InsertPoint(1,1,0,0) tetraPoints.InsertPoint(2,.5,1,0) tetraPoints.InsertPoint(3,.5,.5,1) aTetra = vtk.vtkTetra() aTetra.GetPointIds().SetId(0,0) aTetra.GetPointIds().SetId(1,1) aTetra.GetPointIds().SetId(2,2) aTetra.GetPointIds().SetId(3,3) aTetraGrid = vtk.vtkUnstructuredGrid() aTetraGrid.Allocate(1,1) aTetraGrid.InsertNextCell(aTetra.GetCellType(),aTetra.GetPointIds()) aTetraGrid.SetPoints(tetraPoints) aTetraMapper = vtk.vtkDataSetMapper() aTetraMapper.SetInputData(aTetraGrid) aTetraActor = vtk.vtkActor() aTetraActor.SetMapper(aTetraMapper) aTetraActor.AddPosition(4,0,0) aTetraActor.GetProperty().BackfaceCullingOn() # Wedge wedgePoints = vtk.vtkPoints() wedgePoints.SetNumberOfPoints(6) wedgePoints.InsertPoint(0,0,1,0) wedgePoints.InsertPoint(1,0,0,0) wedgePoints.InsertPoint(2,0,.5,.5) wedgePoints.InsertPoint(3,1,1,0) wedgePoints.InsertPoint(4,1,0,0) wedgePoints.InsertPoint(5,1,.5,.5) aWedge = vtk.vtkWedge() aWedge.GetPointIds().SetId(0,0) aWedge.GetPointIds().SetId(1,1) aWedge.GetPointIds().SetId(2,2) aWedge.GetPointIds().SetId(3,3) aWedge.GetPointIds().SetId(4,4) aWedge.GetPointIds().SetId(5,5) aWedgeGrid = vtk.vtkUnstructuredGrid() aWedgeGrid.Allocate(1,1) aWedgeGrid.InsertNextCell(aWedge.GetCellType(),aWedge.GetPointIds()) aWedgeGrid.SetPoints(wedgePoints) aWedgeMapper = vtk.vtkDataSetMapper() aWedgeMapper.SetInputData(aWedgeGrid) aWedgeActor = 
vtk.vtkActor() aWedgeActor.SetMapper(aWedgeMapper) aWedgeActor.AddPosition(6,0,0) aWedgeActor.GetProperty().BackfaceCullingOn() # Pyramid pyramidPoints = vtk.vtkPoints() pyramidPoints.SetNumberOfPoints(5) pyramidPoints.InsertPoint(0,0,0,0) pyramidPoints.InsertPoint(1,1,0,0) pyramidPoints.InsertPoint(2,1,1,0) pyramidPoints.InsertPoint(3,0,1,0) pyramidPoints.InsertPoint(4,.5,.5,1) aPyramid = vtk.vtkPyramid() aPyramid.GetPointIds().SetId(0,0) aPyramid.GetPointIds().SetId(1,1) aPyramid.GetPointIds().SetId(2,2) aPyramid.GetPointIds().SetId(3,3) aPyramid.GetPointIds().SetId(4,4) aPyramidGrid = vtk.vtkUnstructuredGrid() aPyramidGrid.Allocate(1,1) aPyramidGrid.InsertNextCell(aPyramid.GetCellType(),aPyramid.GetPointIds()) aPyramidGrid.SetPoints(pyramidPoints) aPyramidMapper = vtk.vtkDataSetMapper() aPyramidMapper.SetInputData(aPyramidGrid) aPyramidActor = vtk.vtkActor() aPyramidActor.SetMapper(aPyramidMapper) aPyramidActor.AddPosition(8,0,0) aPyramidActor.GetProperty().BackfaceCullingOn() # Pixel pixelPoints = vtk.vtkPoints() pixelPoints.SetNumberOfPoints(4) pixelPoints.InsertPoint(0,0,0,0) pixelPoints.InsertPoint(1,1,0,0) pixelPoints.InsertPoint(2,0,1,0) pixelPoints.InsertPoint(3,1,1,0) aPixel = vtk.vtkPixel() aPixel.GetPointIds().SetId(0,0) aPixel.GetPointIds().SetId(1,1) aPixel.GetPointIds().SetId(2,2) aPixel.GetPointIds().SetId(3,3) aPixelGrid = vtk.vtkUnstructuredGrid() aPixelGrid.Allocate(1,1) aPixelGrid.InsertNextCell(aPixel.GetCellType(),aPixel.GetPointIds()) aPixelGrid.SetPoints(pixelPoints) aPixelMapper = vtk.vtkDataSetMapper() aPixelMapper.SetInputData(aPixelGrid) aPixelActor = vtk.vtkActor() aPixelActor.SetMapper(aPixelMapper) aPixelActor.AddPosition(0,0,2) aPixelActor.GetProperty().BackfaceCullingOn() # Quad quadPoints = vtk.vtkPoints() quadPoints.SetNumberOfPoints(4) quadPoints.InsertPoint(0,0,0,0) quadPoints.InsertPoint(1,1,0,0) quadPoints.InsertPoint(2,1,1,0) quadPoints.InsertPoint(3,0,1,0) aQuad = vtk.vtkQuad() aQuad.GetPointIds().SetId(0,0) 
aQuad.GetPointIds().SetId(1,1) aQuad.GetPointIds().SetId(2,2) aQuad.GetPointIds().SetId(3,3) aQuadGrid = vtk.vtkUnstructuredGrid() aQuadGrid.Allocate(1,1) aQuadGrid.InsertNextCell(aQuad.GetCellType(),aQuad.GetPointIds()) aQuadGrid.SetPoints(quadPoints) aQuadMapper = vtk.vtkDataSetMapper() aQuadMapper.SetInputData(aQuadGrid) aQuadActor = vtk.vtkActor() aQuadActor.SetMapper(aQuadMapper) aQuadActor.AddPosition(2,0,2) aQuadActor.GetProperty().BackfaceCullingOn() # Triangle trianglePoints = vtk.vtkPoints() trianglePoints.SetNumberOfPoints(3) trianglePoints.InsertPoint(0,0,0,0) trianglePoints.InsertPoint(1,1,0,0) trianglePoints.InsertPoint(2,.5,.5,0) aTriangle = vtk.vtkTriangle() aTriangle.GetPointIds().SetId(0,0) aTriangle.GetPointIds().SetId(1,1) aTriangle.GetPointIds().SetId(2,2) aTriangleGrid = vtk.vtkUnstructuredGrid() aTriangleGrid.Allocate(1,1) aTriangleGrid.InsertNextCell(aTriangle.GetCellType(),aTriangle.GetPointIds()) aTriangleGrid.SetPoints(trianglePoints) aTriangleMapper = vtk.vtkDataSetMapper() aTriangleMapper.SetInputData(aTriangleGrid) aTriangleActor = vtk.vtkActor() aTriangleActor.SetMapper(aTriangleMapper) aTriangleActor.AddPosition(4,0,2) aTriangleActor.GetProperty().BackfaceCullingOn() # Polygon polygonPoints = vtk.vtkPoints() polygonPoints.SetNumberOfPoints(4) polygonPoints.InsertPoint(0,0,0,0) polygonPoints.InsertPoint(1,1,0,0) polygonPoints.InsertPoint(2,1,1,0) polygonPoints.InsertPoint(3,0,1,0) aPolygon = vtk.vtkPolygon() aPolygon.GetPointIds().SetNumberOfIds(4) aPolygon.GetPointIds().SetId(0,0) aPolygon.GetPointIds().SetId(1,1) aPolygon.GetPointIds().SetId(2,2) aPolygon.GetPointIds().SetId(3,3) aPolygonGrid = vtk.vtkUnstructuredGrid() aPolygonGrid.Allocate(1,1) aPolygonGrid.InsertNextCell(aPolygon.GetCellType(),aPolygon.GetPointIds()) aPolygonGrid.SetPoints(polygonPoints) aPolygonMapper = vtk.vtkDataSetMapper() aPolygonMapper.SetInputData(aPolygonGrid) aPolygonActor = vtk.vtkActor() aPolygonActor.SetMapper(aPolygonMapper) 
aPolygonActor.AddPosition(6,0,2) aPolygonActor.GetProperty().BackfaceCullingOn() # Triangle Strip triangleStripPoints = vtk.vtkPoints() triangleStripPoints.SetNumberOfPoints(5) triangleStripPoints.InsertPoint(0,0,1,0) triangleStripPoints.InsertPoint(1,0,0,0) triangleStripPoints.InsertPoint(2,1,1,0) triangleStripPoints.InsertPoint(3,1,0,0) triangleStripPoints.InsertPoint(4,2,1,0) aTriangleStrip = vtk.vtkTriangleStrip() aTriangleStrip.GetPointIds().SetNumberOfIds(5) aTriangleStrip.GetPointIds().SetId(0,0) aTriangleStrip.GetPointIds().SetId(1,1) aTriangleStrip.GetPointIds().SetId(2,2) aTriangleStrip.GetPointIds().SetId(3,3) aTriangleStrip.GetPointIds().SetId(4,4) aTriangleStripGrid = vtk.vtkUnstructuredGrid() aTriangleStripGrid.Allocate(1,1) aTriangleStripGrid.InsertNextCell(aTriangleStrip.GetCellType(),aTriangleStrip.GetPointIds()) aTriangleStripGrid.SetPoints(triangleStripPoints) aTriangleStripMapper = vtk.vtkDataSetMapper() aTriangleStripMapper.SetInputData(aTriangleStripGrid) aTriangleStripActor = vtk.vtkActor() aTriangleStripActor.SetMapper(aTriangleStripMapper) aTriangleStripActor.AddPosition(8,0,2) aTriangleStripActor.GetProperty().BackfaceCullingOn() # Line linePoints = vtk.vtkPoints() linePoints.SetNumberOfPoints(2) linePoints.InsertPoint(0,0,0,0) linePoints.InsertPoint(1,1,1,0) aLine = vtk.vtkLine() aLine.GetPointIds().SetId(0,0) aLine.GetPointIds().SetId(1,1) aLineGrid = vtk.vtkUnstructuredGrid() aLineGrid.Allocate(1,1) aLineGrid.InsertNextCell(aLine.GetCellType(),aLine.GetPointIds()) aLineGrid.SetPoints(linePoints) aLineMapper = vtk.vtkDataSetMapper() aLineMapper.SetInputData(aLineGrid) aLineActor = vtk.vtkActor() aLineActor.SetMapper(aLineMapper) aLineActor.AddPosition(0,0,4) aLineActor.GetProperty().BackfaceCullingOn() # Poly line polyLinePoints = vtk.vtkPoints() polyLinePoints.SetNumberOfPoints(3) polyLinePoints.InsertPoint(0,0,0,0) polyLinePoints.InsertPoint(1,1,1,0) polyLinePoints.InsertPoint(2,1,0,0) aPolyLine = vtk.vtkPolyLine() 
aPolyLine.GetPointIds().SetNumberOfIds(3) aPolyLine.GetPointIds().SetId(0,0) aPolyLine.GetPointIds().SetId(1,1) aPolyLine.GetPointIds().SetId(2,2) aPolyLineGrid = vtk.vtkUnstructuredGrid() aPolyLineGrid.Allocate(1,1) aPolyLineGrid.InsertNextCell(aPolyLine.GetCellType(),aPolyLine.GetPointIds()) aPolyLineGrid.SetPoints(polyLinePoints) aPolyLineMapper = vtk.vtkDataSetMapper() aPolyLineMapper.SetInputData(aPolyLineGrid) aPolyLineActor = vtk.vtkActor() aPolyLineActor.SetMapper(aPolyLineMapper) aPolyLineActor.AddPosition(2,0,4) aPolyLineActor.GetProperty().BackfaceCullingOn() # Vertex vertexPoints = vtk.vtkPoints() vertexPoints.SetNumberOfPoints(1) vertexPoints.InsertPoint(0,0,0,0) aVertex = vtk.vtkVertex() aVertex.GetPointIds().SetId(0,0) aVertexGrid = vtk.vtkUnstructuredGrid() aVertexGrid.Allocate(1,1) aVertexGrid.InsertNextCell(aVertex.GetCellType(),aVertex.GetPointIds()) aVertexGrid.SetPoints(vertexPoints) aVertexMapper = vtk.vtkDataSetMapper() aVertexMapper.SetInputData(aVertexGrid) aVertexActor = vtk.vtkActor() aVertexActor.SetMapper(aVertexMapper) aVertexActor.AddPosition(0,0,6) aVertexActor.GetProperty().BackfaceCullingOn() # Poly Vertex polyVertexPoints = vtk.vtkPoints() polyVertexPoints.SetNumberOfPoints(3) polyVertexPoints.InsertPoint(0,0,0,0) polyVertexPoints.InsertPoint(1,1,0,0) polyVertexPoints.InsertPoint(2,1,1,0) aPolyVertex = vtk.vtkPolyVertex() aPolyVertex.GetPointIds().SetNumberOfIds(3) aPolyVertex.GetPointIds().SetId(0,0) aPolyVertex.GetPointIds().SetId(1,1) aPolyVertex.GetPointIds().SetId(2,2) aPolyVertexGrid = vtk.vtkUnstructuredGrid() aPolyVertexGrid.Allocate(1,1) aPolyVertexGrid.InsertNextCell(aPolyVertex.GetCellType(),aPolyVertex.GetPointIds()) aPolyVertexGrid.SetPoints(polyVertexPoints) aPolyVertexMapper = vtk.vtkDataSetMapper() aPolyVertexMapper.SetInputData(aPolyVertexGrid) aPolyVertexActor = vtk.vtkActor() aPolyVertexActor.SetMapper(aPolyVertexMapper) aPolyVertexActor.AddPosition(2,0,6) aPolyVertexActor.GetProperty().BackfaceCullingOn() # 
Pentagonal prism pentaPoints = vtk.vtkPoints() pentaPoints.SetNumberOfPoints(10) pentaPoints.InsertPoint(0,0.25,0.0,0.0) pentaPoints.InsertPoint(1,0.75,0.0,0.0) pentaPoints.InsertPoint(2,1.0,0.5,0.0) pentaPoints.InsertPoint(3,0.5,1.0,0.0) pentaPoints.InsertPoint(4,0.0,0.5,0.0) pentaPoints.InsertPoint(5,0.25,0.0,1.0) pentaPoints.InsertPoint(6,0.75,0.0,1.0) pentaPoints.InsertPoint(7,1.0,0.5,1.0) pentaPoints.InsertPoint(8,0.5,1.0,1.0) pentaPoints.InsertPoint(9,0.0,0.5,1.0) aPenta = vtk.vtkPentagonalPrism() aPenta.GetPointIds().SetId(0,0) aPenta.GetPointIds().SetId(1,1) aPenta.GetPointIds().SetId(2,2) aPenta.GetPointIds().SetId(3,3) aPenta.GetPointIds().SetId(4,4) aPenta.GetPointIds().SetId(5,5) aPenta.GetPointIds().SetId(6,6) aPenta.GetPointIds().SetId(7,7) aPenta.GetPointIds().SetId(8,8) aPenta.GetPointIds().SetId(9,9) aPentaGrid = vtk.vtkUnstructuredGrid() aPentaGrid.Allocate(1,1) aPentaGrid.InsertNextCell(aPenta.GetCellType(),aPenta.GetPointIds()) aPentaGrid.SetPoints(pentaPoints) aPentaMapper = vtk.vtkDataSetMapper() aPentaMapper.SetInputData(aPentaGrid) aPentaActor = vtk.vtkActor() aPentaActor.SetMapper(aPentaMapper) aPentaActor.AddPosition(10,0,0) aPentaActor.GetProperty().BackfaceCullingOn() # Hexagonal prism hexaPoints = vtk.vtkPoints() hexaPoints.SetNumberOfPoints(12) hexaPoints.InsertPoint(0,0.0,0.0,0.0) hexaPoints.InsertPoint(1,0.5,0.0,0.0) hexaPoints.InsertPoint(2,1.0,0.5,0.0) hexaPoints.InsertPoint(3,1.0,1.0,0.0) hexaPoints.InsertPoint(4,0.5,1.0,0.0) hexaPoints.InsertPoint(5,0.0,0.5,0.0) hexaPoints.InsertPoint(6,0.0,0.0,1.0) hexaPoints.InsertPoint(7,0.5,0.0,1.0) hexaPoints.InsertPoint(8,1.0,0.5,1.0) hexaPoints.InsertPoint(9,1.0,1.0,1.0) hexaPoints.InsertPoint(10,0.5,1.0,1.0) hexaPoints.InsertPoint(11,0.0,0.5,1.0) aHexa = vtk.vtkHexagonalPrism() aHexa.GetPointIds().SetId(0,0) aHexa.GetPointIds().SetId(1,1) aHexa.GetPointIds().SetId(2,2) aHexa.GetPointIds().SetId(3,3) aHexa.GetPointIds().SetId(4,4) aHexa.GetPointIds().SetId(5,5) 
aHexa.GetPointIds().SetId(6,6) aHexa.GetPointIds().SetId(7,7) aHexa.GetPointIds().SetId(8,8) aHexa.GetPointIds().SetId(9,9) aHexa.GetPointIds().SetId(10,10) aHexa.GetPointIds().SetId(11,11) aHexaGrid = vtk.vtkUnstructuredGrid() aHexaGrid.Allocate(1,1) aHexaGrid.InsertNextCell(aHexa.GetCellType(),aHexa.GetPointIds()) aHexaGrid.SetPoints(hexaPoints) aHexaMapper = vtk.vtkDataSetMapper() aHexaMapper.SetInputData(aHexaGrid) aHexaActor = vtk.vtkActor() aHexaActor.SetMapper(aHexaMapper) aHexaActor.AddPosition(12,0,0) aHexaActor.GetProperty().BackfaceCullingOn() ren1.SetBackground(.1,.2,.4) ren1.AddActor(aVoxelActor) aVoxelActor.GetProperty().SetDiffuseColor(1,0,0) ren1.AddActor(aHexahedronActor) aHexahedronActor.GetProperty().SetDiffuseColor(1,1,0) ren1.AddActor(aTetraActor) aTetraActor.GetProperty().SetDiffuseColor(0,1,0) ren1.AddActor(aWedgeActor) aWedgeActor.GetProperty().SetDiffuseColor(0,1,1) ren1.AddActor(aPyramidActor) aPyramidActor.GetProperty().SetDiffuseColor(1,0,1) ren1.AddActor(aPixelActor) aPixelActor.GetProperty().SetDiffuseColor(0,1,1) ren1.AddActor(aQuadActor) aQuadActor.GetProperty().SetDiffuseColor(1,0,1) ren1.AddActor(aTriangleActor) aTriangleActor.GetProperty().SetDiffuseColor(.3,1,.5) ren1.AddActor(aPolygonActor) aPolygonActor.GetProperty().SetDiffuseColor(1,.4,.5) ren1.AddActor(aTriangleStripActor) aTriangleStripActor.GetProperty().SetDiffuseColor(.3,.7,1) ren1.AddActor(aLineActor) aLineActor.GetProperty().SetDiffuseColor(.2,1,1) ren1.AddActor(aPolyLineActor) aPolyLineActor.GetProperty().SetDiffuseColor(1,1,1) ren1.AddActor(aVertexActor) aVertexActor.GetProperty().SetDiffuseColor(1,1,1) ren1.AddActor(aPolyVertexActor) aPolyVertexActor.GetProperty().SetDiffuseColor(1,1,1) ren1.AddActor(aPentaActor) aPentaActor.GetProperty().SetDiffuseColor(.2,.4,.7) ren1.AddActor(aHexaActor) aHexaActor.GetProperty().SetDiffuseColor(.7,.5,1) ren1.ResetCamera() ren1.GetActiveCamera().Azimuth(30) ren1.GetActiveCamera().Elevation(20) ren1.GetActiveCamera().Dolly(1.25) 
ren1.ResetCameraClippingRange() renWin.Render() cellPicker = vtk.vtkCellPicker() pointPicker = vtk.vtkPointPicker() worldPicker = vtk.vtkWorldPointPicker() cellCount = 0 pointCount = 0 ren1.IsInViewport(0,0) x = 0 while x <= 265: y = 100 while y <= 200: cellPicker.Pick(x,y,0,ren1) pointPicker.Pick(x,y,0,ren1) worldPicker.Pick(x,y,0,ren1) if (cellPicker.GetCellId() != "-1"): cellCount = cellCount + 1 pass if (pointPicker.GetPointId() != "-1"): pointCount = pointCount + 1 pass y = y + 6 x = x + 6 # render the image # iren.Initialize() # --- end of script --
HopeFOAM/HopeFOAM
ThirdParty-0.1/ParaView-5.0.1/VTK/Rendering/Core/Testing/Python/pickCells.py
Python
gpl-3.0
16,540
#include <kccachedb.h> using namespace std; using namespace kyotocabinet; // main routine int main(int argc, char** argv) { // create the database object GrassDB db; // open the database if (!db.open("*", GrassDB::OWRITER | GrassDB::OCREATE)) { cerr << "open error: " << db.error().name() << endl; } // store records if (!db.set("foo", "hop") || !db.set("bar", "step") || !db.set("baz", "jump")) { cerr << "set error: " << db.error().name() << endl; } // retrieve a record string value; if (db.get("foo", &value)) { cout << value << endl; } else { cerr << "get error: " << db.error().name() << endl; } // traverse records DB::Cursor* cur = db.cursor(); cur->jump(); string ckey, cvalue; while (cur->get(&ckey, &cvalue, true)) { cout << ckey << ":" << cvalue << endl; } delete cur; // close the database if (!db.close()) { cerr << "close error: " << db.error().name() << endl; } return 0; }
sapo/kyoto
kyotocabinet/example/kcgrassex.cc
C++
gpl-3.0
983
/***************************************************************************//** * \file cyutils.c * \version 5.40 * * \brief Provides a function to handle 24-bit value writes. * ******************************************************************************** * \copyright * Copyright 2008-2016, Cypress Semiconductor Corporation. All rights reserved. * You may use this file only in accordance with the license, terms, conditions, * disclaimers, and limitations in the end user license agreement accompanying * the software package with which this file was provided. *******************************************************************************/ #include "cytypes.h" #if (!CY_PSOC3) /*************************************************************************** * Function Name: CySetReg24 ************************************************************************//** * * Writes a 24-bit value to the specified register. * * \param addr The address where data must be written. * \param value The data that must be written. * * \reentrant No * ***************************************************************************/ void CySetReg24(uint32 volatile * addr, uint32 value) { uint8 volatile *tmpAddr; tmpAddr = (uint8 volatile *) addr; tmpAddr[0u] = (uint8) value; tmpAddr[1u] = (uint8) (value >> 8u); tmpAddr[2u] = (uint8) (value >> 16u); } #if(CY_PSOC4) /*************************************************************************** * Function Name: CyGetReg24 ************************************************************************//** * * Reads the 24-bit value from the specified register. * * \param addr The address where data must be read. 
* * \reentrant No * ***************************************************************************/ uint32 CyGetReg24(uint32 const volatile * addr) { uint8 const volatile *tmpAddr; uint32 value; tmpAddr = (uint8 const volatile *) addr; value = (uint32) tmpAddr[0u]; value |= ((uint32) tmpAddr[1u] << 8u ); value |= ((uint32) tmpAddr[2u] << 16u); return(value); } #endif /*(CY_PSOC4)*/ #endif /* (!CY_PSOC3) */ /* [] END OF FILE */
techdude101/code
PSoC BLE/WS_UARTDeepSleepWakeUp/UARTDeepSleepWakeUp.cydsn/codegentemp/cyutils.c
C
gpl-3.0
2,399
/**************************************************************** Siano Mobile Silicon, Inc. MDTV receiver kernel modules. Copyright (C) 2006-2008, Uri Shkolnik This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ****************************************************************/ #ifndef __SMS_CHAR_IOCTL_H__ #define __SMS_CHAR_IOCTL_H__ #include <linux/ioctl.h> enum smschar_pnp_event_t { SMSCHAR_PLUG_IN_EVENT, SMSCHAR_PLUG_OUT_EVENT, SMSCHAR_SUSPEND_EVENT, SMSCHAR_RESUME_EVENT, SMSCHAR_UNKNOWN_EVENT, SMSCHAR_TERMINATE_EVENT }; struct smschar_buffer_t { unsigned long offset; /* offset in common buffer (mapped to user) */ int size; }; struct smschar_get_fw_filename_ioctl_t { int mode; char filename[200]; }; struct smschar_send_fw_file_ioctl_t { char *fw_buf; int fw_size; }; struct smschar_send_fw_chunk_ioctl_t { char *buf; int size; }; #define SMSCHAR_SET_DEVICE_MODE _IOW('K', 0, int) #define SMSCHAR_GET_DEVICE_MODE _IOR('K', 1, int) #define SMSCHAR_GET_BUFFER_SIZE _IOR('K', 2, int) #define SMSCHAR_WAIT_GET_BUFFER _IOR('K', 3, struct smschar_buffer_t) #define SMSCHAR_IS_DEVICE_PNP_EVENT _IOR('K', 4, int) #define SMSCHAR_GET_FW_FILE_NAME \ _IOWR('K', 5, struct smschar_get_fw_filename_ioctl_t) #define SMSCHAR_SEND_FW_FILE \ _IOW('K', 6, struct smschar_send_fw_file_ioctl_t) #define SMSCHAR_CANCEL_WAIT_BUFFER _IO('K', 7) #define SMSCHAR_CANCEL_POLL _IO('K', 8) #define SMSCHAR_IS_CANCEL_DEVICE_PNP_EVENT _IO('K', 9) #define 
SMSCHAR_SEND_FW_CHUNK \ _IOW('K', 10, struct smschar_send_fw_chunk_ioctl_t) #define SMSCHAR_SEND_LAST_FW_CHUNK \ _IOW('K', 11, struct smschar_send_fw_chunk_ioctl_t) #define SMSCHAR_RESET_GPIO_VALUE_CHANGE _IOW('K', 12, int) #endif /* __SMS_CHAR_IOCTL_H__ */
danialbehzadi/Nokia-RM-1013-2.0.0.11
kernel/drivers/media/dvb/siano/smscharioctl.h
C
gpl-3.0
2,317
<?php // This file is part of BOINC. // http://boinc.berkeley.edu // Copyright (C) 2011 University of California // // BOINC is free software; you can redistribute it and/or modify it // under the terms of the GNU Lesser General Public License // as published by the Free Software Foundation, // either version 3 of the License, or (at your option) any later version. // // BOINC is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. // See the GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with BOINC. If not, see <http://www.gnu.org/licenses/>. // web interfaces for viewing and controlling batches ini_set('display_errors', 'stdout'); error_reporting(E_ALL); require_once("../inc/util.inc"); require_once("../inc/boinc_db.inc"); require_once("../inc/result.inc"); require_once("../inc/submit_db.inc"); function show_batch($user) { $batch_id = get_int('batch_id'); $batch = BoincBatch::lookup_id($batch_id); if (!$batch || $batch->user_id != $user->id) { error_page("no batch"); } page_head("Batch $batch->id"); $results = BoincResult::enum("batch=$batch->id order by workunitid"); $i = 0; result_table_start(true, true, null); foreach ($results as $result) { show_result_row($result, true, true, true, $i++); } end_table(); page_tail(); } function show_batches($user) { $batches = BoincBatch::enum("user_id=$user->id"); page_head("Batches"); start_table(); table_header("Batch ID", "Submitted", "# jobs"); foreach ($batches as $batch) { echo "<tr> <td><a href=submit_status.php?action=show_batch&batch_id=$batch->id>$batch->id</a></td> <td>".time_str($batch->create_time)."</td> <td>$batch->njobs</td> </tr> "; } end_table(); page_tail(); } $user = get_logged_in_user(); $action = get_str('action', true); switch ($action) { case '': show_batches($user); break; case 'show_batch': 
show_batch($user); } ?>
hanxue/Boinc
html/user/submit_status.php
PHP
gpl-3.0
2,189
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # This module copyright (C) 2015 Therp BV (<http://therp.nl>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import copy from openerp import models from openerp.addons.account.report.account_financial_report import\ report_account_common class report_account_common_horizontal(report_account_common): def __init__(self, cr, uid, name, context=None): super(report_account_common_horizontal, self).__init__( cr, uid, name, context=context) self.localcontext.update({ 'get_left_lines': self.get_left_lines, 'get_right_lines': self.get_right_lines, }) def get_lines(self, data, side=None): data = copy.deepcopy(data) if data['form']['used_context'] is None: data['form']['used_context'] = {} data['form']['used_context'].update( account_financial_report_horizontal_side=side) return super(report_account_common_horizontal, self).get_lines( data) def get_left_lines(self, data): return self.get_lines(data, side='left') def get_right_lines(self, data): return self.get_lines(data, side='right') class ReportFinancial(models.AbstractModel): _inherit = 'report.account.report_financial' _wrapped_report_class = report_account_common_horizontal
Ehtaga/account-financial-reporting
account_financial_report_horizontal/report/report_financial.py
Python
agpl-3.0
2,192
/* Copyright (c) 2001-2009, The HSQL Development Group * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of the HSQL Development Group nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL HSQL DEVELOPMENT GROUP, HSQLDB.ORG, * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package org.hsqldb_voltpatches.lib; /** * This should be used as the datatype for parameters and instance variables * instead of HsqlArrayList or HsqlLinkedList to allow interchangable use of the * two. 
* * @author dnordahl@users * @version 1.7.2 * @since 1.7.2 */ public interface HsqlList extends Collection { void add(int index, Object element); boolean add(Object element); Object get(int index); Object remove(int index); Object set(int index, Object element); boolean isEmpty(); int size(); Iterator iterator(); }
kumarrus/voltdb
src/hsqldb19b3/org/hsqldb_voltpatches/lib/HsqlList.java
Java
agpl-3.0
2,168
#!/usr/bin/env python # Copyright (C) 2017 Francisco Acosta <francisco.acosta@inria.fr> # # This file is subject to the terms and conditions of the GNU Lesser # General Public License v2.1. See the file LICENSE in the top level # directory for more details. import os import sys sys.path.append(os.path.join(os.environ['RIOTBASE'], 'dist/tools/testrunner')) import testrunner from datetime import datetime class InvalidTimeout(Exception): pass def testfunc(child): exp_diff1 = 1000000 exp_diff5 = 5000000 exp_diff10 = 10000000 child.expect(u"This test will print \"Slept for X sec...\" every 1, 5 and 10 seconds.\r\n") child.expect(u"\r\n") child.expect(u"<======== If using pyterm, this is the time when started.") child.expect(u"\r\n") m = 9 while (m): n = 3 while (n): if n == 3: exp_diff = exp_diff1 if n == 2: exp_diff = exp_diff5 elif n == 1: exp_diff = exp_diff10 start = datetime.now() child.expect(u"Slept for \\d+ sec...", timeout=11) stop = datetime.now() diff = (stop - start) diff = (diff.seconds * 1000000) + diff.microseconds # fail within 5% of expected if diff > (exp_diff + (exp_diff1 * 0.05)) or \ diff < (exp_diff - (exp_diff1 * 0.05)): raise InvalidTimeout("Invalid timeout %d (expected %d)" % (diff, exp_diff)); else: print("Timed out correctly: %d (expected %d)" % (diff, exp_diff)) n = n - 1 m = m -1 child.expect(u"Test end.", timeout=15) if __name__ == "__main__": sys.exit(testrunner.run(testfunc))
dailab/RIOT
tests/xtimer_usleep/tests/01-run.py
Python
lgpl-2.1
1,745
//===- LiveDebugVariables.cpp - Tracking debug info variables -------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the LiveDebugVariables analysis. // // Remove all DBG_VALUE instructions referencing virtual registers and replace // them with a data structure tracking where live user variables are kept - in a // virtual register or in a stack slot. // // Allow the data structure to be updated during register allocation when values // are moved between registers and stack slots. Finally emit new DBG_VALUE // instructions after register allocation is complete. // //===----------------------------------------------------------------------===// #include "LiveDebugVariables.h" #include "llvm/ADT/IntervalMap.h" #include "llvm/ADT/Statistic.h" #include "llvm/CodeGen/LexicalScopes.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" #include "llvm/CodeGen/MachineDominators.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/VirtRegMap.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Value.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetSubtargetInfo.h" #include <memory> #include <utility> using namespace llvm; #define DEBUG_TYPE "livedebug" static cl::opt<bool> EnableLDV("live-debug-variables", cl::init(true), cl::desc("Enable the live debug variables pass"), cl::Hidden); STATISTIC(NumInsertedDebugValues, "Number of DBG_VALUEs 
inserted"); char LiveDebugVariables::ID = 0; INITIALIZE_PASS_BEGIN(LiveDebugVariables, "livedebugvars", "Debug Variable Analysis", false, false) INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) INITIALIZE_PASS_DEPENDENCY(LiveIntervals) INITIALIZE_PASS_END(LiveDebugVariables, "livedebugvars", "Debug Variable Analysis", false, false) void LiveDebugVariables::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<MachineDominatorTree>(); AU.addRequiredTransitive<LiveIntervals>(); AU.setPreservesAll(); MachineFunctionPass::getAnalysisUsage(AU); } LiveDebugVariables::LiveDebugVariables() : MachineFunctionPass(ID), pImpl(nullptr) { initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry()); } /// LocMap - Map of where a user value is live, and its location. typedef IntervalMap<SlotIndex, unsigned, 4> LocMap; namespace { /// UserValueScopes - Keeps track of lexical scopes associated with a /// user value's source location. class UserValueScopes { DebugLoc DL; LexicalScopes &LS; SmallPtrSet<const MachineBasicBlock *, 4> LBlocks; public: UserValueScopes(DebugLoc D, LexicalScopes &L) : DL(std::move(D)), LS(L) {} /// dominates - Return true if current scope dominates at least one machine /// instruction in a given machine basic block. bool dominates(MachineBasicBlock *MBB) { if (LBlocks.empty()) LS.getMachineBasicBlocks(DL, LBlocks); return LBlocks.count(MBB) != 0 || LS.dominates(DL, MBB); } }; } // end anonymous namespace /// UserValue - A user value is a part of a debug info user variable. /// /// A DBG_VALUE instruction notes that (a sub-register of) a virtual register /// holds part of a user variable. The part is identified by a byte offset. /// /// UserValues are grouped into equivalence classes for easier searching. Two /// user values are related if they refer to the same variable, or if they are /// held by the same virtual register. The equivalence class is the transitive /// closure of that relation. 
namespace { class LDVImpl; class UserValue { const MDNode *Variable; ///< The debug info variable we are part of. const MDNode *Expression; ///< Any complex address expression. unsigned offset; ///< Byte offset into variable. bool IsIndirect; ///< true if this is a register-indirect+offset value. DebugLoc dl; ///< The debug location for the variable. This is ///< used by dwarf writer to find lexical scope. UserValue *leader; ///< Equivalence class leader. UserValue *next; ///< Next value in equivalence class, or null. /// Numbered locations referenced by locmap. SmallVector<MachineOperand, 4> locations; /// Map of slot indices where this value is live. LocMap locInts; /// coalesceLocation - After LocNo was changed, check if it has become /// identical to another location, and coalesce them. This may cause LocNo or /// a later location to be erased, but no earlier location will be erased. void coalesceLocation(unsigned LocNo); /// insertDebugValue - Insert a DBG_VALUE into MBB at Idx for LocNo. void insertDebugValue(MachineBasicBlock *MBB, SlotIndex Idx, unsigned LocNo, LiveIntervals &LIS, const TargetInstrInfo &TII); /// splitLocation - Replace OldLocNo ranges with NewRegs ranges where NewRegs /// is live. Returns true if any changes were made. bool splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs, LiveIntervals &LIS); public: /// UserValue - Create a new UserValue. UserValue(const MDNode *var, const MDNode *expr, unsigned o, bool i, DebugLoc L, LocMap::Allocator &alloc) : Variable(var), Expression(expr), offset(o), IsIndirect(i), dl(std::move(L)), leader(this), next(nullptr), locInts(alloc) {} /// getLeader - Get the leader of this value's equivalence class. UserValue *getLeader() { UserValue *l = leader; while (l != l->leader) l = l->leader; return leader = l; } /// getNext - Return the next UserValue in the equivalence class. UserValue *getNext() const { return next; } /// match - Does this UserValue match the parameters? 
bool match(const MDNode *Var, const MDNode *Expr, const DILocation *IA, unsigned Offset, bool indirect) const { return Var == Variable && Expr == Expression && dl->getInlinedAt() == IA && Offset == offset && indirect == IsIndirect; } /// merge - Merge equivalence classes. static UserValue *merge(UserValue *L1, UserValue *L2) { L2 = L2->getLeader(); if (!L1) return L2; L1 = L1->getLeader(); if (L1 == L2) return L1; // Splice L2 before L1's members. UserValue *End = L2; while (End->next) { End->leader = L1; End = End->next; } End->leader = L1; End->next = L1->next; L1->next = L2; return L1; } /// getLocationNo - Return the location number that matches Loc. unsigned getLocationNo(const MachineOperand &LocMO) { if (LocMO.isReg()) { if (LocMO.getReg() == 0) return ~0u; // For register locations we dont care about use/def and other flags. for (unsigned i = 0, e = locations.size(); i != e; ++i) if (locations[i].isReg() && locations[i].getReg() == LocMO.getReg() && locations[i].getSubReg() == LocMO.getSubReg()) return i; } else for (unsigned i = 0, e = locations.size(); i != e; ++i) if (LocMO.isIdenticalTo(locations[i])) return i; locations.push_back(LocMO); // We are storing a MachineOperand outside a MachineInstr. locations.back().clearParent(); // Don't store def operands. if (locations.back().isReg()) locations.back().setIsUse(); return locations.size() - 1; } /// mapVirtRegs - Ensure that all virtual register locations are mapped. void mapVirtRegs(LDVImpl *LDV); /// addDef - Add a definition point to this value. void addDef(SlotIndex Idx, const MachineOperand &LocMO) { // Add a singular (Idx,Idx) -> Loc mapping. LocMap::iterator I = locInts.find(Idx); if (!I.valid() || I.start() != Idx) I.insert(Idx, Idx.getNextSlot(), getLocationNo(LocMO)); else // A later DBG_VALUE at the same SlotIndex overrides the old location. I.setValue(getLocationNo(LocMO)); } /// extendDef - Extend the current definition as far as possible down the /// dominator tree. 
Stop when meeting an existing def or when leaving the live /// range of VNI. /// End points where VNI is no longer live are added to Kills. /// @param Idx Starting point for the definition. /// @param LocNo Location number to propagate. /// @param LR Restrict liveness to where LR has the value VNI. May be null. /// @param VNI When LR is not null, this is the value to restrict to. /// @param Kills Append end points of VNI's live range to Kills. /// @param LIS Live intervals analysis. /// @param MDT Dominator tree. void extendDef(SlotIndex Idx, unsigned LocNo, LiveRange *LR, const VNInfo *VNI, SmallVectorImpl<SlotIndex> *Kills, LiveIntervals &LIS, MachineDominatorTree &MDT, UserValueScopes &UVS); /// addDefsFromCopies - The value in LI/LocNo may be copies to other /// registers. Determine if any of the copies are available at the kill /// points, and add defs if possible. /// @param LI Scan for copies of the value in LI->reg. /// @param LocNo Location number of LI->reg. /// @param Kills Points where the range of LocNo could be extended. /// @param NewDefs Append (Idx, LocNo) of inserted defs here. void addDefsFromCopies(LiveInterval *LI, unsigned LocNo, const SmallVectorImpl<SlotIndex> &Kills, SmallVectorImpl<std::pair<SlotIndex, unsigned> > &NewDefs, MachineRegisterInfo &MRI, LiveIntervals &LIS); /// computeIntervals - Compute the live intervals of all locations after /// collecting all their def points. void computeIntervals(MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, LiveIntervals &LIS, MachineDominatorTree &MDT, UserValueScopes &UVS); /// splitRegister - Replace OldReg ranges with NewRegs ranges where NewRegs is /// live. Returns true if any changes were made. bool splitRegister(unsigned OldLocNo, ArrayRef<unsigned> NewRegs, LiveIntervals &LIS); /// rewriteLocations - Rewrite virtual register locations according to the /// provided virtual register map. 
void rewriteLocations(VirtRegMap &VRM, const TargetRegisterInfo &TRI); /// emitDebugValues - Recreate DBG_VALUE instruction from data structures. void emitDebugValues(VirtRegMap *VRM, LiveIntervals &LIS, const TargetInstrInfo &TRI); /// getDebugLoc - Return DebugLoc of this UserValue. DebugLoc getDebugLoc() { return dl;} void print(raw_ostream &, const TargetRegisterInfo *); }; } // namespace /// LDVImpl - Implementation of the LiveDebugVariables pass. namespace { class LDVImpl { LiveDebugVariables &pass; LocMap::Allocator allocator; MachineFunction *MF; LiveIntervals *LIS; LexicalScopes LS; MachineDominatorTree *MDT; const TargetRegisterInfo *TRI; /// Whether emitDebugValues is called. bool EmitDone; /// Whether the machine function is modified during the pass. bool ModifiedMF; /// userValues - All allocated UserValue instances. SmallVector<std::unique_ptr<UserValue>, 8> userValues; /// Map virtual register to eq class leader. typedef DenseMap<unsigned, UserValue*> VRMap; VRMap virtRegToEqClass; /// Map user variable to eq class leader. typedef DenseMap<const MDNode *, UserValue*> UVMap; UVMap userVarMap; /// getUserValue - Find or create a UserValue. UserValue *getUserValue(const MDNode *Var, const MDNode *Expr, unsigned Offset, bool IsIndirect, const DebugLoc &DL); /// lookupVirtReg - Find the EC leader for VirtReg or null. UserValue *lookupVirtReg(unsigned VirtReg); /// handleDebugValue - Add DBG_VALUE instruction to our maps. /// @param MI DBG_VALUE instruction /// @param Idx Last valid SLotIndex before instruction. /// @return True if the DBG_VALUE instruction should be deleted. bool handleDebugValue(MachineInstr &MI, SlotIndex Idx); /// collectDebugValues - Collect and erase all DBG_VALUE instructions, adding /// a UserValue def for each instruction. /// @param mf MachineFunction to be scanned. /// @return True if any debug values were found. 
bool collectDebugValues(MachineFunction &mf); /// computeIntervals - Compute the live intervals of all user values after /// collecting all their def points. void computeIntervals(); public: LDVImpl(LiveDebugVariables *ps) : pass(*ps), MF(nullptr), EmitDone(false), ModifiedMF(false) {} bool runOnMachineFunction(MachineFunction &mf); /// clear - Release all memory. void clear() { MF = nullptr; userValues.clear(); virtRegToEqClass.clear(); userVarMap.clear(); // Make sure we call emitDebugValues if the machine function was modified. assert((!ModifiedMF || EmitDone) && "Dbg values are not emitted in LDV"); EmitDone = false; ModifiedMF = false; LS.reset(); } /// mapVirtReg - Map virtual register to an equivalence class. void mapVirtReg(unsigned VirtReg, UserValue *EC); /// splitRegister - Replace all references to OldReg with NewRegs. void splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs); /// emitDebugValues - Recreate DBG_VALUE instruction from data structures. void emitDebugValues(VirtRegMap *VRM); void print(raw_ostream&); }; } // namespace static void printDebugLoc(const DebugLoc &DL, raw_ostream &CommentOS, const LLVMContext &Ctx) { if (!DL) return; auto *Scope = cast<DIScope>(DL.getScope()); // Omit the directory, because it's likely to be long and uninteresting. 
CommentOS << Scope->getFilename(); CommentOS << ':' << DL.getLine(); if (DL.getCol() != 0) CommentOS << ':' << DL.getCol(); DebugLoc InlinedAtDL = DL.getInlinedAt(); if (!InlinedAtDL) return; CommentOS << " @[ "; printDebugLoc(InlinedAtDL, CommentOS, Ctx); CommentOS << " ]"; } static void printExtendedName(raw_ostream &OS, const DILocalVariable *V, const DILocation *DL) { const LLVMContext &Ctx = V->getContext(); StringRef Res = V->getName(); if (!Res.empty()) OS << Res << "," << V->getLine(); if (auto *InlinedAt = DL->getInlinedAt()) { if (DebugLoc InlinedAtDL = InlinedAt) { OS << " @["; printDebugLoc(InlinedAtDL, OS, Ctx); OS << "]"; } } } void UserValue::print(raw_ostream &OS, const TargetRegisterInfo *TRI) { auto *DV = cast<DILocalVariable>(Variable); OS << "!\""; printExtendedName(OS, DV, dl); OS << "\"\t"; if (offset) OS << '+' << offset; for (LocMap::const_iterator I = locInts.begin(); I.valid(); ++I) { OS << " [" << I.start() << ';' << I.stop() << "):"; if (I.value() == ~0u) OS << "undef"; else OS << I.value(); } for (unsigned i = 0, e = locations.size(); i != e; ++i) { OS << " Loc" << i << '='; locations[i].print(OS, TRI); } OS << '\n'; } void LDVImpl::print(raw_ostream &OS) { OS << "********** DEBUG VARIABLES **********\n"; for (unsigned i = 0, e = userValues.size(); i != e; ++i) userValues[i]->print(OS, TRI); } void UserValue::coalesceLocation(unsigned LocNo) { unsigned KeepLoc = 0; for (unsigned e = locations.size(); KeepLoc != e; ++KeepLoc) { if (KeepLoc == LocNo) continue; if (locations[KeepLoc].isIdenticalTo(locations[LocNo])) break; } // No matches. if (KeepLoc == locations.size()) return; // Keep the smaller location, erase the larger one. unsigned EraseLoc = LocNo; if (KeepLoc > EraseLoc) std::swap(KeepLoc, EraseLoc); locations.erase(locations.begin() + EraseLoc); // Rewrite values. for (LocMap::iterator I = locInts.begin(); I.valid(); ++I) { unsigned v = I.value(); if (v == EraseLoc) I.setValue(KeepLoc); // Coalesce when possible. 
else if (v > EraseLoc) I.setValueUnchecked(v-1); // Avoid coalescing with untransformed values. } } void UserValue::mapVirtRegs(LDVImpl *LDV) { for (unsigned i = 0, e = locations.size(); i != e; ++i) if (locations[i].isReg() && TargetRegisterInfo::isVirtualRegister(locations[i].getReg())) LDV->mapVirtReg(locations[i].getReg(), this); } UserValue *LDVImpl::getUserValue(const MDNode *Var, const MDNode *Expr, unsigned Offset, bool IsIndirect, const DebugLoc &DL) { UserValue *&Leader = userVarMap[Var]; if (Leader) { UserValue *UV = Leader->getLeader(); Leader = UV; for (; UV; UV = UV->getNext()) if (UV->match(Var, Expr, DL->getInlinedAt(), Offset, IsIndirect)) return UV; } userValues.push_back( make_unique<UserValue>(Var, Expr, Offset, IsIndirect, DL, allocator)); UserValue *UV = userValues.back().get(); Leader = UserValue::merge(Leader, UV); return UV; } void LDVImpl::mapVirtReg(unsigned VirtReg, UserValue *EC) { assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "Only map VirtRegs"); UserValue *&Leader = virtRegToEqClass[VirtReg]; Leader = UserValue::merge(Leader, EC); } UserValue *LDVImpl::lookupVirtReg(unsigned VirtReg) { if (UserValue *UV = virtRegToEqClass.lookup(VirtReg)) return UV->getLeader(); return nullptr; } bool LDVImpl::handleDebugValue(MachineInstr &MI, SlotIndex Idx) { // DBG_VALUE loc, offset, variable if (MI.getNumOperands() != 4 || !(MI.getOperand(1).isReg() || MI.getOperand(1).isImm()) || !MI.getOperand(2).isMetadata()) { DEBUG(dbgs() << "Can't handle " << MI); return false; } // Get or create the UserValue for (variable,offset). bool IsIndirect = MI.isIndirectDebugValue(); unsigned Offset = IsIndirect ? MI.getOperand(1).getImm() : 0; const MDNode *Var = MI.getDebugVariable(); const MDNode *Expr = MI.getDebugExpression(); //here. 
UserValue *UV = getUserValue(Var, Expr, Offset, IsIndirect, MI.getDebugLoc()); UV->addDef(Idx, MI.getOperand(0)); return true; } bool LDVImpl::collectDebugValues(MachineFunction &mf) { bool Changed = false; for (MachineFunction::iterator MFI = mf.begin(), MFE = mf.end(); MFI != MFE; ++MFI) { MachineBasicBlock *MBB = &*MFI; for (MachineBasicBlock::iterator MBBI = MBB->begin(), MBBE = MBB->end(); MBBI != MBBE;) { if (!MBBI->isDebugValue()) { ++MBBI; continue; } // DBG_VALUE has no slot index, use the previous instruction instead. SlotIndex Idx = MBBI == MBB->begin() ? LIS->getMBBStartIdx(MBB) : LIS->getInstructionIndex(*std::prev(MBBI)).getRegSlot(); // Handle consecutive DBG_VALUE instructions with the same slot index. do { if (handleDebugValue(*MBBI, Idx)) { MBBI = MBB->erase(MBBI); Changed = true; } else ++MBBI; } while (MBBI != MBBE && MBBI->isDebugValue()); } } return Changed; } /// We only propagate DBG_VALUES locally here. LiveDebugValues performs a /// data-flow analysis to propagate them beyond basic block boundaries. void UserValue::extendDef(SlotIndex Idx, unsigned LocNo, LiveRange *LR, const VNInfo *VNI, SmallVectorImpl<SlotIndex> *Kills, LiveIntervals &LIS, MachineDominatorTree &MDT, UserValueScopes &UVS) { SlotIndex Start = Idx; MachineBasicBlock *MBB = LIS.getMBBFromIndex(Start); SlotIndex Stop = LIS.getMBBEndIdx(MBB); LocMap::iterator I = locInts.find(Start); // Limit to VNI's live range. bool ToEnd = true; if (LR && VNI) { LiveInterval::Segment *Segment = LR->getSegmentContaining(Start); if (!Segment || Segment->valno != VNI) { if (Kills) Kills->push_back(Start); return; } if (Segment->end < Stop) { Stop = Segment->end; ToEnd = false; } } // There could already be a short def at Start. if (I.valid() && I.start() <= Start) { // Stop when meeting a different location or an already extended interval. Start = Start.getNextSlot(); if (I.value() != LocNo || I.stop() != Start) return; // This is a one-slot placeholder. Just skip it. 
++I; } // Limited by the next def. if (I.valid() && I.start() < Stop) { Stop = I.start(); ToEnd = false; } // Limited by VNI's live range. else if (!ToEnd && Kills) Kills->push_back(Stop); if (Start < Stop) I.insert(Start, Stop, LocNo); } void UserValue::addDefsFromCopies(LiveInterval *LI, unsigned LocNo, const SmallVectorImpl<SlotIndex> &Kills, SmallVectorImpl<std::pair<SlotIndex, unsigned> > &NewDefs, MachineRegisterInfo &MRI, LiveIntervals &LIS) { if (Kills.empty()) return; // Don't track copies from physregs, there are too many uses. if (!TargetRegisterInfo::isVirtualRegister(LI->reg)) return; // Collect all the (vreg, valno) pairs that are copies of LI. SmallVector<std::pair<LiveInterval*, const VNInfo*>, 8> CopyValues; for (MachineOperand &MO : MRI.use_nodbg_operands(LI->reg)) { MachineInstr *MI = MO.getParent(); // Copies of the full value. if (MO.getSubReg() || !MI->isCopy()) continue; unsigned DstReg = MI->getOperand(0).getReg(); // Don't follow copies to physregs. These are usually setting up call // arguments, and the argument registers are always call clobbered. We are // better off in the source register which could be a callee-saved register, // or it could be spilled. if (!TargetRegisterInfo::isVirtualRegister(DstReg)) continue; // Is LocNo extended to reach this copy? If not, another def may be blocking // it, or we are looking at a wrong value of LI. SlotIndex Idx = LIS.getInstructionIndex(*MI); LocMap::iterator I = locInts.find(Idx.getRegSlot(true)); if (!I.valid() || I.value() != LocNo) continue; if (!LIS.hasInterval(DstReg)) continue; LiveInterval *DstLI = &LIS.getInterval(DstReg); const VNInfo *DstVNI = DstLI->getVNInfoAt(Idx.getRegSlot()); assert(DstVNI && DstVNI->def == Idx.getRegSlot() && "Bad copy value"); CopyValues.push_back(std::make_pair(DstLI, DstVNI)); } if (CopyValues.empty()) return; DEBUG(dbgs() << "Got " << CopyValues.size() << " copies of " << *LI << '\n'); // Try to add defs of the copied values for each kill point. 
for (unsigned i = 0, e = Kills.size(); i != e; ++i) { SlotIndex Idx = Kills[i]; for (unsigned j = 0, e = CopyValues.size(); j != e; ++j) { LiveInterval *DstLI = CopyValues[j].first; const VNInfo *DstVNI = CopyValues[j].second; if (DstLI->getVNInfoAt(Idx) != DstVNI) continue; // Check that there isn't already a def at Idx LocMap::iterator I = locInts.find(Idx); if (I.valid() && I.start() <= Idx) continue; DEBUG(dbgs() << "Kill at " << Idx << " covered by valno #" << DstVNI->id << " in " << *DstLI << '\n'); MachineInstr *CopyMI = LIS.getInstructionFromIndex(DstVNI->def); assert(CopyMI && CopyMI->isCopy() && "Bad copy value"); unsigned LocNo = getLocationNo(CopyMI->getOperand(0)); I.insert(Idx, Idx.getNextSlot(), LocNo); NewDefs.push_back(std::make_pair(Idx, LocNo)); break; } } } void UserValue::computeIntervals(MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, LiveIntervals &LIS, MachineDominatorTree &MDT, UserValueScopes &UVS) { SmallVector<std::pair<SlotIndex, unsigned>, 16> Defs; // Collect all defs to be extended (Skipping undefs). for (LocMap::const_iterator I = locInts.begin(); I.valid(); ++I) if (I.value() != ~0u) Defs.push_back(std::make_pair(I.start(), I.value())); // Extend all defs, and possibly add new ones along the way. for (unsigned i = 0; i != Defs.size(); ++i) { SlotIndex Idx = Defs[i].first; unsigned LocNo = Defs[i].second; const MachineOperand &Loc = locations[LocNo]; if (!Loc.isReg()) { extendDef(Idx, LocNo, nullptr, nullptr, nullptr, LIS, MDT, UVS); continue; } // Register locations are constrained to where the register value is live. 
if (TargetRegisterInfo::isVirtualRegister(Loc.getReg())) { LiveInterval *LI = nullptr; const VNInfo *VNI = nullptr; if (LIS.hasInterval(Loc.getReg())) { LI = &LIS.getInterval(Loc.getReg()); VNI = LI->getVNInfoAt(Idx); } SmallVector<SlotIndex, 16> Kills; extendDef(Idx, LocNo, LI, VNI, &Kills, LIS, MDT, UVS); if (LI) addDefsFromCopies(LI, LocNo, Kills, Defs, MRI, LIS); continue; } // For physregs, use the live range of the first regunit as a guide. unsigned Unit = *MCRegUnitIterator(Loc.getReg(), &TRI); LiveRange *LR = &LIS.getRegUnit(Unit); const VNInfo *VNI = LR->getVNInfoAt(Idx); // Don't track copies from physregs, it is too expensive. extendDef(Idx, LocNo, LR, VNI, nullptr, LIS, MDT, UVS); } // Finally, erase all the undefs. for (LocMap::iterator I = locInts.begin(); I.valid();) if (I.value() == ~0u) I.erase(); else ++I; } void LDVImpl::computeIntervals() { for (unsigned i = 0, e = userValues.size(); i != e; ++i) { UserValueScopes UVS(userValues[i]->getDebugLoc(), LS); userValues[i]->computeIntervals(MF->getRegInfo(), *TRI, *LIS, *MDT, UVS); userValues[i]->mapVirtRegs(this); } } bool LDVImpl::runOnMachineFunction(MachineFunction &mf) { clear(); MF = &mf; LIS = &pass.getAnalysis<LiveIntervals>(); MDT = &pass.getAnalysis<MachineDominatorTree>(); TRI = mf.getSubtarget().getRegisterInfo(); LS.initialize(mf); DEBUG(dbgs() << "********** COMPUTING LIVE DEBUG VARIABLES: " << mf.getName() << " **********\n"); bool Changed = collectDebugValues(mf); computeIntervals(); DEBUG(print(dbgs())); ModifiedMF = Changed; return Changed; } static void removeDebugValues(MachineFunction &mf) { for (MachineBasicBlock &MBB : mf) { for (auto MBBI = MBB.begin(), MBBE = MBB.end(); MBBI != MBBE; ) { if (!MBBI->isDebugValue()) { ++MBBI; continue; } MBBI = MBB.erase(MBBI); } } } bool LiveDebugVariables::runOnMachineFunction(MachineFunction &mf) { if (!EnableLDV) return false; if (!mf.getFunction()->getSubprogram()) { removeDebugValues(mf); return false; } if (!pImpl) pImpl = new 
LDVImpl(this); return static_cast<LDVImpl*>(pImpl)->runOnMachineFunction(mf); } void LiveDebugVariables::releaseMemory() { if (pImpl) static_cast<LDVImpl*>(pImpl)->clear(); } LiveDebugVariables::~LiveDebugVariables() { if (pImpl) delete static_cast<LDVImpl*>(pImpl); } //===----------------------------------------------------------------------===// // Live Range Splitting //===----------------------------------------------------------------------===// bool UserValue::splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs, LiveIntervals& LIS) { DEBUG({ dbgs() << "Splitting Loc" << OldLocNo << '\t'; print(dbgs(), nullptr); }); bool DidChange = false; LocMap::iterator LocMapI; LocMapI.setMap(locInts); for (unsigned i = 0; i != NewRegs.size(); ++i) { LiveInterval *LI = &LIS.getInterval(NewRegs[i]); if (LI->empty()) continue; // Don't allocate the new LocNo until it is needed. unsigned NewLocNo = ~0u; // Iterate over the overlaps between locInts and LI. LocMapI.find(LI->beginIndex()); if (!LocMapI.valid()) continue; LiveInterval::iterator LII = LI->advanceTo(LI->begin(), LocMapI.start()); LiveInterval::iterator LIE = LI->end(); while (LocMapI.valid() && LII != LIE) { // At this point, we know that LocMapI.stop() > LII->start. LII = LI->advanceTo(LII, LocMapI.start()); if (LII == LIE) break; // Now LII->end > LocMapI.start(). Do we have an overlap? if (LocMapI.value() == OldLocNo && LII->start < LocMapI.stop()) { // Overlapping correct location. Allocate NewLocNo now. if (NewLocNo == ~0u) { MachineOperand MO = MachineOperand::CreateReg(LI->reg, false); MO.setSubReg(locations[OldLocNo].getSubReg()); NewLocNo = getLocationNo(MO); DidChange = true; } SlotIndex LStart = LocMapI.start(); SlotIndex LStop = LocMapI.stop(); // Trim LocMapI down to the LII overlap. if (LStart < LII->start) LocMapI.setStartUnchecked(LII->start); if (LStop > LII->end) LocMapI.setStopUnchecked(LII->end); // Change the value in the overlap. This may trigger coalescing. 
LocMapI.setValue(NewLocNo); // Re-insert any removed OldLocNo ranges. if (LStart < LocMapI.start()) { LocMapI.insert(LStart, LocMapI.start(), OldLocNo); ++LocMapI; assert(LocMapI.valid() && "Unexpected coalescing"); } if (LStop > LocMapI.stop()) { ++LocMapI; LocMapI.insert(LII->end, LStop, OldLocNo); --LocMapI; } } // Advance to the next overlap. if (LII->end < LocMapI.stop()) { if (++LII == LIE) break; LocMapI.advanceTo(LII->start); } else { ++LocMapI; if (!LocMapI.valid()) break; LII = LI->advanceTo(LII, LocMapI.start()); } } } // Finally, remove any remaining OldLocNo intervals and OldLocNo itself. locations.erase(locations.begin() + OldLocNo); LocMapI.goToBegin(); while (LocMapI.valid()) { unsigned v = LocMapI.value(); if (v == OldLocNo) { DEBUG(dbgs() << "Erasing [" << LocMapI.start() << ';' << LocMapI.stop() << ")\n"); LocMapI.erase(); } else { if (v > OldLocNo) LocMapI.setValueUnchecked(v-1); ++LocMapI; } } DEBUG({dbgs() << "Split result: \t"; print(dbgs(), nullptr);}); return DidChange; } bool UserValue::splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs, LiveIntervals &LIS) { bool DidChange = false; // Split locations referring to OldReg. Iterate backwards so splitLocation can // safely erase unused locations. for (unsigned i = locations.size(); i ; --i) { unsigned LocNo = i-1; const MachineOperand *Loc = &locations[LocNo]; if (!Loc->isReg() || Loc->getReg() != OldReg) continue; DidChange |= splitLocation(LocNo, NewRegs, LIS); } return DidChange; } void LDVImpl::splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs) { bool DidChange = false; for (UserValue *UV = lookupVirtReg(OldReg); UV; UV = UV->getNext()) DidChange |= UV->splitRegister(OldReg, NewRegs, *LIS); if (!DidChange) return; // Map all of the new virtual registers. 
UserValue *UV = lookupVirtReg(OldReg); for (unsigned i = 0; i != NewRegs.size(); ++i) mapVirtReg(NewRegs[i], UV); } void LiveDebugVariables:: splitRegister(unsigned OldReg, ArrayRef<unsigned> NewRegs, LiveIntervals &LIS) { if (pImpl) static_cast<LDVImpl*>(pImpl)->splitRegister(OldReg, NewRegs); } void UserValue::rewriteLocations(VirtRegMap &VRM, const TargetRegisterInfo &TRI) { // Iterate over locations in reverse makes it easier to handle coalescing. for (unsigned i = locations.size(); i ; --i) { unsigned LocNo = i-1; MachineOperand &Loc = locations[LocNo]; // Only virtual registers are rewritten. if (!Loc.isReg() || !Loc.getReg() || !TargetRegisterInfo::isVirtualRegister(Loc.getReg())) continue; unsigned VirtReg = Loc.getReg(); if (VRM.isAssignedReg(VirtReg) && TargetRegisterInfo::isPhysicalRegister(VRM.getPhys(VirtReg))) { // This can create a %noreg operand in rare cases when the sub-register // index is no longer available. That means the user value is in a // non-existent sub-register, and %noreg is exactly what we want. Loc.substPhysReg(VRM.getPhys(VirtReg), TRI); } else if (VRM.getStackSlot(VirtReg) != VirtRegMap::NO_STACK_SLOT) { // FIXME: Translate SubIdx to a stackslot offset. Loc = MachineOperand::CreateFI(VRM.getStackSlot(VirtReg)); } else { Loc.setReg(0); Loc.setSubReg(0); } coalesceLocation(LocNo); } } /// findInsertLocation - Find an iterator for inserting a DBG_VALUE /// instruction. static MachineBasicBlock::iterator findInsertLocation(MachineBasicBlock *MBB, SlotIndex Idx, LiveIntervals &LIS) { SlotIndex Start = LIS.getMBBStartIdx(MBB); Idx = Idx.getBaseIndex(); // Try to find an insert location by going backwards from Idx. MachineInstr *MI; while (!(MI = LIS.getInstructionFromIndex(Idx))) { // We've reached the beginning of MBB. if (Idx == Start) { MachineBasicBlock::iterator I = MBB->SkipPHIsAndLabels(MBB->begin()); return I; } Idx = Idx.getPrevIndex(); } // Don't insert anything after the first terminator, though. return MI->isTerminator() ? 
MBB->getFirstTerminator() : std::next(MachineBasicBlock::iterator(MI)); } void UserValue::insertDebugValue(MachineBasicBlock *MBB, SlotIndex Idx, unsigned LocNo, LiveIntervals &LIS, const TargetInstrInfo &TII) { MachineBasicBlock::iterator I = findInsertLocation(MBB, Idx, LIS); MachineOperand &Loc = locations[LocNo]; ++NumInsertedDebugValues; assert(cast<DILocalVariable>(Variable) ->isValidLocationForIntrinsic(getDebugLoc()) && "Expected inlined-at fields to agree"); if (Loc.isReg()) BuildMI(*MBB, I, getDebugLoc(), TII.get(TargetOpcode::DBG_VALUE), IsIndirect, Loc.getReg(), offset, Variable, Expression); else BuildMI(*MBB, I, getDebugLoc(), TII.get(TargetOpcode::DBG_VALUE)) .addOperand(Loc) .addImm(offset) .addMetadata(Variable) .addMetadata(Expression); } void UserValue::emitDebugValues(VirtRegMap *VRM, LiveIntervals &LIS, const TargetInstrInfo &TII) { MachineFunction::iterator MFEnd = VRM->getMachineFunction().end(); for (LocMap::const_iterator I = locInts.begin(); I.valid();) { SlotIndex Start = I.start(); SlotIndex Stop = I.stop(); unsigned LocNo = I.value(); DEBUG(dbgs() << "\t[" << Start << ';' << Stop << "):" << LocNo); MachineFunction::iterator MBB = LIS.getMBBFromIndex(Start)->getIterator(); SlotIndex MBBEnd = LIS.getMBBEndIdx(&*MBB); DEBUG(dbgs() << " BB#" << MBB->getNumber() << '-' << MBBEnd); insertDebugValue(&*MBB, Start, LocNo, LIS, TII); // This interval may span multiple basic blocks. // Insert a DBG_VALUE into each one. while(Stop > MBBEnd) { // Move to the next block. 
Start = MBBEnd; if (++MBB == MFEnd) break; MBBEnd = LIS.getMBBEndIdx(&*MBB); DEBUG(dbgs() << " BB#" << MBB->getNumber() << '-' << MBBEnd); insertDebugValue(&*MBB, Start, LocNo, LIS, TII); } DEBUG(dbgs() << '\n'); if (MBB == MFEnd) break; ++I; } } void LDVImpl::emitDebugValues(VirtRegMap *VRM) { DEBUG(dbgs() << "********** EMITTING LIVE DEBUG VARIABLES **********\n"); if (!MF) return; const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); for (unsigned i = 0, e = userValues.size(); i != e; ++i) { DEBUG(userValues[i]->print(dbgs(), TRI)); userValues[i]->rewriteLocations(*VRM, *TRI); userValues[i]->emitDebugValues(VRM, *LIS, *TII); } EmitDone = true; } void LiveDebugVariables::emitDebugValues(VirtRegMap *VRM) { if (pImpl) static_cast<LDVImpl*>(pImpl)->emitDebugValues(VRM); } bool LiveDebugVariables::doInitialization(Module &M) { return Pass::doInitialization(M); } #ifndef NDEBUG LLVM_DUMP_METHOD void LiveDebugVariables::dump() { if (pImpl) static_cast<LDVImpl*>(pImpl)->print(dbgs()); } #endif
cd80/UtilizedLLVM
lib/CodeGen/LiveDebugVariables.cpp
C++
unlicense
36,643
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.sling.maven.slingstart.run; import java.io.FileReader; import java.io.IOException; import java.io.Reader; import java.util.ArrayList; import java.util.List; import java.util.Properties; import org.apache.commons.io.IOUtils; import org.apache.maven.plugin.MojoExecutionException; import org.apache.maven.plugins.annotations.LifecyclePhase; import org.apache.maven.plugins.annotations.Mojo; /** * Stops the running launchpad instances. 
* */ @Mojo( name = "stop", defaultPhase = LifecyclePhase.POST_INTEGRATION_TEST, threadSafe = true ) public class StopMojo extends StartMojo { @Override public void execute() throws MojoExecutionException { if (this.skipLaunchpad) { this.getLog().info("Executing of the stop-multiple launchpad mojo is disabled by configuration."); return; } // read configurations final Properties launchpadConfigProps = new Properties(); Reader reader = null; try { reader = new FileReader(this.systemPropertiesFile); launchpadConfigProps.load(reader); } catch ( final IOException ioe) { throw new MojoExecutionException("Unable to read launchpad runner configuration properties.", ioe); } finally { IOUtils.closeQuietly(reader); } final int instances = Integer.valueOf(launchpadConfigProps.getProperty("launchpad.instances")); final List<ProcessDescription> configurations = new ArrayList<ProcessDescription>(); for(int i=1;i<=instances;i++) { final String id = launchpadConfigProps.getProperty("launchpad.instance.id." + String.valueOf(i)); final ProcessDescription config = ProcessDescriptionProvider.getInstance().getRunConfiguration(id); if ( config == null ) { getLog().warn("No launchpad configuration found for instance " + id); } else { configurations.add(config); } } if (configurations.size() > 0) { getLog().info(new StringBuilder("Stopping ").append(configurations.size()).append(" Launchpad instances").toString()); for (final ProcessDescription cfg : configurations) { try { LauncherCallable.stop(this.getLog(), cfg); ProcessDescriptionProvider.getInstance().removeRunConfiguration(cfg.getId()); } catch (Exception e) { throw new MojoExecutionException("Could not stop launchpad " + cfg.getId(), e); } } } else { getLog().warn("No stored configuration file was found at " + this.systemPropertiesFile + " - no Launchapd will be stopped"); } } }
dulvac/sling
tooling/maven/slingstart-maven-plugin/src/main/java/org/apache/sling/maven/slingstart/run/StopMojo.java
Java
apache-2.0
3,583
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; import org.elasticsearch.xpack.core.ml.job.config.PerPartitionCategorizationConfig; import java.io.IOException; import java.util.List; import java.util.Objects; public class UpdateProcessAction extends ActionType<UpdateProcessAction.Response> { public static final UpdateProcessAction INSTANCE = new UpdateProcessAction(); public static final String NAME = "cluster:internal/xpack/ml/job/update/process"; private UpdateProcessAction() { super(NAME, UpdateProcessAction.Response::new); } public static class Response extends BaseTasksResponse implements StatusToXContentObject, Writeable { private final boolean isUpdated; public Response() { super(null, null); this.isUpdated = true; } public Response(StreamInput in) throws IOException { super(in); isUpdated = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(isUpdated); } public boolean isUpdated() { return isUpdated; } @Override public RestStatus status() { return RestStatus.ACCEPTED; } @Override public XContentBuilder toXContent(XContentBuilder 
builder, Params params) throws IOException { builder.startObject(); builder.field("updated", isUpdated); builder.endObject(); return builder; } @Override public int hashCode() { return Objects.hashCode(isUpdated); } @Override public boolean equals(Object obj) { if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } Response other = (Response) obj; return this.isUpdated == other.isUpdated; } } public static class Request extends JobTaskRequest<Request> { private ModelPlotConfig modelPlotConfig; private PerPartitionCategorizationConfig perPartitionCategorizationConfig; private List<JobUpdate.DetectorUpdate> detectorUpdates; private MlFilter filter; private boolean updateScheduledEvents = false; public Request(StreamInput in) throws IOException { super(in); modelPlotConfig = in.readOptionalWriteable(ModelPlotConfig::new); perPartitionCategorizationConfig = in.readOptionalWriteable(PerPartitionCategorizationConfig::new); if (in.readBoolean()) { detectorUpdates = in.readList(JobUpdate.DetectorUpdate::new); } filter = in.readOptionalWriteable(MlFilter::new); updateScheduledEvents = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalWriteable(modelPlotConfig); out.writeOptionalWriteable(perPartitionCategorizationConfig); boolean hasDetectorUpdates = detectorUpdates != null; out.writeBoolean(hasDetectorUpdates); if (hasDetectorUpdates) { out.writeList(detectorUpdates); } out.writeOptionalWriteable(filter); out.writeBoolean(updateScheduledEvents); } public Request( String jobId, ModelPlotConfig modelPlotConfig, PerPartitionCategorizationConfig perPartitionCategorizationConfig, List<JobUpdate.DetectorUpdate> detectorUpdates, MlFilter filter, boolean updateScheduledEvents ) { super(jobId); this.modelPlotConfig = modelPlotConfig; this.perPartitionCategorizationConfig = perPartitionCategorizationConfig; this.detectorUpdates = detectorUpdates; this.filter = filter; 
this.updateScheduledEvents = updateScheduledEvents; } public ModelPlotConfig getModelPlotConfig() { return modelPlotConfig; } public PerPartitionCategorizationConfig getPerPartitionCategorizationConfig() { return perPartitionCategorizationConfig; } public List<JobUpdate.DetectorUpdate> getDetectorUpdates() { return detectorUpdates; } public MlFilter getFilter() { return filter; } public boolean isUpdateScheduledEvents() { return updateScheduledEvents; } @Override public int hashCode() { return Objects.hash( getJobId(), modelPlotConfig, perPartitionCategorizationConfig, detectorUpdates, filter, updateScheduledEvents ); } @Override public boolean equals(Object obj) { if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } Request other = (Request) obj; return Objects.equals(getJobId(), other.getJobId()) && Objects.equals(modelPlotConfig, other.modelPlotConfig) && Objects.equals(perPartitionCategorizationConfig, other.perPartitionCategorizationConfig) && Objects.equals(detectorUpdates, other.detectorUpdates) && Objects.equals(filter, other.filter) && Objects.equals(updateScheduledEvents, other.updateScheduledEvents); } } }
GlenRSmith/elasticsearch
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java
Java
apache-2.0
6,629
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (1.8.0_11) on Tue Aug 12 11:16:05 PDT 2014 --> <title>com.microsoft.windowsazure.mobileservices.table.serialization</title> <meta name="date" content="2014-08-12"> <link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style"> <script type="text/javascript" src="../../../../../../script.js"></script> </head> <body> <script type="text/javascript"><!-- try { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="com.microsoft.windowsazure.mobileservices.table.serialization"; } } catch(err) { } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar.top"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.top.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../../overview-summary.html">Overview</a></li> <li class="navBarCell1Rev">Package</li> <li>Class</li> <li><a href="package-use.html">Use</a></li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../../index-files/index-1.html">Index</a></li> <li><a href="../../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li><a href="../../../../../../com/microsoft/windowsazure/mobileservices/table/query/package-summary.html">Prev&nbsp;Package</a></li> <li><a href="../../../../../../com/microsoft/windowsazure/mobileservices/table/sync/package-summary.html">Next&nbsp;Package</a></li> </ul> <ul class="navList"> <li><a 
href="../../../../../../index.html?com/microsoft/windowsazure/mobileservices/table/serialization/package-summary.html" target="_top">Frames</a></li> <li><a href="package-summary.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="../../../../../../allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip.navbar.top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <div class="header"> <h1 title="Package" class="title">Package&nbsp;com.microsoft.windowsazure.mobileservices.table.serialization</h1> </div> <div class="contentContainer"> <ul class="blockList"> <li class="blockList"> <table class="typeSummary" border="0" cellpadding="3" cellspacing="0" summary="Class Summary table, listing classes, and an explanation"> <caption><span>Class Summary</span><span class="tabEnd">&nbsp;</span></caption> <tr> <th class="colFirst" scope="col">Class</th> <th class="colLast" scope="col">Description</th> </tr> <tbody> <tr class="altColor"> <td class="colFirst"><a href="../../../../../../com/microsoft/windowsazure/mobileservices/table/serialization/DateSerializer.html" title="class in com.microsoft.windowsazure.mobileservices.table.serialization">DateSerializer</a></td> <td class="colLast"> <div class="block">Date Serializer/Deserializer to make Mobile Services and Java dates compatible</div> </td> </tr> <tr class="rowColor"> <td class="colFirst"><a href="../../../../../../com/microsoft/windowsazure/mobileservices/table/serialization/JsonEntityParser.html" title="class in com.microsoft.windowsazure.mobileservices.table.serialization">JsonEntityParser</a></td> <td class="colLast">&nbsp;</td> </tr> <tr class="altColor"> <td class="colFirst"><a 
href="../../../../../../com/microsoft/windowsazure/mobileservices/table/serialization/LongSerializer.html" title="class in com.microsoft.windowsazure.mobileservices.table.serialization">LongSerializer</a></td> <td class="colLast"> <div class="block">Long Serializer to avoid losing precision when sending data to Mobile Services</div> </td> </tr> </tbody> </table> </li> </ul> </div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar.bottom"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.bottom.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../../overview-summary.html">Overview</a></li> <li class="navBarCell1Rev">Package</li> <li>Class</li> <li><a href="package-use.html">Use</a></li> <li><a href="package-tree.html">Tree</a></li> <li><a href="../../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../../index-files/index-1.html">Index</a></li> <li><a href="../../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li><a href="../../../../../../com/microsoft/windowsazure/mobileservices/table/query/package-summary.html">Prev&nbsp;Package</a></li> <li><a href="../../../../../../com/microsoft/windowsazure/mobileservices/table/sync/package-summary.html">Next&nbsp;Package</a></li> </ul> <ul class="navList"> <li><a href="../../../../../../index.html?com/microsoft/windowsazure/mobileservices/table/serialization/package-summary.html" target="_top">Frames</a></li> <li><a href="package-summary.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../../../../allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = 
"block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip.navbar.bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> </body> </html>
pragnagopa/azure-mobile-services
sdk/android/src/sdk/doc/com/microsoft/windowsazure/mobileservices/table/serialization/package-summary.html
HTML
apache-2.0
6,164
{ "EL": {"custom":["org.nutz.el.issue279.Uuuid"]} }
lzxz1234/nutz
test/org/nutz/el/issue279/279.js
JavaScript
apache-2.0
51
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System.Collections.Generic; using Microsoft.CodeAnalysis; using Microsoft.CodeAnalysis.CSharp.Extensions; using Microsoft.CodeAnalysis.CSharp.Syntax; using Microsoft.CodeAnalysis.Formatting.Rules; using Microsoft.CodeAnalysis.Options; namespace Microsoft.CodeAnalysis.CSharp.Formatting { internal class SpacingFormattingRule : BaseFormattingRule { public override AdjustSpacesOperation GetAdjustSpacesOperation(SyntaxToken previousToken, SyntaxToken currentToken, OptionSet optionSet, NextOperation<AdjustSpacesOperation> nextOperation) { if (optionSet == null) { return nextOperation.Invoke(); } System.Diagnostics.Debug.Assert(previousToken.Parent != null && currentToken.Parent != null); var previousKind = previousToken.Kind(); var currentKind = currentToken.Kind(); var previousParentKind = previousToken.Parent.Kind(); var currentParentKind = currentToken.Parent.Kind(); // For Method Declaration if (currentToken.IsOpenParenInParameterList() && previousKind == SyntaxKind.IdentifierToken) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpacingAfterMethodDeclarationName); } // For Generic Method Declaration if (currentToken.IsOpenParenInParameterList() && previousKind == SyntaxKind.GreaterThanToken && previousParentKind == SyntaxKind.TypeParameterList) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpacingAfterMethodDeclarationName); } // Case: public static implicit operator string(Program p) { return null; } if (previousToken.IsKeyword() && currentToken.IsOpenParenInParameterListOfAConversionOperatorDeclaration()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpacingAfterMethodDeclarationName); } // Case: public static Program operator !(Program p) { return null; } if (previousToken.Parent.IsKind(SyntaxKind.OperatorDeclaration) 
&& currentToken.IsOpenParenInParameterListOfAOperationDeclaration()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpacingAfterMethodDeclarationName); } if (previousToken.IsOpenParenInParameterList() && currentToken.IsCloseParenInParameterList()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceBetweenEmptyMethodDeclarationParentheses); } if (previousToken.IsOpenParenInParameterList()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceWithinMethodDeclarationParenthesis); } if (currentToken.IsCloseParenInParameterList()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceWithinMethodDeclarationParenthesis); } // For Method Call if (currentToken.IsOpenParenInArgumentList()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceAfterMethodCallName); } if (previousToken.IsOpenParenInArgumentList() && currentToken.IsCloseParenInArgumentList()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceBetweenEmptyMethodCallParentheses); } if (previousToken.IsOpenParenInArgumentList()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceWithinMethodCallParentheses); } if (currentToken.IsCloseParenInArgumentList()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceWithinMethodCallParentheses); } // For spacing around: typeof, default, and sizeof; treat like a Method Call if (currentKind == SyntaxKind.OpenParenToken && IsFunctionLikeKeywordExpressionKind(currentParentKind)) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceAfterMethodCallName); } if (previousKind == SyntaxKind.OpenParenToken && IsFunctionLikeKeywordExpressionKind(previousParentKind)) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceWithinMethodCallParentheses); } if (currentKind == SyntaxKind.CloseParenToken && 
IsFunctionLikeKeywordExpressionKind(currentParentKind)) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceWithinMethodCallParentheses); } // For Spacing b/n control flow keyword and paren. Parent check not needed. if (currentKind == SyntaxKind.OpenParenToken && (previousKind == SyntaxKind.IfKeyword || previousKind == SyntaxKind.WhileKeyword || previousKind == SyntaxKind.SwitchKeyword || previousKind == SyntaxKind.ForKeyword || previousKind == SyntaxKind.ForEachKeyword || previousKind == SyntaxKind.CatchKeyword || previousKind == SyntaxKind.UsingKeyword)) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceAfterControlFlowStatementKeyword); } // For spacing between parenthesis and expression if ((previousParentKind == SyntaxKind.ParenthesizedExpression && previousKind == SyntaxKind.OpenParenToken) || (currentParentKind == SyntaxKind.ParenthesizedExpression && currentKind == SyntaxKind.CloseParenToken)) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceWithinExpressionParentheses); } // For spacing between the parenthesis and the cast expression if ((previousParentKind == SyntaxKind.CastExpression && previousKind == SyntaxKind.OpenParenToken) || (currentParentKind == SyntaxKind.CastExpression && currentKind == SyntaxKind.CloseParenToken)) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceWithinCastParentheses); } // For spacing between the parenthesis and the expression inside the control flow expression if (previousKind == SyntaxKind.OpenParenToken && IsControlFlowLikeKeywordStatementKind(previousParentKind)) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceWithinOtherParentheses); } // Semicolons in an empty for statement. i.e. 
for(;;) if (previousKind == SyntaxKind.OpenParenToken || previousKind == SyntaxKind.SemicolonToken) { if (previousToken.Parent.Kind() == SyntaxKind.ForStatement) { var forStatement = (ForStatementSyntax)previousToken.Parent; if (forStatement.Initializers.Count == 0 && forStatement.Declaration == null && forStatement.Condition == null && forStatement.Incrementors.Count == 0) { return CreateAdjustSpacesOperation(0, AdjustSpacesOption.ForceSpaces); } } } if (currentKind == SyntaxKind.CloseParenToken && IsControlFlowLikeKeywordStatementKind(currentParentKind)) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceWithinOtherParentheses); } // For spacing after the cast if (previousParentKind == SyntaxKind.CastExpression && previousKind == SyntaxKind.CloseParenToken) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceAfterCast); } // For spacing Before Square Braces if (currentKind == SyntaxKind.OpenBracketToken && HasFormattableBracketParent(currentToken) && !previousToken.IsOpenBraceOrCommaOfObjectInitializer()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceBeforeOpenSquareBracket); } // For spacing empty square braces if (previousKind == SyntaxKind.OpenBracketToken && (currentKind == SyntaxKind.CloseBracketToken || currentKind == SyntaxKind.OmittedArraySizeExpressionToken) && HasFormattableBracketParent(previousToken)) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceBetweenEmptySquareBrackets); } // For spacing square brackets within if (previousKind == SyntaxKind.OpenBracketToken && HasFormattableBracketParent(previousToken)) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceWithinSquareBrackets); } else if (currentKind == SyntaxKind.CloseBracketToken && HasFormattableBracketParent(currentToken)) { if (currentToken.Parent is ArrayRankSpecifierSyntax) { var parent = currentToken.Parent as ArrayRankSpecifierSyntax; if 
((parent.Sizes.Any() && parent.Sizes.First().Kind() != SyntaxKind.OmittedArraySizeExpression) || parent.Sizes.SeparatorCount > 0) { // int []: added spacing operation on open [ // int[1], int[,]: need spacing operation return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceWithinSquareBrackets); } } else { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceWithinSquareBrackets); } } // For spacing delimiters - after colon if (previousToken.IsColonInTypeBaseList()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceAfterColonInBaseTypeDeclaration); } // For spacing delimiters - before colon if (currentToken.IsColonInTypeBaseList()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceBeforeColonInBaseTypeDeclaration); } // For spacing delimiters - after comma if ((previousToken.IsCommaInArgumentOrParameterList() && currentKind != SyntaxKind.OmittedTypeArgumentToken) || previousToken.IsCommaInInitializerExpression()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceAfterComma); } // For spacing delimiters - before comma if ((currentToken.IsCommaInArgumentOrParameterList() && previousKind != SyntaxKind.OmittedTypeArgumentToken) || currentToken.IsCommaInInitializerExpression()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceBeforeComma); } // For Spacing delimiters - after Dot if (previousToken.IsDotInMemberAccessOrQualifiedName()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceAfterDot); } // For spacing delimiters - before Dot if (currentToken.IsDotInMemberAccessOrQualifiedName()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceBeforeDot); } // For spacing delimiters - after semicolon if (previousToken.IsSemicolonInForStatement() && currentKind != SyntaxKind.CloseParenToken) { return AdjustSpacesOperationZeroOrOne(optionSet, 
CSharpFormattingOptions.SpaceAfterSemicolonsInForStatement); } // For spacing delimiters - before semicolon if (currentToken.IsSemicolonInForStatement()) { return AdjustSpacesOperationZeroOrOne(optionSet, CSharpFormattingOptions.SpaceBeforeSemicolonsInForStatement); } // For spacing around the binary operators if (currentToken.Parent is BinaryExpressionSyntax || previousToken.Parent is BinaryExpressionSyntax || currentToken.Parent is AssignmentExpressionSyntax || previousToken.Parent is AssignmentExpressionSyntax) { switch (optionSet.GetOption(CSharpFormattingOptions.SpacingAroundBinaryOperator)) { case BinaryOperatorSpacingOptions.Single: return CreateAdjustSpacesOperation(1, AdjustSpacesOption.ForceSpacesIfOnSingleLine); case BinaryOperatorSpacingOptions.Remove: if (currentKind == SyntaxKind.IsKeyword || currentKind == SyntaxKind.AsKeyword || previousKind == SyntaxKind.IsKeyword || previousKind == SyntaxKind.AsKeyword) { // User want spaces removed but at least one is required for the "as" & "is" keyword return CreateAdjustSpacesOperation(1, AdjustSpacesOption.ForceSpacesIfOnSingleLine); } else { return CreateAdjustSpacesOperation(0, AdjustSpacesOption.ForceSpacesIfOnSingleLine); } case BinaryOperatorSpacingOptions.Ignore: return CreateAdjustSpacesOperation(0, AdjustSpacesOption.PreserveSpaces); default: System.Diagnostics.Debug.Assert(false, "Invalid BinaryOperatorSpacingOptions"); break; } } // No space after $" and $@" at the start of an interpolated string if (previousKind == SyntaxKind.InterpolatedStringStartToken || previousKind == SyntaxKind.InterpolatedVerbatimStringStartToken) { return CreateAdjustSpacesOperation(0, AdjustSpacesOption.ForceSpaces); } // No space before " at the end of an interpolated string if (currentKind == SyntaxKind.InterpolatedStringEndToken) { return CreateAdjustSpacesOperation(0, AdjustSpacesOption.ForceSpaces); } // No space before { or after } in interpolations if ((currentKind == SyntaxKind.OpenBraceToken && currentToken.Parent 
is InterpolationSyntax) || (previousKind == SyntaxKind.CloseBraceToken && previousToken.Parent is InterpolationSyntax)) { return CreateAdjustSpacesOperation(0, AdjustSpacesOption.ForceSpaces); } // Preserve space after { or before } in interpolations (i.e. between the braces and the expression) if ((previousKind == SyntaxKind.OpenBraceToken && previousToken.Parent is InterpolationSyntax) || (currentKind == SyntaxKind.CloseBraceToken && currentToken.Parent is InterpolationSyntax)) { return CreateAdjustSpacesOperation(0, AdjustSpacesOption.PreserveSpaces); } // No space before or after , in interpolation alignment clause if ((previousKind == SyntaxKind.CommaToken && previousToken.Parent is InterpolationAlignmentClauseSyntax) || (currentKind == SyntaxKind.CommaToken && currentToken.Parent is InterpolationAlignmentClauseSyntax)) { return CreateAdjustSpacesOperation(0, AdjustSpacesOption.ForceSpaces); } // No space before or after : in interpolation format clause if ((previousKind == SyntaxKind.ColonToken && previousToken.Parent is InterpolationFormatClauseSyntax) || (currentKind == SyntaxKind.ColonToken && currentToken.Parent is InterpolationFormatClauseSyntax)) { return CreateAdjustSpacesOperation(0, AdjustSpacesOption.ForceSpaces); } return nextOperation.Invoke(); } public override void AddSuppressOperations(List<SuppressOperation> list, SyntaxNode node, SyntaxToken lastToken, OptionSet optionSet, NextAction<SuppressOperation> nextOperation) { nextOperation.Invoke(list); SuppressVariableDeclaration(list, node, optionSet); } private void SuppressVariableDeclaration(List<SuppressOperation> list, SyntaxNode node, OptionSet optionSet) { if (node.IsKind(SyntaxKind.FieldDeclaration) || node.IsKind(SyntaxKind.EventDeclaration) || node.IsKind(SyntaxKind.EventFieldDeclaration) || node.IsKind(SyntaxKind.LocalDeclarationStatement)) { if (optionSet.GetOption(CSharpFormattingOptions.SpacesIgnoreAroundVariableDeclaration)) { var firstToken = node.GetFirstToken(includeZeroWidth: 
true); var lastToken = node.GetLastToken(includeZeroWidth: true); list.Add(FormattingOperations.CreateSuppressOperation(firstToken, lastToken, SuppressOption.NoSpacing)); } } } private AdjustSpacesOperation AdjustSpacesOperationZeroOrOne(OptionSet optionSet, Option<bool> option, AdjustSpacesOption explicitOption = AdjustSpacesOption.ForceSpacesIfOnSingleLine) { if (optionSet.GetOption(option)) { return CreateAdjustSpacesOperation(1, explicitOption); } else { return CreateAdjustSpacesOperation(0, explicitOption); } } private bool HasFormattableBracketParent(SyntaxToken token) { return token.Parent.IsKind(SyntaxKind.ArrayRankSpecifier, SyntaxKind.BracketedArgumentList, SyntaxKind.BracketedParameterList, SyntaxKind.ImplicitArrayCreationExpression); } private bool IsFunctionLikeKeywordExpressionKind(SyntaxKind syntaxKind) { return (syntaxKind == SyntaxKind.TypeOfExpression || syntaxKind == SyntaxKind.DefaultExpression || syntaxKind == SyntaxKind.SizeOfExpression); } private bool IsControlFlowLikeKeywordStatementKind(SyntaxKind syntaxKind) { return (syntaxKind == SyntaxKind.IfStatement || syntaxKind == SyntaxKind.WhileStatement || syntaxKind == SyntaxKind.SwitchStatement || syntaxKind == SyntaxKind.ForStatement || syntaxKind == SyntaxKind.ForEachStatement || syntaxKind == SyntaxKind.DoStatement || syntaxKind == SyntaxKind.CatchDeclaration || syntaxKind == SyntaxKind.UsingStatement || syntaxKind == SyntaxKind.LockStatement || syntaxKind == SyntaxKind.FixedStatement); } } }
KevinRansom/roslyn
src/Workspaces/CSharp/Portable/Formatting/Rules/SpacingFormattingRule.cs
C#
apache-2.0
19,660
#include <lib/lib.h> int main(int argc, char* argv[]) { return MACRO_IN_LIB; }
marcinkwiatkowski/buck
test/com/facebook/buck/cxx/testdata/reexport_header_deps/bin.c
C
apache-2.0
82
#![crate_name = "foo"] #![doc(html_playground_url = "")] // compile-flags:-Z unstable-options --playground-url https://play.rust-lang.org/ //! module docs //! //! ``` //! println!("Hello, world!"); //! ``` // @!has foo/index.html '//a[@class="test-arrow"]' "Run"
graydon/rust
src/test/rustdoc/playground-empty.rs
Rust
apache-2.0
267
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (version 1.6.0_27) on Thu Dec 26 19:42:35 EST 2013 --> <title>com.jwetherell.augmented_reality.ui.objects</title> <meta name="date" content="2013-12-26"> <link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style"> </head> <body> <h1 class="bar"><a href="../../../../../com/jwetherell/augmented_reality/ui/objects/package-summary.html" target="classFrame">com.jwetherell.augmented_reality.ui.objects</a></h1> <div class="indexContainer"> <h2 title="Classes">Classes</h2> <ul title="Classes"> <li><a href="PaintableBox.html" title="class in com.jwetherell.augmented_reality.ui.objects" target="classFrame">PaintableBox</a></li> <li><a href="PaintableBoxedText.html" title="class in com.jwetherell.augmented_reality.ui.objects" target="classFrame">PaintableBoxedText</a></li> <li><a href="PaintableCircle.html" title="class in com.jwetherell.augmented_reality.ui.objects" target="classFrame">PaintableCircle</a></li> <li><a href="PaintableGps.html" title="class in com.jwetherell.augmented_reality.ui.objects" target="classFrame">PaintableGps</a></li> <li><a href="PaintableIcon.html" title="class in com.jwetherell.augmented_reality.ui.objects" target="classFrame">PaintableIcon</a></li> <li><a href="PaintableLine.html" title="class in com.jwetherell.augmented_reality.ui.objects" target="classFrame">PaintableLine</a></li> <li><a href="PaintableObject.html" title="class in com.jwetherell.augmented_reality.ui.objects" target="classFrame">PaintableObject</a></li> <li><a href="PaintablePoint.html" title="class in com.jwetherell.augmented_reality.ui.objects" target="classFrame">PaintablePoint</a></li> <li><a href="PaintablePosition.html" title="class in com.jwetherell.augmented_reality.ui.objects" target="classFrame">PaintablePosition</a></li> <li><a href="PaintableRadarPoints.html" 
title="class in com.jwetherell.augmented_reality.ui.objects" target="classFrame">PaintableRadarPoints</a></li> <li><a href="PaintableText.html" title="class in com.jwetherell.augmented_reality.ui.objects" target="classFrame">PaintableText</a></li> </ul> </div> </body> </html>
gulingfengze/Guide-AR
javadocs/com/jwetherell/augmented_reality/ui/objects/package-frame.html
HTML
apache-2.0
2,266
/* GStreamer * Copyright (C) <2013> Wim Taymans <wim.taymans@gmail.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301, USA. */ #include <stdio.h> #include <stdlib.h> #include <gst/gst.h> #include "gst/glib-compat-private.h" #define BUFFER_SIZE (1400) gint main (gint argc, gchar * argv[]) { gint i; GstBuffer *tmp; GstBufferPool *pool; GstClockTime start, end; GstClockTimeDiff dur1, dur2; guint64 nbuffers; GstStructure *conf; gst_init (&argc, &argv); if (argc != 2) { g_print ("usage: %s <nbuffers>\n", argv[0]); exit (-1); } nbuffers = atoi (argv[1]); if (nbuffers <= 0) { g_print ("number of buffers must be greater than 0\n"); exit (-3); } /* Let's just make sure the GstBufferClass is loaded ... 
*/ tmp = gst_buffer_new (); gst_buffer_unref (tmp); pool = gst_buffer_pool_new (); conf = gst_buffer_pool_get_config (pool); gst_buffer_pool_config_set_params (conf, NULL, BUFFER_SIZE, 0, 0); gst_buffer_pool_set_config (pool, conf); gst_buffer_pool_set_active (pool, TRUE); /* allocate buffers directly */ start = gst_util_get_timestamp (); for (i = 0; i < nbuffers; i++) { tmp = gst_buffer_new_allocate (NULL, BUFFER_SIZE, NULL); gst_buffer_unref (tmp); } end = gst_util_get_timestamp (); dur1 = GST_CLOCK_DIFF (start, end); g_print ("*** total %" GST_TIME_FORMAT " - average %" GST_TIME_FORMAT " - Done creating %" G_GUINT64_FORMAT " fresh buffers\n", GST_TIME_ARGS (dur1), GST_TIME_ARGS (dur1 / nbuffers), nbuffers); /* allocate buffers from the pool */ start = gst_util_get_timestamp (); for (i = 0; i < nbuffers; i++) { gst_buffer_pool_acquire_buffer (pool, &tmp, NULL); gst_buffer_unref (tmp); } end = gst_util_get_timestamp (); dur2 = GST_CLOCK_DIFF (start, end); g_print ("*** total %" GST_TIME_FORMAT " - average %" GST_TIME_FORMAT " - Done creating %" G_GUINT64_FORMAT " pooled buffers\n", GST_TIME_ARGS (dur2), GST_TIME_ARGS (dur2 / nbuffers), nbuffers); g_print ("*** speedup %6.4lf\n", ((gdouble) dur1 / (gdouble) dur2)); gst_buffer_pool_set_active (pool, FALSE); gst_object_unref (pool); return 0; }
google/aistreams
third_party/gstreamer/tests/benchmarks/gstpoolstress.c
C
apache-2.0
2,862
/** * Copyright (C) 2011, 2012 camunda services GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.integrationtest.deployment.war; import org.camunda.bpm.engine.RepositoryService; import org.camunda.bpm.integrationtest.util.AbstractFoxPlatformIntegrationTest; import org.jboss.arquillian.container.test.api.Deployment; import org.jboss.arquillian.junit.Arquillian; import org.jboss.shrinkwrap.api.spec.WebArchive; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @RunWith(Arquillian.class) public class TestWarDeployment extends AbstractFoxPlatformIntegrationTest { @Deployment public static WebArchive processArchive() { return initWebArchiveDeployment() .addAsResource("org/camunda/bpm/integrationtest/testDeployProcessArchive.bpmn20.xml"); } @Test public void testDeployProcessArchive() { Assert.assertNotNull(processEngine); RepositoryService repositoryService = processEngine.getRepositoryService(); long count = repositoryService.createProcessDefinitionQuery() .processDefinitionKey("testDeployProcessArchive") .count(); Assert.assertEquals(1, count); } }
subhrajyotim/camunda-bpm-platform
qa/integration-tests-engine/src/test/java/org/camunda/bpm/integrationtest/deployment/war/TestWarDeployment.java
Java
apache-2.0
1,697
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using Xunit; using System.Linq; using Microsoft.CodeAnalysis.CSharp.Test.Utilities; using Microsoft.CodeAnalysis.Test.Utilities; using Roslyn.Test.Utilities; using Xunit.Abstractions; namespace Microsoft.CodeAnalysis.CSharp.UnitTests.Parsing { [CompilerTrait(CompilerFeature.ReadOnlyReferences)] public class RefStructs : ParsingTests { public RefStructs(ITestOutputHelper output) : base(output) { } protected override SyntaxTree ParseTree(string text, CSharpParseOptions options) { return SyntaxFactory.ParseSyntaxTree(text, options: options); } [Fact] public void RefStructSimple() { var text = @" class Program { ref struct S1{} public ref struct S2{} } "; var comp = CreateCompilationWithMscorlib45(text, parseOptions: TestOptions.Regular.WithLanguageVersion(LanguageVersion.Latest), options: TestOptions.DebugDll); comp.VerifyDiagnostics(); } [Fact] public void RefStructSimpleLangVer() { var text = @" class Program { ref struct S1{} public ref struct S2{} } "; var comp = CreateCompilationWithMscorlib45(text, parseOptions: TestOptions.Regular.WithLanguageVersion(LanguageVersion.CSharp7), options: TestOptions.DebugDll); comp.VerifyDiagnostics( // (4,5): error CS8107: Feature 'ref structs' is not available in C# 7. Please use language version 7.2 or greater. // ref struct S1{} Diagnostic(ErrorCode.ERR_FeatureNotAvailableInVersion7, "ref").WithArguments("ref structs", "7.2").WithLocation(4, 5), // (6,12): error CS8107: Feature 'ref structs' is not available in C# 7. Please use language version 7.2 or greater. 
// public ref struct S2{} Diagnostic(ErrorCode.ERR_FeatureNotAvailableInVersion7, "ref").WithArguments("ref structs", "7.2").WithLocation(6, 12) ); } [Fact] public void RefStructErr() { var text = @" class Program { ref class S1{} public ref unsafe struct S2{} ref interface I1{}; public ref delegate ref int D1(); } "; var comp = CreateCompilationWithMscorlib45(text, parseOptions: TestOptions.Regular.WithLanguageVersion(LanguageVersion.Latest), options: TestOptions.DebugDll); comp.VerifyDiagnostics( // (4,9): error CS1031: Type expected // ref class S1{} Diagnostic(ErrorCode.ERR_TypeExpected, "class"), // (6,16): error CS1031: Type expected // public ref unsafe struct S2{} Diagnostic(ErrorCode.ERR_TypeExpected, "unsafe"), // (8,9): error CS1031: Type expected // ref interface I1{}; Diagnostic(ErrorCode.ERR_TypeExpected, "interface").WithLocation(8, 9), // (10,16): error CS1031: Type expected // public ref delegate ref int D1(); Diagnostic(ErrorCode.ERR_TypeExpected, "delegate").WithLocation(10, 16), // (6,30): error CS0227: Unsafe code may only appear if compiling with /unsafe // public ref unsafe struct S2{} Diagnostic(ErrorCode.ERR_IllegalUnsafe, "S2") ); } [Fact] public void PartialRefStruct() { var text = @" class Program { partial ref struct S {} partial ref struct S {} } "; var comp = CreateCompilation(text); comp.VerifyDiagnostics( // (4,13): error CS1585: Member modifier 'ref' must precede the member type and name // partial ref struct S {} Diagnostic(ErrorCode.ERR_BadModifierLocation, "ref").WithArguments("ref").WithLocation(4, 13), // (5,13): error CS1585: Member modifier 'ref' must precede the member type and name // partial ref struct S {} Diagnostic(ErrorCode.ERR_BadModifierLocation, "ref").WithArguments("ref").WithLocation(5, 13), // (5,24): error CS0102: The type 'Program' already contains a definition for 'S' // partial ref struct S {} Diagnostic(ErrorCode.ERR_DuplicateNameInClass, "S").WithArguments("Program", "S").WithLocation(5, 24)); } [Fact] 
public void RefPartialStruct() { var comp = CreateCompilation(@" class C { ref partial struct S {} ref partial struct S {} }"); comp.VerifyDiagnostics(); } [Fact] public void RefPartialReadonlyStruct() { var comp = CreateCompilation(@" class C { ref partial readonly struct S {} ref partial readonly struct S {} }"); comp.VerifyDiagnostics( // (4,17): error CS1585: Member modifier 'readonly' must precede the member type and name // ref partial readonly struct S {} Diagnostic(ErrorCode.ERR_BadModifierLocation, "readonly").WithArguments("readonly").WithLocation(4, 17), // (5,17): error CS1585: Member modifier 'readonly' must precede the member type and name // ref partial readonly struct S {} Diagnostic(ErrorCode.ERR_BadModifierLocation, "readonly").WithArguments("readonly").WithLocation(5, 17), // (5,33): error CS0102: The type 'C' already contains a definition for 'S' // ref partial readonly struct S {} Diagnostic(ErrorCode.ERR_DuplicateNameInClass, "S").WithArguments("C", "S").WithLocation(5, 33)); } [Fact] public void RefReadonlyPartialStruct() { var comp = CreateCompilation(@" class C { partial ref readonly struct S {} partial ref readonly struct S {} }"); comp.VerifyDiagnostics( // (4,13): error CS1585: Member modifier 'ref' must precede the member type and name // partial ref readonly struct S {} Diagnostic(ErrorCode.ERR_BadModifierLocation, "ref").WithArguments("ref").WithLocation(4, 13), // (4,26): error CS1031: Type expected // partial ref readonly struct S {} Diagnostic(ErrorCode.ERR_TypeExpected, "struct").WithLocation(4, 26), // (5,13): error CS1585: Member modifier 'ref' must precede the member type and name // partial ref readonly struct S {} Diagnostic(ErrorCode.ERR_BadModifierLocation, "ref").WithArguments("ref").WithLocation(5, 13), // (5,26): error CS1031: Type expected // partial ref readonly struct S {} Diagnostic(ErrorCode.ERR_TypeExpected, "struct").WithLocation(5, 26), // (5,33): error CS0102: The type 'C' already contains a definition for 'S' 
// partial ref readonly struct S {} Diagnostic(ErrorCode.ERR_DuplicateNameInClass, "S").WithArguments("C", "S").WithLocation(5, 33)); } [Fact] public void ReadonlyPartialRefStruct() { var comp = CreateCompilation(@" class C { readonly partial ref struct S {} readonly partial ref struct S {} }"); comp.VerifyDiagnostics( // (4,22): error CS1585: Member modifier 'ref' must precede the member type and name // readonly partial ref struct S {} Diagnostic(ErrorCode.ERR_BadModifierLocation, "ref").WithArguments("ref").WithLocation(4, 22), // (5,22): error CS1585: Member modifier 'ref' must precede the member type and name // readonly partial ref struct S {} Diagnostic(ErrorCode.ERR_BadModifierLocation, "ref").WithArguments("ref").WithLocation(5, 22), // (5,33): error CS0102: The type 'C' already contains a definition for 'S' // readonly partial ref struct S {} Diagnostic(ErrorCode.ERR_DuplicateNameInClass, "S").WithArguments("C", "S").WithLocation(5, 33)); } [Fact] public void ReadonlyRefPartialStruct() { var comp = CreateCompilation(@" class C { readonly ref partial struct S {} readonly ref partial struct S {} }"); comp.VerifyDiagnostics(); } [Fact] public void StackAllocParsedAsSpan_Declaration() { CreateCompilationWithMscorlibAndSpan(@" using System; class Test { unsafe public void M() { int* a = stackalloc int[10]; var b = stackalloc int[10]; Span<int> c = stackalloc int [10]; } }", TestOptions.UnsafeDebugDll).GetParseDiagnostics().Verify(); } [Fact] public void StackAllocParsedAsSpan_LocalFunction() { CreateCompilationWithMscorlibAndSpan(@" using System; class Test { public void M() { unsafe void local() { int* x = stackalloc int[10]; } } }").GetParseDiagnostics().Verify(); } [Fact] public void StackAllocParsedAsSpan_MethodCall() { CreateCompilationWithMscorlibAndSpan(@" using System; class Test { public void M() { Visit(stackalloc int [10]); } public void Visit(Span<int> s) { } }").GetParseDiagnostics().Verify(); } [Fact] public void 
StackAllocParsedAsSpan_DotAccess() { CreateCompilationWithMscorlibAndSpan(@" using System; class Test { public void M() { Console.WriteLine((stackalloc int [10]).Length); } }").GetParseDiagnostics().Verify(); } [Fact] public void StackAllocParsedAsSpan_Cast() { CreateCompilationWithMscorlibAndSpan(@" using System; class Test { public void M() { void* x = (void*)(stackalloc int[10]); } }").GetParseDiagnostics().Verify(); } } }
OmarTawfik/roslyn
src/Compilers/CSharp/Test/Syntax/Parsing/RefStructs.cs
C#
apache-2.0
10,279
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License * 2.0 and the Server Side Public License, v 1; you may not use this file except * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ package org.elasticsearch.threadpool; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Collections; import java.util.Iterator; import java.util.List; public class ThreadPoolStats implements Writeable, ToXContentFragment, Iterable<ThreadPoolStats.Stats> { public static class Stats implements Writeable, ToXContentFragment, Comparable<Stats> { private final String name; private final int threads; private final int queue; private final int active; private final long rejected; private final int largest; private final long completed; public Stats(String name, int threads, int queue, int active, long rejected, int largest, long completed) { this.name = name; this.threads = threads; this.queue = queue; this.active = active; this.rejected = rejected; this.largest = largest; this.completed = completed; } public Stats(StreamInput in) throws IOException { name = in.readString(); threads = in.readInt(); queue = in.readInt(); active = in.readInt(); rejected = in.readLong(); largest = in.readInt(); completed = in.readLong(); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeInt(threads); out.writeInt(queue); out.writeInt(active); out.writeLong(rejected); out.writeInt(largest); out.writeLong(completed); } public String getName() { return this.name; } public int getThreads() { return 
this.threads; } public int getQueue() { return this.queue; } public int getActive() { return this.active; } public long getRejected() { return rejected; } public int getLargest() { return largest; } public long getCompleted() { return this.completed; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(name); if (threads != -1) { builder.field(Fields.THREADS, threads); } if (queue != -1) { builder.field(Fields.QUEUE, queue); } if (active != -1) { builder.field(Fields.ACTIVE, active); } if (rejected != -1) { builder.field(Fields.REJECTED, rejected); } if (largest != -1) { builder.field(Fields.LARGEST, largest); } if (completed != -1) { builder.field(Fields.COMPLETED, completed); } builder.endObject(); return builder; } @Override public int compareTo(Stats other) { if ((getName() == null) && (other.getName() == null)) { return 0; } else if ((getName() != null) && (other.getName() == null)) { return 1; } else if (getName() == null) { return -1; } else { int compare = getName().compareTo(other.getName()); if (compare == 0) { compare = Integer.compare(getThreads(), other.getThreads()); } return compare; } } } private List<Stats> stats; public ThreadPoolStats(List<Stats> stats) { Collections.sort(stats); this.stats = stats; } public ThreadPoolStats(StreamInput in) throws IOException { stats = in.readList(Stats::new); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeList(stats); } @Override public Iterator<Stats> iterator() { return stats.iterator(); } static final class Fields { static final String THREAD_POOL = "thread_pool"; static final String THREADS = "threads"; static final String QUEUE = "queue"; static final String ACTIVE = "active"; static final String REJECTED = "rejected"; static final String LARGEST = "largest"; static final String COMPLETED = "completed"; } @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) 
throws IOException { builder.startObject(Fields.THREAD_POOL); for (Stats stat : stats) { stat.toXContent(builder, params); } builder.endObject(); return builder; } }
GlenRSmith/elasticsearch
server/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java
Java
apache-2.0
5,488
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (version 1.6.0_27) on Thu Dec 26 19:42:35 EST 2013 --> <title>Overview List</title> <meta name="date" content="2013-12-26"> <link rel="stylesheet" type="text/css" href="stylesheet.css" title="Style"> </head> <body> <div class="indexHeader"><a href="allclasses-frame.html" target="packageFrame">All Classes</a></div> <div class="indexContainer"> <h2 title="Packages">Packages</h2> <ul title="Packages"> <li><a href="com/jwetherell/augmented_reality/package-frame.html" target="packageFrame">com.jwetherell.augmented_reality</a></li> <li><a href="com/jwetherell/augmented_reality/activity/package-frame.html" target="packageFrame">com.jwetherell.augmented_reality.activity</a></li> <li><a href="com/jwetherell/augmented_reality/camera/package-frame.html" target="packageFrame">com.jwetherell.augmented_reality.camera</a></li> <li><a href="com/jwetherell/augmented_reality/common/package-frame.html" target="packageFrame">com.jwetherell.augmented_reality.common</a></li> <li><a href="com/jwetherell/augmented_reality/data/package-frame.html" target="packageFrame">com.jwetherell.augmented_reality.data</a></li> <li><a href="com/jwetherell/augmented_reality/ui/package-frame.html" target="packageFrame">com.jwetherell.augmented_reality.ui</a></li> <li><a href="com/jwetherell/augmented_reality/ui/objects/package-frame.html" target="packageFrame">com.jwetherell.augmented_reality.ui.objects</a></li> <li><a href="com/jwetherell/augmented_reality/widget/package-frame.html" target="packageFrame">com.jwetherell.augmented_reality.widget</a></li> </ul> </div> <p>&nbsp;</p> </body> </html>
gulingfengze/Guide-AR
javadocs/overview-frame.html
HTML
apache-2.0
1,753
/* * Copyright 2003,2004 The Apache Software Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.sf.cglib.proxy; import java.util.List; import net.sf.cglib.core.*; interface CallbackGenerator { void generate(ClassEmitter ce, Context context, List methods) throws Exception; void generateStatic(CodeEmitter e, Context context, List methods) throws Exception; interface Context { ClassLoader getClassLoader(); CodeEmitter beginMethod(ClassEmitter ce, MethodInfo method); int getOriginalModifiers(MethodInfo method); int getIndex(MethodInfo method); void emitCallback(CodeEmitter ce, int index); Signature getImplSignature(MethodInfo method); void emitInvoke(CodeEmitter e, MethodInfo method); } }
vongosling/cglib-ext
src/proxy/net/sf/cglib/proxy/CallbackGenerator.java
Java
apache-2.0
1,311
import unittest import pysal from pysal.core.IOHandlers.arcgis_swm import ArcGISSwmIO import tempfile import os class test_ArcGISSwmIO(unittest.TestCase): def setUp(self): self.test_file = test_file = pysal.examples.get_path('ohio.swm') self.obj = ArcGISSwmIO(test_file, 'r') def test_close(self): f = self.obj f.close() self.failUnlessRaises(ValueError, f.read) def test_read(self): w = self.obj.read() self.assertEqual(88, w.n) self.assertEqual(5.25, w.mean_neighbors) self.assertEqual([1.0, 1.0, 1.0, 1.0], w[1].values()) def test_seek(self): self.test_read() self.failUnlessRaises(StopIteration, self.obj.read) self.obj.seek(0) self.test_read() def test_write(self): w = self.obj.read() f = tempfile.NamedTemporaryFile( suffix='.swm', dir=pysal.examples.get_path('')) fname = f.name f.close() o = pysal.open(fname, 'w') o.write(w) o.close() wnew = pysal.open(fname, 'r').read() self.assertEqual(wnew.pct_nonzero, w.pct_nonzero) os.remove(fname) if __name__ == '__main__': unittest.main()
badjr/pysal
pysal/core/IOHandlers/tests/test_arcgis_swm.py
Python
bsd-3-clause
1,219
//===-- NVPTXTargetMachine.h - Define TargetMachine for NVPTX ---*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file declares the NVPTX specific subclass of TargetMachine. // //===----------------------------------------------------------------------===// #ifndef NVPTX_TARGETMACHINE_H #define NVPTX_TARGETMACHINE_H #include "ManagedStringPool.h" #include "NVPTXFrameLowering.h" #include "NVPTXISelLowering.h" #include "NVPTXInstrInfo.h" #include "NVPTXRegisterInfo.h" #include "NVPTXSubtarget.h" #include "llvm/IR/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetSelectionDAGInfo.h" namespace llvm { /// NVPTXTargetMachine /// class NVPTXTargetMachine : public LLVMTargetMachine { NVPTXSubtarget Subtarget; const DataLayout DL; // Calculates type size & alignment NVPTXInstrInfo InstrInfo; NVPTXTargetLowering TLInfo; TargetSelectionDAGInfo TSInfo; // NVPTX does not have any call stack frame, but need a NVPTX specific // FrameLowering class because TargetFrameLowering is abstract. 
NVPTXFrameLowering FrameLowering; // Hold Strings that can be free'd all together with NVPTXTargetMachine ManagedStringPool ManagedStrPool; //bool addCommonCodeGenPasses(PassManagerBase &, CodeGenOpt::Level, // bool DisableVerify, MCContext *&OutCtx); public: NVPTXTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OP, bool is64bit); virtual const TargetFrameLowering *getFrameLowering() const { return &FrameLowering; } virtual const NVPTXInstrInfo *getInstrInfo() const { return &InstrInfo; } virtual const DataLayout *getDataLayout() const { return &DL; } virtual const NVPTXSubtarget *getSubtargetImpl() const { return &Subtarget; } virtual const NVPTXRegisterInfo *getRegisterInfo() const { return &(InstrInfo.getRegisterInfo()); } virtual NVPTXTargetLowering *getTargetLowering() const { return const_cast<NVPTXTargetLowering *>(&TLInfo); } virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const { return &TSInfo; } //virtual bool addInstSelector(PassManagerBase &PM, // CodeGenOpt::Level OptLevel); //virtual bool addPreRegAlloc(PassManagerBase &, CodeGenOpt::Level); ManagedStringPool *getManagedStrPool() const { return const_cast<ManagedStringPool *>(&ManagedStrPool); } virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); // Emission of machine code through JITCodeEmitter is not supported. virtual bool addPassesToEmitMachineCode(PassManagerBase &, JITCodeEmitter &, bool = true) { return true; } // Emission of machine code through MCJIT is not supported. virtual bool addPassesToEmitMC(PassManagerBase &, MCContext *&, raw_ostream &, bool = true) { return true; } }; // NVPTXTargetMachine. 
class NVPTXTargetMachine32 : public NVPTXTargetMachine { virtual void anchor(); public: NVPTXTargetMachine32(const Target &T, StringRef TT, StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); }; class NVPTXTargetMachine64 : public NVPTXTargetMachine { virtual void anchor(); public: NVPTXTargetMachine64(const Target &T, StringRef TT, StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); }; } // end namespace llvm #endif
santoshn/softboundcets-34
softboundcets-llvm-clang34/lib/Target/NVPTX/NVPTXTargetMachine.h
C
bsd-3-clause
4,038
<!doctype html> <title>line-height has no effect on placeholder</title> <link rel="help" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1714631"> <link rel="match" href="input-placeholder-line-height-ref.html"> <style> input::placeholder { line-height: 0; } </style> <input placeholder=Foo>
scheib/chromium
third_party/blink/web_tests/external/wpt/html/rendering/non-replaced-elements/form-controls/input-placeholder-line-height.html
HTML
bsd-3-clause
303
// Inferno utils/5c/sgen.c // http://code.google.com/p/inferno-os/source/browse/utils/5c/sgen.c // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
#include "gc.h" Prog* gtext(Sym *s, int32 stkoff) { int32 a; a = 0; if(!(textflag & NOSPLIT)) a = argsize(); else if(stkoff >= 128) yyerror("stack frame too large for NOSPLIT function"); gpseudo(ATEXT, s, nodconst(stkoff)); p->to.type = D_CONST2; p->to.offset2 = a; return p; } void noretval(int n) { if(n & 1) { gins(ANOP, Z, Z); p->to.type = D_REG; p->to.reg = REGRET; } if(n & 2) { gins(ANOP, Z, Z); p->to.type = D_FREG; p->to.reg = FREGRET; } } /* * calculate addressability as follows * CONST ==> 20 $value * NAME ==> 10 name * REGISTER ==> 11 register * INDREG ==> 12 *[(reg)+offset] * &10 ==> 2 $name * ADD(2, 20) ==> 2 $name+offset * ADD(3, 20) ==> 3 $(reg)+offset * &12 ==> 3 $(reg)+offset * *11 ==> 11 ?? * *2 ==> 10 name * *3 ==> 12 *(reg)+offset * calculate complexity (number of registers) */ void xcom(Node *n) { Node *l, *r; int t; if(n == Z) return; l = n->left; r = n->right; n->addable = 0; n->complex = 0; switch(n->op) { case OCONST: n->addable = 20; return; case OREGISTER: n->addable = 11; return; case OINDREG: n->addable = 12; return; case ONAME: n->addable = 10; return; case OADDR: xcom(l); if(l->addable == 10) n->addable = 2; if(l->addable == 12) n->addable = 3; break; case OIND: xcom(l); if(l->addable == 11) n->addable = 12; if(l->addable == 3) n->addable = 12; if(l->addable == 2) n->addable = 10; break; case OADD: xcom(l); xcom(r); if(l->addable == 20) { if(r->addable == 2) n->addable = 2; if(r->addable == 3) n->addable = 3; } if(r->addable == 20) { if(l->addable == 2) n->addable = 2; if(l->addable == 3) n->addable = 3; } break; case OASLMUL: case OASMUL: xcom(l); xcom(r); t = vlog(r); if(t >= 0) { n->op = OASASHL; r->vconst = t; r->type = types[TINT]; } break; case OMUL: case OLMUL: xcom(l); xcom(r); t = vlog(r); if(t >= 0) { n->op = OASHL; r->vconst = t; r->type = types[TINT]; } t = vlog(l); if(t >= 0) { n->op = OASHL; n->left = r; n->right = l; r = l; l = n->left; r->vconst = t; r->type = types[TINT]; } break; case OASLDIV: xcom(l); xcom(r); t = 
vlog(r); if(t >= 0) { n->op = OASLSHR; r->vconst = t; r->type = types[TINT]; } break; case OLDIV: xcom(l); xcom(r); t = vlog(r); if(t >= 0) { n->op = OLSHR; r->vconst = t; r->type = types[TINT]; } break; case OASLMOD: xcom(l); xcom(r); t = vlog(r); if(t >= 0) { n->op = OASAND; r->vconst--; } break; case OLMOD: xcom(l); xcom(r); t = vlog(r); if(t >= 0) { n->op = OAND; r->vconst--; } break; default: if(l != Z) xcom(l); if(r != Z) xcom(r); break; } if(n->addable >= 10) return; if(l != Z) n->complex = l->complex; if(r != Z) { if(r->complex == n->complex) n->complex = r->complex+1; else if(r->complex > n->complex) n->complex = r->complex; } if(n->complex == 0) n->complex++; if(com64(n)) return; switch(n->op) { case OFUNC: n->complex = FNX; break; case OADD: case OXOR: case OAND: case OOR: case OEQ: case ONE: /* * immediate operators, make const on right */ if(l->op == OCONST) { n->left = r; n->right = l; } break; } }
eternalNight/ucore_app_go
src/cmd/5c/sgen.c
C
bsd-3-clause
5,060
Timepicker for Twitter Bootstrap 3 fork of https://github.com/jdewit/bootstrap-timepicker =====================
afup/back-office
web/assets/plugins/bootstrap-timepicker/README.md
Markdown
mit
111
RSpec::Matchers.define :have_rule do |rule| match do |subject| if subject.class.name == 'Serverspec::Type::Iptables' subject.has_rule?(rule, @table, @chain) else subject.has_rule?(rule) end end chain :with_table do |table| @table = table end chain :with_chain do |chain| @chain = chain end end
memelet/serverspec
lib/serverspec/matchers/have_rule.rb
Ruby
mit
340
'use strict'; angular.module('showcase', [ 'showcase.angularWay', 'showcase.angularWay.withOptions', 'showcase.withAjax', 'showcase.withOptions', 'showcase.withPromise', 'showcase.angularWay.dataChange', 'showcase.bindAngularDirective', 'showcase.changeOptions', 'showcase.dataReload.withAjax', 'showcase.dataReload.withPromise', 'showcase.disableDeepWatchers', 'showcase.loadOptionsWithPromise', 'showcase.angularDirectiveInDOM', 'showcase.rerender', 'showcase.rowClickEvent', 'showcase.rowSelect', 'showcase.serverSideProcessing', 'showcase.bootstrapIntegration', 'showcase.overrideBootstrapOptions', 'showcase.withAngularTranslate', 'showcase.withColReorder', 'showcase.withColumnFilter', 'showcase.withLightColumnFilter', 'showcase.withColVis', 'showcase.withResponsive', 'showcase.withScroller', 'showcase.withTableTools', 'showcase.withFixedColumns', 'showcase.withFixedHeader', 'showcase.withButtons', 'showcase.withSelect', 'showcase.dtInstances', 'showcase.usages', 'ui.bootstrap', 'ui.router', 'hljs' ]) .config(sampleConfig) .config(routerConfig) .config(translateConfig) .config(debugDisabled) .run(initDT); backToTop.init({ theme: 'classic', // Available themes: 'classic', 'sky', 'slate' animation: 'fade' // Available animations: 'fade', 'slide' }); function debugDisabled($compileProvider)  { $compileProvider.debugInfoEnabled(false); } function sampleConfig(hljsServiceProvider) { hljsServiceProvider.setOptions({ // replace tab with 4 spaces tabReplace: ' ' }); } function routerConfig($stateProvider, $urlRouterProvider, USAGES) { $urlRouterProvider.otherwise('/welcome'); $stateProvider .state('welcome', { url: '/welcome', templateUrl: 'demo/partials/welcome.html', controller: function($rootScope) { $rootScope.$broadcast('event:changeView', 'welcome'); } }) .state('gettingStarted', { url: '/gettingStarted', templateUrl: 'demo/partials/gettingStarted.html', controller: function($rootScope) { $rootScope.$broadcast('event:changeView', 'gettingStarted'); } }) .state('api', { 
url: '/api', templateUrl: 'demo/api/api.html', controller: function($rootScope) { $rootScope.$broadcast('event:changeView', 'api'); } }); angular.forEach(USAGES, function(usages, key) { angular.forEach(usages, function(usage) { $stateProvider.state(usage.name, { url: '/' + usage.name, templateUrl: 'demo/' + key + '/' + usage.name + '.html', controller: function($rootScope) { $rootScope.$broadcast('event:changeView', usage.name); }, onExit: usage.onExit }); }); }); } function translateConfig($translateProvider) { $translateProvider.translations('en', { id: 'ID with angular-translate', firstName: 'First name with angular-translate', lastName: 'Last name with angular-translate' }); $translateProvider.translations('fr', { id: 'ID avec angular-translate', firstName: 'Prénom avec angular-translate', lastName: 'Nom avec angular-translate' }); $translateProvider.preferredLanguage('en'); } function initDT(DTDefaultOptions) { DTDefaultOptions.setLoadingTemplate('<img src="/angular-datatables/images/loading.gif" />'); }
Leo-g/Flask-Scaffold
app/templates/static/node_modules/angular-datatables/demo/app.js
JavaScript
mit
3,736
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef SANDBOX_WIN_SRC_SANDBOX_TYPES_H_ #define SANDBOX_WIN_SRC_SANDBOX_TYPES_H_ namespace sandbox { // Operation result codes returned by the sandbox API. enum ResultCode { SBOX_ALL_OK = 0, // Error is originating on the win32 layer. Call GetlastError() for more // information. SBOX_ERROR_GENERIC = 1, // An invalid combination of parameters was given to the API. SBOX_ERROR_BAD_PARAMS = 2, // The desired operation is not supported at this time. SBOX_ERROR_UNSUPPORTED = 3, // The request requires more memory that allocated or available. SBOX_ERROR_NO_SPACE = 4, // The ipc service requested does not exist. SBOX_ERROR_INVALID_IPC = 5, // The ipc service did not complete. SBOX_ERROR_FAILED_IPC = 6, // The requested handle was not found. SBOX_ERROR_NO_HANDLE = 7, // This function was not expected to be called at this time. SBOX_ERROR_UNEXPECTED_CALL = 8, // WaitForAllTargets is already called. SBOX_ERROR_WAIT_ALREADY_CALLED = 9, // A channel error prevented DoCall from executing. SBOX_ERROR_CHANNEL_ERROR = 10, // Failed to create the alternate desktop. SBOX_ERROR_CANNOT_CREATE_DESKTOP = 11, // Failed to create the alternate window station. SBOX_ERROR_CANNOT_CREATE_WINSTATION = 12, // Failed to switch back to the interactive window station. SBOX_ERROR_FAILED_TO_SWITCH_BACK_WINSTATION = 13, // The supplied AppContainer is not valid. SBOX_ERROR_INVALID_APP_CONTAINER = 14, // The supplied capability is not valid. SBOX_ERROR_INVALID_CAPABILITY = 15, // There is a failure initializing the AppContainer. SBOX_ERROR_CANNOT_INIT_APPCONTAINER = 16, // Initializing or updating ProcThreadAttributes failed. SBOX_ERROR_PROC_THREAD_ATTRIBUTES = 17, // Error in creating process. SBOX_ERROR_CREATE_PROCESS = 18, // Placeholder for last item of the enum. 
SBOX_ERROR_LAST }; // If the sandbox cannot create a secure environment for the target, the // target will be forcibly terminated. These are the process exit codes. enum TerminationCodes { SBOX_FATAL_INTEGRITY = 7006, // Could not set the integrity level. SBOX_FATAL_DROPTOKEN = 7007, // Could not lower the token. SBOX_FATAL_FLUSHANDLES = 7008, // Failed to flush registry handles. SBOX_FATAL_CACHEDISABLE = 7009, // Failed to forbid HCKU caching. SBOX_FATAL_CLOSEHANDLES = 7010, // Failed to close pending handles. SBOX_FATAL_MITIGATION = 7011, // Could not set the mitigation policy. SBOX_FATAL_MEMORY_EXCEEDED = 7012, // Exceeded the job memory limit. SBOX_FATAL_LAST }; class BrokerServices; class TargetServices; // Contains the pointer to a target or broker service. struct SandboxInterfaceInfo { BrokerServices* broker_services; TargetServices* target_services; }; #if SANDBOX_EXPORTS #define SANDBOX_INTERCEPT extern "C" __declspec(dllexport) #else #define SANDBOX_INTERCEPT extern "C" #endif enum InterceptionType { INTERCEPTION_INVALID = 0, INTERCEPTION_SERVICE_CALL, // Trampoline of an NT native call INTERCEPTION_EAT, INTERCEPTION_SIDESTEP, // Preamble patch INTERCEPTION_SMART_SIDESTEP, // Preamble patch but bypass internal calls INTERCEPTION_UNLOAD_MODULE, // Unload the module (don't patch) INTERCEPTION_LAST // Placeholder for last item in the enumeration }; } // namespace sandbox #endif // SANDBOX_WIN_SRC_SANDBOX_TYPES_H_
Teamxrtc/webrtc-streaming-node
third_party/webrtc/src/chromium/src/sandbox/win/src/sandbox_types.h
C
mit
3,552
/* $Id$ */ /* * Written by Solar Designer <solar at openwall.com> in 2000-2011. * No copyright is claimed, and the software is hereby placed in the public * domain. In case this attempt to disclaim copyright and place the software * in the public domain is deemed null and void, then the software is * Copyright (c) 2000-2011 Solar Designer and it is hereby released to the * general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * There's ABSOLUTELY NO WARRANTY, express or implied. * * See crypt_blowfish.c for more information. */ #ifndef _CRYPT_BLOWFISH_H #define _CRYPT_BLOWFISH_H #if 0 extern int _crypt_output_magic(const char *setting, char *output, int size); #endif extern char *php_crypt_blowfish_rn(const char *key, const char *setting, char *output, int size); #if 0 extern char *_crypt_gensalt_blowfish_rn(const char *prefix, unsigned long count, const char *input, int size, char *output, int output_size); #endif #endif
vs0uz4/openshift-cartridge-php
usr/php-5.6.20/include/php/ext/standard/crypt_blowfish.h
C
mit
1,080
/* * Copyright 2012 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.workbench.models.datamodel.rule; public class ExpressionUnboundFact extends ExpressionPart { private String factType; public ExpressionUnboundFact() { } public ExpressionUnboundFact( String factType ) { super( factType, factType, factType ); this.factType = factType; } public String getFactType() { return factType; } @Override public void accept( ExpressionVisitor visitor ) { visitor.visit( this ); } @Override public boolean equals( Object o ) { if ( this == o ) { return true; } if ( o == null || getClass() != o.getClass() ) { return false; } if ( !super.equals( o ) ) { return false; } ExpressionUnboundFact that = (ExpressionUnboundFact) o; if ( factType != null ? !factType.equals( that.factType ) : that.factType != null ) { return false; } return true; } @Override public int hashCode() { int result = super.hashCode(); result = ~~result; result = 31 * result + ( factType != null ? factType.hashCode() : 0 ); result = ~~result; return result; } }
rokn/Count_Words_2015
testing/drools-master/drools-workbench-models/drools-workbench-models-datamodel-api/src/main/java/org/drools/workbench/models/datamodel/rule/ExpressionUnboundFact.java
Java
mit
1,906
// SPDX-License-Identifier: GPL-2.0 /* * Workingset detection * * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner */ #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/writeback.h> #include <linux/shmem_fs.h> #include <linux/pagemap.h> #include <linux/atomic.h> #include <linux/module.h> #include <linux/swap.h> #include <linux/dax.h> #include <linux/fs.h> #include <linux/mm.h> /* * Double CLOCK lists * * Per node, two clock lists are maintained for file pages: the * inactive and the active list. Freshly faulted pages start out at * the head of the inactive list and page reclaim scans pages from the * tail. Pages that are accessed multiple times on the inactive list * are promoted to the active list, to protect them from reclaim, * whereas active pages are demoted to the inactive list when the * active list grows too big. * * fault ------------------------+ * | * +--------------+ | +-------------+ * reclaim <- | inactive | <-+-- demotion | active | <--+ * +--------------+ +-------------+ | * | | * +-------------- promotion ------------------+ * * * Access frequency and refault distance * * A workload is thrashing when its pages are frequently used but they * are evicted from the inactive list every time before another access * would have promoted them to the active list. * * In cases where the average access distance between thrashing pages * is bigger than the size of memory there is nothing that can be * done - the thrashing set could never fit into memory under any * circumstance. * * However, the average access distance could be bigger than the * inactive list, yet smaller than the size of memory. 
In this case, * the set could fit into memory if it weren't for the currently * active pages - which may be used more, hopefully less frequently: * * +-memory available to cache-+ * | | * +-inactive------+-active----+ * a b | c d e f g h i | J K L M N | * +---------------+-----------+ * * It is prohibitively expensive to accurately track access frequency * of pages. But a reasonable approximation can be made to measure * thrashing on the inactive list, after which refaulting pages can be * activated optimistically to compete with the existing active pages. * * Approximating inactive page access frequency - Observations: * * 1. When a page is accessed for the first time, it is added to the * head of the inactive list, slides every existing inactive page * towards the tail by one slot, and pushes the current tail page * out of memory. * * 2. When a page is accessed for the second time, it is promoted to * the active list, shrinking the inactive list by one slot. This * also slides all inactive pages that were faulted into the cache * more recently than the activated page towards the tail of the * inactive list. * * Thus: * * 1. The sum of evictions and activations between any two points in * time indicate the minimum number of inactive pages accessed in * between. * * 2. Moving one inactive page N page slots towards the tail of the * list requires at least N inactive page accesses. * * Combining these: * * 1. When a page is finally evicted from memory, the number of * inactive pages accessed while the page was in cache is at least * the number of page slots on the inactive list. * * 2. In addition, measuring the sum of evictions and activations (E) * at the time of a page's eviction, and comparing it to another * reading (R) at the time the page faults back into memory tells * the minimum number of accesses while the page was not cached. * This is called the refault distance. 
* * Because the first access of the page was the fault and the second * access the refault, we combine the in-cache distance with the * out-of-cache distance to get the complete minimum access distance * of this page: * * NR_inactive + (R - E) * * And knowing the minimum access distance of a page, we can easily * tell if the page would be able to stay in cache assuming all page * slots in the cache were available: * * NR_inactive + (R - E) <= NR_inactive + NR_active * * which can be further simplified to * * (R - E) <= NR_active * * Put into words, the refault distance (out-of-cache) can be seen as * a deficit in inactive list space (in-cache). If the inactive list * had (R - E) more page slots, the page would not have been evicted * in between accesses, but activated instead. And on a full system, * the only thing eating into inactive list space is active pages. * * * Refaulting inactive pages * * All that is known about the active list is that the pages have been * accessed more than once in the past. This means that at any given * time there is actually a good chance that pages on the active list * are no longer in active use. * * So when a refault distance of (R - E) is observed and there are at * least (R - E) active pages, the refaulting page is activated * optimistically in the hope that (R - E) active pages are actually * used less frequently than the refaulting page - or even not used at * all anymore. * * That means if inactive cache is refaulting with a suitable refault * distance, we assume the cache workingset is transitioning and put * pressure on the current active list. * * If this is wrong and demotion kicks in, the pages which are truly * used more frequently will be reactivated while the less frequently * used once will be evicted from memory. * * But if this is right, the stale pages will be pushed out of memory * and the used pages get to stay in cache. 
* * Refaulting active pages * * If on the other hand the refaulting pages have recently been * deactivated, it means that the active list is no longer protecting * actively used cache from reclaim. The cache is NOT transitioning to * a different workingset; the existing workingset is thrashing in the * space allocated to the page cache. * * * Implementation * * For each node's LRU lists, a counter for inactive evictions and * activations is maintained (node->nonresident_age). * * On eviction, a snapshot of this counter (along with some bits to * identify the node) is stored in the now empty page cache * slot of the evicted page. This is called a shadow entry. * * On cache misses for which there are shadow entries, an eligible * refault distance will immediately activate the refaulting page. */ #define EVICTION_SHIFT ((BITS_PER_LONG - BITS_PER_XA_VALUE) + \ 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT) #define EVICTION_MASK (~0UL >> EVICTION_SHIFT) /* * Eviction timestamps need to be able to cover the full range of * actionable refaults. However, bits are tight in the xarray * entry, and after storing the identifier for the lruvec there might * not be enough left to represent every single actionable refault. In * that case, we have to sacrifice granularity for distance, and group * evictions into coarser buckets by shaving off lower timestamp bits. 
*/ static unsigned int bucket_order __read_mostly; static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction, bool workingset) { eviction >>= bucket_order; eviction &= EVICTION_MASK; eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid; eviction = (eviction << NODES_SHIFT) | pgdat->node_id; eviction = (eviction << 1) | workingset; return xa_mk_value(eviction); } static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat, unsigned long *evictionp, bool *workingsetp) { unsigned long entry = xa_to_value(shadow); int memcgid, nid; bool workingset; workingset = entry & 1; entry >>= 1; nid = entry & ((1UL << NODES_SHIFT) - 1); entry >>= NODES_SHIFT; memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1); entry >>= MEM_CGROUP_ID_SHIFT; *memcgidp = memcgid; *pgdat = NODE_DATA(nid); *evictionp = entry << bucket_order; *workingsetp = workingset; } /** * workingset_age_nonresident - age non-resident entries as LRU ages * @lruvec: the lruvec that was aged * @nr_pages: the number of pages to count * * As in-memory pages are aged, non-resident pages need to be aged as * well, in order for the refault distances later on to be comparable * to the in-memory dimensions. This function allows reclaim and LRU * operations to drive the non-resident aging along in parallel. */ void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages) { /* * Reclaiming a cgroup means reclaiming all its children in a * round-robin fashion. That means that each cgroup has an LRU * order that is composed of the LRU orders of its child * cgroups; and every page has an LRU position not just in the * cgroup that owns it, but in all of that group's ancestors. * * So when the physical inactive list of a leaf cgroup ages, * the virtual inactive lists of all its parents, including * the root cgroup's, age as well. 
*/ do { atomic_long_add(nr_pages, &lruvec->nonresident_age); } while ((lruvec = parent_lruvec(lruvec))); } /** * workingset_eviction - note the eviction of a page from memory * @target_memcg: the cgroup that is causing the reclaim * @page: the page being evicted * * Returns a shadow entry to be stored in @page->mapping->i_pages in place * of the evicted @page so that a later refault can be detected. */ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg) { struct pglist_data *pgdat = page_pgdat(page); unsigned long eviction; struct lruvec *lruvec; int memcgid; /* Page is fully exclusive and pins page's memory cgroup pointer */ VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(page_count(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page); lruvec = mem_cgroup_lruvec(target_memcg, pgdat); /* XXX: target_memcg can be NULL, go through lruvec */ memcgid = mem_cgroup_id(lruvec_memcg(lruvec)); eviction = atomic_long_read(&lruvec->nonresident_age); workingset_age_nonresident(lruvec, thp_nr_pages(page)); return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page)); } /** * workingset_refault - evaluate the refault of a previously evicted page * @page: the freshly allocated replacement page * @shadow: shadow entry of the evicted page * * Calculates and evaluates the refault distance of the previously * evicted page in the context of the node and the memcg whose memory * pressure caused the eviction. */ void workingset_refault(struct page *page, void *shadow) { bool file = page_is_file_lru(page); struct mem_cgroup *eviction_memcg; struct lruvec *eviction_lruvec; unsigned long refault_distance; unsigned long workingset_size; struct pglist_data *pgdat; struct mem_cgroup *memcg; unsigned long eviction; struct lruvec *lruvec; unsigned long refault; bool workingset; int memcgid; unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset); rcu_read_lock(); /* * Look up the memcg associated with the stored ID. 
It might * have been deleted since the page's eviction. * * Note that in rare events the ID could have been recycled * for a new cgroup that refaults a shared page. This is * impossible to tell from the available data. However, this * should be a rare and limited disturbance, and activations * are always speculative anyway. Ultimately, it's the aging * algorithm's job to shake out the minimum access frequency * for the active cache. * * XXX: On !CONFIG_MEMCG, this will always return NULL; it * would be better if the root_mem_cgroup existed in all * configurations instead. */ eviction_memcg = mem_cgroup_from_id(memcgid); if (!mem_cgroup_disabled() && !eviction_memcg) goto out; eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat); refault = atomic_long_read(&eviction_lruvec->nonresident_age); /* * Calculate the refault distance * * The unsigned subtraction here gives an accurate distance * across nonresident_age overflows in most cases. There is a * special case: usually, shadow entries have a short lifetime * and are either refaulted or reclaimed along with the inode * before they get too old. But it is not impossible for the * nonresident_age to lap a shadow entry in the field, which * can then result in a false small refault distance, leading * to a false activation should this old entry actually * refault again. However, earlier kernels used to deactivate * unconditionally with *every* reclaim invocation for the * longest time, so the occasional inappropriate activation * leading to pressure on the active list is not a problem. */ refault_distance = (refault - eviction) & EVICTION_MASK; /* * The activation decision for this page is made at the level * where the eviction occurred, as that is where the LRU order * during page reclaim is being determined. * * However, the cgroup that will own the page is the one that * is actually experiencing the refault event. 
*/ memcg = page_memcg(page); lruvec = mem_cgroup_lruvec(memcg, pgdat); inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file); /* * Compare the distance to the existing workingset size. We * don't activate pages that couldn't stay resident even if * all the memory was available to the workingset. Whether * workingset competition needs to consider anon or not depends * on having swap. */ workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE); if (!file) { workingset_size += lruvec_page_state(eviction_lruvec, NR_INACTIVE_FILE); } if (mem_cgroup_get_nr_swap_pages(memcg) > 0) { workingset_size += lruvec_page_state(eviction_lruvec, NR_ACTIVE_ANON); if (file) { workingset_size += lruvec_page_state(eviction_lruvec, NR_INACTIVE_ANON); } } if (refault_distance > workingset_size) goto out; SetPageActive(page); workingset_age_nonresident(lruvec, thp_nr_pages(page)); inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file); /* Page was active prior to eviction */ if (workingset) { SetPageWorkingset(page); /* XXX: Move to lru_cache_add() when it supports new vs putback */ lru_note_cost_page(page); inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file); } out: rcu_read_unlock(); } /** * workingset_activation - note a page activation * @page: page that is being activated */ void workingset_activation(struct page *page) { struct mem_cgroup *memcg; struct lruvec *lruvec; rcu_read_lock(); /* * Filter non-memcg pages here, e.g. unmap can call * mark_page_accessed() on VDSO pages. * * XXX: See workingset_refault() - this should return * root_mem_cgroup even for !CONFIG_MEMCG. */ memcg = page_memcg_rcu(page); if (!mem_cgroup_disabled() && !memcg) goto out; lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page)); workingset_age_nonresident(lruvec, thp_nr_pages(page)); out: rcu_read_unlock(); } /* * Shadow entries reflect the share of the working set that does not * fit into memory, so their number depends on the access pattern of * the workload. 
In most cases, they will refault or get reclaimed * along with the inode, but a (malicious) workload that streams * through files with a total size several times that of available * memory, while preventing the inodes from being reclaimed, can * create excessive amounts of shadow nodes. To keep a lid on this, * track shadow nodes and reclaim them when they grow way past the * point where they would still be useful. */ static struct list_lru shadow_nodes; void workingset_update_node(struct xa_node *node) { /* * Track non-empty nodes that contain only shadow entries; * unlink those that contain pages or are being freed. * * Avoid acquiring the list_lru lock when the nodes are * already where they should be. The list_empty() test is safe * as node->private_list is protected by the i_pages lock. */ VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_page_state */ if (node->count && node->count == node->nr_values) { if (list_empty(&node->private_list)) { list_lru_add(&shadow_nodes, &node->private_list); __inc_lruvec_kmem_state(node, WORKINGSET_NODES); } } else { if (!list_empty(&node->private_list)) { list_lru_del(&shadow_nodes, &node->private_list); __dec_lruvec_kmem_state(node, WORKINGSET_NODES); } } } static unsigned long count_shadow_nodes(struct shrinker *shrinker, struct shrink_control *sc) { unsigned long max_nodes; unsigned long nodes; unsigned long pages; nodes = list_lru_shrink_count(&shadow_nodes, sc); if (!nodes) return SHRINK_EMPTY; /* * Approximate a reasonable limit for the nodes * containing shadow entries. We don't need to keep more * shadow entries than possible pages on the active list, * since refault distances bigger than that are dismissed. * * The size of the active list converges toward 100% of * overall page cache as memory grows, with only a tiny * inactive list. Assume the total cache size for that. * * Nodes might be sparsely populated, with only one shadow * entry in the extreme case. 
Obviously, we cannot keep one * node for every eligible shadow entry, so compromise on a * worst-case density of 1/8th. Below that, not all eligible * refaults can be detected anymore. * * On 64-bit with 7 xa_nodes per page and 64 slots * each, this will reclaim shadow entries when they consume * ~1.8% of available memory: * * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE */ #ifdef CONFIG_MEMCG if (sc->memcg) { struct lruvec *lruvec; int i; lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid)); for (pages = 0, i = 0; i < NR_LRU_LISTS; i++) pages += lruvec_page_state_local(lruvec, NR_LRU_BASE + i); pages += lruvec_page_state_local( lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT; pages += lruvec_page_state_local( lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT; } else #endif pages = node_present_pages(sc->nid); max_nodes = pages >> (XA_CHUNK_SHIFT - 3); if (nodes <= max_nodes) return 0; return nodes - max_nodes; } static enum lru_status shadow_lru_isolate(struct list_head *item, struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) __must_hold(lru_lock) { struct xa_node *node = container_of(item, struct xa_node, private_list); struct address_space *mapping; int ret; /* * Page cache insertions and deletions synchronously maintain * the shadow node LRU under the i_pages lock and the * lru_lock. Because the page cache tree is emptied before * the inode can be destroyed, holding the lru_lock pins any * address_space that has nodes on the LRU. * * We can then safely transition to the i_pages lock to * pin only the address_space of the particular node we want * to reclaim, take the node off-LRU, and drop the lru_lock. 
*/ mapping = container_of(node->array, struct address_space, i_pages); /* Coming from the list, invert the lock order */ if (!xa_trylock(&mapping->i_pages)) { spin_unlock_irq(lru_lock); ret = LRU_RETRY; goto out; } list_lru_isolate(lru, item); __dec_lruvec_kmem_state(node, WORKINGSET_NODES); spin_unlock(lru_lock); /* * The nodes should only contain one or more shadow entries, * no pages, so we expect to be able to remove them all and * delete and free the empty node afterwards. */ if (WARN_ON_ONCE(!node->nr_values)) goto out_invalid; if (WARN_ON_ONCE(node->count != node->nr_values)) goto out_invalid; mapping->nrexceptional -= node->nr_values; xa_delete_node(node, workingset_update_node); __inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM); out_invalid: xa_unlock_irq(&mapping->i_pages); ret = LRU_REMOVED_RETRY; out: cond_resched(); spin_lock_irq(lru_lock); return ret; } static unsigned long scan_shadow_nodes(struct shrinker *shrinker, struct shrink_control *sc) { /* list_lru lock nests inside the IRQ-safe i_pages lock */ return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate, NULL); } static struct shrinker workingset_shadow_shrinker = { .count_objects = count_shadow_nodes, .scan_objects = scan_shadow_nodes, .seeks = 0, /* ->count reports only fully expendable nodes */ .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, }; /* * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe * i_pages lock. */ static struct lock_class_key shadow_nodes_key; static int __init workingset_init(void) { unsigned int timestamp_bits; unsigned int max_order; int ret; BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT); /* * Calculate the eviction bucket size to cover the longest * actionable refault distance, which is currently half of * memory (totalram_pages/2). However, memory hotplug may add * some more pages at runtime, so keep working with up to * double the initial memory by using totalram_pages as-is. 
*/ timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT; max_order = fls_long(totalram_pages() - 1); if (max_order > timestamp_bits) bucket_order = max_order - timestamp_bits; pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n", timestamp_bits, max_order, bucket_order); ret = prealloc_shrinker(&workingset_shadow_shrinker); if (ret) goto err; ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key, &workingset_shadow_shrinker); if (ret) goto err_list_lru; register_shrinker_prepared(&workingset_shadow_shrinker); return 0; err_list_lru: free_prealloced_shrinker(&workingset_shadow_shrinker); err: return ret; } module_init(workingset_init);
openwrt-es/linux
mm/workingset.c
C
gpl-2.0
21,841
SET(CMAKE_CXX_COMPILER "/usr/bin/c++") SET(CMAKE_CXX_COMPILER_ARG1 "") SET(CMAKE_CXX_COMPILER_ID "GNU") SET(CMAKE_CXX_COMPILER_VERSION "4.7.2") SET(CMAKE_CXX_PLATFORM_ID "Linux") SET(CMAKE_AR "/usr/bin/ar") SET(CMAKE_RANLIB "/usr/bin/ranlib") SET(CMAKE_LINKER "/usr/bin/ld") SET(CMAKE_COMPILER_IS_GNUCXX 1) SET(CMAKE_CXX_COMPILER_LOADED 1) SET(CMAKE_COMPILER_IS_MINGW ) SET(CMAKE_COMPILER_IS_CYGWIN ) IF(CMAKE_COMPILER_IS_CYGWIN) SET(CYGWIN 1) SET(UNIX 1) ENDIF(CMAKE_COMPILER_IS_CYGWIN) SET(CMAKE_CXX_COMPILER_ENV_VAR "CXX") IF(CMAKE_COMPILER_IS_MINGW) SET(MINGW 1) ENDIF(CMAKE_COMPILER_IS_MINGW) SET(CMAKE_CXX_COMPILER_ID_RUN 1) SET(CMAKE_CXX_IGNORE_EXTENSIONS inl;h;hpp;HPP;H;o;O;obj;OBJ;def;DEF;rc;RC) SET(CMAKE_CXX_SOURCE_FILE_EXTENSIONS C;M;c++;cc;cpp;cxx;m;mm;CPP) SET(CMAKE_CXX_LINKER_PREFERENCE 30) SET(CMAKE_CXX_LINKER_PREFERENCE_PROPAGATES 1) # Save compiler ABI information. SET(CMAKE_CXX_SIZEOF_DATA_PTR "8") SET(CMAKE_CXX_COMPILER_ABI "ELF") SET(CMAKE_CXX_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") IF(CMAKE_CXX_SIZEOF_DATA_PTR) SET(CMAKE_SIZEOF_VOID_P "${CMAKE_CXX_SIZEOF_DATA_PTR}") ENDIF(CMAKE_CXX_SIZEOF_DATA_PTR) IF(CMAKE_CXX_COMPILER_ABI) SET(CMAKE_INTERNAL_PLATFORM_ABI "${CMAKE_CXX_COMPILER_ABI}") ENDIF(CMAKE_CXX_COMPILER_ABI) IF(CMAKE_CXX_LIBRARY_ARCHITECTURE) SET(CMAKE_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") ENDIF() SET(CMAKE_CXX_HAS_ISYSROOT "") SET(CMAKE_CXX_IMPLICIT_LINK_LIBRARIES "stdc++;m;c") SET(CMAKE_CXX_IMPLICIT_LINK_DIRECTORIES "/usr/lib/gcc/x86_64-linux-gnu/4.7;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib")
Wittyshare/wittyshare
src/WsModule/build/CMakeFiles/CMakeCXXCompiler.cmake
CMake
gpl-2.0
1,589
<?php /** * @file * Initiates a browser-based installation of Drupal. */ // Change the directory to the Drupal root. chdir('..'); /** * Global flag to indicate the site is in installation mode. * * The constant is defined using define() instead of const so that PHP * versions prior to 5.3 can display proper PHP requirements instead of causing * a fatal error. */ define('MAINTENANCE_MODE', 'install'); // Exit early if running an incompatible PHP version to avoid fatal errors. // The minimum version is specified explicitly, as DRUPAL_MINIMUM_PHP is not // yet available. It is defined in bootstrap.inc, but it is not possible to // load that file yet as it would cause a fatal error on older versions of PHP. if (version_compare(PHP_VERSION, '5.4.2') < 0) { print 'Your PHP installation is too old. Drupal requires at least PHP 5.4.2. See the <a href="http://drupal.org/requirements">system requirements</a> page for more information.'; exit; } // Start the installer. require_once __DIR__ . '/vendor/autoload.php'; require_once __DIR__ . '/includes/install.core.inc'; install_drupal();
drupaals/demo.com
d8/core/install.php
PHP
gpl-2.0
1,109
#ifndef X86_64_TARGET_SIGNAL_H #define X86_64_TARGET_SIGNAL_H /* this struct defines a stack used during syscall handling */ typedef struct target_sigaltstack { abi_ulong ss_sp; abi_long ss_flags; abi_ulong ss_size; } target_stack_t; /* * sigaltstack controls */ #define TARGET_SS_ONSTACK 1 #define TARGET_SS_DISABLE 2 #define TARGET_MINSIGSTKSZ 2048 #define TARGET_SIGSTKSZ 8192 #include "../generic/signal.h" #endif /* X86_64_TARGET_SIGNAL_H */
Cisco-Talos/pyrebox
qemu/linux-user/x86_64/target_signal.h
C
gpl-2.0
460
// SPDX-License-Identifier: GPL-2.0 /* * Volume Management Device driver * Copyright (c) 2015, Intel Corporation. */ #include <linux/device.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/pci.h> #include <linux/srcu.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <asm/irqdomain.h> #include <asm/device.h> #include <asm/msi.h> #include <asm/msidef.h> #define VMD_CFGBAR 0 #define VMD_MEMBAR1 2 #define VMD_MEMBAR2 4 #define PCI_REG_VMCAP 0x40 #define BUS_RESTRICT_CAP(vmcap) (vmcap & 0x1) #define PCI_REG_VMCONFIG 0x44 #define BUS_RESTRICT_CFG(vmcfg) ((vmcfg >> 8) & 0x3) #define PCI_REG_VMLOCK 0x70 #define MB2_SHADOW_EN(vmlock) (vmlock & 0x2) #define MB2_SHADOW_OFFSET 0x2000 #define MB2_SHADOW_SIZE 16 enum vmd_features { /* * Device may contain registers which hint the physical location of the * membars, in order to allow proper address translation during * resource assignment to enable guest virtualization */ VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0), /* * Device may provide root port configuration information which limits * bus numbering */ VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1), }; /* * Lock for manipulating VMD IRQ lists. */ static DEFINE_RAW_SPINLOCK(list_lock); /** * struct vmd_irq - private data to map driver IRQ to the VMD shared vector * @node: list item for parent traversal. * @irq: back pointer to parent. * @enabled: true if driver enabled IRQ * @virq: the virtual IRQ value provided to the requesting driver. * * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to * a VMD IRQ using this structure. */ struct vmd_irq { struct list_head node; struct vmd_irq_list *irq; bool enabled; unsigned int virq; }; /** * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector * @irq_list: the list of irq's the VMD one demuxes to. * @srcu: SRCU struct for local synchronization. 
* @count: number of child IRQs assigned to this vector; used to track * sharing. */ struct vmd_irq_list { struct list_head irq_list; struct srcu_struct srcu; unsigned int count; }; struct vmd_dev { struct pci_dev *dev; spinlock_t cfg_lock; char __iomem *cfgbar; int msix_count; struct vmd_irq_list *irqs; struct pci_sysdata sysdata; struct resource resources[3]; struct irq_domain *irq_domain; struct pci_bus *bus; u8 busn_start; struct dma_map_ops dma_ops; struct dma_domain dma_domain; }; static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus) { return container_of(bus->sysdata, struct vmd_dev, sysdata); } static inline unsigned int index_from_irqs(struct vmd_dev *vmd, struct vmd_irq_list *irqs) { return irqs - vmd->irqs; } /* * Drivers managing a device in a VMD domain allocate their own IRQs as before, * but the MSI entry for the hardware it's driving will be programmed with a * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its * domain into one of its own, and the VMD driver de-muxes these for the * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations * and irq_chip to set this up. */ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct vmd_irq *vmdirq = data->chip_data; struct vmd_irq_list *irq = vmdirq->irq; struct vmd_dev *vmd = irq_data_get_irq_handler_data(data); msg->address_hi = MSI_ADDR_BASE_HI; msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq)); msg->data = 0; } /* * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops. 
*/ static void vmd_irq_enable(struct irq_data *data) { struct vmd_irq *vmdirq = data->chip_data; unsigned long flags; raw_spin_lock_irqsave(&list_lock, flags); WARN_ON(vmdirq->enabled); list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list); vmdirq->enabled = true; raw_spin_unlock_irqrestore(&list_lock, flags); data->chip->irq_unmask(data); } static void vmd_irq_disable(struct irq_data *data) { struct vmd_irq *vmdirq = data->chip_data; unsigned long flags; data->chip->irq_mask(data); raw_spin_lock_irqsave(&list_lock, flags); if (vmdirq->enabled) { list_del_rcu(&vmdirq->node); vmdirq->enabled = false; } raw_spin_unlock_irqrestore(&list_lock, flags); } /* * XXX: Stubbed until we develop acceptable way to not create conflicts with * other devices sharing the same vector. */ static int vmd_irq_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force) { return -EINVAL; } static struct irq_chip vmd_msi_controller = { .name = "VMD-MSI", .irq_enable = vmd_irq_enable, .irq_disable = vmd_irq_disable, .irq_compose_msi_msg = vmd_compose_msi_msg, .irq_set_affinity = vmd_irq_set_affinity, }; static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info, msi_alloc_info_t *arg) { return 0; } /* * XXX: We can be even smarter selecting the best IRQ once we solve the * affinity problem. */ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc) { int i, best = 1; unsigned long flags; if (vmd->msix_count == 1) return &vmd->irqs[0]; /* * White list for fast-interrupt handlers. All others will share the * "slow" interrupt vector. 
*/ switch (msi_desc_to_pci_dev(desc)->class) { case PCI_CLASS_STORAGE_EXPRESS: break; default: return &vmd->irqs[0]; } raw_spin_lock_irqsave(&list_lock, flags); for (i = 1; i < vmd->msix_count; i++) if (vmd->irqs[i].count < vmd->irqs[best].count) best = i; vmd->irqs[best].count++; raw_spin_unlock_irqrestore(&list_lock, flags); return &vmd->irqs[best]; } static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg) { struct msi_desc *desc = arg->desc; struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus); struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); unsigned int index, vector; if (!vmdirq) return -ENOMEM; INIT_LIST_HEAD(&vmdirq->node); vmdirq->irq = vmd_next_irq(vmd, desc); vmdirq->virq = virq; index = index_from_irqs(vmd, vmdirq->irq); vector = pci_irq_vector(vmd->dev, index); irq_domain_set_info(domain, virq, vector, info->chip, vmdirq, handle_untracked_irq, vmd, NULL); return 0; } static void vmd_msi_free(struct irq_domain *domain, struct msi_domain_info *info, unsigned int virq) { struct vmd_irq *vmdirq = irq_get_chip_data(virq); unsigned long flags; synchronize_srcu(&vmdirq->irq->srcu); /* XXX: Potential optimization to rebalance */ raw_spin_lock_irqsave(&list_lock, flags); vmdirq->irq->count--; raw_spin_unlock_irqrestore(&list_lock, flags); kfree(vmdirq); } static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *arg) { struct pci_dev *pdev = to_pci_dev(dev); struct vmd_dev *vmd = vmd_from_bus(pdev->bus); if (nvec > vmd->msix_count) return vmd->msix_count; memset(arg, 0, sizeof(*arg)); return 0; } static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) { arg->desc = desc; } static struct msi_domain_ops vmd_msi_domain_ops = { .get_hwirq = vmd_get_hwirq, .msi_init = vmd_msi_init, .msi_free = vmd_msi_free, .msi_prepare = vmd_msi_prepare, .set_desc = vmd_set_desc, }; static struct 
msi_domain_info vmd_msi_domain_info = { .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX, .ops = &vmd_msi_domain_ops, .chip = &vmd_msi_controller, }; /* * VMD replaces the requester ID with its own. DMA mappings for devices in a * VMD domain need to be mapped for the VMD, not the device requiring * the mapping. */ static struct device *to_vmd_dev(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct vmd_dev *vmd = vmd_from_bus(pdev->bus); return &vmd->dev->dev; } static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, gfp_t flag, unsigned long attrs) { return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs); } static void vmd_free(struct device *dev, size_t size, void *vaddr, dma_addr_t addr, unsigned long attrs) { return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs); } static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t addr, size_t size, unsigned long attrs) { return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size, attrs); } static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t addr, size_t size, unsigned long attrs) { return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size, attrs); } static dma_addr_t vmd_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) { return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir, attrs); } static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs); } static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs); } static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int 
nents, enum dma_data_direction dir, unsigned long attrs) { dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs); } static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir); } static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir); } static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir); } static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir); } static int vmd_dma_supported(struct device *dev, u64 mask) { return dma_supported(to_vmd_dev(dev), mask); } static u64 vmd_get_required_mask(struct device *dev) { return dma_get_required_mask(to_vmd_dev(dev)); } static void vmd_teardown_dma_ops(struct vmd_dev *vmd) { struct dma_domain *domain = &vmd->dma_domain; if (get_dma_ops(&vmd->dev->dev)) del_dma_domain(domain); } #define ASSIGN_VMD_DMA_OPS(source, dest, fn) \ do { \ if (source->fn) \ dest->fn = vmd_##fn; \ } while (0) static void vmd_setup_dma_ops(struct vmd_dev *vmd) { const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev); struct dma_map_ops *dest = &vmd->dma_ops; struct dma_domain *domain = &vmd->dma_domain; domain->domain_nr = vmd->sysdata.domain; domain->dma_ops = dest; if (!source) return; ASSIGN_VMD_DMA_OPS(source, dest, alloc); ASSIGN_VMD_DMA_OPS(source, dest, free); ASSIGN_VMD_DMA_OPS(source, dest, mmap); ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable); ASSIGN_VMD_DMA_OPS(source, dest, map_page); ASSIGN_VMD_DMA_OPS(source, dest, unmap_page); ASSIGN_VMD_DMA_OPS(source, dest, map_sg); ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg); ASSIGN_VMD_DMA_OPS(source, dest, 
sync_single_for_cpu); ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device); ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu); ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device); ASSIGN_VMD_DMA_OPS(source, dest, dma_supported); ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask); add_dma_domain(domain); } #undef ASSIGN_VMD_DMA_OPS static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus, unsigned int devfn, int reg, int len) { char __iomem *addr = vmd->cfgbar + ((bus->number - vmd->busn_start) << 20) + (devfn << 12) + reg; if ((addr - vmd->cfgbar) + len >= resource_size(&vmd->dev->resource[VMD_CFGBAR])) return NULL; return addr; } /* * CPU may deadlock if config space is not serialized on some versions of this * hardware, so all config space access is done under a spinlock. */ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, int len, u32 *value) { struct vmd_dev *vmd = vmd_from_bus(bus); char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); unsigned long flags; int ret = 0; if (!addr) return -EFAULT; spin_lock_irqsave(&vmd->cfg_lock, flags); switch (len) { case 1: *value = readb(addr); break; case 2: *value = readw(addr); break; case 4: *value = readl(addr); break; default: ret = -EINVAL; break; } spin_unlock_irqrestore(&vmd->cfg_lock, flags); return ret; } /* * VMD h/w converts non-posted config writes to posted memory writes. The * read-back in this function forces the completion so it returns only after * the config space was written, as expected. 
*/ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, int len, u32 value) { struct vmd_dev *vmd = vmd_from_bus(bus); char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); unsigned long flags; int ret = 0; if (!addr) return -EFAULT; spin_lock_irqsave(&vmd->cfg_lock, flags); switch (len) { case 1: writeb(value, addr); readb(addr); break; case 2: writew(value, addr); readw(addr); break; case 4: writel(value, addr); readl(addr); break; default: ret = -EINVAL; break; } spin_unlock_irqrestore(&vmd->cfg_lock, flags); return ret; } static struct pci_ops vmd_ops = { .read = vmd_pci_read, .write = vmd_pci_write, }; static void vmd_attach_resources(struct vmd_dev *vmd) { vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1]; vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2]; } static void vmd_detach_resources(struct vmd_dev *vmd) { vmd->dev->resource[VMD_MEMBAR1].child = NULL; vmd->dev->resource[VMD_MEMBAR2].child = NULL; } /* * VMD domains start at 0x10000 to not clash with ACPI _SEG domains. * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower * 16 bits are the PCI Segment Group (domain) number. Other bits are * currently reserved. */ static int vmd_find_free_domain(void) { int domain = 0xffff; struct pci_bus *bus = NULL; while ((bus = pci_find_next_bus(bus)) != NULL) domain = max_t(int, domain, pci_domain_nr(bus)); return domain + 1; } static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) { struct pci_sysdata *sd = &vmd->sysdata; struct fwnode_handle *fn; struct resource *res; u32 upper_bits; unsigned long flags; LIST_HEAD(resources); resource_size_t offset[2] = {0}; resource_size_t membar2_offset = 0x2000; struct pci_bus *child; /* * Shadow registers may exist in certain VMD device ids which allow * guests to correctly assign host physical addresses to the root ports * and child devices. 
These registers will either return the host value * or 0, depending on an enable bit in the VMD device. */ if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) { u32 vmlock; int ret; membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE; ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock); if (ret || vmlock == ~0) return -ENODEV; if (MB2_SHADOW_EN(vmlock)) { void __iomem *membar2; membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0); if (!membar2) return -ENOMEM; offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - readq(membar2 + MB2_SHADOW_OFFSET); offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - readq(membar2 + MB2_SHADOW_OFFSET + 8); pci_iounmap(vmd->dev, membar2); } } /* * Certain VMD devices may have a root port configuration option which * limits the bus range to between 0-127 or 128-255 */ if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) { u32 vmcap, vmconfig; pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap); pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig); if (BUS_RESTRICT_CAP(vmcap) && (BUS_RESTRICT_CFG(vmconfig) == 0x1)) vmd->busn_start = 128; } res = &vmd->dev->resource[VMD_CFGBAR]; vmd->resources[0] = (struct resource) { .name = "VMD CFGBAR", .start = vmd->busn_start, .end = vmd->busn_start + (resource_size(res) >> 20) - 1, .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED, }; /* * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can * put 32-bit resources in the window. * * There's no hardware reason why a 64-bit window *couldn't* * contain a 32-bit resource, but pbus_size_mem() computes the * bridge window size assuming a 64-bit window will contain no * 32-bit resources. __pci_assign_resource() enforces that * artificial restriction to make sure everything will fit. * * The only way we could use a 64-bit non-prefetchable MEMBAR is * if its address is <4GB so that we can convert it to a 32-bit * resource. 
To be visible to the host OS, all VMD endpoints must * be initially configured by platform BIOS, which includes setting * up these resources. We can assume the device is configured * according to the platform needs. */ res = &vmd->dev->resource[VMD_MEMBAR1]; upper_bits = upper_32_bits(res->end); flags = res->flags & ~IORESOURCE_SIZEALIGN; if (!upper_bits) flags &= ~IORESOURCE_MEM_64; vmd->resources[1] = (struct resource) { .name = "VMD MEMBAR1", .start = res->start, .end = res->end, .flags = flags, .parent = res, }; res = &vmd->dev->resource[VMD_MEMBAR2]; upper_bits = upper_32_bits(res->end); flags = res->flags & ~IORESOURCE_SIZEALIGN; if (!upper_bits) flags &= ~IORESOURCE_MEM_64; vmd->resources[2] = (struct resource) { .name = "VMD MEMBAR2", .start = res->start + membar2_offset, .end = res->end, .flags = flags, .parent = res, }; sd->vmd_domain = true; sd->domain = vmd_find_free_domain(); if (sd->domain < 0) return sd->domain; sd->node = pcibus_to_node(vmd->dev->bus); fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain); if (!fn) return -ENODEV; vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, x86_vector_domain); irq_domain_free_fwnode(fn); if (!vmd->irq_domain) return -ENODEV; pci_add_resource(&resources, &vmd->resources[0]); pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]); vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start, &vmd_ops, sd, &resources); if (!vmd->bus) { pci_free_resource_list(&resources); irq_domain_remove(vmd->irq_domain); return -ENODEV; } vmd_attach_resources(vmd); vmd_setup_dma_ops(vmd); dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain); pci_scan_child_bus(vmd->bus); pci_assign_unassigned_bus_resources(vmd->bus); /* * VMD root buses are virtual and don't return true on pci_is_pcie() * and will fail pcie_bus_configure_settings() early. It can instead be * run on each of the real root ports. 
*/ list_for_each_entry(child, &vmd->bus->children, node) pcie_bus_configure_settings(child); pci_bus_add_devices(vmd->bus); WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj, "domain"), "Can't create symlink to domain\n"); return 0; } static irqreturn_t vmd_irq(int irq, void *data) { struct vmd_irq_list *irqs = data; struct vmd_irq *vmdirq; int idx; idx = srcu_read_lock(&irqs->srcu); list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node) generic_handle_irq(vmdirq->virq); srcu_read_unlock(&irqs->srcu, idx); return IRQ_HANDLED; } static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct vmd_dev *vmd; int i, err; if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20)) return -ENOMEM; vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL); if (!vmd) return -ENOMEM; vmd->dev = dev; err = pcim_enable_device(dev); if (err < 0) return err; vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0); if (!vmd->cfgbar) return -ENOMEM; pci_set_master(dev); if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) && dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) return -ENODEV; vmd->msix_count = pci_msix_vec_count(dev); if (vmd->msix_count < 0) return -ENODEV; vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count, PCI_IRQ_MSIX); if (vmd->msix_count < 0) return vmd->msix_count; vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs), GFP_KERNEL); if (!vmd->irqs) return -ENOMEM; for (i = 0; i < vmd->msix_count; i++) { err = init_srcu_struct(&vmd->irqs[i].srcu); if (err) return err; INIT_LIST_HEAD(&vmd->irqs[i].irq_list); err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), vmd_irq, IRQF_NO_THREAD, "vmd", &vmd->irqs[i]); if (err) return err; } spin_lock_init(&vmd->cfg_lock); pci_set_drvdata(dev, vmd); err = vmd_enable_domain(vmd, (unsigned long) id->driver_data); if (err) return err; dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n", vmd->sysdata.domain); return 0; } static void vmd_cleanup_srcu(struct 
vmd_dev *vmd) { int i; for (i = 0; i < vmd->msix_count; i++) cleanup_srcu_struct(&vmd->irqs[i].srcu); } static void vmd_remove(struct pci_dev *dev) { struct vmd_dev *vmd = pci_get_drvdata(dev); sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); pci_stop_root_bus(vmd->bus); pci_remove_root_bus(vmd->bus); vmd_cleanup_srcu(vmd); vmd_teardown_dma_ops(vmd); vmd_detach_resources(vmd); irq_domain_remove(vmd->irq_domain); } #ifdef CONFIG_PM_SLEEP static int vmd_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct vmd_dev *vmd = pci_get_drvdata(pdev); int i; for (i = 0; i < vmd->msix_count; i++) devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]); pci_save_state(pdev); return 0; } static int vmd_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct vmd_dev *vmd = pci_get_drvdata(pdev); int err, i; for (i = 0; i < vmd->msix_count; i++) { err = devm_request_irq(dev, pci_irq_vector(pdev, i), vmd_irq, IRQF_NO_THREAD, "vmd", &vmd->irqs[i]); if (err) return err; } pci_restore_state(pdev); return 0; } #endif static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume); static const struct pci_device_id vmd_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0), .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW | VMD_FEAT_HAS_BUS_RESTRICTIONS,}, {0,} }; MODULE_DEVICE_TABLE(pci, vmd_ids); static struct pci_driver vmd_drv = { .name = "vmd", .id_table = vmd_ids, .probe = vmd_probe, .remove = vmd_remove, .driver = { .pm = &vmd_dev_pm_ops, }, }; module_pci_driver(vmd_drv); MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.6");
BPI-SINOVOIP/BPI-Mainline-kernel
linux-5.4/drivers/pci/controller/vmd.c
C
gpl-2.0
23,241
import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..")) import codecs from pattern.vector import Document, PORTER, LEMMA # A Document is a "bag-of-words" that splits a string into words and counts them. # A list of words or dictionary of (word, count)-items can also be given. # Words (or more generally "features") and their word count ("feature weights") # can be used to compare documents. The word count in a document is normalized # between 0.0-1.0 so that shorted documents can be compared to longer documents. # Words can be stemmed or lemmatized before counting them. # The purpose of stemming is to bring variant forms a word together. # For example, "conspiracy" and "conspired" are both stemmed to "conspir". # Nowadays, lemmatization is usually preferred over stemming, # e.g., "conspiracies" => "conspiracy", "conspired" => "conspire". s = """ The shuttle Discovery, already delayed three times by technical problems and bad weather, was grounded again Friday, this time by a potentially dangerous gaseous hydrogen leak in a vent line attached to the ship's external tank. The Discovery was initially scheduled to make its 39th and final flight last Monday, bearing fresh supplies and an intelligent robot for the International Space Station. But complications delayed the flight from Monday to Friday, when the hydrogen leak led NASA to conclude that the shuttle would not be ready to launch before its flight window closed this Monday. """ # With threshold=1, only words that occur more than once are counted. # With stopwords=False, words like "the", "and", "I", "is" are ignored. document = Document(s, threshold=1, stopwords=False) print document.words print # The /corpus folder contains texts mined from Wikipedia. 
# Below is the mining script (we already executed it for you): #import os, codecs #from pattern.web import Wikipedia # #w = Wikipedia() #for q in ( # "badger", "bear", "dog", "dolphin", "lion", "parakeet", # "rabbit", "shark", "sparrow", "tiger", "wolf"): # s = w.search(q, cached=True) # s = s.plaintext() # print os.path.join("corpus2", q+".txt") # f = codecs.open(os.path.join("corpus2", q+".txt"), "w", encoding="utf-8") # f.write(s) # f.close() # Loading a document from a text file: f = os.path.join(os.path.dirname(__file__), "corpus", "wolf.txt") s = codecs.open(f, encoding="utf-8").read() document = Document(s, name="wolf", stemmer=PORTER) print document print document.keywords(top=10) # (weight, feature)-items. print # Same document, using lemmatization instead of stemming (slower): document = Document(s, name="wolf", stemmer=LEMMA) print document print document.keywords(top=10) print # In summary, a document is a bag-of-words representation of a text. # Bag-of-words means that the word order is discarded. # The dictionary of words (features) and their normalized word count (weights) # is also called the document vector: document = Document("a black cat and a white cat", stopwords=True) print document.words print document.vector.features for feature, weight in document.vector.items(): print feature, weight # Document vectors can be bundled into a Model (next example).
krishna11888/ai
third_party/pattern/examples/05-vector/01-document.py
Python
gpl-2.0
3,205
/* sam_header.c -- basic SAM/BAM header API. Copyright (C) 2009-2013 Genome Research Ltd. Author: Petr Danecek <pd3@sanger.ac.uk> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "sam_header.h" #include <stdio.h> #include <string.h> #include <ctype.h> #include <stdlib.h> #include <stdarg.h> #include "htslib/khash.h" KHASH_MAP_INIT_STR(str, const char *) struct _HeaderList { struct _HeaderList *last; // Hack: Used and maintained only by list_append_to_end. Maintained in the root node only. 
struct _HeaderList *next; void *data; }; typedef struct _HeaderList list_t; typedef list_t HeaderDict; typedef struct { char key[2]; char *value; } HeaderTag; typedef struct { char type[2]; list_t *tags; } HeaderLine; const char *o_hd_tags[] = {"SO","GO",NULL}; const char *r_hd_tags[] = {"VN",NULL}; const char *o_sq_tags[] = {"AS","M5","UR","SP",NULL}; const char *r_sq_tags[] = {"SN","LN",NULL}; const char *u_sq_tags[] = {"SN",NULL}; const char *o_rg_tags[] = {"CN","DS","DT","FO","KS","LB","PG","PI","PL","PU","SM",NULL}; const char *r_rg_tags[] = {"ID",NULL}; const char *u_rg_tags[] = {"ID",NULL}; const char *o_pg_tags[] = {"VN","CL",NULL}; const char *r_pg_tags[] = {"ID",NULL}; const char *types[] = {"HD","SQ","RG","PG","CO",NULL}; const char **optional_tags[] = {o_hd_tags,o_sq_tags,o_rg_tags,o_pg_tags,NULL,NULL}; const char **required_tags[] = {r_hd_tags,r_sq_tags,r_rg_tags,r_pg_tags,NULL,NULL}; const char **unique_tags[] = {NULL, u_sq_tags,u_rg_tags,NULL,NULL,NULL}; static void debug(const char *format, ...) { va_list ap; va_start(ap, format); vfprintf(stderr, format, ap); va_end(ap); } #if 0 // Replaced by list_append_to_end static list_t *list_prepend(list_t *root, void *data) { list_t *l = malloc(sizeof(list_t)); l->next = root; l->data = data; return l; } #endif // Relies on the root->last being correct. Do not use with the other list_* // routines unless they are fixed to modify root->last as well. 
static list_t *list_append_to_end(list_t *root, void *data) { list_t *l = malloc(sizeof(list_t)); l->last = l; l->next = NULL; l->data = data; if ( !root ) return l; root->last->next = l; root->last = l; return root; } static list_t *list_append(list_t *root, void *data) { list_t *l = root; while (l && l->next) l = l->next; if ( l ) { l->next = malloc(sizeof(list_t)); l = l->next; } else { l = malloc(sizeof(list_t)); root = l; } l->data = data; l->next = NULL; return root; } static void list_free(list_t *root) { list_t *l = root; while (root) { l = root; root = root->next; free(l); } } // Look for a tag "XY" in a predefined const char *[] array. static int tag_exists(const char *tag, const char **tags) { int itag=0; if ( !tags ) return -1; while ( tags[itag] ) { if ( tags[itag][0]==tag[0] && tags[itag][1]==tag[1] ) return itag; itag++; } return -1; } // Mimics the behaviour of getline, except it returns pointer to the next chunk of the text // or NULL if everything has been read. The lineptr should be freed by the caller. The // newline character is stripped. static const char *nextline(char **lineptr, size_t *n, const char *text) { int len; const char *to = text; if ( !*to ) return NULL; while ( *to && *to!='\n' && *to!='\r' ) to++; len = to - text + 1; if ( *to ) { // Advance the pointer for the next call if ( *to=='\n' ) to++; else if ( *to=='\r' && *(to+1)=='\n' ) to+=2; } if ( !len ) return to; if ( !*lineptr ) { *lineptr = malloc(len); *n = len; } else if ( *n<len ) { *lineptr = realloc(*lineptr, len); *n = len; } if ( !*lineptr ) { debug("[nextline] Insufficient memory!\n"); return 0; } memcpy(*lineptr,text,len); (*lineptr)[len-1] = 0; return to; } // name points to "XY", value_from points to the first character of the value string and // value_to points to the last character of the value string. 
static HeaderTag *new_tag(const char *name, const char *value_from, const char *value_to) { HeaderTag *tag = malloc(sizeof(HeaderTag)); int len = value_to-value_from+1; tag->key[0] = name[0]; tag->key[1] = name[1]; tag->value = malloc(len+1); memcpy(tag->value,value_from,len+1); tag->value[len] = 0; return tag; } static HeaderTag *header_line_has_tag(HeaderLine *hline, const char *key) { list_t *tags = hline->tags; while (tags) { HeaderTag *tag = tags->data; if ( tag->key[0]==key[0] && tag->key[1]==key[1] ) return tag; tags = tags->next; } return NULL; } // Return codes: // 0 .. different types or unique tags differ or conflicting tags, cannot be merged // 1 .. all tags identical -> no need to merge, drop one // 2 .. the unique tags match and there are some conflicting tags (same tag, different value) -> error, cannot be merged nor duplicated // 3 .. there are some missing complementary tags and no unique conflict -> can be merged into a single line static int sam_header_compare_lines(HeaderLine *hline1, HeaderLine *hline2) { HeaderTag *t1, *t2; if ( hline1->type[0]!=hline2->type[0] || hline1->type[1]!=hline2->type[1] ) return 0; int itype = tag_exists(hline1->type,types); if ( itype==-1 ) { debug("[sam_header_compare_lines] Unknown type [%c%c]\n", hline1->type[0],hline1->type[1]); return -1; // FIXME (lh3): error; I do not know how this will be handled in Petr's code } if ( unique_tags[itype] ) { t1 = header_line_has_tag(hline1,unique_tags[itype][0]); t2 = header_line_has_tag(hline2,unique_tags[itype][0]); if ( !t1 || !t2 ) // this should never happen, the unique tags are required return 2; if ( strcmp(t1->value,t2->value) ) return 0; // the unique tags differ, cannot be merged } if ( !required_tags[itype] && !optional_tags[itype] ) { t1 = hline1->tags->data; t2 = hline2->tags->data; if ( !strcmp(t1->value,t2->value) ) return 1; // identical comments return 0; } int missing=0, itag=0; while ( required_tags[itype] && required_tags[itype][itag] ) { t1 = 
header_line_has_tag(hline1,required_tags[itype][itag]); t2 = header_line_has_tag(hline2,required_tags[itype][itag]); if ( !t1 && !t2 ) return 2; // this should never happen else if ( !t1 || !t2 ) missing = 1; // there is some tag missing in one of the hlines else if ( strcmp(t1->value,t2->value) ) { if ( unique_tags[itype] ) return 2; // the lines have a matching unique tag but have a conflicting tag return 0; // the lines contain conflicting tags, cannot be merged } itag++; } itag = 0; while ( optional_tags[itype] && optional_tags[itype][itag] ) { t1 = header_line_has_tag(hline1,optional_tags[itype][itag]); t2 = header_line_has_tag(hline2,optional_tags[itype][itag]); if ( !t1 && !t2 ) { itag++; continue; } if ( !t1 || !t2 ) missing = 1; // there is some tag missing in one of the hlines else if ( strcmp(t1->value,t2->value) ) { if ( unique_tags[itype] ) return 2; // the lines have a matching unique tag but have a conflicting tag return 0; // the lines contain conflicting tags, cannot be merged } itag++; } if ( missing ) return 3; // there are some missing complementary tags with no conflicts, can be merged return 1; } static HeaderLine *sam_header_line_clone(const HeaderLine *hline) { list_t *tags; HeaderLine *out = malloc(sizeof(HeaderLine)); out->type[0] = hline->type[0]; out->type[1] = hline->type[1]; out->tags = NULL; tags = hline->tags; while (tags) { HeaderTag *old = tags->data; HeaderTag *new = malloc(sizeof(HeaderTag)); new->key[0] = old->key[0]; new->key[1] = old->key[1]; new->value = strdup(old->value); out->tags = list_append(out->tags, new); tags = tags->next; } return out; } static int sam_header_line_merge_with(HeaderLine *out_hline, const HeaderLine *tmpl_hline) { list_t *tmpl_tags; if ( out_hline->type[0]!=tmpl_hline->type[0] || out_hline->type[1]!=tmpl_hline->type[1] ) return 0; tmpl_tags = tmpl_hline->tags; while (tmpl_tags) { HeaderTag *tmpl_tag = tmpl_tags->data; HeaderTag *out_tag = header_line_has_tag(out_hline, tmpl_tag->key); if ( !out_tag ) 
{ HeaderTag *tag = malloc(sizeof(HeaderTag)); tag->key[0] = tmpl_tag->key[0]; tag->key[1] = tmpl_tag->key[1]; tag->value = strdup(tmpl_tag->value); out_hline->tags = list_append(out_hline->tags,tag); } tmpl_tags = tmpl_tags->next; } return 1; } static HeaderLine *sam_header_line_parse(const char *headerLine) { HeaderLine *hline; HeaderTag *tag; const char *from, *to; from = headerLine; if ( *from != '@' ) { debug("[sam_header_line_parse] expected '@', got [%s]\n", headerLine); return 0; } to = ++from; while (*to && *to!='\t') to++; if ( to-from != 2 ) { debug("[sam_header_line_parse] expected '@XY', got [%s]\nHint: The header tags must be tab-separated.\n", headerLine); return 0; } hline = malloc(sizeof(HeaderLine)); hline->type[0] = from[0]; hline->type[1] = from[1]; hline->tags = NULL; int itype = tag_exists(hline->type, types); from = to; while (*to && *to=='\t') to++; if ( to-from != 1 ) { debug("[sam_header_line_parse] multiple tabs on line [%s] (%d)\n", headerLine,(int)(to-from)); free(hline); return 0; } from = to; while (*from) { while (*to && *to!='\t') to++; if ( !required_tags[itype] && !optional_tags[itype] ) { // CO is a special case, it can contain anything, including tabs if ( *to ) { to++; continue; } tag = new_tag(" ",from,to-1); } else tag = new_tag(from,from+3,to-1); if ( header_line_has_tag(hline,tag->key) ) debug("The tag '%c%c' present (at least) twice on line [%s]\n", tag->key[0],tag->key[1], headerLine); hline->tags = list_append(hline->tags, tag); from = to; while (*to && *to=='\t') to++; if ( *to && to-from != 1 ) { debug("[sam_header_line_parse] multiple tabs on line [%s] (%d)\n", headerLine,(int)(to-from)); return 0; } from = to; } return hline; } // Must be of an existing type, all tags must be recognised and all required tags must be present static int sam_header_line_validate(HeaderLine *hline) { list_t *tags; HeaderTag *tag; int itype, itag; // Is the type correct? 
itype = tag_exists(hline->type, types); if ( itype==-1 ) { debug("The type [%c%c] not recognised.\n", hline->type[0],hline->type[1]); return 0; } // Has all required tags? itag = 0; while ( required_tags[itype] && required_tags[itype][itag] ) { if ( !header_line_has_tag(hline,required_tags[itype][itag]) ) { debug("The tag [%c%c] required for [%c%c] not present.\n", required_tags[itype][itag][0],required_tags[itype][itag][1], hline->type[0],hline->type[1]); return 0; } itag++; } // Are all tags recognised? tags = hline->tags; while ( tags ) { tag = tags->data; if ( !tag_exists(tag->key,required_tags[itype]) && !tag_exists(tag->key,optional_tags[itype]) ) { // Lower case tags are user-defined values. if( !(islower(tag->key[0]) || islower(tag->key[1])) ) { // Neither is lower case, but tag was not recognized. debug("Unknown tag [%c%c] for [%c%c].\n", tag->key[0],tag->key[1], hline->type[0],hline->type[1]); // return 0; // Even unknown tags are allowed - for forward compatibility with new attributes } // else - allow user defined tag } tags = tags->next; } return 1; } static void print_header_line(FILE *fp, HeaderLine *hline) { list_t *tags = hline->tags; HeaderTag *tag; fprintf(fp, "@%c%c", hline->type[0],hline->type[1]); while (tags) { tag = tags->data; fprintf(fp, "\t"); if ( tag->key[0]!=' ' || tag->key[1]!=' ' ) fprintf(fp, "%c%c:", tag->key[0],tag->key[1]); fprintf(fp, "%s", tag->value); tags = tags->next; } fprintf(fp,"\n"); } static void sam_header_line_free(HeaderLine *hline) { list_t *tags = hline->tags; while (tags) { HeaderTag *tag = tags->data; free(tag->value); free(tag); tags = tags->next; } list_free(hline->tags); free(hline); } void sam_header_free(void *_header) { HeaderDict *header = (HeaderDict*)_header; list_t *hlines = header; while (hlines) { sam_header_line_free(hlines->data); hlines = hlines->next; } list_free(header); } HeaderDict *sam_header_clone(const HeaderDict *dict) { HeaderDict *out = NULL; while (dict) { HeaderLine *hline = dict->data; 
out = list_append(out, sam_header_line_clone(hline)); dict = dict->next; } return out; } // Returns a newly allocated string char *sam_header_write(const void *_header) { const HeaderDict *header = (const HeaderDict*)_header; char *out = NULL; int len=0, nout=0; const list_t *hlines; // Calculate the length of the string to allocate hlines = header; while (hlines) { len += 4; // @XY and \n HeaderLine *hline = hlines->data; list_t *tags = hline->tags; while (tags) { HeaderTag *tag = tags->data; len += strlen(tag->value) + 1; // \t if ( tag->key[0]!=' ' || tag->key[1]!=' ' ) len += strlen(tag->value) + 3; // XY: tags = tags->next; } hlines = hlines->next; } nout = 0; out = malloc(len+1); hlines = header; while (hlines) { HeaderLine *hline = hlines->data; nout += sprintf(out+nout,"@%c%c",hline->type[0],hline->type[1]); list_t *tags = hline->tags; while (tags) { HeaderTag *tag = tags->data; nout += sprintf(out+nout,"\t"); if ( tag->key[0]!=' ' || tag->key[1]!=' ' ) nout += sprintf(out+nout,"%c%c:", tag->key[0],tag->key[1]); nout += sprintf(out+nout,"%s", tag->value); tags = tags->next; } hlines = hlines->next; nout += sprintf(out+nout,"\n"); } out[len] = 0; return out; } void *sam_header_parse2(const char *headerText) { list_t *hlines = NULL; HeaderLine *hline; const char *text; char *buf=NULL; size_t nbuf = 0; int tovalidate = 0; if ( !headerText ) return 0; text = headerText; while ( (text=nextline(&buf, &nbuf, text)) ) { hline = sam_header_line_parse(buf); if ( hline && (!tovalidate || sam_header_line_validate(hline)) ) // With too many (~250,000) reference sequences the header parsing was too slow with list_append. 
hlines = list_append_to_end(hlines, hline); else { if (hline) sam_header_line_free(hline); sam_header_free(hlines); if ( buf ) free(buf); return NULL; } } if ( buf ) free(buf); return hlines; } void *sam_header2tbl(const void *_dict, char type[2], char key_tag[2], char value_tag[2]) { const HeaderDict *dict = (const HeaderDict*)_dict; const list_t *l = dict; khash_t(str) *tbl = kh_init(str); khiter_t k; int ret; if (_dict == 0) return tbl; // return an empty (not null) hash table while (l) { HeaderLine *hline = l->data; if ( hline->type[0]!=type[0] || hline->type[1]!=type[1] ) { l = l->next; continue; } HeaderTag *key, *value; key = header_line_has_tag(hline,key_tag); value = header_line_has_tag(hline,value_tag); if ( !key || !value ) { l = l->next; continue; } k = kh_get(str, tbl, key->value); if ( k != kh_end(tbl) ) debug("[sam_header_lookup_table] They key %s not unique.\n", key->value); k = kh_put(str, tbl, key->value, &ret); kh_value(tbl, k) = value->value; l = l->next; } return tbl; } char **sam_header2list(const void *_dict, char type[2], char key_tag[2], int *_n) { const HeaderDict *dict = (const HeaderDict*)_dict; const list_t *l = dict; int max, n; char **ret; ret = 0; *_n = max = n = 0; while (l) { HeaderLine *hline = l->data; if ( hline->type[0]!=type[0] || hline->type[1]!=type[1] ) { l = l->next; continue; } HeaderTag *key; key = header_line_has_tag(hline,key_tag); if ( !key ) { l = l->next; continue; } if (n == max) { max = max? 
max<<1 : 4; ret = realloc(ret, max * sizeof(char*)); } ret[n++] = key->value; l = l->next; } *_n = n; return ret; } void *sam_header2key_val(void *iter, const char type[2], const char key_tag[2], const char value_tag[2], const char **_key, const char **_value) { list_t *l = iter; if ( !l ) return NULL; while (l) { HeaderLine *hline = l->data; if ( hline->type[0]!=type[0] || hline->type[1]!=type[1] ) { l = l->next; continue; } HeaderTag *key, *value; key = header_line_has_tag(hline,key_tag); value = header_line_has_tag(hline,value_tag); if ( !key && !value ) { l = l->next; continue; } *_key = key->value; *_value = value->value; return l->next; } return l; } const char *sam_tbl_get(void *h, const char *key) { khash_t(str) *tbl = (khash_t(str)*)h; khint_t k; k = kh_get(str, tbl, key); return k == kh_end(tbl)? 0 : kh_val(tbl, k); } int sam_tbl_size(void *h) { khash_t(str) *tbl = (khash_t(str)*)h; return h? kh_size(tbl) : 0; } void sam_tbl_destroy(void *h) { khash_t(str) *tbl = (khash_t(str)*)h; kh_destroy(str, tbl); } void *sam_header_merge(int n, const void **_dicts) { const HeaderDict **dicts = (const HeaderDict**)_dicts; HeaderDict *out_dict; int idict, status; if ( n<2 ) return NULL; out_dict = sam_header_clone(dicts[0]); for (idict=1; idict<n; idict++) { const list_t *tmpl_hlines = dicts[idict]; while ( tmpl_hlines ) { list_t *out_hlines = out_dict; int inserted = 0; while ( out_hlines ) { status = sam_header_compare_lines(tmpl_hlines->data, out_hlines->data); if ( status==0 ) { out_hlines = out_hlines->next; continue; } if ( status==2 ) { print_header_line(stderr,tmpl_hlines->data); print_header_line(stderr,out_hlines->data); debug("Conflicting lines, cannot merge the headers.\n"); return 0; } if ( status==3 ) sam_header_line_merge_with(out_hlines->data, tmpl_hlines->data); inserted = 1; break; } if ( !inserted ) out_dict = list_append(out_dict, sam_header_line_clone(tmpl_hlines->data)); tmpl_hlines = tmpl_hlines->next; } } return out_dict; } char 
**sam_header2tbl_n(const void *dict, const char type[2], const char *tags[], int *n) { int nout = 0; char **out = NULL; *n = 0; list_t *l = (list_t *)dict; if ( !l ) return NULL; int i, ntags = 0; while ( tags[ntags] ) ntags++; while (l) { HeaderLine *hline = l->data; if ( hline->type[0]!=type[0] || hline->type[1]!=type[1] ) { l = l->next; continue; } out = (char**) realloc(out, sizeof(char*)*(nout+1)*ntags); for (i=0; i<ntags; i++) { HeaderTag *key = header_line_has_tag(hline, tags[i]); if ( !key ) { out[nout*ntags+i] = NULL; continue; } out[nout*ntags+i] = key->value; } nout++; l = l->next; } *n = nout; return out; }
hanfang/scikit-ribo
tools/samtools-1.2/sam_header.c
C
gpl-2.0
22,559
/* * Dynamic To Top Plugin * http://www.mattvarone.com * * By Matt Varone * @sksmatt * */ var mv_dynamic_to_top;(function($,mv_dynamic_to_top){jQuery.fn.DynamicToTop=function(options){var defaults={text:mv_dynamic_to_top.text,min:parseInt(mv_dynamic_to_top.min,10),fade_in:600,fade_out:400,speed:parseInt(mv_dynamic_to_top.speed,10),easing:mv_dynamic_to_top.easing,version:mv_dynamic_to_top.version,id:'dynamic-to-top'},settings=$.extend(defaults,options);if(settings.version===""||settings.version==='0'){settings.text='<span>&nbsp;</span>';} if(!$.isFunction(settings.easing)){settings.easing='linear';} var $toTop=$('<a href=\"#\" id=\"'+settings.id+'\"></a>').html(settings.text);$toTop.hide().appendTo('body').click(function(){$('html, body').stop().animate({scrollTop:0},settings.speed,settings.easing);return false;});$(window).scroll(function(){var sd=jQuery(window).scrollTop();if(typeof document.body.style.maxHeight==="undefined"){$toTop.css({'position':'absolute','top':sd+$(window).height()-mv_dynamic_to_top.margin});} if(sd>settings.min){$toTop.fadeIn(settings.fade_in);}else{$toTop.fadeOut(settings.fade_out);}});};$('body').DynamicToTop();})(jQuery,mv_dynamic_to_top);
richardPZH/wordpress-plus
wp-content/plugins/dynamic-to-top/js/dynamic.to.top.min.js
JavaScript
gpl-2.0
1,192
/* packet-smrse.c * Routines for SMRSE Short Message Relay Service packet dissection * Ronnie Sahlberg 2004 * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * Copyright 1998 Gerald Combs * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include <epan/packet.h> #include <epan/asn1.h> #include "packet-ber.h" #include "packet-smrse.h" #define PNAME "Short Message Relaying Service" #define PSNAME "SMRSE" #define PFNAME "smrse" #define TCP_PORT_SMRSE 4321 void proto_register_smrse(void); void proto_reg_handoff_smrse(void); /* Initialize the protocol and registered fields */ static int proto_smrse = -1; static int hf_smrse_reserved = -1; static int hf_smrse_tag = -1; static int hf_smrse_length = -1; static int hf_smrse_Octet_Format = -1; #include "packet-smrse-hf.c" /* Initialize the subtree pointers */ static gint ett_smrse = -1; #include "packet-smrse-ett.c" #include "packet-smrse-fn.c" static const value_string tag_vals[] = { { 1, "AliveTest" }, { 2, "AliveTestRsp" }, { 3, "Bind" }, { 4, "BindRsp" }, { 5, "BindFail" }, { 6, "Unbind" }, { 7, "MT" }, { 8, "MO" }, { 9, "Ack" }, { 10, "Error" }, { 11, "Alert" }, { 0, NULL } }; static int dissect_smrse(tvbuff_t *tvb, packet_info *pinfo, proto_tree *parent_tree, void *data _U_) { proto_item *item = NULL; proto_tree *tree = 
NULL; guint8 reserved, tag; int offset=0; asn1_ctx_t asn1_ctx; asn1_ctx_init(&asn1_ctx, ASN1_ENC_BER, TRUE, pinfo); reserved=tvb_get_guint8(tvb, 0); tag=tvb_get_guint8(tvb, 3); if( reserved!= 126 ) return 0; if( (tag<1)||(tag>11) ) return 0; if(parent_tree){ item = proto_tree_add_item(parent_tree, proto_smrse, tvb, 0, -1, ENC_NA); tree = proto_item_add_subtree(item, ett_smrse); } col_set_str(pinfo->cinfo, COL_PROTOCOL, "SMRSE"); col_add_str(pinfo->cinfo, COL_INFO, val_to_str(tag, tag_vals,"Unknown Tag:0x%02x")); proto_tree_add_item(tree, hf_smrse_reserved, tvb, 0, 1, ENC_BIG_ENDIAN); proto_tree_add_item(tree, hf_smrse_length, tvb, 1, 2, ENC_BIG_ENDIAN); proto_tree_add_item(tree, hf_smrse_tag, tvb, 3, 1, ENC_BIG_ENDIAN); switch(tag){ case 1: case 2: offset=4; break; case 3: offset=dissect_smrse_SMR_Bind(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 4: offset=dissect_smrse_SMR_Bind_Confirm(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 5: offset=dissect_smrse_SMR_Bind_Failure(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 6: offset=dissect_smrse_SMR_Unbind(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 7: offset=dissect_smrse_RPDataMT(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 8: offset=dissect_smrse_RPDataMO(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 9: offset=dissect_smrse_RPAck(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 10: offset=dissect_smrse_RPError(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; case 11: offset=dissect_smrse_RPAlertSC(FALSE, tvb, 4, &asn1_ctx, tree, -1); break; } return offset; } /*--- proto_register_smrse ----------------------------------------------*/ void proto_register_smrse(void) { /* List of fields */ static hf_register_info hf[] = { { &hf_smrse_reserved, { "Reserved", "smrse.reserved", FT_UINT8, BASE_DEC, NULL, 0, "Reserved byte, must be 126", HFILL }}, { &hf_smrse_tag, { "Tag", "smrse.tag", FT_UINT8, BASE_DEC, VALS(tag_vals), 0, NULL, HFILL }}, { &hf_smrse_length, { "Length", "smrse.length", FT_UINT16, BASE_DEC, 
NULL, 0, "Length of SMRSE PDU", HFILL }}, { &hf_smrse_Octet_Format, { "octet-Format", "smrse.octet_Format", FT_STRING, BASE_NONE, NULL, 0, "SMS-Address/address-value/octet-format", HFILL }}, #include "packet-smrse-hfarr.c" }; /* List of subtrees */ static gint *ett[] = { &ett_smrse, #include "packet-smrse-ettarr.c" }; /* Register protocol */ proto_smrse = proto_register_protocol(PNAME, PSNAME, PFNAME); /* Register fields and subtrees */ proto_register_field_array(proto_smrse, hf, array_length(hf)); proto_register_subtree_array(ett, array_length(ett)); } /*--- proto_reg_handoff_smrse -------------------------------------------*/ void proto_reg_handoff_smrse(void) { dissector_handle_t smrse_handle; smrse_handle = new_create_dissector_handle(dissect_smrse, proto_smrse); dissector_add_uint("tcp.port",TCP_PORT_SMRSE, smrse_handle); }
frenos/wireshark
asn1/smrse/packet-smrse-template.c
C
gpl-2.0
5,066
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build irix package syscall //sysnb raw_ptrace(request int, pid int, addr *byte, data *byte) (err Errno) //ptrace(request _C_int, pid Pid_t, addr *byte, data *byte) _C_long
krichter722/gcc
libgo/go/syscall/libcall_irix.go
GO
gpl-2.0
338
#if !defined(_FXFT_VERSION_) || _FXFT_VERSION_ == 2501 /***************************************************************************/ /* */ /* truetype.c */ /* */ /* FreeType TrueType driver component (body only). */ /* */ /* Copyright 1996-2001, 2004, 2006, 2012 by */ /* David Turner, Robert Wilhelm, and Werner Lemberg. */ /* */ /* This file is part of the FreeType project, and may only be used, */ /* modified, and distributed under the terms of the FreeType project */ /* license, LICENSE.TXT. By continuing to use, modify, or distribute */ /* this file you indicate that you have read the license and */ /* understand and accept it fully. */ /* */ /***************************************************************************/ #define FT_MAKE_OPTION_SINGLE_OBJECT #define FT2_BUILD_LIBRARY #include "../../include/ft2build.h" #include "ttpic.c" #include "ttdriver.c" /* driver interface */ #include "ttpload.c" /* tables loader */ #include "ttgload.c" /* glyph loader */ #include "ttobjs.c" /* object manager */ #ifdef TT_USE_BYTECODE_INTERPRETER #include "ttinterp.c" #include "ttsubpix.c" #endif #ifdef TT_CONFIG_OPTION_GX_VAR_SUPPORT #include "ttgxvar.c" /* gx distortable font */ #endif /* END */ #endif
s20121035/rk3288_android5.1_repo
external/pdfium/core/src/fxge/fx_freetype/fxft2.5.01/src/truetype/fxft_truetype.c
C
gpl-3.0
1,828
html, body { margin:0; padding: 0; font-family: "helvetica neue"; font-size: 14px; color: #666666; } #ubwidget { position: absolute; top: 0; bottom: 0; left: 0; right: 0; background: url("images/bg.png"); } .theme-pad #ubwidget { border-radius: 40px; border-width: 52px; -webkit-border-image: url("images/pad-bd.png") 52 repeat; -moz-border-image: url("images/pad-bd.png") 52 repeat; border-image: url("images/pad-bd.png") 52 repeat; } .theme-slate #ubwidget { border-radius: 44px; border-width: 52px; -webkit-border-image: url("images/slate-bd.png") 52 repeat; -moz-border-image: url("images/slate-bd.png") 52 repeat; border-image: url("images/slate-bd.png") 52 repeat; } #ubwidget > .wrapper { position: absolute; top: 0px; bottom: 0px; left: 0px; right: 0px; overflow: hidden; } .theme-slate #ubwidget > .wrapper, .theme-pad #ubwidget > .wrapper { position: absolute; top: -49px; bottom: -5px; left: -5px; right: -5px; overflow: hidden; } #toolbar { display: table; width: 100%; height: 44px; font-size: 24px; color: #FFCC99; padding:0 10px; } .theme-pad #toolbar, .theme-slate #toolbar { height: 38px; padding: 6px 0 0; } .theme-slate #toolbar { color: #7F613F; text-shadow: #FFDCA9 0 1px 0; } #toolbar > * { display: table-cell; height: 100%; vertical-align: middle; } #toolbar .actions { text-align: right; } #toolbar button, h1 { font-weight: normal; font-size: 24px; color: #FFCC99; margin: 0; } .theme-slate #toolbar button, .theme-slate h1 { color: #7F613F; text-shadow: #FFDCA9 0 1px 0; } #toolbar button { border: none; padding: none; outline: none; background: none; cursor: pointer; padding-left: 34px; margin-left: 10px; height: 32px; } #toolbar button span { display: block; line-height: 32px; } #toolbar button[role='edit'] { background: url("images/toolbar-edit.png") left top no-repeat; } #toolbar button[role='view'] { display: none; color: #FFF; background: url("images/toolbar-edit.png") left -32px no-repeat; } #toolbar button[role='reload'] { background: 
url("images/toolbar-reload.png") left top no-repeat; } #toolbar button[role='help'] { background: url("images/toolbar-help.png") left top no-repeat; display: none; } .hasHelp #toolbar button[role='help'] { display: inline-block; } .showHelp #toolbar button[role='help'] { color: #FFF; background-position: left -32px; } .theme-slate #toolbar button[role='edit'] { background-image: url("images/slate-toolbar-edit.png"); } .theme-slate #toolbar button[role='view'] { text-shadow: #7F613F 0 -1px 0; background: url("images/slate-toolbar-edit.png") left -32px no-repeat; } .theme-slate #toolbar button[role='reload'] { background-image: url("images/slate-toolbar-reload.png"); } .theme-slate #toolbar button[role='help'] { background-image: url("images/slate-toolbar-help.png"); } .showHelp.theme-slate #toolbar button[role='help'] { text-shadow: #7F613F 0 -1px 0; } .onEdit #toolbar button[role='view'] { display: inline-block; } .onEdit #toolbar button[role='edit'] { display: none; } #help { width: 300px; height: 400px; position: absolute; margin-top: 10px; right: 10px; z-index: 10000; display: none; } .showHelp #help { display: block; } #content { position: absolute; top: 44px; bottom: 0; overflow: auto; left: 0; right: 0; background-image: -moz-radial-gradient(center center, ellipse closest-side, rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 0) 100%); background-image: -webkit-radial-gradient(center center, ellipse closest-side, rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 0) 100%); background-image: -o-radial-gradient(center center, ellipse closest-side, rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 0) 100%); background-image: -ms-radial-gradient(center center, ellipse closest-side, rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 0) 100%); background-image: radial-gradient(center center, ellipse closest-side, rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 0) 100%); } #parameters { display:none; padding: 10px 20px; background: url("images/parameters-bg.png"); 
-webkit-border-radius: 4px 4px 0 0; -moz-border-radius: 4px 4px 0 0; -mz-border-radius: 4px 4px 0 0; border-radius: 4px 4px 0 0; } #parameters label { font-style: italic; } #parameters label > select, #parameters label > input{ margin-left: 10px; width: 80px; } #parameters > div.inline { display: inline-block; } #parameters > div.inline+div.inline { margin-left: 20px; } #parameters input[type=text], #parameters input[type=number] { height: 26px; border: 1px solid #BBB; background-color: #FFF; padding: 0 4px; -webkit-border-radius: 4px; -moz-border-radius: 4px; -ms-border-radius: 4px; border-radius: 4px; -webkit-box-shadow: 0 1px 0 #FFF; -moz-box-shadow: 0 1px 0 #FFF; -ms-box-shadow: 0 1px 0 #FFF; box-shadow: 0 1px 0 #FFF; } #parameters input.tiny { width: 40px; } #parameters input.small { width: 80px; } #parameters input.medium { width: 120px; } #parameters input.long { width: 160px; } #scene { padding: 20px; } #scene > * { margin: 0 auto; } .onEdit #scene { } .onEdit #parameters { display: block; } /* .card-container { -webkit-perspective: 600px; width:100%; height: 100%; } .card { position: relative; width:100%; height: 100%; } .card > div { position: absolute; width:100%; height: 100%; -webkit-transform-style: preserve-3d; -webkit-transition: all .5s ease-in-out; -webkit-backface-visibility: hidden; } .card > div:first-child { -webkit-transform: rotateY( 0deg ); } .card > div:last-child { -webkit-transform: rotateY( -180deg ); } .card.flip > div:first-child { -webkit-transform: rotateY( 180deg ); } .card.flip > div:last-child { -webkit-transform: rotateY( 0deg ); } */
DIP-SEM/OpenBoard
resources/library/interactivities/Train.wgt/css/ubw-main.css
CSS
gpl-3.0
6,268
<?php // This file is part of Moodle - http://moodle.org/ // // Moodle is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // Moodle is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Moodle. If not, see <http://www.gnu.org/licenses/>. /** * Strings for component 'role', language 'en', branch 'MOODLE_20_STABLE' * * @package core_role * @copyright 1999 onwards Martin Dougiamas {@link http://moodle.com} * @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later */ $string['addinganewrole'] = 'Adding a new role'; $string['addrole'] = 'Add a new role'; $string['advancedoverride'] = 'Advanced role override'; $string['allow'] = 'Allow'; $string['allowassign'] = 'Allow role assignments'; $string['allowed'] = 'Allowed'; $string['allowoverride'] = 'Allow role overrides'; $string['allowroletoassign'] = 'Allow users with role {$a->fromrole} to assign the role {$a->targetrole}'; $string['allowroletooverride'] = 'Allow users with role {$a->fromrole} to override the role {$a->targetrole}'; $string['allowroletoswitch'] = 'Allow users with role {$a->fromrole} to switch roles to the role {$a->targetrole}'; $string['allowswitch'] = 'Allow role switches'; $string['allsiteusers'] = 'All site users'; $string['archetype'] = 'Role archetype'; $string['archetype_help'] = 'The role archetype determines the permissions when a role is reset to default. 
It also determines any new permissions for the role when the site is upgraded.'; $string['archetypecoursecreator'] = 'ARCHETYPE: Course creator'; $string['archetypeeditingteacher'] = 'ARCHETYPE: Teacher (editing)'; $string['archetypefrontpage'] = 'ARCHETYPE: Authenticated user on frontpage'; $string['archetypeguest'] = 'ARCHETYPE: Guest'; $string['archetypemanager'] = 'ARCHETYPE: Manager'; $string['archetypestudent'] = 'ARCHETYPE: Student'; $string['archetypeteacher'] = 'ARCHETYPE: Teacher (non-editing)'; $string['archetypeuser'] = 'ARCHETYPE: Authenticated user'; $string['assignanotherrole'] = 'Assign another role'; $string['assignerror'] = 'Error while assigning the role {$a->role} to user {$a->user}.'; $string['assignglobalroles'] = 'Assign system roles'; $string['assignedroles'] = 'Assigned roles'; $string['assignmentcontext'] = 'Assignment context'; $string['assignmentoptions'] = 'Assignment options'; $string['assignrolenameincontext'] = 'Assign role \'{$a->role}\' in {$a->context}'; $string['assignrole'] = 'Assign role'; $string['assignroles'] = 'Assign roles'; $string['assignroles_help'] = 'By assigning a role to a user in a context, you are granting them the permissions contained in that role, for the current context and all lower contexts. 
For example, if a user is assigned the role of student in a course, they will also have the role of student for all activities and blocks within the course.'; $string['assignroles_link'] = 'admin/roles/assign'; $string['assignrolesin'] = 'Assign roles in {$a}'; $string['assignrolesrelativetothisuser'] = 'Assign roles relative to this user'; $string['backtoallroles'] = 'Back to the list of all roles'; $string['backup:anonymise'] = 'Anonymise user data on backup'; $string['backup:backupactivity'] = 'Backup activities'; $string['backup:backupcourse'] = 'Backup courses'; $string['backup:backupsection'] = 'Backup sections'; $string['backup:configure'] = 'Configure backup options'; $string['backup:downloadfile'] = 'Download files from backup areas'; $string['backup:backuptargethub'] = 'Backup for hub'; $string['backup:backuptargetimport'] = 'Backup for import'; $string['backup:userinfo'] = 'Backup user data'; $string['badges:awardbadge'] = 'Award badge to a user'; $string['badges:createbadge'] = 'Create/duplicate badges'; $string['badges:configuredetails'] = 'Set up/edit badge details'; $string['badges:configurecriteria'] = 'Set up/edit criteria of earning a badge'; $string['badges:configuremessages'] = 'Configure badge messages'; $string['badges:deletebadge'] = 'Delete badges'; $string['badges:earnbadge'] = 'Earn badge'; $string['badges:manageglobalsettings'] = 'Manage badges global settings'; $string['badges:manageownbadges'] = 'View and manage own earned badges'; $string['badges:viewawarded'] = 'View users who earned a specific badge without being able to award a badge'; $string['badges:viewbadges'] = 'View available badges without earning them'; $string['badges:viewotherbadges'] = 'View public badges in other users\' profiles'; $string['block:edit'] = 'Edit a block\'s settings'; $string['block:view'] = 'View block'; $string['blog:associatecourse'] = 'This capability is deprecated and does nothing.'; $string['blog:associatemodule'] = 'This capability is deprecated and 
does nothing.'; $string['blog:create'] = 'Create new blog entries'; $string['blog:manageentries'] = 'Edit and manage entries'; $string['blog:manageexternal'] = 'Edit and manage external blogs'; $string['blog:manageofficialtags'] = 'Manage official tags'; $string['blog:managepersonaltags'] = 'Manage personal tags'; $string['blog:search'] = 'Search blog entries'; $string['blog:view'] = 'View blog entries'; $string['blog:viewdrafts'] = 'View draft blog entries'; $string['calendar:manageentries'] = 'Manage any calendar entries'; $string['calendar:managegroupentries'] = 'Manage group calendar entries'; $string['calendar:manageownentries'] = 'Manage own calendar entries'; $string['capabilities'] = 'Capabilities'; $string['capability'] = 'Capability'; $string['category:create'] = 'Create categories'; $string['category:delete'] = 'Delete categories'; $string['category:manage'] = 'Manage categories'; $string['category:update'] = 'Update categories'; $string['category:viewhiddencategories'] = 'See hidden categories'; $string['category:visibility'] = 'See hidden categories'; $string['cohort:assign'] = 'Add and remove cohort members'; $string['cohort:view'] = 'View site-wide cohorts'; $string['cohort:manage'] = 'Create, delete and move cohorts'; $string['comment:delete'] = 'Delete comments'; $string['comment:post'] = 'Post comments'; $string['comment:view'] = 'Read comments'; $string['community:add'] = 'Use the community block to search hubs and find courses'; $string['community:download'] = 'Download a course from the community block'; $string['confirmaddadmin'] = 'Do you really want to add user <strong>{$a}</strong> as new site administrator?'; $string['confirmdeladmin'] = 'Do you really want to remove user <strong>{$a}</strong> from the list of site administrators?'; $string['confirmroleprevent'] = 'Do you really want to remove <strong>{$a->role}</strong> from the list of allowed roles for capability {$a->cap} in context {$a->context}?'; $string['confirmroleunprohibit'] = 
'Do you really want to remove <strong>{$a->role}</strong> from the list of prohibited roles for capability {$a->cap} in context {$a->context}?'; $string['confirmunassign'] = 'Are you sure you wish to remove this role from this user?'; $string['confirmunassigntitle'] = 'Confirm role change'; $string['confirmunassignyes'] = 'Remove'; $string['confirmunassignno'] = 'Cancel'; $string['context'] = 'Context'; $string['course:activityvisibility'] = 'Hide/show activities'; $string['course:bulkmessaging'] = 'Send a message to many people'; $string['course:create'] = 'Create courses'; $string['course:delete'] = 'Delete courses'; $string['course:viewsuspendedusers'] = 'View suspended users'; $string['course:changecategory'] = 'Change course category'; $string['course:changefullname'] = 'Change course full name'; $string['course:changeidnumber'] = 'Change course ID number'; $string['course:changeshortname'] = 'Change course short name'; $string['course:changesummary'] = 'Change course summary'; $string['course:enrolconfig'] = 'Configure enrol instances in courses'; $string['course:enrolreview'] = 'Review course enrolments'; $string['course:ignorefilesizelimits'] = 'Use files larger than any file size restrictions'; $string['course:isincompletionreports'] = 'Be shown on completion reports'; $string['course:manageactivities'] = 'Manage activities'; $string['course:managefiles'] = 'Manage files'; $string['course:managegrades'] = 'Manage grades'; $string['course:managegroups'] = 'Manage groups'; $string['course:managescales'] = 'Manage scales'; $string['course:markcomplete'] = 'Mark users as complete in course completion'; $string['course:movesections'] = 'Move sections'; $string['course:publish'] = 'Publish a course into hub'; $string['course:request'] = 'Request new courses'; $string['course:reset'] = 'Reset course'; $string['course:reviewotherusers'] = 'Review other users'; $string['course:sectionvisibility'] = 'Control section visibility'; $string['course:setcurrentsection'] = 
'Set current section'; $string['course:update'] = 'Update course settings'; $string['course:useremail'] = 'Enable/disable email address'; $string['course:view'] = 'View courses without participation'; $string['course:viewcoursegrades'] = 'View course grades'; $string['course:viewhiddenactivities'] = 'View hidden activities'; $string['course:viewhiddencourses'] = 'View hidden courses'; $string['course:viewhiddensections'] = 'View hidden sections'; $string['course:viewhiddenuserfields'] = 'View hidden user fields'; $string['course:viewparticipants'] = 'View participants'; $string['course:viewscales'] = 'View scales'; $string['course:visibility'] = 'Hide/show courses'; $string['createrolebycopying'] = 'Create a new role by copying {$a}'; $string['createthisrole'] = 'Create this role'; $string['currentcontext'] = 'Current context'; $string['currentrole'] = 'Current role'; $string['customroledescription'] = 'Custom description'; $string['customroledescription_help'] = 'Descriptions of standard roles are localised automatically if the custom description is empty.'; $string['customrolename'] = 'Custom full name'; $string['customrolename_help'] = 'Names of standard roles are localised automatically if the custom name is empty. 
You must provide a full name for all custom roles.'; $string['defaultrole'] = 'Default role'; $string['defaultx'] = 'Default: {$a}'; $string['defineroles'] = 'Define roles'; $string['deletecourseoverrides'] = 'Delete all overrides in course'; $string['deletelocalroles'] = 'Delete all local role assignments'; $string['deleterolesure'] = '<p>Are you sure that you want to delete role "{$a->name} ({$a->shortname})"?</p><p>Currently this role is assigned to {$a->count} users.</p>'; $string['deletexrole'] = 'Delete {$a} role'; $string['duplicaterole'] = 'Duplicate role'; $string['duplicaterolesure'] = '<p>Are you sure that you want to duplicate role "{$a->name} ({$a->shortname})"?</p>'; $string['editingrolex'] = 'Editing role \'{$a}\''; $string['editrole'] = 'Edit role'; $string['editxrole'] = 'Edit {$a} role'; $string['errorbadrolename'] = 'Incorrect role name'; $string['errorbadroleshortname'] = 'Incorrect role short name'; $string['errorexistsrolename'] = 'Role name already exists'; $string['errorexistsroleshortname'] = 'Role name already exists'; $string['eventroleallowassignupdated'] = 'Allow role assignment'; $string['eventroleallowoverrideupdated'] = 'Allow role override'; $string['eventroleallowswitchupdated'] = 'Allow role switch'; $string['eventroleassigned'] = 'Role assigned'; $string['eventrolecapabilitiesupdated'] = 'Role capabilities updated'; $string['eventroledeleted'] = 'Role deleted'; $string['eventroleunassigned'] = 'Role unassigned'; $string['existingadmins'] = 'Current site administrators'; $string['existingusers'] = '{$a} existing users'; $string['explanation'] = 'Explanation'; $string['export'] = 'Export'; $string['extusers'] = 'Existing users'; $string['extusersmatching'] = 'Existing users matching \'{$a}\''; $string['filter:manage'] = 'Manage local filter settings'; $string['frontpageuser'] = 'Authenticated user on frontpage'; $string['frontpageuserdescription'] = 'All logged in users in the frontpage course.'; $string['globalrole'] = 'System 
role'; $string['globalroleswarning'] = 'WARNING! Any roles you assign from this page will apply to the assigned users throughout the entire system, including the front page and all the courses.'; $string['gotoassignroles'] = 'Go to Assign roles for this {$a->contextlevel}'; $string['gotoassignsystemroles'] = 'Go to Assign system roles'; $string['grade:edit'] = 'Edit grades'; $string['grade:export'] = 'Export grades'; $string['grade:hide'] = 'Hide/unhide grades or items'; $string['grade:import'] = 'Import grades'; $string['grade:lock'] = 'Lock grades or items'; $string['grade:manage'] = 'Manage grade items'; $string['grade:managegradingforms'] = 'Manage advanced grading methods'; $string['grade:managesharedforms'] = 'Manage advanced grading form templates'; $string['grade:sharegradingforms'] = 'Share advanced grading form as a template'; $string['grade:manageletters'] = 'Manage letter grades'; $string['grade:manageoutcomes'] = 'Manage grade outcomes'; $string['grade:override'] = 'Override grades'; $string['grade:unlock'] = 'Unlock grades or items'; $string['grade:view'] = 'View own grades'; $string['grade:viewall'] = 'View grades of other users'; $string['grade:viewhidden'] = 'View hidden grades for owner'; $string['highlightedcellsshowdefault'] = 'The permissions highlighted in the table below are the defaults for the role archetype currently selected above.'; $string['highlightedcellsshowinherit'] = 'The highlighted cells in the table below show the permission (if any) that will be inherited. 
Apart from the capabilities whose permission you actually want to alter, you should leave everything set to Inherit.'; $string['checkglobalpermissions'] = 'Check system permissions'; $string['checkpermissions'] = 'Check permissions'; $string['checkpermissionsin'] = 'Check permissions in {$a}'; $string['checksystempermissionsfor'] = 'Check system permissions for {$a->fullname}'; $string['checkuserspermissionshere'] = 'Check permissions for {$a->fullname} has in this {$a->contextlevel}'; $string['chooseroletoassign'] = 'Please choose a role to assign'; $string['inactiveformorethan'] = 'inactive for more than {$a->timeperiod}'; $string['ingroup'] = 'in the group "{$a->group}"'; $string['inherit'] = 'Inherit'; $string['invalidpresetfile'] = 'Invalid role definition file'; $string['legacy:admin'] = 'LEGACY ROLE: Administrator'; $string['legacy:coursecreator'] = 'LEGACY ROLE: Course creator'; $string['legacy:editingteacher'] = 'LEGACY ROLE: Teacher (editing)'; $string['legacy:guest'] = 'LEGACY ROLE: Guest'; $string['legacy:student'] = 'LEGACY ROLE: Student'; $string['legacy:teacher'] = 'LEGACY ROLE: Teacher (non-editing)'; $string['legacytype'] = 'Legacy role type'; $string['legacy:user'] = 'LEGACY ROLE: Authenticated user'; $string['listallroles'] = 'List all roles'; $string['localroles'] = 'Locally assigned roles'; $string['mainadmin'] = 'Main administrator'; $string['mainadminset'] = 'Set main admin'; $string['manageadmins'] = 'Manage site administrators'; $string['manager'] = 'Manager'; $string['managerdescription'] = 'Managers can access course and modify them, they usually do not participate in courses.'; $string['manageroles'] = 'Manage roles'; $string['maybeassignedin'] = 'Context types where this role may be assigned'; $string['morethan'] = 'More than {$a}'; $string['multipleroles'] = 'Multiple roles'; $string['my:manageblocks'] = 'Manage Dashboard page blocks'; $string['my:configsyspages'] = 'Configure system templates for Dashboard pages'; 
$string['neededroles'] = 'Roles with permission'; $string['nocapabilitiesincontext'] = 'No capabilities available in this context'; $string['noneinthisx'] = 'None in this {$a}'; $string['noneinthisxmatching'] = 'No users matching \'{$a->search}\' in this {$a->contexttype}'; $string['norole'] = 'No role'; $string['noroles'] = 'No roles'; $string['noroleassignments'] = 'This user does not have any role assignments anywhere in this site.'; $string['notabletoassignroleshere'] = 'You are not able to assign any roles here'; $string['notabletooverrideroleshere'] = 'You are not able to override the permissions on any roles here'; $string['notes:manage'] = 'Manage notes'; $string['notes:view'] = 'View notes'; $string['notset'] = 'Not set'; $string['overrideanotherrole'] = 'Override another role'; $string['overridecontext'] = 'Override context'; $string['overridepermissions'] = 'Override permissions'; $string['overridepermissions_help'] = 'Permissions overrides enable selected capabilities to be allowed or prevented in a specific context.'; $string['overridepermissions_link'] = 'admin/roles/override'; $string['overridepermissionsforrole'] = 'Override permissions for role \'{$a->role}\' in {$a->context}'; $string['overridepermissionsin'] = 'Override permissions in {$a}'; $string['overrideroles'] = 'Override roles'; $string['overriderolesin'] = 'Override roles in {$a}'; $string['overrides'] = 'Overrides'; $string['overridesbycontext'] = 'Overrides (by context)'; $string['permission'] = 'Permission'; $string['permission_help'] = 'Permissions are capability settings. 
There are 4 options: * Not set * Allow - Permission is granted for the capability * Prevent - Permission is removed for the capability, even if allowed in a higher context * Prohibit - Permission is completely denied and cannot be overridden at any lower (more specific) context'; $string['permissions'] = 'Permissions'; $string['permissionsforuser'] = 'Permissions for user {$a}'; $string['permissionsincontext'] = 'Permissions in {$a}'; $string['portfolio:export'] = 'Export to portfolios'; $string['potentialusers'] = '{$a} potential users'; $string['potusers'] = 'Potential users'; $string['potusersmatching'] = 'Potential users matching \'{$a}\''; $string['prevent'] = 'Prevent'; $string['prohibit'] = 'Prohibit'; $string['prohibitedroles'] = 'Prohibited'; $string['question:add'] = 'Add new questions'; $string['question:config'] = 'Configure question types'; $string['question:editall'] = 'Edit all questions'; $string['question:editmine'] = 'Edit your own questions'; $string['question:flag'] = 'Flag questions while attempting them'; $string['question:managecategory'] = 'Edit question categories'; $string['question:moveall'] = 'Move all questions'; $string['question:movemine'] = 'Move your own questions'; $string['question:useall'] = 'Use all questions'; $string['question:usemine'] = 'Use your own questions'; $string['question:viewall'] = 'View all questions'; $string['question:viewmine'] = 'View your own questions'; $string['rating:rate'] = 'Add ratings to items'; $string['rating:view'] = 'View the total rating you received'; $string['rating:viewany'] = 'View total ratings that anyone received'; $string['rating:viewall'] = 'View all raw ratings given by individuals'; $string['resetrole'] = 'Reset'; $string['resettingrole'] = 'Resetting role \'{$a}\''; $string['restore:configure'] = 'Configure restore options'; $string['restore:createuser'] = 'Create users on restore'; $string['restore:restoreactivity'] = 'Restore activities'; $string['restore:restoresection'] = 'Restore 
sections'; $string['restore:restorecourse'] = 'Restore courses'; $string['restore:restoretargethub'] = 'Restore from files targeted as hub'; $string['restore:restoretargetimport'] = 'Restore from files targeted as import'; $string['restore:rolldates'] = 'Allowed to roll activity configuration dates on restore'; $string['restore:uploadfile'] = 'Upload files to backup areas'; $string['restore:userinfo'] = 'Restore user data'; $string['restore:viewautomatedfilearea'] = 'Restore courses from automated backups'; $string['risks'] = 'Risks'; $string['roleallowheader'] = 'Allow role:'; $string['roleallowinfo'] = 'Select a role to be added to the list of allowed roles in context {$a->context}, capability {$a->cap}:'; $string['role:assign'] = 'Assign roles to users'; $string['roleassignments'] = 'Role assignments'; $string['roledefinitions'] = 'Role definitions'; $string['rolefullname'] = 'Role name'; $string['roleincontext'] = '{$a->role} in {$a->context}'; $string['role:manage'] = 'Create and manage roles'; $string['role:override'] = 'Override permissions for others'; $string['role:review'] = 'Review permissions for others'; $string['roleprohibitheader'] = 'Prohibit role'; $string['roleprohibitinfo'] = 'Select a role to be added to the list of prohibited roles in context {$a->context}, capability {$a->cap}:'; $string['rolerisks'] = 'Role risks'; $string['roles'] = 'Roles'; $string['roles_help'] = 'A role is a collection of permissions defined for the whole system that you can assign to specific users in specific contexts.'; $string['roles_link'] = 'roles'; $string['role:safeoverride'] = 'Override safe permissions for others'; $string['roleselect'] = 'Select role'; $string['rolesforuser'] = 'Roles for user {$a}'; $string['roleshortname'] = 'Short name'; $string['roleshortname_help'] = 'Role short name is a low level role identifier in which only ASCII alphanumeric characters are allowed. 
Do not change short names of standard roles.'; $string['role:switchroles'] = 'Switch to other roles'; $string['roletoassign'] = 'Role to assign'; $string['roletooverride'] = 'Role to override'; $string['safeoverridenotice'] = 'Note: Capabilities with higher risks are locked because you are only allowed to override safe capabilities.'; $string['selectanotheruser'] = 'Select another user'; $string['selectauser'] = 'Select a user'; $string['selectrole'] = 'Select a role'; $string['showallroles'] = 'Show all roles'; $string['showthisuserspermissions'] = 'Show this user\'s permissions'; $string['site:accessallgroups'] = 'Access all groups'; $string['siteadministrators'] = 'Site administrators'; $string['site:approvecourse'] = 'Approve course creation'; $string['site:backup'] = 'Backup courses'; $string['site:config'] = 'Change site configuration'; $string['site:doanything'] = 'Allowed to do everything'; $string['site:doclinks'] = 'Show links to offsite docs'; $string['site:forcelanguage'] = 'Override course language'; $string['site:import'] = 'Import other courses into a course'; $string['site:manageblocks'] = 'Manage blocks on a page'; $string['site:mnetloginfromremote'] = 'Login from a remote application via MNet'; $string['site:mnetlogintoremote'] = 'Roam to a remote application via MNet'; $string['site:readallmessages'] = 'Read all messages on site'; $string['site:restore'] = 'Restore courses'; $string['site:sendmessage'] = 'Send messages to any user'; $string['site:trustcontent'] = 'Trust submitted content'; $string['site:uploadusers'] = 'Upload new users from file'; $string['site:viewfullnames'] = 'Always see full names of users'; $string['site:viewparticipants'] = 'View participants'; $string['site:viewreports'] = 'View reports'; $string['site:viewuseridentity'] = 'See full user identity in lists'; $string['tag:create'] = 'Create new tags'; $string['tag:edit'] = 'Edit existing tags'; $string['tag:editblocks'] = 'Edit blocks in tags pages'; $string['tag:manage'] = 
'Manage all tags'; $string['tag:flag'] = 'Flag tags as inappropriate'; $string['thisusersroles'] = 'This user\'s role assignments'; $string['thisnewrole'] = 'This new role'; $string['unassignarole'] = 'Unassign role {$a}'; $string['unassignerror'] = 'Error while unassigning the role {$a->role} from user {$a->user}.'; $string['unassignconfirm'] = 'Do you really want to unassign "{$a->role}" role from user "{$a->user}"?'; $string['user:changeownpassword'] = 'Change own password'; $string['user:create'] = 'Create users'; $string['user:delete'] = 'Delete users'; $string['user:editmessageprofile'] = 'Edit user messaging profile'; $string['user:editownmessageprofile'] = 'Edit own user messaging profile'; $string['user:editownprofile'] = 'Edit own user profile'; $string['user:editprofile'] = 'Edit user profile'; $string['user:ignoreuserquota'] = 'Ignore user quota limit'; $string['user:loginas'] = 'Login as other users'; $string['user:manageblocks'] = 'Manage blocks on user profile of other users'; $string['user:manageownblocks'] = 'Manage blocks on own public user profile'; $string['user:manageownfiles'] = 'Manage files on own private file areas'; $string['user:managesyspages'] = 'Configure default page layout for public user profiles'; $string['user:readuserblogs'] = 'View all user blogs'; $string['user:readuserposts'] = 'View all user forum posts'; $string['user:update'] = 'Update user profiles'; $string['user:viewalldetails'] = 'View user full information'; $string['user:viewdetails'] = 'View user profiles'; $string['user:viewhiddendetails'] = 'View hidden details of users'; $string['user:viewlastip'] = 'View user last ip address'; $string['user:viewuseractivitiesreport'] = 'See user activity reports'; $string['user:viewusergrades'] = 'View user grades'; $string['roleresetdefaults'] = 'Defaults'; $string['roleresetrole'] = 'Use role or archetype'; $string['rolerepreset'] = 'Use role preset'; $string['usersfrom'] = 'Users from {$a}'; $string['usersfrommatching'] = 
'Users from {$a->contextname} matching \'{$a->search}\''; $string['usersinthisx'] = 'Users in this {$a}'; $string['usersinthisxmatching'] = 'Users in this {$a->contexttype} matching \'{$a->search}\''; $string['userswithrole'] = 'All users with a role'; $string['userswiththisrole'] = 'Users with role'; $string['useshowadvancedtochange'] = 'Use \'Show advanced\' to change'; $string['viewingdefinitionofrolex'] = 'Viewing the definition of role \'{$a}\''; $string['viewrole'] = 'View role details'; $string['webservice:createtoken'] = 'Create a web service token'; $string['webservice:createmobiletoken'] = 'Create a web service token for mobile access'; $string['whydoesuserhavecap'] = 'Why does {$a->fullname} have capability {$a->capability} in context {$a->context}?'; $string['whydoesusernothavecap'] = 'Why does {$a->fullname} not have capability {$a->capability} in context {$a->context}?'; $string['xroleassignments'] = '{$a}\'s role assignments'; $string['xuserswiththerole'] = 'Users with the role "{$a->role}"';
nagyistoce/moodle
lang/en/role.php
PHP
gpl-3.0
26,335
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

#nullable disable

using System;
using Microsoft.CodeAnalysis.ErrorReporting;
using Roslyn.Test.Utilities;
using Roslyn.Utilities;
using Xunit;

namespace Microsoft.CodeAnalysis.UnitTests
{
    public class ExceptionHelpersTests : TestBase
    {
        /// <summary>
        /// Test that throwing OperationCanceledException does NOT trigger FailFast
        /// </summary>
        /// <remarks>
        /// The test verifies two things:
        /// 1. The OperationCanceledException propagates out past the
        ///    <c>when (FatalError.ReportAndPropagateUnlessCanceled(e))</c> filter
        ///    (i.e. the filter presumably returns false for cancellation, so the
        ///    catch body is never entered) and is observed by the outer catch.
        /// 2. Stack unwinding still runs finally blocks along the way, which is
        ///    observed via <c>finallyExecuted</c>.
        /// </remarks>
        [Fact]
        public void TestExecuteWithErrorReportingThrowOperationCanceledException()
        {
            var finallyExecuted = false;

            // Local function that throws cancellation; its finally block records
            // that unwinding passed through it.
            void a()
            {
                try
                {
                    throw new OperationCanceledException();
                }
                finally
                {
                    finallyExecuted = true;
                }
            }

            try
            {
                try
                {
                    a();
                }
                catch (Exception e) when (FatalError.ReportAndPropagateUnlessCanceled(e))
                {
                    // Filter is expected to decline OperationCanceledException,
                    // so this body must be unreachable for this test.
                    throw ExceptionUtilities.Unreachable;
                }

                Assert.True(false, "Should not get here because an exception should be thrown before this point.");
            }
            catch (OperationCanceledException)
            {
                // The cancellation reached us intact and the finally ran.
                Assert.True(finallyExecuted);
                return;
            }

            Assert.True(false, "Should have returned in the catch block before this point.");
        }
    }
}
AmadeusW/roslyn
src/Workspaces/CoreTest/UtilityTest/ExceptionHelpersTests.cs
C#
apache-2.0
1,686
"""
CMSIS-DAP Interface Firmware
Copyright (c) 2009-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Extract and patch the interface without bootloader
"""
from options import get_options
from paths import get_interface_path, TMP_DIR
from utils import gen_binary, is_lpc, split_path

from os.path import join

if __name__ == '__main__':
    # Parse the command-line options (interface / target selection).
    options = get_options()

    # Locate the interface ELF built without a bootloader.
    in_path = get_interface_path(options.interface, options.target,
                                 bootloader=False)

    # Derive the output binary name from the ELF file name.
    _, name, _ = split_path(in_path)
    out_path = join(TMP_DIR, name + '.bin')

    # Parenthesized print works under both Python 2 and Python 3
    # (the original bare print statements were Python-2 only).
    print('\nELF: %s' % in_path)

    # Convert ELF -> raw binary; LPC interfaces need extra patching,
    # hence the is_lpc() flag.
    gen_binary(in_path, out_path, is_lpc(options.interface))

    print("\nBINARY: %s" % out_path)
flyhung/CMSIS-DAP
tools/get_binary.py
Python
apache-2.0
1,166
/*
 * Copyright 2014 Soichiro Kashima
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.marshalchen.common.demoofui.observablescrollview;

import android.os.Bundle;
import android.support.v4.view.ViewCompat;
import android.support.v7.app.ActionBarActivity;
import android.support.v7.widget.Toolbar;
import android.view.View;

import com.github.ksoichiro.android.observablescrollview.ObservableScrollView;
import com.github.ksoichiro.android.observablescrollview.ObservableScrollViewCallbacks;
import com.github.ksoichiro.android.observablescrollview.ObservableWebView;
import com.github.ksoichiro.android.observablescrollview.ScrollState;
import com.marshalchen.common.demoofui.R;
import com.nineoldandroids.view.ViewHelper;
import com.nineoldandroids.view.ViewPropertyAnimator;

/**
 * Demo activity: a WebView inside an ObservableScrollView whose toolbar/header
 * slides out of view while the user drags upward and slides back in when the
 * drag direction reverses or the drag ends.
 */
public class ToolbarControlWebViewActivity extends ActionBarActivity {
    private View mHeaderView;
    private View mToolbarView;
    private ObservableScrollView mScrollView;
    // Set on the WebView's down-motion event (see mWebViewScrollCallbacks) and
    // consumed by the scroll-view callbacks below.
    private boolean mFirstScroll;
    private boolean mDragging;
    // Scroll offset at the moment the current drag started; header translation
    // is computed relative to this base.
    private int mBaseTranslationY;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.observable_scroll_view_activity_toolbarcontrolwebview);
        setSupportActionBar((Toolbar) findViewById(R.id.toolbar));

        mHeaderView = findViewById(R.id.header);
        ViewCompat.setElevation(mHeaderView, getResources().getDimension(R.dimen.toolbar_elevation));
        mToolbarView = findViewById(R.id.toolbar);
        mScrollView = (ObservableScrollView) findViewById(R.id.scroll);
        mScrollView.setScrollViewCallbacks(mScrollViewScrollCallbacks);

        // The WebView gets its own callbacks solely to observe down-motion
        // events that the ScrollView never sees (see comment below).
        ObservableWebView mWebView = (ObservableWebView) findViewById(R.id.web);
        mWebView.setScrollViewCallbacks(mWebViewScrollCallbacks);
        mWebView.loadUrl("file:///android_asset/lipsum.html");
    }

    private ObservableScrollViewCallbacks mScrollViewScrollCallbacks = new ObservableScrollViewCallbacks() {
        @Override
        public void onScrollChanged(int scrollY, boolean firstScroll, boolean dragging) {
            // Only move the header while the user is actively dragging
            // (mDragging/mFirstScroll are set from the WebView's down event).
            if (mDragging) {
                int toolbarHeight = mToolbarView.getHeight();
                if (mFirstScroll) {
                    mFirstScroll = false;
                    float currentHeaderTranslationY = ViewHelper.getTranslationY(mHeaderView);
                    // If the header is partially shown and we are past the
                    // toolbar, anchor the translation to the current offset so
                    // the header doesn't jump on the first scroll event.
                    if (-toolbarHeight < currentHeaderTranslationY && toolbarHeight < scrollY) {
                        mBaseTranslationY = scrollY;
                    }
                }
                // Clamp translation to [-toolbarHeight, 0].
                int headerTranslationY = Math.min(0, Math.max(-toolbarHeight, -(scrollY - mBaseTranslationY)));
                ViewPropertyAnimator.animate(mHeaderView).cancel();
                ViewHelper.setTranslationY(mHeaderView, headerTranslationY);
            }
        }

        @Override
        public void onDownMotionEvent() {
        }

        @Override
        public void onUpOrCancelMotionEvent(ScrollState scrollState) {
            // Drag finished: snap the header fully hidden (UP) or fully shown
            // (DOWN) with a short animation, but only once we've scrolled past
            // the toolbar's own height.
            mDragging = false;
            mBaseTranslationY = 0;
            float headerTranslationY = ViewHelper.getTranslationY(mHeaderView);
            int toolbarHeight = mToolbarView.getHeight();
            if (scrollState == ScrollState.UP) {
                if (toolbarHeight < mScrollView.getCurrentScrollY()) {
                    if (headerTranslationY != -toolbarHeight) {
                        ViewPropertyAnimator.animate(mHeaderView).cancel();
                        ViewPropertyAnimator.animate(mHeaderView).translationY(-toolbarHeight).setDuration(200).start();
                    }
                }
            } else if (scrollState == ScrollState.DOWN) {
                if (toolbarHeight < mScrollView.getCurrentScrollY()) {
                    if (headerTranslationY != 0) {
                        ViewPropertyAnimator.animate(mHeaderView).cancel();
                        ViewPropertyAnimator.animate(mHeaderView).translationY(0).setDuration(200).start();
                    }
                }
            }
        }
    };

    private ObservableScrollViewCallbacks mWebViewScrollCallbacks = new ObservableScrollViewCallbacks() {
        @Override
        public void onScrollChanged(int scrollY, boolean firstScroll, boolean dragging) {
        }

        @Override
        public void onDownMotionEvent() {
            // Workaround: WebView inside a ScrollView absorbs down motion events, so observing
            // down motion event from the WebView is required.
            mFirstScroll = mDragging = true;
        }

        @Override
        public void onUpOrCancelMotionEvent(ScrollState scrollState) {
        }
    };
}
cymcsg/UltimateAndroid
deprecated/UltimateAndroidGradle/demoofui/src/main/java/com/marshalchen/common/demoofui/observablescrollview/ToolbarControlWebViewActivity.java
Java
apache-2.0
5,205
package create

import (
	"fmt"
	"io"

	"github.com/spf13/cobra"

	kapi "k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/meta"
	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
	"k8s.io/kubernetes/pkg/runtime"

	"github.com/openshift/origin/pkg/client"
	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
	deployapi "github.com/openshift/origin/pkg/deploy/api"
)

const (
	DeploymentConfigRecommendedName = "deploymentconfig"

	deploymentConfigLong = `
Create a deployment config that uses a given image.

Deployment configs define the template for a pod and manages deploying new images or configuration changes.`

	deploymentConfigExample = `  # Create an nginx deployment config named my-nginx
  %[1]s my-nginx --image=nginx`
)

// CreateDeploymentConfigOptions holds everything needed to create a
// deployment config: the object to create, the client to create it with,
// and how the result should be printed.
type CreateDeploymentConfigOptions struct {
	DC     *deployapi.DeploymentConfig
	Client client.DeploymentConfigsNamespacer

	Mapper       meta.RESTMapper
	OutputFormat string

	Out     io.Writer
	Printer ObjectPrinter
}

// NewCmdCreateDeploymentConfig is a macro command to create a new deployment config
// (the original comment referred to service accounts, which was a copy/paste error).
func NewCmdCreateDeploymentConfig(name, fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	o := &CreateDeploymentConfigOptions{Out: out}

	cmd := &cobra.Command{
		Use:     name + " NAME --image=IMAGE -- [COMMAND] [args...]",
		Short:   "Create deployment config with default options that uses a given image.",
		Long:    deploymentConfigLong,
		Example: fmt.Sprintf(deploymentConfigExample, fullName),
		Run: func(cmd *cobra.Command, args []string) {
			cmdutil.CheckErr(o.Complete(cmd, f, args))
			cmdutil.CheckErr(o.Validate())
			cmdutil.CheckErr(o.Run())
		},
		Aliases: []string{"dc"},
	}

	cmd.Flags().String("image", "", "The image for the container to run.")
	cmd.MarkFlagRequired("image")

	cmdutil.AddOutputFlagsForMutation(cmd)
	return cmd
}

// Complete fills in the options from flags and positional arguments.
// Exactly one NAME must appear before "--"; anything after "--" becomes
// the container's args.
func (o *CreateDeploymentConfigOptions) Complete(cmd *cobra.Command, f *clientcmd.Factory, args []string) error {
	argsLenAtDash := cmd.ArgsLenAtDash()
	// Reject: no "--" with != 1 arg, "--" with zero args before it,
	// or more than one arg before "--".
	switch {
	case (argsLenAtDash == -1 && len(args) != 1),
		(argsLenAtDash == 0),
		(argsLenAtDash > 1):
		return fmt.Errorf("NAME is required: %v", args)
	}

	labels := map[string]string{"deployment-config.name": args[0]}

	// Build a single-replica deployment config around one container
	// running the requested image.
	o.DC = &deployapi.DeploymentConfig{
		ObjectMeta: kapi.ObjectMeta{Name: args[0]},
		Spec: deployapi.DeploymentConfigSpec{
			Selector: labels,
			Replicas: 1,
			Template: &kapi.PodTemplateSpec{
				ObjectMeta: kapi.ObjectMeta{Labels: labels},
				Spec: kapi.PodSpec{
					Containers: []kapi.Container{
						{
							Name:  "default-container",
							Image: cmdutil.GetFlagString(cmd, "image"),
							Args:  args[1:],
						},
					},
				},
			},
		},
	}

	var err error
	o.DC.Namespace, _, err = f.DefaultNamespace()
	if err != nil {
		return err
	}
	o.Client, _, err = f.Clients()
	if err != nil {
		return err
	}
	o.Mapper, _ = f.Object(false)
	o.OutputFormat = cmdutil.GetFlagString(cmd, "output")

	o.Printer = func(obj runtime.Object, out io.Writer) error {
		return f.PrintObject(cmd, o.Mapper, obj, out)
	}

	return nil
}

// Validate checks that Complete populated every required field.
func (o *CreateDeploymentConfigOptions) Validate() error {
	if o.DC == nil {
		return fmt.Errorf("DC is required")
	}
	if o.Client == nil {
		return fmt.Errorf("Client is required")
	}
	if o.Mapper == nil {
		return fmt.Errorf("Mapper is required")
	}
	if o.Out == nil {
		return fmt.Errorf("Out is required")
	}
	if o.Printer == nil {
		return fmt.Errorf("Printer is required")
	}

	return nil
}

// Run creates the deployment config on the server and prints the result,
// either as a short success message (default / "name" output) or via the
// configured object printer.
func (o *CreateDeploymentConfigOptions) Run() error {
	actualObj, err := o.Client.DeploymentConfigs(o.DC.Namespace).Create(o.DC)
	if err != nil {
		return err
	}

	if useShortOutput := o.OutputFormat == "name"; useShortOutput || len(o.OutputFormat) == 0 {
		cmdutil.PrintSuccess(o.Mapper, useShortOutput, o.Out, "deploymentconfig", actualObj.Name, "created")
		return nil
	}

	return o.Printer(actualObj, o.Out)
}
rhuss/gofabric8
vendor/github.com/openshift/origin/pkg/cmd/cli/cmd/create/deploymentconfig.go
GO
apache-2.0
3,854
/*
 * Copyright (C) 2016 Square, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package okhttp3;

import java.io.IOException;
import okio.Buffer;
import okio.BufferedSource;
import okio.Okio;
import okio.Source;
import okio.Timeout;
import org.junit.Test;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

/**
 * Tests for {@link Response#peekBody}: peeking must not consume the
 * underlying body, multiple peeks must be independent, and peeking after
 * the body has been consumed must fail.
 */
public final class ResponseTest {
  @Test public void peekShorterThanResponse() throws Exception {
    Response response = newResponse(responseBody("abcdef"));
    ResponseBody peekedBody = response.peekBody(3);
    assertEquals("abc", peekedBody.string());
    // Peeking must leave the full body readable afterwards.
    assertEquals("abcdef", response.body().string());
  }

  @Test public void peekLongerThanResponse() throws Exception {
    Response response = newResponse(responseBody("abc"));
    // Asking for more bytes than exist yields only what's available.
    ResponseBody peekedBody = response.peekBody(6);
    assertEquals("abc", peekedBody.string());
    assertEquals("abc", response.body().string());
  }

  @Test public void peekAfterReadingResponse() throws Exception {
    Response response = newResponse(responseBody("abc"));
    assertEquals("abc", response.body().string());

    // Once the body has been consumed (and its source closed), peeking
    // must fail rather than return stale data.
    try {
      response.peekBody(3);
      fail();
    } catch (IllegalStateException expected) {
    }
  }

  // NOTE(review): method name typo — "Peak" should be "Peek"; left as-is
  // because renaming is a code change, not a documentation change.
  @Test public void eachPeakIsIndependent() throws Exception {
    Response response = newResponse(responseBody("abcdef"));
    // Two overlapping peeks plus a full read must each see their own view.
    ResponseBody p1 = response.peekBody(4);
    ResponseBody p2 = response.peekBody(2);
    assertEquals("abcdef", response.body().string());
    assertEquals("abcd", p1.string());
    assertEquals("ab", p2.string());
  }

  /**
   * Returns a new response body that refuses to be read once it has been closed. This is true of
   * most {@link BufferedSource} instances, but not of {@link Buffer}.
   */
  private ResponseBody responseBody(String content) {
    final Buffer data = new Buffer().writeUtf8(content);

    // Wrap the Buffer in a Source that tracks closed-ness, since Buffer
    // itself can still be read after close().
    Source source = new Source() {
      boolean closed;

      @Override public void close() throws IOException {
        closed = true;
      }

      @Override public long read(Buffer sink, long byteCount) throws IOException {
        if (closed) throw new IllegalStateException();
        return data.read(sink, byteCount);
      }

      @Override public Timeout timeout() {
        return Timeout.NONE;
      }
    };

    // -1 content length: length unknown to the consumer.
    return ResponseBody.create(null, -1, Okio.buffer(source));
  }

  /** Builds a minimal successful HTTP/1.1 response around {@code responseBody}. */
  private Response newResponse(ResponseBody responseBody) {
    return new Response.Builder()
        .request(new Request.Builder()
            .url("https://example.com/")
            .build())
        .protocol(Protocol.HTTP_1_1)
        .code(200)
        .body(responseBody)
        .build();
  }
}
zmarkan/okhttp
okhttp-tests/src/test/java/okhttp3/ResponseTest.java
Java
apache-2.0
3,177
<?php /** * HTML cache invalidation of all pages linking to a given title. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * http://www.gnu.org/copyleft/gpl.html * * @file * @ingroup Cache */ /** * Class to invalidate the HTML cache of all the pages linking to a given title. * * @ingroup Cache */ class HTMLCacheUpdate implements DeferrableUpdate { /** * @var Title */ public $mTitle; public $mTable; /** * @param $titleTo * @param $table * @param $start bool * @param $end bool */ function __construct( Title $titleTo, $table ) { $this->mTitle = $titleTo; $this->mTable = $table; } public function doUpdate() { wfProfileIn( __METHOD__ ); $job = new HTMLCacheUpdateJob( $this->mTitle, array( 'table' => $this->mTable, ) + Job::newRootJobParams( // "overall" refresh links job info "htmlCacheUpdate:{$this->mTable}:{$this->mTitle->getPrefixedText()}" ) ); $count = $this->mTitle->getBacklinkCache()->getNumLinks( $this->mTable, 200 ); if ( $count >= 200 ) { // many backlinks JobQueueGroup::singleton()->push( $job ); JobQueueGroup::singleton()->deduplicateRootJob( $job ); } else { // few backlinks ($count might be off even if 0) $dbw = wfGetDB( DB_MASTER ); $dbw->onTransactionIdle( function() use ( $job ) { $job->run(); // just do the purge query now } ); } wfProfileOut( __METHOD__ ); } }
BRL-CAD/web
wiki/includes/cache/HTMLCacheUpdate.php
PHP
bsd-2-clause
2,065
cask :v1 => 'witgui' do version '2.1.2' sha256 '4e108153a2cce9fede1358b265dfcd7d9f03c15658e2c9278ddad8a04260cf9b' url "http://desairem.altervista.org/witgui/download.php?version=#{version}" name 'Witgui' appcast 'http://desairem.altervista.org/witgui/appcast.xml', :sha256 => 'f982fdb6f7cfe0a307fad75e5e523096630f5eef88aa543014d2eed2d6f4b01d' homepage 'http://desairem.altervista.org/witgui/wordpress/' license :unknown # todo: change license and remove this comment; ':unknown' is a machine-generated placeholder app 'Witgui.app' end
vmrob/homebrew-cask
Casks/witgui.rb
Ruby
bsd-2-clause
566
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_CHROMEOS_APP_MODE_KIOSK_EXTERNAL_UPDATE_VALIDATOR_H_ #define CHROME_BROWSER_CHROMEOS_APP_MODE_KIOSK_EXTERNAL_UPDATE_VALIDATOR_H_ #include <string> #include "base/files/file_path.h" #include "base/memory/weak_ptr.h" #include "base/sequenced_task_runner.h" #include "extensions/browser/sandboxed_unpacker.h" namespace extensions { class Extension; } namespace chromeos { // Delegate class for KioskExternalUpdateValidator, derived class must support // WeakPtr. class KioskExternalUpdateValidatorDelegate { public: virtual void OnExtenalUpdateUnpackSuccess( const std::string& app_id, const std::string& version, const std::string& min_browser_version, const base::FilePath& temp_dir) = 0; virtual void OnExternalUpdateUnpackFailure(const std::string& app_id) = 0; protected: virtual ~KioskExternalUpdateValidatorDelegate() {} }; // Unpacks the crx file of the kiosk app and validates its signature. class KioskExternalUpdateValidator : public extensions::SandboxedUnpackerClient { public: KioskExternalUpdateValidator( const scoped_refptr<base::SequencedTaskRunner>& backend_task_runner, const extensions::CRXFileInfo& file, const base::FilePath& crx_unpack_dir, const base::WeakPtr<KioskExternalUpdateValidatorDelegate>& delegate); // Starts validating the external crx file. void Start(); private: ~KioskExternalUpdateValidator() override; // SandboxedUnpackerClient overrides. void OnUnpackFailure(const extensions::CrxInstallError& error) override; void OnUnpackSuccess(const base::FilePath& temp_dir, const base::FilePath& extension_dir, const base::DictionaryValue* original_manifest, const extensions::Extension* extension, const SkBitmap& install_icon) override; // Task runner for executing file I/O tasks. 
const scoped_refptr<base::SequencedTaskRunner> backend_task_runner_; // Information about the external crx file. extensions::CRXFileInfo crx_file_; // The temporary directory used by SandBoxedUnpacker for unpacking extensions. const base::FilePath crx_unpack_dir_; base::WeakPtr<KioskExternalUpdateValidatorDelegate> delegate_; DISALLOW_COPY_AND_ASSIGN(KioskExternalUpdateValidator); }; } // namespace chromeos #endif // CHROME_BROWSER_CHROMEOS_APP_MODE_KIOSK_EXTERNAL_UPDATE_VALIDATOR_H_
guorendong/iridium-browser-ubuntu
chrome/browser/chromeos/app_mode/kiosk_external_update_validator.h
C
bsd-3-clause
2,596
import { MacOption24 } from "../../"; export = MacOption24;
markogresak/DefinitelyTyped
types/carbon__icons-react/lib/mac--option/24.d.ts
TypeScript
mit
61
export default (...modifiers): Array<string> => {};
recipesjs/ingredients
test/fixtures/flow/type-annotations/102/actual.js
JavaScript
mit
52
<?php namespace Concrete\Core\Search; use Concrete\Core\Application\EditResponse; use Concrete\Core\Entity\Search\Query; use Concrete\Core\Search\Result\Result as SearchResult; interface SessionQueryProviderInterface { function setSessionCurrentQuery(Query $query); function getSessionCurrentQuery(); function clearSessionCurrentQuery(); function getSessionNamespace(); }
Akhenoth/Factorian
concrete/src/Search/SessionQueryProviderInterface.php
PHP
mit
393
#ifndef NV_CORE_H #error "Do not include this file directly." #endif //#include <cstddef> // size_t, NULL // Function linkage #define DLL_IMPORT __declspec(dllimport) #define DLL_EXPORT __declspec(dllexport) #define DLL_EXPORT_CLASS DLL_EXPORT // Function calling modes #if NV_CPU_X86 # define NV_CDECL __attribute__((cdecl)) # define NV_STDCALL __attribute__((stdcall)) #else # define NV_CDECL # define NV_STDCALL #endif #define NV_FASTCALL __attribute__((fastcall)) #define NV_FORCEINLINE __attribute__((always_inline)) #define NV_DEPRECATED __attribute__((deprecated)) #if __GNUC__ > 2 #define NV_PURE __attribute__((pure)) #define NV_CONST __attribute__((const)) #else #define NV_PURE #define NV_CONST #endif #define NV_NOINLINE __attribute__((noinline)) // Define __FUNC__ properly. #if __STDC_VERSION__ < 199901L # if __GNUC__ >= 2 # define __FUNC__ __PRETTY_FUNCTION__ // __FUNCTION__ # else # define __FUNC__ "<unknown>" # endif #else # define __FUNC__ __PRETTY_FUNCTION__ #endif #define restrict __restrict__ /* // Type definitions typedef unsigned char uint8; typedef signed char int8; typedef unsigned short uint16; typedef signed short int16; typedef unsigned int uint32; typedef signed int int32; typedef unsigned long long uint64; typedef signed long long int64; // Aliases typedef uint32 uint; */
grendizerufo/nvidia-texture-tools
src/nvcore/DefsGnucWin32.h
C
mit
1,410
import { LogoTumblr16 } from "../../"; export = LogoTumblr16;
markogresak/DefinitelyTyped
types/carbon__icons-react/lib/logo--tumblr/16.d.ts
TypeScript
mit
63
// Karma configuration // Generated on Sun Apr 14 2013 18:31:17 GMT+0200 (CEST) // base path, that will be used to resolve files and exclude basePath = ''; // list of files / patterns to load in the browser files = [ JASMINE, JASMINE_ADAPTER, 'http://code.angularjs.org/1.1.4/angular.js', 'http://code.angularjs.org/1.1.4/angular-resource.js', 'http://code.angularjs.org/1.1.4/angular-mocks.js', 'http://cdnjs.cloudflare.com/ajax/libs/underscore.js/1.4.4/underscore-min.js', 'src/restangular.js', 'test/*.js' ]; // list of files to exclude exclude = [ ]; // test results reporter to use // possible values: 'dots', 'progress', 'junit' reporters = ['progress']; // web server port port = 9877; // cli runner port runnerPort = 9101; // enable / disable colors in the output (reporters and logs) colors = true; // level of logging // possible values: LOG_DISABLE || LOG_ERROR || LOG_WARN || LOG_INFO || LOG_DEBUG logLevel = LOG_INFO; // enable / disable watching file and executing tests whenever any file changes autoWatch = true; // Start these browsers, currently available: // - Chrome // - ChromeCanary // - Firefox // - Opera // - Safari (only Mac) // - PhantomJS // - IE (only Windows) browsers = ['PhantomJS']; // If browser does not capture in given timeout [ms], kill it captureTimeout = 60000; // Continuous Integration mode // if true, it capture browsers, run tests and exit singleRun = false;
kyleiwaniec/yahoo-finance
yahoo-finance/bower_components/angularytics/karma.underscore.conf.js
JavaScript
mit
1,447
/* ---------------------------------------------------------------------- LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator http://lammps.sandia.gov, Sandia National Laboratories Steve Plimpton, sjplimp@sandia.gov Copyright (2003) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. This software is distributed under the GNU General Public License. See the README file in the top-level LAMMPS directory. ------------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- Contributing author: Axel Kohlmeyer (Temple U) ------------------------------------------------------------------------- */ #include "pppm_omp.h" #include "atom.h" #include "comm.h" #include "domain.h" #include "error.h" #include "fix_omp.h" #include "force.h" #include "memory.h" #include "math_const.h" #include "math_special.h" #include <string.h> #include <math.h> #include "suffix.h" using namespace LAMMPS_NS; using namespace MathConst; using namespace MathSpecial; #ifdef FFT_SINGLE #define ZEROF 0.0f #else #define ZEROF 0.0 #endif #define EPS_HOC 1.0e-7 /* ---------------------------------------------------------------------- */ PPPMOMP::PPPMOMP(LAMMPS *lmp, int narg, char **arg) : PPPM(lmp, narg, arg), ThrOMP(lmp, THR_KSPACE) { triclinic_support = 0; suffix_flag |= Suffix::OMP; } /* ---------------------------------------------------------------------- allocate memory that depends on # of K-vectors and order ------------------------------------------------------------------------- */ void PPPMOMP::allocate() { PPPM::allocate(); #if defined(_OPENMP) #pragma omp parallel default(none) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif ThrData *thr = fix->get_thr(tid); thr->init_pppm(order,memory); } } /* 
---------------------------------------------------------------------- free memory that depends on # of K-vectors and order ------------------------------------------------------------------------- */ void PPPMOMP::deallocate() { PPPM::deallocate(); #if defined(_OPENMP) #pragma omp parallel default(none) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif ThrData *thr = fix->get_thr(tid); thr->init_pppm(-order,memory); } } /* ---------------------------------------------------------------------- pre-compute modified (Hockney-Eastwood) Coulomb Green's function ------------------------------------------------------------------------- */ void PPPMOMP::compute_gf_ik() { const double * const prd = (triclinic==0) ? domain->prd : domain->prd_lamda; const double xprd = prd[0]; const double yprd = prd[1]; const double zprd = prd[2]; const double zprd_slab = zprd*slab_volfactor; const double unitkx = (MY_2PI/xprd); const double unitky = (MY_2PI/yprd); const double unitkz = (MY_2PI/zprd_slab); const int nbx = static_cast<int> ((g_ewald*xprd/(MY_PI*nx_pppm)) * pow(-log(EPS_HOC),0.25)); const int nby = static_cast<int> ((g_ewald*yprd/(MY_PI*ny_pppm)) * pow(-log(EPS_HOC),0.25)); const int nbz = static_cast<int> ((g_ewald*zprd_slab/(MY_PI*nz_pppm)) * pow(-log(EPS_HOC),0.25)); const int numk = nxhi_fft - nxlo_fft + 1; const int numl = nyhi_fft - nylo_fft + 1; const int twoorder = 2*order; #if defined(_OPENMP) #pragma omp parallel default(none) #endif { double snx,sny,snz; double argx,argy,argz,wx,wy,wz,sx,sy,sz,qx,qy,qz; double sum1,dot1,dot2; double numerator,denominator; double sqk; int k,l,m,nx,ny,nz,kper,lper,mper,n,nfrom,nto,tid; loop_setup_thr(nfrom, nto, tid, nfft, comm->nthreads); for (n = nfrom; n < nto; ++n) { m = n / (numl*numk); l = (n - m*numl*numk) / numk; k = n - m*numl*numk - l*numk; m += nzlo_fft; l += nylo_fft; k += nxlo_fft; mper = m - nz_pppm*(2*m/nz_pppm); snz = square(sin(0.5*unitkz*mper*zprd_slab/nz_pppm)); 
lper = l - ny_pppm*(2*l/ny_pppm); sny = square(sin(0.5*unitky*lper*yprd/ny_pppm)); kper = k - nx_pppm*(2*k/nx_pppm); snx = square(sin(0.5*unitkx*kper*xprd/nx_pppm)); sqk = square(unitkx*kper) + square(unitky*lper) + square(unitkz*mper); if (sqk != 0.0) { numerator = 12.5663706/sqk; denominator = gf_denom(snx,sny,snz); sum1 = 0.0; for (nx = -nbx; nx <= nbx; nx++) { qx = unitkx*(kper+nx_pppm*nx); sx = exp(-0.25*square(qx/g_ewald)); argx = 0.5*qx*xprd/nx_pppm; wx = powsinxx(argx,twoorder); for (ny = -nby; ny <= nby; ny++) { qy = unitky*(lper+ny_pppm*ny); sy = exp(-0.25*square(qy/g_ewald)); argy = 0.5*qy*yprd/ny_pppm; wy = powsinxx(argy,twoorder); for (nz = -nbz; nz <= nbz; nz++) { qz = unitkz*(mper+nz_pppm*nz); sz = exp(-0.25*square(qz/g_ewald)); argz = 0.5*qz*zprd_slab/nz_pppm; wz = powsinxx(argz,twoorder); dot1 = unitkx*kper*qx + unitky*lper*qy + unitkz*mper*qz; dot2 = qx*qx+qy*qy+qz*qz; sum1 += (dot1/dot2) * sx*sy*sz * wx*wy*wz; } } } greensfn[n] = numerator*sum1/denominator; } else greensfn[n] = 0.0; } } // end of parallel region } /* ---------------------------------------------------------------------- compute optimized Green's function for energy calculation ------------------------------------------------------------------------- */ void PPPMOMP::compute_gf_ad() { const double * const prd = (triclinic==0) ? 
domain->prd : domain->prd_lamda; const double xprd = prd[0]; const double yprd = prd[1]; const double zprd = prd[2]; const double zprd_slab = zprd*slab_volfactor; const double unitkx = (MY_2PI/xprd); const double unitky = (MY_2PI/yprd); const double unitkz = (MY_2PI/zprd_slab); const int numk = nxhi_fft - nxlo_fft + 1; const int numl = nyhi_fft - nylo_fft + 1; const int twoorder = 2*order; double sf0=0.0,sf1=0.0,sf2=0.0,sf3=0.0,sf4=0.0,sf5=0.0; #if defined(_OPENMP) #pragma omp parallel default(none) reduction(+:sf0,sf1,sf2,sf3,sf4,sf5) #endif { double snx,sny,snz,sqk; double argx,argy,argz,wx,wy,wz,sx,sy,sz,qx,qy,qz; double numerator,denominator; int k,l,m,kper,lper,mper,n,nfrom,nto,tid; loop_setup_thr(nfrom, nto, tid, nfft, comm->nthreads); for (n = nfrom; n < nto; ++n) { m = n / (numl*numk); l = (n - m*numl*numk) / numk; k = n - m*numl*numk - l*numk; m += nzlo_fft; l += nylo_fft; k += nxlo_fft; mper = m - nz_pppm*(2*m/nz_pppm); qz = unitkz*mper; snz = square(sin(0.5*qz*zprd_slab/nz_pppm)); sz = exp(-0.25*square(qz/g_ewald)); argz = 0.5*qz*zprd_slab/nz_pppm; wz = powsinxx(argz,twoorder); lper = l - ny_pppm*(2*l/ny_pppm); qy = unitky*lper; sny = square(sin(0.5*qy*yprd/ny_pppm)); sy = exp(-0.25*square(qy/g_ewald)); argy = 0.5*qy*yprd/ny_pppm; wy = powsinxx(argy,twoorder); kper = k - nx_pppm*(2*k/nx_pppm); qx = unitkx*kper; snx = square(sin(0.5*qx*xprd/nx_pppm)); sx = exp(-0.25*square(qx/g_ewald)); argx = 0.5*qx*xprd/nx_pppm; wx = powsinxx(argx,twoorder); sqk = qx*qx + qy*qy + qz*qz; if (sqk != 0.0) { numerator = MY_4PI/sqk; denominator = gf_denom(snx,sny,snz); greensfn[n] = numerator*sx*sy*sz*wx*wy*wz/denominator; sf0 += sf_precoeff1[n]*greensfn[n]; sf1 += sf_precoeff2[n]*greensfn[n]; sf2 += sf_precoeff3[n]*greensfn[n]; sf3 += sf_precoeff4[n]*greensfn[n]; sf4 += sf_precoeff5[n]*greensfn[n]; sf5 += sf_precoeff6[n]*greensfn[n]; } else { greensfn[n] = 0.0; sf0 += sf_precoeff1[n]*greensfn[n]; sf1 += sf_precoeff2[n]*greensfn[n]; sf2 += sf_precoeff3[n]*greensfn[n]; sf3 += 
sf_precoeff4[n]*greensfn[n]; sf4 += sf_precoeff5[n]*greensfn[n]; sf5 += sf_precoeff6[n]*greensfn[n]; } } } // end of paralle region // compute the coefficients for the self-force correction double prex, prey, prez, tmp[6]; prex = prey = prez = MY_PI/volume; prex *= nx_pppm/xprd; prey *= ny_pppm/yprd; prez *= nz_pppm/zprd_slab; tmp[0] = sf0 * prex; tmp[1] = sf1 * prex*2; tmp[2] = sf2 * prey; tmp[3] = sf3 * prey*2; tmp[4] = sf4 * prez; tmp[5] = sf5 * prez*2; // communicate values with other procs MPI_Allreduce(tmp,sf_coeff,6,MPI_DOUBLE,MPI_SUM,world); } /* ---------------------------------------------------------------------- run the regular toplevel compute method from plain PPPM which will have individual methods replaced by our threaded versions and then call the obligatory force reduction. ------------------------------------------------------------------------- */ void PPPMOMP::compute(int eflag, int vflag) { PPPM::compute(eflag,vflag); #if defined(_OPENMP) #pragma omp parallel default(none) shared(eflag,vflag) #endif { #if defined(_OPENMP) const int tid = omp_get_thread_num(); #else const int tid = 0; #endif ThrData *thr = fix->get_thr(tid); reduce_thr(this, eflag, vflag, thr); } // end of omp parallel region } /* ---------------------------------------------------------------------- create discretized "density" on section of global grid due to my particles density(x,y,z) = charge "density" at grid points of my 3d brick (nxlo:nxhi,nylo:nyhi,nzlo:nzhi) is extent of my brick (including ghosts) in global grid ------------------------------------------------------------------------- */ void PPPMOMP::make_rho() { // clear 3d density array FFT_SCALAR * _noalias const d = &(density_brick[nzlo_out][nylo_out][nxlo_out]); memset(d,0,ngrid*sizeof(FFT_SCALAR)); // no local atoms => nothing else to do const int nlocal = atom->nlocal; if (nlocal == 0) return; const int ix = nxhi_out - nxlo_out + 1; const int iy = nyhi_out - nylo_out + 1; #if defined(_OPENMP) #pragma omp 
parallel default(none) #endif { const double * _noalias const q = atom->q; const dbl3_t * _noalias const x = (dbl3_t *) atom->x[0]; const int3_t * _noalias const p2g = (int3_t *) part2grid[0]; const double boxlox = boxlo[0]; const double boxloy = boxlo[1]; const double boxloz = boxlo[2]; // determine range of grid points handled by this thread int i,jfrom,jto,tid; loop_setup_thr(jfrom,jto,tid,ngrid,comm->nthreads); // get per thread data ThrData *thr = fix->get_thr(tid); FFT_SCALAR * const * const r1d = static_cast<FFT_SCALAR **>(thr->get_rho1d()); // loop over my charges, add their contribution to nearby grid points // (nx,ny,nz) = global coords of grid pt to "lower left" of charge // (dx,dy,dz) = distance to "lower left" grid pt // loop over all local atoms for all threads for (i = 0; i < nlocal; i++) { const int nx = p2g[i].a; const int ny = p2g[i].b; const int nz = p2g[i].t; // pre-screen whether this atom will ever come within // reach of the data segement this thread is updating. if ( ((nz+nlower-nzlo_out)*ix*iy >= jto) || ((nz+nupper-nzlo_out+1)*ix*iy < jfrom) ) continue; const FFT_SCALAR dx = nx+shiftone - (x[i].x-boxlox)*delxinv; const FFT_SCALAR dy = ny+shiftone - (x[i].y-boxloy)*delyinv; const FFT_SCALAR dz = nz+shiftone - (x[i].z-boxloz)*delzinv; compute_rho1d_thr(r1d,dx,dy,dz); const FFT_SCALAR z0 = delvolinv * q[i]; for (int n = nlower; n <= nupper; ++n) { const int jn = (nz+n-nzlo_out)*ix*iy; const FFT_SCALAR y0 = z0*r1d[2][n]; for (int m = nlower; m <= nupper; ++m) { const int jm = jn+(ny+m-nylo_out)*ix; const FFT_SCALAR x0 = y0*r1d[1][m]; for (int l = nlower; l <= nupper; ++l) { const int jl = jm+nx+l-nxlo_out; // make sure each thread only updates // "his" elements of the density grid if (jl >= jto) break; if (jl < jfrom) continue; d[jl] += x0*r1d[0][l]; } } } } } } /* ---------------------------------------------------------------------- interpolate from grid to get electric field & force on my particles for ik 
------------------------------------------------------------------------- */ void PPPMOMP::fieldforce_ik() { // loop over my charges, interpolate electric field from nearby grid points // (nx,ny,nz) = global coords of grid pt to "lower left" of charge // (dx,dy,dz) = distance to "lower left" grid pt // (mx,my,mz) = global coords of moving stencil pt // ek = 3 components of E-field on particle const int nthreads = comm->nthreads; const int nlocal = atom->nlocal; // no local atoms => nothing to do if (nlocal == 0) return; const dbl3_t * _noalias const x = (dbl3_t *) atom->x[0]; const double * _noalias const q = atom->q; const int3_t * _noalias const p2g = (int3_t *) part2grid[0]; const double qqrd2e = force->qqrd2e; const double boxlox = boxlo[0]; const double boxloy = boxlo[1]; const double boxloz = boxlo[2]; #if defined(_OPENMP) #pragma omp parallel default(none) #endif { FFT_SCALAR x0,y0,z0,ekx,eky,ekz; int i,ifrom,ito,tid,l,m,n,mx,my,mz; loop_setup_thr(ifrom,ito,tid,nlocal,nthreads); // get per thread data ThrData *thr = fix->get_thr(tid); dbl3_t * _noalias const f = (dbl3_t *) thr->get_f()[0]; FFT_SCALAR * const * const r1d = static_cast<FFT_SCALAR **>(thr->get_rho1d()); for (i = ifrom; i < ito; ++i) { const int nx = p2g[i].a; const int ny = p2g[i].b; const int nz = p2g[i].t; const FFT_SCALAR dx = nx+shiftone - (x[i].x-boxlox)*delxinv; const FFT_SCALAR dy = ny+shiftone - (x[i].y-boxloy)*delyinv; const FFT_SCALAR dz = nz+shiftone - (x[i].z-boxloz)*delzinv; compute_rho1d_thr(r1d,dx,dy,dz); ekx = eky = ekz = ZEROF; for (n = nlower; n <= nupper; n++) { mz = n+nz; z0 = r1d[2][n]; for (m = nlower; m <= nupper; m++) { my = m+ny; y0 = z0*r1d[1][m]; for (l = nlower; l <= nupper; l++) { mx = l+nx; x0 = y0*r1d[0][l]; ekx -= x0*vdx_brick[mz][my][mx]; eky -= x0*vdy_brick[mz][my][mx]; ekz -= x0*vdz_brick[mz][my][mx]; } } } // convert E-field to force const double qfactor = qqrd2e * scale * q[i]; f[i].x += qfactor*ekx; f[i].y += qfactor*eky; if (slabflag != 2) f[i].z += 
qfactor*ekz; } } // end of parallel region } /* ---------------------------------------------------------------------- interpolate from grid to get electric field & force on my particles for ad ------------------------------------------------------------------------- */ void PPPMOMP::fieldforce_ad() { const int nthreads = comm->nthreads; const int nlocal = atom->nlocal; // no local atoms => nothing to do if (nlocal == 0) return; const double *prd = (triclinic == 0) ? domain->prd : domain->prd_lamda; const double hx_inv = nx_pppm/prd[0]; const double hy_inv = ny_pppm/prd[1]; const double hz_inv = nz_pppm/prd[2]; // loop over my charges, interpolate electric field from nearby grid points // (nx,ny,nz) = global coords of grid pt to "lower left" of charge // (dx,dy,dz) = distance to "lower left" grid pt // (mx,my,mz) = global coords of moving stencil pt // ek = 3 components of E-field on particle const dbl3_t * _noalias const x = (dbl3_t *) atom->x[0]; const double * _noalias const q = atom->q; const int3_t * _noalias const p2g = (int3_t *) part2grid[0]; const double qqrd2e = force->qqrd2e; const double boxlox = boxlo[0]; const double boxloy = boxlo[1]; const double boxloz = boxlo[2]; #if defined(_OPENMP) #pragma omp parallel default(none) #endif { double s1,s2,s3,sf; FFT_SCALAR ekx,eky,ekz; int i,ifrom,ito,tid,l,m,n,mx,my,mz; loop_setup_thr(ifrom,ito,tid,nlocal,nthreads); // get per thread data ThrData *thr = fix->get_thr(tid); dbl3_t * _noalias const f = (dbl3_t *) thr->get_f()[0]; FFT_SCALAR * const * const r1d = static_cast<FFT_SCALAR **>(thr->get_rho1d()); FFT_SCALAR * const * const d1d = static_cast<FFT_SCALAR **>(thr->get_drho1d()); for (i = ifrom; i < ito; ++i) { const int nx = p2g[i].a; const int ny = p2g[i].b; const int nz = p2g[i].t; const FFT_SCALAR dx = nx+shiftone - (x[i].x-boxlox)*delxinv; const FFT_SCALAR dy = ny+shiftone - (x[i].y-boxloy)*delyinv; const FFT_SCALAR dz = nz+shiftone - (x[i].z-boxloz)*delzinv; compute_rho1d_thr(r1d,dx,dy,dz); 
compute_drho1d_thr(d1d,dx,dy,dz); ekx = eky = ekz = ZEROF; for (n = nlower; n <= nupper; n++) { mz = n+nz; for (m = nlower; m <= nupper; m++) { my = m+ny; for (l = nlower; l <= nupper; l++) { mx = l+nx; ekx += d1d[0][l]*r1d[1][m]*r1d[2][n]*u_brick[mz][my][mx]; eky += r1d[0][l]*d1d[1][m]*r1d[2][n]*u_brick[mz][my][mx]; ekz += r1d[0][l]*r1d[1][m]*d1d[2][n]*u_brick[mz][my][mx]; } } } ekx *= hx_inv; eky *= hy_inv; ekz *= hz_inv; // convert E-field to force and substract self forces const double qi = q[i]; const double qfactor = qqrd2e * scale * qi; s1 = x[i].x*hx_inv; sf = sf_coeff[0]*sin(MY_2PI*s1); sf += sf_coeff[1]*sin(MY_4PI*s1); sf *= 2.0*qi; f[i].x += qfactor*(ekx - sf); s2 = x[i].y*hy_inv; sf = sf_coeff[2]*sin(MY_2PI*s2); sf += sf_coeff[3]*sin(MY_4PI*s2); sf *= 2.0*qi; f[i].y += qfactor*(eky - sf); s3 = x[i].z*hz_inv; sf = sf_coeff[4]*sin(MY_2PI*s3); sf += sf_coeff[5]*sin(MY_4PI*s3); sf *= 2.0*qi; if (slabflag != 2) f[i].z += qfactor*(ekz - sf); } } // end of parallel region } /* ---------------------------------------------------------------------- interpolate from grid to get per-atom energy/virial ------------------------------------------------------------------------- */ void PPPMOMP::fieldforce_peratom() { const int nthreads = comm->nthreads; const int nlocal = atom->nlocal; // no local atoms => nothing to do if (nlocal == 0) return; // loop over my charges, interpolate from nearby grid points // (nx,ny,nz) = global coords of grid pt to "lower left" of charge // (dx,dy,dz) = distance to "lower left" grid pt // (mx,my,mz) = global coords of moving stencil pt const dbl3_t * _noalias const x = (dbl3_t *) atom->x[0]; const double * _noalias const q = atom->q; #if defined(_OPENMP) #pragma omp parallel default(none) #endif { FFT_SCALAR dx,dy,dz,x0,y0,z0; FFT_SCALAR u,v0,v1,v2,v3,v4,v5; int i,ifrom,ito,tid,l,m,n,nx,ny,nz,mx,my,mz; loop_setup_thr(ifrom,ito,tid,nlocal,nthreads); // get per thread data ThrData *thr = fix->get_thr(tid); FFT_SCALAR * const * const r1d 
= static_cast<FFT_SCALAR **>(thr->get_rho1d()); for (i = ifrom; i < ito; ++i) { nx = part2grid[i][0]; ny = part2grid[i][1]; nz = part2grid[i][2]; dx = nx+shiftone - (x[i].x-boxlo[0])*delxinv; dy = ny+shiftone - (x[i].y-boxlo[1])*delyinv; dz = nz+shiftone - (x[i].z-boxlo[2])*delzinv; compute_rho1d_thr(r1d,dx,dy,dz); u = v0 = v1 = v2 = v3 = v4 = v5 = ZEROF; for (n = nlower; n <= nupper; n++) { mz = n+nz; z0 = r1d[2][n]; for (m = nlower; m <= nupper; m++) { my = m+ny; y0 = z0*r1d[1][m]; for (l = nlower; l <= nupper; l++) { mx = l+nx; x0 = y0*r1d[0][l]; if (eflag_atom) u += x0*u_brick[mz][my][mx]; if (vflag_atom) { v0 += x0*v0_brick[mz][my][mx]; v1 += x0*v1_brick[mz][my][mx]; v2 += x0*v2_brick[mz][my][mx]; v3 += x0*v3_brick[mz][my][mx]; v4 += x0*v4_brick[mz][my][mx]; v5 += x0*v5_brick[mz][my][mx]; } } } } const double qi = q[i]; if (eflag_atom) eatom[i] += qi*u; if (vflag_atom) { vatom[i][0] += qi*v0; vatom[i][1] += qi*v1; vatom[i][2] += qi*v2; vatom[i][3] += qi*v3; vatom[i][4] += qi*v4; vatom[i][5] += qi*v5; } } } // end of parallel region } /* ---------------------------------------------------------------------- charge assignment into rho1d dx,dy,dz = distance of particle from "lower left" grid point ------------------------------------------------------------------------- */ void PPPMOMP::compute_rho1d_thr(FFT_SCALAR * const * const r1d, const FFT_SCALAR &dx, const FFT_SCALAR &dy, const FFT_SCALAR &dz) { int k,l; FFT_SCALAR r1,r2,r3; for (k = (1-order)/2; k <= order/2; k++) { r1 = r2 = r3 = ZEROF; for (l = order-1; l >= 0; l--) { r1 = rho_coeff[l][k] + r1*dx; r2 = rho_coeff[l][k] + r2*dy; r3 = rho_coeff[l][k] + r3*dz; } r1d[0][k] = r1; r1d[1][k] = r2; r1d[2][k] = r3; } } /* ---------------------------------------------------------------------- charge assignment into drho1d dx,dy,dz = distance of particle from "lower left" grid point ------------------------------------------------------------------------- */ void PPPMOMP::compute_drho1d_thr(FFT_SCALAR * const * 
const d1d, const FFT_SCALAR &dx, const FFT_SCALAR &dy, const FFT_SCALAR &dz) { int k,l; FFT_SCALAR r1,r2,r3; for (k = (1-order)/2; k <= order/2; k++) { r1 = r2 = r3 = ZEROF; for (l = order-2; l >= 0; l--) { r1 = drho_coeff[l][k] + r1*dx; r2 = drho_coeff[l][k] + r2*dy; r3 = drho_coeff[l][k] + r3*dz; } d1d[0][k] = r1; d1d[1][k] = r2; d1d[2][k] = r3; } }
ganzenmg/lammps_current
src/USER-OMP/pppm_omp.cpp
C++
gpl-2.0
22,330
/* * (C) Copyright 2002 * Daniel Engström, Omicron Ceti AB, daniel@omicron.se. * * See file CREDITS for list of people who contributed to this * project. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ /* * board/config.h - configuration options, board specific */ #ifndef __CONFIG_H #define __CONFIG_H #define CONFIG_SKIP_RELOCATE_UBOOT /* * High Level Configuration Options * (easy to change) */ #define CONFIG_X86 1 /* This is a X86 CPU */ #define CONFIG_SYS_SC520 1 /* Include support for AMD SC520 */ #define CONFIG_SYS_SDRAM_PRECHARGE_DELAY 6 /* 6T */ #define CONFIG_SYS_SDRAM_REFRESH_RATE 78 /* 7.8uS (choices are 7.8, 15.6, 31.2 or 62.5uS) */ #define CONFIG_SYS_SDRAM_RAS_CAS_DELAY 3 /* 3T */ /* define at most one of these */ #undef CONFIG_SYS_SDRAM_CAS_LATENCY_2T #define CONFIG_SYS_SDRAM_CAS_LATENCY_3T #define CONFIG_SYS_SC520_HIGH_SPEED 0 /* 100 or 133MHz */ #undef CONFIG_SYS_SC520_RESET /* use SC520 MMCR's to reset cpu */ #undef CONFIG_SYS_SC520_TIMER /* use SC520 swtimers */ #define CONFIG_SYS_GENERIC_TIMER 1 /* use the i8254 PIT timers */ #undef CONFIG_SYS_TSC_TIMER /* use the Pentium TSC timers */ #define CONFIG_SYS_PCAT_INTERRUPTS #define CONFIG_SYS_NUM_IRQS 16 #define CONFIG_SYS_STACK_SIZE 0x8000 /* Size of bootloader stack */ #define CONFIG_SHOW_BOOT_PROGRESS 1 #define CONFIG_LAST_STAGE_INIT 1 /* * Size of 
malloc() pool */ #define CONFIG_MALLOC_SIZE (CONFIG_ENV_SIZE + 128*1024) #define CONFIG_BAUDRATE 9600 /* * BOOTP options */ #define CONFIG_BOOTP_BOOTFILESIZE #define CONFIG_BOOTP_BOOTPATH #define CONFIG_BOOTP_GATEWAY #define CONFIG_BOOTP_HOSTNAME /* * Command line configuration. */ #include <config_cmd_default.h> #define CONFIG_CMD_PCI #define CONFIG_CMD_JFFS2 #define CONFIG_CMD_IDE #define CONFIG_CMD_NET #define CONFIG_CMD_PCMCIA #define CONFIG_CMD_EEPROM #define CONFIG_BOOTDELAY 15 #define CONFIG_BOOTARGS "root=/dev/mtdblock1 console=ttyS0,9600 " \ "mtdparts=phys:7936k(root),256k(uboot) " #define CONFIG_BOOTCOMMAND "setenv bootargs root=/dev/nfs ip=autoconf " \ "console=ttyS0,9600 " \ "mtdparts=phys:7808k(root),128k(env),256k(uboot);" \ "bootp;bootm" #if defined(CONFIG_CMD_KGDB) #define CONFIG_KGDB_BAUDRATE 115200 /* speed to run kgdb serial port */ #define CONFIG_KGDB_SER_INDEX 2 /* which serial port to use */ #endif /* * Miscellaneous configurable options */ #define CONFIG_SYS_LONGHELP /* undef to save memory */ #define CONFIG_SYS_PROMPT "boot > " /* Monitor Command Prompt */ #define CONFIG_SYS_CBSIZE 256 /* Console I/O Buffer Size */ #define CONFIG_SYS_PBSIZE (CONFIG_SYS_CBSIZE+sizeof(CONFIG_SYS_PROMPT)+16) /* Print Buffer Size */ #define CONFIG_SYS_MAXARGS 16 /* max number of command args */ #define CONFIG_SYS_BARGSIZE CONFIG_SYS_CBSIZE /* Boot Argument Buffer Size */ #define CONFIG_SYS_MEMTEST_START 0x00100000 /* memtest works on */ #define CONFIG_SYS_MEMTEST_END 0x01000000 /* 1 ... 
16 MB in DRAM */ #define CONFIG_SYS_LOAD_ADDR 0x100000 /* default load address */ #define CONFIG_SYS_HZ 1024 /* incrementer freq: 1kHz */ /* valid baudrates */ #define CONFIG_SYS_BAUDRATE_TABLE { 9600, 19200, 38400, 57600, 115200 } /*----------------------------------------------------------------------- * Physical Memory Map */ #define CONFIG_NR_DRAM_BANKS 4 /* we have 4 banks of DRAM */ /*----------------------------------------------------------------------- * FLASH and environment organization */ #define CONFIG_SYS_MAX_FLASH_BANKS 1 /* max number of memory banks */ #define CONFIG_SYS_MAX_FLASH_SECT 512 /* max number of sectors on one chip */ /* timeout values are in ticks */ #define CONFIG_SYS_FLASH_ERASE_TOUT (2*CONFIG_SYS_HZ) /* Timeout for Flash Erase */ #define CONFIG_SYS_FLASH_WRITE_TOUT (2*CONFIG_SYS_HZ) /* Timeout for Flash Write */ #define CONFIG_SPI_EEPROM /* SPI EEPROMs such as AT25010 or AT25640 */ #define CONFIG_MW_EEPROM /* MicroWire EEPROMS such as AT93LC46 */ #define CONFIG_DTT_DS1722 /* Dallas DS1722 SPI Temperature probe */ /* allow to overwrite serial and ethaddr */ #define CONFIG_ENV_OVERWRITE #if 0 /* Environment in flash */ #define CONFIG_ENV_IS_IN_FLASH 1 # define CONFIG_ENV_ADDR (0x387a0000) /* Addr of Environment Sector */ # define CONFIG_ENV_SIZE 0x20000 /* Total Size of Environment Sector (or 0x10000) */ # define CONFIG_ENV_OFFSET 0 #else /* Environment in EEPROM */ # define CONFIG_ENV_IS_IN_EEPROM 1 # define CONFIG_SPI # define CONFIG_SPI_X 1 # define CONFIG_ENV_SIZE 0x2000 /* Total Size of Environment EEPROM */ # define CONFIG_ENV_OFFSET 0x1c00 #endif /* * JFFS2 partitions * */ /* No command line, one static partition, whole device */ #undef CONFIG_CMD_MTDPARTS #define CONFIG_JFFS2_DEV "nor0" #define CONFIG_JFFS2_PART_SIZE 0xFFFFFFFF #define CONFIG_JFFS2_PART_OFFSET 0x00000000 /* mtdparts command line support */ /* Note: fake mtd_id used, no linux mtd map file */ /* #define CONFIG_CMD_MTDPARTS #define MTDIDS_DEFAULT 
"nor0=sc520_spunk-0" #define MTDPARTS_DEFAULT "mtdparts=sc520_spunk-0:-(jffs2)" */ /*----------------------------------------------------------------------- * Device drivers */ #define CONFIG_NET_MULTI /* Multi ethernet cards support */ #define CONFIG_EEPRO100 #define CONFIG_SYS_RX_ETH_BUFFER 8 /* use 8 rx buffer on eepro100 */ /************************************************************ * IDE/ATA stuff ************************************************************/ #define CONFIG_SYS_IDE_MAXBUS 2 /* max. 2 IDE busses */ #define CONFIG_SYS_IDE_MAXDEVICE (CONFIG_SYS_IDE_MAXBUS*2) /* max. 2 drives per IDE bus */ #define CONFIG_SYS_ATA_BASE_ADDR 0 #define CONFIG_SYS_ATA_IDE0_OFFSET 0x01f0 /* ide0 offset */ #define CONFIG_SYS_ATA_IDE1_OFFSET 0xe000 /* ide1 offset */ #define CONFIG_SYS_ATA_DATA_OFFSET 0 /* data reg offset */ #define CONFIG_SYS_ATA_REG_OFFSET 0 /* reg offset */ #define CONFIG_SYS_ATA_ALT_OFFSET 0x200 /* alternate register offset */ #define CONFIG_SYS_FIRST_PCMCIA_BUS 1 #undef CONFIG_IDE_LED /* no led for ide supported */ #undef CONFIG_IDE_RESET /* reset for ide unsupported... 
*/ #undef CONFIG_IDE_RESET_ROUTINE /* no special reset function */ #define CONFIG_IDE_TI_CARDBUS #define CONFIG_SYS_PCMCIA_CIS_WIN 0x27f00000 #define CONFIG_SYS_PCMCIA_CIS_WIN_SIZE 0x00100000 #define CONFIG_SYS_PCMCIA_IO_WIN 0xe000 #define CONFIG_SYS_PCMCIA_IO_WIN_SIZE 16 /************************************************************ * DISK Partition support ************************************************************/ #define CONFIG_DOS_PARTITION #define CONFIG_MAC_PARTITION #define CONFIG_ISO_PARTITION /* Experimental */ /************************************************************ * RTC ***********************************************************/ #define CONFIG_RTC_MC146818 #undef CONFIG_WATCHDOG /* watchdog disabled */ /* * PCI stuff */ #define CONFIG_PCI /* include pci support */ #define CONFIG_PCI_PNP /* pci plug-and-play */ #define CONFIG_PCI_SCAN_SHOW #define CONFIG_SYS_FIRST_PCI_IRQ 9 #define CONFIG_SYS_SECOND_PCI_IRQ 10 #define CONFIG_SYS_THIRD_PCI_IRQ 11 #define CONFIG_SYS_FORTH_PCI_IRQ 12 #endif /* __CONFIG_H */
fwmiller/Conserver-Freescale-Linux-U-boot
rpm/BUILD/u-boot-2009.08/include/configs/sc520_spunk.h
C
gpl-2.0
8,075
/* eepro.c: Intel EtherExpress Pro/10 device driver for Linux. */ /* Written 1994, 1995,1996 by Bao C. Ha. Copyright (C) 1994, 1995,1996 by Bao C. Ha. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. The author may be reached at bao.ha@srs.gov or 418 Hastings Place, Martinez, GA 30907. Things remaining to do: Better record keeping of errors. Eliminate transmit interrupt to reduce overhead. Implement "concurrent processing". I won't be doing it! Bugs: If you have a problem of not detecting the 82595 during a reboot (warm reset), disable the FLASH memory should fix it. This is a compatibility hardware problem. Versions: 0.13b basic ethtool support (aris, 09/13/2004) 0.13a in memory shortage, drop packets also in board (Michael Westermann <mw@microdata-pos.de>, 07/30/2002) 0.13 irq sharing, rewrote probe function, fixed a nasty bug in hardware_send_packet and a major cleanup (aris, 11/08/2001) 0.12d fixing a problem with single card detected as eight eth devices fixing a problem with sudden drop in card performance (chris (asdn@go2.pl), 10/29/2001) 0.12c fixing some problems with old cards (aris, 01/08/2001) 0.12b misc fixes (aris, 06/26/2000) 0.12a port of version 0.12a of 2.2.x kernels to 2.3.x (aris (aris@conectiva.com.br), 05/19/2000) 0.11e some tweaks about multiple cards support (PdP, jul/aug 1999) 0.11d added __initdata, __init stuff; call spin_lock_init in eepro_probe1. Replaced "eepro" by dev->name. Augmented the code protected by spin_lock in interrupt routine (PdP, 12/12/1998) 0.11c minor cleanup (PdP, RMC, 09/12/1998) 0.11b Pascal Dupuis (dupuis@lei.ucl.ac.be): works as a module under 2.1.xx. Debug messages are flagged as KERN_DEBUG to avoid console flooding. Added locking at critical parts. Now the dawn thing is SMP safe. 0.11a Attempt to get 2.1.xx support up (RMC) 0.11 Brian Candler added support for multiple cards. 
Tested as a module, no idea if it works when compiled into kernel. 0.10e Rick Bressler notified me that ifconfig up;ifconfig down fails because the irq is lost somewhere. Fixed that by moving request_irq and free_irq to eepro_open and eepro_close respectively. 0.10d Ugh! Now Wakeup works. Was seriously broken in my first attempt. I'll need to find a way to specify an ioport other than the default one in the PnP case. PnP definitively sucks. And, yes, this is not the only reason. 0.10c PnP Wakeup Test for 595FX. uncomment #define PnPWakeup; to use. 0.10b Should work now with (some) Pro/10+. At least for me (and my two cards) it does. _No_ guarantee for function with non-Pro/10+ cards! (don't have any) (RMC, 9/11/96) 0.10 Added support for the Etherexpress Pro/10+. The IRQ map was changed significantly from the old pro/10. The new interrupt map was provided by Rainer M. Canavan (Canavan@Zeus.cs.bonn.edu). (BCH, 9/3/96) 0.09 Fixed a race condition in the transmit algorithm, which causes crashes under heavy load with fast pentium computers. The performance should also improve a bit. The size of RX buffer, and hence TX buffer, can also be changed via lilo or insmod. (BCH, 7/31/96) 0.08 Implement 32-bit I/O for the 82595TX and 82595FX based lan cards. Disable full-duplex mode if TPE is not used. (BCH, 4/8/96) 0.07a Fix a stat report which counts every packet as a heart-beat failure. (BCH, 6/3/95) 0.07 Modified to support all other 82595-based lan cards. The IRQ vector of the EtherExpress Pro will be set according to the value saved in the EEPROM. For other cards, I will do autoirq_request() to grab the next available interrupt vector. (BCH, 3/17/95) 0.06a,b Interim released. Minor changes in the comments and print out format. (BCH, 3/9/95 and 3/14/95) 0.06 First stable release that I am comfortable with. (BCH, 3/2/95) 0.05 Complete testing of multicast. (BCH, 2/23/95) 0.04 Adding multicast support. (BCH, 2/14/95) 0.03 First widely alpha release for public testing. 
(BCH, 2/14/95) */ static const char version[] = "eepro.c: v0.13b 09/13/2004 aris@cathedrallabs.org\n"; #include <linux/module.h> /* Sources: This driver wouldn't have been written without the availability of the Crynwr's Lan595 driver source code. It helps me to familiarize with the 82595 chipset while waiting for the Intel documentation. I also learned how to detect the 82595 using the packet driver's technique. This driver is written by cutting and pasting the skeleton.c driver provided by Donald Becker. I also borrowed the EEPROM routine from Donald Becker's 82586 driver. Datasheet for the Intel 82595 (including the TX and FX version). It provides just enough info that the casual reader might think that it documents the i82595. The User Manual for the 82595. It provides a lot of the missing information. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/ethtool.h> #include <asm/system.h> #include <asm/io.h> #include <asm/dma.h> #define DRV_NAME "eepro" #define DRV_VERSION "0.13c" #define compat_dev_kfree_skb( skb, mode ) dev_kfree_skb( (skb) ) /* I had reports of looong delays with SLOW_DOWN defined as udelay(2) */ #define SLOW_DOWN inb(0x80) /* udelay(2) */ #define compat_init_data __initdata enum iftype { AUI=0, BNC=1, TPE=2 }; /* First, a few definitions that the brave might change. */ /* A zero-terminated list of I/O addresses to be probed. 
*/ static unsigned int eepro_portlist[] compat_init_data = { 0x300, 0x210, 0x240, 0x280, 0x2C0, 0x200, 0x320, 0x340, 0x360, 0}; /* note: 0x300 is default, the 595FX supports ALL IO Ports from 0x000 to 0x3F0, some of which are reserved in PCs */ /* To try the (not-really PnP Wakeup: */ /* #define PnPWakeup */ /* use 0 for production, 1 for verification, >2 for debug */ #ifndef NET_DEBUG #define NET_DEBUG 0 #endif static unsigned int net_debug = NET_DEBUG; /* The number of low I/O ports used by the ethercard. */ #define EEPRO_IO_EXTENT 16 /* Different 82595 chips */ #define LAN595 0 #define LAN595TX 1 #define LAN595FX 2 #define LAN595FX_10ISA 3 /* Information that need to be kept for each board. */ struct eepro_local { unsigned rx_start; unsigned tx_start; /* start of the transmit chain */ int tx_last; /* pointer to last packet in the transmit chain */ unsigned tx_end; /* end of the transmit chain (plus 1) */ int eepro; /* 1 for the EtherExpress Pro/10, 2 for the EtherExpress Pro/10+, 3 for the EtherExpress 10 (blue cards), 0 for other 82595-based lan cards. */ int version; /* a flag to indicate if this is a TX or FX version of the 82595 chip. */ int stepping; spinlock_t lock; /* Serializing lock */ unsigned rcv_ram; /* pre-calculated space for rx */ unsigned xmt_ram; /* pre-calculated space for tx */ unsigned char xmt_bar; unsigned char xmt_lower_limit_reg; unsigned char xmt_upper_limit_reg; short xmt_lower_limit; short xmt_upper_limit; short rcv_lower_limit; short rcv_upper_limit; unsigned char eeprom_reg; unsigned short word[8]; }; /* The station (ethernet) address prefix, used for IDing the board. */ #define SA_ADDR0 0x00 /* Etherexpress Pro/10 */ #define SA_ADDR1 0xaa #define SA_ADDR2 0x00 #define GetBit(x,y) ((x & (1<<y))>>y) /* EEPROM Word 0: */ #define ee_PnP 0 /* Plug 'n Play enable bit */ #define ee_Word1 1 /* Word 1? 
*/ #define ee_BusWidth 2 /* 8/16 bit */ #define ee_FlashAddr 3 /* Flash Address */ #define ee_FlashMask 0x7 /* Mask */ #define ee_AutoIO 6 /* */ #define ee_reserved0 7 /* =0! */ #define ee_Flash 8 /* Flash there? */ #define ee_AutoNeg 9 /* Auto Negotiation enabled? */ #define ee_IO0 10 /* IO Address LSB */ #define ee_IO0Mask 0x /*...*/ #define ee_IO1 15 /* IO MSB */ /* EEPROM Word 1: */ #define ee_IntSel 0 /* Interrupt */ #define ee_IntMask 0x7 #define ee_LI 3 /* Link Integrity 0= enabled */ #define ee_PC 4 /* Polarity Correction 0= enabled */ #define ee_TPE_AUI 5 /* PortSelection 1=TPE */ #define ee_Jabber 6 /* Jabber prevention 0= enabled */ #define ee_AutoPort 7 /* Auto Port Selection 1= Disabled */ #define ee_SMOUT 8 /* SMout Pin Control 0= Input */ #define ee_PROM 9 /* Flash EPROM / PROM 0=Flash */ #define ee_reserved1 10 /* .. 12 =0! */ #define ee_AltReady 13 /* Alternate Ready, 0=normal */ #define ee_reserved2 14 /* =0! */ #define ee_Duplex 15 /* Word2,3,4: */ #define ee_IA5 0 /*bit start for individual Addr Byte 5 */ #define ee_IA4 8 /*bit start for individual Addr Byte 5 */ #define ee_IA3 0 /*bit start for individual Addr Byte 5 */ #define ee_IA2 8 /*bit start for individual Addr Byte 5 */ #define ee_IA1 0 /*bit start for individual Addr Byte 5 */ #define ee_IA0 8 /*bit start for individual Addr Byte 5 */ /* Word 5: */ #define ee_BNC_TPE 0 /* 0=TPE */ #define ee_BootType 1 /* 00=None, 01=IPX, 10=ODI, 11=NDIS */ #define ee_BootTypeMask 0x3 #define ee_NumConn 3 /* Number of Connections 0= One or Two */ #define ee_FlashSock 4 /* Presence of Flash Socket 0= Present */ #define ee_PortTPE 5 #define ee_PortBNC 6 #define ee_PortAUI 7 #define ee_PowerMgt 10 /* 0= disabled */ #define ee_CP 13 /* Concurrent Processing */ #define ee_CPMask 0x7 /* Word 6: */ #define ee_Stepping 0 /* Stepping info */ #define ee_StepMask 0x0F #define ee_BoardID 4 /* Manucaturer Board ID, reserved */ #define ee_BoardMask 0x0FFF /* Word 7: */ #define ee_INT_TO_IRQ 0 /* int to IRQ Mapping = 
0x1EB8 for Pro/10+ */ #define ee_FX_INT2IRQ 0x1EB8 /* the _only_ mapping allowed for FX chips */ /*..*/ #define ee_SIZE 0x40 /* total EEprom Size */ #define ee_Checksum 0xBABA /* initial and final value for adding checksum */ /* Card identification via EEprom: */ #define ee_addr_vendor 0x10 /* Word offset for EISA Vendor ID */ #define ee_addr_id 0x11 /* Word offset for Card ID */ #define ee_addr_SN 0x12 /* Serial Number */ #define ee_addr_CRC_8 0x14 /* CRC over last thee Bytes */ #define ee_vendor_intel0 0x25 /* Vendor ID Intel */ #define ee_vendor_intel1 0xD4 #define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */ #define ee_id_eepro10p1 0x31 #define TX_TIMEOUT 40 /* Index to functions, as function prototypes. */ static int eepro_probe1(struct net_device *dev, int autoprobe); static int eepro_open(struct net_device *dev); static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t eepro_interrupt(int irq, void *dev_id); static void eepro_rx(struct net_device *dev); static void eepro_transmit_interrupt(struct net_device *dev); static int eepro_close(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static void eepro_tx_timeout (struct net_device *dev); static int read_eeprom(int ioaddr, int location, struct net_device *dev); static int hardware_send_packet(struct net_device *dev, void *buf, short length); static int eepro_grab_irq(struct net_device *dev); /* Details of the i82595. You will need either the datasheet or the user manual to understand what is going on here. The 82595 is very different from the 82586, 82593. The receive algorithm in eepro_rx() is just an implementation of the RCV ring structure that the Intel 82595 imposes at the hardware level. The receive buffer is set at 24K, and the transmit buffer is 8K. I am assuming that the total buffer memory is 32K, which is true for the Intel EtherExpress Pro/10. If it is less than that on a generic card, the driver will be broken. 
The transmit algorithm in the hardware_send_packet() is similar to the one in the eepro_rx(). The transmit buffer is a ring linked list. I just queue the next available packet to the end of the list. In my system, the 82595 is so fast that the list seems to always contain a single packet. In other systems with faster computers and more congested network traffics, the ring linked list should improve performance by allowing up to 8K worth of packets to be queued. The sizes of the receive and transmit buffers can now be changed via lilo or insmod. Lilo uses the appended line "ether=io,irq,debug,rx-buffer,eth0" where rx-buffer is in KB unit. Modules uses the parameter mem which is also in KB unit, for example "insmod io=io-address irq=0 mem=rx-buffer." The receive buffer has to be more than 3K or less than 29K. Otherwise, it is reset to the default of 24K, and, hence, 8K for the trasnmit buffer (transmit-buffer = 32K - receive-buffer). */ #define RAM_SIZE 0x8000 #define RCV_HEADER 8 #define RCV_DEFAULT_RAM 0x6000 #define XMT_HEADER 8 #define XMT_DEFAULT_RAM (RAM_SIZE - RCV_DEFAULT_RAM) #define XMT_START_PRO RCV_DEFAULT_RAM #define XMT_START_10 0x0000 #define RCV_START_PRO 0x0000 #define RCV_START_10 XMT_DEFAULT_RAM #define RCV_DONE 0x0008 #define RX_OK 0x2000 #define RX_ERROR 0x0d81 #define TX_DONE_BIT 0x0080 #define TX_OK 0x2000 #define CHAIN_BIT 0x8000 #define XMT_STATUS 0x02 #define XMT_CHAIN 0x04 #define XMT_COUNT 0x06 #define BANK0_SELECT 0x00 #define BANK1_SELECT 0x40 #define BANK2_SELECT 0x80 /* Bank 0 registers */ #define COMMAND_REG 0x00 /* Register 0 */ #define MC_SETUP 0x03 #define XMT_CMD 0x04 #define DIAGNOSE_CMD 0x07 #define RCV_ENABLE_CMD 0x08 #define RCV_DISABLE_CMD 0x0a #define STOP_RCV_CMD 0x0b #define RESET_CMD 0x0e #define POWER_DOWN_CMD 0x18 #define RESUME_XMT_CMD 0x1c #define SEL_RESET_CMD 0x1e #define STATUS_REG 0x01 /* Register 1 */ #define RX_INT 0x02 #define TX_INT 0x04 #define EXEC_STATUS 0x30 #define ID_REG 0x02 /* Register 2 */ #define 
R_ROBIN_BITS 0xc0 /* round robin counter */ #define ID_REG_MASK 0x2c #define ID_REG_SIG 0x24 #define AUTO_ENABLE 0x10 #define INT_MASK_REG 0x03 /* Register 3 */ #define RX_STOP_MASK 0x01 #define RX_MASK 0x02 #define TX_MASK 0x04 #define EXEC_MASK 0x08 #define ALL_MASK 0x0f #define IO_32_BIT 0x10 #define RCV_BAR 0x04 /* The following are word (16-bit) registers */ #define RCV_STOP 0x06 #define XMT_BAR_PRO 0x0a #define XMT_BAR_10 0x0b #define HOST_ADDRESS_REG 0x0c #define IO_PORT 0x0e #define IO_PORT_32_BIT 0x0c /* Bank 1 registers */ #define REG1 0x01 #define WORD_WIDTH 0x02 #define INT_ENABLE 0x80 #define INT_NO_REG 0x02 #define RCV_LOWER_LIMIT_REG 0x08 #define RCV_UPPER_LIMIT_REG 0x09 #define XMT_LOWER_LIMIT_REG_PRO 0x0a #define XMT_UPPER_LIMIT_REG_PRO 0x0b #define XMT_LOWER_LIMIT_REG_10 0x0b #define XMT_UPPER_LIMIT_REG_10 0x0a /* Bank 2 registers */ #define XMT_Chain_Int 0x20 /* Interrupt at the end of the transmit chain */ #define XMT_Chain_ErrStop 0x40 /* Interrupt at the end of the chain even if there are errors */ #define RCV_Discard_BadFrame 0x80 /* Throw bad frames away, and continue to receive others */ #define REG2 0x02 #define PRMSC_Mode 0x01 #define Multi_IA 0x20 #define REG3 0x03 #define TPE_BIT 0x04 #define BNC_BIT 0x20 #define REG13 0x0d #define FDX 0x00 #define A_N_ENABLE 0x02 #define I_ADD_REG0 0x04 #define I_ADD_REG1 0x05 #define I_ADD_REG2 0x06 #define I_ADD_REG3 0x07 #define I_ADD_REG4 0x08 #define I_ADD_REG5 0x09 #define EEPROM_REG_PRO 0x0a #define EEPROM_REG_10 0x0b #define EESK 0x01 #define EECS 0x02 #define EEDI 0x04 #define EEDO 0x08 /* do a full reset */ #define eepro_reset(ioaddr) outb(RESET_CMD, ioaddr) /* do a nice reset */ #define eepro_sel_reset(ioaddr) { \ outb(SEL_RESET_CMD, ioaddr); \ SLOW_DOWN; \ SLOW_DOWN; \ } /* disable all interrupts */ #define eepro_dis_int(ioaddr) outb(ALL_MASK, ioaddr + INT_MASK_REG) /* clear all interrupts */ #define eepro_clear_int(ioaddr) outb(ALL_MASK, ioaddr + STATUS_REG) /* enable tx/rx */ #define 
eepro_en_int(ioaddr) outb(ALL_MASK & ~(RX_MASK | TX_MASK), \ ioaddr + INT_MASK_REG) /* enable exec event interrupt */ #define eepro_en_intexec(ioaddr) outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG) /* enable rx */ #define eepro_en_rx(ioaddr) outb(RCV_ENABLE_CMD, ioaddr) /* disable rx */ #define eepro_dis_rx(ioaddr) outb(RCV_DISABLE_CMD, ioaddr) /* switch bank */ #define eepro_sw2bank0(ioaddr) outb(BANK0_SELECT, ioaddr) #define eepro_sw2bank1(ioaddr) outb(BANK1_SELECT, ioaddr) #define eepro_sw2bank2(ioaddr) outb(BANK2_SELECT, ioaddr) /* enable interrupt line */ #define eepro_en_intline(ioaddr) outb(inb(ioaddr + REG1) | INT_ENABLE,\ ioaddr + REG1) /* disable interrupt line */ #define eepro_dis_intline(ioaddr) outb(inb(ioaddr + REG1) & 0x7f, \ ioaddr + REG1); /* set diagnose flag */ #define eepro_diag(ioaddr) outb(DIAGNOSE_CMD, ioaddr) /* ack for rx int */ #define eepro_ack_rx(ioaddr) outb (RX_INT, ioaddr + STATUS_REG) /* ack for tx int */ #define eepro_ack_tx(ioaddr) outb (TX_INT, ioaddr + STATUS_REG) /* a complete sel reset */ #define eepro_complete_selreset(ioaddr) { \ dev->stats.tx_errors++;\ eepro_sel_reset(ioaddr);\ lp->tx_end = \ lp->xmt_lower_limit;\ lp->tx_start = lp->tx_end;\ lp->tx_last = 0;\ dev->trans_start = jiffies;\ netif_wake_queue(dev);\ eepro_en_rx(ioaddr);\ } /* Check for a network adaptor of this type, and return '0' if one exists. If dev->base_addr == 0, probe all likely locations. If dev->base_addr == 1, always return failure. If dev->base_addr == 2, allocate space for the device and return success (detachable devices only). */ static int __init do_eepro_probe(struct net_device *dev) { int i; int base_addr = dev->base_addr; int irq = dev->irq; #ifdef PnPWakeup /* XXXX for multiple cards should this only be run once? 
*/ /* Wakeup: */ #define WakeupPort 0x279 #define WakeupSeq {0x6A, 0xB5, 0xDA, 0xED, 0xF6, 0xFB, 0x7D, 0xBE,\ 0xDF, 0x6F, 0x37, 0x1B, 0x0D, 0x86, 0xC3, 0x61,\ 0xB0, 0x58, 0x2C, 0x16, 0x8B, 0x45, 0xA2, 0xD1,\ 0xE8, 0x74, 0x3A, 0x9D, 0xCE, 0xE7, 0x73, 0x43} { unsigned short int WS[32]=WakeupSeq; if (request_region(WakeupPort, 2, "eepro wakeup")) { if (net_debug>5) printk(KERN_DEBUG "Waking UP\n"); outb_p(0,WakeupPort); outb_p(0,WakeupPort); for (i=0; i<32; i++) { outb_p(WS[i],WakeupPort); if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]); } release_region(WakeupPort, 2); } else printk(KERN_WARNING "PnP wakeup region busy!\n"); } #endif if (base_addr > 0x1ff) /* Check a single specified location. */ return eepro_probe1(dev, 0); else if (base_addr != 0) /* Don't probe at all. */ return -ENXIO; for (i = 0; eepro_portlist[i]; i++) { dev->base_addr = eepro_portlist[i]; dev->irq = irq; if (eepro_probe1(dev, 1) == 0) return 0; } return -ENODEV; } #ifndef MODULE struct net_device * __init eepro_probe(int unit) { struct net_device *dev = alloc_etherdev(sizeof(struct eepro_local)); int err; if (!dev) return ERR_PTR(-ENODEV); sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); err = do_eepro_probe(dev); if (err) goto out; return dev; out: free_netdev(dev); return ERR_PTR(err); } #endif static void __init printEEPROMInfo(struct net_device *dev) { struct eepro_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; unsigned short Word; int i,j; j = ee_Checksum; for (i = 0; i < 8; i++) j += lp->word[i]; for ( ; i < ee_SIZE; i++) j += read_eeprom(ioaddr, i, dev); printk(KERN_DEBUG "Checksum: %#x\n",j&0xffff); Word = lp->word[0]; printk(KERN_DEBUG "Word0:\n"); printk(KERN_DEBUG " Plug 'n Pray: %d\n",GetBit(Word,ee_PnP)); printk(KERN_DEBUG " Buswidth: %d\n",(GetBit(Word,ee_BusWidth)+1)*8 ); printk(KERN_DEBUG " AutoNegotiation: %d\n",GetBit(Word,ee_AutoNeg)); printk(KERN_DEBUG " IO Address: %#x\n", (Word>>ee_IO0)<<4); if (net_debug>4) { Word = lp->word[1]; 
printk(KERN_DEBUG "Word1:\n"); printk(KERN_DEBUG " INT: %d\n", Word & ee_IntMask); printk(KERN_DEBUG " LI: %d\n", GetBit(Word,ee_LI)); printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC)); printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI)); printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber)); printk(KERN_DEBUG " AutoPort: %d\n", !GetBit(Word,ee_AutoPort)); printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex)); } Word = lp->word[5]; printk(KERN_DEBUG "Word5:\n"); printk(KERN_DEBUG " BNC: %d\n",GetBit(Word,ee_BNC_TPE)); printk(KERN_DEBUG " NumConnectors: %d\n",GetBit(Word,ee_NumConn)); printk(KERN_DEBUG " Has "); if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE "); if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC "); if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI "); printk(KERN_DEBUG "port(s) \n"); Word = lp->word[6]; printk(KERN_DEBUG "Word6:\n"); printk(KERN_DEBUG " Stepping: %d\n",Word & ee_StepMask); printk(KERN_DEBUG " BoardID: %d\n",Word>>ee_BoardID); Word = lp->word[7]; printk(KERN_DEBUG "Word7:\n"); printk(KERN_DEBUG " INT to IRQ:\n"); for (i=0, j=0; i<15; i++) if (GetBit(Word,i)) printk(KERN_DEBUG " INT%d -> IRQ %d;",j++,i); printk(KERN_DEBUG "\n"); } /* function to recalculate the limits of buffer based on rcv_ram */ static void eepro_recalc (struct net_device *dev) { struct eepro_local * lp; lp = netdev_priv(dev); lp->xmt_ram = RAM_SIZE - lp->rcv_ram; if (lp->eepro == LAN595FX_10ISA) { lp->xmt_lower_limit = XMT_START_10; lp->xmt_upper_limit = (lp->xmt_ram - 2); lp->rcv_lower_limit = lp->xmt_ram; lp->rcv_upper_limit = (RAM_SIZE - 2); } else { lp->rcv_lower_limit = RCV_START_PRO; lp->rcv_upper_limit = (lp->rcv_ram - 2); lp->xmt_lower_limit = lp->rcv_ram; lp->xmt_upper_limit = (RAM_SIZE - 2); } } /* prints boot-time info */ static void __init eepro_print_info (struct net_device *dev) { struct eepro_local * lp = netdev_priv(dev); int i; const char * ifmap[] = {"AUI", "10Base2", "10BaseT"}; i = inb(dev->base_addr + ID_REG); 
printk(KERN_DEBUG " id: %#x ",i); printk(" io: %#x ", (unsigned)dev->base_addr); switch (lp->eepro) { case LAN595FX_10ISA: printk("%s: Intel EtherExpress 10 ISA\n at %#x,", dev->name, (unsigned)dev->base_addr); break; case LAN595FX: printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,", dev->name, (unsigned)dev->base_addr); break; case LAN595TX: printk("%s: Intel EtherExpress Pro/10 ISA at %#x,", dev->name, (unsigned)dev->base_addr); break; case LAN595: printk("%s: Intel 82595-based lan card at %#x,", dev->name, (unsigned)dev->base_addr); break; } printk(" %pM", dev->dev_addr); if (net_debug > 3) printk(KERN_DEBUG ", %dK RCV buffer", (int)(lp->rcv_ram)/1024); if (dev->irq > 2) printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]); else printk(", %s.\n", ifmap[dev->if_port]); if (net_debug > 3) { i = lp->word[5]; if (i & 0x2000) /* bit 13 of EEPROM word 5 */ printk(KERN_DEBUG "%s: Concurrent Processing is " "enabled but not used!\n", dev->name); } /* Check the station address for the manufacturer's code */ if (net_debug>3) printEEPROMInfo(dev); } static const struct ethtool_ops eepro_ethtool_ops; static const struct net_device_ops eepro_netdev_ops = { .ndo_open = eepro_open, .ndo_stop = eepro_close, .ndo_start_xmit = eepro_send_packet, .ndo_set_multicast_list = set_multicast_list, .ndo_tx_timeout = eepro_tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; /* This is the real probe routine. Linux has a history of friendly device probes on the ISA bus. A good device probe avoids doing writes, and verifies that the correct device exists and functions. */ static int __init eepro_probe1(struct net_device *dev, int autoprobe) { unsigned short station_addr[3], id, counter; int i; struct eepro_local *lp; int ioaddr = dev->base_addr; int err; /* Grab the region so we can find another board if autoIRQ fails. 
*/ if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) { if (!autoprobe) printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n", ioaddr); return -EBUSY; } /* Now, we are going to check for the signature of the ID_REG (register 2 of bank 0) */ id = inb(ioaddr + ID_REG); if ((id & ID_REG_MASK) != ID_REG_SIG) goto exit; /* We seem to have the 82595 signature, let's play with its counter (last 2 bits of register 2 of bank 0) to be sure. */ counter = id & R_ROBIN_BITS; if ((inb(ioaddr + ID_REG) & R_ROBIN_BITS) != (counter + 0x40)) goto exit; lp = netdev_priv(dev); memset(lp, 0, sizeof(struct eepro_local)); lp->xmt_bar = XMT_BAR_PRO; lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_PRO; lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_PRO; lp->eeprom_reg = EEPROM_REG_PRO; spin_lock_init(&lp->lock); /* Now, get the ethernet hardware address from the EEPROM */ station_addr[0] = read_eeprom(ioaddr, 2, dev); /* FIXME - find another way to know that we've found * an Etherexpress 10 */ if (station_addr[0] == 0x0000 || station_addr[0] == 0xffff) { lp->eepro = LAN595FX_10ISA; lp->eeprom_reg = EEPROM_REG_10; lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_10; lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_10; lp->xmt_bar = XMT_BAR_10; station_addr[0] = read_eeprom(ioaddr, 2, dev); } /* get all words at once. will be used here and for ethtool */ for (i = 0; i < 8; i++) { lp->word[i] = read_eeprom(ioaddr, i, dev); } station_addr[1] = lp->word[3]; station_addr[2] = lp->word[4]; if (!lp->eepro) { if (lp->word[7] == ee_FX_INT2IRQ) lp->eepro = 2; else if (station_addr[2] == SA_ADDR1) lp->eepro = 1; } /* Fill in the 'dev' fields. 
*/ for (i=0; i < 6; i++) dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i]; /* RX buffer must be more than 3K and less than 29K */ if (dev->mem_end < 3072 || dev->mem_end > 29696) lp->rcv_ram = RCV_DEFAULT_RAM; /* calculate {xmt,rcv}_{lower,upper}_limit */ eepro_recalc(dev); if (GetBit(lp->word[5], ee_BNC_TPE)) dev->if_port = BNC; else dev->if_port = TPE; if (dev->irq < 2 && lp->eepro != 0) { /* Mask off INT number */ int count = lp->word[1] & 7; unsigned irqMask = lp->word[7]; while (count--) irqMask &= irqMask - 1; count = ffs(irqMask); if (count) dev->irq = count - 1; if (dev->irq < 2) { printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n"); goto exit; } else if (dev->irq == 2) { dev->irq = 9; } } dev->netdev_ops = &eepro_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->ethtool_ops = &eepro_ethtool_ops; /* print boot time info */ eepro_print_info(dev); /* reset 82595 */ eepro_reset(ioaddr); err = register_netdev(dev); if (err) goto err; return 0; exit: err = -ENODEV; err: release_region(dev->base_addr, EEPRO_IO_EXTENT); return err; } /* Open/initialize the board. This is called (in the current kernel) sometime after booting when the 'ifconfig' program is run. This routine should set everything up anew at each open, even registers that "should" only need to be set once at boot, so that there is non-reboot way to recover if something goes wrong. */ static char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1}; static char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1}; static int eepro_grab_irq(struct net_device *dev) { int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 }; int *irqp = irqlist, temp_reg, ioaddr = dev->base_addr; eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ /* Enable the interrupt line. 
*/ eepro_en_intline(ioaddr); /* be CAREFUL, BANK 0 now */ eepro_sw2bank0(ioaddr); /* clear all interrupts */ eepro_clear_int(ioaddr); /* Let EXEC event to interrupt */ eepro_en_intexec(ioaddr); do { eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ temp_reg = inb(ioaddr + INT_NO_REG); outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG); eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ if (request_irq (*irqp, NULL, IRQF_SHARED, "bogus", dev) != EBUSY) { unsigned long irq_mask; /* Twinkle the interrupt, and check if it's seen */ irq_mask = probe_irq_on(); eepro_diag(ioaddr); /* RESET the 82595 */ mdelay(20); if (*irqp == probe_irq_off(irq_mask)) /* It's a good IRQ line */ break; /* clear all interrupts */ eepro_clear_int(ioaddr); } } while (*++irqp); eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */ /* Disable the physical interrupt line. */ eepro_dis_intline(ioaddr); eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ /* Mask all the interrupts. */ eepro_dis_int(ioaddr); /* clear all interrupts */ eepro_clear_int(ioaddr); return dev->irq; } static int eepro_open(struct net_device *dev) { unsigned short temp_reg, old8, old9; int irqMask; int i, ioaddr = dev->base_addr; struct eepro_local *lp = netdev_priv(dev); if (net_debug > 3) printk(KERN_DEBUG "%s: entering eepro_open routine.\n", dev->name); irqMask = lp->word[7]; if (lp->eepro == LAN595FX_10ISA) { if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 3;\n"); } else if (irqMask == ee_FX_INT2IRQ) /* INT to IRQ Mask */ { lp->eepro = 2; /* Yes, an Intel EtherExpress Pro/10+ */ if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 2;\n"); } else if ((dev->dev_addr[0] == SA_ADDR0 && dev->dev_addr[1] == SA_ADDR1 && dev->dev_addr[2] == SA_ADDR2)) { lp->eepro = 1; if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 1;\n"); } /* Yes, an Intel EtherExpress Pro/10 */ else lp->eepro = 0; /* No, it is a generic 82585 lan card */ /* Get the interrupt vector for the 82595 */ if (dev->irq < 2 && eepro_grab_irq(dev) == 0) { 
printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); return -EAGAIN; } if (request_irq(dev->irq , &eepro_interrupt, 0, dev->name, dev)) { printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); return -EAGAIN; } /* Initialize the 82595. */ eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ temp_reg = inb(ioaddr + lp->eeprom_reg); lp->stepping = temp_reg >> 5; /* Get the stepping number of the 595 */ if (net_debug > 3) printk(KERN_DEBUG "The stepping of the 82595 is %d\n", lp->stepping); if (temp_reg & 0x10) /* Check the TurnOff Enable bit */ outb(temp_reg & 0xef, ioaddr + lp->eeprom_reg); for (i=0; i < 6; i++) outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i); temp_reg = inb(ioaddr + REG1); /* Setup Transmit Chaining */ outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop /* and discard bad RCV frames */ | RCV_Discard_BadFrame, ioaddr + REG1); temp_reg = inb(ioaddr + REG2); /* Match broadcast */ outb(temp_reg | 0x14, ioaddr + REG2); temp_reg = inb(ioaddr + REG3); outb(temp_reg & 0x3f, ioaddr + REG3); /* clear test mode */ /* Set the receiving mode */ eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ /* Set the interrupt vector */ temp_reg = inb(ioaddr + INT_NO_REG); if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA) outb((temp_reg & 0xf8) | irqrmap2[dev->irq], ioaddr + INT_NO_REG); else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG); temp_reg = inb(ioaddr + INT_NO_REG); if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA) outb((temp_reg & 0xf0) | irqrmap2[dev->irq] | 0x08,ioaddr+INT_NO_REG); else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG); if (net_debug > 3) printk(KERN_DEBUG "eepro_open: content of INT Reg is %x\n", temp_reg); /* Initialize the RCV and XMT upper and lower limits */ outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG); outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG); outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg); 
outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg); /* Enable the interrupt line. */ eepro_en_intline(ioaddr); /* Switch back to Bank 0 */ eepro_sw2bank0(ioaddr); /* Let RX and TX events to interrupt */ eepro_en_int(ioaddr); /* clear all interrupts */ eepro_clear_int(ioaddr); /* Initialize RCV */ outw(lp->rcv_lower_limit, ioaddr + RCV_BAR); lp->rx_start = lp->rcv_lower_limit; outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP); /* Initialize XMT */ outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar); lp->tx_start = lp->tx_end = lp->xmt_lower_limit; lp->tx_last = 0; /* Check for the i82595TX and i82595FX */ old8 = inb(ioaddr + 8); outb(~old8, ioaddr + 8); if ((temp_reg = inb(ioaddr + 8)) == old8) { if (net_debug > 3) printk(KERN_DEBUG "i82595 detected!\n"); lp->version = LAN595; } else { lp->version = LAN595TX; outb(old8, ioaddr + 8); old9 = inb(ioaddr + 9); if (irqMask==ee_FX_INT2IRQ) { if (net_debug > 3) { printk(KERN_DEBUG "IrqMask: %#x\n",irqMask); printk(KERN_DEBUG "i82595FX detected!\n"); } lp->version = LAN595FX; outb(old9, ioaddr + 9); if (dev->if_port != TPE) { /* Hopefully, this will fix the problem of using Pentiums and pro/10 w/ BNC. */ eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ temp_reg = inb(ioaddr + REG13); /* disable the full duplex mode since it is not applicable with the 10Base2 cable. 
*/ outb(temp_reg & ~(FDX | A_N_ENABLE), REG13); eepro_sw2bank0(ioaddr); /* be CAREFUL, BANK 0 now */ } } else if (net_debug > 3) { printk(KERN_DEBUG "temp_reg: %#x ~old9: %#x\n",temp_reg,((~old9)&0xff)); printk(KERN_DEBUG "i82595TX detected!\n"); } } eepro_sel_reset(ioaddr); netif_start_queue(dev); if (net_debug > 3) printk(KERN_DEBUG "%s: exiting eepro_open routine.\n", dev->name); /* enabling rx */ eepro_en_rx(ioaddr); return 0; } static void eepro_tx_timeout (struct net_device *dev) { struct eepro_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; /* if (net_debug > 1) */ printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name, "network cable problem"); /* This is not a duplicate. One message for the console, one for the log file */ printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name, "network cable problem"); eepro_complete_selreset(ioaddr); } static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev) { struct eepro_local *lp = netdev_priv(dev); unsigned long flags; int ioaddr = dev->base_addr; short length = skb->len; if (net_debug > 5) printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name); if (length < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN)) return 0; length = ETH_ZLEN; } netif_stop_queue (dev); eepro_dis_int(ioaddr); spin_lock_irqsave(&lp->lock, flags); { unsigned char *buf = skb->data; if (hardware_send_packet(dev, buf, length)) /* we won't wake queue here because we're out of space */ dev->stats.tx_dropped++; else { dev->stats.tx_bytes+=skb->len; dev->trans_start = jiffies; netif_wake_queue(dev); } } dev_kfree_skb (skb); /* You might need to clean up and record Tx statistics here. */ /* dev->stats.tx_aborted_errors++; */ if (net_debug > 5) printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name); eepro_en_int(ioaddr); spin_unlock_irqrestore(&lp->lock, flags); return 0; } /* The typical workload of the driver: Handle the network interface interrupts. 
*/ static irqreturn_t eepro_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct eepro_local *lp; int ioaddr, status, boguscount = 20; int handled = 0; lp = netdev_priv(dev); spin_lock(&lp->lock); if (net_debug > 5) printk(KERN_DEBUG "%s: entering eepro_interrupt routine.\n", dev->name); ioaddr = dev->base_addr; while (((status = inb(ioaddr + STATUS_REG)) & (RX_INT|TX_INT)) && (boguscount--)) { handled = 1; if (status & RX_INT) { if (net_debug > 4) printk(KERN_DEBUG "%s: packet received interrupt.\n", dev->name); eepro_dis_int(ioaddr); /* Get the received packets */ eepro_ack_rx(ioaddr); eepro_rx(dev); eepro_en_int(ioaddr); } if (status & TX_INT) { if (net_debug > 4) printk(KERN_DEBUG "%s: packet transmit interrupt.\n", dev->name); eepro_dis_int(ioaddr); /* Process the status of transmitted packets */ eepro_ack_tx(ioaddr); eepro_transmit_interrupt(dev); eepro_en_int(ioaddr); } } if (net_debug > 5) printk(KERN_DEBUG "%s: exiting eepro_interrupt routine.\n", dev->name); spin_unlock(&lp->lock); return IRQ_RETVAL(handled); } static int eepro_close(struct net_device *dev) { struct eepro_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; short temp_reg; netif_stop_queue(dev); eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */ /* Disable the physical interrupt line. */ temp_reg = inb(ioaddr + REG1); outb(temp_reg & 0x7f, ioaddr + REG1); eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */ /* Flush the Tx and disable Rx. */ outb(STOP_RCV_CMD, ioaddr); lp->tx_start = lp->tx_end = lp->xmt_lower_limit; lp->tx_last = 0; /* Mask all the interrupts. */ eepro_dis_int(ioaddr); /* clear all interrupts */ eepro_clear_int(ioaddr); /* Reset the 82595 */ eepro_reset(ioaddr); /* release the interrupt */ free_irq(dev->irq, dev); /* Update the statistics here. What statistics? */ return 0; } /* Set or clear the multicast filter for this adaptor. 
*/ static void set_multicast_list(struct net_device *dev) { struct eepro_local *lp = netdev_priv(dev); short ioaddr = dev->base_addr; unsigned short mode; struct dev_mc_list *dmi=dev->mc_list; if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63) { eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ mode = inb(ioaddr + REG2); outb(mode | PRMSC_Mode, ioaddr + REG2); mode = inb(ioaddr + REG3); outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */ eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ } else if (dev->mc_count==0 ) { eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ mode = inb(ioaddr + REG2); outb(mode & 0xd6, ioaddr + REG2); /* Turn off Multi-IA and PRMSC_Mode bits */ mode = inb(ioaddr + REG3); outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */ eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ } else { unsigned short status, *eaddrs; int i, boguscount = 0; /* Disable RX and TX interrupts. Necessary to avoid corruption of the HOST_ADDRESS_REG by interrupt service routines. */ eepro_dis_int(ioaddr); eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ mode = inb(ioaddr + REG2); outb(mode | Multi_IA, ioaddr + REG2); mode = inb(ioaddr + REG3); outb(mode, ioaddr + REG3); /* writing reg. 
3 to complete the update */ eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */ outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG); outw(MC_SETUP, ioaddr + IO_PORT); outw(0, ioaddr + IO_PORT); outw(0, ioaddr + IO_PORT); outw(6*(dev->mc_count + 1), ioaddr + IO_PORT); for (i = 0; i < dev->mc_count; i++) { eaddrs=(unsigned short *)dmi->dmi_addr; dmi=dmi->next; outw(*eaddrs++, ioaddr + IO_PORT); outw(*eaddrs++, ioaddr + IO_PORT); outw(*eaddrs++, ioaddr + IO_PORT); } eaddrs = (unsigned short *) dev->dev_addr; outw(eaddrs[0], ioaddr + IO_PORT); outw(eaddrs[1], ioaddr + IO_PORT); outw(eaddrs[2], ioaddr + IO_PORT); outw(lp->tx_end, ioaddr + lp->xmt_bar); outb(MC_SETUP, ioaddr); /* Update the transmit queue */ i = lp->tx_end + XMT_HEADER + 6*(dev->mc_count + 1); if (lp->tx_start != lp->tx_end) { /* update the next address and the chain bit in the last packet */ outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG); outw(i, ioaddr + IO_PORT); outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG); status = inw(ioaddr + IO_PORT); outw(status | CHAIN_BIT, ioaddr + IO_PORT); lp->tx_end = i ; } else { lp->tx_start = lp->tx_end = i ; } /* Acknowledge that the MC setup is done */ do { /* We should be doing this in the eepro_interrupt()! */ SLOW_DOWN; SLOW_DOWN; if (inb(ioaddr + STATUS_REG) & 0x08) { i = inb(ioaddr); outb(0x08, ioaddr + STATUS_REG); if (i & 0x20) { /* command ABORTed */ printk(KERN_NOTICE "%s: multicast setup failed.\n", dev->name); break; } else if ((i & 0x0f) == 0x03) { /* MC-Done */ printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n", dev->name, dev->mc_count, dev->mc_count > 1 ? "es":""); break; } } } while (++boguscount < 100); /* Re-enable RX and TX interrupts */ eepro_en_int(ioaddr); } if (lp->eepro == LAN595FX_10ISA) { eepro_complete_selreset(ioaddr); } else eepro_en_rx(ioaddr); } /* The horrible routine to read a word from the serial EEPROM. 
*/ /* IMPORTANT - the 82595 will be set to Bank 0 after the eeprom is read */ /* The delay between EEPROM clock transitions. */ #define eeprom_delay() { udelay(40); } #define EE_READ_CMD (6 << 6) static int read_eeprom(int ioaddr, int location, struct net_device *dev) { int i; unsigned short retval = 0; struct eepro_local *lp = netdev_priv(dev); short ee_addr = ioaddr + lp->eeprom_reg; int read_cmd = location | EE_READ_CMD; short ctrl_val = EECS ; /* XXXX - black magic */ eepro_sw2bank1(ioaddr); outb(0x00, ioaddr + STATUS_REG); /* XXXX - black magic */ eepro_sw2bank2(ioaddr); outb(ctrl_val, ee_addr); /* Shift the read command bits out. */ for (i = 8; i >= 0; i--) { short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI : ctrl_val; outb(outval, ee_addr); outb(outval | EESK, ee_addr); /* EEPROM clock tick. */ eeprom_delay(); outb(outval, ee_addr); /* Finish EEPROM a clock tick. */ eeprom_delay(); } outb(ctrl_val, ee_addr); for (i = 16; i > 0; i--) { outb(ctrl_val | EESK, ee_addr); eeprom_delay(); retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0); outb(ctrl_val, ee_addr); eeprom_delay(); } /* Terminate the EEPROM access. */ ctrl_val &= ~EECS; outb(ctrl_val | EESK, ee_addr); eeprom_delay(); outb(ctrl_val, ee_addr); eeprom_delay(); eepro_sw2bank0(ioaddr); return retval; } static int hardware_send_packet(struct net_device *dev, void *buf, short length) { struct eepro_local *lp = netdev_priv(dev); short ioaddr = dev->base_addr; unsigned status, tx_available, last, end; if (net_debug > 5) printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name); /* determine how much of the transmit buffer space is available */ if (lp->tx_end > lp->tx_start) tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start); else if (lp->tx_end < lp->tx_start) tx_available = lp->tx_start - lp->tx_end; else tx_available = lp->xmt_ram; if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) { /* No space available ??? 
*/ return 1; } last = lp->tx_end; end = last + (((length + 3) >> 1) << 1) + XMT_HEADER; if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */ if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) { /* Arrrr!!!, must keep the xmt header together, several days were lost to chase this one down. */ last = lp->xmt_lower_limit; end = last + (((length + 3) >> 1) << 1) + XMT_HEADER; } else end = lp->xmt_lower_limit + (end - lp->xmt_upper_limit + 2); } outw(last, ioaddr + HOST_ADDRESS_REG); outw(XMT_CMD, ioaddr + IO_PORT); outw(0, ioaddr + IO_PORT); outw(end, ioaddr + IO_PORT); outw(length, ioaddr + IO_PORT); if (lp->version == LAN595) outsw(ioaddr + IO_PORT, buf, (length + 3) >> 1); else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */ unsigned short temp = inb(ioaddr + INT_MASK_REG); outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG); outsl(ioaddr + IO_PORT_32_BIT, buf, (length + 3) >> 2); outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG); } /* A dummy read to flush the DRAM write pipeline */ status = inw(ioaddr + IO_PORT); if (lp->tx_start == lp->tx_end) { outw(last, ioaddr + lp->xmt_bar); outb(XMT_CMD, ioaddr); lp->tx_start = last; /* I don't like to change tx_start here */ } else { /* update the next address and the chain bit in the last packet */ if (lp->tx_end != last) { outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG); outw(last, ioaddr + IO_PORT); } outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG); status = inw(ioaddr + IO_PORT); outw(status | CHAIN_BIT, ioaddr + IO_PORT); /* Continue the transmit command */ outb(RESUME_XMT_CMD, ioaddr); } lp->tx_last = last; lp->tx_end = end; if (net_debug > 5) printk(KERN_DEBUG "%s: exiting hardware_send_packet routine.\n", dev->name); return 0; } static void eepro_rx(struct net_device *dev) { struct eepro_local *lp = netdev_priv(dev); short ioaddr = dev->base_addr; short boguscount = 20; short rcv_car = lp->rx_start; unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size; if 
(net_debug > 5) printk(KERN_DEBUG "%s: entering eepro_rx routine.\n", dev->name); /* Set the read pointer to the start of the RCV */ outw(rcv_car, ioaddr + HOST_ADDRESS_REG); rcv_event = inw(ioaddr + IO_PORT); while (rcv_event == RCV_DONE) { rcv_status = inw(ioaddr + IO_PORT); rcv_next_frame = inw(ioaddr + IO_PORT); rcv_size = inw(ioaddr + IO_PORT); if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) { /* Malloc up new buffer. */ struct sk_buff *skb; dev->stats.rx_bytes+=rcv_size; rcv_size &= 0x3fff; skb = dev_alloc_skb(rcv_size+5); if (skb == NULL) { printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; rcv_car = lp->rx_start + RCV_HEADER + rcv_size; lp->rx_start = rcv_next_frame; outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG); break; } skb_reserve(skb,2); if (lp->version == LAN595) insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 3) >> 1); else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */ unsigned short temp = inb(ioaddr + INT_MASK_REG); outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG); insl(ioaddr+IO_PORT_32_BIT, skb_put(skb,rcv_size), (rcv_size + 3) >> 2); outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG); } skb->protocol = eth_type_trans(skb,dev); netif_rx(skb); dev->stats.rx_packets++; } else { /* Not sure will ever reach here, I set the 595 to discard bad received frames */ dev->stats.rx_errors++; if (rcv_status & 0x0100) dev->stats.rx_over_errors++; else if (rcv_status & 0x0400) dev->stats.rx_frame_errors++; else if (rcv_status & 0x0800) dev->stats.rx_crc_errors++; printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n", dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size); } if (rcv_status & 0x1000) dev->stats.rx_length_errors++; rcv_car = lp->rx_start + RCV_HEADER + rcv_size; lp->rx_start = rcv_next_frame; if (--boguscount == 0) break; outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG); rcv_event = inw(ioaddr + IO_PORT); } if (rcv_car == 0) rcv_car = lp->rcv_upper_limit 
| 0xff; outw(rcv_car - 1, ioaddr + RCV_STOP); if (net_debug > 5) printk(KERN_DEBUG "%s: exiting eepro_rx routine.\n", dev->name); } static void eepro_transmit_interrupt(struct net_device *dev) { struct eepro_local *lp = netdev_priv(dev); short ioaddr = dev->base_addr; short boguscount = 25; short xmt_status; while ((lp->tx_start != lp->tx_end) && boguscount--) { outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG); xmt_status = inw(ioaddr+IO_PORT); if (!(xmt_status & TX_DONE_BIT)) break; xmt_status = inw(ioaddr+IO_PORT); lp->tx_start = inw(ioaddr+IO_PORT); netif_wake_queue (dev); if (xmt_status & TX_OK) dev->stats.tx_packets++; else { dev->stats.tx_errors++; if (xmt_status & 0x0400) { dev->stats.tx_carrier_errors++; printk(KERN_DEBUG "%s: carrier error\n", dev->name); printk(KERN_DEBUG "%s: XMT status = %#x\n", dev->name, xmt_status); } else { printk(KERN_DEBUG "%s: XMT status = %#x\n", dev->name, xmt_status); printk(KERN_DEBUG "%s: XMT status = %#x\n", dev->name, xmt_status); } } if (xmt_status & 0x000f) { dev->stats.collisions += (xmt_status & 0x000f); } if ((xmt_status & 0x0040) == 0x0) { dev->stats.tx_heartbeat_errors++; } } } static int eepro_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct eepro_local *lp = netdev_priv(dev); cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_Autoneg; cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_Autoneg; if (GetBit(lp->word[5], ee_PortTPE)) { cmd->supported |= SUPPORTED_TP; cmd->advertising |= ADVERTISED_TP; } if (GetBit(lp->word[5], ee_PortBNC)) { cmd->supported |= SUPPORTED_BNC; cmd->advertising |= ADVERTISED_BNC; } if (GetBit(lp->word[5], ee_PortAUI)) { cmd->supported |= SUPPORTED_AUI; cmd->advertising |= ADVERTISED_AUI; } cmd->speed = SPEED_10; if (dev->if_port == TPE && lp->word[1] & ee_Duplex) { cmd->duplex = DUPLEX_FULL; } else { cmd->duplex = DUPLEX_HALF; } cmd->port = dev->if_port; cmd->phy_address = dev->base_addr; cmd->transceiver 
= XCVR_INTERNAL; if (lp->word[0] & ee_AutoNeg) { cmd->autoneg = 1; } return 0; } static void eepro_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strcpy(drvinfo->driver, DRV_NAME); strcpy(drvinfo->version, DRV_VERSION); sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr); } static const struct ethtool_ops eepro_ethtool_ops = { .get_settings = eepro_ethtool_get_settings, .get_drvinfo = eepro_ethtool_get_drvinfo, }; #ifdef MODULE #define MAX_EEPRO 8 static struct net_device *dev_eepro[MAX_EEPRO]; static int io[MAX_EEPRO] = { [0 ... MAX_EEPRO-1] = -1 }; static int irq[MAX_EEPRO]; static int mem[MAX_EEPRO] = { /* Size of the rx buffer in KB */ [0 ... MAX_EEPRO-1] = RCV_DEFAULT_RAM/1024 }; static int autodetect; static int n_eepro; /* For linux 2.1.xx */ MODULE_AUTHOR("Pascal Dupuis and others"); MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver"); MODULE_LICENSE("GPL"); module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param_array(mem, int, NULL, 0); module_param(autodetect, int, 0); MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base addres(es)"); MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)"); MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)"); MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)"); int __init init_module(void) { struct net_device *dev; int i; if (io[0] == -1 && autodetect == 0) { printk(KERN_WARNING "eepro_init_module: Probe is very dangerous in ISA boards!\n"); printk(KERN_WARNING "eepro_init_module: Please add \"autodetect=1\" to force probe\n"); return -ENODEV; } else if (autodetect) { /* if autodetect is set then we must force detection */ for (i = 0; i < MAX_EEPRO; i++) { io[i] = 0; } printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n"); } for (i = 0; i < MAX_EEPRO && io[i] != -1; i++) { dev = alloc_etherdev(sizeof(struct eepro_local)); if (!dev) break; 
dev->mem_end = mem[i]; dev->base_addr = io[i]; dev->irq = irq[i]; if (do_eepro_probe(dev) == 0) { dev_eepro[n_eepro++] = dev; continue; } free_netdev(dev); break; } if (n_eepro) printk(KERN_INFO "%s", version); return n_eepro ? 0 : -ENODEV; } void __exit cleanup_module(void) { int i; for (i=0; i<n_eepro; i++) { struct net_device *dev = dev_eepro[i]; unregister_netdev(dev); release_region(dev->base_addr, EEPRO_IO_EXTENT); free_netdev(dev); } } #endif /* MODULE */
stevelord/PR30
linux-2.6.31/drivers/net/eepro.c
C
gpl-2.0
52,171
#!/bin/bash export "DEBIAN_FRONTEND=noninteractive" VAGRANT_CORE_FOLDER=$(cat "/.puphpet-stuff/vagrant-core-folder.txt") OS=$(/bin/bash "${VAGRANT_CORE_FOLDER}/shell/os-detect.sh" ID) RELEASE=$(/bin/bash "${VAGRANT_CORE_FOLDER}/shell/os-detect.sh" RELEASE) CODENAME=$(/bin/bash "${VAGRANT_CORE_FOLDER}/shell/os-detect.sh" CODENAME) if [[ ! -f /.puphpet-stuff/update-puppet ]]; then if [ "${OS}" == 'debian' ] || [ "${OS}" == 'ubuntu' ]; then echo "Downloading http://apt.puppetlabs.com/puppetlabs-release-${CODENAME}.deb" wget --quiet --tries=5 --connect-timeout=10 -O "/.puphpet-stuff/puppetlabs-release-${CODENAME}.deb" "http://apt.puppetlabs.com/puppetlabs-release-${CODENAME}.deb" echo "Finished downloading http://apt.puppetlabs.com/puppetlabs-release-${CODENAME}.deb" dpkg -i "/.puphpet-stuff/puppetlabs-release-${CODENAME}.deb" >/dev/null echo "Running update-puppet apt-get update" apt-get update >/dev/null echo "Finished running update-puppet apt-get update" echo "Updating Puppet to version 3.4.x" apt-get install -y puppet=3.4.3-1puppetlabs1 puppet-common=3.4.3-1puppetlabs1 >/dev/null PUPPET_VERSION=$(puppet help | grep 'Puppet v') echo "Finished updating puppet to latest version: ${PUPPET_VERSION}" touch /.puphpet-stuff/update-puppet echo "Created empty file /.puphpet-stuff/update-puppet" elif [ "${OS}" == 'centos' ]; then echo "Downloading http://yum.puppetlabs.com/el/${RELEASE}/products/x86_64/puppet-3.4.3-1.el6.noarch.rpm" yum -y --nogpgcheck install "http://yum.puppetlabs.com/el/${RELEASE}/products/x86_64/puppet-3.4.3-1.el6.noarch.rpm" >/dev/null echo "Finished downloading http://yum.puppetlabs.com/el/${RELEASE}/products/x86_64/puppet-3.4.3-1.el6.noarch.rpm" echo "Installing/Updating Puppet to version 3.4.x" yum -y install puppet >/dev/null PUPPET_VERSION=$(puppet help | grep 'Puppet v') echo "Finished installing/updating puppet to version: ${PUPPET_VERSION}" touch /.puphpet-stuff/update-puppet echo "Created empty file /.puphpet-stuff/update-puppet" fi fi
OlyaRybak/vuxenforvaltningens_informationsportal
puphpet/shell/update-puppet.sh
Shell
gpl-2.0
2,189
(function (_, $, Backbone, Drupal, drupalSettings) { "use strict"; /** * State of an in-place editable entity in the DOM. */ Drupal.edit.EntityModel = Drupal.edit.BaseModel.extend({ defaults: { // The DOM element that represents this entity. It may seem bizarre to // have a DOM element in a Backbone Model, but we need to be able to map // entities in the DOM to EntityModels in memory. el: null, // An entity ID, of the form "<entity type>/<entity ID>", e.g. "node/1". entityID: null, // An entity instance ID. The first intance of a specific entity (i.e. with // a given entity ID) is assigned 0, the second 1, and so on. entityInstanceID: null, // The unique ID of this entity instance on the page, of the form "<entity // type>/<entity ID>[entity instance ID]", e.g. "node/1[0]". id: null, // The label of the entity. label: null, // A Drupal.edit.FieldCollection for all fields of this entity. fields: null, // The attributes below are stateful. The ones above will never change // during the life of a EntityModel instance. // Indicates whether this instance of this entity is currently being // edited in-place. isActive: false, // Whether one or more fields have already been stored in TempStore. inTempStore: false, // Whether one or more fields have already been stored in TempStore *or* // the field that's currently being edited is in the 'changed' or a later // state. In other words, this boolean indicates whether a "Save" button is // necessary or not. isDirty: false, // Whether the request to the server has been made to commit this entity. // Used to prevent multiple such requests. isCommitting: false, // The current processing state of an entity. state: 'closed', // The IDs of the fields whose new values have been stored in TempStore. We // must store this on the EntityModel as well (even though it already is on // the FieldModel) because when a field is rerendered, its FieldModel is // destroyed and this allows us to transition it back to the proper state. 
fieldsInTempStore: [], // A flag the tells the application that this EntityModel must be reloaded // in order to restore the original values to its fields in the client. reload: false }, /** * @inheritdoc */ initialize: function () { this.set('fields', new Drupal.edit.FieldCollection()); // Respond to entity state changes. this.listenTo(this, 'change:state', this.stateChange); // The state of the entity is largely dependent on the state of its // fields. this.listenTo(this.get('fields'), 'change:state', this.fieldStateChange); // Call Drupal.edit.BaseModel's initialize() method. Drupal.edit.BaseModel.prototype.initialize.call(this); }, /** * Updates FieldModels' states when an EntityModel change occurs. * * @param Drupal.edit.EntityModel entityModel * @param String state * The state of the associated entity. One of Drupal.edit.EntityModel.states. * @param Object options */ stateChange: function (entityModel, state, options) { var to = state; switch (to) { case 'closed': this.set({ 'isActive': false, 'inTempStore': false, 'isDirty': false }); break; case 'launching': break; case 'opening': // Set the fields to candidate state. entityModel.get('fields').each(function (fieldModel) { fieldModel.set('state', 'candidate', options); }); break; case 'opened': // The entity is now ready for editing! this.set('isActive', true); break; case 'committing': // The user indicated they want to save the entity. var fields = this.get('fields'); // For fields that are in an active state, transition them to candidate. fields.chain() .filter(function (fieldModel) { return _.intersection([fieldModel.get('state')], ['active']).length; }) .each(function (fieldModel) { fieldModel.set('state', 'candidate'); }); // For fields that are in a changed state, field values must first be // stored in TempStore. 
fields.chain() .filter(function (fieldModel) { return _.intersection([fieldModel.get('state')], Drupal.edit.app.changedFieldStates).length; }) .each(function (fieldModel) { fieldModel.set('state', 'saving'); }); break; case 'deactivating': var changedFields = this.get('fields') .filter(function (fieldModel) { return _.intersection([fieldModel.get('state')], ['changed', 'invalid']).length; }); // If the entity contains unconfirmed or unsaved changes, return the // entity to an opened state and ask the user if they would like to save // the changes or discard the changes. // 1. One of the fields is in a changed state. The changed field might // just be a change in the client or it might have been saved to // tempstore. // 2. The saved flag is empty and the confirmed flag is empty. If the // entity has been saved to the server, the fields changed in the // client are irrelevant. If the changes are confirmed, then proceed // to set the fields to candidate state. if ((changedFields.length || this.get('fieldsInTempStore').length) && (!options.saved && !options.confirmed)) { // Cancel deactivation until the user confirms save or discard. this.set('state', 'opened', {confirming: true}); // An action in reaction to state change must be deferred. _.defer(function () { Drupal.edit.app.confirmEntityDeactivation(entityModel); }); } else { var invalidFields = this.get('fields') .filter(function (fieldModel) { return _.intersection([fieldModel.get('state')], ['invalid']).length; }); // Indicate if this EntityModel needs to be reloaded in order to // restore the original values of its fields. entityModel.set('reload', (this.get('fieldsInTempStore').length || invalidFields.length)); // Set all fields to the 'candidate' state. A changed field may have to // go through confirmation first. 
entityModel.get('fields').each(function (fieldModel) { // If the field is already in the candidate state, trigger a change // event so that the entityModel can move to the next state in // deactivation. if (_.intersection([fieldModel.get('state')], ['candidate', 'highlighted']).length) { fieldModel.trigger('change:state', fieldModel, fieldModel.get('state'), options); } else { fieldModel.set('state', 'candidate', options); } }); } break; case 'closing': // Set all fields to the 'inactive' state. options.reason = 'stop'; this.get('fields').each(function (fieldModel) { fieldModel.set({ 'inTempStore': false, 'state': 'inactive' }, options); }); break; } }, /** * Updates a Field and Entity model's "inTempStore" when appropriate. * * Helper function. * * @param Drupal.edit.EntityModel entityModel * The model of the entity for which a field's state attribute has changed. * @param Drupal.edit.FieldModel fieldModel * The model of the field whose state attribute has changed. * * @see fieldStateChange() */ _updateInTempStoreAttributes: function (entityModel, fieldModel) { var current = fieldModel.get('state'); var previous = fieldModel.previous('state'); var fieldsInTempStore = entityModel.get('fieldsInTempStore'); // If the fieldModel changed to the 'saved' state: remember that this // field was saved to TempStore. if (current === 'saved') { // Mark the entity as saved in TempStore, so that we can pass the // proper "reset TempStore" boolean value when communicating with the // server. entityModel.set('inTempStore', true); // Mark the field as saved in TempStore, so that visual indicators // signifying just that may be rendered. fieldModel.set('inTempStore', true); // Remember that this field is in TempStore, restore when rerendered. 
fieldsInTempStore.push(fieldModel.get('fieldID')); fieldsInTempStore = _.uniq(fieldsInTempStore); entityModel.set('fieldsInTempStore', fieldsInTempStore); } // If the fieldModel changed to the 'candidate' state from the // 'inactive' state, then this is a field for this entity that got // rerendered. Restore its previous 'inTempStore' attribute value. else if (current === 'candidate' && previous === 'inactive') { fieldModel.set('inTempStore', _.intersection([fieldModel.get('fieldID')], fieldsInTempStore).length > 0); } }, /** * Reacts to state changes in this entity's fields. * * @param Drupal.edit.FieldModel fieldModel * The model of the field whose state attribute changed. * @param String state * The state of the associated field. One of Drupal.edit.FieldModel.states. */ fieldStateChange: function (fieldModel, state) { var entityModel = this; var fieldState = state; // Switch on the entityModel state. // The EntityModel responds to FieldModel state changes as a function of its // state. For example, a field switching back to 'candidate' state when its // entity is in the 'opened' state has no effect on the entity. But that // same switch back to 'candidate' state of a field when the entity is in // the 'committing' state might allow the entity to proceed with the commit // flow. switch (this.get('state')) { case 'closed': case 'launching': // It should be impossible to reach these: fields can't change state // while the entity is closed or still launching. break; case 'opening': // We must change the entity to the 'opened' state, but it must first be // confirmed that all of its fieldModels have transitioned to the // 'candidate' state. // We do this here, because this is called every time a fieldModel // changes state, hence each time this is called, we get closer to the // goal of having all fieldModels in the 'candidate' state. // A state change in reaction to another state change must be deferred. 
_.defer(function () { entityModel.set('state', 'opened', { 'accept-field-states': Drupal.edit.app.readyFieldStates }); }); break; case 'opened': // Set the isDirty attribute when appropriate so that it is known when // to display the "Save" button in the entity toolbar. // Note that once a field has been changed, there's no way to discard // that change, hence it will have to be saved into TempStore, or the // in-place editing of this field will have to be stopped completely. // In other words: once any field enters the 'changed' field, then for // the remainder of the in-place editing session, the entity is by // definition dirty. if (fieldState === 'changed') { entityModel.set('isDirty', true); } else { this._updateInTempStoreAttributes(entityModel, fieldModel); } break; case 'committing': // If the field save returned a validation error, set the state of the // entity back to 'opened'. if (fieldState === 'invalid') { // A state change in reaction to another state change must be deferred. _.defer(function() { entityModel.set('state', 'opened', { reason: 'invalid' }); }); } else { this._updateInTempStoreAttributes(entityModel, fieldModel); } // Attempt to save the entity. If the entity's fields are not yet all in // a ready state, the save will not be processed. var options = { 'accept-field-states': Drupal.edit.app.readyFieldStates }; if (entityModel.set('isCommitting', true, options)) { entityModel.save({ success: function () { entityModel.set({ 'state': 'deactivating', 'isCommitting' : false }, {'saved': true}); }, error: function () { // Reset the "isCommitting" mutex. entityModel.set('isCommitting', false); // Change the state back to "opened", to allow the user to hit the // "Save" button again. entityModel.set('state', 'opened', { reason: 'networkerror' }); // Show a modal to inform the user of the network error. 
var message = Drupal.t('Your changes to <q>@entity-title</q> could not be saved, either due to a website problem or a network connection problem.<br>Please try again.', { '@entity-title' : entityModel.get('label') }); Drupal.edit.util.networkErrorModal(Drupal.t('Sorry!'), message); } }); } break; case 'deactivating': // When setting the entity to 'closing', require that all fieldModels // are in either the 'candidate' or 'highlighted' state. // A state change in reaction to another state change must be deferred. _.defer(function() { entityModel.set('state', 'closing', { 'accept-field-states': Drupal.edit.app.readyFieldStates }); }); break; case 'closing': // When setting the entity to 'closed', require that all fieldModels are // in the 'inactive' state. // A state change in reaction to another state change must be deferred. _.defer(function() { entityModel.set('state', 'closed', { 'accept-field-states': ['inactive'] }); }); break; } }, /** * Fires an AJAX request to the REST save URL for an entity. * * @param options * An object of options that contains: * - success: (optional) A function to invoke if the entity is success- * fully saved. */ save: function (options) { var entityModel = this; // @todo Simplify this once https://drupal.org/node/1533366 lands. // @see https://drupal.org/node/2029999. var id = 'edit-save-entity'; // Create a temporary element to be able to use Drupal.ajax. var $el = $('#edit-entity-toolbar').find('.action-save'); // This is the span element inside the button. // Create a Drupal.ajax instance to save the entity. var entitySaverAjax = new Drupal.ajax(id, $el, { url: Drupal.edit.util.buildUrl(entityModel.get('entityID'), drupalSettings.edit.entitySaveURL), event: 'edit-save.edit', progress: { type: 'none' }, error: function () { $el.off('edit-save.edit'); // Let the Drupal.edit.EntityModel Backbone model's error() method // handle errors. 
options.error.call(entityModel); } }); // Work-around for https://drupal.org/node/2019481 in Drupal 7. entitySaverAjax.commands = {}; // Entity saved successfully. entitySaverAjax.commands.editEntitySaved = function(ajax, response, status) { // Clean up. $(ajax.element).off('edit-save.edit'); // All fields have been moved from TempStore to permanent storage, update // the "inTempStore" attribute on FieldModels, on the EntityModel and // clear EntityModel's "fieldInTempStore" attribute. entityModel.get('fields').each(function (fieldModel) { fieldModel.set('inTempStore', false); }); entityModel.set('inTempStore', false); entityModel.set('fieldsInTempStore', []); // Invoke the optional success callback. if (options.success) { options.success.call(entityModel); } }; // Trigger the AJAX request, which will will return the editEntitySaved AJAX // command to which we then react. $el.trigger('edit-save.edit'); }, /** * {@inheritdoc} * * @param Object attrs * The attributes changes in the save or set call. * @param Object options * An object with the following option: * - String reason (optional): a string that conveys a particular reason * to allow for an exceptional state change. * - Array accept-field-states (optional) An array of strings that * represent field states that the entities must be in to validate. For * example, if accept-field-states is ['candidate', 'highlighted'], then * all the fields of the entity must be in either of these two states * for the save or set call to validate and proceed. */ validate: function (attrs, options) { var acceptedFieldStates = options['accept-field-states'] || []; // Validate state change. var currentState = this.get('state'); var nextState = attrs.state; if (currentState !== nextState) { // Ensure it's a valid state. if (_.indexOf(this.constructor.states, nextState) === -1) { return '"' + nextState + '" is an invalid state'; } // Ensure it's a state change that is allowed. // Check if the acceptStateChange function accepts it. 
if (!this._acceptStateChange(currentState, nextState, options)) { return 'state change not accepted'; } // If that function accepts it, then ensure all fields are also in an // acceptable state. else if (!this._fieldsHaveAcceptableStates(acceptedFieldStates)) { return 'state change not accepted because fields are not in acceptable state'; } } // Validate setting isCommitting = true. var currentIsCommitting = this.get('isCommitting'); var nextIsCommitting = attrs.isCommitting; if (currentIsCommitting === false && nextIsCommitting === true) { if (!this._fieldsHaveAcceptableStates(acceptedFieldStates)) { return 'isCommitting change not accepted because fields are not in acceptable state'; } } else if (currentIsCommitting === true && nextIsCommitting === true) { return "isCommiting is a mutex, hence only changes are allowed"; } }, // Like @see AppView.acceptEditorStateChange() _acceptStateChange: function (from, to, context) { var accept = true; // In general, enforce the states sequence. Disallow going back from a // "later" state to an "earlier" state, except in explicitly allowed // cases. if (!this.constructor.followsStateSequence(from, to)) { accept = false; // Allow: closing -> closed. // Necessary to stop editing an entity. if (from === 'closing' && to === 'closed') { accept = true; } // Allow: committing -> opened. // Necessary to be able to correct an invalid field, or to hit the "Save" // button again after a server/network error. else if (from === 'committing' && to === 'opened' && context.reason && (context.reason === 'invalid' || context.reason === 'networkerror')) { accept = true; } // Allow: deactivating -> opened. // Necessary to be able to confirm changes with the user. else if (from === 'deactivating' && to === 'opened' && context.confirming) { accept = true; } // Allow: opened -> deactivating. // Necessary to be able to stop editing. 
else if (from === 'opened' && to === 'deactivating' && context.confirmed) { accept = true; } } return accept; }, /** * @param Array acceptedFieldStates * @see validate() * @return Boolean */ _fieldsHaveAcceptableStates: function (acceptedFieldStates) { var accept = true; // If no acceptable field states are provided, assume all field states are // acceptable. We want to let validation pass as a default and only // check validity on calls to set that explicitly request it. if (acceptedFieldStates.length > 0) { var fieldStates = this.get('fields').pluck('state') || []; // If not all fields are in one of the accepted field states, then we // still can't allow this state change. if (_.difference(fieldStates, acceptedFieldStates).length) { accept = false; } } return accept; }, /** * @inheritdoc */ destroy: function (options) { Drupal.edit.BaseModel.prototype.destroy.call(this, options); this.stopListening(); // Destroy all fields of this entity. this.get('fields').each(function (fieldModel) { fieldModel.destroy(); }); }, /** * {@inheritdoc} */ sync: function () { // We don't use REST updates to sync. return; } }, { /** * A list (sequence) of all possible states an entity can be in during * in-place editing. */ states: [ // Initial state, like field's 'inactive' OR the user has just finished // in-place editing this entity. // - Trigger: none (initial) or EntityModel (finished). // - Expected behavior: (when not initial state): tear down // EntityToolbarView, in-place editors and related views. 'closed', // User has activated in-place editing of this entity. // - Trigger: user. // - Expected behavior: the EntityToolbarView is gets set up, in-place // editors (EditorViews) and related views for this entity's fields are // set up. Upon completion of those, the state is changed to 'opening'. 'launching', // Launching has finished. // - Trigger: application. 
// - Guarantees: in-place editors ready for use, all entity and field views // have been set up, all fields are in the 'inactive' state. // - Expected behavior: all fields are changed to the 'candidate' state and // once this is completed, the entity state will be changed to 'opened'. 'opening', // Opening has finished. // - Trigger: EntityModel. // - Guarantees: see 'opening', all fields are in the 'candidate' state. // - Expected behavior: the user is able to actually use in-place editing. 'opened', // User has clicked the 'Save' button (and has thus changed at least one // field). // - Trigger: user. // - Guarantees: see 'opened', plus: either a changed field is in TempStore, // or the user has just modified a field without activating (switching to) // another field. // - Expected behavior: 1) if any of the fields are not yet in TempStore, // save them to TempStore, 2) if then any of the fields has the 'invalid' // state, then change the entity state back to 'opened', otherwise: save // the entity by committing it from TempStore into permanent storage. 'committing', // User has clicked the 'Close' button, or has clicked the 'Save' button and // that was successfully completed. // - Trigger: user or EntityModel. // - Guarantees: when having clicked 'Close' hardly any: fields may be in a // variety of states; when having clicked 'Save': all fields are in the // 'candidate' state. // - Expected behavior: transition all fields to the 'candidate' state, // possibly requiring confirmation in the case of having clicked 'Close'. 'deactivating', // Deactivation has been completed. // - Trigger: EntityModel. // - Guarantees: all fields are in the 'candidate' state. // - Expected behavior: change all fields to the 'inactive' state. 'closing' ], /** * Indicates whether the 'from' state comes before the 'to' state. * * @param String from * One of Drupal.edit.EntityModel.states. * @param String to * One of Drupal.edit.EntityModel.states. 
* @return Boolean */ followsStateSequence: function (from, to) { return _.indexOf(this.states, from) < _.indexOf(this.states, to); } }); Drupal.edit.EntityCollection = Backbone.Collection.extend({ model: Drupal.edit.EntityModel }); }(_, jQuery, Backbone, Drupal, Drupal.settings));
christopherhuntley/daphnedixon
sites/all/modules/edit/js/models/EntityModel.js
JavaScript
gpl-2.0
24,459
.wp-block-file{display:flex;flex-wrap:wrap;justify-content:space-between;align-items:center;margin-bottom:0}.wp-block[data-align=left]>.wp-block-file,.wp-block[data-align=right]>.wp-block-file{height:auto}.wp-block-file .components-resizable-box__container{margin-bottom:1em}.wp-block-file .wp-block-file__preview{margin-bottom:1em;width:100%;height:100%}.wp-block-file .wp-block-file__preview-overlay{position:absolute;top:0;left:0;bottom:0;right:0}.wp-block-file .wp-block-file__content-wrapper{flex-grow:1}.wp-block-file a{min-width:1em}.wp-block-file .wp-block-file__button-richtext-wrapper{display:inline-block;margin-right:.75em}
CityOfPhiladelphia/phila.gov
wp/wp-includes/blocks/file/editor-rtl.min.css
CSS
gpl-2.0
635
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation */ #include "qla_def.h" #include "qla_gbl.h" #include <linux/delay.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include "qla_devtbl.h" #ifdef CONFIG_SPARC #include <asm/prom.h> #endif #include "qla_target.h" /* * QLogic ISP2x00 Hardware Support Function Prototypes. */ static int qla2x00_isp_firmware(scsi_qla_host_t *); static int qla2x00_setup_chip(scsi_qla_host_t *); static int qla2x00_fw_ready(scsi_qla_host_t *); static int qla2x00_configure_hba(scsi_qla_host_t *); static int qla2x00_configure_loop(scsi_qla_host_t *); static int qla2x00_configure_local_loop(scsi_qla_host_t *); static int qla2x00_configure_fabric(scsi_qla_host_t *); static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *); static int qla2x00_restart_isp(scsi_qla_host_t *); static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); static int qla84xx_init_chip(scsi_qla_host_t *); static int qla25xx_init_queues(struct qla_hw_data *); static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *); static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea); static void qla24xx_handle_prli_done_event(struct scsi_qla_host *, struct event_arg *); static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *); /* SRB Extensions ---------------------------------------------------------- */ void qla2x00_sp_timeout(struct timer_list *t) { srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer); struct srb_iocb *iocb; WARN_ON(irqs_disabled()); iocb = &sp->u.iocb_cmd; iocb->timeout(sp); } void qla2x00_sp_free(srb_t *sp) { struct srb_iocb *iocb = &sp->u.iocb_cmd; del_timer(&iocb->timer); qla2x00_rel_sp(sp); } void qla2xxx_rel_done_warning(srb_t *sp, int res) { WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp); } void qla2xxx_rel_free_warning(srb_t *sp) { WARN_ONCE(1, "Calling free() of an already freed 
srb %p object\n", sp); } /* Asynchronous Login/Logout Routines -------------------------------------- */ unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *vha) { unsigned long tmo; struct qla_hw_data *ha = vha->hw; /* Firmware should use switch negotiated r_a_tov for timeout. */ tmo = ha->r_a_tov / 10 * 2; if (IS_QLAFX00(ha)) { tmo = FX00_DEF_RATOV * 2; } else if (!IS_FWI2_CAPABLE(ha)) { /* * Except for earlier ISPs where the timeout is seeded from the * initialization control block. */ tmo = ha->login_timeout; } return tmo; } static void qla24xx_abort_iocb_timeout(void *data) { srb_t *sp = data; struct srb_iocb *abt = &sp->u.iocb_cmd; struct qla_qpair *qpair = sp->qpair; u32 handle; unsigned long flags; if (sp->cmd_sp) ql_dbg(ql_dbg_async, sp->vha, 0x507c, "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n", sp->cmd_sp->handle, sp->cmd_sp->type, sp->handle, sp->type); else ql_dbg(ql_dbg_async, sp->vha, 0x507c, "Abort timeout 2 - hdl=%x, type=%x\n", sp->handle, sp->type); spin_lock_irqsave(qpair->qp_lock_ptr, flags); for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) { if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] == sp->cmd_sp)) qpair->req->outstanding_cmds[handle] = NULL; /* removing the abort */ if (qpair->req->outstanding_cmds[handle] == sp) { qpair->req->outstanding_cmds[handle] = NULL; break; } } spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); if (sp->cmd_sp) sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED); abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT); sp->done(sp, QLA_OS_TIMER_EXPIRED); } static void qla24xx_abort_sp_done(srb_t *sp, int res) { struct srb_iocb *abt = &sp->u.iocb_cmd; srb_t *orig_sp = sp->cmd_sp; if (orig_sp) qla_wait_nvme_release_cmd_kref(orig_sp); del_timer(&sp->u.iocb_cmd.timer); if (sp->flags & SRB_WAKEUP_ON_COMP) complete(&abt->u.abt.comp); else sp->free(sp); } int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) { scsi_qla_host_t *vha = cmd_sp->vha; struct srb_iocb *abt_iocb; 
srb_t *sp; int rval = QLA_FUNCTION_FAILED; sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport, GFP_ATOMIC); if (!sp) return rval; abt_iocb = &sp->u.iocb_cmd; sp->type = SRB_ABT_CMD; sp->name = "abort"; sp->qpair = cmd_sp->qpair; sp->cmd_sp = cmd_sp; if (wait) sp->flags = SRB_WAKEUP_ON_COMP; abt_iocb->timeout = qla24xx_abort_iocb_timeout; init_completion(&abt_iocb->u.abt.comp); /* FW can send 2 x ABTS's timeout/20s */ qla2x00_init_timer(sp, 42); abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id); sp->done = qla24xx_abort_sp_done; ql_dbg(ql_dbg_async, vha, 0x507c, "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle, cmd_sp->type); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { sp->free(sp); return rval; } if (wait) { wait_for_completion(&abt_iocb->u.abt.comp); rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? QLA_SUCCESS : QLA_FUNCTION_FAILED; sp->free(sp); } return rval; } void qla2x00_async_iocb_timeout(void *data) { srb_t *sp = data; fc_port_t *fcport = sp->fcport; struct srb_iocb *lio = &sp->u.iocb_cmd; int rc, h; unsigned long flags; if (fcport) { ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, "Async-%s timeout - hdl=%x portid=%06x %8phC.\n", sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); } else { pr_info("Async-%s timeout - hdl=%x.\n", sp->name, sp->handle); } switch (sp->type) { case SRB_LOGIN_CMD: rc = qla24xx_async_abort_cmd(sp, false); if (rc) { /* Retry as needed. */ lio->u.logio.data[0] = MBS_COMMAND_ERROR; lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
QLA_LOGIO_LOGIN_RETRIED : 0; spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { if (sp->qpair->req->outstanding_cmds[h] == sp) { sp->qpair->req->outstanding_cmds[h] = NULL; break; } } spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); sp->done(sp, QLA_FUNCTION_TIMEOUT); } break; case SRB_LOGOUT_CMD: case SRB_CT_PTHRU_CMD: case SRB_MB_IOCB: case SRB_NACK_PLOGI: case SRB_NACK_PRLI: case SRB_NACK_LOGO: case SRB_CTRL_VP: default: rc = qla24xx_async_abort_cmd(sp, false); if (rc) { spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { if (sp->qpair->req->outstanding_cmds[h] == sp) { sp->qpair->req->outstanding_cmds[h] = NULL; break; } } spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); sp->done(sp, QLA_FUNCTION_TIMEOUT); } break; } } static void qla2x00_async_login_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; struct event_arg ea; ql_dbg(ql_dbg_disc, vha, 0x20dd, "%s %8phC res %d \n", __func__, sp->fcport->port_name, res); sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); if (!test_bit(UNLOADING, &vha->dpc_flags)) { memset(&ea, 0, sizeof(ea)); ea.fcport = sp->fcport; ea.data[0] = lio->u.logio.data[0]; ea.data[1] = lio->u.logio.data[1]; ea.iop[0] = lio->u.logio.iop[0]; ea.iop[1] = lio->u.logio.iop[1]; ea.sp = sp; qla24xx_handle_plogi_done_event(vha, &ea); } sp->free(sp); } static inline bool fcport_is_smaller(fc_port_t *fcport) { if (wwn_to_u64(fcport->port_name) < wwn_to_u64(fcport->vha->port_name)) return true; else return false; } static inline bool fcport_is_bigger(fc_port_t *fcport) { return !fcport_is_smaller(fcport); } int qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) { srb_t *sp; struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) || fcport->loop_id == FC_NO_LOOP_ID) { 
ql_log(ql_log_warn, vha, 0xffff, "%s: %8phC - not sending command.\n", __func__, fcport->port_name); return rval; } sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); fcport->flags |= FCF_ASYNC_SENT; fcport->logout_completed = 0; sp->type = SRB_LOGIN_CMD; sp->name = "login"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; lio = &sp->u.iocb_cmd; lio->timeout = qla2x00_async_iocb_timeout; qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); sp->done = qla2x00_async_login_sp_done; if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY; else lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; if (NVME_TARGET(vha->hw, fcport)) lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; ql_log(ql_log_warn, vha, 0x2072, "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, fcport->login_retry); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { fcport->flags |= FCF_LOGIN_NEEDED; set_bit(RELOGIN_NEEDED, &vha->dpc_flags); goto done_free_sp; } return rval; done_free_sp: sp->free(sp); fcport->flags &= ~FCF_ASYNC_SENT; done: fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } static void qla2x00_async_logout_sp_done(srb_t *sp, int res) { sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); sp->fcport->login_gen++; qlt_logo_completion_handler(sp->fcport, res); sp->free(sp); } int qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) { srb_t *sp; struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; fcport->flags |= FCF_ASYNC_SENT; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_LOGOUT_CMD; sp->name = "logout"; lio = &sp->u.iocb_cmd; lio->timeout = qla2x00_async_iocb_timeout; qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); sp->done = qla2x00_async_logout_sp_done; 
ql_dbg(ql_dbg_disc, vha, 0x2070, "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n", sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, fcport->port_name); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_free_sp; return rval; done_free_sp: sp->free(sp); done: fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); return rval; } void qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) { fcport->flags &= ~FCF_ASYNC_ACTIVE; /* Don't re-login in target mode */ if (!fcport->tgt_session) qla2x00_mark_device_lost(vha, fcport, 1); qlt_logo_completion_handler(fcport, data[0]); } static void qla2x00_async_prlo_sp_done(srb_t *sp, int res) { struct srb_iocb *lio = &sp->u.iocb_cmd; struct scsi_qla_host *vha = sp->vha; sp->fcport->flags &= ~FCF_ASYNC_ACTIVE; if (!test_bit(UNLOADING, &vha->dpc_flags)) qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport, lio->u.logio.data); sp->free(sp); } int qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport) { srb_t *sp; struct srb_iocb *lio; int rval; rval = QLA_FUNCTION_FAILED; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_PRLO_CMD; sp->name = "prlo"; lio = &sp->u.iocb_cmd; lio->timeout = qla2x00_async_iocb_timeout; qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); sp->done = qla2x00_async_prlo_sp_done; ql_dbg(ql_dbg_disc, vha, 0x2070, "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n", sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_free_sp; return rval; done_free_sp: sp->free(sp); done: fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } static void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea) { struct fc_port *fcport = ea->fcport; ql_dbg(ql_dbg_disc, vha, 0x20d2, "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n", 
__func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2, fcport->rscn_gen, ea->sp->gen1, fcport->loop_id); WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", ea->data[0]); if (ea->data[0] != MBS_COMMAND_COMPLETE) { ql_dbg(ql_dbg_disc, vha, 0x2066, "%s %8phC: adisc fail: post delete\n", __func__, ea->fcport->port_name); /* deleted = 0 & logout_on_delete = force fw cleanup */ fcport->deleted = 0; fcport->logout_on_delete = 1; qlt_schedule_sess_for_deletion(ea->fcport); return; } if (ea->fcport->disc_state == DSC_DELETE_PEND) return; if (ea->sp->gen2 != ea->fcport->login_gen) { /* target side must have changed it. */ ql_dbg(ql_dbg_disc, vha, 0x20d3, "%s %8phC generation changed\n", __func__, ea->fcport->port_name); return; } else if (ea->sp->gen1 != ea->fcport->rscn_gen) { qla_rscn_replay(fcport); qlt_schedule_sess_for_deletion(fcport); return; } __qla24xx_handle_gpdb_event(vha, ea); } static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_work_evt *e; e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI); if (!e) return QLA_FUNCTION_FAILED; e->u.fcport.fcport = fcport; fcport->flags |= FCF_ASYNC_ACTIVE; qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); return qla2x00_post_work(vha, e); } static void qla2x00_async_adisc_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; struct event_arg ea; struct srb_iocb *lio = &sp->u.iocb_cmd; ql_dbg(ql_dbg_disc, vha, 0x2066, "Async done-%s res %x %8phC\n", sp->name, res, sp->fcport->port_name); sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); memset(&ea, 0, sizeof(ea)); ea.rc = res; ea.data[0] = lio->u.logio.data[0]; ea.data[1] = lio->u.logio.data[1]; ea.iop[0] = lio->u.logio.iop[0]; ea.iop[1] = lio->u.logio.iop[1]; ea.fcport = sp->fcport; ea.sp = sp; qla24xx_handle_adisc_event(vha, &ea); sp->free(sp); } int qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) { srb_t *sp; 
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	/* Snapshot generations so the completion can detect races. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_adisc_sp_done;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;

	ql_dbg(ql_dbg_disc, vha, 0x206f,
	    "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	/* Could not send it now: retry later via the work queue. */
	qla2x00_post_async_adisc_work(vha, fcport, data);
	return rval;
}

/* Is @loop_id one of the handles reserved by the firmware/fabric? */
static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
	struct qla_hw_data *ha = vha->hw;

	if (IS_FWI2_CAPABLE(ha))
		return loop_id > NPH_LAST_HANDLE;

	return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
		loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
}

/**
 * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
 * @vha:	adapter state pointer.
 * @dev:	port structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
{
	int	rval;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	rval = QLA_SUCCESS;

	/* loop_id_map is shared by all vports; vport_slock guards it. */
	spin_lock_irqsave(&ha->vport_slock, flags);

	dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
	if (dev->loop_id >= LOOPID_MAP_SIZE ||
	    qla2x00_is_reserved_id(vha, dev->loop_id)) {
		dev->loop_id = FC_NO_LOOP_ID;
		rval = QLA_FUNCTION_FAILED;
	} else {
		set_bit(dev->loop_id, ha->loop_id_map);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (rval == QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
		    "Assigning new loopid=%x, portid=%x.\n",
		    dev->loop_id, dev->d_id.b24);
	else
		ql_log(ql_log_warn, dev->vha, 0x2087,
		    "No loop_id's available, portid=%x.\n",
		    dev->d_id.b24);

	return rval;
}

/* Return @fcport's loop id to the shared pool (no-op for reserved ids). */
void qla2x00_clear_loop_id(fc_port_t *fcport)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
		return;

	clear_bit(fcport->loop_id, ha->loop_id_map);
	fcport->loop_id = FC_NO_LOOP_ID;
}

/*
 * Digest the firmware's extended Get-Name-List result for one fcport:
 * find its entry, reconcile loop-id / nport-id conflicts, and advance
 * the session state machine accordingly.
 */
static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport, *conflict_fcport;
	struct get_name_list_extended *e;
	u16 i, n, found = 0, loop_id;
	port_id_t id;
	u64 wwn;
	u16 data[2];
	u8 current_login_state, nvme_cls;

	fcport = ea->fcport;
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc,
	    fcport->login_gen, fcport->last_login_gen,
	    fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->rc) { /* rval */
		if (fcport->login_retry == 0) {
			ql_dbg(ql_dbg_disc, vha, 0x20de,
			    "GNL failed Port login retry %8phN, retry cnt=%d.\n",
			    fcport->port_name, fcport->login_retry);
		}
		return;
	}

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		/* An RSCN raced with the GNL: replay it and rediscover. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	} else if
	(fcport->last_login_gen != fcport->login_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e0,
		    "%s %8phC login gen changed\n",
		    __func__, fcport->port_name);
		return;
	}

	/* ea->data[0] holds the byte count the firmware transferred. */
	n = ea->data[0] / sizeof(struct get_name_list_extended);

	ql_dbg(ql_dbg_disc, vha, 0x20e1,
	    "%s %d %8phC n %d %02x%02x%02x lid %d \n",
	    __func__, __LINE__, fcport->port_name, n,
	    fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa, fcport->loop_id);

	/* Look for our WWPN in the firmware's list. */
	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
			continue;

		if (IS_SW_RESV_ADDR(id))
			continue;

		found = 1;

		loop_id = le16_to_cpu(e->nport_handle);
		loop_id = (loop_id & 0x7fff);
		/* NVMe login state is the high nibble, FCP the low one. */
		nvme_cls = e->current_login_state >> 4;
		current_login_state = e->current_login_state & 0xf;

		if (PRLI_PHASE(nvme_cls)) {
			current_login_state = nvme_cls;
			fcport->fc4_type &= ~FS_FC4TYPE_FCP;
			fcport->fc4_type |= FS_FC4TYPE_NVME;
		} else if (PRLI_PHASE(current_login_state)) {
			fcport->fc4_type |= FS_FC4TYPE_FCP;
			fcport->fc4_type &= ~FS_FC4TYPE_NVME;
		}

		ql_dbg(ql_dbg_disc, vha, 0x20e2,
		    "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
		    __func__, fcport->port_name,
		    e->current_login_state, fcport->fw_login_state,
		    fcport->fc4_type, id.b24, fcport->d_id.b24,
		    loop_id, fcport->loop_id);

		switch (fcport->disc_state) {
		case DSC_DELETE_PEND:
		case DSC_DELETED:
			break;
		default:
			/* Port moved or got a new handle: recycle session. */
			if ((id.b24 != fcport->d_id.b24 &&
			    fcport->d_id.b24 &&
			    fcport->loop_id != FC_NO_LOOP_ID) ||
			    (fcport->loop_id != FC_NO_LOOP_ID &&
			     fcport->loop_id != loop_id)) {
				ql_dbg(ql_dbg_disc, vha, 0x20e3,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, fcport->port_name);
				if (fcport->n2n_flag)
					fcport->d_id.b24 = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
			break;
		}

		fcport->loop_id = loop_id;
		if (fcport->n2n_flag)
			fcport->d_id.b24 = id.b24;

		wwn = wwn_to_u64(fcport->port_name);
		qlt_find_sess_invalidate_other(vha, wwn,
			id, loop_id, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another share fcport share the same loop_id &
			 * nport id. Conflict fcport needs to finish
			 * cleanup before this fcport can proceed to login.
			 */
			conflict_fcport->conflict = fcport;
			fcport->login_pause = 1;
		}

		switch (vha->hw->current_topology) {
		default:
			switch (current_login_state) {
			case DSC_LS_PRLI_COMP:
				ql_dbg(ql_dbg_disc + ql_dbg_verbose,
				    vha, 0x20e4, "%s %d %8phC post gpdb\n",
				    __func__, __LINE__, fcport->port_name);

				/* BIT_4 of word 3 distinguishes tgt vs ini. */
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;
				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PORT_UNAVAIL:
			default:
				if (fcport->loop_id == FC_NO_LOOP_ID) {
					qla2x00_find_new_loop_id(vha, fcport);
					fcport->fw_login_state =
					    DSC_LS_PORT_UNAVAIL;
				}
				ql_dbg(ql_dbg_disc, vha, 0x20e5,
				    "%s %d %8phC\n", __func__, __LINE__,
				    fcport->port_name);
				qla24xx_fcport_handle_login(vha, fcport);
				break;
			}
			break;
		case ISP_CFG_N:
			fcport->fw_login_state = current_login_state;
			fcport->d_id = id;
			switch (current_login_state) {
			case DSC_LS_PRLI_PEND:
				/*
				 * In the middle of PRLI. Let it finish.
				 * Allow relogin code to recheck state again
				 * with GNL. Push disc_state back to DELETED
				 * so GNL can go out again
				 */
				qla2x00_set_fcport_disc_state(fcport,
				    DSC_DELETED);
				break;
			case DSC_LS_PRLI_COMP:
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;

				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PLOGI_COMP:
				if (fcport_is_bigger(fcport)) {
					/* local adapter is smaller */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
					break;
				}
				fallthrough;
			default:
				if (fcport_is_smaller(fcport)) {
					/* local adapter is bigger */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
				}
				break;
			}
			break;
		} /* switch (ha->current_topology) */
	}

	if (!found) {
		switch (vha->hw->current_topology) {
		case ISP_CFG_F:
		case ISP_CFG_FL:
			for (i = 0; i < n; i++) {
				e = &vha->gnl.l[i];
				/*
				 * NOTE(review): port_id byte order here
				 * (port_id[0] as domain) is the reverse of
				 * the match loop above (port_id[2] as
				 * domain) - verify which order matches the
				 * firmware's layout.
				 */
				id.b.domain = e->port_id[0];
				id.b.area = e->port_id[1];
				id.b.al_pa = e->port_id[2];
				id.b.rsvd_1 = 0;
				loop_id = le16_to_cpu(e->nport_handle);

				if (fcport->d_id.b24 == id.b24) {
					conflict_fcport =
					    qla2x00_find_fcport_by_wwpn(vha,
						e->port_name, 0);
					if (conflict_fcport) {
						ql_dbg(ql_dbg_disc + ql_dbg_verbose,
						    vha, 0x20e5,
						    "%s %d %8phC post del sess\n",
						    __func__, __LINE__,
						    conflict_fcport->port_name);
						qlt_schedule_sess_for_deletion
							(conflict_fcport);
					}
				}
				/*
				 * FW already picked this loop id for
				 * another fcport
				 */
				if (fcport->loop_id == loop_id)
					fcport->loop_id = FC_NO_LOOP_ID;
			}
			qla24xx_fcport_handle_login(vha, fcport);
			break;
		case ISP_CFG_N:
			qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
			if (time_after_eq(jiffies, fcport->dm_login_expire)) {
				if (fcport->n2n_link_reset_cnt < 2) {
					fcport->n2n_link_reset_cnt++;
					/*
					 * remote port is not sending PLOGI.
					 * Reset link to kick start his state
					 * machine
					 */
					set_bit(N2N_LINK_RESET,
					    &vha->dpc_flags);
				} else {
					if (fcport->n2n_chip_reset < 1) {
						ql_log(ql_log_info, vha, 0x705d,
						    "Chip reset to bring laser down");
						set_bit(ISP_ABORT_NEEDED,
						    &vha->dpc_flags);
						fcport->n2n_chip_reset++;
					} else {
						ql_log(ql_log_info, vha, 0x705d,
						    "Remote port %8ph is not coming back\n",
						    fcport->port_name);
						fcport->scan_state = 0;
					}
				}
				qla2xxx_wake_dpc(vha);
			} else {
				/*
				 * report port suppose to do PLOGI. Give him
				 * more time. FW will catch it.
				 */
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			}
			break;
		default:
			break;
		}
	}
} /* gnl_event */

/* Completion callback for the MBC_PORT_NODE_NAME_LIST mailbox IOCB. */
static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;
	struct fc_port *fcport = NULL, *tf;
	u16 i, n = 0, loop_id;
	struct event_arg ea;
	struct get_name_list_extended *e;
	u64 wwn;
	struct list_head h;
	bool found = false;

	ql_dbg(ql_dbg_disc, vha, 0x20e7,
	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
	    sp->u.iocb_cmd.u.mbx.in_mb[2]);

	if (res == QLA_FUNCTION_TIMEOUT)
		return;

	sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.rc = res;

	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
	    sizeof(struct get_name_list_extended)) {
		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
		    sizeof(struct get_name_list_extended);
		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
	}

	/* Pre-mark every handle the firmware reports as in use. */
	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		loop_id = le16_to_cpu(e->nport_handle);
		/* mask out reserve bit */
		loop_id = (loop_id & 0x7fff);
		set_bit(loop_id, vha->hw->loop_id_map);
		wwn = wwn_to_u64(e->port_name);

		ql_dbg(ql_dbg_disc, vha, 0x20e8,
		    "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
		    __func__, &wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

	/* Detach all waiters under the lock; process them afterwards. */
	INIT_LIST_HEAD(&h);
	fcport = tf = NULL;
	if (!list_empty(&vha->gnl.fcports))
		list_splice_init(&vha->gnl.fcports,
		    &h);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	/* Deliver the shared GNL result to every fcport that was waiting. */
	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		list_del_init(&fcport->gnl_entry);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ea.fcport = fcport;

		qla24xx_handle_gnl_done_event(vha, &ea);
	}

	/* create new fcport if fw has knowledge of new sessions */
	for (i = 0; i < n; i++) {
		port_id_t id;
		u64 wwnn;

		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		found = false;
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (!memcmp((u8 *)&wwn, fcport->port_name,
			    WWN_SIZE)) {
				found = true;
				break;
			}
		}

		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
			ql_dbg(ql_dbg_disc, vha, 0x2065,
			    "%s %d %8phC %06x post new sess\n",
			    __func__, __LINE__, (u8 *)&wwn, id.b24);
			wwnn = wwn_to_u64(e->node_name);
			qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
			    (u8 *)&wwnn, NULL, 0);
		}
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	vha->gnl.sent = 0;
	if (!list_empty(&vha->gnl.fcports)) {
		/* retrigger gnl */
		list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
		    gnl_entry) {
			list_del_init(&fcport->gnl_entry);
			fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
			/* One re-post is enough; the rest piggyback on it. */
			if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
				break;
		}
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

/*
 * Send (or join) an asynchronous Get Name List mailbox command.  Only one
 * GNL is outstanding per host at a time; additional requesters are queued
 * on vha->gnl.fcports and consume the shared result on completion.
 */
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport->flags |= FCF_ASYNC_SENT;
	qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	if (vha->gnl.sent) {
		/* A GNL is already in flight; piggyback on its result. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return QLA_SUCCESS;
	}
	vha->gnl.sent = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	/* DMA target for the list is the preallocated vha->gnl buffer. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	sp->done = qla24xx_async_gnl_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
	return rval;
}

/* Queue a work item that issues a GNL for @fcport. */
int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}

/* Completion callback for the Get Port Database mailbox IOCB. */
static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (res == QLA_FUNCTION_TIMEOUT)
		goto done;

	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;

	qla24xx_handle_gpdb_event(vha, &ea);
done:
	/* Free the port-database DMA buffer allocated by the sender. */
	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
		sp->u.iocb_cmd.u.mbx.in_dma);

	sp->free(sp);
}

/* Queue a work item that sends a PRLI to @fcport. */
static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, e);
}

/* IOCB completion callback for SRB_PRLI_CMD. */
static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %d \n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;

		qla24xx_handle_prli_done_event(vha, &ea);
	}

	sp->free(sp);
}

/*
 * Issue an asynchronous PRLI to @fcport (NVMe or FCP flavor depending on
 * NVME_TARGET()).  Skipped while a firmware-side PLOGI/PRLI is already
 * pending in dual mode.
 */
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
	    qla_dual_mode_enabled(vha)) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_prli_sp_done;
	lio->u.logio.flags = 0;

	if (NVME_TARGET(vha->hw, fcport))
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n",
	    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
	    fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority,
	    NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp");

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

/* Queue a work item that fetches @fcport's port database (opt -> mb[10]). */
int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport,
    u8 opt)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	e->u.fcport.opt = opt;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}

/*
 * Issue an asynchronous Get Port Database mailbox command for @fcport.
 * @opt is passed through to the firmware in mb[10].
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	/* DMA buffer the firmware fills with the port database. */
	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;
	mbx->u.mbx.in = pd;
	mbx->u.mbx.in_dma = pd_dma;

	sp->done = qla24xx_async_gpdb_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Could not send it now: retry later via the work queue. */
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}

/*
 * Common tail of a successful GPDB/ADISC: mark the session alive and,
 * on the first success, schedule FC transport rport registration.
 */
static
void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->deleted = 0;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		/* Drop the lock around the registration scheduling. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		qla24xx_sched_upd_fcport(ea->fcport);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	} else if (ea->fcport->login_succ) {
		/*
		 * We have an existing session. A late RSCN delivery
		 * must have triggered the session to be re-validate.
		 * Session is still valid.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC session revalidate success\n",
		    __func__, __LINE__, ea->fcport->port_name);
		qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}

/* Act on the port database fetched by qla24xx_async_gpdb(). */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;
	struct port_database_24xx *pd;
	struct srb *sp = ea->sp;
	uint8_t	ls;

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__,
	    fcport->port_name, fcport->disc_state, pd->current_login_state,
	    fcport->fc4_type, ea->rc);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	/* NVMe login state is the high nibble, FCP the low one. */
	if (NVME_TARGET(vha->hw, fcport))
		ls = pd->current_login_state >> 4;
	else
		ls = pd->current_login_state & 0xf;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* An RSCN raced with the GPDB: replay it and rediscover. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	switch (ls) {
	case PDS_PRLI_COMPLETE:
		__qla24xx_parse_gpdb(vha, fcport, pd);
		break;
	case PDS_PLOGI_PENDING:
	case PDS_PLOGI_COMPLETE:
	case PDS_PRLI_PENDING:
	case PDS_PRLI2_PENDING:
		/* Set discovery state back to GNL to Relogin attempt */
		if (qla_dual_mode_enabled(vha) ||
		    qla_ini_mode_enabled(vha)) {
			qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		}
		return;
	case PDS_LOGO_PENDING:
	case PDS_PORT_UNAVAILABLE:
	default:
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}
	__qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */

/*
 * Decide whether this adapter should originate the PLOGI on an N2N or
 * initiator-mode link, and kick the login work if so.
 */
static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u8 login = 0;
	int rc;

	if (qla_tgt_mode_enabled(vha))
		return;

	if
	(qla_dual_mode_enabled(vha)) {
		if (N2N_TOPO(vha->hw)) {
			u64 mywwn, wwn;

			/* The side with the larger WWPN initiates PLOGI. */
			mywwn = wwn_to_u64(vha->port_name);
			wwn = wwn_to_u64(fcport->port_name);
			if (mywwn > wwn)
				login = 1;
			else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			    && time_after_eq(jiffies,
				    fcport->plogi_nack_done_deadline))
				login = 1;
		} else {
			login = 1;
		}
	} else {
		/* initiator mode */
		login = 1;
	}

	if (login && fcport->login_retry) {
		fcport->login_retry--;
		if (fcport->loop_id == FC_NO_LOOP_ID) {
			fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
			rc = qla2x00_find_new_loop_id(vha, fcport);
			if (rc) {
				ql_dbg(ql_dbg_disc, vha, 0x20e6,
				    "%s %d %8phC post del sess - out of loopid\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->scan_state = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
		}
		ql_dbg(ql_dbg_disc, vha, 0x20bf,
		    "%s %d %8phC post login\n",
		    __func__, __LINE__, fcport->port_name);
		qla2x00_post_async_login_work(vha, fcport, NULL);
	}
}

/*
 * Main per-port discovery/login state machine step.  Called whenever an
 * event (GNL, RSCN, relogin timer, ...) may require progressing @fcport.
 * Always returns 0.
 */
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u16 data[2];
	u64 wwn;
	u16 sec;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->login_gen, fcport->loop_id, fcport->scan_state);

	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	/* Let a firmware-side PLOGI/PRLI finish before acting. */
	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
	    qla_dual_mode_enabled(vha) &&
	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	     (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
		return 0;

	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
	    !N2N_TOPO(vha->hw)) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return 0;
		}
	}

	/* Target won't initiate port login if fabric is present */
	if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
		return 0;

	if (fcport->flags & FCF_ASYNC_SENT) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		wwn = wwn_to_u64(fcport->node_name);
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			if (fcport_is_smaller(fcport)) {
				/* this adapter is bigger */
				if (fcport->login_retry) {
					if (fcport->loop_id == FC_NO_LOOP_ID) {
						qla2x00_find_new_loop_id(vha,
						    fcport);
						fcport->fw_login_state =
						    DSC_LS_PORT_UNAVAIL;
					}
					fcport->login_retry--;
					qla_post_els_plogi_work(vha, fcport);
				} else {
					ql_log(ql_log_info, vha, 0x705d,
					    "Unable to reach remote port %8phC",
					    fcport->port_name);
				}
			} else {
				qla24xx_post_gnl_work(vha, fcport);
			}
			break;
		default:
			if (wwn == 0) {
				/* Node name unknown: resolve it first. */
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC post GNNID\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnnid_work(vha, fcport);
			} else if (fcport->loop_id == FC_NO_LOOP_ID) {
				ql_dbg(ql_dbg_disc, vha, 0x20bd,
				    "%s %d %8phC post gnl\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnl_work(vha, fcport);
			} else {
				qla_chk_n2n_b4_login(vha, fcport);
			}
			break;
		}
		break;

	case DSC_GNL:
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			if ((fcport->current_login_state & 0xf) == 0x6) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post GPDB work\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->chip_reset =
					vha->hw->base_qpair->chip_reset;
				qla24xx_post_gpdb_work(vha, fcport, 0);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post %s PRLI\n",
				    __func__, __LINE__, fcport->port_name,
				    NVME_TARGET(vha->hw, fcport) ? "NVME" :
				    "FC");
				qla24xx_post_prli_work(vha, fcport);
			}
			break;
		default:
			if (fcport->login_pause) {
				/* A conflicting session is still cleaning up. */
				ql_dbg(ql_dbg_disc, vha, 0x20d8,
				    "%s %d %8phC exit\n",
				    __func__, __LINE__,
				    fcport->port_name);
				fcport->last_rscn_gen = fcport->rscn_gen;
				fcport->last_login_gen = fcport->login_gen;
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
				break;
			}
			qla_chk_n2n_b4_login(vha, fcport);
			break;
		}
		break;

	case DSC_LOGIN_FAILED:
		if (N2N_TOPO(vha->hw))
			qla_chk_n2n_b4_login(vha, fcport);
		else
			qlt_schedule_sess_for_deletion(fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		data[0] = data[1] = 0;
		qla2x00_post_async_adisc_work(vha, fcport, data);
		break;

	case DSC_LOGIN_PEND:
		if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			qla24xx_post_prli_work(vha, fcport);
		break;

	case DSC_UPD_FCPORT:
		/* Log once a minute while rport registration is slow. */
		sec = jiffies_to_msecs(jiffies -
		    fcport->jiffies_at_registration)/1000;
		if (fcport->sec_since_registration < sec && sec &&
		    !(sec % 60)) {
			fcport->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
			    "%s %8phC - Slow Rport registration(%d Sec)\n",
			    __func__, fcport->port_name, sec);
		}

		if (fcport->next_disc_state != DSC_DELETE_PEND)
			fcport->next_disc_state = DSC_ADISC;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		break;

	default:
		break;
	}

	return 0;
}

/* Queue creation of a new session for a port the firmware discovered. */
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
    u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.new_sess.id = *id;
	e->u.new_sess.pla = pla;
	e->u.new_sess.fc4_type = fc4_type;
	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
	if (node_name)
		memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);

	return qla2x00_post_work(vha, e);
}

/* Note an RSCN against the affected fcport and (re)arm the fabric scan. */
void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport;
	unsigned long flags;

	fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
	if (fcport) {
		fcport->scan_needed = 1;
		fcport->rscn_gen++;
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	if
	(vha->scan.scan_flags == 0) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s: schedule\n", __func__);
		vha->scan.scan_flags |= SF_QUEUED;
		schedule_delayed_work(&vha->scan.scan_work, 5);
	}
	spin_unlock_irqrestore(&vha->work_lock, flags);
}

/* Relogin event for @fcport: re-drive its discovery state machine. */
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		/* State changed since last pass: refresh via GNL first. */
		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
		    __func__, __LINE__, fcport->port_name);
		qla24xx_post_gnl_work(vha, fcport);
		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}

/* ELS PLOGI finished; in initiator/dual mode follow up with a PRLI. */
void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
				      struct event_arg *ea)
{
	/* for pure Target Mode, PRLI will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x2118,
	    "%s %d %8phC post PRLI\n",
	    __func__, __LINE__, ea->fcport->port_name);
	qla24xx_post_prli_work(vha, ea->fcport);
}

/*
 * RSCN(s) came in for this fcport, but the RSCN(s) was not able
 * to be consumed by the fcport
 */
void qla_rscn_replay(fc_port_t *fcport)
{
	struct event_arg ea;

	switch (fcport->disc_state) {
	case DSC_DELETE_PEND:
		return;
	default:
		break;
	}

	if (fcport->scan_needed) {
		memset(&ea, 0, sizeof(ea));
		ea.id = fcport->d_id;
		ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
		qla2x00_handle_rscn(fcport->vha, &ea);
	}
}

/* Timeout handler for an async task-management IOCB: abort and complete. */
static void
qla2x00_tmf_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;
	int rc, h;
	unsigned long flags;

	rc = qla24xx_async_abort_cmd(sp, false);
	if (rc) {
		/* Abort not queued; retire the handle and fail the TMF. */
		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
			if (sp->qpair->req->outstanding_cmds[h] == sp) {
				sp->qpair->req->outstanding_cmds[h] = NULL;
				break;
			}
		}
		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
		tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
		tmf->u.tmf.data = QLA_FUNCTION_FAILED;
		complete(&tmf->u.tmf.comp);
	}
}

/* TMF IOCB completion: wake the waiter in qla2x00_async_tm_cmd(). */
static void qla2x00_tmf_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	complete(&tmf->u.tmf.comp);
}

/*
 * Send a task-management command (e.g. LUN reset) for @fcport and wait
 * for its completion; on success a marker IOCB is issued afterwards.
 * Sleeps - must be called from process context.
 */
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
	uint32_t tag)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";

	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));

	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qla2x00_tmf_sp_done;

	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.data;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		flags = tm_iocb->u.tmf.flags;
		lun = (uint16_t)tm_iocb->u.tmf.lun;

		/* Issue Marker IOCB */
		qla2x00_marker(vha, vha->hw->base_qpair,
		    fcport->loop_id, lun,
		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
	}

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}

/* Abort a previously-issued async command identified by @sp. */
int
qla24xx_async_abort_command(srb_t *sp)
{
	unsigned long flags = 0;

	uint32_t	handle;
	fc_port_t	*fcport = sp->fcport;
	struct qla_qpair *qpair = sp->qpair;
	struct scsi_qla_host *vha = fcport->vha;
	struct req_que *req = qpair->req;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}
	if (sp->type == SRB_FXIOCB_DCMD)
		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
		    FXDISC_ABORT_IOCTL);

	return qla24xx_async_abort_cmd(sp, true);
}

/* Act on the completion status of an async PRLI. */
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
		  ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		ea->fcport->nvme_prli_service_param = ea->iop[0];
		if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
			ea->fcport->nvme_first_burst_size =
			    (ea->iop[1] & 0xffff) * 512;
		else
			ea->fcport->nvme_first_burst_size = 0;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
		    (ea->iop[1] == 0x50000)) {   /* reson 5=busy expl:0x0 */
			/* Target is busy: just schedule another attempt. */
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
			break;
		}

		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC priority %s, fc4type %x\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
"FCP" : "NVMe", ea->fcport->fc4_type);

		if (N2N_TOPO(vha->hw)) {
			/* Flip to the other FC4 type for the next attempt. */
			if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME) {
				ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
				ea->fcport->fc4_type |= FS_FC4TYPE_FCP;
			} else {
				ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
				ea->fcport->fc4_type |= FS_FC4TYPE_NVME;
			}

			if (ea->fcport->n2n_link_reset_cnt < 3) {
				ea->fcport->n2n_link_reset_cnt++;
				vha->relogin_jif = jiffies + 2 * HZ;
				/*
				 * PRLI failed. Reset link to kick start
				 * the state machine.
				 */
				set_bit(N2N_LINK_RESET, &vha->dpc_flags);
			} else {
				ql_log(ql_log_warn, vha, 0x2119,
				    "%s %d %8phC Unable to reconnect\n",
				    __func__, __LINE__,
				    ea->fcport->port_name);
			}
		} else {
			/*
			 * Switch connect: login failed.  Take the connection
			 * down and allow relogin to retrigger.
			 */
			if (NVME_FCP_TARGET(ea->fcport)) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post %s prli\n",
				    __func__, __LINE__,
				    ea->fcport->port_name,
				    (ea->fcport->fc4_type & FS_FC4TYPE_NVME) ?
				    "NVMe" : "FCP");
				if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME)
					ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
				else
					ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
			}

			ea->fcport->flags &= ~FCF_ASYNC_SENT;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
}

/*
 * Handle completion of an async PLOGI.  Validates the login/RSCN
 * generations first, then dispatches on the mailbox status: complete
 * (post PRLI or GPDB), command error (mark lost or relogin), or
 * loop-id/port-id conflicts (resolve via GNL or session teardown).
 */
void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */
	u16 lid;
	struct fc_port *conflict_fcport;
	unsigned long flags;
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
	    ea->sp->gen1, fcport->rscn_gen,
	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
		ql_dbg(ql_dbg_disc, vha, 0x20ea,
		    "%s %d %8phC Remote is trying to login\n",
		    __func__, __LINE__, fcport->port_name);
		return;
	}

	if ((fcport->disc_state == DSC_DELETE_PEND) ||
	    (fcport->disc_state == DSC_DELETED)) {
		/* Port is being deleted; defer to a later relogin. */
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* An RSCN arrived while the PLOGI was in flight. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC RSCN generation changed\n",
		    __func__, fcport->port_name);
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
	    ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		if (NVME_TARGET(vha->hw, ea->fcport)) {
			ql_dbg(ql_dbg_disc, vha, 0x2117,
			    "%s %d %8phC post prli\n",
			    __func__, __LINE__, ea->fcport->port_name);
			qla24xx_post_prli_work(vha, ea->fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ea,
			    "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->loop_id, ea->fcport->d_id.b24);

			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			ea->fcport->send_els_logo = 0;
			ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		ea->fcport->flags &= ~FCF_ASYNC_SENT;
		qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED);
		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, ea->fcport, 1);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID  */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area   = (ea->iop[1] >>  8) & 0xff;
		cid.b.al_pa  = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id, cid.b24);

		/* Reserve the conflicting handle and pick a new one. */
		set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
		ea->fcport->loop_id = FC_NO_LOOP_ID;
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		lid = ea->iop[1] & 0xffff;
		qlt_find_sess_invalidate_other(vha,
		    wwn_to_u64(ea->fcport->port_name),
		    ea->fcport->d_id, lid, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport shares the same loop_id/nport id.
			 * Conflict fcport needs to finish cleanup before this
			 * fcport can proceed to login.
			 */
			conflict_fcport->conflict = ea->fcport;
			ea->fcport->login_pause = 1;

			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);

			/* Adopt the firmware-reported loop id and recycle. */
			qla2x00_clear_loop_id(ea->fcport);
			set_bit(lid, vha->hw->loop_id_map);
			ea->fcport->loop_id = lid;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
	return;
}

/****************************************************************************/
/*                QLogic ISP2x00 Hardware Support Functions.                */
/****************************************************************************/

/*
 * Load the NIC core firmware on ISP83xx if this function is the first
 * protocol driver: announce presence, decide reset ownership, negotiate
 * the IDC major/minor versions and run the IDC state handler — all
 * under the IDC lock.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
*/
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible
		 * with the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		memset(config, 0, sizeof(config));
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}

/*
 * qla2x00_initialize_adapter
 *	Initialize board.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int	rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	/* Check for secure flash support */
	if (IS_QLA28XX(ha)) {
		if (rd_reg_word(&reg->mailbox12) & BIT_0)
			ha->flags.secure_adapter = 1;
		ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
		    (ha->flags.secure_adapter) ? "Yes" : "No");
	}

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return rval;
	}

	if (IS_QLA8044(ha)) {
		qla8044_read_reset_template(vha);

		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * honor the reset.
		 */
		if (ql2xdontresethba == 1)
			qla8044_set_idc_dontreset(vha);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	/* Let priority default to FCP, can be overridden by nvram_config */
	ha->fc4_type_priority = FC4_PRIORITY_FCP;

	ha->isp_ops->nvram_config(vha);

	if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
	    ha->fc4_type_priority != FC4_PRIORITY_NVME)
		ha->fc4_type_priority = FC4_PRIORITY_FCP;

	ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
	    ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	/* If smartsan enabled then require fdmi and rdp enabled */
	if (ql2xsmartsan) {
		ql2xfdmienable = 1;
		ql2xrdpenable = 1;
	}

	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	/*
	 * In target-only mode init_rings() is skipped; rval is still
	 * QLA_SUCCESS (0) from the earlier checked calls above.
	 */
	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	/* No point in continuing if firmware initialization failed. */
	if (rval != QLA_SUCCESS)
		return rval;

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* Load the NIC Core f/w if we are the first protocol driver. */
	if (IS_QLA8031(ha)) {
		rval = qla83xx_nic_core_fw_load(vha);
		if (rval)
			ql_log(ql_log_warn, vha, 0x0124,
			    "Error in initializing NIC Core f/w.\n");
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	if (IS_P3P_TYPE(ha))
		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
	else
		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);

	return (rval);
}

/**
 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
*/
int
qla2100_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_word(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC. */
		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x20);
		rd_reg_word(&reg->ctrl_status);

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x0);
		rd_reg_word(&reg->ctrl_status);

		/* Release RISC module. */
		wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_word(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_dword(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}

/**
 * qla2x00_isp_firmware() - Choose firmware image.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_isp_firmware(scsi_qla_host_t *vha)
{
	int  rval;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;

	/* Assume loading risc code */
	rval = QLA_FUNCTION_FAILED;

	if (ha->flags.disable_risc_code_load) {
		ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");

		/* Verify checksum of loaded RISC code. */
		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
		if (rval == QLA_SUCCESS) {
			/* And, verify we are not in ROM code. */
			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			    &area, &domain, &topo, &sw_cap);
		}
	}

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x007a,
		    "**** Load RISC code ****.\n");

	return (rval);
}

/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long   flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t	cnt;
	uint16_t	cmd;
	int rval = QLA_FUNCTION_FAILED;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return rval;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC.
*/
		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((rd_reg_word(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			rd_reg_word(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x20);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		wrt_reg_word(&reg->fpm_diag_config, 0x100);
		rd_reg_word(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			wrt_reg_word(&reg->fpm_diag_config, 0x0);
			rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		wrt_reg_word(&reg->ctrl_status, 0x10);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		wrt_reg_word(&reg->ctrl_status, 0);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */
	}

	wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
	wrt_reg_word(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to have a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((rd_reg_word(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);

	wrt_reg_word(&reg->semaphore, 0);

	/* Release RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
	rd_reg_word(&reg->hccr);			/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		wrt_reg_word(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/**
 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla81xx_reset_mpi(scsi_qla_host_t *vha)
{
	uint16_t mb[4] = {0x1010, 0, 1, 0};

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	return qla81xx_write_mpi_register(vha, mb);
}

/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	static int abts_cnt; /* ISP abort retry counts */
	int rval = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC. */
	wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_dword(&reg->ctrl_status),
	    (rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	wrt_reg_dword(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. */
	rd_reg_word(&reg->mailbox0);
	for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_word(&reg->mailbox0));

	/* Wait for soft-reset to complete. */
	rd_reg_dword(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((rd_reg_dword(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_dword(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			if (++abts_cnt < 5) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have
				 * to set the board offline.
*/
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	/* Toggle the RISC reset/pause bits to release the RISC. */
	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
	rd_reg_dword(&reg->hccr);

	wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	rd_reg_dword(&reg->hccr);

	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
	rd_reg_dword(&reg->hccr);

	/* Wait for mailbox0 to clear, i.e. RISC ready after reset. */
	rd_reg_word(&reg->mailbox0);
	for (cnt = 60; rd_reg_word(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_word(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}

/* Read a RISC semaphore register through the ISP24xx iobase window. */
static void
qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	*data = rd_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
}

/* Write a RISC semaphore register through the ISP24xx iobase window. */
static void
qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	wrt_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
}

/*
 * Acquire (forcibly, if necessary) the RISC semaphore before reset on
 * specific subsystem IDs (0x0175/0x0240).  Pauses the RISC, then loops
 * trying to set the semaphore; on repeated failure or force-bit
 * contention, the semaphore is force-set.
 */
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	/* Force bit is set: clear and wait for it to drop, then retry. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	goto attempt;

force:
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}

/**
 * qla24xx_reset_chip() - Reset ISP24xx chip.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_reset_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (pci_channel_offline(ha->pdev) &&
	    ha->flags.pci_channel_io_perm_failure) {
		return rval;
	}

	ha->isp_ops->disable_intrs(ha);

	qla25xx_manipulate_risc_semaphore(vha);

	/* Perform RISC reset. */
	rval = qla24xx_reset_risc(vha);

	return rval;
}

/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int		rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long	flags = 0;
	uint16_t	data;
	uint32_t	cnt;
	uint16_t	mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
	    &reg->flash_address);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
*/
	udelay(20);
	data = qla2x00_debounce_register(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = rd_reg_word(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	ql_dbg(ql_dbg_init, vha, 0x007c,
	    "Reset register cleared by chip reset.\n");

	/* Reset RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		ql_log(ql_log_warn, vha, 0x0062,
		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
		    mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
	rval = qla2x00_mbx_reg_test(vha);
	if (rval)
		ql_log(ql_log_warn, vha, 0x0080,
		    "Failed mailbox send register test.\n");
	else
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		ql_log(ql_log_info, vha, 0x0081,
		    "Chip diagnostics **** FAILED ****.\n");

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}

/**
 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	if (IS_P3P_TYPE(ha))
		return QLA_SUCCESS;

	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

	rval = qla2x00_mbx_reg_test(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0082,
		    "Failed mailbox send register test.\n");
	} else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}

	return rval;
}

/*
 * Allocate and enable the Fibre Channel Event (FCE) trace buffer on
 * FWI2-capable adapters that support it.  No-op if already allocated;
 * the DMA buffer is freed again if enabling the trace fails.
 */
static void
qla2x00_init_fce_trace(scsi_qla_host_t *vha)
{
	int rval;
	dma_addr_t tc_dma;
	void *tc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_FWI2_CAPABLE(ha))
		return;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return;

	if (ha->fce) {
		ql_dbg(ql_dbg_init, vha, 0x00bd,
		    "%s: FCE Mem is already allocated.\n",
		    __func__);
		return;
	}

	/* Allocate memory for Fibre Channel Event Buffer.
		    sizeof(struct qla2100_fw_dump);
	} else if (IS_QLA23XX(ha)) {
		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
		    sizeof(uint16_t);
	} else if (IS_FWI2_CAPABLE(ha)) {
		/* Pick the per-generation fixed header layout. */
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
		else if (IS_QLA81XX(ha))
			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
		else if (IS_QLA25XX(ha))
			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
		else
			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);

		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
		    sizeof(uint32_t);
		if (ha->mqenable) {
			if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
			    !IS_QLA28XX(ha))
				mq_size = sizeof(struct qla2xxx_mq_chain);
			/*
			 * Allocate maximum buffer size for all queues - Q0.
			 * Resizing must be done at end-of-dump processing.
			 */
			mq_size += (ha->max_req_queues - 1) *
			    (req->length * sizeof(request_t));
			mq_size += (ha->max_rsp_queues - 1) *
			    (rsp->length * sizeof(response_t));
		}
		if (ha->tgt.atio_ring)
			mq_size += ha->tgt.atio_q_length * sizeof(request_t);

		/*
		 * Ensure trace buffers exist before sizing, since their
		 * presence adds chain sections to the dump.
		 */
		qla2x00_init_fce_trace(vha);
		if (ha->fce)
			fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
		qla2x00_init_eft_trace(vha);
		if (ha->eft)
			eft_size = EFT_SIZE;
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		/* Template-driven dumps: size comes from the fw templates. */
		struct fwdt *fwdt = ha->fwdt;
		uint j;

		for (j = 0; j < 2; j++, fwdt++) {
			if (!fwdt->template) {
				ql_dbg(ql_dbg_init, vha, 0x00ba,
				    "-> fwdt%u no template\n", j);
				continue;
			}
			ql_dbg(ql_dbg_init, vha, 0x00fa,
			    "-> fwdt%u calculating fwdump size...\n", j);
			fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
			    vha, fwdt->template);
			ql_dbg(ql_dbg_init, vha, 0x00fa,
			    "-> fwdt%u calculated fwdump size = %#lx bytes\n",
			    j, fwdt->dump_size);
			dump_size += fwdt->dump_size;
		}
		/* Add space for spare MPI fw dump. */
		dump_size += ha->fwdt[1].dump_size;
	} else {
		req_q_size = req->length * sizeof(request_t);
		rsp_q_size = rsp->length * sizeof(response_t);
		dump_size = offsetof(struct qla2xxx_fw_dump, isp);
		dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
		    + eft_size;
		ha->chain_offset = dump_size;
		dump_size += mq_size + fce_size;

		if (ha->exchoffld_buf)
			dump_size += sizeof(struct qla2xxx_offld_chain) +
				ha->exchoffld_size;
		if (ha->exlogin_buf)
			dump_size += sizeof(struct qla2xxx_offld_chain) +
				ha->exlogin_size;
	}

	/* (Re)allocate only when there is no buffer or it is too small. */
	if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
		ql_dbg(ql_dbg_init, vha, 0x00c5,
		    "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
		    __func__, dump_size, ha->fw_dump_len,
		    ha->fw_dump_alloc_len);

		fw_dump = vmalloc(dump_size);
		if (!fw_dump) {
			ql_log(ql_log_warn, vha, 0x00c4,
			    "Unable to allocate (%d KB) for firmware dump.\n",
			    dump_size / 1024);
		} else {
			/* optrom_mutex serializes access to ha->fw_dump. */
			mutex_lock(&ha->optrom_mutex);
			if (ha->fw_dumped) {
				/* Preserve an already-captured dump. */
				memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
				vfree(ha->fw_dump);
				ha->fw_dump = fw_dump;
				ha->fw_dump_alloc_len = dump_size;
				ql_dbg(ql_dbg_init, vha, 0x00c5,
				    "Re-Allocated (%d KB) and save firmware dump.\n",
				    dump_size / 1024);
			} else {
				vfree(ha->fw_dump);
				ha->fw_dump = fw_dump;
				ha->fw_dump_len = ha->fw_dump_alloc_len =
				    dump_size;
				ql_dbg(ql_dbg_init, vha, 0x00c5,
				    "Allocated (%d KB) for firmware dump.\n",
				    dump_size / 1024);

				if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
					/* Spare MPI dump lives at the tail. */
					ha->mpi_fw_dump = (char *)fw_dump +
						ha->fwdt[1].dump_size;
					mutex_unlock(&ha->optrom_mutex);
					return;
				}

				/* Legacy dump header ("QLGC" + sizes). */
				ha->fw_dump->signature[0] = 'Q';
				ha->fw_dump->signature[1] = 'L';
				ha->fw_dump->signature[2] = 'G';
				ha->fw_dump->signature[3] = 'C';
				ha->fw_dump->version = htonl(1);

				ha->fw_dump->fixed_size = htonl(fixed_size);
				ha->fw_dump->mem_size = htonl(mem_size);
				ha->fw_dump->req_q_size = htonl(req_q_size);
				ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

				ha->fw_dump->eft_size = htonl(eft_size);
				ha->fw_dump->eft_addr_l =
				    htonl(LSD(ha->eft_dma));
				ha->fw_dump->eft_addr_h =
				    htonl(MSD(ha->eft_dma));
				ha->fw_dump->header_size =
				    htonl(offsetof(struct qla2xxx_fw_dump,
					isp));
			}
			mutex_unlock(&ha->optrom_mutex);
		}
	}
}

/*
 * qla81xx_mpi_sync() - Synchronize the MPS field of RAM word 0x7a15 with
 * the value read from PCI config space (offset 0x54), under the firmware
 * semaphore at RAM word 0x7c00.  81xx-only; no-op on other parts.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK	0xe0
	int rval;
	uint16_t dc;
	uint32_t dw;

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	/* Acquire the firmware semaphore before touching 0x7a15. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	dc &= MPS_MASK;
	/* Already in sync -- nothing to write back. */
	if (dc == (dw & MPS_MASK))
		goto done_release;

	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return rval;
}

/*
 * qla2x00_alloc_outstanding_cmds() - Size and allocate the per-queue
 * outstanding-commands array from the firmware's advertised XCB/IOCB
 * counts (or the legacy default on pre-FWI2 parts).  Idempotent: an
 * existing array is left untouched.
 */
int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
{
	/* Don't try to reallocate the array */
	if (req->outstanding_cmds)
		return QLA_SUCCESS;

	if (!IS_FWI2_CAPABLE(ha))
		req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
	else {
		/* Use the smaller of the two firmware resource counts. */
		if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
			req->num_outstanding_cmds = ha->cur_fw_xcb_count;
		else
			req->num_outstanding_cmds = ha->cur_fw_iocb_count;
	}

	req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
	    sizeof(srb_t *), GFP_KERNEL);

	if (!req->outstanding_cmds) {
		/*
		 * Try to allocate a minimal size just so we can get through
		 * initialization.
*/ req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS; req->outstanding_cmds = kcalloc(req->num_outstanding_cmds, sizeof(srb_t *), GFP_KERNEL); if (!req->outstanding_cmds) { ql_log(ql_log_fatal, NULL, 0x0126, "Failed to allocate memory for " "outstanding_cmds for req_que %p.\n", req); req->num_outstanding_cmds = 0; return QLA_FUNCTION_FAILED; } } return QLA_SUCCESS; } #define PRINT_FIELD(_field, _flag, _str) { \ if (a0->_field & _flag) {\ if (p) {\ strcat(ptr, "|");\ ptr++;\ leftover--;\ } \ len = snprintf(ptr, leftover, "%s", _str); \ p = 1;\ leftover -= len;\ ptr += len; \ } \ } static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha) { #define STR_LEN 64 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data; u8 str[STR_LEN], *ptr, p; int leftover, len; memset(str, 0, STR_LEN); snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name); ql_dbg(ql_dbg_init, vha, 0x015a, "SFP MFG Name: %s\n", str); memset(str, 0, STR_LEN); snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn); ql_dbg(ql_dbg_init, vha, 0x015c, "SFP Part Name: %s\n", str); /* media */ memset(str, 0, STR_LEN); ptr = str; leftover = STR_LEN; p = len = 0; PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX"); PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair"); PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax"); PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax"); PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um"); PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um"); PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode"); ql_dbg(ql_dbg_init, vha, 0x0160, "SFP Media: %s\n", str); /* link length */ memset(str, 0, STR_LEN); ptr = str; leftover = STR_LEN; p = len = 0; PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long"); PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short"); PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate"); PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long"); PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium"); ql_dbg(ql_dbg_init, vha, 0x0196, "SFP Link Length: %s\n", str); memset(str, 0, STR_LEN); ptr = str; leftover = STR_LEN; p = len = 0; 
PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)"); PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)"); PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)"); PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)"); PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)"); ql_dbg(ql_dbg_init, vha, 0x016e, "SFP FC Link Tech: %s\n", str); if (a0->length_km) ql_dbg(ql_dbg_init, vha, 0x016f, "SFP Distant: %d km\n", a0->length_km); if (a0->length_100m) ql_dbg(ql_dbg_init, vha, 0x0170, "SFP Distant: %d m\n", a0->length_100m*100); if (a0->length_50um_10m) ql_dbg(ql_dbg_init, vha, 0x0189, "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10); if (a0->length_62um_10m) ql_dbg(ql_dbg_init, vha, 0x018a, "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10); if (a0->length_om4_10m) ql_dbg(ql_dbg_init, vha, 0x0194, "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10); if (a0->length_om3_10m) ql_dbg(ql_dbg_init, vha, 0x0195, "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10); } /** * qla24xx_detect_sfp() * * @vha: adapter state pointer. * * @return * 0 -- Configure firmware to use short-range settings -- normal * buffer-to-buffer credits. * * 1 -- Configure firmware to use long-range settings -- extra * buffer-to-buffer credits should be allocated with * ha->lr_distance containing distance settings from NVRAM or SFP * (if supported). */ int qla24xx_detect_sfp(scsi_qla_host_t *vha) { int rc, used_nvram; struct sff_8247_a0 *a; struct qla_hw_data *ha = vha->hw; struct nvram_81xx *nv = ha->nvram; #define LR_DISTANCE_UNKNOWN 2 static const char * const types[] = { "Short", "Long" }; static const char * const lengths[] = { "(10km)", "(5km)", "" }; u8 ll = 0; /* Seed with NVRAM settings. 
*/
	used_nvram = 0;
	ha->flags.lr_detected = 0;
	if (IS_BPM_RANGE_CAPABLE(ha) &&
	    (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
		used_nvram = 1;
		ha->flags.lr_detected = 1;
		/* Distance encoded in the NVRAM enhanced-features field. */
		ha->lr_distance =
		    (nv->enhanced_features >> LR_DIST_NV_POS)
		     & LR_DIST_NV_MASK;
	}

	if (!IS_BPM_ENABLED(vha))
		goto out;
	/* Determine SR/LR capabilities of SFP/Transceiver. */
	rc = qla2x00_read_sfp_dev(vha, NULL, 0);
	if (rc)
		goto out;

	/* SFP data overrides the NVRAM seed from here on. */
	used_nvram = 0;
	a = (struct sff_8247_a0 *)vha->hw->sfp_data;
	qla2xxx_print_sfp_info(vha);

	ha->flags.lr_detected = 0;
	ll = a->fc_ll_cc7;
	if (ll & FC_LL_VL || ll & FC_LL_L) {
		/* Long range, track length. */
		ha->flags.lr_detected = 1;

		if (a->length_km > 5 || a->length_100m > 50)
			ha->lr_distance = LR_DISTANCE_10K;
		else
			ha->lr_distance = LR_DISTANCE_5K;
	}

out:
	ql_dbg(ql_dbg_async, vha, 0x507b,
	    "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
	    types[ha->flags.lr_detected],
	    ha->flags.lr_detected ? lengths[ha->lr_distance] :
	       lengths[LR_DISTANCE_UNKNOWN],
	    used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
	return ha->flags.lr_detected;
}

/*
 * qla_init_iocb_limit() - Distribute the firmware IOCB budget across the
 * base queue pair and all additional queue pairs.  The global limit is
 * QLA_IOCB_PCT_LIMIT percent of the original firmware IOCB count; each
 * qpair additionally gets an equal per-qpair share.
 */
void qla_init_iocb_limit(scsi_qla_host_t *vha)
{
	u16 i, num_qps;
	u32 limit;
	struct qla_hw_data *ha = vha->hw;

	/* +1 accounts for the base qpair alongside ha->num_qpairs. */
	num_qps = ha->num_qpairs + 1;
	limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;

	ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
	ha->base_qpair->fwres.iocbs_limit = limit;
	ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
	ha->base_qpair->fwres.iocbs_used = 0;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i]) {
			ha->queue_pair_map[i]->fwres.iocbs_total =
				ha->orig_fw_iocb_count;
			ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
			ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
				limit / num_qps;
			ha->queue_pair_map[i]->fwres.iocbs_used = 0;
		}
	}
}

/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;
	int done_once = 0;	/* guards the single BPM-triggered restart */

	if (IS_P3P_TYPE(ha)) {
		/* 82xx: load, stop, then jump straight to NPIV setup. */
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		rd_reg_word(&reg->hccr);	/* flush posted write */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	qla81xx_mpi_sync(vha);

execute_fw_with_lr:
	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS) {
				/*
				 * Enable BPM support?  A long-range SFP
				 * requires a one-time chip reset and firmware
				 * reload with long-range settings.
				 */
				if (!done_once++ && qla24xx_detect_sfp(vha)) {
					ql_dbg(ql_dbg_init, vha, 0x00ca,
					    "Re-starting firmware -- BPM.\n");
					/* Best-effort - re-init. */
					ha->isp_ops->reset_chip(vha);
					ha->isp_ops->chip_diag(vha);
					goto execute_fw_with_lr;
				}

				if (IS_ZIO_THRESHOLD_CAPABLE(ha))
					qla27xx_set_zio_threshold(vha,
					    ha->last_zio_threshold);

				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

enable_82xx_npiv:
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
				    (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					/* Clamp to a fabric-friendly count. */
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);
				qla_init_iocb_limit(vha);

				/*
				 * Allocate the array of outstanding commands
				 * now that we know the firmware resources.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				if (!fw_major_version && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_offload_mem(vha);

				if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);

			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}

		/* Enable PUREX PASSTHRU */
		if (ql2xrdpenable || ha->flags.scm_supported_f)
			qla25xx_set_els_cmds_supported(vha);
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		rd_reg_word(&reg->hccr);	/* flush posted write */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);

			/* Missing FAC is not fatal on 83xx/27xx/28xx. */
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
			    IS_QLA28XX(ha)) {
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}
failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return (rval);
}

/**
 * qla2x00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qla2x00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	rsp->ring_ptr = rsp->ring;
	rsp->ring_index = 0;
	rsp->status_srb = NULL;
	pkt = rsp->ring_ptr;
	/* Mark every slot processed so stale entries are never consumed. */
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		pkt++;
	}
}

/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	/* 2100/2200 have no serial-link or extended option handling. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options.
*/
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings: unpack swing/emphasis/sensitivity bitfields. */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings: same layout, taken from the 2G option bytes. */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
		    __func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}

/*
 * qla24xx_update_fw_options() - ISP24xx+ variant: adjust firmware option
 * words already read into ha->fw_options and push them to the firmware,
 * then update serdes parameters when enabled in NVRAM.
 */
void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (IS_P3P_TYPE(ha))
		return;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
		    __func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		/*
		 * Tell FW to track each exchange to prevent
		 * driver from using stale exchange.
		 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~BIT_4;

		/* Reserve 1/2 of emergency exchanges for ELS.*/
		if (qla2xuseresexchforels)
			ha->fw_options[2] |= BIT_8;
		else
			ha->fw_options[2] &= ~BIT_8;
	}

	if (ql2xrdpenable || ha->flags.scm_supported_f)
		ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;

	/* Enable Async 8130/8131 events -- transceiver insertion/removal */
	if (IS_BPM_RANGE_CAPABLE(ha))
		ha->fw_options[3] |= BIT_10;

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	/* Only issue the mailbox command when something is actually set. */
	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}

/*
 * qla2x00_config_rings() - Program request/response ring parameters into
 * the legacy init control block and zero the ring index registers.
 */
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block.
*/
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
	put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);

	/* Reset the ring index registers to the start of the rings. */
	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
	rd_reg_word(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
}

/*
 * qla24xx_config_rings() - ISP24xx+ variant: program request/response and
 * ATIO ring parameters into the 24xx init control block, configure MQ/MSI-X
 * related option bits, and zero the queue index registers.
 */
void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = cpu_to_le16(0);
	icb->response_q_inpointer = cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &icb->request_q_address);
	put_unaligned_le64(rsp->dma, &icb->response_q_address);

	/* Setup ATIO queue dma pointers for target mode */
	icb->atio_q_inpointer = cpu_to_le16(0);
	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
	put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);

	if (IS_SHADOW_REG_CAPABLE(ha))
		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		/* Multi-queue path: program QOS/RID and MSI-X vector. */
		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			msix = &ha->msix_entries[1];
			ql_dbg(ql_dbg_init, vha, 0x0019,
			    "Registering vector 0x%x for base que.\n",
			    msix->entry);
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
		    (ha->flags.msix_enabled)) {
			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			ql_dbg(ql_dbg_init, vha, 0x00fe,
			    "MSIX Handshake Disable Mode turned on.\n");
		} else {
			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);

		wrt_reg_dword(&reg->isp25mq.req_q_in, 0);
		wrt_reg_dword(&reg->isp25mq.req_q_out, 0);
		wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0);
		wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0);
	} else {
		wrt_reg_dword(&reg->isp24.req_q_in, 0);
		wrt_reg_dword(&reg->isp24.req_q_out, 0);
		wrt_reg_dword(&reg->isp24.rsp_q_in, 0);
		wrt_reg_dword(&reg->isp24.rsp_q_out, 0);
	}

	qlt_24xx_config_rings(vha);

	/* If the user has configured the speed, set it here */
	if (ha->set_data_rate) {
		ql_dbg(ql_dbg_init, vha, 0x00fd,
		    "Speed set by user : %s Gbps \n",
		    qla2x00_get_link_speed_str(ha, ha->set_data_rate));
		icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
	}

	/* PCI posting */
	rd_reg_word(&ioreg->hccr);
}

/**
 * qla2x00_init_rings() - Initializes firmware.
 * @vha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array.
*/ for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; if (!req || !test_bit(que, ha->req_qid_map)) continue; req->out_ptr = (uint16_t *)(req->ring + req->length); *req->out_ptr = 0; for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) req->outstanding_cmds[cnt] = NULL; req->current_outstanding_cmd = 1; /* Initialize firmware. */ req->ring_ptr = req->ring; req->ring_index = 0; req->cnt = req->length; } for (que = 0; que < ha->max_rsp_queues; que++) { rsp = ha->rsp_q_map[que]; if (!rsp || !test_bit(que, ha->rsp_qid_map)) continue; rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length); *rsp->in_ptr = 0; /* Initialize response queue entries */ if (IS_QLAFX00(ha)) qlafx00_init_response_q_entries(rsp); else qla2x00_init_response_q_entries(rsp); } ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; ha->tgt.atio_ring_index = 0; /* Initialize ATIO queue entries */ qlt_init_atio_q_entries(vha); ha->isp_ops->config_rings(vha); spin_unlock_irqrestore(&ha->hardware_lock, flags); ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n"); if (IS_QLAFX00(ha)) { rval = qlafx00_init_firmware(vha, ha->init_cb_size); goto next_check; } /* Update any ISP specific firmware options before initialization. */ ha->isp_ops->update_fw_options(vha); if (ha->flags.npiv_supported) { if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha)) ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); } if (IS_FWI2_CAPABLE(ha)) { mid_init_cb->options = cpu_to_le16(BIT_1); mid_init_cb->init_cb.execution_throttle = cpu_to_le16(ha->cur_fw_xcb_count); ha->flags.dport_enabled = (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) & BIT_7) != 0; ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n", (ha->flags.dport_enabled) ? 
"enabled" : "disabled"); /* FA-WWPN Status */ ha->flags.fawwpn_enabled = (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) & BIT_6) != 0; ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n", (ha->flags.fawwpn_enabled) ? "enabled" : "disabled"); } rval = qla2x00_init_firmware(vha, ha->init_cb_size); next_check: if (rval) { ql_log(ql_log_fatal, vha, 0x00d2, "Init Firmware **** FAILED ****.\n"); } else { ql_dbg(ql_dbg_init, vha, 0x00d3, "Init Firmware -- success.\n"); QLA_FW_STARTED(ha); vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0; } return (rval); } /** * qla2x00_fw_ready() - Waits for firmware ready. * @vha: HA context * * Returns 0 on success. */ static int qla2x00_fw_ready(scsi_qla_host_t *vha) { int rval; unsigned long wtime, mtime, cs84xx_time; uint16_t min_wait; /* Minimum wait time if loop is down */ uint16_t wait_time; /* Wait time if loop is coming ready */ uint16_t state[6]; struct qla_hw_data *ha = vha->hw; if (IS_QLAFX00(vha->hw)) return qlafx00_fw_ready(vha); rval = QLA_SUCCESS; /* Time to wait for loop down */ if (IS_P3P_TYPE(ha)) min_wait = 30; else min_wait = 20; /* * Firmware should take at most one RATOV to login, plus 5 seconds for * our own processing. 
*/ if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) { wait_time = min_wait; } /* Min wait time if loop down */ mtime = jiffies + (min_wait * HZ); /* wait time before firmware ready */ wtime = jiffies + (wait_time * HZ); /* Wait for ISP to finish LIP */ if (!vha->flags.init_done) ql_log(ql_log_info, vha, 0x801e, "Waiting for LIP to complete.\n"); do { memset(state, -1, sizeof(state)); rval = qla2x00_get_firmware_state(vha, state); if (rval == QLA_SUCCESS) { if (state[0] < FSTATE_LOSS_OF_SYNC) { vha->device_flags &= ~DFLG_NO_CABLE; } if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { ql_dbg(ql_dbg_taskm, vha, 0x801f, "fw_state=%x 84xx=%x.\n", state[0], state[2]); if ((state[2] & FSTATE_LOGGED_IN) && (state[2] & FSTATE_WAITING_FOR_VERIFY)) { ql_dbg(ql_dbg_taskm, vha, 0x8028, "Sending verify iocb.\n"); cs84xx_time = jiffies; rval = qla84xx_init_chip(vha); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x8007, "Init chip failed.\n"); break; } /* Add time taken to initialize. */ cs84xx_time = jiffies - cs84xx_time; wtime += cs84xx_time; mtime += cs84xx_time; ql_dbg(ql_dbg_taskm, vha, 0x8008, "Increasing wait time by %ld. " "New time %ld.\n", cs84xx_time, wtime); } } else if (state[0] == FSTATE_READY) { ql_dbg(ql_dbg_taskm, vha, 0x8037, "F/W Ready - OK.\n"); qla2x00_get_retry_cnt(vha, &ha->retry_count, &ha->login_timeout, &ha->r_a_tov); rval = QLA_SUCCESS; break; } rval = QLA_FUNCTION_FAILED; if (atomic_read(&vha->loop_down_timer) && state[0] != FSTATE_READY) { /* Loop down. Timeout on min_wait for states * other than Wait for Login. */ if (time_after_eq(jiffies, mtime)) { ql_log(ql_log_info, vha, 0x8038, "Cable is unplugged...\n"); vha->device_flags |= DFLG_NO_CABLE; break; } } } else { /* Mailbox cmd failed. Timeout on min_wait. 
*/ if (time_after_eq(jiffies, mtime) || ha->flags.isp82xx_fw_hung) break; } if (time_after_eq(jiffies, wtime)) break; /* Delay for a while */ msleep(500); } while (1); ql_dbg(ql_dbg_taskm, vha, 0x803a, "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0], state[1], state[2], state[3], state[4], state[5], jiffies); if (rval && !(vha->device_flags & DFLG_NO_CABLE)) { ql_log(ql_log_warn, vha, 0x803b, "Firmware ready **** FAILED ****.\n"); } return (rval); } /* * qla2x00_configure_hba * Setup adapter context. * * Input: * ha = adapter state pointer. * * Returns: * 0 = success * * Context: * Kernel context. */ static int qla2x00_configure_hba(scsi_qla_host_t *vha) { int rval; uint16_t loop_id; uint16_t topo; uint16_t sw_cap; uint8_t al_pa; uint8_t area; uint8_t domain; char connect_type[22]; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); port_id_t id; unsigned long flags; /* Get host addresses. */ rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); if (rval != QLA_SUCCESS) { if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || IS_CNA_CAPABLE(ha) || (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { ql_dbg(ql_dbg_disc, vha, 0x2008, "Loop is in a transition state.\n"); } else { ql_log(ql_log_warn, vha, 0x2009, "Unable to get host loop ID.\n"); if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) && (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) { ql_log(ql_log_warn, vha, 0x1151, "Doing link init.\n"); if (qla24xx_link_initialize(vha) == QLA_SUCCESS) return rval; } set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } return (rval); } if (topo == 4) { ql_log(ql_log_info, vha, 0x200a, "Cannot get topology - retrying.\n"); return (QLA_FUNCTION_FAILED); } vha->loop_id = loop_id; /* initialize */ ha->min_external_loopid = SNS_FIRST_LOOP_ID; ha->operating_mode = LOOP; ha->switch_cap = 0; switch (topo) { case 0: ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n"); ha->current_topology = 
ISP_CFG_NL; strcpy(connect_type, "(Loop)"); break; case 1: ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n"); ha->switch_cap = sw_cap; ha->current_topology = ISP_CFG_FL; strcpy(connect_type, "(FL_Port)"); break; case 2: ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n"); ha->operating_mode = P2P; ha->current_topology = ISP_CFG_N; strcpy(connect_type, "(N_Port-to-N_Port)"); break; case 3: ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n"); ha->switch_cap = sw_cap; ha->operating_mode = P2P; ha->current_topology = ISP_CFG_F; strcpy(connect_type, "(F_Port)"); break; default: ql_dbg(ql_dbg_disc, vha, 0x200f, "HBA in unknown topology %x, using NL.\n", topo); ha->current_topology = ISP_CFG_NL; strcpy(connect_type, "(Loop)"); break; } /* Save Host port and loop ID. */ /* byte order - Big Endian */ id.b.domain = domain; id.b.area = area; id.b.al_pa = al_pa; id.b.rsvd_1 = 0; spin_lock_irqsave(&ha->hardware_lock, flags); if (!(topo == 2 && ha->flags.n2n_bigger)) qlt_update_host_map(vha, id); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (!vha->flags.init_done) ql_log(ql_log_info, vha, 0x2010, "Topology - %s, Host Loop address 0x%x.\n", connect_type, vha->loop_id); return(rval); } inline void qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, const char *def) { char *st, *en; uint16_t index; uint64_t zero[2] = { 0 }; struct qla_hw_data *ha = vha->hw; int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha); if (len > sizeof(zero)) len = sizeof(zero); if (memcmp(model, &zero, len) != 0) { memcpy(ha->model_number, model, len); st = en = ha->model_number; en += len - 1; while (en > st) { if (*en != 0x20 && *en != 0x00) break; *en-- = '\0'; } index = (ha->pdev->subsystem_device & 0xff); if (use_tbl && ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && index < QLA_MODEL_NAMES) strlcpy(ha->model_desc, qla2x00_model_name[index * 2 + 1], sizeof(ha->model_desc)); } else { index = 
(ha->pdev->subsystem_device & 0xff);
		/* No NVRAM model: look up by PCI subsystem device ID. */
		if (use_tbl &&
		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    index < QLA_MODEL_NAMES) {
			strlcpy(ha->model_number,
			    qla2x00_model_name[index * 2],
			    sizeof(ha->model_number));
			strlcpy(ha->model_desc,
			    qla2x00_model_name[index * 2 + 1],
			    sizeof(ha->model_desc));
		} else {
			/* Fall back to the caller-supplied default name. */
			strlcpy(ha->model_number, def,
			    sizeof(ha->model_number));
		}
	}
	/*
	 * Let the VPD field 0x82 (per PCI VPD, the identifier string)
	 * override the model description when available.
	 */
	if (IS_FWI2_CAPABLE(ha))
		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
		    sizeof(ha->model_desc));
}

/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 */
static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}

/*
 * NVRAM configuration for ISP 2xxx
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Output:
 *	initialization control block in response_ring
 *	host adapters parameters in host adapter block
 *
 * Returns:
 *	0 = success.
 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t chksum = 0;
	uint16_t cnt;
	uint8_t *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t *icb = ha->init_cb;
	nvram_t *nv = ha->nvram;
	uint8_t *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->nvram_base = 0;
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		/* Function 1 of a dual-function part uses the upper half. */
		if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum.
*/
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	/* NVRAM is valid when the byte-wise sum over the image is zero. */
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
	    "Contents of NVRAM.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x0064,
		    "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x0065,
		    "Falling back to "
		    "functioning (yet invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(2048);
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(1024);
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = cpu_to_le16(1024);
		}

		nv->max_iocb_allocation = cpu_to_le16(256);
		nv->execution_throttle = cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		/* Fabricate a (known-invalid) default WWPN. */
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = cpu_to_le16(8);
		nv->link_down_timeout = 60;

		/* Remember that defaults were used so we can warn at exit. */
		rval = 1;
	}

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->special_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 -
	    (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);

	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */

	/*
	 * BIT_7 in the host-parameters section allows for modification to
	 * internal driver logging.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
			    icb->interrupt_delay_timer : 2;
		}
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		vha->flags.process_response_queue = 0;
		if (ha->zio_mode != QLA_ZIO_DISABLED) {
			ha->zio_mode = QLA_ZIO_MODE_6;

			ql_log(ql_log_info, vha, 0x0068,
			    "ZIO mode %d enabled; timer delay (%d us).\n",
			    ha->zio_mode, ha->zio_timer * 100);

			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
			vha->flags.process_response_queue = 1;
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0069,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}

/*
 * Detach and delete the FC transport rport associated with an fcport;
 * prefers the "deferred removal" rport (drport) when one is pending.
 */
static void
qla2x00_rport_del(void *data)
{
	fc_port_t *fcport = data;
	struct fc_rport *rport;
	unsigned long flags;

	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	rport = fcport->drport ? fcport->drport : fcport->rport;
	fcport->drport = NULL;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
	if (rport) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
		    "%s %8phN. rport %p roles %x\n",
		    __func__, fcport->port_name, rport,
		    rport->roles);

		fc_remote_port_delete(rport);
	}
}

/* Atomically update an fcport's state and log the transition. */
void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
	int old_state;

	old_state = atomic_read(&fcport->state);
	atomic_set(&fcport->state, state);

	/* Don't print state transitions during initial allocation of fcport */
	if (old_state && old_state != state) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
		    "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
		    fcport->port_name, port_state_str[old_state],
		    port_state_str[state], fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);
	}
}

/**
 * qla2x00_alloc_fcport() - Allocate a generic fcport.
 * @vha: HA context
 * @flags: allocation flags
 *
 * Returns a pointer to the allocated fcport, or NULL, if none available.
*/
fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
{
	fc_port_t *fcport;

	fcport = kzalloc(sizeof(fc_port_t), flags);
	if (!fcport)
		return NULL;

	/* Per-port CT/SNS request buffer, DMA-able for firmware access. */
	fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
	    sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
	    flags);
	if (!fcport->ct_desc.ct_sns) {
		ql_log(ql_log_warn, vha, 0xd049,
		    "Failed to allocate ct_sns request.\n");
		kfree(fcport);
		return NULL;
	}

	/* Setup fcport template structure. */
	fcport->vha = vha;
	fcport->port_type = FCT_UNKNOWN;
	fcport->loop_id = FC_NO_LOOP_ID;
	qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	fcport->supported_classes = FC_COS_UNSPECIFIED;
	fcport->fp_speed = PORT_SPEED_UNKNOWN;

	fcport->disc_state = DSC_DELETED;
	fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
	fcport->deleted = QLA_SESS_DELETED;
	fcport->login_retry = vha->hw->login_retry_count;
	fcport->chip_reset = vha->hw->base_qpair->chip_reset;
	fcport->logout_on_delete = 1;
	fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
	fcport->tgt_short_link_down_cnt = 0;
	fcport->dev_loss_tmo = 0;

	/*
	 * NOTE: the redundant second "if (!fcport->ct_desc.ct_sns)" check
	 * that used to live here was removed: ct_sns is NULL-checked (with
	 * an early return) immediately after dma_alloc_coherent() above, so
	 * it cannot be NULL at this point.
	 */
	INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
	INIT_WORK(&fcport->free_work, qlt_free_session_done);
	INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
	INIT_LIST_HEAD(&fcport->gnl_entry);
	INIT_LIST_HEAD(&fcport->list);
	INIT_LIST_HEAD(&fcport->sess_cmd_list);
	spin_lock_init(&fcport->sess_cmd_lock);

	return fcport;
}

/*
 * Release an fcport: free its CT/SNS DMA buffer, unlink it from the
 * vha->vp_fcports list, release its loop ID and free the structure.
 */
void
qla2x00_free_fcport(fc_port_t *fcport)
{
	if (fcport->ct_desc.ct_sns) {
		dma_free_coherent(&fcport->vha->hw->pdev->dev,
			sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
			fcport->ct_desc.ct_sns_dma);

		fcport->ct_desc.ct_sns = NULL;
	}
	list_del(&fcport->list);
	qla2x00_clear_loop_id(fcport);
	kfree(fcport);
}

/*
 * Fetch the firmware's PLOGI payload template and cache a big-endian
 * copy of it for later ELS use; sets flags.plogi_template_valid.
 */
static void qla_get_login_template(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval;
	u32 *bp, sz;
	__be32 *q;

	memset(ha->init_cb, 0, ha->init_cb_size);
	sz
= min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
	rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
	    ha->init_cb, sz);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00d1,
		    "PLOGI ELS param read fail.\n");
		return;
	}
	q = (__be32 *)&ha->plogi_els_payld.fl_csp;

	/* Convert the template to big-endian wire order in place. */
	bp = (uint32_t *)ha->init_cb;
	cpu_to_be32_array(q, bp, sz / 4);
	ha->flags.plogi_template_valid = 1;
}

/*
 * qla2x00_configure_loop
 *	Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	1 = error.
 *	2 = database was full and device was not configured.
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/*
	 * Snapshot dpc_flags: the update bits are cleared from the live
	 * dpc_flags below but tested/re-derived in the local copy.
	 */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);
	qla_get_login_template(vha);

	/* Determine what we need to do */
	if ((ha->current_topology == ISP_CFG_FL ||
	    ha->current_topology == ISP_CFG_F) &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_NL ||
		   ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
				qlt_24xx_process_atio_queue(vha, 0);
				spin_unlock_irqrestore(&ha->tgt.atio_lock,
				    flags);
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally.\n", __func__);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}

/*
 * Kick off login on the N2N peer port (the fcport flagged n2n_flag);
 * if none is known yet, bump the scan retry count and request a
 * loop resync while retries remain.
 */
static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
{
	unsigned long flags;
	fc_port_t *fcport;

	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->n2n_flag) {
			qla24xx_fcport_handle_login(vha, fcport);
			return QLA_SUCCESS;
		}
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_retry++;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	}
	return QLA_FUNCTION_FAILED;
}

/*
 * qla2x00_configure_local_loop
 *	Updates Fibre Channel Device Database with local loop devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 */
static int
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
	int rval, rval2;
	int found_devs;
	int found;
	fc_port_t *fcport, *new_fcport;
	uint16_t index;
	uint16_t entries;
	struct gid_list_info *gid;
	uint16_t loop_id;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Initiate N2N login. */
	if (N2N_TOPO(ha))
		return qla2x00_configure_n2n_loop(vha);

	found_devs = 0;
	new_fcport = NULL;
	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices.
*/
	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	    &entries);
	if (rval != QLA_SUCCESS)
		goto err;

	ql_dbg(ql_dbg_disc, vha, 0x2011,
	    "Entries in ID list (%d).\n", entries);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
	    ha->gid_list, entries * sizeof(*ha->gid_list));

	if (entries == 0) {
		/* Empty list: retry the scan (bounded by MAX_SCAN_RETRIES). */
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_retry++;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}
	} else {
		vha->scan.scan_retry = 0;
	}

	/* Mark every known port as unseen; the walk below re-marks them. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->scan_state = QLA_FCPORT_SCAN;
	}

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x2012,
		    "Memory allocation failed for fcport.\n");
		rval = QLA_MEMORY_ALLOC_FAILED;
		goto err;
	}
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/* Add devices to port list. */
	gid = ha->gid_list;
	for (index = 0; index < entries; index++) {
		domain = gid->domain;
		area = gid->area;
		al_pa = gid->al_pa;
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = gid->loop_id_2100;
		else
			loop_id = le16_to_cpu(gid->loop_id);
		gid = (void *)gid + ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)
			continue;

		/* Bypass if not same domain and area of adapter. */
		if (area && domain && ((area != vha->d_id.b.area) ||
		    (domain != vha->d_id.b.domain)) &&
		    (ha->current_topology == ISP_CFG_NL))
			continue;

		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)
			continue;

		memset(new_fcport->port_name, 0, WWN_SIZE);

		/* Fill in member data.
		 */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;
		new_fcport->scan_state = QLA_FCPORT_FOUND;

		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2097,
			    "Failed to retrieve fcport information "
			    "-- get_port_database=%x, loop_id=0x%04x.\n",
			    rval2, new_fcport->loop_id);
			/* Skip retry if N2N */
			if (ha->current_topology != ISP_CFG_N) {
				ql_dbg(ql_dbg_disc, vha, 0x2105,
				    "Scheduling resync.\n");
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				continue;
			}
		}

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* Check for matching device in port list. */
		found = 0;
		fcport = NULL;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			/* Known WWPN: refresh its addressing info. */
			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			    WWN_SIZE);
			fcport->scan_state = QLA_FCPORT_FOUND;
			found++;
			break;
		}

		if (!found) {
			/* New device, add to fcports list. */
			list_add_tail(&new_fcport->list, &vha->vp_fcports);

			/* Allocate a new replacement fcport. */
			fcport = new_fcport;

			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

			if (new_fcport == NULL) {
				ql_log(ql_log_warn, vha, 0xd031,
				    "Failed to allocate memory for fcport.\n");
				rval = QLA_MEMORY_ALLOC_FAILED;
				goto err;
			}
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
		}

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Base iIDMA settings on HBA port speed.
		 */
		fcport->fp_speed = ha->link_data_rate;

		found_devs++;
	}

	/* Second pass: act on what was (not) seen during the scan. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}

	/* Release the spare fcport left over from the discovery loop. */
	qla2x00_free_fcport(new_fcport);

	return rval;

err:
	ql_dbg(ql_dbg_disc, vha, 0x2098,
	    "Configure local loop error exit: rval=%x.\n", rval);
	return rval;
}

/*
 * Negotiate iIDMA (port-speed) settings for an online fcport via the
 * firmware mailbox; no-op if the HBA or fabric can't support it.
 */
static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_IIDMA_CAPABLE(ha))
		return;

	if (atomic_read(&fcport->state) != FCS_ONLINE)
		return;

	if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
	    fcport->fp_speed > ha->link_data_rate ||
	    !ha->flags.gpsc_supported)
		return;

	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
	    mb);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2004,
		    "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
		    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2005,
		    "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
		    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
		    fcport->fp_speed, fcport->port_name);
	}
}

/* Worker body: apply iIDMA and FCP priority settings to one fcport. */
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);
}

/* Queue an IIDMA work event for asynchronous processing. */
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e =
qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}

/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	/* Already registered/online; nothing to do. */
	if (atomic_read(&fcport->state) == FCS_ONLINE)
		return;

	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");
		return;
	}

	/* Stash the fcport in the rport's driver-private data. */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
	fcport->dev_loss_tmo = rport->dev_loss_tmo;

	rport->supported_classes = fcport->supported_classes;

	/* Translate driver port_type into FC transport role bits. */
	rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
	if (fcport->port_type & FCT_NVME_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
	if (fcport->port_type & FCT_NVME_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
	if (fcport->port_type & FCT_NVME_DISCOVERY)
		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s %8phN. rport %p is %s mode\n",
	    __func__, fcport->port_name, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" :
	    ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));

	fc_remote_port_rolechg(rport, rport_ids.roles);
}

/*
 * qla2x00_update_fcport
 *	Updates device on list.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	0 - Success
 *	BIT_0 - error
 *
 * Context:
 *	Kernel context.
 */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
	fcport->login_retry = vha->hw->login_retry_count;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	fcport->deleted = 0;
	if (vha->hw->current_topology == ISP_CFG_NL)
		fcport->logout_on_delete = 0;
	else
		fcport->logout_on_delete = 1;
	fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;

	if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) {
		/* Link came back within dev_loss_tmo: count a short outage. */
		fcport->tgt_short_link_down_cnt++;
		fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
	}

	switch (vha->hw->current_topology) {
	case ISP_CFG_N:
	case ISP_CFG_NL:
		fcport->keep_nport_handle = 1;
		break;
	default:
		break;
	}

	qla2x00_iidma_fcport(vha, fcport);

	qla2x00_dfs_create_rport(vha, fcport);

	/* NVMe targets register with the NVMe-FC transport instead. */
	if (NVME_TARGET(vha->hw, fcport)) {
		qla_nvme_register_remote(vha, fcport);
		qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
		qla2x00_set_fcport_state(fcport, FCS_ONLINE);
		return;
	}

	qla24xx_update_fcport_fcp_prio(vha, fcport);

	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		break;
	case MODE_TARGET:
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	case MODE_DUAL:
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	default:
		break;
	}

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);

	if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
		if (fcport->id_changed) {
			fcport->id_changed = 0;
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gfpnid fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gfpnid_work(vha, fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gpsc_work(vha, fcport);
		}
	}

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
}

/*
 * Workqueue entry point for fcport registration (fcport->reg_work).
 * Re-checks the RSCN generation afterwards so RSCNs that arrived
 * during registration are not lost.
 */
void qla_register_fcport_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
	u32 rscn_gen = fcport->rscn_gen;
	u16 data[2];

	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	qla2x00_update_fcport(fcport->vha, fcport);

	if (rscn_gen != fcport->rscn_gen) {
		/* RSCN(s) came in while registration */
		switch (fcport->next_disc_state) {
		case DSC_DELETE_PEND:
			qlt_schedule_sess_for_deletion(fcport);
			break;
		case DSC_ADISC:
			data[0] = data[1] = 0;
			qla2x00_post_async_adisc_work(fcport->vha, fcport,
			    data);
			break;
		default:
			break;
		}
	}
}

/*
 * qla2x00_configure_fabric
 *	Setup SNS devices with loop ID's.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = error
 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int rval;
	fc_port_t *fcport;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint16_t loop_id;
	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;
	int discovery_gen;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;

	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, vha, 0x20ff,
		    "Failed to get Fabric Port Name\n");

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",
			    rval);
	}

	do {
		qla2x00_mgmt_svr_login(vha);

		/* Ensure we are logged into the SNS.
*/
		loop_id = NPH_SNS_LID(ha);
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}

		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		/* Register our FC-4 types/names with the name server. */
		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
		}

		/* Mark the time right before querying FW for connected ports.
		 * This process is long, asynchronous and by the time it's done,
		 * collected information might not be accurate anymore. E.g.
		 * disconnected port might have re-connected and a brand new
		 * session has been created. In this case session's generation
		 * will be newer than discovery_gen.
		 */
		qlt_do_generation_tick(vha, &discovery_gen);

		if (USE_ASYNC_SCAN(ha)) {
			rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
			    NULL);
			if (rval)
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		} else {
			list_for_each_entry(fcport, &vha->vp_fcports, list)
				fcport->scan_state = QLA_FCPORT_SCAN;

			rval = qla2x00_find_all_fabric_devs(vha);
		}
		if (rval != QLA_SUCCESS)
			break;
	} while (0);	/* single pass; "break" above is the error exit */

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return (rval);
}

/*
 * qla2x00_find_all_fabric_devs
 *
 * Input:
 *	ha = adapter block pointer.
 *	dev = database device entry pointer.
 *
 * Returns:
 *	0 = success.
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	fc_port_t *fcport, *new_fcport;
	int found;

	sw_info_t *swl;
	int swl_idx;
	int first_dev, last_dev;
	port_id_t wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	rval = QLA_SUCCESS;

	/* Try GID_PT to get device list, else GAN.
*/
	if (!ha->swl)
		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
		    GFP_KERNEL);
	swl = ha->swl;
	if (!swl) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x209c,
		    "GID_PT allocations failed, fallback on GA_NXT.\n");
	} else {
		/*
		 * Bulk name-server queries; any failure drops swl to NULL so
		 * the per-port GA_NXT path below is used instead.
		 */
		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}

		/* If other queries succeeded probe for FC-4 type */
		if (swl) {
			qla2x00_gff_id(vha, swl);
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}
	}
	swl_idx = 0;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x209d,
		    "Failed to allocate memory for fcport.\n");
		return (QLA_MEMORY_ALLOC_FAILED);
	}
	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);

	/* Set start port ID scan at adapter ID. */
	first_dev = 1;
	last_dev = 0;

	/* Starting free loop ID.
	 */
	loop_id = ha->min_external_loopid;
	for (; loop_id <= ha->max_loop_id; loop_id++) {
		if (qla2x00_is_reserved_id(vha, loop_id))
			continue;

		if (ha->current_topology == ISP_CFG_FL &&
		    (atomic_read(&vha->loop_down_timer) ||
		     LOOP_TRANSITION(vha))) {
			atomic_set(&vha->loop_down_timer, 0);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			break;
		}

		if (swl != NULL) {
			/* Consume the next entry of the cached GID_PT list. */
			if (last_dev) {
				wrap.b24 = new_fcport->d_id.b24;
			} else {
				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
				memcpy(new_fcport->node_name,
				    swl[swl_idx].node_name, WWN_SIZE);
				memcpy(new_fcport->port_name,
				    swl[swl_idx].port_name, WWN_SIZE);
				memcpy(new_fcport->fabric_port_name,
				    swl[swl_idx].fabric_port_name, WWN_SIZE);
				new_fcport->fp_speed = swl[swl_idx].fp_speed;
				new_fcport->fc4_type = swl[swl_idx].fc4_type;

				new_fcport->nvme_flag = 0;
				if (vha->flags.nvme_enabled &&
				    swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
					ql_log(ql_log_info, vha, 0x2131,
					    "FOUND: NVME port %8phC as FC Type 28h\n",
					    new_fcport->port_name);
				}

				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
					last_dev = 1;
				}
				swl_idx++;
			}
		} else {
			/* Send GA_NXT to the switch */
			rval = qla2x00_ga_nxt(vha, new_fcport);
			if (rval != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x209e,
				    "SNS scan failed -- assuming "
				    "zero-entry result.\n");
				rval = QLA_SUCCESS;
				break;
			}
		}

		/* If wrap on switch device list, exit. */
		if (first_dev) {
			wrap.b24 = new_fcport->d_id.b24;
			first_dev = 0;
		} else if (new_fcport->d_id.b24 == wrap.b24) {
			ql_dbg(ql_dbg_disc, vha, 0x209f,
			    "Device wrap (%02x%02x%02x).\n",
			    new_fcport->d_id.b.domain,
			    new_fcport->d_id.b.area,
			    new_fcport->d_id.b.al_pa);
			break;
		}

		/* Bypass if same physical adapter. */
		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
			continue;

		/* Bypass virtual ports of the same host. */
		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
			continue;

		/* Bypass if same domain and area of adapter.
		 */
		if (((new_fcport->d_id.b24 & 0xffff00) ==
		    (vha->d_id.b24 & 0xffff00)) &&
		    ha->current_topology == ISP_CFG_FL)
			continue;

		/* Bypass reserved domain fields. */
		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
			continue;

		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
		if (ql2xgffidenable &&
		    (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
		    new_fcport->fc4_type != 0))
			continue;

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			fcport->scan_state = QLA_FCPORT_FOUND;

			found++;

			/* Update port state. */
			memcpy(fcport->fabric_port_name,
			    new_fcport->fabric_port_name, WWN_SIZE);
			fcport->fp_speed = new_fcport->fp_speed;

			/*
			 * If address the same and state FCS_ONLINE
			 * (or in target mode), nothing changed.
			 */
			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
			    (atomic_read(&fcport->state) == FCS_ONLINE ||
			     (vha->host->active_mode == MODE_TARGET))) {
				break;
			}

			if (fcport->login_retry == 0)
				fcport->login_retry =
					vha->hw->login_retry_count;
			/*
			 * If device was not a fabric device before.
			 */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				fcport->d_id.b24 = new_fcport->d_id.b24;
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= (FCF_FABRIC_DEVICE |
				    FCF_LOGIN_NEEDED);
				break;
			}

			/*
			 * Port ID changed or device was marked to be updated;
			 * Log it out if still logged in and mark it for
			 * relogin later.
*/ if (qla_tgt_mode_enabled(base_vha)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080, "port changed FC ID, %8phC" " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n", fcport->port_name, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, fcport->loop_id, new_fcport->d_id.b.domain, new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa); fcport->d_id.b24 = new_fcport->d_id.b24; break; } fcport->d_id.b24 = new_fcport->d_id.b24; fcport->flags |= FCF_LOGIN_NEEDED; break; } if (found && NVME_TARGET(vha->hw, fcport)) { if (fcport->disc_state == DSC_DELETE_PEND) { qla2x00_set_fcport_disc_state(fcport, DSC_GNL); vha->fcport_count--; fcport->login_succ = 0; } } if (found) { spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); continue; } /* If device was not in our fcports list, then add it. */ new_fcport->scan_state = QLA_FCPORT_FOUND; list_add_tail(&new_fcport->list, &vha->vp_fcports); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); /* Allocate a new replacement fcport. */ nxt_d_id.b24 = new_fcport->d_id.b24; new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (new_fcport == NULL) { ql_log(ql_log_warn, vha, 0xd032, "Memory allocation failed for fcport.\n"); return (QLA_MEMORY_ALLOC_FAILED); } new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); new_fcport->d_id.b24 = nxt_d_id.b24; } qla2x00_free_fcport(new_fcport); /* * Logout all previous fabric dev marked lost, except FCP2 devices. 
*/ list_for_each_entry(fcport, &vha->vp_fcports, list) { if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) continue; if (fcport->scan_state == QLA_FCPORT_SCAN) { if ((qla_dual_mode_enabled(vha) || qla_ini_mode_enabled(vha)) && atomic_read(&fcport->state) == FCS_ONLINE) { qla2x00_mark_device_lost(vha, fcport, ql2xplogiabsentdevice); if (fcport->loop_id != FC_NO_LOOP_ID && (fcport->flags & FCF_FCP2_DEVICE) == 0 && fcport->port_type != FCT_INITIATOR && fcport->port_type != FCT_BROADCAST) { ql_dbg(ql_dbg_disc, vha, 0x20f0, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); qlt_schedule_sess_for_deletion(fcport); continue; } } } if (fcport->scan_state == QLA_FCPORT_FOUND && (fcport->flags & FCF_LOGIN_NEEDED) != 0) qla24xx_fcport_handle_login(vha, fcport); } return (rval); } /* FW does not set aside Loop id for MGMT Server/FFFFFAh */ int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha) { int loop_id = FC_NO_LOOP_ID; int lid = NPH_MGMT_SERVER - vha->vp_idx; unsigned long flags; struct qla_hw_data *ha = vha->hw; if (vha->vp_idx == 0) { set_bit(NPH_MGMT_SERVER, ha->loop_id_map); return NPH_MGMT_SERVER; } /* pick id from high and work down to low */ spin_lock_irqsave(&ha->vport_slock, flags); for (; lid > 0; lid--) { if (!test_bit(lid, vha->hw->loop_id_map)) { set_bit(lid, vha->hw->loop_id_map); loop_id = lid; break; } } spin_unlock_irqrestore(&ha->vport_slock, flags); return loop_id; } /* * qla2x00_fabric_login * Issue fabric login command. * * Input: * ha = adapter block pointer. * device = pointer to FC device type structure. 
 *
 * Returns:
 *      0 - Login successfully
 *      1 - Login failed
 *      2 - Initiator device
 *      3 - Fatal error
 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int	rval;
	int	retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/* Loop until the firmware reports a terminal mailbox status;
	 * MBS_PORT_ID_USED and MBS_LOOP_ID_USED cause another attempt
	 * with an adjusted loop id. */
	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID.  The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again. The ID we just used is save
			 * here so we return with an ID that can be tried by
			 * the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before. Just increment the
				 * ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* mb[1] BIT_0: remote is an initiator;
			 * mb[1] BIT_1: target supports FCP-2 recovery. */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login. If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}

/*
 * qla2x00_local_device_login
 *	Issue local device login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = loop id of device to login to.
 *
 * Returns (no #define exists for these codes):
 *      0 - Login successfully
 *      1 - Login failed
 *      3 - Fatal error
 */
int
qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int		rval;
	uint16_t	mb[MAILBOX_REGISTER_COUNT];

	memset(mb, 0, sizeof(mb));
	rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
	if (rval == QLA_SUCCESS) {
		/* Interrogate mailbox registers for any errors */
		if (mb[0] == MBS_COMMAND_ERROR)
			rval = 1;
		else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
			/* device not in PCB table */
			rval = 3;
	}

	return (rval);
}

/*
 *  qla2x00_loop_resync
 *      Resync with fibre channel devices.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_loop_resync(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t wait_time;

	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
	if (vha->flags.online) {
		if (!(rval = qla2x00_fw_ready(vha))) {
			/* Wait at most MAX_TARGET RSCNs for a stable link. */
			wait_time = 256;
			do {
				if (!IS_QLAFX00(vha->hw)) {
					/*
					 * Issue a marker after FW becomes
					 * ready.
					 */
					qla2x00_marker(vha, vha->hw->base_qpair,
					    0, 0, MK_SYNC_ALL);
					vha->marker_needed = 0;
				}

				/* Remap devices on Loop. */
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

				if (IS_QLAFX00(vha->hw))
					qlafx00_configure_devices(vha);
				else
					qla2x00_configure_loop(vha);

				/* Re-run while another resync was requested
				 * during configuration, up to wait_time
				 * iterations, unless the loop went down or
				 * an ISP abort is pending. */
				wait_time--;
			} while (!atomic_read(&vha->loop_down_timer) &&
				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
				&vha->dpc_flags)));
		}
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		return (QLA_FUNCTION_FAILED);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x206c,
		    "%s *** FAILED ***.\n", __func__);

	return (rval);
}

/*
 * qla2x00_perform_loop_resync
 * Description: This function will set the appropriate flags and call
 *              qla2x00_loop_resync. If successful loop will be resynced
 * Arguments : scsi_qla_host_t pointer
 * return    : Success or Failure
 */
int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
{
	int32_t rval = 0;

	/* LOOP_RESYNC_ACTIVE also serializes against a concurrent resync. */
	if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
		/*Configure the flags so that resync happens properly*/
		atomic_set(&ha->loop_down_timer, 0);
		if (!(ha->device_flags & DFLG_NO_CABLE)) {
			atomic_set(&ha->loop_state, LOOP_UP);
			set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
			set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);

			rval = qla2x00_loop_resync(ha);
		} else
			atomic_set(&ha->loop_state, LOOP_DEAD);

		clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
	}

	return rval;
}

/* Drop dangling rport references on all vports.  The vport_slock is
 * released around qla2x00_rport_del() since that call may sleep/block;
 * vref_count pins the vha across the unlocked window. */
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references. */
	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
		atomic_inc(&vha->vref_count);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_rport_del(fcport);

				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
		wake_up(&vha->vref_waitq);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

/* Assumes idc_lock always held on entry */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence, drv_presence_mask;
	uint32_t dev_part_info1, dev_part_info2, class_type;
	uint32_t class_type_mask = 0x3;
	uint16_t fcoe_other_function = 0xffff, i;

	if (IS_QLA8044(ha)) {
		drv_presence = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		dev_part_info1 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO_INDEX);
		dev_part_info2 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO2);
	} else {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
		    &drv_presence);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1,
		    &dev_part_info1);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2,
		    &dev_part_info2);
	}

	/* Each partition-info register encodes 8 functions, 4 bits each;
	 * the low 2 bits of each nibble carry the class type.  Find the
	 * first *other* FCoE function (0-7 in info1, 8-15 in info2). */
	for (i = 0; i < 8; i++) {
		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
		    (i != ha->portnum)) {
			fcoe_other_function = i;
			break;
		}
	}
	if (fcoe_other_function == 0xffff) {
		for (i = 0; i < 8; i++) {
			class_type = ((dev_part_info2 >> (i * 4)) &
			    class_type_mask);
			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
			    ((i + 8) != ha->portnum)) {
				fcoe_other_function = i + 8;
				break;
			}
		}
	}
	/*
	 * Prepare drv-presence mask based on fcoe functions present.
	 * However consider only valid physical fcoe function numbers (0-15).
	 */
	drv_presence_mask = ~((1 << (ha->portnum)) |
			((fcoe_other_function == 0xffff) ?
			 0 : (1 << (fcoe_other_function))));

	/* We are the reset owner iff:
	 *    - No other protocol drivers present.
	 *    - This is the lowest among fcoe functions. */
	if (!(drv_presence & drv_presence_mask) &&
	    (ha->portnum < fcoe_other_function)) {
		ql_dbg(ql_dbg_p3p, vha, 0xb07f,
		    "This host is Reset owner.\n");
		ha->flags.nic_core_reset_owner = 1;
	}
}

/* Set this function's bit in the IDC driver-ack register
 * (read-modify-write; caller holds the idc lock). */
static int
__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_ack;

	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
	if (rval == QLA_SUCCESS) {
		drv_ack |= (1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
	}

	return rval;
}

/* Clear this function's bit in the IDC driver-ack register. */
static int
__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_ack;

	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
	if (rval == QLA_SUCCESS) {
		drv_ack &= ~(1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
	}

	return rval;
}

/* Map an IDC device-state value to a human-readable name for logging. */
static const char *
qla83xx_dev_state_to_string(uint32_t dev_state)
{
	switch (dev_state) {
	case QLA8XXX_DEV_COLD:
		return "COLD/RE-INIT";
	case QLA8XXX_DEV_INITIALIZING:
		return "INITIALIZING";
	case QLA8XXX_DEV_READY:
		return "READY";
	case QLA8XXX_DEV_NEED_RESET:
		return "NEED RESET";
	case QLA8XXX_DEV_NEED_QUIESCENT:
		return "NEED QUIESCENT";
	case QLA8XXX_DEV_FAILED:
		return "FAILED";
	case QLA8XXX_DEV_QUIESCENT:
		return "QUIESCENT";
	default:
		return "Unknown";
	}
}

/* Assumes idc-lock always held on entry */
void
qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_audit_reg = 0, duration_secs = 0;

	switch (audit_type) {
	case IDC_AUDIT_TIMESTAMP:
		/* Record reset start time (seconds) in the audit register:
		 * bits [6:0] portnum, bit 7 audit type, [31:8] timestamp. */
		ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	case IDC_AUDIT_COMPLETION:
		duration_secs = ((jiffies_to_msecs(jiffies) -
		    jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	default:
		ql_log(ql_log_warn, vha, 0xb078,
		    "Invalid audit type specified.\n");
		break;
	}
}

/* Assumes idc_lock always held on entry */
static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_control, dev_state;

	__qla83xx_get_idc_control(vha, &idc_control);
	if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
		ql_log(ql_log_info, vha, 0xb080,
		    "NIC Core reset has been disabled. idc-control=0x%x\n",
		    idc_control);
		return QLA_FUNCTION_FAILED;
	}

	/* Set NEED-RESET iff in READY state and we are the reset-owner */
	qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
		    QLA8XXX_DEV_NEED_RESET);
		ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
		qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
	} else {
		/* Not the reset owner (or not READY); wait for the owner
		 * to move the state machine forward. */
		const char *state = qla83xx_dev_state_to_string(dev_state);

		ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);

		/* SV: XXX: Is timeout required here?
		 */
		/* Wait for IDC state change READY -> NEED_RESET */
		while (dev_state == QLA8XXX_DEV_READY) {
			/* Drop the idc lock while polling so the reset
			 * owner can advance the state. */
			qla83xx_idc_unlock(vha, 0);
			msleep(200);
			qla83xx_idc_lock(vha, 0);

			qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE,
			    &dev_state);
		}
	}

	/* Send IDC ack by writing to drv-ack register */
	__qla83xx_set_drv_ack(vha);

	return QLA_SUCCESS;
}

int
__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
{
	return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}

int
__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
{
	return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}

/* Check whether this function's bit is still set in the IDC
 * drv-presence register (i.e. we still participate in IDC). */
static int
qla83xx_check_driver_presence(scsi_qla_host_t *vha)
{
	uint32_t drv_presence = 0;
	struct qla_hw_data *ha = vha->hw;

	qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
	if (drv_presence & (1 << ha->portnum))
		return QLA_SUCCESS;
	else
		return QLA_TEST_FAILED;
}

int
qla83xx_nic_core_reset(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_p3p, vha, 0xb058, "Entered  %s().\n", __func__);

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0xb059,
		    "Device in unrecoverable FAILED state.\n");
		return QLA_FUNCTION_FAILED;
	}

	qla83xx_idc_lock(vha, 0);

	if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xb05a,
		    "Function=0x%x has been removed from IDC participation.\n",
		    ha->portnum);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	qla83xx_reset_ownership(vha);

	rval = qla83xx_initiating_reset(vha);

	/*
	 * Perform reset if we are the reset-owner,
	 * else wait till IDC state changes to READY/FAILED.
	 */
	if (rval == QLA_SUCCESS) {
		rval = qla83xx_idc_state_handler(vha);

		if (rval == QLA_SUCCESS)
			ha->flags.nic_core_hung = 0;
		__qla83xx_clear_drv_ack(vha);
	}

exit:
	qla83xx_idc_unlock(vha, 0);
	ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);

	return rval;
}

/* Capture an MCTP firmware dump into a (lazily allocated) DMA buffer;
 * optionally restarts NIC firmware afterwards on function 0. */
int
qla2xxx_mctp_dump(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (!IS_MCTP_CAPABLE(ha)) {
		/* This message can be removed from the final version */
		ql_log(ql_log_info, vha, 0x506d,
		    "This board is not MCTP capable\n");
		return rval;
	}

	if (!ha->mctp_dump) {
		/* One-time allocation; buffer is reused on later dumps. */
		ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
		    MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);

		if (!ha->mctp_dump) {
			ql_log(ql_log_warn, vha, 0x506e,
			    "Failed to allocate memory for mctp dump\n");
			return rval;
		}
	}

#define MCTP_DUMP_STR_ADDR	0x00000000
	rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
	    MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x506f,
		    "Failed to capture mctp dump\n");
	} else {
		ql_log(ql_log_info, vha, 0x5070,
		    "Mctp dump capture for host (%ld/%p).\n",
		    vha->host_no, ha->mctp_dump);
		ha->mctp_dumped = 1;
	}

	if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
		ha->flags.nic_core_reset_hdlr_active = 1;
		rval = qla83xx_restart_nic_firmware(vha);
		if (rval)
			/* NIC Core reset failed.
			 */
			ql_log(ql_log_warn, vha, 0x5071,
			    "Failed to restart nic firmware\n");
		else
			ql_dbg(ql_dbg_p3p, vha, 0xb084,
			    "Restarted NIC firmware successfully.\n");
		ha->flags.nic_core_reset_hdlr_active = 0;
	}

	return rval;

}

/*
* qla2x00_quiesce_io
* Description: This function will block the new I/Os
*              Its not aborting any I/Os as context
*              is not destroyed during quiescence
* Arguments: scsi_qla_host_t
* return   : void
*/
void
qla2x00_quiesce_io(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;

	ql_dbg(ql_dbg_dpc, vha, 0x401d,
	    "Quiescing I/O - ha=%p.\n", ha);

	/* Take the loop down so the midlayer stops sending new I/O. */
	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);
		list_for_each_entry(vp, &ha->vp_list, list)
			qla2x00_mark_all_devices_lost(vp);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
					LOOP_DOWN_TIME);
	}
	/* Wait for pending cmds to complete */
	WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
		     != QLA_SUCCESS);
}

/* Tear down adapter state ahead of an ISP reset: take the chip and loop
 * down, purge pending mailbox commands, mark all devices lost on every
 * vport, clear async login state, and abort outstanding commands. */
void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	unsigned long flags;
	fc_port_t *fcport;
	u16 i;

	/* For ISP82XX, driver waits for completion of the commands.
	 * online flag should be set.
	 */
	if (!(IS_P3P_TYPE(ha)))
		vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	vha->qla_stats.total_isp_aborts++;

	ql_log(ql_log_info, vha, 0x00af,
	    "Performing ISP error recovery - ha=%p.\n", ha);

	ha->flags.purge_mbox = 1;
	/* For ISP82XX, reset_chip is just disabling interrupts.
	 * Driver waits for the completion of the commands.
	 * the interrupts need to be enabled.
	 */
	if (!(IS_P3P_TYPE(ha)))
		ha->isp_ops->reset_chip(vha);

	ha->link_data_rate = PORT_SPEED_UNKNOWN;
	SAVE_TOPO(ha);
	ha->flags.rida_fmt2 = 0;
	ha->flags.n2n_ae = 0;
	ha->flags.lip_ae = 0;
	ha->current_topology = 0;
	QLA_FW_STOPPED(ha);
	ha->flags.fw_init_done = 0;
	/* Bump the chip_reset generation on all queue pairs so stale
	 * requests from before the reset can be recognized. */
	ha->chip_reset++;
	ha->base_qpair->chip_reset = ha->chip_reset;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i])
			ha->queue_pair_map[i]->chip_reset =
				ha->base_qpair->chip_reset;
	}

	/* purge MBox commands */
	if (atomic_read(&ha->num_pend_mbx_stage3)) {
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	/* Wait up to ~1s (50 * 20ms) for in-flight mailbox commands to
	 * drain before clearing purge_mbox. */
	i = 0;
	while (atomic_read(&ha->num_pend_mbx_stage3) ||
	    atomic_read(&ha->num_pend_mbx_stage2) ||
	    atomic_read(&ha->num_pend_mbx_stage1)) {
		msleep(20);
		i++;
		if (i > 50)
			break;
	}
	ha->flags.purge_mbox = 0;

	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);

		/* vport_slock is dropped around the per-vport call;
		 * vref_count pins the vport meanwhile. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_mark_all_devices_lost(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
		fcport->scan_state = 0;
	}
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		atomic_inc(&vp->vref_count);
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		list_for_each_entry(fcport, &vp->vp_fcports, list)
			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (!ha->flags.eeh_busy) {
		/* Make sure for ISP 82XX IO DMA is complete */
		if (IS_P3P_TYPE(ha)) {
			qla82xx_chip_reset_cleanup(vha);
			ql_log(ql_log_info, vha, 0x00b4,
			    "Done chip reset cleanup.\n");

			/* Done waiting for pending commands.
			 * Reset the online flag.
			 */
			vha->flags.online = 0;
		}

		/* Requeue all commands in outstanding command list. */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	}
	/* memory barrier */
	wmb();
}

/*
*  qla2x00_abort_isp
*      Resets ISP and aborts all outstanding commands.
*
* Input:
*      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t        status = 0;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	struct req_que *req = ha->req_q_map[0];
	unsigned long flags;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);

		/* Isolated port: cleanup only, no restart. */
		if (vha->hw->flags.port_isolated)
			return status;

		if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
			ha->flags.chip_reset_done = 1;
			vha->flags.online = 1;
			status = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			return status;
		}

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05c,
			    "Clearing fcoe driver presence.\n");
			if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb073,
				    "Error while clearing DRV-Presence.\n");
		}

		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			/* PCI channel permanently dead: nothing to restart. */
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			status = 0;
			return status;
		}

		switch (vha->qlini_mode) {
		case QLA2XXX_INI_MODE_DISABLED:
			if (!qla_tgt_mode_enabled(vha))
				return 0;
			break;
		case QLA2XXX_INI_MODE_DUAL:
			if (!qla_dual_mode_enabled(vha))
				return 0;
			break;
		case QLA2XXX_INI_MODE_ENABLED:
		default:
			break;
		}

		ha->isp_ops->get_flash_version(vha, req->ring);

		ha->isp_ops->nvram_config(vha);

		if (!qla2x00_restart_isp(vha)) {
			/* Restart succeeded: bring the port back online and
			 * re-arm the optional FCE/EFT trace buffers. */
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			if (!atomic_read(&vha->loop_down_timer)) {
				/*
				 * Issue marker command only when we are going
				 * to start the I/O .
				 */
				vha->marker_needed = 1;
			}

			vha->flags.online = 1;

			ha->isp_ops->enable_intrs(ha);

			ha->isp_abort_cnt = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
				qla2x00_get_fw_version(vha);
			if (ha->fce) {
				ha->flags.fce_enabled = 1;
				memset(ha->fce, 0,
				    fce_calc_size(ha->fce_bufs));
				rval = qla2x00_enable_fce_trace(vha,
				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
				    &ha->fce_bufs);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8033,
					    "Unable to reinitialize FCE "
					    "(%d).\n", rval);
					ha->flags.fce_enabled = 0;
				}
			}

			if (ha->eft) {
				memset(ha->eft, 0, EFT_SIZE);
				rval = qla2x00_enable_eft_trace(vha,
				    ha->eft_dma, EFT_NUM_BUFFERS);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8034,
					    "Unable to reinitialize EFT "
					    "(%d).\n", rval);
				}
			}
		} else {	/* failed the ISP abort */
			vha->flags.online = 1;
			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (ha->isp_abort_cnt == 0) {
					ql_log(ql_log_fatal, vha, 0x8035,
					    "ISP error recover failed - "
					    "board disabled.\n");
					/*
					 * The next call disables the board
					 * completely.
					 */
					qla2x00_abort_isp_cleanup(vha);
					vha->flags.online = 0;
					clear_bit(ISP_ABORT_RETRY,
					    &vha->dpc_flags);
					status = 0;
				} else { /* schedule another ISP abort */
					ha->isp_abort_cnt--;
					ql_dbg(ql_dbg_taskm, vha, 0x8020,
					    "ISP abort - retry remaining %d.\n",
					    ha->isp_abort_cnt);
					status = 1;
				}
			} else {
				/* First failure: arm the retry counter. */
				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
				ql_dbg(ql_dbg_taskm, vha, 0x8021,
				    "ISP error recovery - retrying (%d) "
				    "more times.\n", ha->isp_abort_cnt);
				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
				status = 1;
			}
		}

	}

	if (vha->hw->flags.port_isolated) {
		qla2x00_abort_isp_cleanup(vha);
		return status;
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
		qla2x00_configure_hba(vha);
		/* Propagate the abort to every virtual port; vport_slock
		 * is dropped around the call, vref_count pins the vport. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05d,
			    "Setting back fcoe driver presence.\n");
			if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb074,
				    "Error while setting DRV-Presence.\n");
		}
	} else {
		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
		       __func__);
	}

	return(status);
}

/*
*  qla2x00_restart_isp
*      restarts the ISP after a reset
*
* Input:
*      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
static int
qla2x00_restart_isp(scsi_qla_host_t *vha)
{
	int status;
	struct qla_hw_data *ha = vha->hw;

	/* If firmware needs to be loaded */
	if (qla2x00_isp_firmware(vha)) {
		vha->flags.online = 0;
		status = ha->isp_ops->chip_diag(vha);
		if (status)
			return status;
		status = qla2x00_setup_chip(vha);
		if (status)
			return status;
	}

	status = qla2x00_init_rings(vha);
	if (status)
		return status;

	clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
	ha->flags.chip_reset_done = 1;

	/* Initialize the queues in use */
	qla25xx_init_queues(ha);

	status = qla2x00_fw_ready(vha);
	if (status) {
		/* if no cable then assume it's good */
		return vha->device_flags & DFLG_NO_CABLE ? 0 : status;
	}

	/* Issue a marker after FW becomes ready. */
	qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

	return 0;
}

/* Re-initialize all extra (non-base, index >= 1) request and response
 * queues after a chip reset.  Returns the status of the last queue
 * initialized (or -1 if none were eligible). */
static int
qla25xx_init_queues(struct qla_hw_data *ha)
{
	struct rsp_que *rsp = NULL;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int ret = -1;
	int i;

	for (i = 1; i < ha->max_rsp_queues; i++) {
		rsp = ha->rsp_q_map[i];
		if (rsp && test_bit(i, ha->rsp_qid_map)) {
			rsp->options &= ~BIT_0;
			ret = qla25xx_init_rsp_que(base_vha, rsp);
			if (ret != QLA_SUCCESS)
				ql_dbg(ql_dbg_init, base_vha, 0x00ff,
				    "%s Rsp que: %d init failed.\n",
				    __func__, rsp->id);
			else
				ql_dbg(ql_dbg_init, base_vha, 0x0100,
				    "%s Rsp que: %d inited.\n",
				    __func__, rsp->id);
		}
	}
	for (i = 1; i < ha->max_req_queues; i++) {
		req = ha->req_q_map[i];
		if (req && test_bit(i, ha->req_qid_map)) {
			/* Clear outstanding commands array. */
			req->options &= ~BIT_0;
			ret = qla25xx_init_req_que(base_vha, req);
			if (ret != QLA_SUCCESS)
				ql_dbg(ql_dbg_init, base_vha, 0x0101,
				    "%s Req que: %d init failed.\n",
				    __func__, req->id);
			else
				ql_dbg(ql_dbg_init, base_vha, 0x0102,
				    "%s Req que: %d inited.\n",
				    __func__, req->id);
		}
	}
	return ret;
}

/*
* qla2x00_reset_adapter
*      Reset adapter.
*
* Input:
*      ha = adapter block pointer.
 */
int
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	/* Reset then release the RISC; each write is read back to flush
	 * the posted PCI write before proceeding. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
	rd_reg_word(&reg->hccr);			/* PCI Posting. */
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
	rd_reg_word(&reg->hccr);			/* PCI Posting. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}

/* ISP24xx variant of qla2x00_reset_adapter(): uses the 24xx HCCR
 * reset/release-pause encodings; a no-op on P3P-type (82xx) parts. */
int
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_P3P_TYPE(ha))
		return QLA_SUCCESS;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
	rd_reg_dword(&reg->hccr);
	wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	rd_reg_dword(&reg->hccr);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return QLA_SUCCESS;
}

/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 */
static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
	struct nvram_24xx *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}

int
qla24xx_nvram_config(scsi_qla_host_t *vha)
{
	int   rval;
	struct init_cb_24xx *icb;
	struct nvram_24xx *nv;
	__le32 *dptr;
	uint8_t  *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;
	icb = (struct init_cb_24xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address.
*/
	/* Function 0 and function 1 use different NVRAM/VPD regions. */
	if (ha->port_no == 0) {
		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
	} else {
		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
	}

	ha->nvram_size = sizeof(*nv);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;
	ha->isp_ops->read_nvram(vha, ha->vpd,
	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);

	/* Get NVRAM data into cache and calculate checksum. */
	dptr = (__force __le32 *)nv;
	ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
	/* Valid images sum (32-bit words) to zero. */
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2;
	    cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
	    "Contents of NVRAM\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x006b,
		    "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
		ql_log(ql_log_warn, vha, 0x006c,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block.
*/
		/* Known-good defaults, including a placeholder WWPN/WWNN. */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = cpu_to_le16(2048);
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		nv->hard_address = cpu_to_le16(124);
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		/* On SPARC, prefer WWNs from OpenFirmware properties. */
		qla24xx_nvram_wwn_from_ofw(vha, nv);
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(30);

		/* Remember that defaults were substituted. */
		rval = 1;
	}

	if (qla_tgt_mode_enabled(vha)) {
		/* Don't enable full login after initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Don't enable LIP full login for initiator */
		nv->host_p &= cpu_to_le32(~BIT_10);
	}

	qlt_24xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment. */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;
	icb->link_down_on_nos = nv->link_down_on_nos;

	/* Copy 2nd segment.
*/
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_3 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLA2462");

	qlt_24xx_config_nvram_stage2(vha, icb);

	if (nv->host_p & cpu_to_le32(BIT_15)) {
		/* Use alternate WWN? */
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;

	/* Operating mode lives in firmware_options_2 bits 4..6. */
	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options24));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds.
*/
	/* Clamp login_timeout to at least the module param, and >= 4s. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	/* Module parameter overrides everything. */
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* N2N: driver will initiate Login instead of FW */
	icb->firmware_options_3 |= cpu_to_le32(BIT_8);

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x006f,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode, ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0070,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}

/* Dump one flash image-status record to the init debug log. */
static void
qla27xx_print_image(struct scsi_qla_host *vha, char *name,
    struct qla27xx_image_status *image_status)
{
	ql_dbg(ql_dbg_init, vha, 0x018b,
	    "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
	    name, "status",
	    image_status->image_status_mask,
	    le16_to_cpu(image_status->generation),
	    image_status->ver_major,
	    image_status->ver_minor,
	    image_status->bitmap,
	    le32_to_cpu(image_status->checksum),
	    le32_to_cpu(image_status->signature));
}

/* Returns true when the AUX image-status signature is NOT valid. */
static bool
qla28xx_check_aux_image_status_signature(
    struct qla27xx_image_status *image_status)
{
	ulong signature = le32_to_cpu(image_status->signature);

	return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
}

/* Returns true when the image-status signature is NOT a known one. */
static bool
qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
{
	ulong signature = le32_to_cpu(image_status->signature);

	return signature != QLA27XX_IMG_STATUS_SIGN &&
	    signature != QLA28XX_IMG_STATUS_SIGN;
}

/*
 * Sum the image-status record as 32-bit LE words; a valid record sums
 * to zero, so a non-zero return means "checksum failed".
 */
static ulong
qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
{
	__le32 *p = (__force __le32 *)image_status;
	uint n = sizeof(*image_status) / sizeof(*p);
	uint32_t sum = 0;

	for ( ; n--; p++)
		sum += le32_to_cpup(p);

	return sum;
}

/* Map one bit of the AUX bitmap to primary/secondary region selection. */
static inline uint
qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
{
	return aux->bitmap & bitmask ?
QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
}

/* Translate the chosen AUX image's bitmap into per-region selections. */
static void
qla28xx_component_status(
    struct active_regions *active_regions, struct qla27xx_image_status *aux)
{
	active_regions->aux.board_config =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);

	active_regions->aux.vpd_nvram =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);

	active_regions->aux.npiv_config_0_1 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);

	active_regions->aux.npiv_config_2_3 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
}

/*
 * Compare image generations; >= 0 means the primary is at least as new
 * as the secondary.  The subtraction is done in int16_t on purpose.
 */
static int
qla27xx_compare_image_generation(
    struct qla27xx_image_status *pri_image_status,
    struct qla27xx_image_status *sec_image_status)
{
	/* calculate generation delta as uint16 (this accounts for wrap) */
	int16_t delta =
	    le16_to_cpu(pri_image_status->generation) -
	    le16_to_cpu(sec_image_status->generation);

	ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);

	return delta;
}

/*
 * qla28xx_get_aux_images
 *	Read the primary and secondary AUX image-status records from
 *	flash, validate signature and checksum of each, and record which
 *	flash regions are active in *active_regions.
 */
void
qla28xx_get_aux_images(
	struct scsi_qla_host *vha, struct active_regions *active_regions)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
	bool valid_pri_image = false, valid_sec_image = false;
	bool active_pri_image = false, active_sec_image = false;

	if (!ha->flt_region_aux_img_status_pri) {
		ql_dbg(ql_dbg_init, vha, 0x018a,
		    "Primary aux image not addressed\n");
		goto check_sec_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
	    ha->flt_region_aux_img_status_pri,
	    sizeof(pri_aux_image_status) >> 2);
	qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);

	if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Primary aux image signature (%#x) not valid\n",
		    le32_to_cpu(pri_aux_image_status.signature));
		goto check_sec_image;
	}

	if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Primary aux image checksum failed\n");
		goto check_sec_image;
	}
valid_pri_image = true;

	/* Bit 0 of the status mask marks the image "active". */
	if (pri_aux_image_status.image_status_mask & 1) {
		ql_dbg(ql_dbg_init, vha, 0x018d,
		    "Primary aux image is active\n");
		active_pri_image = true;
	}

check_sec_image:
	if (!ha->flt_region_aux_img_status_sec) {
		ql_dbg(ql_dbg_init, vha, 0x018a,
		    "Secondary aux image not addressed\n");
		goto check_valid_image;
	}

	qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
	    ha->flt_region_aux_img_status_sec,
	    sizeof(sec_aux_image_status) >> 2);
	qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);

	if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Secondary aux image signature (%#x) not valid\n",
		    le32_to_cpu(sec_aux_image_status.signature));
		goto check_valid_image;
	}

	if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Secondary aux image checksum failed\n");
		goto check_valid_image;
	}

	valid_sec_image = true;

	if (sec_aux_image_status.image_status_mask & 1) {
		ql_dbg(ql_dbg_init, vha, 0x018d,
		    "Secondary aux image is active\n");
		active_sec_image = true;
	}

check_valid_image:
	/*
	 * When both images are valid and active, the newer generation
	 * wins (ties go to primary); otherwise use whichever single
	 * image is both valid and active.
	 */
	if (valid_pri_image && active_pri_image &&
	    valid_sec_image && active_sec_image) {
		if (qla27xx_compare_image_generation(&pri_aux_image_status,
		    &sec_aux_image_status) >= 0) {
			qla28xx_component_status(active_regions,
			    &pri_aux_image_status);
		} else {
			qla28xx_component_status(active_regions,
			    &sec_aux_image_status);
		}
	} else if (valid_pri_image && active_pri_image) {
		qla28xx_component_status(active_regions,
		    &pri_aux_image_status);
	} else if (valid_sec_image && active_sec_image) {
		qla28xx_component_status(active_regions,
		    &sec_aux_image_status);
	}

	ql_dbg(ql_dbg_init, vha, 0x018f,
	    "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
	    active_regions->aux.board_config,
	    active_regions->aux.vpd_nvram,
	    active_regions->aux.npiv_config_0_1,
	    active_regions->aux.npiv_config_2_3);
}

/*
 * qla27xx_get_active_image
 *	Determine which firmware image region (primary/secondary) is the
 *	active one, recorded in active_regions->global.
 */
void
qla27xx_get_active_image(struct scsi_qla_host *vha,
    struct active_regions *active_regions)
{
struct qla_hw_data *ha = vha->hw;
	struct qla27xx_image_status pri_image_status, sec_image_status;
	bool valid_pri_image = false, valid_sec_image = false;
	bool active_pri_image = false, active_sec_image = false;

	if (!ha->flt_region_img_status_pri) {
		ql_dbg(ql_dbg_init, vha, 0x018a,
		    "Primary image not addressed\n");
		goto check_sec_image;
	}

	if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
	    ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
	    QLA_SUCCESS) {
		WARN_ON_ONCE(true);
		goto check_sec_image;
	}
	qla27xx_print_image(vha, "Primary image", &pri_image_status);

	if (qla27xx_check_image_status_signature(&pri_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Primary image signature (%#x) not valid\n",
		    le32_to_cpu(pri_image_status.signature));
		goto check_sec_image;
	}

	if (qla27xx_image_status_checksum(&pri_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Primary image checksum failed\n");
		goto check_sec_image;
	}

	valid_pri_image = true;

	/* Bit 0 of the status mask marks the image "active". */
	if (pri_image_status.image_status_mask & 1) {
		ql_dbg(ql_dbg_init, vha, 0x018d, "Primary image is active\n");
		active_pri_image = true;
	}

check_sec_image:
	if (!ha->flt_region_img_status_sec) {
		ql_dbg(ql_dbg_init, vha, 0x018a,
		    "Secondary image not addressed\n");
		goto check_valid_image;
	}

	/*
	 * NOTE(review): unlike the primary-image read above, this read's
	 * return value is ignored (no QLA_SUCCESS check); on a failed
	 * read the stack-allocated sec_image_status is used as-is and is
	 * only filtered by the signature/checksum checks below -- confirm
	 * whether this asymmetry is intentional.
	 */
	qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
	    ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
	qla27xx_print_image(vha, "Secondary image", &sec_image_status);

	if (qla27xx_check_image_status_signature(&sec_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018b,
		    "Secondary image signature (%#x) not valid\n",
		    le32_to_cpu(sec_image_status.signature));
		goto check_valid_image;
	}

	if (qla27xx_image_status_checksum(&sec_image_status)) {
		ql_dbg(ql_dbg_init, vha, 0x018c,
		    "Secondary image checksum failed\n");
		goto check_valid_image;
	}

	valid_sec_image = true;

	if (sec_image_status.image_status_mask & 1) {
		ql_dbg(ql_dbg_init, vha, 0x018d,
		    "Secondary image is active\n");
		active_sec_image = true;
	}

check_valid_image:
	if
(valid_pri_image && active_pri_image)
		active_regions->global = QLA27XX_PRIMARY_IMAGE;

	/* Secondary wins when primary is absent or older in generation. */
	if (valid_sec_image && active_sec_image) {
		if (!active_regions->global ||
		    qla27xx_compare_image_generation(
			&pri_image_status, &sec_image_status) < 0) {
			active_regions->global = QLA27XX_SECONDARY_IMAGE;
		}
	}

	ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
	    active_regions->global == QLA27XX_DEFAULT_IMAGE ?
		"default (boot/fw)" :
	    active_regions->global == QLA27XX_PRIMARY_IMAGE ?
		"primary" :
	    active_regions->global == QLA27XX_SECONDARY_IMAGE ?
		"secondary" : "invalid",
	    active_regions->global);
}

/*
 * A RISC firmware header is invalid when dwords 4-7 are all zero or
 * all ones (erased/blank flash).
 */
bool qla24xx_risc_firmware_invalid(uint32_t *dword)
{
	return
	    !(dword[4] | dword[5] | dword[6] | dword[7]) ||
	    !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]);
}

/*
 * qla24xx_load_risc_flash
 *	Load RISC firmware segments from flash at @faddr into the chip
 *	via load-RAM mailbox transfers, staging each fragment through the
 *	base request ring's DMA buffer.  On 27xx/28xx parts, also read
 *	the firmware-dump (fwdt) template array that follows the code.
 *
 * Returns:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED.  Template-read failures are
 *	not fatal: the fwdt slot is cleared and QLA_SUCCESS is returned.
 */
static int
qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
    uint32_t faddr)
{
	int rval;
	uint templates, segments, fragment;
	ulong i;
	uint j;
	ulong dlen;
	uint32_t *dcode;
	uint32_t risc_addr, risc_size, risc_attr = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct fwdt *fwdt = ha->fwdt;

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "FW: Loading firmware from flash (%x).\n", faddr);

	/* Sanity-check the firmware header before loading anything. */
	dcode = (uint32_t *)req->ring;
	qla24xx_read_flash_data(vha, dcode, faddr, 8);
	if (qla24xx_risc_firmware_invalid(dcode)) {
		ql_log(ql_log_fatal, vha, 0x008c,
		    "Unable to verify the integrity of flash firmware "
		    "image.\n");
		ql_log(ql_log_fatal, vha, 0x008d,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);
		return QLA_FUNCTION_FAILED;
	}

	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;
	segments = FA_RISC_CODE_SEGMENTS;
	for (j = 0; j < segments; j++) {
		ql_dbg(ql_dbg_init, vha, 0x008d,
		    "-> Loading segment %u...\n", j);
		qla24xx_read_flash_data(vha, dcode, faddr, 10);
		risc_addr = be32_to_cpu((__force __be32)dcode[2]);
		risc_size = be32_to_cpu((__force __be32)dcode[3]);
		if (!*srisc_addr) {
			*srisc_addr = risc_addr;
			risc_attr = be32_to_cpu((__force __be32)dcode[9]);
		}

		dlen = ha->fw_transfer_size
>> 2;
		/* Transfer the segment in fw_transfer_size chunks. */
		for (fragment = 0; risc_size; fragment++) {
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x008e,
			    "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
			    fragment, risc_addr, faddr, dlen);
			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32(dcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008f,
				    "-> Failed load firmware fragment %u.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			faddr += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
		}
	}

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_SUCCESS;

	/* BIT_9 in the risc attributes indicates two fwdt templates. */
	templates = (risc_attr & BIT_9) ? 2 : 1;
	ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
	for (j = 0; j < templates; j++, fwdt++) {
		vfree(fwdt->template);
		fwdt->template = NULL;
		fwdt->length = 0;

		dcode = (uint32_t *)req->ring;
		qla24xx_read_flash_data(vha, dcode, faddr, 7);
		risc_size = be32_to_cpu((__force __be32)dcode[2]);
		ql_dbg(ql_dbg_init, vha, 0x0161,
		    "-> fwdt%u template array at %#x (%#x dwords)\n",
		    j, faddr, risc_size);
		if (!risc_size || !~risc_size) {
			ql_dbg(ql_dbg_init, vha, 0x0162,
			    "-> fwdt%u failed to read array\n", j);
			goto failed;
		}

		/* skip header and ignore checksum */
		faddr += 7;
		risc_size -= 8;

		ql_dbg(ql_dbg_init, vha, 0x0163,
		    "-> fwdt%u template allocate template %#x words...\n",
		    j, risc_size);
		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x0164,
			    "-> fwdt%u failed allocate template.\n", j);
			goto failed;
		}

		dcode = fwdt->template;
		qla24xx_read_flash_data(vha, dcode, faddr, risc_size);

		if (!qla27xx_fwdt_template_valid(dcode)) {
			ql_log(ql_log_warn, vha, 0x0165,
			    "-> fwdt%u failed template validate\n", j);
			goto failed;
		}

		dlen = qla27xx_fwdt_template_size(dcode);
		ql_dbg(ql_dbg_init, vha, 0x0166,
		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
		    j, dlen, dlen / sizeof(*dcode));
		if (dlen > risc_size * sizeof(*dcode)) {
			ql_log(ql_log_warn, vha, 0x0167,
			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
			    j, dlen - risc_size * sizeof(*dcode));
			goto failed;
		}

		fwdt->length = dlen;
		ql_dbg(ql_dbg_init, vha, 0x0168,
		    "-> fwdt%u loaded template ok\n", j);

		faddr += risc_size + 1;
	}

	return QLA_SUCCESS;

failed:
	/* Dump-template failures are non-fatal: clear slot and continue. */
	vfree(fwdt->template);
	fwdt->template = NULL;
	fwdt->length = 0;

	return QLA_SUCCESS;
}

#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"

/*
 * qla2x00_load_risc
 *	Legacy (pre-FWI2) loader: fetch the firmware blob via
 *	request_firmware(), validate the header, and load each 16-bit
 *	word segment into RISC RAM through the base request ring buffer.
 *
 * Returns:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int	rval;
	int	i, fragment;
	uint16_t *wcode;
	__be16	 *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_info, vha, 0x0083,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_info, vha, 0x0084,
		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	wcode = (uint16_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (__force __be16 *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		ql_log(ql_log_fatal, vha, 0x0085,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	/* All-ones or all-zeros version words mean a blank/bad image. */
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) ||
	    (wcode[0] == 0 && wcode[1] == 0 && wcode[2] == 0 &&
	    wcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0086,
		    "Unable to verify integrity of firmware image.\n");
		ql_log(ql_log_fatal, vha, 0x0087,
		    "Firmware data: %04x %04x %04x %04x.\n",
		    wcode[0], wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	seg = blob->segs;
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		risc_size = be16_to_cpu(fwcode[3]);

		/* Validate firmware image size.
*/
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0088,
		    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;
			ql_dbg(ql_dbg_init, vha, 0x0089,
			    "Loading risc segment@ risc addr %x number of "
			    "words 0x%x.\n", risc_addr, wlen);

			for (i = 0; i < wlen; i++)
				wcode[i] = swab16((__force u32)fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008a,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}

/*
 * qla24xx_load_risc_blob
 *	FWI2 loader via request_firmware(): validate the blob header,
 *	load each 32-bit dword segment into RISC RAM, and on 27xx/28xx
 *	parts also pull the firmware-dump (fwdt) templates from the blob.
 *
 * Returns:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED; fwdt failures are non-fatal.
 */
static int
qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int	rval;
	uint templates, segments, fragment;
	uint32_t *dcode;
	ulong dlen;
	uint32_t risc_addr, risc_size, risc_attr = 0;
	ulong	i;
	uint	j;
	struct fw_blob *blob;
	__be32 *fwcode;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct fwdt *fwdt = ha->fwdt;

	ql_dbg(ql_dbg_init, vha, 0x0090,
	    "-> FW: Loading via request-firmware.\n");

	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_warn, vha, 0x0092,
		    "-> Firmware file not found.\n");
		return QLA_FUNCTION_FAILED;
	}

	fwcode = (__force __be32 *)blob->fw->data;
	dcode = (__force uint32_t *)fwcode;
	if (qla24xx_risc_firmware_invalid(dcode)) {
		ql_log(ql_log_fatal, vha, 0x0093,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		ql_log(ql_log_fatal, vha, 0x0095,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);
		return QLA_FUNCTION_FAILED;
	}

	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;
	segments = FA_RISC_CODE_SEGMENTS;
	for (j = 0; j < segments; j++) {
		ql_dbg(ql_dbg_init, vha, 0x0096,
		    "-> Loading segment %u...\n", j);
		risc_addr = be32_to_cpu(fwcode[2]);
		risc_size = be32_to_cpu(fwcode[3]);

		if (!*srisc_addr) {
			*srisc_addr = risc_addr;
			risc_attr = be32_to_cpu(fwcode[9]);
		}

		dlen = ha->fw_transfer_size >> 2;
		for (fragment = 0; risc_size; fragment++) {
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x0097,
			    "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
			    fragment, risc_addr,
			    (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
			    dlen);

			for (i = 0; i < dlen; i++)
				dcode[i] = swab32((__force u32)fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x0098,
				    "-> Failed load firmware fragment %u.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			fwcode += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
		}
	}

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_SUCCESS;

	/* BIT_9 in the risc attributes indicates two fwdt templates. */
	templates = (risc_attr & BIT_9) ? 2 : 1;
	ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
	for (j = 0; j < templates; j++, fwdt++) {
		vfree(fwdt->template);
		fwdt->template = NULL;
		fwdt->length = 0;

		risc_size = be32_to_cpu(fwcode[2]);
		ql_dbg(ql_dbg_init, vha, 0x0171,
		    "-> fwdt%u template array at %#x (%#x dwords)\n",
		    j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
		    risc_size);
		if (!risc_size || !~risc_size) {
			ql_dbg(ql_dbg_init, vha, 0x0172,
			    "-> fwdt%u failed to read array\n", j);
			goto failed;
		}

		/* skip header and ignore checksum */
		fwcode += 7;
		risc_size -= 8;

		ql_dbg(ql_dbg_init, vha, 0x0173,
		    "-> fwdt%u template allocate template %#x words...\n",
		    j, risc_size);
		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x0174,
			    "-> fwdt%u failed allocate template.\n", j);
			goto failed;
		}

		dcode = fwdt->template;
		for (i = 0; i < risc_size; i++)
			dcode[i] = (__force u32)fwcode[i];

		if (!qla27xx_fwdt_template_valid(dcode)) {
			ql_log(ql_log_warn, vha, 0x0175,
			    "-> fwdt%u failed template validate\n", j);
			goto failed;
		}

		dlen = qla27xx_fwdt_template_size(dcode);
		ql_dbg(ql_dbg_init, vha, 0x0176,
		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
		    j, dlen, dlen / sizeof(*dcode));
		if (dlen > risc_size * sizeof(*dcode)) {
			ql_log(ql_log_warn, vha, 0x0177,
			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
			    j, dlen - risc_size * sizeof(*dcode));
			goto failed;
		}

		fwdt->length = dlen;
		ql_dbg(ql_dbg_init, vha, 0x0178,
		    "-> fwdt%u loaded template ok\n", j);

		fwcode += risc_size + 1;
	}

	return QLA_SUCCESS;

failed:
	/* Dump-template failures are non-fatal: clear slot and continue. */
	vfree(fwdt->template);
	fwdt->template = NULL;
	fwdt->length = 0;

	return QLA_SUCCESS;
}

/*
 * qla24xx_load_risc
 *	ISP24xx entry point: prefer the request-firmware blob, fall back
 *	to the firmware region in flash.  ql2xfwloadbin == 1 forces the
 *	qla81xx flash-first ordering instead.
 */
int
qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;

	if (ql2xfwloadbin == 1)
		return qla81xx_load_risc(vha, srisc_addr);

	/*
	 * FW Load priority:
	 * 1) Firmware via request-firmware interface (.bin file).
	 * 2) Firmware residing in flash.
	 */
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	if (rval == QLA_SUCCESS)
		return rval;

	return qla24xx_load_risc_flash(vha, srisc_addr,
	    vha->hw->flt_region_fw);
}

int
qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct active_regions active_regions = { };

	/* ql2xfwloadbin == 2 forces the request-firmware path first. */
	if (ql2xfwloadbin == 2)
		goto try_blob_fw;

	/* FW Load priority:
	 * 1) Firmware residing in flash.
	 * 2) Firmware via request-firmware interface (.bin file).
	 * 3) Golden-Firmware residing in flash -- (limited operation).
*/
	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto try_primary_fw;

	/* 27xx/28xx may boot from the secondary flash image. */
	qla27xx_get_active_image(vha, &active_regions);

	if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
		goto try_primary_fw;

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading secondary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr,
	    ha->flt_region_fw_sec);
	if (!rval)
		return rval;

try_primary_fw:
	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading primary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr,
	    ha->flt_region_fw);
	if (!rval)
		return rval;

try_blob_fw:
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	if (!rval || !ha->flt_region_gold_fw)
		return rval;

	/* Last resort: golden firmware (limited operation). */
	ql_log(ql_log_info, vha, 0x0099,
	    "Attempting to fallback to golden firmware.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr,
	    ha->flt_region_gold_fw);
	if (rval)
		return rval;

	ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
	ha->flags.running_gold_fw = 1;
	return rval;
}

/*
 * qla2x00_try_to_stop_firmware
 *	Best-effort firmware stop on FWI2-capable adapters; retries up
 *	to 5 times, re-initializing the chip between attempts.  Silently
 *	returns when the adapter state makes a stop pointless or unsafe.
 */
void
qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
{
	int ret, retries;
	struct qla_hw_data *ha = vha->hw;

	if (ha->flags.pci_channel_io_perm_failure)
		return;
	if (!IS_FWI2_CAPABLE(ha))
		return;
	if (!ha->fw_major_version)
		return;
	if (!ha->flags.fw_started)
		return;

	ret = qla2x00_stop_firmware(vha);
	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
	    ret != QLA_INVALID_COMMAND && retries ; retries--) {
		ha->isp_ops->reset_chip(vha);
		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
			continue;
		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
			continue;
		ql_log(ql_log_info, vha, 0x8015,
		    "Attempting retry of stop-firmware command.\n");
		ret = qla2x00_stop_firmware(vha);
	}

	QLA_FW_STOPPED(ha);
	ha->flags.fw_init_done = 0;
}

/*
 * qla24xx_configure_vhba
 *	Bring an NPIV virtual port online: wait for base-port firmware
 *	readiness, log in to the SNS, and kick off a loop resync.
 *	Only valid for virtual ports (vp_idx != 0).
 */
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int rval2;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	if (!vha->vp_idx)
		return -EINVAL;

	rval = qla2x00_fw_ready(base_vha);
	if (rval == QLA_SUCCESS) {
clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS first */
	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
	    BIT_1);
	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
			ql_dbg(ql_dbg_init, vha, 0x0120,
			    "Failed SNS login: loop_id=%x, rval2=%d\n",
			    NPH_SNS, rval2);
		else
			ql_dbg(ql_dbg_init, vha, 0x0103,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
		return (QLA_FUNCTION_FAILED);
	}

	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}

/* 84XX Support **************************************************************/

/* Driver-global registry of shared 84xx chip state, one per PCI bus. */
static LIST_HEAD(qla_cs84xx_list);
static DEFINE_MUTEX(qla_cs84xx_mutex);

/*
 * qla84xx_get_chip
 *	Look up (taking a kref) or allocate the per-bus shared 84xx chip
 *	state.  Returns NULL on allocation failure.  Serialized by
 *	qla_cs84xx_mutex.
 */
static struct qla_chip_state_84xx *
qla84xx_get_chip(struct scsi_qla_host *vha)
{
	struct qla_chip_state_84xx *cs84xx;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&qla_cs84xx_mutex);

	/* Find any shared 84xx chip.
*/
	list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
		if (cs84xx->bus == ha->pdev->bus) {
			kref_get(&cs84xx->kref);
			goto done;
		}
	}

	cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
	if (!cs84xx)
		goto done;

	/* First user on this bus: initialize and register the state. */
	kref_init(&cs84xx->kref);
	spin_lock_init(&cs84xx->access_lock);
	mutex_init(&cs84xx->fw_update_mutex);
	cs84xx->bus = ha->pdev->bus;

	list_add_tail(&cs84xx->list, &qla_cs84xx_list);
done:
	mutex_unlock(&qla_cs84xx_mutex);
	return cs84xx;
}

/* kref release: unlink from the registry and free the state. */
static void
__qla84xx_chip_release(struct kref *kref)
{
	struct qla_chip_state_84xx *cs84xx =
	    container_of(kref, struct qla_chip_state_84xx, kref);

	mutex_lock(&qla_cs84xx_mutex);
	list_del(&cs84xx->list);
	mutex_unlock(&qla_cs84xx_mutex);
	kfree(cs84xx);
}

/* Drop this adapter's reference on the shared 84xx chip state. */
void
qla84xx_put_chip(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->cs84xx)
		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
}

/*
 * qla84xx_init_chip
 *	Verify the 84xx chip under the shared fw_update_mutex.
 *
 * Returns:
 *	QLA_SUCCESS only when verification succeeded and status[0] == 0.
 */
static int
qla84xx_init_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t status[2];
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->cs84xx->fw_update_mutex);

	rval = qla84xx_verify_chip(vha, status);

	mutex_unlock(&ha->cs84xx->fw_update_mutex);

	return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
	    QLA_SUCCESS;
}

/* 81XX Support **************************************************************/

/*
 * qla81xx_nvram_config
 *	Read and validate ISP81xx-family NVRAM/VPD (honoring the active
 *	flash region on 27xx/28xx parts), substitute defaults on a bad
 *	image, and build the extended initialization control block.
 */
int
qla81xx_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	struct init_cb_81xx *icb;
	struct nvram_81xx *nv;
	__le32 *dptr;
	uint8_t *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;
	uint32_t faddr;
	struct active_regions active_regions = { };

	rval = QLA_SUCCESS;
	icb = (struct init_cb_81xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address.
*/ ha->nvram_size = sizeof(*nv); ha->vpd_size = FA_NVRAM_VPD_SIZE; if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) ha->vpd_size = FA_VPD_SIZE_82XX; if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) qla28xx_get_aux_images(vha, &active_regions); /* Get VPD data into cache */ ha->vpd = ha->nvram + VPD_OFFSET; faddr = ha->flt_region_vpd; if (IS_QLA28XX(ha)) { if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) faddr = ha->flt_region_vpd_sec; ql_dbg(ql_dbg_init, vha, 0x0110, "Loading %s nvram image.\n", active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? "primary" : "secondary"); } ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size); /* Get NVRAM data into cache and calculate checksum. */ faddr = ha->flt_region_nvram; if (IS_QLA28XX(ha)) { if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) faddr = ha->flt_region_nvram_sec; } ql_dbg(ql_dbg_init, vha, 0x0110, "Loading %s nvram image.\n", active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? "primary" : "secondary"); ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); dptr = (__force __le32 *)nv; for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) chksum += le32_to_cpu(*dptr); ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111, "Contents of NVRAM:\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112, nv, ha->nvram_size); /* Bad NVRAM data, set defaults parameters. */ if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) || le16_to_cpu(nv->nvram_version) < ICB_VERSION) { /* Reset NVRAM data. */ ql_log(ql_log_info, vha, 0x0073, "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n", chksum, nv->id, le16_to_cpu(nv->nvram_version)); ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv)); ql_log(ql_log_info, vha, 0x0074, "Falling back to functioning (yet invalid -- WWPN) " "defaults.\n"); /* * Set default initialization control block. 
*/ memset(nv, 0, ha->nvram_size); nv->nvram_version = cpu_to_le16(ICB_VERSION); nv->version = cpu_to_le16(ICB_VERSION); nv->frame_payload_size = cpu_to_le16(2048); nv->execution_throttle = cpu_to_le16(0xFFFF); nv->exchange_count = cpu_to_le16(0); nv->port_name[0] = 0x21; nv->port_name[1] = 0x00 + ha->port_no + 1; nv->port_name[2] = 0x00; nv->port_name[3] = 0xe0; nv->port_name[4] = 0x8b; nv->port_name[5] = 0x1c; nv->port_name[6] = 0x55; nv->port_name[7] = 0x86; nv->node_name[0] = 0x20; nv->node_name[1] = 0x00; nv->node_name[2] = 0x00; nv->node_name[3] = 0xe0; nv->node_name[4] = 0x8b; nv->node_name[5] = 0x1c; nv->node_name[6] = 0x55; nv->node_name[7] = 0x86; nv->login_retry_count = cpu_to_le16(8); nv->interrupt_delay_timer = cpu_to_le16(0); nv->login_timeout = cpu_to_le16(0); nv->firmware_options_1 = cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); nv->firmware_options_2 = cpu_to_le32(2 << 4); nv->firmware_options_2 |= cpu_to_le32(BIT_12); nv->firmware_options_3 = cpu_to_le32(2 << 13); nv->host_p = cpu_to_le32(BIT_11|BIT_10); nv->efi_parameters = cpu_to_le32(0); nv->reset_delay = 5; nv->max_luns_per_target = cpu_to_le16(128); nv->port_down_retry_count = cpu_to_le16(30); nv->link_down_timeout = cpu_to_le16(180); nv->enode_mac[0] = 0x00; nv->enode_mac[1] = 0xC0; nv->enode_mac[2] = 0xDD; nv->enode_mac[3] = 0x04; nv->enode_mac[4] = 0x05; nv->enode_mac[5] = 0x06 + ha->port_no + 1; rval = 1; } if (IS_T10_PI_CAPABLE(ha)) nv->frame_payload_size &= cpu_to_le16(~7); qlt_81xx_config_nvram_stage1(vha, nv); /* Reset Initialization control block */ memset(icb, 0, ha->init_cb_size); /* Copy 1st segment. */ dptr1 = (uint8_t *)icb; dptr2 = (uint8_t *)&nv->version; cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version; while (cnt--) *dptr1++ = *dptr2++; icb->login_retry_count = nv->login_retry_count; /* Copy 2nd segment. 
*/ dptr1 = (uint8_t *)&icb->interrupt_delay_timer; dptr2 = (uint8_t *)&nv->interrupt_delay_timer; cnt = (uint8_t *)&icb->reserved_5 - (uint8_t *)&icb->interrupt_delay_timer; while (cnt--) *dptr1++ = *dptr2++; memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac)); /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */ if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) { icb->enode_mac[0] = 0x00; icb->enode_mac[1] = 0xC0; icb->enode_mac[2] = 0xDD; icb->enode_mac[3] = 0x04; icb->enode_mac[4] = 0x05; icb->enode_mac[5] = 0x06 + ha->port_no + 1; } /* Use extended-initialization control block. */ memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); /* * Setup driver NVRAM options. */ qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), "QLE8XXX"); qlt_81xx_config_nvram_stage2(vha, icb); /* Use alternate WWN? */ if (nv->host_p & cpu_to_le32(BIT_15)) { memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); } /* Prepare nodename */ if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) { /* * Firmware will apply the following mask if the nodename was * not provided. */ memcpy(icb->node_name, icb->port_name, WWN_SIZE); icb->node_name[0] &= 0xF0; } if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { if ((nv->enhanced_features & BIT_7) == 0) ha->flags.scm_supported_a = 1; } /* Set host adapter parameters. */ ha->flags.disable_risc_code_load = 0; ha->flags.enable_lip_reset = 0; ha->flags.enable_lip_full_login = le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0; ha->flags.enable_target_reset = le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0; ha->flags.enable_led_scheme = 0; ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 
1 : 0; ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) & (BIT_6 | BIT_5 | BIT_4)) >> 4; /* save HBA serial number */ ha->serial0 = icb->port_name[5]; ha->serial1 = icb->port_name[6]; ha->serial2 = icb->port_name[7]; memcpy(vha->node_name, icb->node_name, WWN_SIZE); memcpy(vha->port_name, icb->port_name, WWN_SIZE); icb->execution_throttle = cpu_to_le16(0xFFFF); ha->retry_count = le16_to_cpu(nv->login_retry_count); /* Set minimum login_timeout to 4 seconds. */ if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) nv->login_timeout = cpu_to_le16(ql2xlogintimeout); if (le16_to_cpu(nv->login_timeout) < 4) nv->login_timeout = cpu_to_le16(4); ha->login_timeout = le16_to_cpu(nv->login_timeout); /* Set minimum RATOV to 100 tenths of a second. */ ha->r_a_tov = 100; ha->loop_reset_delay = nv->reset_delay; /* Link Down Timeout = 0: * * When Port Down timer expires we will start returning * I/O's to OS with "DID_NO_CONNECT". * * Link Down Timeout != 0: * * The driver waits for the link to come up after link down * before returning I/Os to OS with "DID_NO_CONNECT". */ if (le16_to_cpu(nv->link_down_timeout) == 0) { ha->loop_down_abort_time = (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT); } else { ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout); ha->loop_down_abort_time = (LOOP_DOWN_TIME - ha->link_down_timeout); } /* Need enough time to try and get the port back. 
*/ ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count); if (qlport_down_retry) ha->port_down_retry_count = qlport_down_retry; /* Set login_retry_count */ ha->login_retry_count = le16_to_cpu(nv->login_retry_count); if (ha->port_down_retry_count == le16_to_cpu(nv->port_down_retry_count) && ha->port_down_retry_count > 3) ha->login_retry_count = ha->port_down_retry_count; else if (ha->port_down_retry_count > (int)ha->login_retry_count) ha->login_retry_count = ha->port_down_retry_count; if (ql2xloginretrycount) ha->login_retry_count = ql2xloginretrycount; /* if not running MSI-X we need handshaking on interrupts */ if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) icb->firmware_options_2 |= cpu_to_le32(BIT_22); /* Enable ZIO. */ if (!vha->flags.init_done) { ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & (BIT_3 | BIT_2 | BIT_1 | BIT_0); ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? le16_to_cpu(icb->interrupt_delay_timer) : 2; } icb->firmware_options_2 &= cpu_to_le32( ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); vha->flags.process_response_queue = 0; if (ha->zio_mode != QLA_ZIO_DISABLED) { ha->zio_mode = QLA_ZIO_MODE_6; ql_log(ql_log_info, vha, 0x0075, "ZIO mode %d enabled; timer delay (%d us).\n", ha->zio_mode, ha->zio_timer * 100); icb->firmware_options_2 |= cpu_to_le32( (uint32_t)ha->zio_mode); icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); vha->flags.process_response_queue = 1; } /* enable RIDA Format2 */ icb->firmware_options_3 |= cpu_to_le32(BIT_0); /* N2N: driver will initiate Login instead of FW */ icb->firmware_options_3 |= cpu_to_le32(BIT_8); /* Determine NVMe/FCP priority for target ports */ ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha); if (rval) { ql_log(ql_log_warn, vha, 0x0076, "NVRAM configuration failed.\n"); } return (rval); } int qla82xx_restart_isp(scsi_qla_host_t *vha) { int status, rval; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp; unsigned long flags; 
status = qla2x00_init_rings(vha); if (!status) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); ha->flags.chip_reset_done = 1; status = qla2x00_fw_ready(vha); if (!status) { /* Issue a marker after FW becomes ready. */ qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); vha->flags.online = 1; set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } /* if no cable then assume it's good */ if ((vha->device_flags & DFLG_NO_CABLE)) status = 0; } if (!status) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); if (!atomic_read(&vha->loop_down_timer)) { /* * Issue marker command only when we are going * to start the I/O . */ vha->marker_needed = 1; } ha->isp_ops->enable_intrs(ha); ha->isp_abort_cnt = 0; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); /* Update the firmware version */ status = qla82xx_check_md_needed(vha); if (ha->fce) { ha->flags.fce_enabled = 1; memset(ha->fce, 0, fce_calc_size(ha->fce_bufs)); rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs); if (rval) { ql_log(ql_log_warn, vha, 0x8001, "Unable to reinitialize FCE (%d).\n", rval); ha->flags.fce_enabled = 0; } } if (ha->eft) { memset(ha->eft, 0, EFT_SIZE); rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS); if (rval) { ql_log(ql_log_warn, vha, 0x8010, "Unable to reinitialize EFT (%d).\n", rval); } } } if (!status) { ql_dbg(ql_dbg_taskm, vha, 0x8011, "qla82xx_restart_isp succeeded.\n"); spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { if (vp->vp_idx) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); qla2x00_vp_abort_isp(vp); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); } } spin_unlock_irqrestore(&ha->vport_slock, flags); } else { ql_log(ql_log_warn, vha, 0x8016, "qla82xx_restart_isp **** FAILED ****.\n"); } return status; } /* * qla24xx_get_fcp_prio * Gets the fcp cmd priority value for the logged in port. 
* Looks for a match of the port descriptors within * each of the fcp prio config entries. If a match is found, * the tag (priority) value is returned. * * Input: * vha = scsi host structure pointer. * fcport = port structure pointer. * * Return: * non-zero (if found) * -1 (if not found) * * Context: * Kernel context */ static int qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) { int i, entries; uint8_t pid_match, wwn_match; int priority; uint32_t pid1, pid2; uint64_t wwn1, wwn2; struct qla_fcp_prio_entry *pri_entry; struct qla_hw_data *ha = vha->hw; if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled) return -1; priority = -1; entries = ha->fcp_prio_cfg->num_entries; pri_entry = &ha->fcp_prio_cfg->entry[0]; for (i = 0; i < entries; i++) { pid_match = wwn_match = 0; if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) { pri_entry++; continue; } /* check source pid for a match */ if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) { pid1 = pri_entry->src_pid & INVALID_PORT_ID; pid2 = vha->d_id.b24 & INVALID_PORT_ID; if (pid1 == INVALID_PORT_ID) pid_match++; else if (pid1 == pid2) pid_match++; } /* check destination pid for a match */ if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) { pid1 = pri_entry->dst_pid & INVALID_PORT_ID; pid2 = fcport->d_id.b24 & INVALID_PORT_ID; if (pid1 == INVALID_PORT_ID) pid_match++; else if (pid1 == pid2) pid_match++; } /* check source WWN for a match */ if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) { wwn1 = wwn_to_u64(vha->port_name); wwn2 = wwn_to_u64(pri_entry->src_wwpn); if (wwn2 == (uint64_t)-1) wwn_match++; else if (wwn1 == wwn2) wwn_match++; } /* check destination WWN for a match */ if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) { wwn1 = wwn_to_u64(fcport->port_name); wwn2 = wwn_to_u64(pri_entry->dst_wwpn); if (wwn2 == (uint64_t)-1) wwn_match++; else if (wwn1 == wwn2) wwn_match++; } if (pid_match == 2 || wwn_match == 2) { /* Found a matching entry */ if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID) priority = 
pri_entry->tag; break; } pri_entry++; } return priority; } /* * qla24xx_update_fcport_fcp_prio * Activates fcp priority for the logged in fc port * * Input: * vha = scsi host structure pointer. * fcp = port structure pointer. * * Return: * QLA_SUCCESS or QLA_FUNCTION_FAILED * * Context: * Kernel context. */ int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) { int ret; int priority; uint16_t mb[5]; if (fcport->port_type != FCT_TARGET || fcport->loop_id == FC_NO_LOOP_ID) return QLA_FUNCTION_FAILED; priority = qla24xx_get_fcp_prio(vha, fcport); if (priority < 0) return QLA_FUNCTION_FAILED; if (IS_P3P_TYPE(vha->hw)) { fcport->fcp_prio = priority & 0xf; return QLA_SUCCESS; } ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb); if (ret == QLA_SUCCESS) { if (fcport->fcp_prio != priority) ql_dbg(ql_dbg_user, vha, 0x709e, "Updated FCP_CMND priority - value=%d loop_id=%d " "port_id=%02x%02x%02x.\n", priority, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); fcport->fcp_prio = priority & 0xf; } else ql_dbg(ql_dbg_user, vha, 0x704f, "Unable to update FCP_CMND priority - ret=0x%x for " "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); return ret; } /* * qla24xx_update_all_fcp_prio * Activates fcp priority for all the logged in ports * * Input: * ha = adapter block pointer. * * Return: * QLA_SUCCESS or QLA_FUNCTION_FAILED * * Context: * Kernel context. 
*/ int qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha) { int ret; fc_port_t *fcport; ret = QLA_FUNCTION_FAILED; /* We need to set priority for all logged in ports */ list_for_each_entry(fcport, &vha->vp_fcports, list) ret = qla24xx_update_fcport_fcp_prio(vha, fcport); return ret; } struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int vp_idx, bool startqp) { int rsp_id = 0; int req_id = 0; int i; struct qla_hw_data *ha = vha->hw; uint16_t qpair_id = 0; struct qla_qpair *qpair = NULL; struct qla_msix_entry *msix; if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) { ql_log(ql_log_warn, vha, 0x00181, "FW/Driver is not multi-queue capable.\n"); return NULL; } if (ql2xmqsupport || ql2xnvmeenable) { qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); if (qpair == NULL) { ql_log(ql_log_warn, vha, 0x0182, "Failed to allocate memory for queue pair.\n"); return NULL; } qpair->hw = vha->hw; qpair->vha = vha; qpair->qp_lock_ptr = &qpair->qp_lock; spin_lock_init(&qpair->qp_lock); qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 
1 : 0; /* Assign available que pair id */ mutex_lock(&ha->mq_lock); qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); if (ha->num_qpairs >= ha->max_qpairs) { mutex_unlock(&ha->mq_lock); ql_log(ql_log_warn, vha, 0x0183, "No resources to create additional q pair.\n"); goto fail_qid_map; } ha->num_qpairs++; set_bit(qpair_id, ha->qpair_qid_map); ha->queue_pair_map[qpair_id] = qpair; qpair->id = qpair_id; qpair->vp_idx = vp_idx; qpair->fw_started = ha->flags.fw_started; INIT_LIST_HEAD(&qpair->hints_list); qpair->chip_reset = ha->base_qpair->chip_reset; qpair->enable_class_2 = ha->base_qpair->enable_class_2; qpair->enable_explicit_conf = ha->base_qpair->enable_explicit_conf; for (i = 0; i < ha->msix_count; i++) { msix = &ha->msix_entries[i]; if (msix->in_use) continue; qpair->msix = msix; ql_dbg(ql_dbg_multiq, vha, 0xc00f, "Vector %x selected for qpair\n", msix->vector); break; } if (!qpair->msix) { ql_log(ql_log_warn, vha, 0x0184, "Out of MSI-X vectors!.\n"); goto fail_msix; } qpair->msix->in_use = 1; list_add_tail(&qpair->qp_list_elem, &vha->qp_list); qpair->pdev = ha->pdev; if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) qpair->reqq_start_iocbs = qla_83xx_start_iocbs; mutex_unlock(&ha->mq_lock); /* Create response queue first */ rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp); if (!rsp_id) { ql_log(ql_log_warn, vha, 0x0185, "Failed to create response queue.\n"); goto fail_rsp; } qpair->rsp = ha->rsp_q_map[rsp_id]; /* Create request queue */ req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos, startqp); if (!req_id) { ql_log(ql_log_warn, vha, 0x0186, "Failed to create request queue.\n"); goto fail_req; } qpair->req = ha->req_q_map[req_id]; qpair->rsp->req = qpair->req; qpair->rsp->qpair = qpair; /* init qpair to this cpu. Will adjust at run time. 
*/ qla_cpu_update(qpair, smp_processor_id()); if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) qpair->difdix_supported = 1; } qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); if (!qpair->srb_mempool) { ql_log(ql_log_warn, vha, 0xd036, "Failed to create srb mempool for qpair %d\n", qpair->id); goto fail_mempool; } /* Mark as online */ qpair->online = 1; if (!vha->flags.qpairs_available) vha->flags.qpairs_available = 1; ql_dbg(ql_dbg_multiq, vha, 0xc00d, "Request/Response queue pair created, id %d\n", qpair->id); ql_dbg(ql_dbg_init, vha, 0x0187, "Request/Response queue pair created, id %d\n", qpair->id); } return qpair; fail_mempool: fail_req: qla25xx_delete_rsp_que(vha, qpair->rsp); fail_rsp: mutex_lock(&ha->mq_lock); qpair->msix->in_use = 0; list_del(&qpair->qp_list_elem); if (list_empty(&vha->qp_list)) vha->flags.qpairs_available = 0; fail_msix: ha->queue_pair_map[qpair_id] = NULL; clear_bit(qpair_id, ha->qpair_qid_map); ha->num_qpairs--; mutex_unlock(&ha->mq_lock); fail_qid_map: kfree(qpair); return NULL; } int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) { int ret = QLA_FUNCTION_FAILED; struct qla_hw_data *ha = qpair->hw; qpair->delete_in_progress = 1; ret = qla25xx_delete_req_que(vha, qpair->req); if (ret != QLA_SUCCESS) goto fail; ret = qla25xx_delete_rsp_que(vha, qpair->rsp); if (ret != QLA_SUCCESS) goto fail; mutex_lock(&ha->mq_lock); ha->queue_pair_map[qpair->id] = NULL; clear_bit(qpair->id, ha->qpair_qid_map); ha->num_qpairs--; list_del(&qpair->qp_list_elem); if (list_empty(&vha->qp_list)) { vha->flags.qpairs_available = 0; vha->flags.qpairs_req_created = 0; vha->flags.qpairs_rsp_created = 0; } mempool_destroy(qpair->srb_mempool); kfree(qpair); mutex_unlock(&ha->mq_lock); return QLA_SUCCESS; fail: return ret; } uint64_t qla2x00_count_set_bits(uint32_t num) { /* Brian Kernighan's Algorithm */ u64 count = 0; while (num) { num &= (num - 1); count++; } return count; } uint64_t 
qla2x00_get_num_tgts(scsi_qla_host_t *vha) { fc_port_t *f, *tf; u64 count = 0; f = NULL; tf = NULL; list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { if (f->port_type != FCT_TARGET) continue; count++; } return count; } int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags) { scsi_qla_host_t *vha = shost_priv(host); fc_port_t *fcport = NULL; unsigned long int_flags; if (flags & QLA2XX_HW_ERROR) vha->hw_err_cnt = 0; if (flags & QLA2XX_SHT_LNK_DWN) vha->short_link_down_cnt = 0; if (flags & QLA2XX_INT_ERR) vha->interface_err_cnt = 0; if (flags & QLA2XX_CMD_TIMEOUT) vha->cmd_timeout_cnt = 0; if (flags & QLA2XX_RESET_CMD_ERR) vha->reset_cmd_err_cnt = 0; if (flags & QLA2XX_TGT_SHT_LNK_DOWN) { spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags); list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->tgt_short_link_down_cnt = 0; fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags); } vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; return 0; } int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags) { return qla2xxx_reset_stats(host, flags); } int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags) { return qla2xxx_reset_stats(host, flags); } int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags, void *data, u64 size) { scsi_qla_host_t *vha = shost_priv(host); struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data; struct ql_vnd_stats *rsp_data = &resp->stats; u64 ini_entry_count = 0; u64 i = 0; u64 entry_count = 0; u64 num_tgt = 0; u32 tmp_stat_type = 0; fc_port_t *fcport = NULL; unsigned long int_flags; /* Copy stat type to work on it */ tmp_stat_type = flags; if (tmp_stat_type & BIT_17) { num_tgt = qla2x00_get_num_tgts(vha); /* unset BIT_17 */ tmp_stat_type &= ~(1 << 17); } ini_entry_count = qla2x00_count_set_bits(tmp_stat_type); entry_count = ini_entry_count + num_tgt; rsp_data->entry_count = entry_count; i = 0; if (flags & QLA2XX_HW_ERROR) { 
rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR; rsp_data->entry[i].tgt_num = 0x0; rsp_data->entry[i].cnt = vha->hw_err_cnt; i++; } if (flags & QLA2XX_SHT_LNK_DWN) { rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN; rsp_data->entry[i].tgt_num = 0x0; rsp_data->entry[i].cnt = vha->short_link_down_cnt; i++; } if (flags & QLA2XX_INT_ERR) { rsp_data->entry[i].stat_type = QLA2XX_INT_ERR; rsp_data->entry[i].tgt_num = 0x0; rsp_data->entry[i].cnt = vha->interface_err_cnt; i++; } if (flags & QLA2XX_CMD_TIMEOUT) { rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT; rsp_data->entry[i].tgt_num = 0x0; rsp_data->entry[i].cnt = vha->cmd_timeout_cnt; i++; } if (flags & QLA2XX_RESET_CMD_ERR) { rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR; rsp_data->entry[i].tgt_num = 0x0; rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt; i++; } /* i will continue from previous loop, as target * entries are after initiator */ if (flags & QLA2XX_TGT_SHT_LNK_DOWN) { spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags); list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->port_type != FCT_TARGET) continue; if (!fcport->rport) continue; rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN; rsp_data->entry[i].tgt_num = fcport->rport->number; rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt; i++; } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags); } resp->status = EXT_STATUS_OK; return 0; } int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags, struct fc_rport *rport, void *data, u64 size) { struct ql_vnd_tgt_stats_resp *tgt_data = data; fc_port_t *fcport = *(fc_port_t **)rport->dd_data; tgt_data->status = 0; tgt_data->stats.entry_count = 1; tgt_data->stats.entry[0].stat_type = flags; tgt_data->stats.entry[0].tgt_num = rport->number; tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt; return 0; } int qla2xxx_disable_port(struct Scsi_Host *host) { scsi_qla_host_t *vha = shost_priv(host); vha->hw->flags.port_isolated = 1; if (qla2x00_chip_is_down(vha)) 
return 0; if (vha->flags.online) { qla2x00_abort_isp_cleanup(vha); qla2x00_wait_for_sess_deletion(vha); } return 0; } int qla2xxx_enable_port(struct Scsi_Host *host) { scsi_qla_host_t *vha = shost_priv(host); vha->hw->flags.port_isolated = 0; /* Set the flag to 1, so that isp_abort can proceed */ vha->flags.online = 1; set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); return 0; }
openwrt-es/linux
drivers/scsi/qla2xxx/qla_init.c
C
gpl-2.0
250,879
<?php if (!defined('W3TC')) die(); ?> <?php $this->checkbox('minify.html.strip.crlf', false, 'html_') ?> Line break removal</label><br />
sudocoda/rs27
wp-content/plugins/w3-total-cache/inc/options/minify/html.php
PHP
gpl-2.0
140
/* * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package com.sun.media.sound; /** * This class is used to identify destinations in connection blocks, * see ModelConnectionBlock. 
* * @author Karl Helgason */ public final class ModelDestination { public static final ModelIdentifier DESTINATION_NONE = null; public static final ModelIdentifier DESTINATION_KEYNUMBER = new ModelIdentifier("noteon", "keynumber"); public static final ModelIdentifier DESTINATION_VELOCITY = new ModelIdentifier("noteon", "velocity"); public static final ModelIdentifier DESTINATION_PITCH = new ModelIdentifier("osc", "pitch"); // cent public static final ModelIdentifier DESTINATION_GAIN = new ModelIdentifier("mixer", "gain"); // cB public static final ModelIdentifier DESTINATION_PAN = new ModelIdentifier("mixer", "pan"); // 0.1 % public static final ModelIdentifier DESTINATION_REVERB = new ModelIdentifier("mixer", "reverb"); // 0.1 % public static final ModelIdentifier DESTINATION_CHORUS = new ModelIdentifier("mixer", "chorus"); // 0.1 % public static final ModelIdentifier DESTINATION_LFO1_DELAY = new ModelIdentifier("lfo", "delay", 0); // timecent public static final ModelIdentifier DESTINATION_LFO1_FREQ = new ModelIdentifier("lfo", "freq", 0); // cent public static final ModelIdentifier DESTINATION_LFO2_DELAY = new ModelIdentifier("lfo", "delay", 1); // timecent public static final ModelIdentifier DESTINATION_LFO2_FREQ = new ModelIdentifier("lfo", "freq", 1); // cent public static final ModelIdentifier DESTINATION_EG1_DELAY = new ModelIdentifier("eg", "delay", 0); // timecent public static final ModelIdentifier DESTINATION_EG1_ATTACK = new ModelIdentifier("eg", "attack", 0); // timecent public static final ModelIdentifier DESTINATION_EG1_HOLD = new ModelIdentifier("eg", "hold", 0); // timecent public static final ModelIdentifier DESTINATION_EG1_DECAY = new ModelIdentifier("eg", "decay", 0); // timecent public static final ModelIdentifier DESTINATION_EG1_SUSTAIN = new ModelIdentifier("eg", "sustain", 0); // 0.1 % (I want this to be value not %) public static final ModelIdentifier DESTINATION_EG1_RELEASE = new ModelIdentifier("eg", "release", 0); // timecent public 
static final ModelIdentifier DESTINATION_EG1_SHUTDOWN = new ModelIdentifier("eg", "shutdown", 0); // timecent public static final ModelIdentifier DESTINATION_EG2_DELAY = new ModelIdentifier("eg", "delay", 1); // timecent public static final ModelIdentifier DESTINATION_EG2_ATTACK = new ModelIdentifier("eg", "attack", 1); // timecent public static final ModelIdentifier DESTINATION_EG2_HOLD = new ModelIdentifier("eg", "hold", 1); // 0.1 % public static final ModelIdentifier DESTINATION_EG2_DECAY = new ModelIdentifier("eg", "decay", 1); // timecent public static final ModelIdentifier DESTINATION_EG2_SUSTAIN = new ModelIdentifier("eg", "sustain", 1); // 0.1 % ( I want this to be value not %) public static final ModelIdentifier DESTINATION_EG2_RELEASE = new ModelIdentifier("eg", "release", 1); // timecent public static final ModelIdentifier DESTINATION_EG2_SHUTDOWN = new ModelIdentifier("eg", "shutdown", 1); // timecent public static final ModelIdentifier DESTINATION_FILTER_FREQ = new ModelIdentifier("filter", "freq", 0); // cent public static final ModelIdentifier DESTINATION_FILTER_Q = new ModelIdentifier("filter", "q", 0); // cB private ModelIdentifier destination = DESTINATION_NONE; private ModelTransform transform = new ModelStandardTransform(); public ModelDestination() { } public ModelDestination(ModelIdentifier id) { destination = id; } public ModelIdentifier getIdentifier() { return destination; } public void setIdentifier(ModelIdentifier destination) { this.destination = destination; } public ModelTransform getTransform() { return transform; } public void setTransform(ModelTransform transform) { this.transform = transform; } }
FauxFaux/jdk9-jdk
src/java.desktop/share/classes/com/sun/media/sound/ModelDestination.java
Java
gpl-2.0
5,633
/*---------------------------------------------------------------------------*\ ========= | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox \\ / O peration | \\ / A nd | Copyright (C) 2011-2015 OpenFOAM Foundation \\/ M anipulation | ------------------------------------------------------------------------------- License This file is part of OpenFOAM. OpenFOAM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OpenFOAM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>. Class Foam::extrudeModels::linearDirection Description Extrudes by transforming points in a specified direction by a given distance \*---------------------------------------------------------------------------*/ #ifndef linearDirection_H #define linearDirection_H #include "point.H" #include "extrudeModel.H" // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // namespace Foam { namespace extrudeModels { /*---------------------------------------------------------------------------*\ Class linearDirection Declaration \*---------------------------------------------------------------------------*/ class linearDirection : public extrudeModel { // Private data //- Extrude direction vector direction_; //- Layer thickness scalar thickness_; public: //- Runtime type information TypeName("linearDirection"); // Constructors //- Construct from dictionary linearDirection(const dictionary& dict); //- Destructor virtual ~linearDirection(); // Member Operators point operator() ( const point& surfacePoint, const vector& 
surfaceNormal, const label layer ) const; }; // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // } // End namespace extrudeModels } // End namespace Foam // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // #endif // ************************************************************************* //
OpenFOAM/OpenFOAM-2.3.x
src/mesh/extrudeModel/linearDirection/linearDirection.H
C++
gpl-3.0
2,664
/* YUI 3.5.1 (build 22) Copyright 2012 Yahoo! Inc. All rights reserved. Licensed under the BSD License. http://yuilibrary.com/license/ */ YUI.add('event-valuechange', function(Y) { /** Adds a synthetic `valueChange` event that fires when the `value` property of an `<input>` or `<textarea>` node changes as a result of a keystroke, mouse operation, or input method editor (IME) input event. Usage: YUI().use('event-valuechange', function (Y) { Y.one('#my-input').on('valueChange', function (e) { }); }); @module event-valuechange **/ /** Provides the implementation for the synthetic `valueChange` event. This class isn't meant to be used directly, but is public to make monkeypatching possible. Usage: YUI().use('event-valuechange', function (Y) { Y.one('#my-input').on('valueChange', function (e) { }); }); @class ValueChange @static */ var DATA_KEY = '_valuechange', VALUE = 'value', config, // defined at the end of this file // Just a simple namespace to make methods overridable. VC = { // -- Static Constants ----------------------------------------------------- /** Interval (in milliseconds) at which to poll for changes to the value of an element with one or more `valueChange` subscribers when the user is likely to be interacting with it. @property POLL_INTERVAL @type Number @default 50 @static **/ POLL_INTERVAL: 50, /** Timeout (in milliseconds) after which to stop polling when there hasn't been any new activity (keypresses, mouse clicks, etc.) on an element. @property TIMEOUT @type Number @default 10000 @static **/ TIMEOUT: 10000, // -- Protected Static Methods --------------------------------------------- /** Called at an interval to poll for changes to the value of the specified node. @method _poll @param {Node} node Node to poll. @param {Object} options Options object. @param {EventFacade} [options.e] Event facade of the event that initiated the polling. 
@protected @static **/ _poll: function (node, options) { var domNode = node._node, // performance cheat; getValue() is a big hit when polling event = options.e, newVal = domNode && domNode.value, vcData = node._data && node._data[DATA_KEY], // another perf cheat facade, prevVal; if (!domNode || !vcData) { VC._stopPolling(node); return; } prevVal = vcData.prevVal; if (newVal !== prevVal) { vcData.prevVal = newVal; facade = { _event : event, currentTarget: (event && event.currentTarget) || node, newVal : newVal, prevVal : prevVal, target : (event && event.target) || node }; Y.Object.each(vcData.notifiers, function (notifier) { notifier.fire(facade); }); VC._refreshTimeout(node); } }, /** Restarts the inactivity timeout for the specified node. @method _refreshTimeout @param {Node} node Node to refresh. @param {SyntheticEvent.Notifier} notifier @protected @static **/ _refreshTimeout: function (node, notifier) { // The node may have been destroyed, so check that it still exists // before trying to get its data. Otherwise an error will occur. if (!node._node) { return; } var vcData = node.getData(DATA_KEY); VC._stopTimeout(node); // avoid dupes // If we don't see any changes within the timeout period (10 seconds by // default), stop polling. vcData.timeout = setTimeout(function () { VC._stopPolling(node, notifier); }, VC.TIMEOUT); }, /** Begins polling for changes to the `value` property of the specified node. If polling is already underway for the specified node, it will not be restarted unless the `force` option is `true` @method _startPolling @param {Node} node Node to watch. @param {SyntheticEvent.Notifier} notifier @param {Object} options Options object. @param {EventFacade} [options.e] Event facade of the event that initiated the polling. @param {Boolean} [options.force=false] If `true`, polling will be restarted even if we're already polling this node. 
@protected @static **/ _startPolling: function (node, notifier, options) { if (!node.test('input,textarea')) { return; } var vcData = node.getData(DATA_KEY); if (!vcData) { vcData = {prevVal: node.get(VALUE)}; node.setData(DATA_KEY, vcData); } vcData.notifiers || (vcData.notifiers = {}); // Don't bother continuing if we're already polling this node, unless // `options.force` is true. if (vcData.interval) { if (options.force) { VC._stopPolling(node, notifier); // restart polling, but avoid dupe polls } else { vcData.notifiers[Y.stamp(notifier)] = notifier; return; } } // Poll for changes to the node's value. We can't rely on keyboard // events for this, since the value may change due to a mouse-initiated // paste event, an IME input event, or for some other reason that // doesn't trigger a key event. vcData.notifiers[Y.stamp(notifier)] = notifier; vcData.interval = setInterval(function () { VC._poll(node, vcData, options); }, VC.POLL_INTERVAL); VC._refreshTimeout(node, notifier); }, /** Stops polling for changes to the specified node's `value` attribute. @method _stopPolling @param {Node} node Node to stop polling on. @param {SyntheticEvent.Notifier} [notifier] Notifier to remove from the node. If not specified, all notifiers will be removed. @protected @static **/ _stopPolling: function (node, notifier) { // The node may have been destroyed, so check that it still exists // before trying to get its data. Otherwise an error will occur. if (!node._node) { return; } var vcData = node.getData(DATA_KEY) || {}; clearInterval(vcData.interval); delete vcData.interval; VC._stopTimeout(node); if (notifier) { vcData.notifiers && delete vcData.notifiers[Y.stamp(notifier)]; } else { vcData.notifiers = {}; } }, /** Clears the inactivity timeout for the specified node, if any. 
@method _stopTimeout @param {Node} node @protected @static **/ _stopTimeout: function (node) { var vcData = node.getData(DATA_KEY) || {}; clearTimeout(vcData.timeout); delete vcData.timeout; }, // -- Protected Static Event Handlers -------------------------------------- /** Stops polling when a node's blur event fires. @method _onBlur @param {EventFacade} e @param {SyntheticEvent.Notifier} notifier @protected @static **/ _onBlur: function (e, notifier) { VC._stopPolling(e.currentTarget, notifier); }, /** Resets a node's history and starts polling when a focus event occurs. @method _onFocus @param {EventFacade} e @param {SyntheticEvent.Notifier} notifier @protected @static **/ _onFocus: function (e, notifier) { var node = e.currentTarget, vcData = node.getData(DATA_KEY); if (!vcData) { vcData = {}; node.setData(DATA_KEY, vcData); } vcData.prevVal = node.get(VALUE); VC._startPolling(node, notifier, {e: e}); }, /** Starts polling when a node receives a keyDown event. @method _onKeyDown @param {EventFacade} e @param {SyntheticEvent.Notifier} notifier @protected @static **/ _onKeyDown: function (e, notifier) { VC._startPolling(e.currentTarget, notifier, {e: e}); }, /** Starts polling when an IME-related keyUp event occurs on a node. @method _onKeyUp @param {EventFacade} e @param {SyntheticEvent.Notifier} notifier @protected @static **/ _onKeyUp: function (e, notifier) { // These charCodes indicate that an IME has started. We'll restart // polling and give the IME up to 10 seconds (by default) to finish. if (e.charCode === 229 || e.charCode === 197) { VC._startPolling(e.currentTarget, notifier, { e : e, force: true }); } }, /** Starts polling when a node receives a mouseDown event. @method _onMouseDown @param {EventFacade} e @param {SyntheticEvent.Notifier} notifier @protected @static **/ _onMouseDown: function (e, notifier) { VC._startPolling(e.currentTarget, notifier, {e: e}); }, /** Called when the `valuechange` event receives a new subscriber. 
@method _onSubscribe @param {Node} node @param {Subscription} sub @param {SyntheticEvent.Notifier} notifier @param {Function|String} [filter] Filter function or selector string. Only provided for delegate subscriptions. @protected @static **/ _onSubscribe: function (node, sub, notifier, filter) { var _valuechange, callbacks, nodes; callbacks = { blur : VC._onBlur, focus : VC._onFocus, keydown : VC._onKeyDown, keyup : VC._onKeyUp, mousedown: VC._onMouseDown }; // Store a utility object on the notifier to hold stuff that needs to be // passed around to trigger event handlers, polling handlers, etc. _valuechange = notifier._valuechange = {}; if (filter) { // If a filter is provided, then this is a delegated subscription. _valuechange.delegated = true; // Add a function to the notifier that we can use to find all // nodes that pass the delegate filter. _valuechange.getNodes = function () { return node.all('input,textarea').filter(filter); }; // Store the initial values for each descendant of the container // node that passes the delegate filter. _valuechange.getNodes().each(function (child) { if (!child.getData(DATA_KEY)) { child.setData(DATA_KEY, {prevVal: child.get(VALUE)}); } }); notifier._handles = Y.delegate(callbacks, node, filter, null, notifier); } else { // This is a normal (non-delegated) event subscription. if (!node.test('input,textarea')) { return; } if (!node.getData(DATA_KEY)) { node.setData(DATA_KEY, {prevVal: node.get(VALUE)}); } notifier._handles = node.on(callbacks, null, null, notifier); } }, /** Called when the `valuechange` event loses a subscriber. 
@method _onUnsubscribe @param {Node} node @param {Subscription} subscription @param {SyntheticEvent.Notifier} notifier @protected @static **/ _onUnsubscribe: function (node, subscription, notifier) { var _valuechange = notifier._valuechange; notifier._handles && notifier._handles.detach(); if (_valuechange.delegated) { _valuechange.getNodes().each(function (child) { VC._stopPolling(child, notifier); }); } else { VC._stopPolling(node, notifier); } } }; /** Synthetic event that fires when the `value` property of an `<input>` or `<textarea>` node changes as a result of a user-initiated keystroke, mouse operation, or input method editor (IME) input event. Unlike the `onchange` event, this event fires when the value actually changes and not when the element loses focus. This event also reports IME and multi-stroke input more reliably than `oninput` or the various key events across browsers. For performance reasons, only focused nodes are monitored for changes, so programmatic value changes on nodes that don't have focus won't be detected. @example YUI().use('event-valuechange', function (Y) { Y.one('#my-input').on('valueChange', function (e) { }); }); @event valuechange @param {String} prevVal Previous value prior to the latest change. @param {String} newVal New value after the latest change. @for YUI **/ config = { detach: VC._onUnsubscribe, on : VC._onSubscribe, delegate : VC._onSubscribe, detachDelegate: VC._onUnsubscribe, publishConfig: { emitFacade: true } }; Y.Event.define('valuechange', config); Y.Event.define('valueChange', config); // deprecated, but supported for backcompat Y.ValueChange = VC; }, '3.5.1' ,{requires:['event-focus', 'event-synthetic']});
sergiomt/zesped
src/webapp/js/yui/event-valuechange/event-valuechange.js
JavaScript
agpl-3.0
13,297
""" Read/Write AMQP frames over network transports. 2009-01-14 Barry Pederson <bp@barryp.org> """ # Copyright (C) 2009 Barry Pederson <bp@barryp.org> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 import re import socket # # See if Python 2.6+ SSL support is available # try: import ssl HAVE_PY26_SSL = True except: HAVE_PY26_SSL = False try: bytes except: # Python 2.5 and lower bytes = str from struct import pack, unpack AMQP_PORT = 5672 # Yes, Advanced Message Queuing Protocol Protocol is redundant AMQP_PROTOCOL_HEADER = 'AMQP\x01\x01\x09\x01'.encode('latin_1') # Match things like: [fe80::1]:5432, from RFC 2732 IPV6_LITERAL = re.compile(r'\[([\.0-9a-f:]+)\](?::(\d+))?') class _AbstractTransport(object): """ Common superclass for TCP and SSL transports """ def __init__(self, host, connect_timeout): msg = 'socket.getaddrinfo() for %s returned an empty list' % host port = AMQP_PORT m = IPV6_LITERAL.match(host) if m: host = m.group(1) if m.group(2): port = int(m.group(2)) else: if ':' in host: host, port = host.rsplit(':', 1) port = int(port) self.sock = None for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, socket.SOL_TCP): af, socktype, proto, canonname, sa = res try: self.sock = socket.socket(af, socktype, proto) self.sock.settimeout(connect_timeout) self.sock.connect(sa) except 
socket.error, msg: self.sock.close() self.sock = None continue break if not self.sock: # Didn't connect, return the most recent error message raise socket.error, msg self.sock.settimeout(None) self.sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) self._setup_transport() self._write(AMQP_PROTOCOL_HEADER) def __del__(self): self.close() def _read(self, n): """ Read exactly n bytes from the peer """ raise NotImplementedError('Must be overriden in subclass') def _setup_transport(self): """ Do any additional initialization of the class (used by the subclasses). """ pass def _shutdown_transport(self): """ Do any preliminary work in shutting down the connection. """ pass def _write(self, s): """ Completely write a string to the peer. """ raise NotImplementedError('Must be overriden in subclass') def close(self): if self.sock is not None: self._shutdown_transport() # Call shutdown first to make sure that pending messages # reach the AMQP broker if the program exits after # calling this method. self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() self.sock = None def read_frame(self): """ Read an AMQP frame. """ frame_type, channel, size = unpack('>BHI', self._read(7)) payload = self._read(size) ch = ord(self._read(1)) if ch == 206: # '\xce' return frame_type, channel, payload else: raise Exception('Framing Error, received 0x%02x while expecting 0xce' % ch) def write_frame(self, frame_type, channel, payload): """ Write out an AMQP frame. 
""" size = len(payload) self._write(pack('>BHI%dsB' % size, frame_type, channel, size, payload, 0xce)) class SSLTransport(_AbstractTransport): """ Transport that works over SSL """ def __init__(self, host, connect_timeout, ssl): if isinstance(ssl, dict): self.sslopts = ssl self.sslobj = None super(SSLTransport, self).__init__(host, connect_timeout) def _setup_transport(self): """ Wrap the socket in an SSL object, either the new Python 2.6 version, or the older Python 2.5 and lower version. """ if HAVE_PY26_SSL: if hasattr(self, 'sslopts'): self.sslobj = ssl.wrap_socket(self.sock, **self.sslopts) else: self.sslobj = ssl.wrap_socket(self.sock) self.sslobj.do_handshake() else: self.sslobj = socket.ssl(self.sock) def _shutdown_transport(self): """ Unwrap a Python 2.6 SSL socket, so we can call shutdown() """ if HAVE_PY26_SSL and (self.sslobj is not None): self.sock = self.sslobj.unwrap() self.sslobj = None def _read(self, n): """ It seems that SSL Objects read() method may not supply as much as you're asking for, at least with extremely large messages. somewhere > 16K - found this in the test_channel.py test_large unittest. """ result = self.sslobj.read(n) while len(result) < n: s = self.sslobj.read(n - len(result)) if not s: raise IOError('Socket closed') result += s return result def _write(self, s): """ Write a string out to the SSL socket fully. """ while s: n = self.sslobj.write(s) if not n: raise IOError('Socket closed') s = s[n:] class TCPTransport(_AbstractTransport): """ Transport that deals directly with TCP socket. """ def _setup_transport(self): """ Setup to _write() directly to the socket, and do our own buffered reads. 
""" self._write = self.sock.sendall self._read_buffer = bytes() def _read(self, n): """ Read exactly n bytes from the socket """ while len(self._read_buffer) < n: s = self.sock.recv(65536) if not s: raise IOError('Socket closed') self._read_buffer += s result = self._read_buffer[:n] self._read_buffer = self._read_buffer[n:] return result def create_transport(host, connect_timeout, ssl=False): """ Given a few parameters from the Connection constructor, select and create a subclass of _AbstractTransport. """ if ssl: return SSLTransport(host, connect_timeout, ssl) else: return TCPTransport(host, connect_timeout)
mzdaniel/oh-mainline
vendor/packages/amqplib/amqplib/client_0_8/transport.py
Python
agpl-3.0
7,349
$(document).delegate('.storage_graph_link', 'click', function(e){ var anchor = this, el = $(anchor), id = el.attr('data-status'); if(e.ctrlKey || e.metaKey){ return true; }else{ e.preventDefault(); } var cell = document.getElementById(id); var text = el.html(); if (text == '[:: show ::]') { anchor.innerHTML = '[:: hide ::]'; if (cell.nodeName == 'IMG') { // <img src='...'/> cell.src=anchor.href; } else { $.ajax({ type: "get", url: anchor.href, success : function(response, textStatus) { cell.style.display = 'block'; cell.parentNode.style.display = 'block'; cell.innerHTML = response; var data = $('#countTrendMeta',cell).text(); graphLineChart($('#countTrend',cell)[0],eval('('+data+')')); data = $('#longTrendMeta',cell).text(); graphLineChart($('#longTrend',cell)[0],eval('('+data+')')); data = $('#avgTrendMeta',cell).text(); graphLineChart($('#avgTrend',cell)[0],eval('('+data+')')); data = $('#errorTrendMeta',cell).text(); graphLineChart($('#errorTrend',cell)[0],eval('('+data+')')); data = $('#piechartMeta',cell).text(); graphPieChart($('#piechart',cell)[0],eval('('+data+')')); } }); } } else { anchor.innerHTML = '[:: show ::]'; cell.style.display = 'none'; cell.parentNode.style.display = 'none'; } })
jialinsun/cat
cat-home/src/main/webapp/js/storage.js
JavaScript
apache-2.0
1,379
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /** * */ package org.apache.axis2.jaxws.message.databinding.impl; import org.apache.axiom.om.OMElement; import org.apache.axiom.om.OMOutputFormat; import org.apache.axis2.jaxws.ExceptionFactory; import org.apache.axis2.jaxws.message.Message; import org.apache.axis2.jaxws.message.databinding.SOAPEnvelopeBlock; import org.apache.axis2.jaxws.message.factory.BlockFactory; import org.apache.axis2.jaxws.message.factory.MessageFactory; import org.apache.axis2.jaxws.message.impl.BlockImpl; import org.apache.axis2.jaxws.message.util.SOAPElementReader; import org.apache.axis2.jaxws.registry.FactoryRegistry; import javax.xml.namespace.QName; import javax.xml.soap.SOAPElement; import javax.xml.soap.SOAPEnvelope; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamReader; import javax.xml.stream.XMLStreamWriter; import javax.xml.ws.WebServiceException; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.UnsupportedEncodingException; /** * * */ public class SOAPEnvelopeBlockImpl extends BlockImpl implements SOAPEnvelopeBlock { /** * Called by SOAPEnvelopeBlockFactory * * @param busObject * 
@param busContext * @param qName * @param factory */ public SOAPEnvelopeBlockImpl(Object busObject, Object busContext, QName qName, BlockFactory factory) { super(busObject, busContext, (qName == null) ? getQName((SOAPEnvelope)busObject) : qName, factory); } /** * Called by SOAPEnvelopeBlockFactory * * @param omElement * @param busContext * @param qName * @param factory */ public SOAPEnvelopeBlockImpl(OMElement omElement, Object busContext, QName qName, BlockFactory factory) { super(omElement, busContext, qName, factory); } /* (non-Javadoc) * @see org.apache.axis2.jaxws.message.impl.BlockImpl#_getBOFromReader(javax.xml.stream.XMLStreamReader, java.lang.Object) */ @Override protected Object _getBOFromReader(XMLStreamReader reader, Object busContext) throws XMLStreamException, WebServiceException { MessageFactory mf = (MessageFactory)FactoryRegistry.getFactory(MessageFactory.class); Message message = mf.createFrom(reader, null); SOAPEnvelope env = message.getAsSOAPEnvelope(); this.setQName(getQName(env)); return env; } /* (non-Javadoc) * @see org.apache.axis2.jaxws.message.impl.BlockImpl#_getReaderFromBO(java.lang.Object, java.lang.Object) */ @Override protected XMLStreamReader _getReaderFromBO(Object busObj, Object busContext) throws XMLStreamException, WebServiceException { return new SOAPElementReader((SOAPElement)busObj); } /* (non-Javadoc) * @see org.apache.axis2.jaxws.message.impl.BlockImpl#_outputFromBO(java.lang.Object, java.lang.Object, javax.xml.stream.XMLStreamWriter) */ @Override protected void _outputFromBO(Object busObject, Object busContext, XMLStreamWriter writer) throws XMLStreamException, WebServiceException { XMLStreamReader reader = _getReaderFromBO(busObject, busContext); _outputFromReader(reader, writer); } /** * Get the QName of the envelope * * @param env * @return QName */ private static QName getQName(SOAPEnvelope env) { return new QName(env.getNamespaceURI(), env.getLocalName(), env.getPrefix()); } public boolean isElementData() { return 
true; } public void close() { return; // Nothing to close } public InputStream getXMLInputStream(String encoding) throws UnsupportedEncodingException { byte[] bytes = getXMLBytes(encoding); return new ByteArrayInputStream(bytes); } public Object getObject() { try { return getBusinessObject(false); } catch (XMLStreamException e) { throw ExceptionFactory.makeWebServiceException(e); } } public boolean isDestructiveRead() { return false; } public boolean isDestructiveWrite() { return false; } public byte[] getXMLBytes(String encoding) throws UnsupportedEncodingException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); OMOutputFormat format = new OMOutputFormat(); format.setCharSetEncoding(encoding); try { serialize(baos, format); baos.flush(); return baos.toByteArray(); } catch (XMLStreamException e) { throw ExceptionFactory.makeWebServiceException(e); } catch (IOException e) { throw ExceptionFactory.makeWebServiceException(e); } } }
arunasujith/wso2-axis2
modules/jaxws/src/org/apache/axis2/jaxws/message/databinding/impl/SOAPEnvelopeBlockImpl.java
Java
apache-2.0
5,741
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.cassandra.schema; import java.util.Map; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.MoreObjects; import com.google.common.base.Objects; /** * An immutable class representing keyspace parameters (durability and replication). */ public final class KeyspaceParams { public static final boolean DEFAULT_DURABLE_WRITES = true; /** * This determines durable writes for the {@link org.apache.cassandra.config.SchemaConstants#SCHEMA_KEYSPACE_NAME} * and {@link org.apache.cassandra.config.SchemaConstants#SYSTEM_KEYSPACE_NAME} keyspaces, * the only reason it is not final is for commitlog unit tests. It should only be changed for testing purposes. 
*/ @VisibleForTesting public static boolean DEFAULT_LOCAL_DURABLE_WRITES = true; public enum Option { DURABLE_WRITES, REPLICATION; @Override public String toString() { return name().toLowerCase(); } } public final boolean durableWrites; public final ReplicationParams replication; public KeyspaceParams(boolean durableWrites, ReplicationParams replication) { this.durableWrites = durableWrites; this.replication = replication; } public static KeyspaceParams create(boolean durableWrites, Map<String, String> replication) { return new KeyspaceParams(durableWrites, ReplicationParams.fromMap(replication)); } public static KeyspaceParams local() { return new KeyspaceParams(DEFAULT_LOCAL_DURABLE_WRITES, ReplicationParams.local()); } public static KeyspaceParams simple(int replicationFactor) { return new KeyspaceParams(true, ReplicationParams.simple(replicationFactor)); } public static KeyspaceParams simpleTransient(int replicationFactor) { return new KeyspaceParams(false, ReplicationParams.simple(replicationFactor)); } public static KeyspaceParams nts(Object... args) { return new KeyspaceParams(true, ReplicationParams.nts(args)); } public void validate(String name) { replication.validate(name); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof KeyspaceParams)) return false; KeyspaceParams p = (KeyspaceParams) o; return durableWrites == p.durableWrites && replication.equals(p.replication); } @Override public int hashCode() { return Objects.hashCode(durableWrites, replication); } @Override public String toString() { return MoreObjects.toStringHelper(this) .add(Option.DURABLE_WRITES.toString(), durableWrites) .add(Option.REPLICATION.toString(), replication) .toString(); } }
yhnishi/cassandra
src/java/org/apache/cassandra/schema/KeyspaceParams.java
Java
apache-2.0
3,724